hip_filename     stringlengths   5 – 84
hip_content      stringlengths   79 – 9.69M
cuda_filename    stringlengths   4 – 83
cuda_content     stringlengths   19 – 9.69M
58bad4c3ec3ba6664c9a47e24a865055fdfdb046.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <hiprand/hiprand.h> #include <ctime> #include <assert.h> // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(hipError_t stat, const char *file, int line) { if (stat != hipSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) { if (stat != HIPRAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } #include <mma.h> using namespace nvcuda; //enum MatrixLayout{ #define ROW_MAJOR 0 #define COL_MAJOR 1 //}; //ONLY THE PARAMETER HERE NEEDS TO BE CHANGED // Must be multiples of 16 for wmma code to work #define MATRIX_M (8) #define MATRIX_N (32) #define MATRIX_K (16) const int WMMA_M =8; const int WMMA_N =32; const int WMMA_K =16; typedef signed char atype; typedef signed char btype; typedef int ctype; typedef int dtype; typedef int host_type; #define A_LAYOUT ROW_MAJOR #define B_LAYOUT ROW_MAJOR #define C_LAYOUT ROW_MAJOR #define D_LAYOUT ROW_MAJOR #define NUM_CTA 1 #define WARP_IN_CTA 1 //Don't change anything after here #define THREAD_IN_WARP 32 #if A_LAYOUT==ROW_MAJOR #define LAYOUT_A wmma::row_major #define A_STRIDE MATRIX_K #else #define LAYOUT_A wmma::col_major #define A_STRIDE MATRIX_M #endif #if B_LAYOUT==ROW_MAJOR #define LAYOUT_B wmma::row_major #define B_STRIDE MATRIX_N #else #define LAYOUT_B wmma::col_major #define B_STRIDE MATRIX_K #endif #if C_LAYOUT==ROW_MAJOR #define LAYOUT_C wmma::mem_row_major #define C_STRIDE MATRIX_N #else #define LAYOUT_C wmma::mem_col_major #define C_STRIDE MATRIX_M #endif #if D_LAYOUT==ROW_MAJOR #define LAYOUT_D wmma::mem_row_major #define D_STRIDE MATRIX_N #else #define LAYOUT_D wmma::mem_col_major #define D_STRIDE MATRIX_M #endif enum MatrixInitializationType{ ZERO, ONE, RANDOM, IDENTITY, LINEAR }; int get_value(MatrixInitializationType init_type,int randomRange=6,bool RESET=false){ static int val=0; switch(init_type){ case ZERO: break; case ONE: val=1; break; case RANDOM: val=rand()%randomRange; break; case LINEAR: val++; break; default : printf("illegal MatrixInitializationType\n"); abort(); break; } if(RESET) val=0; return val; } template <typename T> void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ T val; if(layout==ROW_MAJOR) val=matrix[row*col_size+col]; else val=matrix[col*row_size+row]; printf("%.2f ",static_cast<float>(val)); } printf(";\n"); } } template <typename T> void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ if(init_type==IDENTITY){ assert(row_size==col_size);//only for square matrix can be used matrix[row*row_size+col]=static_cast<T>(1); } else{ if(layout==ROW_MAJOR){ matrix[row*col_size+col]=static_cast<T>(get_value(init_type)); } else{ matrix[col*row_size+row]=static_cast<T>(get_value(init_type)); } } } } get_value(init_type,10,true);//reseting the val counter print_matrix<T>(matrix,row_size,col_size,layout); } int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){ int index=0; if(layout==ROW_MAJOR){ index=row*col_size+col; } else{ 
index=col*row_size+row; } return index; } template <typename T> void matrix_multiply(T *result_matrix, T *matrix_a,T* matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){ for(int row=0;row<M;row++){ for(int col=0;col<N;col++){ int rindex=get_index(row,col,M,N,resultlayout); int cindex=get_index(row,col,M,N,clayout); for(int k=0;k<K;k++){ int aindex=get_index(row,k,M,K,alayout); int bindex=get_index(k,col,K,N,blayout); result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex]; } result_matrix[rindex]+=matrix_c[cindex]; } } print_matrix<T>(result_matrix,M,N,resultlayout); } template <typename T> void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ int index_a,index_b; index_a=get_index(row,col,row_size,col_size,alayout); index_b=get_index(row,col,row_size,col_size,alayout); if(matrix_a[index_a]!=matrix_b[index_b]) printf("ERROR at index row=%d col=%d\n",row,col); } } } __global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d) { unsigned int start_time=0,end_time=0; // Declare the fragments wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag; // Bounds checking wmma::load_matrix_sync(a_frag, a, A_STRIDE); wmma::load_matrix_sync(b_frag, b, B_STRIDE); wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C); start_time=clock(); wmma::mma_sync(c_frag, a_frag, b_frag, c_frag); end_time=clock(); wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D); printf("Time=%d\n",end_time-start_time); } template <typename T1,typename T2> __global__ void convert(T1 *out, T2 *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = in[idx]; } } int main(int argc, char* argv[]) { //data on device in host type format host_type *a_htype; host_type *b_htype; host_type *c_htype; host_type *d_htype; //data on device in gemm format atype *a_atype; btype *b_btype; ctype *c_ctype; dtype *d_dtype; srand(time(NULL)); host_type *a_host_wmma; host_type *b_host_wmma; host_type *c_host_wmma; host_type *d_host_wmma; host_type *d_cal_host_wmma; hipEvent_t startWMMA; hipEvent_t stopWMMA; cudaErrCheck(hipEventCreate(&startWMMA)); cudaErrCheck(hipEventCreate(&stopWMMA)); // Use tensor cores cudaErrCheck(hipMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(hipMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype))); cudaErrCheck(hipMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype))); cudaErrCheck(hipMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype))); cudaErrCheck(hipMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype))); a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type)); b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type)); c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_cal_host_wmma = 
(host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); printf("a_host\n"); initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,RANDOM); printf("b_host\n"); initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,RANDOM); printf("c_host\n"); initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,ZERO); printf("d_cal_host\n"); initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO); printf("d_cal_host\n"); matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT); cudaErrCheck(hipMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice)); cudaErrCheck(hipMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( convert<atype,host_type>) , dim3((MATRIX_M * MATRIX_K + 255) / 256), dim3(256) , 0, 0, a_atype, a_htype, MATRIX_M * MATRIX_K); hipLaunchKernelGGL(( convert<btype,host_type>) , dim3((MATRIX_K * MATRIX_N + 255) / 256), dim3(256) , 0, 0, b_btype, b_htype, MATRIX_K * MATRIX_N); hipLaunchKernelGGL(( convert<ctype,host_type>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, c_ctype, c_htype, MATRIX_M * MATRIX_N); printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K); printf("Running with wmma...\n"); cudaErrCheck(hipEventRecord(startWMMA)); hipLaunchKernelGGL(( wmma_example) , dim3(NUM_CTA),dim3(WARP_IN_CTA*THREAD_IN_WARP), 0, 0, a_atype, b_btype, c_ctype, d_dtype); cudaErrCheck(hipEventRecord(stopWMMA)); hipLaunchKernelGGL(( convert<host_type,dtype>) , dim3((MATRIX_M * MATRIX_N + 255) / 256), dim3(256) , 0, 0, d_htype, d_dtype, MATRIX_M * MATRIX_N); cudaErrCheck(hipEventSynchronize(stopWMMA)); // Error checking printf("\nChecking results...\n"); cudaErrCheck(hipMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), hipMemcpyDeviceToHost)); printf("Results verified: cublas and WMMA agree.\n\n"); float wmmaTime; cudaErrCheck(hipEventElapsedTime(&wmmaTime, startWMMA, stopWMMA)); printf("wmma took %.2fms\n", wmmaTime); cudaErrCheck(hipEventDestroy(startWMMA)); cudaErrCheck(hipEventDestroy(stopWMMA)); printf("D_CALCULATED\n"); print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); printf("D_WMMA\n"); print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); printf("CHECKING\n"); compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT); cudaErrCheck(hipFree(a_htype)); cudaErrCheck(hipFree(b_htype)); cudaErrCheck(hipFree(c_htype)); cudaErrCheck(hipFree(d_htype)); cudaErrCheck(hipFree(a_atype)); cudaErrCheck(hipFree(b_btype)); cudaErrCheck(hipFree(c_ctype)); cudaErrCheck(hipFree(d_dtype)); free(a_host_wmma); free(b_host_wmma); free(c_host_wmma); free(d_host_wmma); free(d_cal_host_wmma); cudaErrCheck(hipDeviceReset()); return 0; }
58bad4c3ec3ba6664c9a47e24a865055fdfdb046.cu
#include <stdio.h> #include <curand.h> #include <ctime> #include <assert.h> // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(curandStatus_t stat, const char *file, int line) { if (stat != CURAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } #include <mma.h> using namespace nvcuda; //enum MatrixLayout{ #define ROW_MAJOR 0 #define COL_MAJOR 1 //}; //ONLY THE PARAMETER HERE NEEDS TO BE CHANGED // Must be multiples of 16 for wmma code to work #define MATRIX_M (8) #define MATRIX_N (32) #define MATRIX_K (16) const int WMMA_M =8; const int WMMA_N =32; const int WMMA_K =16; typedef signed char atype; typedef signed char btype; typedef int ctype; typedef int dtype; typedef int host_type; #define A_LAYOUT ROW_MAJOR #define B_LAYOUT ROW_MAJOR #define C_LAYOUT ROW_MAJOR #define D_LAYOUT ROW_MAJOR #define NUM_CTA 1 #define WARP_IN_CTA 1 //Don't change anything after here #define THREAD_IN_WARP 32 #if A_LAYOUT==ROW_MAJOR #define LAYOUT_A wmma::row_major #define A_STRIDE MATRIX_K #else #define LAYOUT_A wmma::col_major #define A_STRIDE MATRIX_M #endif #if B_LAYOUT==ROW_MAJOR #define LAYOUT_B wmma::row_major #define B_STRIDE MATRIX_N #else #define LAYOUT_B wmma::col_major #define B_STRIDE MATRIX_K #endif #if C_LAYOUT==ROW_MAJOR #define LAYOUT_C wmma::mem_row_major #define C_STRIDE MATRIX_N #else #define LAYOUT_C wmma::mem_col_major #define C_STRIDE MATRIX_M #endif #if D_LAYOUT==ROW_MAJOR #define LAYOUT_D wmma::mem_row_major #define D_STRIDE MATRIX_N #else #define LAYOUT_D wmma::mem_col_major #define D_STRIDE MATRIX_M #endif enum MatrixInitializationType{ ZERO, ONE, RANDOM, IDENTITY, LINEAR }; int get_value(MatrixInitializationType init_type,int randomRange=6,bool RESET=false){ static int val=0; switch(init_type){ case ZERO: break; case ONE: val=1; break; case RANDOM: val=rand()%randomRange; break; case LINEAR: val++; break; default : printf("illegal MatrixInitializationType\n"); abort(); break; } if(RESET) val=0; return val; } template <typename T> void print_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ T val; if(layout==ROW_MAJOR) val=matrix[row*col_size+col]; else val=matrix[col*row_size+row]; printf("%.2f ",static_cast<float>(val)); } printf(";\n"); } } template <typename T> void initialize_matrix(T *matrix,int row_size,int col_size,int/*MatrixLayout*/ layout,MatrixInitializationType init_type){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ if(init_type==IDENTITY){ assert(row_size==col_size);//only for square matrix can be used matrix[row*row_size+col]=static_cast<T>(1); } else{ if(layout==ROW_MAJOR){ matrix[row*col_size+col]=static_cast<T>(get_value(init_type)); } else{ matrix[col*row_size+row]=static_cast<T>(get_value(init_type)); } } } } get_value(init_type,10,true);//reseting the val counter print_matrix<T>(matrix,row_size,col_size,layout); } int get_index(int row,int col,int row_size,int col_size,int/*MatrixLayout*/ layout){ int index=0; if(layout==ROW_MAJOR){ index=row*col_size+col; } else{ index=col*row_size+row; } return index; } template <typename T> void matrix_multiply(T *result_matrix, T *matrix_a,T* 
matrix_b,T *matrix_c,int M,int N,int K,int/*MatrixLayout*/ resultlayout,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout,int/*MatrixLayout*/ clayout){ for(int row=0;row<M;row++){ for(int col=0;col<N;col++){ int rindex=get_index(row,col,M,N,resultlayout); int cindex=get_index(row,col,M,N,clayout); for(int k=0;k<K;k++){ int aindex=get_index(row,k,M,K,alayout); int bindex=get_index(k,col,K,N,blayout); result_matrix[rindex]+=matrix_a[aindex]*matrix_b[bindex]; } result_matrix[rindex]+=matrix_c[cindex]; } } print_matrix<T>(result_matrix,M,N,resultlayout); } template <typename T> void compare_matrix(T *matrix_a, T *matrix_b,int row_size,int col_size,int/*MatrixLayout*/ alayout,int/*MatrixLayout*/ blayout){ for(int row=0;row<row_size;row++){ for(int col=0;col<col_size;col++){ int index_a,index_b; index_a=get_index(row,col,row_size,col_size,alayout); index_b=get_index(row,col,row_size,col_size,alayout); if(matrix_a[index_a]!=matrix_b[index_b]) printf("ERROR at index row=%d col=%d\n",row,col); } } } __global__ void wmma_example(atype *a, btype *b, ctype *c,dtype *d) { unsigned int start_time=0,end_time=0; // Declare the fragments wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, atype , LAYOUT_A> a_frag; wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, btype , LAYOUT_B> b_frag; wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, ctype> c_frag; // Bounds checking wmma::load_matrix_sync(a_frag, a, A_STRIDE); wmma::load_matrix_sync(b_frag, b, B_STRIDE); wmma::load_matrix_sync(c_frag, c, C_STRIDE,LAYOUT_C); start_time=clock(); wmma::mma_sync(c_frag, a_frag, b_frag, c_frag); end_time=clock(); wmma::store_matrix_sync(d, c_frag, D_STRIDE, LAYOUT_D); printf("Time=%d\n",end_time-start_time); } template <typename T1,typename T2> __global__ void convert(T1 *out, T2 *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = in[idx]; } } int main(int argc, char* argv[]) { //data on device in host type format host_type *a_htype; host_type *b_htype; host_type *c_htype; host_type *d_htype; //data on device in gemm format atype *a_atype; btype *b_btype; ctype *c_ctype; dtype *d_dtype; srand(time(NULL)); host_type *a_host_wmma; host_type *b_host_wmma; host_type *c_host_wmma; host_type *d_host_wmma; host_type *d_cal_host_wmma; cudaEvent_t startWMMA; cudaEvent_t stopWMMA; cudaErrCheck(cudaEventCreate(&startWMMA)); cudaErrCheck(cudaEventCreate(&stopWMMA)); // Use tensor cores cudaErrCheck(cudaMalloc((void**)&a_htype, MATRIX_M * MATRIX_K * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&b_htype, MATRIX_K * MATRIX_N * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&c_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&d_htype, MATRIX_M * MATRIX_N * sizeof(host_type))); cudaErrCheck(cudaMalloc((void**)&a_atype, MATRIX_M * MATRIX_K * sizeof(atype))); cudaErrCheck(cudaMalloc((void**)&b_btype, MATRIX_K * MATRIX_N * sizeof(btype))); cudaErrCheck(cudaMalloc((void**)&c_ctype, MATRIX_M * MATRIX_N * sizeof(ctype))); cudaErrCheck(cudaMalloc((void**)&d_dtype, MATRIX_M * MATRIX_N * sizeof(dtype))); a_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_K * sizeof(host_type)); b_host_wmma = (host_type*)malloc(MATRIX_K * MATRIX_N * sizeof(host_type)); c_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); d_cal_host_wmma = (host_type*)malloc(MATRIX_M * MATRIX_N * sizeof(host_type)); printf("a_host\n"); 
initialize_matrix<host_type>(a_host_wmma,MATRIX_M,MATRIX_K,A_LAYOUT,RANDOM); printf("b_host\n"); initialize_matrix<host_type>(b_host_wmma,MATRIX_K,MATRIX_N,B_LAYOUT,RANDOM); printf("c_host\n"); initialize_matrix<host_type>(c_host_wmma,MATRIX_M,MATRIX_N,C_LAYOUT,ZERO); printf("d_cal_host\n"); initialize_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,ZERO); printf("d_cal_host\n"); matrix_multiply<host_type>(d_cal_host_wmma,a_host_wmma,b_host_wmma,c_host_wmma,MATRIX_M,MATRIX_N,MATRIX_K,D_LAYOUT,A_LAYOUT,B_LAYOUT,C_LAYOUT); cudaErrCheck(cudaMemcpy(a_htype,a_host_wmma, MATRIX_M * MATRIX_K * sizeof(host_type), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(b_htype,b_host_wmma, MATRIX_K * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice)); cudaErrCheck(cudaMemcpy(c_htype,c_host_wmma, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyHostToDevice)); convert<atype,host_type> <<< (MATRIX_M * MATRIX_K + 255) / 256, 256 >>> (a_atype, a_htype, MATRIX_M * MATRIX_K); convert<btype,host_type> <<< (MATRIX_K * MATRIX_N + 255) / 256, 256 >>> (b_btype, b_htype, MATRIX_K * MATRIX_N); convert<ctype,host_type> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (c_ctype, c_htype, MATRIX_M * MATRIX_N); printf("\nM = %d, N = %d, K = %d. \n", MATRIX_M, MATRIX_N, MATRIX_K); printf("Running with wmma...\n"); cudaErrCheck(cudaEventRecord(startWMMA)); wmma_example <<< NUM_CTA,WARP_IN_CTA*THREAD_IN_WARP>>> (a_atype, b_btype, c_ctype, d_dtype); cudaErrCheck(cudaEventRecord(stopWMMA)); convert<host_type,dtype> <<< (MATRIX_M * MATRIX_N + 255) / 256, 256 >>> (d_htype, d_dtype, MATRIX_M * MATRIX_N); cudaErrCheck(cudaEventSynchronize(stopWMMA)); // Error checking printf("\nChecking results...\n"); cudaErrCheck(cudaMemcpy(d_host_wmma, d_htype, MATRIX_M * MATRIX_N * sizeof(host_type), cudaMemcpyDeviceToHost)); printf("Results verified: cublas and WMMA agree.\n\n"); float wmmaTime; cudaErrCheck(cudaEventElapsedTime(&wmmaTime, startWMMA, stopWMMA)); printf("wmma took %.2fms\n", wmmaTime); cudaErrCheck(cudaEventDestroy(startWMMA)); cudaErrCheck(cudaEventDestroy(stopWMMA)); printf("D_CALCULATED\n"); print_matrix<host_type>(d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); printf("D_WMMA\n"); print_matrix<host_type>(d_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT); printf("CHECKING\n"); compare_matrix<host_type>(d_host_wmma,d_cal_host_wmma,MATRIX_M,MATRIX_N,D_LAYOUT,D_LAYOUT); cudaErrCheck(cudaFree(a_htype)); cudaErrCheck(cudaFree(b_htype)); cudaErrCheck(cudaFree(c_htype)); cudaErrCheck(cudaFree(d_htype)); cudaErrCheck(cudaFree(a_atype)); cudaErrCheck(cudaFree(b_btype)); cudaErrCheck(cudaFree(c_ctype)); cudaErrCheck(cudaFree(d_dtype)); free(a_host_wmma); free(b_host_wmma); free(c_host_wmma); free(d_host_wmma); free(d_cal_host_wmma); cudaErrCheck(cudaDeviceReset()); return 0; }
c99188315478bf6c9db74032860b1be2b24370a4.hip
// !!! This is a file automatically generated by hipify!!!
/* maltest.cu */
#include <ftw_config_parser.h>
#include <ftw_types.h>

#include <stdio.h>
#include <math.h>
#include <unistd.h>

main(int argc, char *argv[])
{
  int device_count;
  hipError_t err = hipErrorUnknown;

  // REMOVING either device count or the readGFG ALLOWS THE MALLOC TO WORK. WTF?
  hipGetDeviceCount(&device_count);
  printf("%d device found.\n", device_count);
  err=hipGetLastError();
  fprintf(stderr, "main::%s\n", hipGetErrorString(err));

  ftw_GFG65536 *gfg = readGFG65536(stdin);

  int *p_int;
  fprintf(stderr, "main::%s\n", hipGetErrorString(err));
  fprintf(stderr, "main::p_int: %p\n", p_int);
  err = hipMalloc( (void **) &p_int, 65536 * sizeof(int));
  fprintf(stderr, "main::p_int: %p\n", p_int);
  fprintf(stderr, "main::%s\n", hipGetErrorString(err));

  sleep(1);

  hipSetDevice(1);
  err = hipMalloc( (void **) &p_int, 65536 * sizeof(int));
  fprintf(stderr, "main::%s\n", hipGetErrorString(err));
}
c99188315478bf6c9db74032860b1be2b24370a4.cu
/* maltest.cu */
#include <ftw_config_parser.h>
#include <ftw_types.h>

#include <stdio.h>
#include <math.h>
#include <unistd.h>

main(int argc, char *argv[])
{
  int device_count;
  cudaError_t err = cudaErrorUnknown;

  // REMOVING either device count or the readGFG ALLOWS THE MALLOC TO WORK. WTF?
  cudaGetDeviceCount(&device_count);
  printf("%d device found.\n", device_count);
  err=cudaGetLastError();
  fprintf(stderr, "main::%s\n", cudaGetErrorString(err));

  ftw_GFG65536 *gfg = readGFG65536(stdin);

  int *p_int;
  fprintf(stderr, "main::%s\n", cudaGetErrorString(err));
  fprintf(stderr, "main::p_int: %p\n", p_int);
  err = cudaMalloc( (void **) &p_int, 65536 * sizeof(int));
  fprintf(stderr, "main::p_int: %p\n", p_int);
  fprintf(stderr, "main::%s\n", cudaGetErrorString(err));

  sleep(1);

  cudaSetDevice(1);
  err = cudaMalloc( (void **) &p_int, 65536 * sizeof(int));
  fprintf(stderr, "main::%s\n", cudaGetErrorString(err));
}
a9bd2bce3df4caaced0c29bd14361aaceecb3065.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
//  http://opensource.org/licenses/MIT)
//=======================================================================

#include "egblas/apxdby.hpp"
#include "complex.hpp"

template <typename T>
__global__ void apxdby_kernel(size_t n, const T alpha, const T* x, size_t incx, T beta, T* y, size_t incy) {
    auto index  = threadIdx.x + blockIdx.x * blockDim.x;
    auto stride = blockDim.x * gridDim.x;

    for (; index < n; index += stride) {
        y[incy * index] = (alpha + x[incx * index]) / (beta * y[incy * index]);
    }
}

template <typename T>
void apxdby_kernel_run(size_t n, T alpha, const T* x, size_t incx, T beta, T* y, size_t incy) {
    int blockSize;
    int minGridSize;

    hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, apxdby_kernel<T>, 0, 0);

    int gridSize = ((n / incy) + blockSize - 1) / blockSize;

    hipLaunchKernelGGL(( apxdby_kernel<T>), dim3(gridSize), dim3(blockSize), 0, 0, n, alpha, x, incx, beta, y, incy);

#ifdef EGBLAS_SYNCHRONIZE
    hipDeviceSynchronize();
#endif
}

void egblas_sapxdby(size_t n, float alpha, const float* x, size_t incx, float beta, float* y, size_t incy) {
    apxdby_kernel_run(n, alpha, x, incx, beta, y, incy);
}

void egblas_dapxdby(size_t n, double alpha, const double* x, size_t incx, double beta, double* y, size_t incy) {
    apxdby_kernel_run(n, alpha, x, incx, beta, y, incy);
}

void egblas_capxdby(size_t n, hipComplex alpha, const hipComplex* x, size_t incx, hipComplex beta, hipComplex* y, size_t incy) {
    apxdby_kernel_run(n, alpha, x, incx, beta, y, incy);
}

void egblas_zapxdby(size_t n, hipDoubleComplex alpha, const hipDoubleComplex* x, size_t incx, hipDoubleComplex beta, hipDoubleComplex* y, size_t incy) {
    apxdby_kernel_run(n, alpha, x, incx, beta, y, incy);
}
a9bd2bce3df4caaced0c29bd14361aaceecb3065.cu
//=======================================================================
// Copyright (c) 2017 Baptiste Wicht
// Distributed under the terms of the MIT License.
// (See accompanying file LICENSE or copy at
//  http://opensource.org/licenses/MIT)
//=======================================================================

#include "egblas/apxdby.hpp"
#include "complex.hpp"

template <typename T>
__global__ void apxdby_kernel(size_t n, const T alpha, const T* x, size_t incx, T beta, T* y, size_t incy) {
    auto index  = threadIdx.x + blockIdx.x * blockDim.x;
    auto stride = blockDim.x * gridDim.x;

    for (; index < n; index += stride) {
        y[incy * index] = (alpha + x[incx * index]) / (beta * y[incy * index]);
    }
}

template <typename T>
void apxdby_kernel_run(size_t n, T alpha, const T* x, size_t incx, T beta, T* y, size_t incy) {
    int blockSize;
    int minGridSize;

    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, apxdby_kernel<T>, 0, 0);

    int gridSize = ((n / incy) + blockSize - 1) / blockSize;

    apxdby_kernel<T><<<gridSize, blockSize>>>(n, alpha, x, incx, beta, y, incy);

#ifdef EGBLAS_SYNCHRONIZE
    cudaDeviceSynchronize();
#endif
}

void egblas_sapxdby(size_t n, float alpha, const float* x, size_t incx, float beta, float* y, size_t incy) {
    apxdby_kernel_run(n, alpha, x, incx, beta, y, incy);
}

void egblas_dapxdby(size_t n, double alpha, const double* x, size_t incx, double beta, double* y, size_t incy) {
    apxdby_kernel_run(n, alpha, x, incx, beta, y, incy);
}

void egblas_capxdby(size_t n, cuComplex alpha, const cuComplex* x, size_t incx, cuComplex beta, cuComplex* y, size_t incy) {
    apxdby_kernel_run(n, alpha, x, incx, beta, y, incy);
}

void egblas_zapxdby(size_t n, cuDoubleComplex alpha, const cuDoubleComplex* x, size_t incx, cuDoubleComplex beta, cuDoubleComplex* y, size_t incy) {
    apxdby_kernel_run(n, alpha, x, incx, beta, y, incy);
}
c162b33492003cf0ee5d4d446c6e222d429d86cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //========================================================================================================================================================================================================200 // findRangeK function //========================================================================================================================================================================================================200 __global__ void findRangeK( long height, knode *knodesD, long knodes_elem, long *currKnodeD, long *offsetD, long *lastKnodeD, long *offset_2D, int *startD, int *endD, int *RecstartD, int *ReclenD) { // private thread IDs int thid = threadIdx.x; int bid = blockIdx.x; // ??? int i; for(i = 0; i < height; i++){ if((knodesD[currKnodeD[bid]].keys[thid] <= startD[bid]) && (knodesD[currKnodeD[bid]].keys[thid+1] > startD[bid])){ // this conditional statement is inserted to avoid crush due to but in original code // "offset[bid]" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault // more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address if(knodesD[currKnodeD[bid]].indices[thid] < knodes_elem){ offsetD[bid] = knodesD[currKnodeD[bid]].indices[thid]; } } if((knodesD[lastKnodeD[bid]].keys[thid] <= endD[bid]) && (knodesD[lastKnodeD[bid]].keys[thid+1] > endD[bid])){ // this conditional statement is inserted to avoid crush due to but in original code // "offset_2[bid]" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault // more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address if(knodesD[lastKnodeD[bid]].indices[thid] < knodes_elem){ offset_2D[bid] = knodesD[lastKnodeD[bid]].indices[thid]; } } __syncthreads(); // set for next tree level if(thid==0){ currKnodeD[bid] = offsetD[bid]; lastKnodeD[bid] = offset_2D[bid]; } __syncthreads(); } // Find the index of the starting record if(knodesD[currKnodeD[bid]].keys[thid] == startD[bid]){ RecstartD[bid] = knodesD[currKnodeD[bid]].indices[thid]; } __syncthreads(); // Find the index of the ending record if(knodesD[lastKnodeD[bid]].keys[thid] == endD[bid]){ ReclenD[bid] = knodesD[lastKnodeD[bid]].indices[thid] - RecstartD[bid]+1; } } //========================================================================================================================================================================================================200 // End //========================================================================================================================================================================================================200
c162b33492003cf0ee5d4d446c6e222d429d86cb.cu
//========================================================================================================================================================================================================200 // findRangeK function //========================================================================================================================================================================================================200 __global__ void findRangeK( long height, knode *knodesD, long knodes_elem, long *currKnodeD, long *offsetD, long *lastKnodeD, long *offset_2D, int *startD, int *endD, int *RecstartD, int *ReclenD) { // private thread IDs int thid = threadIdx.x; int bid = blockIdx.x; // ??? int i; for(i = 0; i < height; i++){ if((knodesD[currKnodeD[bid]].keys[thid] <= startD[bid]) && (knodesD[currKnodeD[bid]].keys[thid+1] > startD[bid])){ // this conditional statement is inserted to avoid crush due to but in original code // "offset[bid]" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault // more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address if(knodesD[currKnodeD[bid]].indices[thid] < knodes_elem){ offsetD[bid] = knodesD[currKnodeD[bid]].indices[thid]; } } if((knodesD[lastKnodeD[bid]].keys[thid] <= endD[bid]) && (knodesD[lastKnodeD[bid]].keys[thid+1] > endD[bid])){ // this conditional statement is inserted to avoid crush due to but in original code // "offset_2[bid]" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault // more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address if(knodesD[lastKnodeD[bid]].indices[thid] < knodes_elem){ offset_2D[bid] = knodesD[lastKnodeD[bid]].indices[thid]; } } __syncthreads(); // set for next tree level if(thid==0){ currKnodeD[bid] = offsetD[bid]; lastKnodeD[bid] = offset_2D[bid]; } __syncthreads(); } // Find the index of the starting record if(knodesD[currKnodeD[bid]].keys[thid] == startD[bid]){ RecstartD[bid] = knodesD[currKnodeD[bid]].indices[thid]; } __syncthreads(); // Find the index of the ending record if(knodesD[lastKnodeD[bid]].keys[thid] == endD[bid]){ ReclenD[bid] = knodesD[lastKnodeD[bid]].indices[thid] - RecstartD[bid]+1; } } //========================================================================================================================================================================================================200 // End //========================================================================================================================================================================================================200
d4aed48bf302861632876888606049ce8c9fb587.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_unpool.h" #include "hip/hip_fp16.h" namespace anakin { namespace saber { template <typename Dtype> __global__ void ker_unpool_max_fwd(Dtype * out_data, \ const Dtype* in_data, const Dtype* in_max_index, const int in_n_stride, const int in_c_stride, const int out_n_stride, const int out_c_stride, const int in_n, const int in_c, const int num_threads) { CUDA_KERNEL_LOOP(tid, num_threads){ int n = (tid / in_n_stride) % in_n; int c = (tid / in_c_stride) % in_c; int out_offset = n * out_n_stride + c * out_c_stride; int index = in_max_index[tid]; out_data[out_offset + index] = in_data[tid]; } } template <DataType OpDtype, DataType inDtype, DataType outDtype, typename LayOutType_op, typename LayOutType_in, typename LayOutType_out> SaberStatus SaberUnpool<NV, OpDtype, inDtype, outDtype,\ LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\ const std::vector<Tensor<NV, inDtype, LayOutType_in> *>& inputs, \ std::vector<Tensor<NV, outDtype, LayOutType_out> *>& outputs,\ PoolingParam<OpTensor>& param) { const InDataType* in_data = inputs[0]->data(); const OutDataType* in_max_index = inputs[1]->data(); OutDataType* out_data = outputs[0]->mutable_data(); hipStream_t cuda_stream = this->_ctx.get_compute_stream(); int count = inputs[0]->valid_size(); int in_n = inputs[0]->num(); int in_c = inputs[0]->channel(); if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { hipMemsetAsync(out_data, 0, sizeof(InDataType) * outputs[0]->valid_size(), cuda_stream); hipLaunchKernelGGL(( ker_unpool_max_fwd<InDataType>)\ , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \ out_data, in_data, in_max_index,\ _in_n_stride, _in_c_stride,\ _out_n_stride, _out_c_stride,\ in_n, in_c, count); } return SaberSuccess; } } //namespace saber } //namespace anakin
d4aed48bf302861632876888606049ce8c9fb587.cu
#include "saber/funcs/impl/cuda/saber_unpool.h" #include "cuda_fp16.h" namespace anakin { namespace saber { template <typename Dtype> __global__ void ker_unpool_max_fwd(Dtype * out_data, \ const Dtype* in_data, const Dtype* in_max_index, const int in_n_stride, const int in_c_stride, const int out_n_stride, const int out_c_stride, const int in_n, const int in_c, const int num_threads) { CUDA_KERNEL_LOOP(tid, num_threads){ int n = (tid / in_n_stride) % in_n; int c = (tid / in_c_stride) % in_c; int out_offset = n * out_n_stride + c * out_c_stride; int index = in_max_index[tid]; out_data[out_offset + index] = in_data[tid]; } } template <DataType OpDtype, DataType inDtype, DataType outDtype, typename LayOutType_op, typename LayOutType_in, typename LayOutType_out> SaberStatus SaberUnpool<NV, OpDtype, inDtype, outDtype,\ LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(\ const std::vector<Tensor<NV, inDtype, LayOutType_in> *>& inputs, \ std::vector<Tensor<NV, outDtype, LayOutType_out> *>& outputs,\ PoolingParam<OpTensor>& param) { const InDataType* in_data = inputs[0]->data(); const OutDataType* in_max_index = inputs[1]->data(); OutDataType* out_data = outputs[0]->mutable_data(); cudaStream_t cuda_stream = this->_ctx.get_compute_stream(); int count = inputs[0]->valid_size(); int in_n = inputs[0]->num(); int in_c = inputs[0]->channel(); if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { cudaMemsetAsync(out_data, 0, sizeof(InDataType) * outputs[0]->valid_size(), cuda_stream); ker_unpool_max_fwd<InDataType>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, in_data, in_max_index,\ _in_n_stride, _in_c_stride,\ _out_n_stride, _out_c_stride,\ in_n, in_c, count); } return SaberSuccess; } } //namespace saber } //namespace anakin
4749195b5699036660900beab2262db79bdc1a86.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "VoxelInvert.h"

__global__ void KernelTestWriteSurface(uint32_t voxel_dim, hipSurfaceObject_t surf_obj);

void LaunchKernelVoxelInvert(uint32_t voxel_dim, hipSurfaceObject_t surf_obj)
{
	dim3 block_dim(8, 8, 8);
	dim3 grid_dim(voxel_dim / block_dim.x, voxel_dim / block_dim.y, voxel_dim / block_dim.z);

	KernelTestWriteSurface << <grid_dim, block_dim >> >(voxel_dim, surf_obj);
	HANDLE_KERNEL_ERROR_SYNC;
}

__global__ void KernelTestWriteSurface(uint32_t voxel_dim, hipSurfaceObject_t surf_obj)
{
	uint32_t x = blockIdx.x*blockDim.x + threadIdx.x;
	uint32_t y = blockIdx.y*blockDim.y + threadIdx.y;
	uint32_t z = blockIdx.z*blockDim.z + threadIdx.z;

	if (x >= voxel_dim || y >= voxel_dim || z >= voxel_dim) return;

	uint32_t element;
	surf3Dread(&element, surf_obj, x * sizeof(uint32_t), y, z);

	uint8_t r = (element & 0xFF000000) >> 24U;
	r = 0xFF - r;
	uint8_t g = (element & 0x00FF0000) >> 16U;
	g = 0xFF - g;
	uint8_t b = (element & 0x0000FF00) >> 8U;
	b = 0xFF - b;
	uint8_t a = (element & 0x000000FF);

	element = (r & 0x000000FF) << 24U | (g & 0x000000FF) << 16U | (b & 0x000000FF) << 8U | (a & 0x000000FF);

	//element.x = 0xFF - element.x;
	//element.y = 0xFF - element.y;
	//element.z = 0xFF - element.z;
	//element.w = element.w;

	//surf3Dwrite(element, surf_obj, x * sizeof(uint32_t), y, z);
}
4749195b5699036660900beab2262db79bdc1a86.cu
#pragma once
#include "VoxelInvert.h"

__global__ void KernelTestWriteSurface(uint32_t voxel_dim, cudaSurfaceObject_t surf_obj);

void LaunchKernelVoxelInvert(uint32_t voxel_dim, cudaSurfaceObject_t surf_obj)
{
	dim3 block_dim(8, 8, 8);
	dim3 grid_dim(voxel_dim / block_dim.x, voxel_dim / block_dim.y, voxel_dim / block_dim.z);

	KernelTestWriteSurface << <grid_dim, block_dim >> >(voxel_dim, surf_obj);
	HANDLE_KERNEL_ERROR_SYNC;
}

__global__ void KernelTestWriteSurface(uint32_t voxel_dim, cudaSurfaceObject_t surf_obj)
{
	uint32_t x = blockIdx.x*blockDim.x + threadIdx.x;
	uint32_t y = blockIdx.y*blockDim.y + threadIdx.y;
	uint32_t z = blockIdx.z*blockDim.z + threadIdx.z;

	if (x >= voxel_dim || y >= voxel_dim || z >= voxel_dim) return;

	uint32_t element;
	surf3Dread(&element, surf_obj, x * sizeof(uint32_t), y, z);

	uint8_t r = (element & 0xFF000000) >> 24U;
	r = 0xFF - r;
	uint8_t g = (element & 0x00FF0000) >> 16U;
	g = 0xFF - g;
	uint8_t b = (element & 0x0000FF00) >> 8U;
	b = 0xFF - b;
	uint8_t a = (element & 0x000000FF);

	element = (r & 0x000000FF) << 24U | (g & 0x000000FF) << 16U | (b & 0x000000FF) << 8U | (a & 0x000000FF);

	//element.x = 0xFF - element.x;
	//element.y = 0xFF - element.y;
	//element.z = 0xFF - element.z;
	//element.w = element.w;

	//surf3Dwrite(element, surf_obj, x * sizeof(uint32_t), y, z);
}
a67fb33dd4a10c38458fcfff011c96915ed5a289.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"

namespace StreamCompaction {
namespace Common {

/**
 * Maps an array to an array of 0s and 1s for stream compaction. Elements
 * which map to 0 will be removed, and elements which map to 1 will be kept.
 */
__global__ void kernMapToBoolean(int n, int *bools, const PathSegment *idata) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index < n){
        bools[index] = (idata[index].remainingBounces != 0);
    }
}

/**
 * Performs scatter on an array. That is, for each element in idata,
 * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
 */
__global__ void kernScatter(int n, PathSegment *odata, const PathSegment *idata, const int *bools, const int *indices) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index < n && bools[index]){
        odata[indices[index]] = idata[index];
    }
}

}
}
a67fb33dd4a10c38458fcfff011c96915ed5a289.cu
#include "common.h"

namespace StreamCompaction {
namespace Common {

/**
 * Maps an array to an array of 0s and 1s for stream compaction. Elements
 * which map to 0 will be removed, and elements which map to 1 will be kept.
 */
__global__ void kernMapToBoolean(int n, int *bools, const PathSegment *idata) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index < n){
        bools[index] = (idata[index].remainingBounces != 0);
    }
}

/**
 * Performs scatter on an array. That is, for each element in idata,
 * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
 */
__global__ void kernScatter(int n, PathSegment *odata, const PathSegment *idata, const int *bools, const int *indices) {
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index < n && bools[index]){
        odata[indices[index]] = idata[index];
    }
}

}
}
8d567859fb25cde3ac768b9c0abfd8a62bb4ee22.hip
// !!! This is a file automatically generated by hipify!!! #include <unittest/unittest.h> #include <cusp/detail/lu.h> void TestLUFactorAndSolve(void) { cusp::array2d<float, cusp::host_memory> A(4,4); A(0,0) = 0.83228434; A(0,1) = 0.41106598; A(0,2) = 0.72609841; A(0,3) = 0.80428486; A(1,0) = 0.00890590; A(1,1) = 0.29940800; A(1,2) = 0.60630740; A(1,3) = 0.33654542; A(2,0) = 0.22525064; A(2,1) = 0.93054253; A(2,2) = 0.37939225; A(2,3) = 0.16235888; A(3,0) = 0.83911960; A(3,1) = 0.21176293; A(3,2) = 0.21010691; A(3,3) = 0.52911885; cusp::array1d<float, cusp::host_memory> b(4); b[0] = 1.31699541; b[1] = 0.87768331; b[2] = 1.18994714; b[3] = 0.61914723; // std::cout << "\nA" << std::endl; // cusp::print_matrix(A); // std::cout << "b" << std::endl; // cusp::print_matrix(b); cusp::array1d<int, cusp::host_memory> pivot(4); cusp::array1d<float, cusp::host_memory> x(4); cusp::detail::lu_factor(A, pivot); cusp::detail::lu_solve(A, pivot, b, x); // std::cout << "LU" << std::endl; // cusp::print_matrix(A); // std::cout << "pivot" << std::endl; // cusp::print_matrix(pivot); // std::cout << "x" << std::endl; // cusp::print_matrix(x); cusp::array1d<float, cusp::host_memory> expected(4); expected[0] = 0.21713221; expected[1] = 0.80528582; expected[2] = 0.98416811; expected[3] = 0.11271028; ASSERT_EQUAL(::fabs(expected[0] - x[0]) < 1e-4, true); ASSERT_EQUAL(::fabs(expected[1] - x[1]) < 1e-4, true); ASSERT_EQUAL(::fabs(expected[2] - x[2]) < 1e-4, true); ASSERT_EQUAL(::fabs(expected[3] - x[3]) < 1e-4, true); } DECLARE_UNITTEST(TestLUFactorAndSolve); void TestLUSolver(void) { cusp::array2d<float, cusp::host_memory> A(3,3); A(0,0) = 2.0; A(0,1) = 0.0; A(0,2) = 0.0; A(1,0) = 0.0; A(1,1) = 4.0; A(1,2) = 0.0; A(2,0) = 0.0; A(2,1) = 0.0; A(2,2) = 8.0; cusp::array1d<float, cusp::host_memory> b(3, 1.0); cusp::array1d<float, cusp::host_memory> x(3, 0.0); cusp::detail::lu_solver<float, cusp::host_memory> lu(A); lu(b, x); ASSERT_EQUAL(x[0], 0.500); ASSERT_EQUAL(x[1], 0.250); ASSERT_EQUAL(x[2], 0.125); } DECLARE_UNITTEST(TestLUSolver);
8d567859fb25cde3ac768b9c0abfd8a62bb4ee22.cu
#include <unittest/unittest.h> #include <cusp/detail/lu.h> void TestLUFactorAndSolve(void) { cusp::array2d<float, cusp::host_memory> A(4,4); A(0,0) = 0.83228434; A(0,1) = 0.41106598; A(0,2) = 0.72609841; A(0,3) = 0.80428486; A(1,0) = 0.00890590; A(1,1) = 0.29940800; A(1,2) = 0.60630740; A(1,3) = 0.33654542; A(2,0) = 0.22525064; A(2,1) = 0.93054253; A(2,2) = 0.37939225; A(2,3) = 0.16235888; A(3,0) = 0.83911960; A(3,1) = 0.21176293; A(3,2) = 0.21010691; A(3,3) = 0.52911885; cusp::array1d<float, cusp::host_memory> b(4); b[0] = 1.31699541; b[1] = 0.87768331; b[2] = 1.18994714; b[3] = 0.61914723; // std::cout << "\nA" << std::endl; // cusp::print_matrix(A); // std::cout << "b" << std::endl; // cusp::print_matrix(b); cusp::array1d<int, cusp::host_memory> pivot(4); cusp::array1d<float, cusp::host_memory> x(4); cusp::detail::lu_factor(A, pivot); cusp::detail::lu_solve(A, pivot, b, x); // std::cout << "LU" << std::endl; // cusp::print_matrix(A); // std::cout << "pivot" << std::endl; // cusp::print_matrix(pivot); // std::cout << "x" << std::endl; // cusp::print_matrix(x); cusp::array1d<float, cusp::host_memory> expected(4); expected[0] = 0.21713221; expected[1] = 0.80528582; expected[2] = 0.98416811; expected[3] = 0.11271028; ASSERT_EQUAL(std::fabs(expected[0] - x[0]) < 1e-4, true); ASSERT_EQUAL(std::fabs(expected[1] - x[1]) < 1e-4, true); ASSERT_EQUAL(std::fabs(expected[2] - x[2]) < 1e-4, true); ASSERT_EQUAL(std::fabs(expected[3] - x[3]) < 1e-4, true); } DECLARE_UNITTEST(TestLUFactorAndSolve); void TestLUSolver(void) { cusp::array2d<float, cusp::host_memory> A(3,3); A(0,0) = 2.0; A(0,1) = 0.0; A(0,2) = 0.0; A(1,0) = 0.0; A(1,1) = 4.0; A(1,2) = 0.0; A(2,0) = 0.0; A(2,1) = 0.0; A(2,2) = 8.0; cusp::array1d<float, cusp::host_memory> b(3, 1.0); cusp::array1d<float, cusp::host_memory> x(3, 0.0); cusp::detail::lu_solver<float, cusp::host_memory> lu(A); lu(b, x); ASSERT_EQUAL(x[0], 0.500); ASSERT_EQUAL(x[1], 0.250); ASSERT_EQUAL(x[2], 0.125); } DECLARE_UNITTEST(TestLUSolver);
6c1d977d0fdbdacfc49a99b67662a73c6aeb0b36.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void sub_f32 (float* left_op, float* right_op, float* output, int len)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len)
    {
        output[idx] = left_op[idx] - right_op[idx];
    }
}
6c1d977d0fdbdacfc49a99b67662a73c6aeb0b36.cu
#include "includes.h"

__global__ void sub_f32 (float* left_op, float* right_op, float* output, int len)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len)
    {
        output[idx] = left_op[idx] - right_op[idx];
    }
}
8307be3f5c091d698c2af6b820de209bfdaf184b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <errno.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <cutil_inline.h> #include <unistd.h> #include "benchmark_common.h" #include <iomanip> #include <vector> #include "../src/gpgpu-sim/ConfigOptions.h" #include "../src/gpgpu-sim/App.h" #include "../src/common.h" thread_to_appID_struct* thread_to_appID; struct app_data { app_data(char* app_name, pthread_mutex_t* app_mutex, bool concurrent, hipEvent_t* done, std::vector<hipEvent_t>* done_events, size_t app_num) : app_name(app_name), app_mutex(app_mutex), concurrent(concurrent), done(done), done_events(done_events), appID(app_num) { cutilSafeCall(hipStreamCreate(&stream)); } hipStream_t stream; hipEvent_t* done; std::vector<hipEvent_t>* done_events; char* app_name; pthread_mutex_t* app_mutex; bool concurrent; size_t appID; }; #define N 32 #define ITER 10 #define BLOCKS 30 #define THREADS 2 __global__ void add(int *a, int *b) { /* int bi = blockIdx.x; */ int bi = 0; int ti = threadIdx.x; int eti = bi * THREADS + ti; int sum = 0; for(int i = 0; i < ITER; i++) { int index = 4096 * ( ITER * eti + i); sum += a[index]; } int index = 4096 * ( ITER * eti); a[index] = sum; } int main() { int n_apps = 1; std::vector<app_data> apps; std::vector<hipEvent_t> done_events(n_apps, NULL); pthread_mutex_t app_mutex; pthread_mutex_init(&app_mutex, NULL); thread_to_appID = (thread_to_appID_struct *)malloc(sizeof(thread_to_appID_struct)); thread_to_appID->init(); ConfigOptions::n_apps = n_apps; cutilSafeCall(hipEventCreate(&done_events[0])); apps.push_back(app_data("simple", &app_mutex, false, &done_events[0], &done_events, 0)); struct app_data *app = &apps[0]; printf("---------------------------%d stream\n\n\n", app->stream); printf("Launch code in main.cu:launching a new benchmark, appID = %d, already registered? 
= %d\n", app->appID, App::is_registered(app->appID)); if(App::is_registered(app->appID)) thread_to_appID->add((void*)pthread_self(), App::get_app_id(app->appID)); else thread_to_appID->add((void*)pthread_self(), App::register_app(app->appID)); /* 4 gb */ int size = 4096 * ITER * BLOCKS * THREADS; /* int size = 32 * 4096; */ int *ha, *hb; ha = (int*) malloc(sizeof(int) * size); hb = (int*) malloc(sizeof(int) * size); if(ha == NULL || hb == NULL) { printf("memory allocation failed\n"); exit(0); } int *da, *db; hipMalloc((void **)&da, size*sizeof(int)); hipMalloc((void **)&db, size*sizeof(int)); /* for (int i = 0; i<N; i+=32) { */ /* ha[i] = i; */ /* } */ hipMemcpyAsync(da, ha, size*sizeof(int), hipMemcpyHostToDevice, app->stream); hipLaunchKernelGGL(( add), dim3(BLOCKS), dim3(THREADS), 0, app->stream, da, db); /* cutilSafeCall(hipDeviceSynchronize()); */ cutilSafeCall(hipStreamSynchronize(app->stream)); /* add<<<1, N, 0, app->stream>>>(da, db); */ /* cutilSafeCall(hipStreamSynchronize(app->stream)); */ hipMemcpyAsync(hb, db, size*sizeof(int), hipMemcpyDeviceToHost, app->stream); cutilSafeCall(hipEventRecord(*app->done, app->stream)); /* cutilSafeCall(hipStreamSynchronize(app->stream)); */ /* /1* cutilSafeCall(hipDeviceSynchronize()); *1/ */ /* hipFree(da); */ /* hipFree(db); */ bool some_running; do { some_running = false; for (std::vector<hipEvent_t>::iterator e = app->done_events->begin(); e != app->done_events->end(); e++) { if (hipEventQuery(*e) == hipErrorNotReady) { some_running = true; break; } } sleep(1); }while(some_running); sleep(1); cutilSafeCall(hipStreamDestroy(apps[0].stream)); /* for (int i = 0; i<N; ++i) { */ /* printf("%d\n", hb[i]); */ /* } */ return 0; }
8307be3f5c091d698c2af6b820de209bfdaf184b.cu
#include <stdio.h> #include <errno.h> #include <pthread.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <cutil_inline.h> #include <unistd.h> #include "benchmark_common.h" #include <iomanip> #include <vector> #include "../src/gpgpu-sim/ConfigOptions.h" #include "../src/gpgpu-sim/App.h" #include "../src/common.h" thread_to_appID_struct* thread_to_appID; struct app_data { app_data(char* app_name, pthread_mutex_t* app_mutex, bool concurrent, cudaEvent_t* done, std::vector<cudaEvent_t>* done_events, size_t app_num) : app_name(app_name), app_mutex(app_mutex), concurrent(concurrent), done(done), done_events(done_events), appID(app_num) { cutilSafeCall(cudaStreamCreate(&stream)); } cudaStream_t stream; cudaEvent_t* done; std::vector<cudaEvent_t>* done_events; char* app_name; pthread_mutex_t* app_mutex; bool concurrent; size_t appID; }; #define N 32 #define ITER 10 #define BLOCKS 30 #define THREADS 2 __global__ void add(int *a, int *b) { /* int bi = blockIdx.x; */ int bi = 0; int ti = threadIdx.x; int eti = bi * THREADS + ti; int sum = 0; for(int i = 0; i < ITER; i++) { int index = 4096 * ( ITER * eti + i); sum += a[index]; } int index = 4096 * ( ITER * eti); a[index] = sum; } int main() { int n_apps = 1; std::vector<app_data> apps; std::vector<cudaEvent_t> done_events(n_apps, NULL); pthread_mutex_t app_mutex; pthread_mutex_init(&app_mutex, NULL); thread_to_appID = (thread_to_appID_struct *)malloc(sizeof(thread_to_appID_struct)); thread_to_appID->init(); ConfigOptions::n_apps = n_apps; cutilSafeCall(cudaEventCreate(&done_events[0])); apps.push_back(app_data("simple", &app_mutex, false, &done_events[0], &done_events, 0)); struct app_data *app = &apps[0]; printf("---------------------------%d stream\n\n\n", app->stream); printf("Launch code in main.cu:launching a new benchmark, appID = %d, already registered? 
= %d\n", app->appID, App::is_registered(app->appID)); if(App::is_registered(app->appID)) thread_to_appID->add((void*)pthread_self(), App::get_app_id(app->appID)); else thread_to_appID->add((void*)pthread_self(), App::register_app(app->appID)); /* 4 gb */ int size = 4096 * ITER * BLOCKS * THREADS; /* int size = 32 * 4096; */ int *ha, *hb; ha = (int*) malloc(sizeof(int) * size); hb = (int*) malloc(sizeof(int) * size); if(ha == NULL || hb == NULL) { printf("memory allocation failed\n"); exit(0); } int *da, *db; cudaMalloc((void **)&da, size*sizeof(int)); cudaMalloc((void **)&db, size*sizeof(int)); /* for (int i = 0; i<N; i+=32) { */ /* ha[i] = i; */ /* } */ cudaMemcpyAsync(da, ha, size*sizeof(int), cudaMemcpyHostToDevice, app->stream); add<<<BLOCKS, THREADS, 0, app->stream>>>(da, db); /* cutilSafeCall(cudaThreadSynchronize()); */ cutilSafeCall(cudaStreamSynchronize(app->stream)); /* add<<<1, N, 0, app->stream>>>(da, db); */ /* cutilSafeCall(cudaStreamSynchronize(app->stream)); */ cudaMemcpyAsync(hb, db, size*sizeof(int), cudaMemcpyDeviceToHost, app->stream); cutilSafeCall(cudaEventRecord(*app->done, app->stream)); /* cutilSafeCall(cudaStreamSynchronize(app->stream)); */ /* /1* cutilSafeCall(cudaThreadSynchronize()); *1/ */ /* cudaFree(da); */ /* cudaFree(db); */ bool some_running; do { some_running = false; for (std::vector<cudaEvent_t>::iterator e = app->done_events->begin(); e != app->done_events->end(); e++) { if (cudaEventQuery(*e) == cudaErrorNotReady) { some_running = true; break; } } sleep(1); }while(some_running); sleep(1); cutilSafeCall(cudaStreamDestroy(apps[0].stream)); /* for (int i = 0; i<N; ++i) { */ /* printf("%d\n", hb[i]); */ /* } */ return 0; }
ee5ee1b24da4d82e5905d149be2b1e210911bb08.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <cmath> #include <cstdint> #include <hip/hip_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/numeric.cuh" #include "chainerx/cuda/op_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/routines/math.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct SquareImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = x * x; } }; } // namespace void CudaDevice::Square(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(SquareImpl<T>{}, x, out); }); } namespace { template <typename T> struct SqrtImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Sqrt(x); } }; } // namespace void CudaDevice::Sqrt(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype()); CudaSetDeviceScope scope{index()}; VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(SqrtImpl<T>{}, x_cast, out); }); } namespace { template <typename T> struct IsNanImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, bool& out) { out = cuda::IsNan(x); } }; } // namespace void CudaDevice::IsNan(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; VisitDtype(x.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, bool>(IsNanImpl<T>{}, x, out); }); } namespace { template <typename T> struct IsInfImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, bool& out) { out = cuda::IsInf(x); } }; } // namespace void CudaDevice::IsInf(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; VisitDtype(x.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, bool>(IsInfImpl<T>{}, x, out); }); } template <typename T> struct CeilImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Ceil(x); } }; class CudaCeilOp : public CeilOp { public: void Call(const Array& x, const Array& out) override { Device& device = x.device(); device.CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{device.index()}; const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype()); VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CeilImpl<T>{}, x_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(CeilOp, CudaCeilOp); } // namespace cuda } // namespace chainerx
ee5ee1b24da4d82e5905d149be2b1e210911bb08.cu
#include "chainerx/cuda/cuda_device.h" #include <cmath> #include <cstdint> #include <cuda_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/numeric.cuh" #include "chainerx/cuda/op_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/routines/math.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct SquareImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = x * x; } }; } // namespace void CudaDevice::Square(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(SquareImpl<T>{}, x, out); }); } namespace { template <typename T> struct SqrtImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Sqrt(x); } }; } // namespace void CudaDevice::Sqrt(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype()); CudaSetDeviceScope scope{index()}; VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(SqrtImpl<T>{}, x_cast, out); }); } namespace { template <typename T> struct IsNanImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, bool& out) { out = cuda::IsNan(x); } }; } // namespace void CudaDevice::IsNan(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; VisitDtype(x.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, bool>(IsNanImpl<T>{}, x, out); }); } namespace { template <typename T> struct IsInfImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, bool& out) { out = cuda::IsInf(x); } }; } // namespace void CudaDevice::IsInf(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; VisitDtype(x.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, bool>(IsInfImpl<T>{}, x, out); }); } template <typename T> struct CeilImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Ceil(x); } }; class CudaCeilOp : public CeilOp { public: void Call(const Array& x, const Array& out) override { Device& device = x.device(); device.CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{device.index()}; const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype()); VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CeilImpl<T>{}, x_cast, out); }); } }; CHAINERX_REGISTER_OP_CUDA(CeilOp, CudaCeilOp); } // namespace cuda } // namespace chainerx
096ca69a5ff365a3b37f36f4d271c397da0f9d93.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void fupdate(float *f, float *z, float *g, float invlambda, int nx, int ny) {
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = px + py*nx;
    float DIVZ;
    if (px<nx && py<ny) {
        // compute the divergence
        DIVZ = 0;
        if (!(px == (nx - 1))) DIVZ += z[2 * (idx)+0];
        if (!(py == (ny - 1))) DIVZ += z[2 * (idx)+1];
        if (!(px == 0)) DIVZ -= z[2 * (idx - 1) + 0];
        if (!(py == 0)) DIVZ -= z[2 * (idx - nx) + 1];

        // update f
        f[idx] = DIVZ - g[idx] * invlambda;
    }
}
096ca69a5ff365a3b37f36f4d271c397da0f9d93.cu
#include "includes.h"

__global__ void fupdate(float *f, float *z, float *g, float invlambda, int nx, int ny) {
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = px + py*nx;
    float DIVZ;
    if (px<nx && py<ny) {
        // compute the divergence
        DIVZ = 0;
        if (!(px == (nx - 1))) DIVZ += z[2 * (idx)+0];
        if (!(py == (ny - 1))) DIVZ += z[2 * (idx)+1];
        if (!(px == 0)) DIVZ -= z[2 * (idx - 1) + 0];
        if (!(py == 0)) DIVZ -= z[2 * (idx - nx) + 1];

        // update f
        f[idx] = DIVZ - g[idx] * invlambda;
    }
}
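The fupdate kernel in this pair expects one thread per (px, py) site of an nx-by-ny grid, with z packed as two interleaved floats per pixel for the divergence stencil. A possible host-side launch is sketched below; the 16x16 block shape and the d_f/d_z/d_g buffer names are illustrative assumptions, and the sketch presumes it is compiled alongside the kernel above.

#include <cuda_runtime.h>

// Kernel defined in the file above.
__global__ void fupdate(float *f, float *z, float *g, float invlambda, int nx, int ny);

// Hypothetical launcher: one thread per pixel, grid rounded up to cover nx x ny.
void launch_fupdate(float* d_f, float* d_z, float* d_g,
                    float invlambda, int nx, int ny) {
    dim3 block(16, 16);
    dim3 grid((nx + block.x - 1) / block.x,
              (ny + block.y - 1) / block.y);
    fupdate<<<grid, block>>>(d_f, d_z, d_g, invlambda, nx, ny);
    cudaDeviceSynchronize();  // or defer and check cudaGetLastError() later
}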
318752c8f14e1cffa53e5136ad81282d6e4186a6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cstdio> #include <cstdlib> #include <vector> #include <rocblas.h> #include <hip/hip_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { hipblasHandle_t cublasH = NULL; hipStream_t stream = NULL; const int m = 2; const int n = 2; const int k = 2; const int lda = 2; const int ldb = 2; const int ldc = 2; const int batch_count = 2; const long long int strideA = m * k; const long long int strideB = k * n; const long long int strideC = m * n; /* * A = | 1.0 | 2.0 | 5.0 | 6.0 | * | 3.0 | 4.0 | 7.0 | 8.0 | * * B = | 5.0 | 6.0 | 9.0 | 10.0 | * | 7.0 | 8.0 | 11.0 | 12.0 | */ const std::vector<data_type> A = {1.0, 3.0, 2.0, 4.0, 5.0, 7.0, 6.0, 8.0}; const std::vector<data_type> B = {5.0, 7.0, 6.0, 8.0, 9.0, 11.0, 10.0, 12.0}; std::vector<data_type> C(m * n * batch_count); const data_type alpha = 1.0; const data_type beta = 0.0; data_type *d_A = nullptr; data_type *d_B = nullptr; data_type *d_C = nullptr; hipblasOperation_t transa = HIPBLAS_OP_N; hipblasOperation_t transb = HIPBLAS_OP_N; printf("A[0]\n"); print_matrix(m, k, A.data(), lda); printf("=====\n"); printf("A[1]\n"); print_matrix(m, k, A.data() + (m * k), lda); printf("=====\n"); printf("B[0]\n"); print_matrix(k, n, B.data(), ldb); printf("=====\n"); printf("B[1]\n"); print_matrix(k, n, B.data() + (k * n), ldb); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(hipblasCreate(&cublasH)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); CUBLAS_CHECK(hipblasSetStream(cublasH, stream)); /* step 2: copy data to device */ CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_B), sizeof(data_type) * B.size())); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_C), sizeof(data_type) * C.size())); CUDA_CHECK(hipMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), hipMemcpyHostToDevice, stream)); CUDA_CHECK(hipMemcpyAsync(d_B, B.data(), sizeof(data_type) * B.size(), hipMemcpyHostToDevice, stream)); /* step 3: compute */ CUBLAS_CHECK(hipblasGemmStridedBatchedEx( cublasH, transa, transb, m, n, k, &alpha, d_A, traits<data_type>::cuda_data_type, lda, strideA, d_B, traits<data_type>::cuda_data_type, ldb, strideB, &beta, d_C, traits<data_type>::cuda_data_type, ldc, strideC, batch_count, CUBLAS_COMPUTE_64F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); /* step 4: copy data to host */ CUDA_CHECK(hipMemcpyAsync(C.data(), d_C, sizeof(data_type) * C.size(), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); /* * C = | 19.0 | 22.0 | 111.0 | 122.0 | * | 43.0 | 50.0 | 151.0 | 166.0 | */ printf("C[0]\n"); print_matrix(m, n, C.data(), ldc); printf("=====\n"); printf("C[1]\n"); print_matrix(m, n, C.data() + (m * n), ldc); printf("=====\n"); /* free resources */ CUDA_CHECK(hipFree(d_A)); CUDA_CHECK(hipFree(d_B)); CUDA_CHECK(hipFree(d_C)); CUBLAS_CHECK(hipblasDestroy(cublasH)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
318752c8f14e1cffa53e5136ad81282d6e4186a6.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/ #include <cstdio> #include <cstdlib> #include <vector> #include <cublas_v2.h> #include <cuda_runtime.h> #include "cublas_utils.h" using data_type = double; int main(int argc, char *argv[]) { cublasHandle_t cublasH = NULL; cudaStream_t stream = NULL; const int m = 2; const int n = 2; const int k = 2; const int lda = 2; const int ldb = 2; const int ldc = 2; const int batch_count = 2; const long long int strideA = m * k; const long long int strideB = k * n; const long long int strideC = m * n; /* * A = | 1.0 | 2.0 | 5.0 | 6.0 | * | 3.0 | 4.0 | 7.0 | 8.0 | * * B = | 5.0 | 6.0 | 9.0 | 10.0 | * | 7.0 | 8.0 | 11.0 | 12.0 | */ const std::vector<data_type> A = {1.0, 3.0, 2.0, 4.0, 5.0, 7.0, 6.0, 8.0}; const std::vector<data_type> B = {5.0, 7.0, 6.0, 8.0, 9.0, 11.0, 10.0, 12.0}; std::vector<data_type> C(m * n * batch_count); const data_type alpha = 1.0; const data_type beta = 0.0; data_type *d_A = nullptr; data_type *d_B = nullptr; data_type *d_C = nullptr; cublasOperation_t transa = CUBLAS_OP_N; cublasOperation_t transb = CUBLAS_OP_N; printf("A[0]\n"); print_matrix(m, k, A.data(), lda); printf("=====\n"); printf("A[1]\n"); print_matrix(m, k, A.data() + (m * k), lda); printf("=====\n"); printf("B[0]\n"); print_matrix(k, n, B.data(), ldb); printf("=====\n"); printf("B[1]\n"); print_matrix(k, n, B.data() + (k * n), ldb); printf("=====\n"); /* step 1: create cublas handle, bind a stream */ CUBLAS_CHECK(cublasCreate(&cublasH)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); CUBLAS_CHECK(cublasSetStream(cublasH, stream)); /* step 2: copy data to device */ CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_B), sizeof(data_type) * B.size())); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_C), sizeof(data_type) * C.size())); CUDA_CHECK(cudaMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(), cudaMemcpyHostToDevice, stream)); CUDA_CHECK(cudaMemcpyAsync(d_B, B.data(), sizeof(data_type) * B.size(), cudaMemcpyHostToDevice, stream)); /* step 3: compute */ CUBLAS_CHECK(cublasGemmStridedBatchedEx( cublasH, transa, transb, m, n, k, &alpha, d_A, traits<data_type>::cuda_data_type, lda, strideA, d_B, traits<data_type>::cuda_data_type, ldb, strideB, &beta, d_C, traits<data_type>::cuda_data_type, ldc, strideC, batch_count, CUBLAS_COMPUTE_64F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); /* step 4: copy data to host */ CUDA_CHECK(cudaMemcpyAsync(C.data(), d_C, sizeof(data_type) * C.size(), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); /* * C = | 19.0 | 22.0 | 111.0 | 122.0 | * | 43.0 | 50.0 | 151.0 | 166.0 | */ printf("C[0]\n"); print_matrix(m, n, C.data(), ldc); printf("=====\n"); printf("C[1]\n"); print_matrix(m, n, C.data() + (m * n), ldc); printf("=====\n"); /* free resources */ CUDA_CHECK(cudaFree(d_A)); CUDA_CHECK(cudaFree(d_B)); CUDA_CHECK(cudaFree(d_C)); CUBLAS_CHECK(cublasDestroy(cublasH)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
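The strided-batched GEMM sample above stores both operands in column-major order (lda = ldb = ldc = 2) and documents the expected result in its comments. A quick host-side check of the first batch is sketched below; it is only a verification aid written for this note, not part of the original sample.

#include <cstdio>

// Reference GEMM for one batch: column-major, C = A * B (alpha = 1, beta = 0).
void gemm_ref(const double* A, const double* B, double* C, int m, int n, int k) {
    for (int col = 0; col < n; ++col) {
        for (int row = 0; row < m; ++row) {
            double acc = 0.0;
            for (int p = 0; p < k; ++p) {
                acc += A[p * m + row] * B[col * k + p];
            }
            C[col * m + row] = acc;
        }
    }
}

int main() {
    const double A[] = {1.0, 3.0, 2.0, 4.0};  // first batch of A, column-major
    const double B[] = {5.0, 7.0, 6.0, 8.0};  // first batch of B, column-major
    double C[4];
    gemm_ref(A, B, C, 2, 2, 2);
    // Prints 19 22 / 43 50, matching the C[0] comment in the sample above.
    printf("%g %g\n%g %g\n", C[0], C[2], C[1], C[3]);
    return 0;
}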
c0ff544ddaa694cd29d3c1742d0c5031a2c7ec10.hip
// !!! This is a file automatically generated by hipify!!! #include "SAX.h" #include <hip/hip_runtime.h> //#define THREADS 250 static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) uchar* d_collisionTable; uchar* d_cols; CudaWord* d_words; void startEvent(hipEvent_t &start, hipEvent_t &stop){ HANDLE_ERROR( hipEventCreate( &start ) ); HANDLE_ERROR( hipEventCreate( &stop ) ); HANDLE_ERROR( hipEventRecord( start, 0 ) ); } float endEvent(hipEvent_t &start, hipEvent_t &stop){ float elapsedTime; HANDLE_ERROR( hipEventRecord( stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( stop ) ); HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) ); HANDLE_ERROR( hipEventDestroy( start ) ); HANDLE_ERROR( hipEventDestroy( stop ) ); return elapsedTime; } void devInit(){ int size0 = NUM_WORDS * NUM_WORDS * sizeof(uchar); int size1 = MASK_SIZE * ITERATIONS * sizeof(uchar); int size2 = NUM_WORDS * sizeof(CudaWord); clock_t start, end; start = clock(); hipMalloc( &d_collisionTable, size0 ); hipMalloc( &d_cols, size1 ); hipMalloc( &d_words, size2 ); end = clock(); printf("\tExec time cudaMallocs: %d ms\n", end - start ); start = clock(); hipMemset( d_collisionTable, 0, size0 ); end = clock(); printf("\tExec time hipMemset: %d ms\n", end - start ); start = clock(); hipMemcpy( d_words, h_words, size2, hipMemcpyHostToDevice ); end = clock(); printf("\tExec time hipMemcpy: %d ms\n", end - start ); } void devFree() { hipFree( d_collisionTable ); hipFree( d_cols ); hipFree( d_words ); } __global__ void cudaRP_1D_OneIteration(uchar* collisionTable, CudaWord* words, uchar* cols){ uint index_i = blockIdx.x * blockDim.x + threadIdx.x; if( index_i >= NUM_WORDS ) return; uint index_j = blockIdx.y * blockDim.y; __shared__ CudaWord _words [ THREADS ]; __shared__ CudaWord _maskedWords [ THREADS ]; _maskedWords [ threadIdx.x ] = words [ index_i ]; _words [ threadIdx.x ] = words [ index_j + threadIdx.x ]; __syncthreads(); bool flag; for( uint i = 0; i < blockDim.x; i++ ) { if ( index_i != index_j && index_j < NUM_WORDS ) { flag = true; for (uchar j = 0; j < MASK_SIZE; j++) { if ( _words[ threadIdx.x ].values[ cols[j] ] != _maskedWords[ i ].values[ cols[j] ] ) { flag = false; break; } } if ( flag ) { collisionTable[index_j * NUM_WORDS + index_i] += 1; } } index_j++; } } __global__ void cudaRP_1D_AllIterations(uchar* collisionTable, CudaWord* words, uchar* cols){ uint index_i = blockIdx.x * blockDim.x + threadIdx.x; if( index_i >= NUM_WORDS ) return; uint index_j = blockIdx.y * blockDim.y; __shared__ CudaWord _words [ THREADS ]; __shared__ CudaWord _maskedWords [ THREADS ]; _maskedWords [ threadIdx.x ] = words [ index_i ]; _words [ threadIdx.x ] = words [ index_j + threadIdx.x ]; __syncthreads(); bool flag; uchar count; for( uint i = 0; i < blockDim.x; i++ ) { if ( index_i != index_j && index_j < NUM_WORDS ) { count = 0; for( int iter = 0; iter < ITERATIONS; iter++ ) { flag = true; for (uchar j = iter * MASK_SIZE; j < iter * MASK_SIZE + MASK_SIZE; j++) { if ( _words[ threadIdx.x ].values[ cols[j] ] != _maskedWords[ i ].values[ cols[j] ] ) { flag = false; break; } } if ( flag ) { count++; } } collisionTable[index_j * NUM_WORDS + index_i] = count; } index_j++; } } __global__ void cudaRP_2D_OneIteration(uchar* collisionTable, CudaWord* words, uchar* cols){ uint index_i = blockIdx.x * blockDim.x + threadIdx.x; uint 
index_j = blockIdx.y * blockDim.y + threadIdx.y; if( index_i == index_j || index_i >= NUM_WORDS || index_j >= NUM_WORDS ) return; uchar count = 0; uint index = index_j * NUM_WORDS + index_i; // CudaWord word_i = words[ index_i ]; // CudaWord word_j = words[ index_j ]; bool flag = true; for (uchar c = 0; c < MASK_SIZE; c++) { // if ( word_i.values[ cols[c] ] != word_j.values[ cols[c] ] ) { if ( words[ index_j ].values[ cols[c] ] != words[ index_i ].values[ cols[c] ] ) { flag = false; break; } } if ( flag ) { collisionTable[index] += 1; } } __global__ void cudaRP_2D_AllIterations(uchar* collisionTable, CudaWord* words, uchar* cols){ uint index_i = blockIdx.x * blockDim.x + threadIdx.x; uint index_j = blockIdx.y * blockDim.y + threadIdx.y; if( index_i == index_j || index_i >= NUM_WORDS || index_j >= NUM_WORDS ) return; uchar count = 0; uint index = index_j * NUM_WORDS + index_i; // CudaWord word_i = words[ index_i ]; // CudaWord word_j = words[ index_j ]; for( int iter = 0; iter < ITERATIONS; iter++ ){ bool flag = true; for (uchar c = iter * MASK_SIZE; c < iter * MASK_SIZE + MASK_SIZE; c++) { // if ( word_i.values[ cols[c] ] != word_j.values[ cols[c] ] ) { if ( words[ index_j ].values[ cols[c] ] != words[ index_i ].values[ cols[c] ] ) { flag = false; break; } } if ( flag ) { count++; } } collisionTable[index] = count; } void devRandomProjection(){ clock_t start, end; start = clock(); for (int it = 0; it < ITERATIONS; it++) { std::set<int> cols_set; for (int i = 0; i < MASK_SIZE; i++) { int tentativeColumn = rand() % WORD_SIZE; if ( cols_set.find(tentativeColumn) == cols_set.end() ) cols_set.insert(tentativeColumn); else i--; } std::copy( cols_set.begin(), cols_set.end (), &h_cols[ MASK_SIZE * it ] ); // printf("Mask columns (%d): [", it); // for (int i = 0; i < cols_set.size(); i++) { // printf("%d, ", h_cols[it * MASK_SIZE + i]); // } // printf("]\nWORD LIST SIZE: %d \n", cols_set.size()); hipMemcpy(d_cols, h_cols, MASK_SIZE * ITERATIONS, hipMemcpyHostToDevice); } end = clock(); printf("\tExec time cols generation: %d ms\n", (end - start)); int b = (NUM_WORDS + THREADS - 1) / THREADS; dim3 blocks ( b , b ); dim3 threads ( THREADS , THREADS ); hipEvent_t e_start, e_stop; startEvent(e_start, e_stop); for (int i = 0; i < ITERATIONS; i++ ) { hipLaunchKernelGGL(( cudaRP_1D_OneIteration), dim3(blocks) , dim3(THREADS) , 0, 0, d_collisionTable, d_words, &d_cols[i*MASK_SIZE] ); // cudaRP_2D_OneIteration<<<blocks , threads >>>( d_collisionTable, d_words, &d_cols[i*MASK_SIZE] ); // cudaRP_2D_AllIterations<<<blocks , threads >>>( d_collisionTable, d_words, d_cols ); // cudaRP_1D_AllIterations<<<blocks , THREADS >>>( d_collisionTable, d_words, d_cols ); } printf("\tExec time #%d cudaRP_1D_OneIteration: %lf ms\n", ITERATIONS, endEvent(e_start, e_stop)); // printf("\tEXEC TIME cudaRP_2D_AllIterations: %3.1f ms\n", endEvent(e_start, e_stop)); printf("\tConfiguration: <<< (%d,%d) , (%d,%d) >>>\n", b, b, THREADS, 1 ); startEvent(e_start, e_stop); hipMemcpy(h_collisionTable, d_collisionTable, NUM_WORDS * NUM_WORDS * sizeof(uchar), hipMemcpyDeviceToHost); printf("\tExec time hipMemcpy collision_table: %3.1f ms\n", endEvent(e_start, e_stop)); } void show1MotifResult() { int bestMotifSoFar = 0; std::vector<int> bestMotifLocationSoFar; for (int i = 0; i < NUM_WORDS; i++) { int counter = 0; std::multimap<uchar, int, std::greater<uchar> > pointers; for(int j = 0; j < NUM_WORDS; j++) { int index = i * NUM_WORDS + j; uchar count = h_collisionTable[index]; counter += count; pointers.insert( std::make_pair(count, j) ); 
} if( counter > bestMotifSoFar ) { bestMotifSoFar = counter; bestMotifLocationSoFar.clear(); bestMotifLocationSoFar.push_back(i); std::multimap<uchar, int, std::greater<uchar> >::iterator iter = pointers.begin(); for ( ; iter != pointers.end(); iter++) { if (iter->first > 0) bestMotifLocationSoFar.push_back(iter->second); } } } int topK = 25; printf("SIZE: %d\n", NUM_WORDS * NUM_WORDS); printf("1-MOTIF:\n "); for (int t = 0; t < min(topK, (int)bestMotifLocationSoFar.size()); t++) { printf("%d, ", bestMotifLocationSoFar[t]); } printf("\n"); } void testCudaRandomProjection(){ clock_t start = clock(); std::string train = DATASET; SAX::loadData(train); clock_t end = clock(); printf("Exec time loadData(): %d ms\n", (end - start)); start = clock(); devInit(); end = clock(); printf("Exec time devInit(): %d ms\n", (end - start)); start = clock(); devRandomProjection(); end = clock(); printf("Exec time devRandomProjection(): %d ms\n", (end - start)); devFree(); start = clock(); show1MotifResult(); end = clock(); printf("Exec time show1MotifResult(): %d ms\n", (end - start)); } int main(){ printf("\nWORDS = %d\n", NUM_WORDS); clock_t begin = clock(); testCudaRandomProjection(); clock_t end = clock(); printf("Total exec time was: %d ms\n\n", end - begin); return 0; }
c0ff544ddaa694cd29d3c1742d0c5031a2c7ec10.cu
#include "SAX.h" #include <cuda.h> //#define THREADS 250 static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) uchar* d_collisionTable; uchar* d_cols; CudaWord* d_words; void startEvent(cudaEvent_t &start, cudaEvent_t &stop){ HANDLE_ERROR( cudaEventCreate( &start ) ); HANDLE_ERROR( cudaEventCreate( &stop ) ); HANDLE_ERROR( cudaEventRecord( start, 0 ) ); } float endEvent(cudaEvent_t &start, cudaEvent_t &stop){ float elapsedTime; HANDLE_ERROR( cudaEventRecord( stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( stop ) ); HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) ); HANDLE_ERROR( cudaEventDestroy( start ) ); HANDLE_ERROR( cudaEventDestroy( stop ) ); return elapsedTime; } void devInit(){ int size0 = NUM_WORDS * NUM_WORDS * sizeof(uchar); int size1 = MASK_SIZE * ITERATIONS * sizeof(uchar); int size2 = NUM_WORDS * sizeof(CudaWord); clock_t start, end; start = clock(); cudaMalloc( &d_collisionTable, size0 ); cudaMalloc( &d_cols, size1 ); cudaMalloc( &d_words, size2 ); end = clock(); printf("\tExec time cudaMallocs: %d ms\n", end - start ); start = clock(); cudaMemset( d_collisionTable, 0, size0 ); end = clock(); printf("\tExec time cudaMemset: %d ms\n", end - start ); start = clock(); cudaMemcpy( d_words, h_words, size2, cudaMemcpyHostToDevice ); end = clock(); printf("\tExec time cudaMemcpy: %d ms\n", end - start ); } void devFree() { cudaFree( d_collisionTable ); cudaFree( d_cols ); cudaFree( d_words ); } __global__ void cudaRP_1D_OneIteration(uchar* collisionTable, CudaWord* words, uchar* cols){ uint index_i = blockIdx.x * blockDim.x + threadIdx.x; if( index_i >= NUM_WORDS ) return; uint index_j = blockIdx.y * blockDim.y; __shared__ CudaWord _words [ THREADS ]; __shared__ CudaWord _maskedWords [ THREADS ]; _maskedWords [ threadIdx.x ] = words [ index_i ]; _words [ threadIdx.x ] = words [ index_j + threadIdx.x ]; __syncthreads(); bool flag; for( uint i = 0; i < blockDim.x; i++ ) { if ( index_i != index_j && index_j < NUM_WORDS ) { flag = true; for (uchar j = 0; j < MASK_SIZE; j++) { if ( _words[ threadIdx.x ].values[ cols[j] ] != _maskedWords[ i ].values[ cols[j] ] ) { flag = false; break; } } if ( flag ) { collisionTable[index_j * NUM_WORDS + index_i] += 1; } } index_j++; } } __global__ void cudaRP_1D_AllIterations(uchar* collisionTable, CudaWord* words, uchar* cols){ uint index_i = blockIdx.x * blockDim.x + threadIdx.x; if( index_i >= NUM_WORDS ) return; uint index_j = blockIdx.y * blockDim.y; __shared__ CudaWord _words [ THREADS ]; __shared__ CudaWord _maskedWords [ THREADS ]; _maskedWords [ threadIdx.x ] = words [ index_i ]; _words [ threadIdx.x ] = words [ index_j + threadIdx.x ]; __syncthreads(); bool flag; uchar count; for( uint i = 0; i < blockDim.x; i++ ) { if ( index_i != index_j && index_j < NUM_WORDS ) { count = 0; for( int iter = 0; iter < ITERATIONS; iter++ ) { flag = true; for (uchar j = iter * MASK_SIZE; j < iter * MASK_SIZE + MASK_SIZE; j++) { if ( _words[ threadIdx.x ].values[ cols[j] ] != _maskedWords[ i ].values[ cols[j] ] ) { flag = false; break; } } if ( flag ) { count++; } } collisionTable[index_j * NUM_WORDS + index_i] = count; } index_j++; } } __global__ void cudaRP_2D_OneIteration(uchar* collisionTable, CudaWord* words, uchar* cols){ uint index_i = blockIdx.x * blockDim.x + threadIdx.x; uint index_j = blockIdx.y * blockDim.y + threadIdx.y; 
if( index_i == index_j || index_i >= NUM_WORDS || index_j >= NUM_WORDS ) return; uchar count = 0; uint index = index_j * NUM_WORDS + index_i; // CudaWord word_i = words[ index_i ]; // CudaWord word_j = words[ index_j ]; bool flag = true; for (uchar c = 0; c < MASK_SIZE; c++) { // if ( word_i.values[ cols[c] ] != word_j.values[ cols[c] ] ) { if ( words[ index_j ].values[ cols[c] ] != words[ index_i ].values[ cols[c] ] ) { flag = false; break; } } if ( flag ) { collisionTable[index] += 1; } } __global__ void cudaRP_2D_AllIterations(uchar* collisionTable, CudaWord* words, uchar* cols){ uint index_i = blockIdx.x * blockDim.x + threadIdx.x; uint index_j = blockIdx.y * blockDim.y + threadIdx.y; if( index_i == index_j || index_i >= NUM_WORDS || index_j >= NUM_WORDS ) return; uchar count = 0; uint index = index_j * NUM_WORDS + index_i; // CudaWord word_i = words[ index_i ]; // CudaWord word_j = words[ index_j ]; for( int iter = 0; iter < ITERATIONS; iter++ ){ bool flag = true; for (uchar c = iter * MASK_SIZE; c < iter * MASK_SIZE + MASK_SIZE; c++) { // if ( word_i.values[ cols[c] ] != word_j.values[ cols[c] ] ) { if ( words[ index_j ].values[ cols[c] ] != words[ index_i ].values[ cols[c] ] ) { flag = false; break; } } if ( flag ) { count++; } } collisionTable[index] = count; } void devRandomProjection(){ clock_t start, end; start = clock(); for (int it = 0; it < ITERATIONS; it++) { std::set<int> cols_set; for (int i = 0; i < MASK_SIZE; i++) { int tentativeColumn = rand() % WORD_SIZE; if ( cols_set.find(tentativeColumn) == cols_set.end() ) cols_set.insert(tentativeColumn); else i--; } std::copy( cols_set.begin(), cols_set.end (), &h_cols[ MASK_SIZE * it ] ); // printf("Mask columns (%d): [", it); // for (int i = 0; i < cols_set.size(); i++) { // printf("%d, ", h_cols[it * MASK_SIZE + i]); // } // printf("]\nWORD LIST SIZE: %d \n", cols_set.size()); cudaMemcpy(d_cols, h_cols, MASK_SIZE * ITERATIONS, cudaMemcpyHostToDevice); } end = clock(); printf("\tExec time cols generation: %d ms\n", (end - start)); int b = (NUM_WORDS + THREADS - 1) / THREADS; dim3 blocks ( b , b ); dim3 threads ( THREADS , THREADS ); cudaEvent_t e_start, e_stop; startEvent(e_start, e_stop); for (int i = 0; i < ITERATIONS; i++ ) { cudaRP_1D_OneIteration<<<blocks , THREADS >>>( d_collisionTable, d_words, &d_cols[i*MASK_SIZE] ); // cudaRP_2D_OneIteration<<<blocks , threads >>>( d_collisionTable, d_words, &d_cols[i*MASK_SIZE] ); // cudaRP_2D_AllIterations<<<blocks , threads >>>( d_collisionTable, d_words, d_cols ); // cudaRP_1D_AllIterations<<<blocks , THREADS >>>( d_collisionTable, d_words, d_cols ); } printf("\tExec time #%d cudaRP_1D_OneIteration: %lf ms\n", ITERATIONS, endEvent(e_start, e_stop)); // printf("\tEXEC TIME cudaRP_2D_AllIterations: %3.1f ms\n", endEvent(e_start, e_stop)); printf("\tConfiguration: <<< (%d,%d) , (%d,%d) >>>\n", b, b, THREADS, 1 ); startEvent(e_start, e_stop); cudaMemcpy(h_collisionTable, d_collisionTable, NUM_WORDS * NUM_WORDS * sizeof(uchar), cudaMemcpyDeviceToHost); printf("\tExec time cudaMemcpy collision_table: %3.1f ms\n", endEvent(e_start, e_stop)); } void show1MotifResult() { int bestMotifSoFar = 0; std::vector<int> bestMotifLocationSoFar; for (int i = 0; i < NUM_WORDS; i++) { int counter = 0; std::multimap<uchar, int, std::greater<uchar> > pointers; for(int j = 0; j < NUM_WORDS; j++) { int index = i * NUM_WORDS + j; uchar count = h_collisionTable[index]; counter += count; pointers.insert( std::make_pair(count, j) ); } if( counter > bestMotifSoFar ) { bestMotifSoFar = counter; 
bestMotifLocationSoFar.clear(); bestMotifLocationSoFar.push_back(i); std::multimap<uchar, int, std::greater<uchar> >::iterator iter = pointers.begin(); for ( ; iter != pointers.end(); iter++) { if (iter->first > 0) bestMotifLocationSoFar.push_back(iter->second); } } } int topK = 25; printf("SIZE: %d\n", NUM_WORDS * NUM_WORDS); printf("1-MOTIF:\n "); for (int t = 0; t < min(topK, (int)bestMotifLocationSoFar.size()); t++) { printf("%d, ", bestMotifLocationSoFar[t]); } printf("\n"); } void testCudaRandomProjection(){ clock_t start = clock(); std::string train = DATASET; SAX::loadData(train); clock_t end = clock(); printf("Exec time loadData(): %d ms\n", (end - start)); start = clock(); devInit(); end = clock(); printf("Exec time devInit(): %d ms\n", (end - start)); start = clock(); devRandomProjection(); end = clock(); printf("Exec time devRandomProjection(): %d ms\n", (end - start)); devFree(); start = clock(); show1MotifResult(); end = clock(); printf("Exec time show1MotifResult(): %d ms\n", (end - start)); } int main(){ printf("\nWORDS = %d\n", NUM_WORDS); clock_t begin = clock(); testCudaRandomProjection(); clock_t end = clock(); printf("Total exec time was: %d ms\n\n", end - begin); return 0; }
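One caveat about the SAX benchmark above: it prints clock_t differences with "%d ms", but clock() counts processor ticks (CLOCKS_PER_SEC of them per second), so the reported numbers are not milliseconds, and the format specifier may not even match clock_t on a given platform. A small conversion helper, shown here as a sketch rather than a patch to the file, would make those prints meaningful.

#include <cstdio>
#include <ctime>

// clock() returns ticks, not milliseconds; convert before printing.
static double elapsed_ms(clock_t start, clock_t end) {
    return 1000.0 * static_cast<double>(end - start) / CLOCKS_PER_SEC;
}

// Usage, replacing the raw "%d ms" prints in devInit()/devRandomProjection():
//   clock_t t0 = clock();
//   /* ... work ... */
//   printf("\tExec time: %.1f ms\n", elapsed_ms(t0, clock()));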
69fda69288b20b8373290fea1810d9578f9b6df4.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------------------------------- // Copyrighted by Marko Rakita. // Author: Marko Rakita // File contains: Mocked neural network softmax layer, used in tests. // Created: 02/20/2016. // ---------------------------------------------------------------------------------------------------- #include "include/mocksoftmaxlayer.cuh" MockSoftMaxLayer::MockSoftMaxLayer(uint inputDataSize, uint inputDataCount) { m_layerType = LayerType::SoftMax; m_indexInTier = 0; m_tierSize = 1; m_inputDataSize = m_activationDataSize = inputDataSize; m_inputDataCount = inputDataCount; m_holdsInputData = true; // Allocating input data buffer. m_inputBufferSize = m_inputDataSize * m_inputDataCount * sizeof(float); if (m_holdsInputData) { CudaAssert(hipHostMalloc<float>(&m_inputDataBuffer, m_inputBufferSize)); } // Allocating input gradients buffer. CudaAssert(hipHostMalloc<float>(&m_inputGradientsBuffer, m_inputBufferSize)); // Allocating input activations maximums buffer. CudaAssert(hipHostMalloc<float>(&m_inputActivationsMaxBuffer, m_inputDataCount * sizeof(float))); // Allocating input activations maximums buffer. CudaAssert(hipHostMalloc<float>(&m_exponentialsSumBuffer, m_inputDataCount * sizeof(float))); // Allocating activation data buffers. m_activationBufferSize = m_inputBufferSize; CudaAssert(hipHostMalloc<float>(&m_activationDataBuffer, m_activationBufferSize)); m_activationGradientsBuffer = NULL; m_holdsActivationGradients = false; } MockSoftMaxLayer::~MockSoftMaxLayer() { if (m_holdsInputData) { CudaAssert(hipHostFree(m_inputDataBuffer)); } m_inputDataBuffer = NULL; CudaAssert(hipHostFree(m_inputGradientsBuffer)); m_inputGradientsBuffer = NULL; CudaAssert(hipHostFree(m_inputActivationsMaxBuffer)); CudaAssert(hipHostFree(m_exponentialsSumBuffer)); CudaAssert(hipHostFree(m_activationDataBuffer)); m_activationDataBuffer = NULL; if (m_activationGradientsBuffer != NULL) { CudaAssert(hipHostFree(m_activationGradientsBuffer)); } m_activationGradientsBuffer = NULL; } void MockSoftMaxLayer::LoadInputs() { TestingAssert(m_prevLayers.size() == 1, "We do not support more than one previous layer in tests, for now."); CudaAssert(hipMemcpy(m_inputDataBuffer, m_prevLayers[0]->GetActivationDataBuffer(), m_inputBufferSize, hipMemcpyDeviceToHost)); } void MockSoftMaxLayer::LoadActivationGradients() { TestingAssert(m_nextLayers.size() == 1, "We do not support more than one previous layer in tests, for now."); CudaAssert(hipHostMalloc<float>(&m_activationGradientsBuffer, m_activationBufferSize)); CudaAssert(hipMemcpy(m_activationGradientsBuffer, m_nextLayers[0]->GetInputGradientsBuffer(), m_activationBufferSize, hipMemcpyDeviceToHost)); } void MockSoftMaxLayer::StabilizeInputs() { // Finding maximums of input activations. for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { m_inputActivationsMaxBuffer[dataIndex] = m_inputDataBuffer[dataIndex]; for (uint activationIndex = 1; activationIndex < m_activationDataSize; ++activationIndex) { m_inputActivationsMaxBuffer[dataIndex] = max(m_inputActivationsMaxBuffer[dataIndex], m_inputDataBuffer[activationIndex * m_inputDataCount + dataIndex]); } } // Substracting maximums of input activations from all the input activations. 
for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { for (uint activationIndex = 0; activationIndex < m_activationDataSize; ++activationIndex) { m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex] = m_inputDataBuffer[activationIndex * m_inputDataCount + dataIndex] - m_inputActivationsMaxBuffer[dataIndex]; } } } void MockSoftMaxLayer::CalculateSoftMaximums() { // Computing the exponentials. for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { for (uint activationIndex = 0; activationIndex < m_activationDataSize; ++activationIndex) { m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex] = exp(m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex]); } } // Computing sum of the exponentials. for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { m_exponentialsSumBuffer[dataIndex] = m_activationDataBuffer[dataIndex]; for (uint activationIndex = 1; activationIndex < m_activationDataSize; ++activationIndex) { m_exponentialsSumBuffer[dataIndex] += m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex]; } } // Dividing exponentials with their sum to get soft maximums. for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { for (uint activationIndex = 0; activationIndex < m_activationDataSize; ++activationIndex) { m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex] /= m_exponentialsSumBuffer[dataIndex]; } } } void MockSoftMaxLayer::DoForwardProp(PropagationMode propagationMode) { StabilizeInputs(); CalculateSoftMaximums(); } void MockSoftMaxLayer::LogisticRegressionBackwardProp(uint* dataLabels) { for (uint activationIndex = 0; activationIndex < m_activationDataSize; ++activationIndex) { for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { uint activationsOffset = activationIndex * m_inputDataCount + dataIndex; m_inputGradientsBuffer[activationsOffset] = (dataLabels[dataIndex] == activationIndex ? 1.f : 0.f) - m_activationDataBuffer[activationsOffset]; } } } void MockSoftMaxLayer::DoBackwardProp() { if (m_nextLayers[0]->GetLayerType() == LayerType::Output) { OutputLayer* outputLayer = static_cast<OutputLayer*>(m_nextLayers[0]); if (outputLayer->GetLossFunctionType() == LossFunctionType::LogisticRegression) { uint* tempHostLabelsBuffer; size_t labelsBufferSize = m_inputDataCount * sizeof(uint); CudaAssert(hipHostMalloc<uint>(&tempHostLabelsBuffer, labelsBufferSize)); CudaAssert(hipMemcpy(tempHostLabelsBuffer, outputLayer->GetDataLabels(), labelsBufferSize, hipMemcpyDeviceToHost)); LogisticRegressionBackwardProp(tempHostLabelsBuffer); CudaAssert(hipHostFree(tempHostLabelsBuffer)); } else { TestingAssert(false, "Currently not supported!"); } } else { TestingAssert(false, "Currently not supported!"); } }
69fda69288b20b8373290fea1810d9578f9b6df4.cu
// ---------------------------------------------------------------------------------------------------- // Copyrighted by Marko Rakita. // Author: Marko Rakita // File contains: Mocked neural network softmax layer, used in tests. // Created: 02/20/2016. // ---------------------------------------------------------------------------------------------------- #include "include/mocksoftmaxlayer.cuh" MockSoftMaxLayer::MockSoftMaxLayer(uint inputDataSize, uint inputDataCount) { m_layerType = LayerType::SoftMax; m_indexInTier = 0; m_tierSize = 1; m_inputDataSize = m_activationDataSize = inputDataSize; m_inputDataCount = inputDataCount; m_holdsInputData = true; // Allocating input data buffer. m_inputBufferSize = m_inputDataSize * m_inputDataCount * sizeof(float); if (m_holdsInputData) { CudaAssert(cudaMallocHost<float>(&m_inputDataBuffer, m_inputBufferSize)); } // Allocating input gradients buffer. CudaAssert(cudaMallocHost<float>(&m_inputGradientsBuffer, m_inputBufferSize)); // Allocating input activations maximums buffer. CudaAssert(cudaMallocHost<float>(&m_inputActivationsMaxBuffer, m_inputDataCount * sizeof(float))); // Allocating input activations maximums buffer. CudaAssert(cudaMallocHost<float>(&m_exponentialsSumBuffer, m_inputDataCount * sizeof(float))); // Allocating activation data buffers. m_activationBufferSize = m_inputBufferSize; CudaAssert(cudaMallocHost<float>(&m_activationDataBuffer, m_activationBufferSize)); m_activationGradientsBuffer = NULL; m_holdsActivationGradients = false; } MockSoftMaxLayer::~MockSoftMaxLayer() { if (m_holdsInputData) { CudaAssert(cudaFreeHost(m_inputDataBuffer)); } m_inputDataBuffer = NULL; CudaAssert(cudaFreeHost(m_inputGradientsBuffer)); m_inputGradientsBuffer = NULL; CudaAssert(cudaFreeHost(m_inputActivationsMaxBuffer)); CudaAssert(cudaFreeHost(m_exponentialsSumBuffer)); CudaAssert(cudaFreeHost(m_activationDataBuffer)); m_activationDataBuffer = NULL; if (m_activationGradientsBuffer != NULL) { CudaAssert(cudaFreeHost(m_activationGradientsBuffer)); } m_activationGradientsBuffer = NULL; } void MockSoftMaxLayer::LoadInputs() { TestingAssert(m_prevLayers.size() == 1, "We do not support more than one previous layer in tests, for now."); CudaAssert(cudaMemcpy(m_inputDataBuffer, m_prevLayers[0]->GetActivationDataBuffer(), m_inputBufferSize, cudaMemcpyDeviceToHost)); } void MockSoftMaxLayer::LoadActivationGradients() { TestingAssert(m_nextLayers.size() == 1, "We do not support more than one previous layer in tests, for now."); CudaAssert(cudaMallocHost<float>(&m_activationGradientsBuffer, m_activationBufferSize)); CudaAssert(cudaMemcpy(m_activationGradientsBuffer, m_nextLayers[0]->GetInputGradientsBuffer(), m_activationBufferSize, cudaMemcpyDeviceToHost)); } void MockSoftMaxLayer::StabilizeInputs() { // Finding maximums of input activations. for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { m_inputActivationsMaxBuffer[dataIndex] = m_inputDataBuffer[dataIndex]; for (uint activationIndex = 1; activationIndex < m_activationDataSize; ++activationIndex) { m_inputActivationsMaxBuffer[dataIndex] = max(m_inputActivationsMaxBuffer[dataIndex], m_inputDataBuffer[activationIndex * m_inputDataCount + dataIndex]); } } // Substracting maximums of input activations from all the input activations. 
for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { for (uint activationIndex = 0; activationIndex < m_activationDataSize; ++activationIndex) { m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex] = m_inputDataBuffer[activationIndex * m_inputDataCount + dataIndex] - m_inputActivationsMaxBuffer[dataIndex]; } } } void MockSoftMaxLayer::CalculateSoftMaximums() { // Computing the exponentials. for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { for (uint activationIndex = 0; activationIndex < m_activationDataSize; ++activationIndex) { m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex] = exp(m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex]); } } // Computing sum of the exponentials. for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { m_exponentialsSumBuffer[dataIndex] = m_activationDataBuffer[dataIndex]; for (uint activationIndex = 1; activationIndex < m_activationDataSize; ++activationIndex) { m_exponentialsSumBuffer[dataIndex] += m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex]; } } // Dividing exponentials with their sum to get soft maximums. for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { for (uint activationIndex = 0; activationIndex < m_activationDataSize; ++activationIndex) { m_activationDataBuffer[activationIndex * m_inputDataCount + dataIndex] /= m_exponentialsSumBuffer[dataIndex]; } } } void MockSoftMaxLayer::DoForwardProp(PropagationMode propagationMode) { StabilizeInputs(); CalculateSoftMaximums(); } void MockSoftMaxLayer::LogisticRegressionBackwardProp(uint* dataLabels) { for (uint activationIndex = 0; activationIndex < m_activationDataSize; ++activationIndex) { for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex) { uint activationsOffset = activationIndex * m_inputDataCount + dataIndex; m_inputGradientsBuffer[activationsOffset] = (dataLabels[dataIndex] == activationIndex ? 1.f : 0.f) - m_activationDataBuffer[activationsOffset]; } } } void MockSoftMaxLayer::DoBackwardProp() { if (m_nextLayers[0]->GetLayerType() == LayerType::Output) { OutputLayer* outputLayer = static_cast<OutputLayer*>(m_nextLayers[0]); if (outputLayer->GetLossFunctionType() == LossFunctionType::LogisticRegression) { uint* tempHostLabelsBuffer; size_t labelsBufferSize = m_inputDataCount * sizeof(uint); CudaAssert(cudaMallocHost<uint>(&tempHostLabelsBuffer, labelsBufferSize)); CudaAssert(cudaMemcpy(tempHostLabelsBuffer, outputLayer->GetDataLabels(), labelsBufferSize, cudaMemcpyDeviceToHost)); LogisticRegressionBackwardProp(tempHostLabelsBuffer); CudaAssert(cudaFreeHost(tempHostLabelsBuffer)); } else { TestingAssert(false, "Currently not supported!"); } } else { TestingAssert(false, "Currently not supported!"); } }
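MockSoftMaxLayer computes the standard numerically stable softmax on the host: subtract the per-sample maximum, exponentiate, then divide by the sum of exponentials, over buffers laid out as activationIndex * dataCount + dataIndex. For reference, the same computation for a single sample, stripped of the layer bookkeeping, looks like the sketch below (my own code, not taken from the repository).

#include <algorithm>
#include <cmath>
#include <vector>

// Numerically stable softmax for one sample, mirroring StabilizeInputs +
// CalculateSoftMaximums above (max subtraction, exp, normalize).
std::vector<float> Softmax(const std::vector<float>& logits) {
    const float maxLogit = *std::max_element(logits.begin(), logits.end());
    std::vector<float> probs(logits.size());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i) {
        probs[i] = std::exp(logits[i] - maxLogit);
        sum += probs[i];
    }
    for (float& p : probs) {
        p /= sum;
    }
    return probs;
}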
81de405dd5f830fa29fdcd0fbfae7327546f5288.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <cutil_math.h> #include <thrust\random.h> #include <cmath> #include "cutil_inline.h" #define DEG_TO_RAD 0.017453292519943 #define EPS 0.001 #define EPS1 0.01 #define PI 3.1415926535897932 #define HALFPI 1.5707963267948966 #define QUARTPI 0.7853981633974483 #define ROOTTHREE 0.57735027 #define HUGEVAL 1e20 #define MAX_ITER 32 __device__ float dBox( float3 p, float3 b ) { float3 d = make_float3( abs(p.x) - b.x, abs(p.y) - b.y, abs(p.z) - b.z ); float3 m = make_float3( max(d.x,0.0), max(d.y,0.0), max(d.z,0.0) ); return min( max( d.x, max( d.y, d.z ) ), 0.0 ) + length( m ); } __device__ float d2Box( float3 p, float3 b ) { float box1 = dBox( p + make_float3( 0.5, 0.0, 0.0 ), b ); float box2 = dBox( p - make_float3( 0.5, 0.0, 0.0 ) , b ); return min(box1, box2); } __device__ float dFloor( float3 p ) { return dBox( p, make_float3( HUGEVAL, 0.2, HUGEVAL ) ); } __device__ float dSphere( float3 p, float r ) { return length( p ) - r; } __device__ float dTorus( float3 p, float2 t ) { float2 q = make_float2(length(make_float2(p.x, p.y))-t.x,p.y); return length(q)-t.y; } __device__ float dCone( float3 p, float2 c ) { // c must be normalized float q = length(make_float2(p.x, p.y)); return dot(c,make_float2(q,p.z)); } __device__ float map( float3 p, int & mat ) { float dist = min(dBox( p, make_float3( 1, 1, 0.25 ) ), dSphere( p, 0.5 ) ); mat = 1; return dist; } __device__ float rayMarch( float3 p, float3 view, int & mat ) { float dist; float totaldist = 0.0; for( int it = 0; it < MAX_ITER; it ++ ) { dist = map( p, mat ) * 0.8; totaldist += dist; if( abs( dist ) < EPS ) { break; } p += view * dist; } if( abs( dist ) > 1e-2 ) totaldist = -1.0; return totaldist; } __device__ float3 gradientNormal(float3 p) { int m; return normalize(make_float3( map(p + make_float3(EPS, 0, 0), m) - map(p - make_float3(EPS, 0, 0), m), map(p + make_float3(0, EPS, 0), m) - map(p - make_float3(0, EPS, 0), m), map(p + make_float3(0, 0, EPS), m) - map(p - make_float3(0, 0, EPS), m))); } __device__ float getAmbientOcclusion( float3 p, float3 dir ) { int m; float sample0 = map( p + 0.1 * dir, m ) / 0.1; float sample1 = map( p + 0.2 * dir, m ) / 0.2; float sample2 = map( p + 0.3 * dir, m ) / 0.3; float sample3 = map( p + 0.4 * dir, m ) / 0.4; return ( sample0*0.05+sample1*0.1+sample2*0.25+sample3*0.6 ); } //Yanked from nop's code // source: [email protected] #define SS_K 15.0 __device__ float getShadow (float3 pos, float3 toLight, float lightDist) { float shadow = 1.0; float t = EPS1; float dt; for(int i=0; i<MAX_ITER; ++i) { int m; dt = map(pos+(toLight*t), m) * 0.8; if(dt < EPS) // stop if intersect object return 0.0; shadow = min(shadow, SS_K*(dt/t)); t += dt; if(t > lightDist) // stop if reach light break; } return clamp(shadow, 0.0, 1.0); } #undef SS_K __device__ float3 rayCast( float3 pos, float3 lpos, float3 view, int & mat, float3 & newpos ) { float dist = rayMarch( pos, view, mat ); float3 color = make_float3( 1.0 ); if( mat == 1 ) { color = make_float3( 1.0, 0.5, 0.5 ); } else if( mat == 2 ) { color = make_float3( 0.5, 1.0, 0.5 ); } else if( mat == 3 ) { color = make_float3( 0.5, 0.5, 1.0 ); } if( dist < 0.0 ) color = make_float3( 0.0 ); else { newpos = pos + view*dist; float3 ldir = normalize( lpos - newpos ); float ldist = length( lpos - newpos ); float3 norm = gradientNormal( newpos ); float diffuse = max( 0.0, dot( norm, ldir ) ); float specular = pow( max( 0.0, dot( reflect( view, norm ), 
ldir ) ), 150.0 ); float shadow = 1.0;//getShadow( newpos + 0.01*ldir, ldir, length( lpos - newpos ) ); float AO = 1.0;//getAmbientOcclusion( newpos, norm ); float fog = 1.0;//exp( -0.05*dist ); color = color*(AO*(diffuse*shadow + 0.1)*fog); color += make_float3(5.0*specular*fog); } return color; } __global__ void render_k( float4 * out, float3 campos, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if( x >= width || y >= height ) { return; } float3 color; float3 lpos = { 0.0, 4.0, 7.0 }; campos = campos - make_float3( 0, -50, 0 ); float3 look = -campos; float2 position = ( make_float2(x,y) - make_float2(width,height) / 2.0 ) / height * sin( 45.0 * DEG_TO_RAD / 2.0 ); look = normalize( look ); float3 right = cross( look, make_float3( 0.0, 1.0, 0.0 ) ); float3 up = cross( right, look ); float3 view = normalize( look + position.x*right + position.y*up ); float3 pos = campos + view; float3 newpos; int mat; color = rayCast( pos, lpos, view, mat, newpos ); out[x+y*width] = make_float4( color, 1.0 ); } extern "C" void render( dim3 dimGrid, dim3 dimBlock, float4 * out, float3 campos, unsigned int width, unsigned int height ) { float4 * tmp; hipMalloc( &tmp, width * height * sizeof( float4 ) ); hipLaunchKernelGGL(( render_k), dim3(dimGrid), dim3(dimBlock) , 0, 0, tmp, campos, width, height ); hipMemcpy( out, tmp, width * height * sizeof( float4 ), hipMemcpyDeviceToHost ); hipFree( tmp ); }
81de405dd5f830fa29fdcd0fbfae7327546f5288.cu
#include <cuda.h> #include <cuda_runtime_api.h> #include <cutil_math.h> #include <thrust\random.h> #include <cmath> #include "cutil_inline.h" #define DEG_TO_RAD 0.017453292519943 #define EPS 0.001 #define EPS1 0.01 #define PI 3.1415926535897932 #define HALFPI 1.5707963267948966 #define QUARTPI 0.7853981633974483 #define ROOTTHREE 0.57735027 #define HUGEVAL 1e20 #define MAX_ITER 32 __device__ float dBox( float3 p, float3 b ) { float3 d = make_float3( abs(p.x) - b.x, abs(p.y) - b.y, abs(p.z) - b.z ); float3 m = make_float3( max(d.x,0.0), max(d.y,0.0), max(d.z,0.0) ); return min( max( d.x, max( d.y, d.z ) ), 0.0 ) + length( m ); } __device__ float d2Box( float3 p, float3 b ) { float box1 = dBox( p + make_float3( 0.5, 0.0, 0.0 ), b ); float box2 = dBox( p - make_float3( 0.5, 0.0, 0.0 ) , b ); return min(box1, box2); } __device__ float dFloor( float3 p ) { return dBox( p, make_float3( HUGEVAL, 0.2, HUGEVAL ) ); } __device__ float dSphere( float3 p, float r ) { return length( p ) - r; } __device__ float dTorus( float3 p, float2 t ) { float2 q = make_float2(length(make_float2(p.x, p.y))-t.x,p.y); return length(q)-t.y; } __device__ float dCone( float3 p, float2 c ) { // c must be normalized float q = length(make_float2(p.x, p.y)); return dot(c,make_float2(q,p.z)); } __device__ float map( float3 p, int & mat ) { float dist = min(dBox( p, make_float3( 1, 1, 0.25 ) ), dSphere( p, 0.5 ) ); mat = 1; return dist; } __device__ float rayMarch( float3 p, float3 view, int & mat ) { float dist; float totaldist = 0.0; for( int it = 0; it < MAX_ITER; it ++ ) { dist = map( p, mat ) * 0.8; totaldist += dist; if( abs( dist ) < EPS ) { break; } p += view * dist; } if( abs( dist ) > 1e-2 ) totaldist = -1.0; return totaldist; } __device__ float3 gradientNormal(float3 p) { int m; return normalize(make_float3( map(p + make_float3(EPS, 0, 0), m) - map(p - make_float3(EPS, 0, 0), m), map(p + make_float3(0, EPS, 0), m) - map(p - make_float3(0, EPS, 0), m), map(p + make_float3(0, 0, EPS), m) - map(p - make_float3(0, 0, EPS), m))); } __device__ float getAmbientOcclusion( float3 p, float3 dir ) { int m; float sample0 = map( p + 0.1 * dir, m ) / 0.1; float sample1 = map( p + 0.2 * dir, m ) / 0.2; float sample2 = map( p + 0.3 * dir, m ) / 0.3; float sample3 = map( p + 0.4 * dir, m ) / 0.4; return ( sample0*0.05+sample1*0.1+sample2*0.25+sample3*0.6 ); } //Yanked from nop's code // source: [email protected] #define SS_K 15.0 __device__ float getShadow (float3 pos, float3 toLight, float lightDist) { float shadow = 1.0; float t = EPS1; float dt; for(int i=0; i<MAX_ITER; ++i) { int m; dt = map(pos+(toLight*t), m) * 0.8; if(dt < EPS) // stop if intersect object return 0.0; shadow = min(shadow, SS_K*(dt/t)); t += dt; if(t > lightDist) // stop if reach light break; } return clamp(shadow, 0.0, 1.0); } #undef SS_K __device__ float3 rayCast( float3 pos, float3 lpos, float3 view, int & mat, float3 & newpos ) { float dist = rayMarch( pos, view, mat ); float3 color = make_float3( 1.0 ); if( mat == 1 ) { color = make_float3( 1.0, 0.5, 0.5 ); } else if( mat == 2 ) { color = make_float3( 0.5, 1.0, 0.5 ); } else if( mat == 3 ) { color = make_float3( 0.5, 0.5, 1.0 ); } if( dist < 0.0 ) color = make_float3( 0.0 ); else { newpos = pos + view*dist; float3 ldir = normalize( lpos - newpos ); float ldist = length( lpos - newpos ); float3 norm = gradientNormal( newpos ); float diffuse = max( 0.0, dot( norm, ldir ) ); float specular = pow( max( 0.0, dot( reflect( view, norm ), ldir ) ), 150.0 ); float shadow = 1.0;//getShadow( newpos + 0.01*ldir, ldir, 
length( lpos - newpos ) ); float AO = 1.0;//getAmbientOcclusion( newpos, norm ); float fog = 1.0;//exp( -0.05*dist ); color = color*(AO*(diffuse*shadow + 0.1)*fog); color += make_float3(5.0*specular*fog); } return color; } __global__ void render_k( float4 * out, float3 campos, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if( x >= width || y >= height ) { return; } float3 color; float3 lpos = { 0.0, 4.0, 7.0 }; campos = campos - make_float3( 0, -50, 0 ); float3 look = -campos; float2 position = ( make_float2(x,y) - make_float2(width,height) / 2.0 ) / height * sin( 45.0 * DEG_TO_RAD / 2.0 ); look = normalize( look ); float3 right = cross( look, make_float3( 0.0, 1.0, 0.0 ) ); float3 up = cross( right, look ); float3 view = normalize( look + position.x*right + position.y*up ); float3 pos = campos + view; float3 newpos; int mat; color = rayCast( pos, lpos, view, mat, newpos ); out[x+y*width] = make_float4( color, 1.0 ); } extern "C" void render( dim3 dimGrid, dim3 dimBlock, float4 * out, float3 campos, unsigned int width, unsigned int height ) { float4 * tmp; cudaMalloc( &tmp, width * height * sizeof( float4 ) ); render_k<<< dimGrid, dimBlock >>>( tmp, campos, width, height ); cudaMemcpy( out, tmp, width * height * sizeof( float4 ), cudaMemcpyDeviceToHost ); cudaFree( tmp ); }
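The render() wrapper in this pair allocates a temporary device frame buffer, launches render_k, and copies the pixels back to the host, but leaves the grid/block choice to the caller. A plausible caller is sketched below; renderFrame, the 16x16 block shape, and the hostPixels name are my own illustration, relying only on the extern "C" signature defined in the file above.

#include <cuda_runtime.h>

// Forward declaration of the wrapper defined in the file above.
extern "C" void render(dim3 dimGrid, dim3 dimBlock, float4* out, float3 campos,
                       unsigned int width, unsigned int height);

// Hypothetical caller: one thread per pixel, grid rounded up so the kernel's
// (x >= width || y >= height) guard handles the ragged edge.
void renderFrame(float4* hostPixels, float3 campos,
                 unsigned int width, unsigned int height) {
    dim3 block(16, 16);
    dim3 grid((width + block.x - 1) / block.x,
              (height + block.y - 1) / block.y);
    render(grid, block, hostPixels, campos, width, height);
}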
0a1ccfee99e6cbed2511a4405f6bf0f69c5c45cc.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "WarpSelectImpl_hip.cuh"

namespace faiss {
namespace gpu {

WARP_SELECT_IMPL(float, true, 512, 8);

}
} // namespace
0a1ccfee99e6cbed2511a4405f6bf0f69c5c45cc.cu
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "WarpSelectImpl.cuh"

namespace faiss {
namespace gpu {

WARP_SELECT_IMPL(float, true, 512, 8);

}
} // namespace
547054d43f4ba9a80af4c68f20146cb132182d9b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "hist_inGlobal.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int *values = NULL; hipMalloc(&values, XSIZE*YSIZE); int length = 1; int *hist = NULL; hipMalloc(&hist, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( hist_inGlobal), dim3(gridBlock),dim3(threadBlock), 0, 0, values,length,hist); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( hist_inGlobal), dim3(gridBlock),dim3(threadBlock), 0, 0, values,length,hist); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( hist_inGlobal), dim3(gridBlock),dim3(threadBlock), 0, 0, values,length,hist); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
547054d43f4ba9a80af4c68f20146cb132182d9b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "hist_inGlobal.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int *values = NULL; cudaMalloc(&values, XSIZE*YSIZE); int length = 1; int *hist = NULL; cudaMalloc(&hist, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); hist_inGlobal<<<gridBlock,threadBlock>>>(values,length,hist); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { hist_inGlobal<<<gridBlock,threadBlock>>>(values,length,hist); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { hist_inGlobal<<<gridBlock,threadBlock>>>(values,length,hist); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
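One caveat about the benchmark above: kernel launches return asynchronously, and the steady_clock loop takes its end timestamp without a cudaDeviceSynchronize() after the 1000 launches, so the reported time can reflect launch overhead rather than kernel execution. The sketch below shows the same measurement with a synchronization barrier; the helper name and the lambda-based interface are my own, not code from the file.

#include <chrono>
#include <cuda_runtime.h>

// Time `iterations` launches of a kernel, synchronizing before reading the
// end timestamp so the measurement covers execution, not just launch calls.
template <typename Launch>
float timeKernelMs(Launch launch, int iterations = 1000) {
    launch();                      // warm-up
    cudaDeviceSynchronize();
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iterations; ++i) {
        launch();
    }
    cudaDeviceSynchronize();       // wait for all queued launches to finish
    auto end = std::chrono::steady_clock::now();
    return std::chrono::duration<float, std::milli>(end - start).count();
}

// Usage with the benchmark's own variables (illustrative):
//   float ms = timeKernelMs([&] {
//       hist_inGlobal<<<gridBlock, threadBlock>>>(values, length, hist);
//   });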
e68ce4d299307373d7064252d368c1f1e6833feb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <device_types.h>
#include "Common/CUDATools.h"
#include "Common/CUDADeviceTools.h"

template<typename T, size_t threadsPerBlock>
__global__ void kernel_reductionSum(T *data, T *sum, int count, int repeatCount) {
  __shared__ T ssum[threadsPerBlock];
  for (int i = 0; i < repeatCount; i++) {
    unsigned int threadID = threadIdx.x;
    unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
    ssum[threadID] = 0;
    T *pBase = data + blockIdx.x * countPerBlock;
    T *pValue = pBase + threadID;
    T *pValueMax = pBase + countPerBlock;
    if (pValueMax > data + count)
      pValueMax = data + count;
    T *pResult = sum + blockIdx.x;
    while (pValue < pValueMax) {
      ssum[threadID] += *pValue;
      pValue += blockDim.x;
    }
    __syncthreads();
    for (int i = blockDim.x >> 1; i > 16; i >>= 1) {
      if (threadID < i)
        ssum[threadID] += ssum[threadID + i];
      __syncthreads();
    }
#ifdef CUDA50_
    T value = ssum[threadID];
    if (sizeof(T) == sizeof(int)) {
      value += __shfl_xor((T)value, 16, 32);
      value += __shfl_xor((T)value, 8, 32);
      value += __shfl_xor((T)value, 4, 32);
      value += __shfl_xor((T)value, 2, 32);
      value += __shfl_xor((T)value, 1, 32);
    }
    else if (sizeof(T) == sizeof(double)) {
      //!!
    }
    if (threadID == 0)
      *pResult = value;
#else
#pragma unroll
    for (int i = 16; i > 0; i >>= 1) {
      if (threadID < i)
        ssum[threadID] += ssum[threadID + i];
      __syncthreads();
    }
    if (threadID == 0)
      *pResult = ssum[threadID];
#endif
    __syncthreads();
  }
}

template<typename T>
__global__ void kernel_alignedRead(T *data, int count, int repeatCount) {
  unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
  T *pmax = data + blockIdx.x * countPerBlock + countPerBlock;
  size_t inc = blockDim.x;
  for (int i = 0; i < repeatCount; i++) {
    T *p = data + blockIdx.x * countPerBlock + threadIdx.x;
    T sum = 0;
    while (p < pmax) {
      sum += *p;
      p += inc;
    }
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f\n", sum);
  }
}

template<typename T>
__global__ void kernel_notAlignedRead(T *data, int count, int repeatCount) {
  unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
  unsigned int countPerThread = (countPerBlock + blockDim.x - 1) / blockDim.x;
  T *pmax = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread + countPerThread;
  size_t inc = 1;
  for (int i = 0; i < repeatCount; i++) {
    T *p = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread;
    T sum = 0;
    while (p < pmax) {
      sum += *p;
      p += inc;
    }
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f\n", sum);
  }
}

template<typename T>
__global__ void kernel_alignedWrite(T *data, int count, int repeatCount) {
  unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
  T *pmax = data + blockIdx.x * countPerBlock + countPerBlock;
  size_t inc = blockDim.x;
  for (int i = 0; i < repeatCount; i++) {
    T *p = data + blockIdx.x * countPerBlock + threadIdx.x;
    while (p < pmax) {
      *p = 0;
      p += inc;
    }
  }
}

template<typename T>
__global__ void kernel_notAlignedWrite(T *data, int count, int repeatCount) {
  unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
  unsigned int countPerThread = (countPerBlock + blockDim.x - 1) / blockDim.x;
  T *pmax = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread + countPerThread;
  size_t inc = 1;
  for (int i = 0; i < repeatCount; i++) {
    T *p = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread;
    while (p < pmax) {
      *p = 0;
      p += inc;
    }
  }
}

template<typename T>
void cuda_alignedRead(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  hipLaunchKernelGGL(( kernel_alignedRead<T>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, count, repeatCount);
}

template<typename T>
void cuda_notAlignedRead(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  hipLaunchKernelGGL(( kernel_notAlignedRead<T>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, count, repeatCount);
}

template<typename T>
void cuda_alignedWrite(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  hipLaunchKernelGGL(( kernel_alignedWrite<T>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, count, repeatCount);
}

template<typename T>
void cuda_notAlignedWrite(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  hipLaunchKernelGGL(( kernel_notAlignedWrite<T>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, count, repeatCount);
}

template<typename T>
void cuda_reductionSum(T *data, T *sum, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  deviceMem<T> temp;
  temp.allocate(blockCount);
  switch (threadsPerBlock) {
  case 1: case 2: case 4: case 8: case 16: case 32:
    hipLaunchKernelGGL(( kernel_reductionSum<T, 32>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp.dptr, count, repeatCount);
    hipLaunchKernelGGL(( kernel_reductionSum<T, 32>), dim3(1), dim3(threadsPerBlock), 0, 0, temp.dptr, sum, blockCount, 1);
    break;
  case 64:
    hipLaunchKernelGGL(( kernel_reductionSum<T, 64>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp.dptr, count, repeatCount);
    hipLaunchKernelGGL(( kernel_reductionSum<T, 64>), dim3(1), dim3(threadsPerBlock), 0, 0, temp.dptr, sum, blockCount, 1);
    break;
  case 128:
    hipLaunchKernelGGL(( kernel_reductionSum<T, 128>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp.dptr, count, repeatCount);
    hipLaunchKernelGGL(( kernel_reductionSum<T, 128>), dim3(1), dim3(threadsPerBlock), 0, 0, temp.dptr, sum, blockCount, 1);
    break;
  case 256:
    hipLaunchKernelGGL(( kernel_reductionSum<T, 256>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp.dptr, count, repeatCount);
    hipLaunchKernelGGL(( kernel_reductionSum<T, 256>), dim3(1), dim3(threadsPerBlock), 0, 0, temp.dptr, sum, blockCount, 1);
    break;
  case 512:
    hipLaunchKernelGGL(( kernel_reductionSum<T, 512>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp.dptr, count, repeatCount);
    hipLaunchKernelGGL(( kernel_reductionSum<T, 512>), dim3(1), dim3(threadsPerBlock), 0, 0, temp.dptr, sum, blockCount, 1);
    break;
  case 1024:
    hipLaunchKernelGGL(( kernel_reductionSum<T, 1024>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp.dptr, count, repeatCount);
    hipLaunchKernelGGL(( kernel_reductionSum<T, 1024>), dim3(1), dim3(threadsPerBlock), 0, 0, temp.dptr, sum, blockCount, 1);
    break;
  }
}

__global__ void kernel_doTinyTask(int a, int b) {
  int sum = a + b;
  if (threadIdx.x > 1024) // to avoid removal by optimization
    printf("%d", sum);
}

void cuda_doTinyTask(int blockCount, int threadCount) {
  hipLaunchKernelGGL(( kernel_doTinyTask), dim3(blockCount), dim3(threadCount), 0, 0, blockCount, threadCount);
}

template<typename T>
__global__ void kernel_doAdd(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value = i;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f", value);
  }
}

template<typename T>
void cuda_doAdd(int count, int blockCount, int threadCount) {
  hipLaunchKernelGGL(( kernel_doAdd<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}

template<typename T>
__global__ void kernel_doAdd2(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value1 = i, value2 = (T)1.0 + i, value3 = (T)2.0 + i;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f, %f, %f", value1, value2, value3);
  }
}

template<typename T>
void cuda_doAdd2(int count, int blockCount, int threadCount) {
  hipLaunchKernelGGL(( kernel_doAdd2<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}

template<typename T>
__global__ void kernel_doAddMulMix(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value1 = i, value2 = (T)1.0 + i;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f, %f", value1, value2);
  }
}

template<typename T>
void cuda_doAddMulMix(int count, int blockCount, int threadCount) {
  hipLaunchKernelGGL(( kernel_doAddMulMix<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}

template<typename T>
__global__ void kernel_doMul(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value = (T)i;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("%f", value);
  }
}

template<typename T>
void cuda_doMul(int count, int blockCount, int threadCount) {
  hipLaunchKernelGGL(( kernel_doMul<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}

template<typename T>
__global__ void kernel_doDiv(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value = (T)i + (T)1.2345;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("%f", value);
  }
}

template<typename T>
void cuda_doDiv(int count, int blockCount, int threadCount) {
  hipLaunchKernelGGL(( kernel_doDiv<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}

template<typename T>
__global__ void kernel_doSin(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value = (T)1.0 + i;
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("%f", value);
  }
}

/*
template<>
__global__ void kernel_doSin<float>(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    float value = 1.0f + i;
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("%f", value);
  }
}
*/

template<typename T>
void cuda_doSin(int count, int blockCount, int threadCount) {
  hipLaunchKernelGGL(( kernel_doSin<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}

#ifdef CUDA50
template<bool waitForCompletion>
__global__ void kernel_doDynamicTinyTask(int blockCount, int threadCount, double *time) {
  DTimingCounter counter;
  DTimingClearAndStart(counter);
  for (int i = 0; i < 1000; i++) {
    hipLaunchKernelGGL(( kernel_doTinyTask), dim3(blockCount), dim3(threadCount), 0, 0, i, i);
    if (waitForCompletion)
      hipDeviceSynchronize();
  }
  DTimingFinish(counter);
  *time = DTimingSeconds(counter) / 1000;
}

double cuda_doDynamicTinyTask(int blockCount, int threadCount, bool waitForCompletion) {
  deviceMem<double> d_time(1);
  if (waitForCompletion)
    hipLaunchKernelGGL(( kernel_doDynamicTinyTask<true>), dim3(1), dim3(1), 0, 0, blockCount, threadCount, d_time.dptr);
  else
    hipLaunchKernelGGL(( kernel_doDynamicTinyTask<false>), dim3(1), dim3(1), 0, 0, blockCount, threadCount, d_time.dptr);
  cudaSafeCall(hipDeviceSynchronize());
  double result;
  d_time.copyTo(&result);
  return result;
}
#endif

// template instantiation
template void cuda_reductionSum<int>(int *, int *, int, int, int, int);
template void cuda_reductionSum<__int64>(__int64 *, __int64 *, int, int, int, int);
template void cuda_reductionSum<float>(float *, float *, int, int, int, int);
template void cuda_reductionSum<double>(double *, double *, int, int, int, int);
template void cuda_alignedRead<int>(int *, int, int, int, int);
template void cuda_alignedRead<__int64>(__int64 *, int, int, int, int);
template void cuda_alignedRead<float>(float *, int, int, int, int);
template void cuda_alignedRead<double>(double *, int, int, int, int);
template void cuda_notAlignedRead<int>(int *, int, int, int, int);
template void cuda_notAlignedRead<__int64>(__int64 *, int, int, int, int);
template void cuda_notAlignedRead<float>(float *, int, int, int, int);
template void cuda_notAlignedRead<double>(double *, int, int, int, int);
template void cuda_alignedWrite<int>(int *, int, int, int, int);
template void cuda_alignedWrite<__int64>(__int64 *, int, int, int, int);
template void cuda_alignedWrite<float>(float *, int, int, int, int);
template void cuda_alignedWrite<double>(double *, int, int, int, int);
template void cuda_notAlignedWrite<int>(int *, int, int, int, int);
template void cuda_notAlignedWrite<__int64>(__int64 *, int, int, int, int);
template void cuda_notAlignedWrite<float>(float *, int, int, int, int);
template void cuda_notAlignedWrite<double>(double *, int, int, int, int);
template void cuda_doAdd<float>(int, int, int);
template void cuda_doAdd<double>(int, int, int);
template void cuda_doAdd2<float>(int, int, int);
template void cuda_doAdd2<double>(int, int, int);
template void cuda_doAddMulMix<float>(int, int, int);
template void cuda_doAddMulMix<double>(int, int, int);
template void cuda_doMul<float>(int, int, int);
template void cuda_doMul<double>(int, int, int);
template void cuda_doDiv<float>(int, int, int);
template void cuda_doDiv<double>(int, int, int);
template void cuda_doSin<float>(int, int, int);
template void cuda_doSin<double>(int, int, int);
e68ce4d299307373d7064252d368c1f1e6833feb.cu
#include <device_types.h>
#include "Common/CUDATools.h"
#include "Common/CUDADeviceTools.h"

template<typename T, size_t threadsPerBlock>
__global__ void kernel_reductionSum(T *data, T *sum, int count, int repeatCount) {
  __shared__ T ssum[threadsPerBlock];
  for (int i = 0; i < repeatCount; i++) {
    unsigned int threadID = threadIdx.x;
    unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
    ssum[threadID] = 0;
    T *pBase = data + blockIdx.x * countPerBlock;
    T *pValue = pBase + threadID;
    T *pValueMax = pBase + countPerBlock;
    if (pValueMax > data + count)
      pValueMax = data + count;
    T *pResult = sum + blockIdx.x;
    while (pValue < pValueMax) {
      ssum[threadID] += *pValue;
      pValue += blockDim.x;
    }
    __syncthreads();
    for (int i = blockDim.x >> 1; i > 16; i >>= 1) {
      if (threadID < i)
        ssum[threadID] += ssum[threadID + i];
      __syncthreads();
    }
#ifdef CUDA50_
    T value = ssum[threadID];
    if (sizeof(T) == sizeof(int)) {
      value += __shfl_xor((T)value, 16, 32);
      value += __shfl_xor((T)value, 8, 32);
      value += __shfl_xor((T)value, 4, 32);
      value += __shfl_xor((T)value, 2, 32);
      value += __shfl_xor((T)value, 1, 32);
    }
    else if (sizeof(T) == sizeof(double)) {
      //!!
    }
    if (threadID == 0)
      *pResult = value;
#else
#pragma unroll
    for (int i = 16; i > 0; i >>= 1) {
      if (threadID < i)
        ssum[threadID] += ssum[threadID + i];
      __syncthreads();
    }
    if (threadID == 0)
      *pResult = ssum[threadID];
#endif
    __syncthreads();
  }
}

template<typename T>
__global__ void kernel_alignedRead(T *data, int count, int repeatCount) {
  unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
  T *pmax = data + blockIdx.x * countPerBlock + countPerBlock;
  size_t inc = blockDim.x;
  for (int i = 0; i < repeatCount; i++) {
    T *p = data + blockIdx.x * countPerBlock + threadIdx.x;
    T sum = 0;
    while (p < pmax) {
      sum += *p;
      p += inc;
    }
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f\n", sum);
  }
}

template<typename T>
__global__ void kernel_notAlignedRead(T *data, int count, int repeatCount) {
  unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
  unsigned int countPerThread = (countPerBlock + blockDim.x - 1) / blockDim.x;
  T *pmax = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread + countPerThread;
  size_t inc = 1;
  for (int i = 0; i < repeatCount; i++) {
    T *p = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread;
    T sum = 0;
    while (p < pmax) {
      sum += *p;
      p += inc;
    }
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f\n", sum);
  }
}

template<typename T>
__global__ void kernel_alignedWrite(T *data, int count, int repeatCount) {
  unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
  T *pmax = data + blockIdx.x * countPerBlock + countPerBlock;
  size_t inc = blockDim.x;
  for (int i = 0; i < repeatCount; i++) {
    T *p = data + blockIdx.x * countPerBlock + threadIdx.x;
    while (p < pmax) {
      *p = 0;
      p += inc;
    }
  }
}

template<typename T>
__global__ void kernel_notAlignedWrite(T *data, int count, int repeatCount) {
  unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
  unsigned int countPerThread = (countPerBlock + blockDim.x - 1) / blockDim.x;
  T *pmax = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread + countPerThread;
  size_t inc = 1;
  for (int i = 0; i < repeatCount; i++) {
    T *p = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread;
    while (p < pmax) {
      *p = 0;
      p += inc;
    }
  }
}

template<typename T>
void cuda_alignedRead(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  kernel_alignedRead<T><<<blockCount, threadsPerBlock>>>(data, count, repeatCount);
}

template<typename T>
void cuda_notAlignedRead(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  kernel_notAlignedRead<T><<<blockCount, threadsPerBlock>>>(data, count, repeatCount);
}

template<typename T>
void cuda_alignedWrite(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  kernel_alignedWrite<T><<<blockCount, threadsPerBlock>>>(data, count, repeatCount);
}

template<typename T>
void cuda_notAlignedWrite(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  kernel_notAlignedWrite<T><<<blockCount, threadsPerBlock>>>(data, count, repeatCount);
}

template<typename T>
void cuda_reductionSum(T *data, T *sum, int count, int repeatCount, int blockCount, int threadsPerBlock) {
  deviceMem<T> temp;
  temp.allocate(blockCount);
  switch (threadsPerBlock) {
  case 1: case 2: case 4: case 8: case 16: case 32:
    kernel_reductionSum<T, 32><<<blockCount, threadsPerBlock>>>(data, temp.dptr, count, repeatCount);
    kernel_reductionSum<T, 32><<<1, threadsPerBlock>>>(temp.dptr, sum, blockCount, 1);
    break;
  case 64:
    kernel_reductionSum<T, 64><<<blockCount, threadsPerBlock>>>(data, temp.dptr, count, repeatCount);
    kernel_reductionSum<T, 64><<<1, threadsPerBlock>>>(temp.dptr, sum, blockCount, 1);
    break;
  case 128:
    kernel_reductionSum<T, 128><<<blockCount, threadsPerBlock>>>(data, temp.dptr, count, repeatCount);
    kernel_reductionSum<T, 128><<<1, threadsPerBlock>>>(temp.dptr, sum, blockCount, 1);
    break;
  case 256:
    kernel_reductionSum<T, 256><<<blockCount, threadsPerBlock>>>(data, temp.dptr, count, repeatCount);
    kernel_reductionSum<T, 256><<<1, threadsPerBlock>>>(temp.dptr, sum, blockCount, 1);
    break;
  case 512:
    kernel_reductionSum<T, 512><<<blockCount, threadsPerBlock>>>(data, temp.dptr, count, repeatCount);
    kernel_reductionSum<T, 512><<<1, threadsPerBlock>>>(temp.dptr, sum, blockCount, 1);
    break;
  case 1024:
    kernel_reductionSum<T, 1024><<<blockCount, threadsPerBlock>>>(data, temp.dptr, count, repeatCount);
    kernel_reductionSum<T, 1024><<<1, threadsPerBlock>>>(temp.dptr, sum, blockCount, 1);
    break;
  }
}

__global__ void kernel_doTinyTask(int a, int b) {
  int sum = a + b;
  if (threadIdx.x > 1024) // to avoid removal by optimization
    printf("%d", sum);
}

void cuda_doTinyTask(int blockCount, int threadCount) {
  kernel_doTinyTask<<<blockCount, threadCount>>>(blockCount, threadCount);
}

template<typename T>
__global__ void kernel_doAdd(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value = i;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    value = value + value; value = value + value; value = value + value; value = value + value;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f", value);
  }
}

template<typename T>
void cuda_doAdd(int count, int blockCount, int threadCount) {
  kernel_doAdd<T><<<blockCount, threadCount>>>(count);
}

template<typename T>
__global__ void kernel_doAdd2(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value1 = i, value2 = (T)1.0 + i, value3 = (T)2.0 + i;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    value1 = value1 + value1; value2 = value2 + value2; value3 = value3 + value3;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f, %f, %f", value1, value2, value3);
  }
}

template<typename T>
void cuda_doAdd2(int count, int blockCount, int threadCount) {
  kernel_doAdd2<T><<<blockCount, threadCount>>>(count);
}

template<typename T>
__global__ void kernel_doAddMulMix(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value1 = i, value2 = (T)1.0 + i;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1; value2 = value2 * value2;
    value1 = value1 + value1;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("sum: %f, %f", value1, value2);
  }
}

template<typename T>
void cuda_doAddMulMix(int count, int blockCount, int threadCount) {
  kernel_doAddMulMix<T><<<blockCount, threadCount>>>(count);
}

template<typename T>
__global__ void kernel_doMul(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value = (T)i;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    value = value * value; value = value * value; value = value * value; value = value * value;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("%f", value);
  }
}

template<typename T>
void cuda_doMul(int count, int blockCount, int threadCount) {
  kernel_doMul<T><<<blockCount, threadCount>>>(count);
}

template<typename T>
__global__ void kernel_doDiv(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value = (T)i + (T)1.2345;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    value = value / value; value = value / value; value = value / value; value = value / value;
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("%f", value);
  }
}

template<typename T>
void cuda_doDiv(int count, int blockCount, int threadCount) {
  kernel_doDiv<T><<<blockCount, threadCount>>>(count);
}

template<typename T>
__global__ void kernel_doSin(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    T value = (T)1.0 + i;
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    value = sin(value); value = sin(value); value = sin(value); value = sin(value);
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("%f", value);
  }
}

/*
template<>
__global__ void kernel_doSin<float>(int count) {
  int bulkCount = count >> 5;
  for (int i = 0; i < bulkCount; i++) {
    float value = 1.0f + i;
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    value = sinf(value); value = sinf(value); value = sinf(value); value = sinf(value);
    if (threadIdx.x > 1024) // to avoid removal by optimization
      printf("%f", value);
  }
}
*/

template<typename T>
void cuda_doSin(int count, int blockCount, int threadCount) {
  kernel_doSin<T><<<blockCount, threadCount>>>(count);
}

#ifdef CUDA50
template<bool waitForCompletion>
__global__ void kernel_doDynamicTinyTask(int blockCount, int threadCount, double *time) {
  DTimingCounter counter;
  DTimingClearAndStart(counter);
  for (int i = 0; i < 1000; i++) {
    kernel_doTinyTask<<<blockCount, threadCount>>>(i, i);
    if (waitForCompletion)
      cudaDeviceSynchronize();
  }
  DTimingFinish(counter);
  *time = DTimingSeconds(counter) / 1000;
}

double cuda_doDynamicTinyTask(int blockCount, int threadCount, bool waitForCompletion) {
  deviceMem<double> d_time(1);
  if (waitForCompletion)
    kernel_doDynamicTinyTask<true><<<1, 1>>>(blockCount, threadCount, d_time.dptr);
  else
    kernel_doDynamicTinyTask<false><<<1, 1>>>(blockCount, threadCount, d_time.dptr);
  cudaSafeCall(cudaThreadSynchronize());
  double result;
  d_time.copyTo(&result);
  return result;
}
#endif

// template instantiation
template void cuda_reductionSum<int>(int *, int *, int, int, int, int);
template void cuda_reductionSum<__int64>(__int64 *, __int64 *, int, int, int, int);
template void cuda_reductionSum<float>(float *, float *, int, int, int, int);
template void cuda_reductionSum<double>(double *, double *, int, int, int, int);
template void cuda_alignedRead<int>(int *, int, int, int, int);
template void cuda_alignedRead<__int64>(__int64 *, int, int, int, int);
template void cuda_alignedRead<float>(float *, int, int, int, int);
template void cuda_alignedRead<double>(double *, int, int, int, int);
template void cuda_notAlignedRead<int>(int *, int, int, int, int);
template void cuda_notAlignedRead<__int64>(__int64 *, int, int, int, int);
template void cuda_notAlignedRead<float>(float *, int, int, int, int);
template void cuda_notAlignedRead<double>(double *, int, int, int, int);
template void cuda_alignedWrite<int>(int *, int, int, int, int);
template void cuda_alignedWrite<__int64>(__int64 *, int, int, int, int);
template void cuda_alignedWrite<float>(float *, int, int, int, int);
template void cuda_alignedWrite<double>(double *, int, int, int, int);
template void cuda_notAlignedWrite<int>(int *, int, int, int, int);
template void cuda_notAlignedWrite<__int64>(__int64 *, int, int, int, int);
template void cuda_notAlignedWrite<float>(float *, int, int, int, int);
template void cuda_notAlignedWrite<double>(double *, int, int, int, int);
template void cuda_doAdd<float>(int, int, int);
template void cuda_doAdd<double>(int, int, int);
template void cuda_doAdd2<float>(int, int, int);
template void cuda_doAdd2<double>(int, int, int);
template void cuda_doAddMulMix<float>(int, int, int);
template void cuda_doAddMulMix<double>(int, int, int);
template void cuda_doMul<float>(int, int, int);
template void cuda_doMul<double>(int, int, int);
template void cuda_doDiv<float>(int, int, int);
template void cuda_doDiv<double>(int, int, int);
template void cuda_doSin<float>(int, int, int);
template void cuda_doSin<double>(int, int, int);
9a524a25135ac5cfff346bf317c2ca38698d1ac9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sort_hip.cuh"
#include "header_hip.cuh"

//extern __constant__ float cities[N_CITIES][2];

__device__ void swap(Individu *p, int index1, int index2){
  if (index1 == index2)
    return;
  Individu tmp = p[index1];
  p[index1] = p[index2];
  p[index2] = tmp;
}

__device__ void fusion(Individu *p, int i, int j, int endj){
  while (true) {
    int endi = j, k = i;
    int iMoved = 0;
    for (;k < endi && j < endj; k++) {
      if (p[i].score > p[j].score) {
        if (!iMoved)
          i++;
        else {
          swap(p, k, i);
          for (int o = i; o < i + iMoved - 1; o++)
            swap(p, o, o + 1);
        }
      }
      else {
        swap(p, k, j);
        if (!iMoved) {
          i = j;
        }
        iMoved++;
        j++;
      }
    }
    if (k < endi && iMoved) {
      endj = i + iMoved;
      j = i;
      i = k;
      continue;
    }
    else if (i < j && j < endj) {
      continue;
    }
    break;
  }
}

__device__ void merge_sort(Individu *population){
  int modulo = 2;
  int nbElt = 1;
  while (true){
    if (threadIdx.x % modulo == 0){
      int maxElt = threadIdx.x + nbElt * 2;
      maxElt = maxElt < blockDim.x ? maxElt : blockDim.x;
      fusion(population, threadIdx.x, threadIdx.x+nbElt, maxElt);
      nbElt = maxElt - threadIdx.x;
      if (nbElt == blockDim.x)
        return;
      modulo *= 2;
    } else {
      return;
    }
    __syncthreads();
  }
}

__device__ void bubble_sort(Individu *population){
  if((threadIdx.x % 2) == 0) {
    int even = true;
    for(int i = 0; i < blockDim.x; i++, even = !even) {
      __syncthreads();
      int tab_index = threadIdx.x + even;
      if(tab_index < blockDim.x - 1) {
        if(population[tab_index].score > population[tab_index + 1].score) {
          swap(population, tab_index, tab_index+1);
        }
      }
    }
  }
}
9a524a25135ac5cfff346bf317c2ca38698d1ac9.cu
#include "sort.cuh"
#include "header.cuh"

//extern __constant__ float cities[N_CITIES][2];

__device__ void swap(Individu *p, int index1, int index2){
  if (index1 == index2)
    return;
  Individu tmp = p[index1];
  p[index1] = p[index2];
  p[index2] = tmp;
}

__device__ void fusion(Individu *p, int i, int j, int endj){
  while (true) {
    int endi = j, k = i;
    int iMoved = 0;
    for (;k < endi && j < endj; k++) {
      if (p[i].score > p[j].score) {
        if (!iMoved)
          i++;
        else {
          swap(p, k, i);
          for (int o = i; o < i + iMoved - 1; o++)
            swap(p, o, o + 1);
        }
      }
      else {
        swap(p, k, j);
        if (!iMoved) {
          i = j;
        }
        iMoved++;
        j++;
      }
    }
    if (k < endi && iMoved) {
      endj = i + iMoved;
      j = i;
      i = k;
      continue;
    }
    else if (i < j && j < endj) {
      continue;
    }
    break;
  }
}

__device__ void merge_sort(Individu *population){
  int modulo = 2;
  int nbElt = 1;
  while (true){
    if (threadIdx.x % modulo == 0){
      int maxElt = threadIdx.x + nbElt * 2;
      maxElt = maxElt < blockDim.x ? maxElt : blockDim.x;
      fusion(population, threadIdx.x, threadIdx.x+nbElt, maxElt);
      nbElt = maxElt - threadIdx.x;
      if (nbElt == blockDim.x)
        return;
      modulo *= 2;
    } else {
      return;
    }
    __syncthreads();
  }
}

__device__ void bubble_sort(Individu *population){
  if((threadIdx.x % 2) == 0) {
    int even = true;
    for(int i = 0; i < blockDim.x; i++, even = !even) {
      __syncthreads();
      int tab_index = threadIdx.x + even;
      if(tab_index < blockDim.x - 1) {
        if(population[tab_index].score > population[tab_index + 1].score) {
          swap(population, tab_index, tab_index+1);
        }
      }
    }
  }
}
cf130e8a5d8668dcfa0b89b60a3bcc3f419d4f40.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "io_util.cuh"

// Define this to turn on error checking
#define CUDA_ERROR_CHECK

#define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__)
#define CudaCheckError() __cudaCheckError(__FILE__, __LINE__)

inline void __cudaSafeCall(hipError_t err, const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
  if (hipSuccess != err) {
    fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString(err));
    exit(-1);
  }
#endif
  return;
}

inline void __cudaCheckError(const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
  hipError_t err = hipGetLastError();
  if (hipSuccess != err) {
    fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString(err));
    exit(-1);
  }
  // More careful checking. However, this will affect performance.
  // Comment away if needed.
  // err = hipDeviceSynchronize();
  if (hipSuccess != err) {
    fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString(err));
    exit(-1);
  }
#endif
  return;
}

/*
IO HELPERS
*/

void parseArgs(const int &numArgs, char** args, unsigned int &k, unsigned int &iterations, double &learningRate, double &threshold, float &sigmoidTuner, std::string &baseDirectory){
  std::map<std::string, int> parameters;
  parameters["-k"] = 0;
  parameters["-i"] = 1;
  parameters["-l"] = 2;
  parameters["-t"] = 3;
  parameters["-s"] = 4;
  parameters["-h"] = 5;
  parameters["-help"] = 6;
  if(numArgs < 2){
    std::cout << "Usage: ./exe <directory of timepoint tifs>\nfor help ./exe -h"<<std::endl;
    exit(-1);
  }
  else if (numArgs >= 2){
    if(parameters[args[1]] < 5){
      baseDirectory = args[1];
    }
    else{
      std::ifstream readme;
      std::string line = "";
      readme.open("README.txt");
      if(readme.is_open()){
        while(getline(readme, line)){
          std::cout << line << std::endl;
        }
        readme.close();
      }
      exit(0);
    }
    for(int i = 2; i < numArgs; ++i) {
      switch(parameters[args[i]]){
        case 0: k = std::stoi(args[++i]); break;
        case 1: iterations = std::stoi(args[++i]); break;
        case 2: learningRate = std::stod(args[++i]); break;
        case 3: threshold = std::stod(args[++i]); break;
        case 4: sigmoidTuner = std::stof(args[++i]); break;
        default:
          std::cout<<"Please use all arguments listed in documentation"<<std::endl;
          exit(-1);
      }
    }
  }
}

std::string createFourCharInt(int i) {
  std::string strInt;
  if (i < 10) {
    strInt = "000" + std::to_string(i);
  } else if (i < 100) {
    strInt = "00" + std::to_string(i);
  } else if (i < 1000) {
    strInt = "0" + std::to_string(i);
  } else {
    strInt = std::to_string(i);
  }
  return strInt;
}

void extractMartrices(TIFF *tif, uint32* &imageMatrix, unsigned int width, unsigned int height, unsigned int scanLineSize) {
  tdata_t buf;
  buf = _TIFFmalloc(scanLineSize);
  for (uint32 row = 0; row < height; row++) {
    if (TIFFReadScanline(tif, buf, row, 0) != -1) {
      memcpy(&imageMatrix[row * width], buf, scanLineSize);
    } else {
      std::cout << "ERROR READING SCANLINE" << std::endl;
      exit(-1);
    }
  }
  _TIFFfree(buf);
}

uint32* readTiffVideo(std::string videoDirectoryPath, unsigned int &width, unsigned int &height, unsigned int &numTimePoints, std::string &baseName){
  DIR* dir;
  if (NULL == (dir = opendir(videoDirectoryPath.c_str()))){
    printf("Error : Failed to open input directory %s\n",videoDirectoryPath.c_str());
    exit(-1);
  }
  struct dirent* in_file;
  std::vector<uint32*> videoVector;
  unsigned int scanLineSize;
  std::cout<<"reading tif timepoint files from "<<videoDirectoryPath<<std::endl;
  std::vector<std::string> fileNames;
  while((in_file = readdir(dir)) != NULL){
    std::string currentFileName = in_file->d_name;
    if (currentFileName == "." || currentFileName == ".." || currentFileName.length() < 5|| currentFileName.substr(currentFileName.length() - 3) != "tif") continue; //TODO check if it is a tif
    if (numTimePoints == 0) {
      baseName = currentFileName.substr(currentFileName.find_last_of("/") + 1, currentFileName.length() - 8);
    }
    currentFileName = videoDirectoryPath + currentFileName;
    fileNames.push_back(currentFileName);
    ++numTimePoints;
  }
  closedir(dir);
  std::sort(fileNames.begin(), fileNames.end());
  for(int i = 0; i < numTimePoints; ++i){
    TIFF *tif = TIFFOpen(fileNames[i].c_str(), "r");
    if (tif) {
      if (i == 0) {
        TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &width);
        TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &height);
        scanLineSize = TIFFScanlineSize(tif);
      }
      uint32 *tpMatrix = new uint32[height*width];
      extractMartrices(tif, tpMatrix, width, height, scanLineSize);
      videoVector.push_back(tpMatrix);
      TIFFClose(tif);
    }
    else{
      std::cout<<"READING IN TIFF DIRECTORY FAILED AT TP = "<<numTimePoints<<std::endl;
      exit(-1);
    }
  }
  uint32* videoMatrix = new uint32[height*width*numTimePoints];
  for(int r = 0; r < height*width; ++r){
    for(int c = 0; c < numTimePoints; ++c){
      videoMatrix[r*numTimePoints + c] = videoVector[c][r];
    }
  }
  printf("width = %d::height = %d::numTimePoints = %d\nbaseName = %s\n", width, height, numTimePoints, baseName.c_str());
  return videoMatrix;
}

int createKey(float* &mtx, bool* &key, unsigned int numTimePoints, unsigned long numPixels){
  int nonZeroCounter = 0;
  int lastGoodIndex = 0;
  for (unsigned i = 0; i < numPixels; i++) {
    nonZeroCounter = 0;
    for (unsigned j = 0; j < numTimePoints; j++) {
      if (mtx[(numTimePoints * i) + j] != 0.0f) {
        nonZeroCounter++;
        break;
      }
    }
    if (nonZeroCounter != 0) {
      key[i] = true;
      lastGoodIndex++;
    }
  }
  return lastGoodIndex;
}

void createSpatialImages(std::string outDir, std::string firstTimePointLocation, std::string baseName, int k, unsigned int width, unsigned int height, float* W, bool* key, uint32 &samplesPerPixel, uint32 &bitsPerSample, uint32 &photo){
  uint32 max = 0;
  uint32 min = UINT32_MAX;
  uint32 ***kMatrix = new uint32**[k];
  for (int i = 0; i < k; ++i) {
    kMatrix[i] = new uint32*[height];
    for (int ii = 0; ii < height; ++ii) {
      kMatrix[i][ii] = new uint32[width];
    }
  }
  TIFF *tif = TIFFOpen(firstTimePointLocation.c_str(), "r");
  if(tif){
    tdata_t buf;
    tsize_t scanLineSize;
    uint32 row;
    std::vector<uint32 *> currentPlane;
    TIFFGetField(tif, TIFFTAG_SAMPLESPERPIXEL, &samplesPerPixel);
    TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bitsPerSample);
    TIFFGetField(tif, TIFFTAG_PHOTOMETRIC, &photo);
    scanLineSize = TIFFScanlineSize(tif);
    buf = _TIFFmalloc(scanLineSize);
    for (row = 0; row < height; ++row) {
      if (TIFFReadScanline(tif, buf, row, 0) != -1) {
        for(int i = 0; i < k; ++i){
          memcpy(kMatrix[i][row], buf, scanLineSize);
        }
        for (int col = 0; col < width; ++col) {
          if (kMatrix[0][row][col] > max) max = kMatrix[0][row][col];
          if (kMatrix[0][row][col] < min) min = kMatrix[0][row][col];
        }
      } else {
        std::cout << "ERROR READING SCANLINE" << std::endl;
        exit(-1);
      }
    }
    printf("first timepoint - (uint32) min = %d, max = %d\n",min,max);
    TIFFClose(tif);
    _TIFFfree(buf);
  }
  else{
    std::cout<<"cannot open "<<firstTimePointLocation<<std::endl;
    exit(-1);
  }
  int largest = 0;
  float largestValue = 0.0f;
  float currentValue = 0.0f;
  for(int row = 0; row < height; ++row){
    for (int col = 0; col < width; ++col) {
      for (int kFocus = 0; kFocus < k; kFocus++) {
        kMatrix[kFocus][row][col] -= min;
      }
      if(key[row*width + col]){
        largest = 0;
        largestValue = 0.0f;
        currentValue = 0.0f;
        for (int kFocus = 0; kFocus < k; kFocus++) {
          currentValue = W[(row*width + col)*k + kFocus];
          if (largestValue < currentValue) {
            largest = kFocus;
            largestValue = currentValue;
          }
        }
        kMatrix[largest][row][col] += (max - min) / 2;
      }
    }
  }
  for (int kFocus = 0; kFocus < k; ++kFocus) {
    std::string fileName = outDir + baseName + "_spacial_K" + std::to_string(k) + "_k" + std::to_string(kFocus) + ".tif";
    TIFF *resultTif = TIFFOpen(fileName.c_str(), "w");
    if (resultTif) {
      TIFFSetField(resultTif, TIFFTAG_IMAGEWIDTH, width);
      TIFFSetField(resultTif, TIFFTAG_IMAGELENGTH, height);
      TIFFSetField(resultTif, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel);
      TIFFSetField(resultTif, TIFFTAG_BITSPERSAMPLE, bitsPerSample);
      TIFFSetField(resultTif, TIFFTAG_PHOTOMETRIC, photo);
      for(uint32 row = 0; row < height; ++row){
        if (TIFFWriteScanline(resultTif, kMatrix[kFocus][row], row, 0) != 1) {
          std::cout << "ERROR WRITING FIRST TIMEPOINT" << std::endl;
          exit(-1);
        }
      }
      std::cout<<fileName<<" has been created"<<std::endl;
      TIFFClose(resultTif);
    }
    else{
      std::cout<<"COULD NOT OPEN TIF"<<std::endl;
      exit(-1);
    }
  }
  for (int i = 0; i < k; ++i) {
    for (int ii = 0; ii < height; ++ii) {
      delete[] kMatrix[i][ii];
    }
    delete[] kMatrix[i];
  }
  delete[] kMatrix;
}

void createKVideos(std::string outDir, std::string baseName, int k, unsigned int width, unsigned int height, unsigned int numTimePoints, float* W, float* H, bool* key, uint32 samplesPerPixel, uint32 bitsPerSample, uint32 photo){
  float* wColDevice;
  float* hRowDevice;
  float* resultTransposeDevice;
  float* resultTranspose = new float[height*width*numTimePoints];
  float* wCol = new float[height*width];
  CudaSafeCall(hipMalloc((void**)&wColDevice, height*width*sizeof(float)));
  CudaSafeCall(hipMalloc((void**)&hRowDevice, numTimePoints*sizeof(float)));
  CudaSafeCall(hipMalloc((void**)&resultTransposeDevice, width*height*numTimePoints*sizeof(float)));
  dim3 grid = {1,1,1};
  dim3 block = {1,1,1};
  float greatestTPK = 0.0f;
  int signatureTimePoint;
  getFlatGridBlock(height*width*numTimePoints, grid, block);
  for(int kFocus = 0; kFocus < k; ++kFocus){
    for(int w = 0; w < height*width; ++w){
      wCol[w] = W[w*k + kFocus];
    }
    CudaSafeCall(hipMemcpy(wColDevice, wCol, height*width*sizeof(float), hipMemcpyHostToDevice));
    CudaSafeCall(hipMemcpy(hRowDevice, H + (kFocus*numTimePoints), numTimePoints*sizeof(float), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( multiplyRowColumn), dim3(grid),dim3(block), 0, 0, wColDevice, hRowDevice, resultTransposeDevice, height*width, numTimePoints);
    CudaCheckError();
    CudaSafeCall(hipMemcpy(resultTranspose, resultTransposeDevice, width*height*numTimePoints*sizeof(float), hipMemcpyDeviceToHost));
    greatestTPK = 0.0f;
    for(int tp = 0; tp < numTimePoints; ++tp){
      if(H[kFocus*numTimePoints + tp] > greatestTPK){
        greatestTPK = H[kFocus*numTimePoints + tp];
        signatureTimePoint = tp;
      }
    }
    std::string newTif = outDir + baseName + "_temporal_K" + std::to_string(k) + "_k" + std::to_string(kFocus) + "_tp" + createFourCharInt(signatureTimePoint) + ".tif";
    TIFF *tpfTif = TIFFOpen(newTif.c_str(), "w");
    if(tpfTif){
      TIFFSetField(tpfTif, TIFFTAG_IMAGEWIDTH, width);
      TIFFSetField(tpfTif, TIFFTAG_IMAGELENGTH, height);
      TIFFSetField(tpfTif, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel);
      TIFFSetField(tpfTif, TIFFTAG_BITSPERSAMPLE, bitsPerSample);
      TIFFSetField(tpfTif, TIFFTAG_PHOTOMETRIC, photo);
      for(uint32 row = 0; row < height; ++row){
        if(TIFFWriteScanline(tpfTif, resultTranspose + ((signatureTimePoint*width*height) + (row*width)), row, 0) != 1) {
          std::cout << "ERROR WRITING FIRST TIMEPOINT" << std::endl;
          exit(-1);
        }
      }
      TIFFClose(tpfTif);
    }
    else{
      std::cout<<"COULD NOT CREATE "<<newTif<<std::endl;
      exit(-1);
    }
    std::cout<<newTif<<" has been created"<<std::endl;
  }
  CudaSafeCall(hipFree(wColDevice));
  CudaSafeCall(hipFree(hRowDevice));
  CudaSafeCall(hipFree(resultTransposeDevice));
  delete[] wCol;
  delete[] resultTranspose;
}

void createVisualization(std::string videoDirectoryPath, int k, unsigned int width, unsigned int height, unsigned int numTimePoints, float* W, float* H, bool* key, std::string baseName){
  std::string outDir = videoDirectoryPath + "out/";
  if(mkdir(outDir.c_str(), 0777) == -1){
    std::cout<<"CANNOT CREATE "<<outDir<<std::endl;
  }
  DIR* dir;
  std::string firstTimePointLocation = "";
  if (NULL == (dir = opendir(videoDirectoryPath.c_str()))){
    printf("Error : Failed to open input directory %s\n",videoDirectoryPath.c_str());
    exit(-1);
  }
  struct dirent* in_file;
  std::string currentFileName = "";
  while((in_file = readdir(dir)) != NULL){
    if (in_file->d_name == "." || in_file->d_name == "..") continue;
    currentFileName = in_file->d_name;
    if(currentFileName.find("0000.tif") != std::string::npos){
      firstTimePointLocation = videoDirectoryPath + currentFileName;
      break;
    }
  }
  closedir(dir);
  uint32 samplesPerPixel = 0, bitsPerSample = 0, photo = 0;
  createSpatialImages(outDir, firstTimePointLocation, baseName, k, width, height, W, key, samplesPerPixel, bitsPerSample, photo);
  createKVideos(outDir, baseName, k, width, height, numTimePoints, W, H, key, samplesPerPixel, bitsPerSample, photo);
}
cf130e8a5d8668dcfa0b89b60a3bcc3f419d4f40.cu
#include "io_util.cuh"

// Define this to turn on error checking
#define CUDA_ERROR_CHECK

#define CudaSafeCall(err) __cudaSafeCall(err, __FILE__, __LINE__)
#define CudaCheckError() __cudaCheckError(__FILE__, __LINE__)

inline void __cudaSafeCall(cudaError err, const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
  if (cudaSuccess != err) {
    fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
    exit(-1);
  }
#endif
  return;
}

inline void __cudaCheckError(const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
  cudaError err = cudaGetLastError();
  if (cudaSuccess != err) {
    fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
    exit(-1);
  }
  // More careful checking. However, this will affect performance.
  // Comment away if needed.
  // err = cudaDeviceSynchronize();
  if (cudaSuccess != err) {
    fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
    exit(-1);
  }
#endif
  return;
}

/*
IO HELPERS
*/

void parseArgs(const int &numArgs, char** args, unsigned int &k, unsigned int &iterations, double &learningRate, double &threshold, float &sigmoidTuner, std::string &baseDirectory){
  std::map<std::string, int> parameters;
  parameters["-k"] = 0;
  parameters["-i"] = 1;
  parameters["-l"] = 2;
  parameters["-t"] = 3;
  parameters["-s"] = 4;
  parameters["-h"] = 5;
  parameters["-help"] = 6;
  if(numArgs < 2){
    std::cout << "Usage: ./exe <directory of timepoint tifs>\nfor help ./exe -h"<<std::endl;
    exit(-1);
  }
  else if (numArgs >= 2){
    if(parameters[args[1]] < 5){
      baseDirectory = args[1];
    }
    else{
      std::ifstream readme;
      std::string line = "";
      readme.open("README.txt");
      if(readme.is_open()){
        while(getline(readme, line)){
          std::cout << line << std::endl;
        }
        readme.close();
      }
      exit(0);
    }
    for(int i = 2; i < numArgs; ++i) {
      switch(parameters[args[i]]){
        case 0: k = std::stoi(args[++i]); break;
        case 1: iterations = std::stoi(args[++i]); break;
        case 2: learningRate = std::stod(args[++i]); break;
        case 3: threshold = std::stod(args[++i]); break;
        case 4: sigmoidTuner = std::stof(args[++i]); break;
        default:
          std::cout<<"Please use all arguments listed in documentation"<<std::endl;
          exit(-1);
      }
    }
  }
}

std::string createFourCharInt(int i) {
  std::string strInt;
  if (i < 10) {
    strInt = "000" + std::to_string(i);
  } else if (i < 100) {
    strInt = "00" + std::to_string(i);
  } else if (i < 1000) {
    strInt = "0" + std::to_string(i);
  } else {
    strInt = std::to_string(i);
  }
  return strInt;
}

void extractMartrices(TIFF *tif, uint32* &imageMatrix, unsigned int width, unsigned int height, unsigned int scanLineSize) {
  tdata_t buf;
  buf = _TIFFmalloc(scanLineSize);
  for (uint32 row = 0; row < height; row++) {
    if (TIFFReadScanline(tif, buf, row, 0) != -1) {
      memcpy(&imageMatrix[row * width], buf, scanLineSize);
    } else {
      std::cout << "ERROR READING SCANLINE" << std::endl;
      exit(-1);
    }
  }
  _TIFFfree(buf);
}

uint32* readTiffVideo(std::string videoDirectoryPath, unsigned int &width, unsigned int &height, unsigned int &numTimePoints, std::string &baseName){
  DIR* dir;
  if (NULL == (dir = opendir(videoDirectoryPath.c_str()))){
    printf("Error : Failed to open input directory %s\n",videoDirectoryPath.c_str());
    exit(-1);
  }
  struct dirent* in_file;
  std::vector<uint32*> videoVector;
  unsigned int scanLineSize;
  std::cout<<"reading tif timepoint files from "<<videoDirectoryPath<<std::endl;
  std::vector<std::string> fileNames;
  while((in_file = readdir(dir)) != NULL){
    std::string currentFileName = in_file->d_name;
    if (currentFileName == "." || currentFileName == ".." || currentFileName.length() < 5|| currentFileName.substr(currentFileName.length() - 3) != "tif") continue; //TODO check if it is a tif
    if (numTimePoints == 0) {
      baseName = currentFileName.substr(currentFileName.find_last_of("/") + 1, currentFileName.length() - 8);
    }
    currentFileName = videoDirectoryPath + currentFileName;
    fileNames.push_back(currentFileName);
    ++numTimePoints;
  }
  closedir(dir);
  std::sort(fileNames.begin(), fileNames.end());
  for(int i = 0; i < numTimePoints; ++i){
    TIFF *tif = TIFFOpen(fileNames[i].c_str(), "r");
    if (tif) {
      if (i == 0) {
        TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &width);
        TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &height);
        scanLineSize = TIFFScanlineSize(tif);
      }
      uint32 *tpMatrix = new uint32[height*width];
      extractMartrices(tif, tpMatrix, width, height, scanLineSize);
      videoVector.push_back(tpMatrix);
      TIFFClose(tif);
    }
    else{
      std::cout<<"READING IN TIFF DIRECTORY FAILED AT TP = "<<numTimePoints<<std::endl;
      exit(-1);
    }
  }
  uint32* videoMatrix = new uint32[height*width*numTimePoints];
  for(int r = 0; r < height*width; ++r){
    for(int c = 0; c < numTimePoints; ++c){
      videoMatrix[r*numTimePoints + c] = videoVector[c][r];
    }
  }
  printf("width = %d::height = %d::numTimePoints = %d\nbaseName = %s\n", width, height, numTimePoints, baseName.c_str());
  return videoMatrix;
}

int createKey(float* &mtx, bool* &key, unsigned int numTimePoints, unsigned long numPixels){
  int nonZeroCounter = 0;
  int lastGoodIndex = 0;
  for (unsigned i = 0; i < numPixels; i++) {
    nonZeroCounter = 0;
    for (unsigned j = 0; j < numTimePoints; j++) {
      if (mtx[(numTimePoints * i) + j] != 0.0f) {
        nonZeroCounter++;
        break;
      }
    }
    if (nonZeroCounter != 0) {
      key[i] = true;
      lastGoodIndex++;
    }
  }
  return lastGoodIndex;
}

void createSpatialImages(std::string outDir, std::string firstTimePointLocation, std::string baseName, int k, unsigned int width, unsigned int height, float* W, bool* key, uint32 &samplesPerPixel, uint32 &bitsPerSample, uint32 &photo){
  uint32 max = 0;
  uint32 min = UINT32_MAX;
  uint32 ***kMatrix = new uint32**[k];
  for (int i = 0; i < k; ++i) {
    kMatrix[i] = new uint32*[height];
    for (int ii = 0; ii < height; ++ii) {
      kMatrix[i][ii] = new uint32[width];
    }
  }
  TIFF *tif = TIFFOpen(firstTimePointLocation.c_str(), "r");
  if(tif){
    tdata_t buf;
    tsize_t scanLineSize;
    uint32 row;
    std::vector<uint32 *> currentPlane;
    TIFFGetField(tif, TIFFTAG_SAMPLESPERPIXEL, &samplesPerPixel);
    TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bitsPerSample);
    TIFFGetField(tif, TIFFTAG_PHOTOMETRIC, &photo);
    scanLineSize = TIFFScanlineSize(tif);
    buf = _TIFFmalloc(scanLineSize);
    for (row = 0; row < height; ++row) {
      if (TIFFReadScanline(tif, buf, row, 0) != -1) {
        for(int i = 0; i < k; ++i){
          memcpy(kMatrix[i][row], buf, scanLineSize);
        }
        for (int col = 0; col < width; ++col) {
          if (kMatrix[0][row][col] > max) max = kMatrix[0][row][col];
          if (kMatrix[0][row][col] < min) min = kMatrix[0][row][col];
        }
      } else {
        std::cout << "ERROR READING SCANLINE" << std::endl;
        exit(-1);
      }
    }
    printf("first timepoint - (uint32) min = %d, max = %d\n",min,max);
    TIFFClose(tif);
    _TIFFfree(buf);
  }
  else{
    std::cout<<"cannot open "<<firstTimePointLocation<<std::endl;
    exit(-1);
  }
  int largest = 0;
  float largestValue = 0.0f;
  float currentValue = 0.0f;
  for(int row = 0; row < height; ++row){
    for (int col = 0; col < width; ++col) {
      for (int kFocus = 0; kFocus < k; kFocus++) {
        kMatrix[kFocus][row][col] -= min;
      }
      if(key[row*width + col]){
        largest = 0;
        largestValue = 0.0f;
        currentValue = 0.0f;
        for (int kFocus = 0; kFocus < k; kFocus++) {
          currentValue = W[(row*width + col)*k + kFocus];
          if (largestValue < currentValue) {
            largest = kFocus;
            largestValue = currentValue;
          }
        }
        kMatrix[largest][row][col] += (max - min) / 2;
      }
    }
  }
  for (int kFocus = 0; kFocus < k; ++kFocus) {
    std::string fileName = outDir + baseName + "_spacial_K" + std::to_string(k) + "_k" + std::to_string(kFocus) + ".tif";
    TIFF *resultTif = TIFFOpen(fileName.c_str(), "w");
    if (resultTif) {
      TIFFSetField(resultTif, TIFFTAG_IMAGEWIDTH, width);
      TIFFSetField(resultTif, TIFFTAG_IMAGELENGTH, height);
      TIFFSetField(resultTif, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel);
      TIFFSetField(resultTif, TIFFTAG_BITSPERSAMPLE, bitsPerSample);
      TIFFSetField(resultTif, TIFFTAG_PHOTOMETRIC, photo);
      for(uint32 row = 0; row < height; ++row){
        if (TIFFWriteScanline(resultTif, kMatrix[kFocus][row], row, 0) != 1) {
          std::cout << "ERROR WRITING FIRST TIMEPOINT" << std::endl;
          exit(-1);
        }
      }
      std::cout<<fileName<<" has been created"<<std::endl;
      TIFFClose(resultTif);
    }
    else{
      std::cout<<"COULD NOT OPEN TIF"<<std::endl;
      exit(-1);
    }
  }
  for (int i = 0; i < k; ++i) {
    for (int ii = 0; ii < height; ++ii) {
      delete[] kMatrix[i][ii];
    }
    delete[] kMatrix[i];
  }
  delete[] kMatrix;
}

void createKVideos(std::string outDir, std::string baseName, int k, unsigned int width, unsigned int height, unsigned int numTimePoints, float* W, float* H, bool* key, uint32 samplesPerPixel, uint32 bitsPerSample, uint32 photo){
  float* wColDevice;
  float* hRowDevice;
  float* resultTransposeDevice;
  float* resultTranspose = new float[height*width*numTimePoints];
  float* wCol = new float[height*width];
  CudaSafeCall(cudaMalloc((void**)&wColDevice, height*width*sizeof(float)));
  CudaSafeCall(cudaMalloc((void**)&hRowDevice, numTimePoints*sizeof(float)));
  CudaSafeCall(cudaMalloc((void**)&resultTransposeDevice, width*height*numTimePoints*sizeof(float)));
  dim3 grid = {1,1,1};
  dim3 block = {1,1,1};
  float greatestTPK = 0.0f;
  int signatureTimePoint;
  getFlatGridBlock(height*width*numTimePoints, grid, block);
  for(int kFocus = 0; kFocus < k; ++kFocus){
    for(int w = 0; w < height*width; ++w){
      wCol[w] = W[w*k + kFocus];
    }
    CudaSafeCall(cudaMemcpy(wColDevice, wCol, height*width*sizeof(float), cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy(hRowDevice, H + (kFocus*numTimePoints), numTimePoints*sizeof(float), cudaMemcpyHostToDevice));
    multiplyRowColumn<<<grid,block>>>(wColDevice, hRowDevice, resultTransposeDevice, height*width, numTimePoints);
    CudaCheckError();
    CudaSafeCall(cudaMemcpy(resultTranspose, resultTransposeDevice, width*height*numTimePoints*sizeof(float), cudaMemcpyDeviceToHost));
    greatestTPK = 0.0f;
    for(int tp = 0; tp < numTimePoints; ++tp){
      if(H[kFocus*numTimePoints + tp] > greatestTPK){
        greatestTPK = H[kFocus*numTimePoints + tp];
        signatureTimePoint = tp;
      }
    }
    std::string newTif = outDir + baseName + "_temporal_K" + std::to_string(k) + "_k" + std::to_string(kFocus) + "_tp" + createFourCharInt(signatureTimePoint) + ".tif";
    TIFF *tpfTif = TIFFOpen(newTif.c_str(), "w");
    if(tpfTif){
      TIFFSetField(tpfTif, TIFFTAG_IMAGEWIDTH, width);
      TIFFSetField(tpfTif, TIFFTAG_IMAGELENGTH, height);
      TIFFSetField(tpfTif, TIFFTAG_SAMPLESPERPIXEL, samplesPerPixel);
      TIFFSetField(tpfTif, TIFFTAG_BITSPERSAMPLE, bitsPerSample);
      TIFFSetField(tpfTif, TIFFTAG_PHOTOMETRIC, photo);
      for(uint32 row = 0; row < height; ++row){
        if(TIFFWriteScanline(tpfTif, resultTranspose + ((signatureTimePoint*width*height) + (row*width)), row, 0) != 1) {
          std::cout << "ERROR WRITING FIRST TIMEPOINT" << std::endl;
          exit(-1);
        }
      }
      TIFFClose(tpfTif);
    }
    else{
      std::cout<<"COULD NOT CREATE "<<newTif<<std::endl;
      exit(-1);
    }
    std::cout<<newTif<<" has been created"<<std::endl;
  }
  CudaSafeCall(cudaFree(wColDevice));
  CudaSafeCall(cudaFree(hRowDevice));
  CudaSafeCall(cudaFree(resultTransposeDevice));
  delete[] wCol;
  delete[] resultTranspose;
}

void createVisualization(std::string videoDirectoryPath, int k, unsigned int width, unsigned int height, unsigned int numTimePoints, float* W, float* H, bool* key, std::string baseName){
  std::string outDir = videoDirectoryPath + "out/";
  if(mkdir(outDir.c_str(), 0777) == -1){
    std::cout<<"CANNOT CREATE "<<outDir<<std::endl;
  }
  DIR* dir;
  std::string firstTimePointLocation = "";
  if (NULL == (dir = opendir(videoDirectoryPath.c_str()))){
    printf("Error : Failed to open input directory %s\n",videoDirectoryPath.c_str());
    exit(-1);
  }
  struct dirent* in_file;
  std::string currentFileName = "";
  while((in_file = readdir(dir)) != NULL){
    if (in_file->d_name == "." || in_file->d_name == "..") continue;
    currentFileName = in_file->d_name;
    if(currentFileName.find("0000.tif") != std::string::npos){
      firstTimePointLocation = videoDirectoryPath + currentFileName;
      break;
    }
  }
  closedir(dir);
  uint32 samplesPerPixel = 0, bitsPerSample = 0, photo = 0;
  createSpatialImages(outDir, firstTimePointLocation, baseName, k, width, height, W, key, samplesPerPixel, bitsPerSample, photo);
  createKVideos(outDir, baseName, k, width, height, numTimePoints, W, H, key, samplesPerPixel, bitsPerSample, photo);
}
7e062f5773494ab50d254987d3dea04a9cc2bab1.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void atan2_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::atan2(a, b); }); }); } void bitwise_xor_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { // Boolean type does not work with ^ (bitwise XOR) in C++. bitwise_xor wraps this operation for both Boolean and // integral types. gpu_kernel_with_scalars( iter, []GPU_LAMBDA(bool a, bool b) { return a != b; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_xor_cuda", [&]() { gpu_kernel_with_scalars( iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a ^ b; }); }); } } void logical_and_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_and_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a && b; }); }); } void logical_or_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_or_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a || b; }); }); } void logical_xor_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_xor_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return bool(a) != bool(b); }); }); } void smooth_l1_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { auto z = fabs(a - b); return z < scalar_t(1.) ? scalar_t(0.5) * z * z : z - scalar_t(0.5); }); }); } void sigmoid_backward_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sigmoid_backward_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * (scalar_t(1.) - b) * b; }); }); } void mse_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "mse_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { auto diff = a - b; return diff * diff; }); }); } REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda); REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda); REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda); REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda); REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda); REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda); REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda); REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda); }} // namespace at::native
7e062f5773494ab50d254987d3dea04a9cc2bab1.cu
#include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void atan2_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::atan2(a, b); }); }); } void bitwise_xor_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { // Boolean type does not work with ^ (bitwise XOR) in C++. bitwise_xor wraps this operation for both Boolean and // integral types. gpu_kernel_with_scalars( iter, []GPU_LAMBDA(bool a, bool b) { return a != b; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_xor_cuda", [&]() { gpu_kernel_with_scalars( iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a ^ b; }); }); } } void logical_and_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_and_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a && b; }); }); } void logical_or_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_or_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return a || b; }); }); } void logical_xor_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_xor_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return bool(a) != bool(b); }); }); } void smooth_l1_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { auto z = fabs(a - b); return z < scalar_t(1.) ? scalar_t(0.5) * z * z : z - scalar_t(0.5); }); }); } void sigmoid_backward_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sigmoid_backward_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * (scalar_t(1.) - b) * b; }); }); } void mse_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "mse_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { auto diff = a - b; return diff * diff; }); }); } REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda); REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda); REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda); REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda); REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda); REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda); REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda); REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda); }} // namespace at::native
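Every kernel in this file follows the same pattern: dispatch on the iterator's dtype, then hand an elementwise device lambda to gpu_kernel or gpu_kernel_with_scalars. As an illustration of that pattern only (hypot_kernel_cuda is a hypothetical name, not an op defined here, and it would still need its own stub and REGISTER_DISPATCH entry), a further binary kernel would be written the same way:

void hypot_kernel_cuda(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "hypot_cuda", [&]() {
    gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
      // elementwise sqrt(a*a + b*b), wired up the same way as atan2 above
      return ::hypot(a, b);
    });
  });
}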
5011a9dbbb7ad5fe7da531aac8937096b37c60cd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <iostream> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hiprand/hiprand.h> #include <time.h> // 3*4 // // a11 a12 a13 a14 // a21 a22 a23 a24 // a31 a32 a33 a34 // // a11 a12 a13 a14 a21 a22 a23 a24 a31 a32 a33 a34 // // // cuBLAS // // a11 a21 a31 a12 a22 a32 a13 a23 a33 a14 a24 a34 // . // // (,column) // cuBLAS (, row) . // #define IDX2C(i,j,Id) (((j)*(Id))+(i)) // j -> row, i -> column using namespace std; bool ChoseGpuAvailable(int n) { int devicesCount; hipGetDeviceCount(&devicesCount); cout<<"devicesCount : "<<devicesCount<<endl; for(int i = 0 ; i < devicesCount ; i++) { hipDeviceProp_t deviceProperties; hipGetDeviceProperties(&deviceProperties,i); cout<<"----- device "<<i<<" -----"<<endl; cout<<"device name : "<<deviceProperties.name<<endl; cout<<"maxThreadsPerBlock : "<<deviceProperties.maxThreadsPerBlock<<endl; cout<<"warpSize : "<<deviceProperties.warpSize<<endl; } if(n > devicesCount && n < 0) return false; else { hipSetDevice(n); return true; } } int main(int argc, char** argv) { unsigned int m = 6; //row unsigned int n = 4; //column unsigned int k = 5; float *matrix1, *matrix2, *matrix3; clock_t t; int host2device_time, device2host_time,GPU_time; matrix1 = new float[m*k]; matrix2 = new float[k*n]; matrix3 = new float[m*n]; hipError_t cudaStat; hipblasStatus_t stat; hipblasHandle_t handle; hiprandGenerator_t gen; hiprandCreateGenerator(&gen,HIPRAND_RNG_PSEUDO_DEFAULT); ChoseGpuAvailable(1); // for(int x = 0 ; x < k ; x++) { for(int y = 0 ; y < m ; y++) { matrix1[IDX2C(y,x,m)] = 0.0; } } for(int x = 0 ; x < n ; x++) { for(int y = 0 ; y < k ; y++) { matrix2[IDX2C(y,x,k)] = 0.0; } } for(int x = 0 ; x < n ; x++) { for(int y = 0 ; y < m ; y++) { matrix3[IDX2C(y,x,m)] = 0.0; } } /* cout<<"maxtrix1"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < k ; x++) { cout<<matrix1[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"maxtrix2"<<endl; for(int y = 0 ; y < k ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix2[IDX2C(y,x,k)]<<" "; } cout<<endl; } cout<<"maxtrix3"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix3[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<endl; */ //cuda float *d_matrix1, *d_matrix2, *d_matrix3; hipMalloc(&d_matrix1,m*k*sizeof(float)); hipMalloc(&d_matrix2,k*n*sizeof(float)); hipMalloc(&d_matrix3,m*n*sizeof(float)); // memory -> cuda memory t = clock(); hipblasCreate(&handle); hipblasSetMatrix(m,k,sizeof(float),matrix1,m,d_matrix1,m); hipblasSetMatrix(k,n,sizeof(float),matrix2,k,d_matrix2,k); hipblasSetMatrix(m,n,sizeof(float),matrix3,m,d_matrix3,m); host2device_time = clock()-t; // ( ) float al=1.0f; float bet=0.0f; t = clock(); //stat = hipblasSgemv(handle, HIPBLAS_OP_N,m,n, &al, d_matrix1,m, d_vector1,1, &bet, d_vector2,1); // // (0,0) //3,4 . // //CUBLAS_OP_N //CUBLAS_OP_T transpose. 
// // stat = hipblasSgemm(handle, HIPBLAS_OP_T,HIPBLAS_OP_N,m-4,n-2,k-3, &al, d_matrix1+m,m, d_matrix2,k, &bet, d_matrix3,m); //hiprandGenerateNormal(gen,d_matrix1,m*k,0,0.1); hiprandGenerateUniform(gen,d_matrix1,m*k); hiprandGenerateUniform(gen,d_matrix2,k*n); hiprandGenerateUniform(gen,d_matrix3,m*n); stat = hipblasSgemm(handle, HIPBLAS_OP_T,HIPBLAS_OP_N,m-4,n-2,k-3, &al, d_matrix1+m,m, d_matrix2,k, &bet, d_matrix3,m); hiprandGenerateUniform(gen,d_matrix1,m*k); hiprandGenerateUniform(gen,d_matrix2,k*n); hiprandGenerateUniform(gen,d_matrix3,m*n); GPU_time = clock() - t; //cuda memory -> memory t= clock(); hipblasGetMatrix(m,k,sizeof(float),d_matrix1,m,matrix1,m); hipblasGetMatrix(k,n,sizeof(float),d_matrix2,k,matrix2,k); hipblasGetMatrix(m,n,sizeof(float),d_matrix3,m,matrix3,m); device2host_time = clock() - t; // check results cout<<"maxtrix1"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < k ; x++) { cout<<matrix1[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"maxtrix2"<<endl; for(int y = 0 ; y < k ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix2[IDX2C(y,x,k)]<<" "; } cout<<endl; } cout<<"maxtrix3"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix3[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"host to device time : "<<host2device_time<<endl; cout<<"GPU time : "<<GPU_time<<endl; cout<<"device to host time : "<<device2host_time<<endl; //cuda memory release hipFree(d_matrix1); hipFree(d_matrix2); hipFree(d_matrix3); hipblasDestroy(handle); hiprandDestroyGenerator(gen); delete[] matrix1; delete[] matrix2; delete[] matrix3; return 0; }
5011a9dbbb7ad5fe7da531aac8937096b37c60cd.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include <cuda_runtime.h> #include <cublas_v2.h> #include <curand.h> #include <time.h> //보통 행렬은 3*4 행렬이면 // // a11 a12 a13 a14 // a21 a22 a23 a24 // a31 a32 a33 a34 // // a11 a12 a13 a14 a21 a22 a23 a24 a31 a32 a33 a34 // 위와 같이 저장하지만 // // cuBLAS에서는 // // a11 a21 a31 a12 a22 a32 a13 a23 a33 a14 a24 a34 // 위와 같이 저장된다. // // 보통 열(세로,column)의 수 기준으로 저장하지만 // cuBLAS에서는 행(가로, row)의 수 기준으로 저장한다. // #define IDX2C(i,j,Id) (((j)*(Id))+(i)) // j -> row, i -> column using namespace std; bool ChoseGpuAvailable(int n) { int devicesCount; cudaGetDeviceCount(&devicesCount); cout<<"devicesCount : "<<devicesCount<<endl; for(int i = 0 ; i < devicesCount ; i++) { cudaDeviceProp deviceProperties; cudaGetDeviceProperties(&deviceProperties,i); cout<<"----- device "<<i<<" -----"<<endl; cout<<"device name : "<<deviceProperties.name<<endl; cout<<"maxThreadsPerBlock : "<<deviceProperties.maxThreadsPerBlock<<endl; cout<<"warpSize : "<<deviceProperties.warpSize<<endl; } if(n > devicesCount && n < 0) return false; else { cudaSetDevice(n); return true; } } int main(int argc, char** argv) { unsigned int m = 6; //row unsigned int n = 4; //column unsigned int k = 5; float *matrix1, *matrix2, *matrix3; clock_t t; int host2device_time, device2host_time,GPU_time; matrix1 = new float[m*k]; matrix2 = new float[k*n]; matrix3 = new float[m*n]; cudaError_t cudaStat; cublasStatus_t stat; cublasHandle_t handle; curandGenerator_t gen; curandCreateGenerator(&gen,CURAND_RNG_PSEUDO_DEFAULT); ChoseGpuAvailable(1); //데이터 초기화 for(int x = 0 ; x < k ; x++) { for(int y = 0 ; y < m ; y++) { matrix1[IDX2C(y,x,m)] = 0.0; } } for(int x = 0 ; x < n ; x++) { for(int y = 0 ; y < k ; y++) { matrix2[IDX2C(y,x,k)] = 0.0; } } for(int x = 0 ; x < n ; x++) { for(int y = 0 ; y < m ; y++) { matrix3[IDX2C(y,x,m)] = 0.0; } } /* cout<<"maxtrix1"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < k ; x++) { cout<<matrix1[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"maxtrix2"<<endl; for(int y = 0 ; y < k ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix2[IDX2C(y,x,k)]<<" "; } cout<<endl; } cout<<"maxtrix3"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix3[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<endl; */ //cuda 메모리 할당 float *d_matrix1, *d_matrix2, *d_matrix3; cudaMalloc(&d_matrix1,m*k*sizeof(float)); cudaMalloc(&d_matrix2,k*n*sizeof(float)); cudaMalloc(&d_matrix3,m*n*sizeof(float)); // memory -> cuda memory t = clock(); cublasCreate(&handle); cublasSetMatrix(m,k,sizeof(float),matrix1,m,d_matrix1,m); cublasSetMatrix(k,n,sizeof(float),matrix2,k,d_matrix2,k); cublasSetMatrix(m,n,sizeof(float),matrix3,m,d_matrix3,m); host2device_time = clock()-t; // 연산 (커널 실행) float al=1.0f; float bet=0.0f; t = clock(); //stat = cublasSgemv(handle, CUBLAS_OP_N,m,n, &al, d_matrix1,m, d_vector1,1, &bet, d_vector2,1); // //먼저 주소 값의 바꿔주면서 행렬의 (0,0)의 위치를 바꿔주고 //3,4번째 파라메터로 행렬의 최종 크기를 정해준다. // //CUBLAS_OP_N은 아무것도 안한것 //CUBLAS_OP_T는 transpose한것이다. 
// // stat = cublasSgemm(handle, CUBLAS_OP_T,CUBLAS_OP_N,m-4,n-2,k-3, &al, d_matrix1+m,m, d_matrix2,k, &bet, d_matrix3,m); //curandGenerateNormal(gen,d_matrix1,m*k,0,0.1); curandGenerateUniform(gen,d_matrix1,m*k); curandGenerateUniform(gen,d_matrix2,k*n); curandGenerateUniform(gen,d_matrix3,m*n); stat = cublasSgemm(handle, CUBLAS_OP_T,CUBLAS_OP_N,m-4,n-2,k-3, &al, d_matrix1+m,m, d_matrix2,k, &bet, d_matrix3,m); curandGenerateUniform(gen,d_matrix1,m*k); curandGenerateUniform(gen,d_matrix2,k*n); curandGenerateUniform(gen,d_matrix3,m*n); GPU_time = clock() - t; //cuda memory -> memory t= clock(); cublasGetMatrix(m,k,sizeof(float),d_matrix1,m,matrix1,m); cublasGetMatrix(k,n,sizeof(float),d_matrix2,k,matrix2,k); cublasGetMatrix(m,n,sizeof(float),d_matrix3,m,matrix3,m); device2host_time = clock() - t; // check results cout<<"maxtrix1"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < k ; x++) { cout<<matrix1[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"maxtrix2"<<endl; for(int y = 0 ; y < k ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix2[IDX2C(y,x,k)]<<" "; } cout<<endl; } cout<<"maxtrix3"<<endl; for(int y = 0 ; y < m ; y++) { for(int x = 0 ; x < n ; x++) { cout<<matrix3[IDX2C(y,x,m)]<<" "; } cout<<endl; } cout<<"host to device time : "<<host2device_time<<endl; cout<<"GPU time : "<<GPU_time<<endl; cout<<"device to host time : "<<device2host_time<<endl; //cuda memory release cudaFree(d_matrix1); cudaFree(d_matrix2); cudaFree(d_matrix3); cublasDestroy(handle); curandDestroyGenerator(gen); delete[] matrix1; delete[] matrix2; delete[] matrix3; return 0; }
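Note that the cublasSgemm call above deliberately works on a sub-matrix: d_matrix1+m skips the first column and m-4, n-2, k-3 shrink the problem. For comparison, a plain full-size C = A*B with column-major A (m x k), B (k x n) and C (m x n) uses the row counts as leading dimensions (sketch only, reusing the variables already defined above):

stat = cublasSgemm(handle,
                   CUBLAS_OP_N, CUBLAS_OP_N,  // no transposition of A or B
                   m, n, k,
                   &al,
                   d_matrix1, m,              // A, lda = m
                   d_matrix2, k,              // B, ldb = k
                   &bet,
                   d_matrix3, m);             // C, ldc = m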
6b42edb16feb973dcd12eec5e1c47097cb6ed7ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <iostream> //Thrust #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/execution_policy.h> #include "hip_helpers.cuh" typedef uint8_t U8; typedef uint32_t U32; using namespace thrust; //Pack 4 U8's into an int void pack(device_vector<U8>& data, device_vector<int>& keys){ U8 *data_r = raw(data); int *keys_r = raw(keys); auto r = counting_iterator<int>(0); int n = keys.size(); for_each(r, r + n, [=] __device__(int i) { int packed = data_r[i]; packed <<= 8; if (i + 1 < n) packed |= data_r[i+1]; packed <<= 8; if (i + 2 < n) packed |= data_r[i + 2]; packed <<= 8; if (i + 3 < n) packed |= data_r[i + 3]; keys_r[i] = packed; }); } void mark_head(device_vector<int>& keys, device_vector<U8>& buckets){ int *keys_r = raw(keys); U8 *bucket_r = raw(buckets); auto r = counting_iterator<int>(0); int n = keys.size(); for_each(r, r + n, [=] __device__(int i) { //Already marked - don't need to do anything if (bucket_r[i] == 1){ return; } //First item is always head else if (i == 0){ bucket_r[i] = 1; } //Is different than previous item - must be a bucket head else if (keys_r[i] != keys_r[i - 1]){ bucket_r[i] = 1; } }); } void get_rank(device_vector<U8>& buckets, device_vector<int>& b_scan, device_vector<int>& rank, device_vector<int>& sa){ //Scan bucket heads //Copy buckets into rank before we scan it into b_scan - scanning 8 bit types creates problems copy(buckets.begin(), buckets.end(), rank.begin()); inclusive_scan(rank.begin(), rank.end(), b_scan.begin()); //Calculate rank - stores rank inverse to the suffix array // e.g. rank[3] stores the bucket position of sa[?] 
= 3 int *rank_r = raw(rank); int *sa_r = raw(sa); int *b_scan_r = raw(b_scan); auto r = counting_iterator<int>(0); int n = sa.size(); for_each(r, r + n, [=] __device__(int i) { int suffix = sa_r[i]; rank_r[suffix] = b_scan_r[i]; }); } void get_sort_keys(device_vector<int>& keys, device_vector<int>& rank, device_vector<int>& sa, device_vector<U8>& buckets, int step){ int *rank_r = raw(rank); int *sa_r = raw(sa); int *keys_r = raw(keys); U8 *buckets_r = raw(buckets); auto r = counting_iterator<int>(0); int n = keys.size(); for_each(r, r + n, [=] __device__(int i) { //Check if already sorted //If is last item - just need to check its flag if (buckets_r[i] == 1 && i == n - 1) return; //Otherwise, if the current item and its next item are flagged, current item must be already sorted else if (buckets_r[i] == 1 && buckets_r[i + 1]) return; //Set sort keys int next_suffix = sa_r[i] + step; //Went off end of string - must be lexicographically less than rest of bucket if (next_suffix >= n) keys_r[i] = -next_suffix; //Else set sort key to rank of next suffix else keys_r[i] = rank_r[next_suffix]; }); } //We have to do a 2 pass sort here to get a "segmented sort" void sort_sa(device_vector<int>& keys, device_vector<int>& b_scan, device_vector<int>& sa){ stable_sort_by_key(keys.begin(), keys.end(), make_zip_iterator(make_tuple(sa.begin(), b_scan.begin()))); stable_sort_by_key(b_scan.begin(), b_scan.end(), make_zip_iterator(make_tuple(sa.begin(), keys.begin()))); } int device_sa(const unsigned char *data_in, int *sa_in, int n){ try{ //Copy up to device vectors device_vector<U8> data(data_in, data_in + n); device_vector<int> sa(n); //Init suffix array sequence(sa.begin(), sa.end()); device_vector<int> keys(n); //Sort keys device_vector<U8> buckets(n, 0); //Bucket head flags device_vector<int> b_scan(n); //Scanned head flags device_vector<int> rank(n); //Rank of suffixes //Pack 4 bytes into keys so we can radix sort to H order 4 before prefix doubling pack(data, keys); //Radix sort as unsigned //We have to cast keys to a raw pointer then to a device_ptr to convince thrust its unsigned unsigned int *keys_r = (unsigned int*)raw(keys); device_ptr<unsigned int> keys_ptr(keys_r); stable_sort_by_key(keys_ptr, keys_ptr + n, sa.begin()); int step = 4; //Begin prefix doubling loop - runs at most log(n) times while (true){ //Mark bucket heads mark_head(keys, buckets); //Check if we are done, i.e. every item is a bucket head int result = reduce(buckets.begin(), buckets.end(), INT_MAX, minimum<int>()); if (result == 1) break; //Get rank of suffixes get_rank(buckets, b_scan, rank, sa); //Use rank as new sort keys get_sort_keys(keys, rank, sa, buckets, step); //Sort sort_sa(keys, b_scan, sa); /* std::cout << "-----\n"; print("SA", sa); print("Keys", keys); print("Buckets", buckets); print("rank", rank); std::cout << "-----\n"; */ step *= 2; //Just in case, check for infinite loop if (step < 0){ std::cout << "Error: Prefix doubling infinite loop.\n"; return 1; } } //Copy SA back to host safe_cuda(hipMemcpy(sa_in, raw(sa), sizeof(int)*sa.size(), hipMemcpyDeviceToHost)); } catch (thrust::system_error &e) { std::cerr << "CUDA error: " << e.what() << std::endl; } return 0; }
6b42edb16feb973dcd12eec5e1c47097cb6ed7ea.cu
#include "cuda_runtime.h" #include <stdint.h> #include <iostream> //Thrust #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/execution_policy.h> #include "cuda_helpers.cuh" typedef uint8_t U8; typedef uint32_t U32; using namespace thrust; //Pack 4 U8's into an int void pack(device_vector<U8>& data, device_vector<int>& keys){ U8 *data_r = raw(data); int *keys_r = raw(keys); auto r = counting_iterator<int>(0); int n = keys.size(); for_each(r, r + n, [=] __device__(int i) { int packed = data_r[i]; packed <<= 8; if (i + 1 < n) packed |= data_r[i+1]; packed <<= 8; if (i + 2 < n) packed |= data_r[i + 2]; packed <<= 8; if (i + 3 < n) packed |= data_r[i + 3]; keys_r[i] = packed; }); } void mark_head(device_vector<int>& keys, device_vector<U8>& buckets){ int *keys_r = raw(keys); U8 *bucket_r = raw(buckets); auto r = counting_iterator<int>(0); int n = keys.size(); for_each(r, r + n, [=] __device__(int i) { //Already marked - don't need to do anything if (bucket_r[i] == 1){ return; } //First item is always head else if (i == 0){ bucket_r[i] = 1; } //Is different than previous item - must be a bucket head else if (keys_r[i] != keys_r[i - 1]){ bucket_r[i] = 1; } }); } void get_rank(device_vector<U8>& buckets, device_vector<int>& b_scan, device_vector<int>& rank, device_vector<int>& sa){ //Scan bucket heads //Copy buckets into rank before we scan it into b_scan - scanning 8 bit types creates problems copy(buckets.begin(), buckets.end(), rank.begin()); inclusive_scan(rank.begin(), rank.end(), b_scan.begin()); //Calculate rank - stores rank inverse to the suffix array // e.g. rank[3] stores the bucket position of sa[?] = 3 int *rank_r = raw(rank); int *sa_r = raw(sa); int *b_scan_r = raw(b_scan); auto r = counting_iterator<int>(0); int n = sa.size(); for_each(r, r + n, [=] __device__(int i) { int suffix = sa_r[i]; rank_r[suffix] = b_scan_r[i]; }); } void get_sort_keys(device_vector<int>& keys, device_vector<int>& rank, device_vector<int>& sa, device_vector<U8>& buckets, int step){ int *rank_r = raw(rank); int *sa_r = raw(sa); int *keys_r = raw(keys); U8 *buckets_r = raw(buckets); auto r = counting_iterator<int>(0); int n = keys.size(); for_each(r, r + n, [=] __device__(int i) { //Check if already sorted //If is last item - just need to check its flag if (buckets_r[i] == 1 && i == n - 1) return; //Otherwise, if the current item and its next item are flagged, current item must be already sorted else if (buckets_r[i] == 1 && buckets_r[i + 1]) return; //Set sort keys int next_suffix = sa_r[i] + step; //Went off end of string - must be lexicographically less than rest of bucket if (next_suffix >= n) keys_r[i] = -next_suffix; //Else set sort key to rank of next suffix else keys_r[i] = rank_r[next_suffix]; }); } //We have to do a 2 pass sort here to get a "segmented sort" void sort_sa(device_vector<int>& keys, device_vector<int>& b_scan, device_vector<int>& sa){ stable_sort_by_key(keys.begin(), keys.end(), make_zip_iterator(make_tuple(sa.begin(), b_scan.begin()))); stable_sort_by_key(b_scan.begin(), b_scan.end(), make_zip_iterator(make_tuple(sa.begin(), keys.begin()))); } int device_sa(const unsigned char *data_in, int *sa_in, int n){ try{ //Copy up to device vectors device_vector<U8> data(data_in, data_in + n); device_vector<int> sa(n); //Init suffix array sequence(sa.begin(), sa.end()); device_vector<int> keys(n); //Sort keys device_vector<U8> buckets(n, 0); //Bucket head flags 
device_vector<int> b_scan(n); //Scanned head flags device_vector<int> rank(n); //Rank of suffixes //Pack 4 bytes into keys so we can radix sort to H order 4 before prefix doubling pack(data, keys); //Radix sort as unsigned //We have to cast keys to a raw pointer then to a device_ptr to convince thrust its unsigned unsigned int *keys_r = (unsigned int*)raw(keys); device_ptr<unsigned int> keys_ptr(keys_r); stable_sort_by_key(keys_ptr, keys_ptr + n, sa.begin()); int step = 4; //Begin prefix doubling loop - runs at most log(n) times while (true){ //Mark bucket heads mark_head(keys, buckets); //Check if we are done, i.e. every item is a bucket head int result = reduce(buckets.begin(), buckets.end(), INT_MAX, minimum<int>()); if (result == 1) break; //Get rank of suffixes get_rank(buckets, b_scan, rank, sa); //Use rank as new sort keys get_sort_keys(keys, rank, sa, buckets, step); //Sort sort_sa(keys, b_scan, sa); /* std::cout << "-----\n"; print("SA", sa); print("Keys", keys); print("Buckets", buckets); print("rank", rank); std::cout << "-----\n"; */ step *= 2; //Just in case, check for infinite loop if (step < 0){ std::cout << "Error: Prefix doubling infinite loop.\n"; return 1; } } //Copy SA back to host safe_cuda(cudaMemcpy(sa_in, raw(sa), sizeof(int)*sa.size(), cudaMemcpyDeviceToHost)); } catch (thrust::system_error &e) { std::cerr << "CUDA error: " << e.what() << std::endl; } return 0; }
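A minimal host-side driver for device_sa, shown purely as a usage sketch (it assumes this translation unit and its cuda_helpers.cuh dependency are compiled and linked with nvcc):

#include <cstdio>
int device_sa(const unsigned char *data_in, int *sa_in, int n);  // defined above

int main() {
  const char* text = "banana";
  int n = 6;                          // length of the input, no sentinel needed
  int sa[6];
  if (device_sa(reinterpret_cast<const unsigned char*>(text), sa, n) != 0) return 1;
  for (int i = 0; i < n; ++i) std::printf("%d ", sa[i]);  // expected: 5 3 1 0 4 2
  std::printf("\n");
  return 0;
}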
690d8feee60559dfb5497f288e6936f1c71c27cc.hip
// !!! This is a file automatically generated by hipify!!! using namespace libff; void fprint_fq(FILE* stream, Fq<mnt4753_pp> x); void write_mnt4_fq(FILE* output, Fq<mnt4753_pp> x); void write_mnt6_fq(FILE* output, Fq<mnt6753_pp> x); void write_mnt4_fq2(FILE* output, Fqe<mnt4753_pp> x); Fq<mnt4753_pp> read_mnt4_fq(FILE* input); Fq<mnt6753_pp> read_mnt6_fq(FILE* input); Fqe<mnt4753_pp> read_mnt4_fq2(FILE* input); struct delete_ptr { // Helper function to ease cleanup of container template <typename P> void operator () (P p) { delete p; } }; struct delete_ptr_gpu { // Helper function to ease cleanup of container template <typename P> void operator () (P p) { hipFree(p); } }; uint8_t* read_mnt_fq_2(FILE* inputs); uint8_t* read_mnt_fq_2_gpu(FILE* inputs); Fq<mnt4753_pp> to_fq(uint8_t* data); bool check(uint8_t* a, uint8_t* b, int num); void fprint_uint8_array(FILE* stream, uint8_t* array, int size); void fprint_fq(FILE* stream, Fq<mnt4753_pp> x) { int size = libff::mnt4753_q_limbs * sizeof(mp_size_t); uint8_t* array = (uint8_t*) x.mont_repr.data; for (int i = 0; i < size; i ++) { fprintf(stream, "%02x", array[i]); } fprintf(stream, "\n"); } void write_mnt4_fq(FILE* output, Fq<mnt4753_pp> x) { fwrite((void *) x.mont_repr.data, libff::mnt4753_q_limbs * sizeof(mp_size_t), 1, output); } void write_mnt6_fq(FILE* output, Fq<mnt6753_pp> x) { fwrite((void *) x.mont_repr.data, libff::mnt6753_q_limbs * sizeof(mp_size_t), 1, output); } void write_mnt4_fq2(FILE* output, Fqe<mnt4753_pp> x) { write_mnt4_fq(output, x.c0); write_mnt4_fq(output, x.c1); } Fq<mnt4753_pp> read_mnt4_fq(FILE* input) { Fq<mnt4753_pp> x; fread((void *) x.mont_repr.data, libff::mnt4753_q_limbs * sizeof(mp_size_t), 1, input); return x; } Fq<mnt6753_pp> read_mnt6_fq(FILE* input) { Fq<mnt6753_pp> x; fread((void *) x.mont_repr.data, libff::mnt6753_q_limbs * sizeof(mp_size_t), 1, input); return x; } Fqe<mnt4753_pp> read_mnt4_fq2(FILE* input) { Fq<mnt4753_pp> c0 = read_mnt4_fq(input); Fq<mnt4753_pp> c1 = read_mnt4_fq(input); return Fqe<mnt4753_pp>(c0, c1); } uint8_t* read_mnt_fq_2(FILE* inputs) { uint8_t* buf = (uint8_t*)calloc(bytes_per_elem, sizeof(uint8_t)); // the input is montgomery representation x * 2^768 whereas cuda-fixnum expects x * 2^1024 so we shift over by (1024-768)/8 bytes fread((void*)buf, io_bytes_per_elem*sizeof(uint8_t), 1, inputs); return buf; } bool check(uint8_t* a, uint8_t* b, int num) { return memcmp(a, b, num * sizeof(uint8_t)); } void fprint_uint8_array(FILE* stream, uint8_t* array, int size) { for (int i = 0; i < size; i ++) { fprintf(stream, "%02x", array[i]); } fprintf(stream, "\n"); } Fq<mnt4753_pp> to_fq(uint8_t* data) { Fq<mnt4753_pp> x; memcpy((void *) x.mont_repr.data, data, libff::mnt4753_q_limbs * sizeof(mp_size_t)); return x; }
690d8feee60559dfb5497f288e6936f1c71c27cc.cu
using namespace libff; void fprint_fq(FILE* stream, Fq<mnt4753_pp> x); void write_mnt4_fq(FILE* output, Fq<mnt4753_pp> x); void write_mnt6_fq(FILE* output, Fq<mnt6753_pp> x); void write_mnt4_fq2(FILE* output, Fqe<mnt4753_pp> x); Fq<mnt4753_pp> read_mnt4_fq(FILE* input); Fq<mnt6753_pp> read_mnt6_fq(FILE* input); Fqe<mnt4753_pp> read_mnt4_fq2(FILE* input); struct delete_ptr { // Helper function to ease cleanup of container template <typename P> void operator () (P p) { delete p; } }; struct delete_ptr_gpu { // Helper function to ease cleanup of container template <typename P> void operator () (P p) { cudaFree(p); } }; uint8_t* read_mnt_fq_2(FILE* inputs); uint8_t* read_mnt_fq_2_gpu(FILE* inputs); Fq<mnt4753_pp> to_fq(uint8_t* data); bool check(uint8_t* a, uint8_t* b, int num); void fprint_uint8_array(FILE* stream, uint8_t* array, int size); void fprint_fq(FILE* stream, Fq<mnt4753_pp> x) { int size = libff::mnt4753_q_limbs * sizeof(mp_size_t); uint8_t* array = (uint8_t*) x.mont_repr.data; for (int i = 0; i < size; i ++) { fprintf(stream, "%02x", array[i]); } fprintf(stream, "\n"); } void write_mnt4_fq(FILE* output, Fq<mnt4753_pp> x) { fwrite((void *) x.mont_repr.data, libff::mnt4753_q_limbs * sizeof(mp_size_t), 1, output); } void write_mnt6_fq(FILE* output, Fq<mnt6753_pp> x) { fwrite((void *) x.mont_repr.data, libff::mnt6753_q_limbs * sizeof(mp_size_t), 1, output); } void write_mnt4_fq2(FILE* output, Fqe<mnt4753_pp> x) { write_mnt4_fq(output, x.c0); write_mnt4_fq(output, x.c1); } Fq<mnt4753_pp> read_mnt4_fq(FILE* input) { Fq<mnt4753_pp> x; fread((void *) x.mont_repr.data, libff::mnt4753_q_limbs * sizeof(mp_size_t), 1, input); return x; } Fq<mnt6753_pp> read_mnt6_fq(FILE* input) { Fq<mnt6753_pp> x; fread((void *) x.mont_repr.data, libff::mnt6753_q_limbs * sizeof(mp_size_t), 1, input); return x; } Fqe<mnt4753_pp> read_mnt4_fq2(FILE* input) { Fq<mnt4753_pp> c0 = read_mnt4_fq(input); Fq<mnt4753_pp> c1 = read_mnt4_fq(input); return Fqe<mnt4753_pp>(c0, c1); } uint8_t* read_mnt_fq_2(FILE* inputs) { uint8_t* buf = (uint8_t*)calloc(bytes_per_elem, sizeof(uint8_t)); // the input is montgomery representation x * 2^768 whereas cuda-fixnum expects x * 2^1024 so we shift over by (1024-768)/8 bytes fread((void*)buf, io_bytes_per_elem*sizeof(uint8_t), 1, inputs); return buf; } bool check(uint8_t* a, uint8_t* b, int num) { return memcmp(a, b, num * sizeof(uint8_t)); } void fprint_uint8_array(FILE* stream, uint8_t* array, int size) { for (int i = 0; i < size; i ++) { fprintf(stream, "%02x", array[i]); } fprintf(stream, "\n"); } Fq<mnt4753_pp> to_fq(uint8_t* data) { Fq<mnt4753_pp> x; memcpy((void *) x.mont_repr.data, data, libff::mnt4753_q_limbs * sizeof(mp_size_t)); return x; }
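The only subtle point in read_mnt_fq_2 above is the size mismatch: io_bytes_per_elem bytes are read into a calloc'ed buffer of bytes_per_elem bytes, so the trailing bytes stay zero. With the sizes written out explicitly (96 input bytes for a 768-bit element and a 128-byte buffer for 1024-bit cuda-fixnum limbs; those constants are defined elsewhere in the repository, so the numbers here are illustrative), the same idea reads:

#include <cstdio>
#include <cstdlib>
#include <cstdint>

// read a 96-byte little-endian element into the low bytes of a zeroed 128-byte buffer
uint8_t* read_96_into_128(FILE* in) {
  uint8_t* buf = (uint8_t*)calloc(128, sizeof(uint8_t));  // bytes 96..127 remain zero
  fread((void*)buf, 96 * sizeof(uint8_t), 1, in);
  return buf;
}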
a1ba6e4125dc37fb47879c8ff24d63db4af29b9e.hip
// !!! This is a file automatically generated by hipify!!! /* // This source file contains the Cuda Code for subtraction of a source Image. // It is a part of Cuda Image Processing Library ). // Copyright (C) 2011 Remaldeep Singh // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "hip/hip_runtime.h" #include "../API/gpu_filter_api.h" #include "assert.h" #include "stdio.h" // gpu_in is 8bit input image. StaticBg is the background to be subtracted. outTemp is for output. // It is the duty of the programmer to contantly change staticBg for dynamic filtering. __global__ void subtract( unsigned char *gpu_buffer_1, unsigned char *staticBg, int imageW, int imageH) { //int ix = threadIdx.x + __mul24( blockIdx.x, blockDim.x); //int iy = threadIdx.y + __mul24( blockIdx.y, blockDim.y); //int offset = ix + iy * __mul24( blockDim.x, gridDim.x); int ix = threadIdx.x + (blockIdx.x * blockDim.x); int iy = threadIdx.y + (blockIdx.y * blockDim.y); int offset = ix + iy * blockDim.x * gridDim.x; if(ix >= imageW || iy >= imageH) return; gpu_buffer_1[offset] = ( (gpu_buffer_1[offset] - staticBg[offset]) < 0 ? 0 : (gpu_buffer_1[offset] - staticBg[offset]) ); } gpu_error_t gpu_sub( gpu_context_t *ctx, unsigned char *staticBg) { assert(staticBg); assert(ctx); float elapsedtime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); gpu_error_t error = GPU_OK; char *errstr; unsigned char *temp; int siz = ctx->height * ctx->width; hipMalloc( (void **)&temp, siz); hipMemcpy( temp, staticBg, siz, hipMemcpyHostToDevice); error = CHECK_CUDA_ERROR(); int threadsX = ctx->threadsX; int threadsY = ctx->threadsY; int temp1 = ((ctx->width % threadsX) != 0 ? (ctx->width / threadsX) + 1 : ctx->width / threadsX ); int temp2 = ((ctx->height % threadsY) != 0 ? (ctx->height / threadsY) + 1 : ctx->height / threadsY ); ////////////////////////////////////////////////////////////////////////////// dim3 threads( threadsX, threadsY); dim3 blocks( temp1, temp2); hipLaunchKernelGGL(( subtract), dim3(blocks), dim3(threads), 0, 0, ctx->gpu_buffer_1, temp, ctx->width, ctx->height); ///////////////////////////////////////////////////////////////////////////// if( hipSuccess != hipMemcpy( ctx->output_buffer_1, ctx->gpu_buffer_1, siz, hipMemcpyDeviceToHost)) //errstr = gpu_error(); error = GPU_ERR_MEM; hipFree(temp); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedtime,start,stop); hipEventDestroy(start); hipEventDestroy(stop); //FILE *file; //file = fopen("../timing.txt","a+"); fprintf(stderr,"BgSubtract:%lf \n",elapsedtime); //fclose(file); return error; }
a1ba6e4125dc37fb47879c8ff24d63db4af29b9e.cu
/* // This source file contains the Cuda Code for subtraction of a source Image. // It is a part of Cuda Image Processing Library ). // Copyright (C) 2011 Remaldeep Singh // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "cuda.h" #include "../API/gpu_filter_api.h" #include "assert.h" #include "stdio.h" // gpu_in is 8bit input image. StaticBg is the background to be subtracted. outTemp is for output. // It is the duty of the programmer to contantly change staticBg for dynamic filtering. __global__ void subtract( unsigned char *gpu_buffer_1, unsigned char *staticBg, int imageW, int imageH) { //int ix = threadIdx.x + __mul24( blockIdx.x, blockDim.x); //int iy = threadIdx.y + __mul24( blockIdx.y, blockDim.y); //int offset = ix + iy * __mul24( blockDim.x, gridDim.x); int ix = threadIdx.x + (blockIdx.x * blockDim.x); int iy = threadIdx.y + (blockIdx.y * blockDim.y); int offset = ix + iy * blockDim.x * gridDim.x; if(ix >= imageW || iy >= imageH) return; gpu_buffer_1[offset] = ( (gpu_buffer_1[offset] - staticBg[offset]) < 0 ? 0 : (gpu_buffer_1[offset] - staticBg[offset]) ); } gpu_error_t gpu_sub( gpu_context_t *ctx, unsigned char *staticBg) { assert(staticBg); assert(ctx); float elapsedtime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); gpu_error_t error = GPU_OK; char *errstr; unsigned char *temp; int siz = ctx->height * ctx->width; cudaMalloc( (void **)&temp, siz); cudaMemcpy( temp, staticBg, siz, cudaMemcpyHostToDevice); error = CHECK_CUDA_ERROR(); int threadsX = ctx->threadsX; int threadsY = ctx->threadsY; int temp1 = ((ctx->width % threadsX) != 0 ? (ctx->width / threadsX) + 1 : ctx->width / threadsX ); int temp2 = ((ctx->height % threadsY) != 0 ? (ctx->height / threadsY) + 1 : ctx->height / threadsY ); ////////////////////////////////////////////////////////////////////////////// dim3 threads( threadsX, threadsY); dim3 blocks( temp1, temp2); subtract<<< blocks, threads>>>(ctx->gpu_buffer_1, temp, ctx->width, ctx->height); ///////////////////////////////////////////////////////////////////////////// if( cudaSuccess != cudaMemcpy( ctx->output_buffer_1, ctx->gpu_buffer_1, siz, cudaMemcpyDeviceToHost)) //errstr = gpu_error(); error = GPU_ERR_MEM; cudaFree(temp); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedtime,start,stop); cudaEventDestroy(start); cudaEventDestroy(stop); //FILE *file; //file = fopen("../timing.txt","a+"); fprintf(stderr,"BgSubtract:%lf \n",elapsedtime); //fclose(file); return error; }
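The temp1/temp2 computation in gpu_sub above is just ceiling division for the grid size. A small helper states the intent directly (illustrative only, not part of this library's API):

static inline dim3 makeGrid(int width, int height, int threadsX, int threadsY) {
  // one extra block whenever the image size is not a multiple of the block size
  return dim3((width  + threadsX - 1) / threadsX,
              (height + threadsY - 1) / threadsY);
}
// e.g. a 1920x1080 frame with 16x16 threads gives a 120 x 68 grid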
bd629ce580cc1d26c703b2a3209d6530c7088c12.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "maxout_layer_hessian_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../maxout_layer.h" __global__ void maxout_hess_kernel( float * __restrict output, int * __restrict max_feature_map_positions, const float * __restrict input, int neuron_count_per_feature_map, int input_feature_map_count, int output_feature_map_count, int feature_map_subsampling_size, int entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; float max_val = input[input_offset]; int max_pos = 0; for(int i = 1; i < feature_map_subsampling_size; ++i) { input_offset += output_feature_map_count * neuron_count_per_feature_map; float new_val = input[input_offset]; if (new_val > max_val) { max_val = new_val; max_pos = i; } } int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; output[output_offset] = max_val; max_feature_map_positions[output_offset] = max_pos; } } __global__ void maxout_square_deriviative_hess_kernel( float * __restrict input_errors, const int * __restrict max_feature_map_positions, const float * __restrict output_errors, int neuron_count_per_feature_map, int input_feature_map_count, int output_feature_map_count, int feature_map_subsampling_size, int entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; int max_feature_map = max_feature_map_positions[output_offset]; float output_error = output_errors[output_offset]; int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; for(int i = 0; i < feature_map_subsampling_size; ++i) { input_errors[input_offset] = ((i == max_feature_map) ? 
output_error : 0.0F); input_offset += output_feature_map_count * neuron_count_per_feature_map; } } } namespace nnforge { namespace cuda { maxout_layer_hessian_cuda::maxout_layer_hessian_cuda() { } maxout_layer_hessian_cuda::~maxout_layer_hessian_cuda() { } void maxout_layer_hessian_cuda::enqueue_test( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { const float * input = *input_neurons_buffer; float * output = *output_neurons_buffer; int * max_feature_map_positions = *additional_buffers[0]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, output_configuration_specific.feature_map_count, entry_count); hipLaunchKernelGGL(( maxout_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, output, max_feature_map_positions, input, output_elem_count_per_feature_map, input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } void maxout_layer_hessian_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { const float * output_errors = *output_errors_buffer; int * max_feature_map_positions = *additional_buffers[0]; float * input_errors = *input_errors_buffer; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, output_configuration_specific.feature_map_count, entry_count); hipLaunchKernelGGL(( maxout_square_deriviative_hess_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, input_errors, max_feature_map_positions, output_errors, output_elem_count_per_feature_map, input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } void maxout_layer_hessian_cuda::hessian_configured() { std::tr1::shared_ptr<const maxout_layer> layer_derived = std::tr1::dynamic_pointer_cast<const maxout_layer>(layer_schema); feature_map_subsampling_size = layer_derived->feature_map_subsampling_size; } bool maxout_layer_hessian_cuda::is_in_place_backprop() const { return false; } std::vector<size_t> maxout_layer_hessian_cuda::get_sizes_of_additional_buffers_per_entry() const { std::vector<size_t> res; res.push_back(output_elem_count_per_entry * sizeof(int)); return res; } } }
bd629ce580cc1d26c703b2a3209d6530c7088c12.cu
/* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "maxout_layer_hessian_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "neural_network_cuda_exception.h" #include "../maxout_layer.h" __global__ void maxout_hess_kernel( float * __restrict output, int * __restrict max_feature_map_positions, const float * __restrict input, int neuron_count_per_feature_map, int input_feature_map_count, int output_feature_map_count, int feature_map_subsampling_size, int entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; float max_val = input[input_offset]; int max_pos = 0; for(int i = 1; i < feature_map_subsampling_size; ++i) { input_offset += output_feature_map_count * neuron_count_per_feature_map; float new_val = input[input_offset]; if (new_val > max_val) { max_val = new_val; max_pos = i; } } int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; output[output_offset] = max_val; max_feature_map_positions[output_offset] = max_pos; } } __global__ void maxout_square_deriviative_hess_kernel( float * __restrict input_errors, const int * __restrict max_feature_map_positions, const float * __restrict output_errors, int neuron_count_per_feature_map, int input_feature_map_count, int output_feature_map_count, int feature_map_subsampling_size, int entry_count) { int neuron_id = blockIdx.x * blockDim.x + threadIdx.x; int output_feature_map_id = blockIdx.y * blockDim.y + threadIdx.y; int entry_id = blockIdx.z * blockDim.z + threadIdx.z; if ((neuron_id < neuron_count_per_feature_map) && (output_feature_map_id < output_feature_map_count) && (entry_id < entry_count)) { int output_offset = (entry_id * output_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; int max_feature_map = max_feature_map_positions[output_offset]; float output_error = output_errors[output_offset]; int input_offset = (entry_id * input_feature_map_count + output_feature_map_id) * neuron_count_per_feature_map + neuron_id; for(int i = 0; i < feature_map_subsampling_size; ++i) { input_errors[input_offset] = ((i == max_feature_map) ? 
output_error : 0.0F); input_offset += output_feature_map_count * neuron_count_per_feature_map; } } } namespace nnforge { namespace cuda { maxout_layer_hessian_cuda::maxout_layer_hessian_cuda() { } maxout_layer_hessian_cuda::~maxout_layer_hessian_cuda() { } void maxout_layer_hessian_cuda::enqueue_test( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { const float * input = *input_neurons_buffer; float * output = *output_neurons_buffer; int * max_feature_map_positions = *additional_buffers[0]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, output_configuration_specific.feature_map_count, entry_count); maxout_hess_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( output, max_feature_map_positions, input, output_elem_count_per_feature_map, input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } void maxout_layer_hessian_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { const float * output_errors = *output_errors_buffer; int * max_feature_map_positions = *additional_buffers[0]; float * input_errors = *input_errors_buffer; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, output_elem_count_per_feature_map, output_configuration_specific.feature_map_count, entry_count); maxout_square_deriviative_hess_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( input_errors, max_feature_map_positions, output_errors, output_elem_count_per_feature_map, input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, feature_map_subsampling_size, entry_count); } void maxout_layer_hessian_cuda::hessian_configured() { std::tr1::shared_ptr<const maxout_layer> layer_derived = std::tr1::dynamic_pointer_cast<const maxout_layer>(layer_schema); feature_map_subsampling_size = layer_derived->feature_map_subsampling_size; } bool maxout_layer_hessian_cuda::is_in_place_backprop() const { return false; } std::vector<size_t> maxout_layer_hessian_cuda::get_sizes_of_additional_buffers_per_entry() const { std::vector<size_t> res; res.push_back(output_elem_count_per_entry * sizeof(int)); return res; } } }
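The only non-obvious part of this layer is the indexing in maxout_hess_kernel: the candidate input feature maps for output map f are strided output_feature_map_count apart. A plain CPU restatement of the forward pass (a sketch for clarity, not code used by the library) makes that explicit:

void maxout_forward_cpu(const float* input, float* output,
                        int neuron_count_per_feature_map,
                        int input_feature_map_count,
                        int output_feature_map_count,
                        int feature_map_subsampling_size,
                        int entry_count) {
  for (int e = 0; e < entry_count; ++e)
    for (int f = 0; f < output_feature_map_count; ++f)
      for (int nrn = 0; nrn < neuron_count_per_feature_map; ++nrn) {
        int in = (e * input_feature_map_count + f) * neuron_count_per_feature_map + nrn;
        float max_val = input[in];
        for (int i = 1; i < feature_map_subsampling_size; ++i) {
          in += output_feature_map_count * neuron_count_per_feature_map;
          if (input[in] > max_val) max_val = input[in];
        }
        output[(e * output_feature_map_count + f) * neuron_count_per_feature_map + nrn] = max_val;
      }
}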
fcbbb6bb1b9687e607f7b25493cd457764569fce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2021, salesforce.com, inc. // All rights reserved. // SPDX-License-Identifier: BSD-3-Clause // For full license text, see the LICENSE file in the repo root // or https://opensource.org/licenses/BSD-3-Clause __constant__ int kIndexToActionArr[10]; extern "C" { __device__ void CudaTagGridWorldGenerateObservation( int *states_x_arr, int *states_y_arr, float *obs_arr, int world_boundary, int *env_timestep_arr, int episode_length, int agent_id, int env_id, bool use_full_observation ) { const bool is_tagger = (agent_id < num_agents - 1); // obs shape is (num_envs, num_agents, 4 * num_agents + 1) // state shape is (num_envs, num_agents,) if (use_full_observation) { const int agent_times_feature_dim = num_agents * 4 + 1; const int obs_start_index = env_id * num_agents * agent_times_feature_dim; const int state_index = env_id * num_agents + agent_id; for (int ag_id = 0; ag_id < num_agents; ag_id++) { const int state_x_obs_index = obs_start_index + ag_id * agent_times_feature_dim + agent_id; const int state_y_obs_index = state_x_obs_index + num_agents; const int type_obs_index = state_y_obs_index + num_agents; const int is_current_agent_obs_index = type_obs_index + num_agents; obs_arr[state_x_obs_index] = states_x_arr[state_index] / static_cast<float>(world_boundary); obs_arr[state_y_obs_index] = states_y_arr[state_index] / static_cast<float>(world_boundary); obs_arr[type_obs_index] = 1.0 * static_cast<int>( agent_id == num_agents - 1); obs_arr[is_current_agent_obs_index] = 1.0 * static_cast<int>( ag_id == agent_id); if (agent_id == num_agents - 1) { int time_to_end_index = is_current_agent_obs_index + 1; obs_arr[time_to_end_index] = env_timestep_arr[env_id] / static_cast<float>(episode_length); } } } else { // obs shape is (num_envs, num_agents, 6) // state shape is (num_envs, num_agents,) __shared__ int distance[num_agents]; const int state_index = env_id * num_agents + agent_id; const int obs_start_index = state_index * 6; const int adversary_state_index = env_id * num_agents + num_agents - 1; // tagger and runners observe their own locations obs_arr[obs_start_index] = states_x_arr[state_index] / static_cast<float>( world_boundary); obs_arr[obs_start_index + 1] = states_y_arr[state_index] / static_cast<float>( world_boundary); if (is_tagger) { // Taggers can observe the runner location obs_arr[obs_start_index + 2] = states_x_arr[adversary_state_index] / static_cast<float>( world_boundary); obs_arr[obs_start_index + 3] = states_y_arr[adversary_state_index] / static_cast<float>( world_boundary); distance[agent_id] = pow(states_x_arr[state_index] - states_x_arr[adversary_state_index], 2) + pow(states_y_arr[state_index] - states_y_arr[adversary_state_index], 2); } __syncthreads(); // A runner can observe the tagger location closest to it. 
if (!is_tagger) { int closest_agent_id = 0; int min_distance = 2 * world_boundary * world_boundary; for (int ag_id = 0; ag_id < num_agents - 1; ag_id++) { if (distance[ag_id] < min_distance) { min_distance = distance[ag_id]; closest_agent_id = ag_id; } } obs_arr[obs_start_index + 2] = states_x_arr[env_id * num_agents + closest_agent_id] / static_cast<float>(world_boundary); obs_arr[obs_start_index + 3] = states_y_arr[env_id * num_agents + closest_agent_id] / static_cast<float>(world_boundary); } obs_arr[obs_start_index + 4] = 1.0 * static_cast<int>( agent_id == num_agents - 1); obs_arr[obs_start_index + 5] = env_timestep_arr[env_id] / static_cast<float>(episode_length); } } __global__ void CudaTagGridWorldStep( int *states_x_arr, int *states_y_arr, int *actions_arr, int *done_arr, float *rewards_arr, float *obs_arr, float wall_hit_penalty, float tag_reward_for_tagger, float tag_penalty_for_runner, float step_cost_for_tagger, bool use_full_observation, int world_boundary, int *env_timestep_arr, int episode_length ) { // This implements Tagger on a discrete grid. // There are N taggers and 1 runner. // The taggers try to tag the runner. __shared__ int num_total_tagged; const int kEnvId = blockIdx.x; const int kThisAgentId = threadIdx.x; const bool is_tagger = (kThisAgentId < num_agents - 1); // Increment time ONCE -- only 1 thread can do this. // Initialize the shared variable that counts how many runners are tagged. if (kThisAgentId == 0) { env_timestep_arr[kEnvId] += 1; num_total_tagged = 0; } __syncthreads(); assert(env_timestep_arr[kEnvId] > 0 && env_timestep_arr[kEnvId] <= episode_length); int state_index = kEnvId * num_agents + kThisAgentId; int adversary_state_index = kEnvId * num_agents + num_agents - 1; int action_index = kEnvId * num_agents + kThisAgentId; int reward_index = kEnvId * num_agents + kThisAgentId; rewards_arr[reward_index] = 0.0; float __rew = 0.0; // ----------------------------------- // Movement // ----------------------------------- // Take action and check boundary cost. // Map action index to the real action space. int ac_index; ac_index = actions_arr[action_index] * 2; states_x_arr[state_index] = states_x_arr[state_index] + kIndexToActionArr[ac_index]; states_y_arr[state_index] = states_y_arr[state_index] + kIndexToActionArr[ac_index + 1]; if (states_x_arr[state_index] < 0) { states_x_arr[state_index] = 0; __rew -= wall_hit_penalty; } else if (states_x_arr[state_index] > world_boundary) { states_x_arr[state_index] = world_boundary; __rew -= wall_hit_penalty; } if (states_y_arr[state_index] < 0) { states_y_arr[state_index] = 0; __rew -= wall_hit_penalty; } else if (states_y_arr[state_index] > world_boundary) { states_y_arr[state_index] = world_boundary; __rew -= wall_hit_penalty; } // make sure all agents have finished their movements __syncthreads(); // ----------------------------------- // Check tags // ----------------------------------- // If this agent is a tagger, check number of tags if (is_tagger) { if (states_x_arr[state_index] == states_x_arr[adversary_state_index] && states_y_arr[state_index] == states_y_arr[adversary_state_index]) { atomicAdd(&num_total_tagged, 1); } } // make sure all agents have finished tag count __syncthreads(); // ----------------------------------- // Rewards // ----------------------------------- // If this agent is a tagger. if (is_tagger) { if (num_total_tagged > 0) { __rew += tag_reward_for_tagger; } else { __rew -= step_cost_for_tagger; } } else { // If it's the runner. 
if (num_total_tagged > 0) { __rew -= tag_penalty_for_runner; } else { __rew += step_cost_for_tagger; } } rewards_arr[reward_index] = __rew; // ----------------------------------- // Generate observation. // ----------------------------------- // (x, y, tagger or runner, current_agent_or_not) CudaTagGridWorldGenerateObservation(states_x_arr, states_y_arr, obs_arr, world_boundary, env_timestep_arr, episode_length, kThisAgentId, kEnvId, use_full_observation); // ----------------------------------- // End condition // ----------------------------------- // Determine if we're done (the runner is tagged or not). if (env_timestep_arr[kEnvId] == episode_length || num_total_tagged > 0) { if (kThisAgentId == 0) { done_arr[kEnvId] = 1; } } } }
fcbbb6bb1b9687e607f7b25493cd457764569fce.cu
// Copyright (c) 2021, salesforce.com, inc. // All rights reserved. // SPDX-License-Identifier: BSD-3-Clause // For full license text, see the LICENSE file in the repo root // or https://opensource.org/licenses/BSD-3-Clause __constant__ int kIndexToActionArr[10]; extern "C" { __device__ void CudaTagGridWorldGenerateObservation( int *states_x_arr, int *states_y_arr, float *obs_arr, int world_boundary, int *env_timestep_arr, int episode_length, int agent_id, int env_id, bool use_full_observation ) { const bool is_tagger = (agent_id < num_agents - 1); // obs shape is (num_envs, num_agents, 4 * num_agents + 1) // state shape is (num_envs, num_agents,) if (use_full_observation) { const int agent_times_feature_dim = num_agents * 4 + 1; const int obs_start_index = env_id * num_agents * agent_times_feature_dim; const int state_index = env_id * num_agents + agent_id; for (int ag_id = 0; ag_id < num_agents; ag_id++) { const int state_x_obs_index = obs_start_index + ag_id * agent_times_feature_dim + agent_id; const int state_y_obs_index = state_x_obs_index + num_agents; const int type_obs_index = state_y_obs_index + num_agents; const int is_current_agent_obs_index = type_obs_index + num_agents; obs_arr[state_x_obs_index] = states_x_arr[state_index] / static_cast<float>(world_boundary); obs_arr[state_y_obs_index] = states_y_arr[state_index] / static_cast<float>(world_boundary); obs_arr[type_obs_index] = 1.0 * static_cast<int>( agent_id == num_agents - 1); obs_arr[is_current_agent_obs_index] = 1.0 * static_cast<int>( ag_id == agent_id); if (agent_id == num_agents - 1) { int time_to_end_index = is_current_agent_obs_index + 1; obs_arr[time_to_end_index] = env_timestep_arr[env_id] / static_cast<float>(episode_length); } } } else { // obs shape is (num_envs, num_agents, 6) // state shape is (num_envs, num_agents,) __shared__ int distance[num_agents]; const int state_index = env_id * num_agents + agent_id; const int obs_start_index = state_index * 6; const int adversary_state_index = env_id * num_agents + num_agents - 1; // tagger and runners observe their own locations obs_arr[obs_start_index] = states_x_arr[state_index] / static_cast<float>( world_boundary); obs_arr[obs_start_index + 1] = states_y_arr[state_index] / static_cast<float>( world_boundary); if (is_tagger) { // Taggers can observe the runner location obs_arr[obs_start_index + 2] = states_x_arr[adversary_state_index] / static_cast<float>( world_boundary); obs_arr[obs_start_index + 3] = states_y_arr[adversary_state_index] / static_cast<float>( world_boundary); distance[agent_id] = pow(states_x_arr[state_index] - states_x_arr[adversary_state_index], 2) + pow(states_y_arr[state_index] - states_y_arr[adversary_state_index], 2); } __syncthreads(); // A runner can observe the tagger location closest to it. 
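// distance[] has been filled by the tagger threads above; after the barrier, the runner thread scans it for the minimum squared distance.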
if (!is_tagger) { int closest_agent_id = 0; int min_distance = 2 * world_boundary * world_boundary; for (int ag_id = 0; ag_id < num_agents - 1; ag_id++) { if (distance[ag_id] < min_distance) { min_distance = distance[ag_id]; closest_agent_id = ag_id; } } obs_arr[obs_start_index + 2] = states_x_arr[env_id * num_agents + closest_agent_id] / static_cast<float>(world_boundary); obs_arr[obs_start_index + 3] = states_y_arr[env_id * num_agents + closest_agent_id] / static_cast<float>(world_boundary); } obs_arr[obs_start_index + 4] = 1.0 * static_cast<int>( agent_id == num_agents - 1); obs_arr[obs_start_index + 5] = env_timestep_arr[env_id] / static_cast<float>(episode_length); } } __global__ void CudaTagGridWorldStep( int *states_x_arr, int *states_y_arr, int *actions_arr, int *done_arr, float *rewards_arr, float *obs_arr, float wall_hit_penalty, float tag_reward_for_tagger, float tag_penalty_for_runner, float step_cost_for_tagger, bool use_full_observation, int world_boundary, int *env_timestep_arr, int episode_length ) { // This implements Tagger on a discrete grid. // There are N taggers and 1 runner. // The taggers try to tag the runner. __shared__ int num_total_tagged; const int kEnvId = blockIdx.x; const int kThisAgentId = threadIdx.x; const bool is_tagger = (kThisAgentId < num_agents - 1); // Increment time ONCE -- only 1 thread can do this. // Initialize the shared variable that counts how many runners are tagged. if (kThisAgentId == 0) { env_timestep_arr[kEnvId] += 1; num_total_tagged = 0; } __syncthreads(); assert(env_timestep_arr[kEnvId] > 0 && env_timestep_arr[kEnvId] <= episode_length); int state_index = kEnvId * num_agents + kThisAgentId; int adversary_state_index = kEnvId * num_agents + num_agents - 1; int action_index = kEnvId * num_agents + kThisAgentId; int reward_index = kEnvId * num_agents + kThisAgentId; rewards_arr[reward_index] = 0.0; float __rew = 0.0; // ----------------------------------- // Movement // ----------------------------------- // Take action and check boundary cost. // Map action index to the real action space. int ac_index; ac_index = actions_arr[action_index] * 2; states_x_arr[state_index] = states_x_arr[state_index] + kIndexToActionArr[ac_index]; states_y_arr[state_index] = states_y_arr[state_index] + kIndexToActionArr[ac_index + 1]; if (states_x_arr[state_index] < 0) { states_x_arr[state_index] = 0; __rew -= wall_hit_penalty; } else if (states_x_arr[state_index] > world_boundary) { states_x_arr[state_index] = world_boundary; __rew -= wall_hit_penalty; } if (states_y_arr[state_index] < 0) { states_y_arr[state_index] = 0; __rew -= wall_hit_penalty; } else if (states_y_arr[state_index] > world_boundary) { states_y_arr[state_index] = world_boundary; __rew -= wall_hit_penalty; } // make sure all agents have finished their movements __syncthreads(); // ----------------------------------- // Check tags // ----------------------------------- // If this agent is a tagger, check number of tags if (is_tagger) { if (states_x_arr[state_index] == states_x_arr[adversary_state_index] && states_y_arr[state_index] == states_y_arr[adversary_state_index]) { atomicAdd(&num_total_tagged, 1); } } // make sure all agents have finished tag count __syncthreads(); // ----------------------------------- // Rewards // ----------------------------------- // If this agent is a tagger. if (is_tagger) { if (num_total_tagged > 0) { __rew += tag_reward_for_tagger; } else { __rew -= step_cost_for_tagger; } } else { // If it's the runner. 
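// The runner loses tag_penalty_for_runner when it has been tagged this step; otherwise it collects the taggers' per-step cost as a survival reward.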
if (num_total_tagged > 0) { __rew -= tag_penalty_for_runner; } else { __rew += step_cost_for_tagger; } } rewards_arr[reward_index] = __rew; // ----------------------------------- // Generate observation. // ----------------------------------- // (x, y, tagger or runner, current_agent_or_not) CudaTagGridWorldGenerateObservation(states_x_arr, states_y_arr, obs_arr, world_boundary, env_timestep_arr, episode_length, kThisAgentId, kEnvId, use_full_observation); // ----------------------------------- // End condition // ----------------------------------- // Determine if we're done (the runner is tagged or not). if (env_timestep_arr[kEnvId] == episode_length || num_total_tagged > 0) { if (kThisAgentId == 0) { done_arr[kEnvId] = 1; } } } }
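The step kernel above relies on a per-block __shared__ counter: every tagger thread that lands on the runner's cell does an atomicAdd on num_total_tagged, and the __syncthreads() barriers make the final count visible to all threads before rewards are assigned. A stripped-down, hypothetical CUDA kernel showing just that pattern (kernel name, parameters, and sizes are illustrative, not taken from the original project):

// Count, per block, how many elements equal `target` -- the same shared-counter idiom as num_total_tagged above.
__global__ void CountMatchesPerBlock(const int *values, int num_values, int target, int *block_counts) {
    __shared__ int num_matches;             // one counter per block
    if (threadIdx.x == 0) num_matches = 0;  // a single thread initializes it
    __syncthreads();                        // all threads see the initialized counter

    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_values && values[idx] == target) {
        atomicAdd(&num_matches, 1);         // concurrent increments from many threads are safe
    }
    __syncthreads();                        // wait until every increment has landed

    if (threadIdx.x == 0) {
        block_counts[blockIdx.x] = num_matches;  // any thread could also branch on num_matches here
    }
}

A launch such as CountMatchesPerBlock<<<num_envs, num_agents>>>(d_values, num_envs * num_agents, target, d_counts) mirrors the one-block-per-environment, one-thread-per-agent configuration the tag kernel assumes.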
e331cd4f162a6fb5d08ceaa9f9f1ec4f73cc9f78.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include "testlayers.h" #include <NDArray.h> #include <NDArrayFactory.h> #include <Context.h> #include <Node.h> #include <graph/Variable.h> #include <graph/VariableSpace.h> #include <execution/LaunchContext.h> #include <specials_cuda.h> #include <TAD.h> #include <ops/declarable/CustomOperations.h> #include <hip/hip_runtime.h> using namespace nd4j; using namespace nd4j::graph; class NDArrayCudaBasicsTests : public testing::Test { public: }; ////////////////////////////////////////////////////////////////////////// static hipError_t allocateDeviceMem(LaunchContext& lc, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*,size_t>>& hostData) { if(devicePtrs.size() != hostData.size()) throw std::invalid_argument("prepareDataForCuda: two input sts::vectors should same sizes !"); hipError_t cudaResult; void* reductionPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024); if(cudaResult != 0) return cudaResult; int* allocationPointer; cudaResult = hipMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024); if(cudaResult != 0) return cudaResult; lc.setReductionPointer(reductionPointer); lc.setAllocationPointer(allocationPointer); hipStream_t stream = *lc.getCudaStream(); for(int i = 0; i < devicePtrs.size(); ++i) { cudaResult = hipMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); if(cudaResult != 0) return cudaResult; hipMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, hipMemcpyHostToDevice, stream); } return cudaResult; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_1) { auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_2) { auto x = NDArrayFactory::create<int>('c', {5}); auto y = NDArrayFactory::create<int>('c', {5}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_3) { auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); NDArray::registerSpecialUse({&x}, {&y}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_FALSE(x.isActualOnHostSide()); ASSERT_TRUE(y.isActualOnDeviceSide()); ASSERT_TRUE(y.isActualOnHostSide()); } TEST_F(NDArrayCudaBasicsTests, Test_Registration_01) { auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); 
ASSERT_TRUE(x->isActualOnHostSide()); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_02) { auto x = NDArrayFactory::create_<int>('c', {5}); auto y = NDArrayFactory::create_<int>('c', {5}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_03) { auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_TRUE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Neg, y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //y->syncToHost(); // y->printBuffer("Negatives"); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Cosine_1) { auto x = NDArrayFactory::create_<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<double>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_TRUE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Cosine, y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //y->syncToHost(); y->printBuffer("Cosine"); delete x; delete y; } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(hipStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(hipStream_t)); hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream)); auto stream = reinterpret_cast<hipStream_t *>(&nativeStream); //hipMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), hipMemcpyHostToDevice, *stream); //hipMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), hipMemcpyHostToDevice, *stream); LaunchContext lc(stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_2) { // allocating host-side arrays 
NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}); NDArray y('c', { 5 }, { 1, 2, 3, 4, 5}); NDArray z('c', { 5 }, nd4j::DataType::DOUBLE); NDArray exp('c', { 5 }, { 2, 4, 6, 8, 10 }); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(hipStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(hipStream_t)); hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream)); auto stream = reinterpret_cast<hipStream_t *>(&nativeStream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); auto res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_3) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(hipStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(hipStream_t)); hipError_t dZ = hipStreamCreate(reinterpret_cast<hipStream_t *>(&nativeStream)); auto stream = reinterpret_cast<hipStream_t *>(&nativeStream); //hipMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), hipMemcpyHostToDevice, *stream); //hipMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), hipMemcpyHostToDevice, *stream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); //double* localBuffer = ; hipMemcpy(z.buffer(), z.specialBuffer(), z.lengthOf() * z.sizeOfT(), hipMemcpyDeviceToHost); res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); x.printBuffer("3X = "); y.printBuffer("3Y = "); z.printBuffer("3Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_4) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = 
NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_5) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += y; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); //y.printBuffer("3Y = "); //z.printBuffer("3Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_6) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 3, 4, 5, 6, 7 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += y; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); x.printBuffer("6X = "); //y.printBuffer("3Y = "); //z.printBuffer("3Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_7) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); //auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = 
NDArrayFactory::create<double>('c', { 5 }, { 3, 4, 5, 6, 7 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += 2.; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); x.printBuffer("7X = "); //y.printBuffer("3Y = "); //z.printBuffer("3Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); // x.printBuffer("3X = "); // y.printBuffer("3Y = "); // z.printBuffer("3Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_2) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); NDArray z('c', { 5 }, nd4j::DataType::DOUBLE); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); x.printBuffer("3X = "); y.printBuffer("3Y = "); z.printBuffer("3Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_3) { // allocating host-side arrays NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE); NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', { 
5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); // z.printBuffer("23Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestMultiply_4) { // allocating host-side arrays NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE); NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x *= y; //x.tickWriteDevice(); // x.printBuffer("33Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestPrimitiveNeg_01) { // allocating host-side arrays auto x = NDArrayFactory::create<int>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<int>('c', { 5 }, { 1, 2, 3, 4, 5}); auto exp = NDArrayFactory::create<int>('c', { 5 }, { -1, -2, -3, -4, -5 }); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); NativeOpExecutioner::execTransformSame(x.getContext(), transform::Neg, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, nullptr, nullptr); auto res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); y.tickWriteDevice(); // x.printBuffer("X = "); // y.printBuffer("Y = "); for (int e = 0; e < y.lengthOf(); e++) { ASSERT_NEAR(exp.e<int>(e), y.e<int>(e), 1e-5); } } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveNeg_2) { auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Neg, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); // 
y.printBuffer("Negatives2"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveSqrt_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Sqrt, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); ASSERT_TRUE(y.equalsTo(exp)); //y.printBuffer("SQRT output"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveAssign_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); //auto exp = NDArrayFactory::create<double>({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); //ASSERT_TRUE(x.isActualOnDeviceSide()); //ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Assign, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); // printf("Assigned to another array\n"); // y.printBuffer("OUput"); ASSERT_TRUE(y.equalsTo(x)); //y.syncToHost(); //y.printBuffer("IsMax output"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); ASSERT_TRUE(exp.isSameShape(y)); ASSERT_TRUE(exp.dataType() == y.dataType()); //y.printBuffer("Cosine2"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_2) { auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); //exp.syncToHost(); //y.printBuffer("PrimitiveCosine2"); //exp.printBuffer("Primitive Cosine exp"); ASSERT_TRUE(exp.isSameShape(y)); ASSERT_TRUE(exp.dataType() == y.dataType()); //for (int e = 0; e < y.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), y.e<double>(e), 1e-5); //} ASSERT_TRUE(exp.equalsTo(y)); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_3) { auto x = NDArrayFactory::create<double>('c', 
{5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>({0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = hipStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); //exp.syncToHost(); // y.printBuffer("PrimitiveCosine3"); // exp.printBuffer("Primitive Cosine3 exp"); // y.printShapeInfo("Y shape"); // exp.printShapeInfo("Exp Shape"); ASSERT_TRUE(exp.isSameShape(y)); // // for (int e = 0; e < y.lengthOf(); e++) { // printf("%lf == %lf\n", exp.e<double>(e), y.e<double>(e)); //// ASSERT_NEAR(exp.e<double>(e), y.e<double>(e), 1e-5); // } ASSERT_TRUE(exp.equalsTo(y)); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_2) { //if (!Environment::getInstance()->isExperimentalBuild()) // return; NDArray x = NDArrayFactory::create<double>('c', {2,3,4}); NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE); // NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::DOUBLE); NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t stream; cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext lc(&stream); // allocate required amount of global device memory and copy host data to it cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(&lc, nd4j::broadcast::Multiply, nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); z.tickWriteDevice(); z.printBuffer("Result with Broadcast2 (multiply)"); exp.printBuffer("Expect with Broadcast2 (multiply)"); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global 
device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_3) { //if (!Environment::getInstance()->isExperimentalBuild()) // return; NDArray x('c', {2,3,4}, nd4j::DataType::DOUBLE); NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE); // NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::DOUBLE); NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE); x.linspace(1); x.syncToDevice(); std::vector<int> dimensions = {0,2}; // evaluate xTad data shape::TAD xTad; xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; //hipStream_t stream; //cudaResult = hipStreamCreate(&stream); ASSERT_EQ(0, cudaResult); LaunchContext* pLc = x.getContext();//(&stream); hipStream_t* stream = pLc->getCudaStream(); // allocate required amount of global device memory and copy host data to it // cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); for(int i = 0; i < devicePtrs.size(); ++i) { cudaResult = hipMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); ASSERT_EQ(0, cudaResult); hipMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, hipMemcpyHostToDevice, *stream); } NDArray::registerSpecialUse({&z}, {&x, &y}); // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Multiply, nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); //cudaResult = hipStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); //z.syncToHost(); z.printBuffer("Result with Broadcast3 (multiply)"); // verify results for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); ASSERT_TRUE(exp.equalsTo(z)); // delete cuda stream //cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_1) { // allocating host-side arrays NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); NDArray y = NDArrayFactory::create<double>(3.); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); //auto z = 
NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x *= y; //x.syncToHost(); x.printBuffer("54Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(x)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); // } } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_01) { // allocating host-side arrays NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); NDArray y = NDArrayFactory::create<double>(3.); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', { 2, 3 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), &y, &z);// *= y; // z.printBuffer("53Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(z)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // } } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_02) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}); //, nd4j::DataType::DOUBLE); auto y = NDArrayFactory::create<double>('c', {2,3}, {3, 3, 3, 3, 3, 3}); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', { 2, 3 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 }); //if (x.isActualOnHostSide() && !x.isActualOnDeviceSide()) // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), &y, &z);// *= y; // z.printBuffer("52Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(z)); // for (int e = 0; e < 
x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // } } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_002) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}); //, nd4j::DataType::DOUBLE); auto y = NDArrayFactory::create<double>('c', {2, 3}, {2., 3., 3., 3., 3., 3.}); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); auto z = NDArrayFactory::create<double>('c', { 2, 3 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 9, 12, 15, 18 }); //if (x.isActualOnHostSide() && !x.isActualOnDeviceSide()) // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x.applyPairwiseTransform(pairwise::Multiply, &y, &z);// *= y; // z.printBuffer("51Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); ASSERT_TRUE(exp.equalsTo(z)); // for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // } } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestBroadcastRaw_1) { //if (!Environment::getInstance()->isExperimentalBuild()) // return; NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32); NDArray y('c', {3}, {10, 20, 30}, nd4j::DataType::INT64); NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32); NDArray exp('c', {2,3,4}, {10, 11, 12, 13,24, 25, 26, 27,38, 39, 40, 41,22, 23, 24, 25,36, 37, 38, 39,50, 51, 52, 53}, nd4j::DataType::INT32); //real output [10, 11, 12, 13, 4, 5, 6, 7, 28, 29, 30, 31, 22, 23, 24, 25, 16, 17, 18, 19, 40, 41, 42, 43] x.linspace(0); x.syncToDevice(); std::vector<int> dimensions = {1}; // evaluate xTad data shape::TAD xTad; xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size()); xTad.createTadOnlyShapeInfo(); xTad.createOffsets(); // prepare input arrays for prepareDataForCuda function std::vector<std::pair<void*,size_t>> hostData; hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(Nd4jLong)); // 0 -- dimensions hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets std::vector<void*> devicePtrs(hostData.size(), nullptr); // create cuda stream and LaunchContext hipError_t cudaResult; hipStream_t* stream = x.getContext()->getCudaStream(); LaunchContext* pLc = x.getContext(); // allocate required amount of global device memory and copy host data to it //cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); for(size_t i = 0; i < devicePtrs.size(); ++i) { nd4j_printf("Allocation of %i bytes with device\n", hostData[i].second) cudaResult = hipMalloc(&devicePtrs[i], hostData[i].second); //if(cudaResult != 0) return cudaResult; ASSERT_EQ(cudaResult, 0); 
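// Copy each host buffer into its freshly allocated device buffer (a synchronous copy here, unlike the hipMemcpyAsync path used in allocateDeviceMem).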
hipMemcpy(devicePtrs[i], hostData[i].first, hostData[i].second, hipMemcpyHostToDevice); } // call cuda kernel which calculates result NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Add, nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(), (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr); cudaResult = hipStreamSynchronize(*stream); ASSERT_EQ(0, cudaResult); // x.printIndexedBuffer(" X"); // y.printIndexedBuffer("+Y"); // z.printBuffer("ADD broadcasted output"); // verify results // for (int e = 0; e < z.lengthOf(); e++) // ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); // free allocated global device memory for(int i = 0; i < devicePtrs.size(); ++i) hipFree(devicePtrs[i]); // delete cuda stream //cudaResult = hipStreamDestroy(stream); ASSERT_EQ(0, cudaResult); } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply) { // allocating host-side arrays NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); x *= y; x.printBuffer("55Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); //for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); //} } TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_2) { // allocating host-side arrays NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 11,12, 13,14, 15, 16 }); auto expZ = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //hipError_t res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = hipMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); //x.printBuffer("23X = "); //y.printBuffer("23Y = "); //void NDArray::applyTrueBroadcast(nd4j::BroadcastOpsTuple op, const NDArray* other, NDArray* target, const bool checkTargetShape, ExtraArguments *extraArgs) x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), &y, &exp); exp.printBuffer("56Result out"); // // hipFree(devBufferPtrX); //hipFree(devBufferPtrZ); //hipFree(devShapePtrX); //for (int e = 0; e < x.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), 
x.e<double>(e), 1e-5); //} ASSERT_TRUE(exp.equalsTo(expZ)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestReduceSum_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>(15); auto exp = NDArrayFactory::create<double>(15); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); NativeOpExecutioner::execReduceSameScalar(x.getContext(), reduce::Sum, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo()); auto res = hipStreamSynchronize(*stream); ASSERT_EQ(0, res); y.syncToHost(); x.printBuffer("X = "); y.printBuffer("Y = "); ASSERT_NEAR(y.e<double>(0), 15, 1e-5); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestDup1) { NDArray array('c', {2,3}, {1,2,3,4,5,6}); array.printBuffer("Array at start"); auto arrC = array.dup('c'); auto arrF = array.dup('f'); // arrC->printBuffer("arrC"); // arrF->printBuffer("arrF"); //arrC->printShapeInfo("C shape"); //arrF->printShapeInfo("F shape"); ASSERT_TRUE(array.equalsTo(arrF)); ASSERT_TRUE(array.equalsTo(arrC)); ASSERT_TRUE(arrF->equalsTo(arrC)); delete arrC; delete arrF; } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, equalsTo_1) { NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); NDArray y('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); ASSERT_TRUE(x.equalsTo(y)); x.permutei({1,0}); y.permutei({1,0}); ASSERT_TRUE(x.equalsTo(y)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, equalsTo_2) { NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,10,10}, nd4j::DataType::DOUBLE); NDArray y('c', {2,5}, {1,2,5,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); ASSERT_FALSE(x.equalsTo(y)); x.permutei({1,0}); y.permutei({1,0}); ASSERT_FALSE(x.equalsTo(y)); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, equalsTo_3) { NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); NDArray y('c', {2,5}, {1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,9.f,10.f}, nd4j::DataType::FLOAT32); ASSERT_FALSE(x.equalsTo(y)); x.permutei({1,0}); y.permutei({1,0}); ASSERT_FALSE(x.equalsTo(y)); } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_1) { NDArray x('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32); NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32); NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::INT32); NDArray k('c', {2,3}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32); NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32); NDArray exp1('c', {3}, {4., 20., 36.}, nd4j::DataType::FLOAT32); NDArray exp2('c', {2,3}, {-10., -2., 6.,14., 22., 30.}, nd4j::DataType::FLOAT32); NDArray exp3('c', {4}, {38., 41., 44., 47.}, nd4j::DataType::FLOAT32); NDArray exp4('c', {4}, {114., 117., 120., 123.}, nd4j::DataType::FLOAT32); NDArray* z = x.applyReduce3(nd4j::reduce3::Dot, &y, {0,2}); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x.applyReduce3(nd4j::reduce3::Dot, &k, {0,1}); ASSERT_TRUE(z->equalsTo(&exp3)); delete z; 
x.permutei({0,2,1}); y.permutei({0,2,1}); z = y.applyReduce3(nd4j::reduce3::Dot, &x, {1}); ASSERT_TRUE(z->equalsTo(&exp2)); // printCudaGlobal<float><<<1,1,0, *y.getContext()->getCudaStream()>>>(z->specialBuffer(), 6); delete z; x2.permutei({1,0,2}); z = x2.applyReduce3(nd4j::reduce3::Dot, &k2, {0,1}); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_2) { NDArray x('c', {2,3,4}, {-10,-9,-8.5,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE); NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0.5,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE); NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2.5,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::DOUBLE); NDArray k('c', {2,3}, {-2,3,-4,5.5,-2,3}, nd4j::DataType::DOUBLE); NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3.5}, nd4j::DataType::DOUBLE); NDArray exp1('c', {3}, {5., 20., 36.}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,3}, {-8., -2., 6., 13., 22., 30.}, nd4j::DataType::DOUBLE); NDArray exp3('c', {4}, {39., 42.5, 47., 49.5}, nd4j::DataType::DOUBLE); NDArray exp4('c', {4}, {119., 122.5, 125., 129.5}, nd4j::DataType::DOUBLE); NDArray* z = x.applyReduce3(nd4j::reduce3::Dot, &y, {0,2}); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x.applyReduce3(nd4j::reduce3::Dot, &k, {0,1}); ASSERT_TRUE(z->equalsTo(&exp3)); delete z; x.permutei({0,2,1}); y.permutei({0,2,1}); z = y.applyReduce3(nd4j::reduce3::Dot, &x, {1}); ASSERT_TRUE(z->equalsTo(&exp2)); // printCudaGlobal<float><<<1,1,0, *y.getContext()->getCudaStream()>>>(z->specialBuffer(), 6); delete z; x2.permutei({1,0,2}); z = x2.applyReduce3(nd4j::reduce3::Dot, &k2, {0,1}); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_3) { NDArray x1('c', {2,2,2}, {1,2,3,4,5,6,7,8}, nd4j::DataType::INT32); NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); NDArray exp1('c', {0}, {-204}, nd4j::DataType::FLOAT32); NDArray exp2('c', {0}, {31.5}, nd4j::DataType::DOUBLE); auto z = x1.applyReduce3(reduce3::Dot, &x2); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x3.applyReduce3(reduce3::Dot, &x4); ASSERT_TRUE(z->equalsTo(&exp2)); delete z; x1.permutei({2,1,0}); x2.permutei({2,1,0}); x3.permutei({1,0}); x4.permutei({1,0}); z = x1.applyReduce3(reduce3::Dot, &x2); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x3.applyReduce3(reduce3::Dot, &x4); ASSERT_TRUE(z->equalsTo(&exp2)); delete z; } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyAllReduce3_1) { NDArray x1('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); NDArray exp1('c', {3,2}, {-88., -124., 6., -2., 22., 14.}, nd4j::DataType::FLOAT32); NDArray exp2('c', {6,4}, {-36., -44., -52., -60.,-42., -52., -62., -72.,2., 0., -2., -4.,6., 4., 2., 0.,10., 8., 6., 4.,14., 12., 10., 8.}, nd4j::DataType::FLOAT32); NDArray exp3('c', {1,1}, {31.5}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,3}, {4.5, 10.5, 16.5,4.5, 10.5, 16.5,4.5, 10.5, 16.5}, 
nd4j::DataType::DOUBLE); auto z = x1.applyAllReduce3(reduce3::Dot, &x2, {0,2}); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x1.applyAllReduce3(reduce3::Dot, &x2, {0}); ASSERT_TRUE(z->equalsTo(&exp2)); delete z; z = x3.applyAllReduce3(reduce3::Dot, &x4, {0,1}); ASSERT_TRUE(z->equalsTo(&exp3)); delete z; z = x3.applyAllReduce3(reduce3::Dot, &x4, {1}); // z->syncToHost(); // z->printShapeInfo(); // z->printIndexedBuffer(); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; x1.permutei({2,1,0}); x2.permutei({2,1,0}); x3.permutei({1,0}); x4.permutei({1,0}); z = x1.applyAllReduce3(reduce3::Dot, &x2, {0,2}); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x3.applyAllReduce3(reduce3::Dot, &x4, {0}); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test1) { NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); NDArray scalar('c', {0}, {100}, nd4j::DataType::INT64); NDArray vec1('c', {2}, {100,100}, nd4j::DataType::INT64); NDArray vec2('c', {3}, {100,100,100}, nd4j::DataType::INT64); NDArray exp1('c', {0}, {1}, nd4j::DataType::INT64); NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); NDArray exp4('c', {0}, {2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &scalar, {0,1}); ASSERT_TRUE(scalar.equalsTo(&exp1)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &vec1, {1}); ASSERT_TRUE(vec1.equalsTo(&exp2)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &vec2, {0}); ASSERT_TRUE(vec2.equalsTo(&exp3)); x.permutei({1,0}); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &scalar, {0,1}); ASSERT_TRUE(scalar.equalsTo(&exp4)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &vec1, {0}); ASSERT_TRUE(vec1.equalsTo(&exp5)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &vec2, {1}); ASSERT_TRUE(vec2.equalsTo(&exp6)); } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test2) { NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); NDArray exp1('c', {0}, {1}, nd4j::DataType::INT64); NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); NDArray exp4('c', {0}, {2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); auto z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); ASSERT_TRUE(z->equalsTo(&exp2)); delete z; z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); ASSERT_TRUE(z->equalsTo(&exp3)); delete z; x.permutei({1,0}); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); ASSERT_TRUE(z->equalsTo(&exp5)); delete z; z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); ASSERT_TRUE(z->equalsTo(&exp6)); delete z; } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test1) { NDArray x('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); NDArray z1('c', {0}, {100}, nd4j::DataType::DOUBLE); NDArray z2('c', {2,2}, {100,100,100,100}, 
nd4j::DataType::FLOAT32); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::DOUBLE); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); NDArray exp1('c', {0}, {2.166667}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,2}, {3,4,1,0.666667}, nd4j::DataType::FLOAT32); NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::FLOAT32); NDArray exp5('c', {2}, {3.5,0.833333}, nd4j::DataType::FLOAT32); x.reduceAlongDimension(nd4j::reduce::Mean, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Mean, &z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::Mean, &z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::Mean, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Mean, &z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::Mean, &z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test2) { NDArray x('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::DOUBLE); NDArray exp1('c', {0}, {2.166667}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,2}, {3,4,1,0.666667}, nd4j::DataType::DOUBLE); NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::DOUBLE); NDArray exp5('c', {2}, {3.5,0.833333}, nd4j::DataType::DOUBLE); NDArray z1 = x.reduceAlongDims(nd4j::reduce::Mean, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDims(nd4j::reduce::Mean, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDims(nd4j::reduce::Mean, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDims(nd4j::reduce::Mean, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDims(nd4j::reduce::Mean, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDims(nd4j::reduce::Mean, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, EqualityTest1) { auto arrayA = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayB = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayC = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayD = NDArrayFactory::create_<float>('f', {2, 4}); auto arrayE = NDArrayFactory::create_<float>('f', {1, 15}); for (int i = 0; i < arrayA->rows(); i++) { for (int k = 0; k < arrayA->columns(); k++) { arrayA->p(i, k, (float) i); } } arrayA->printBuffer("arrayA is "); for (int i = 0; i < arrayB->rows(); i++) { for (int k = 0; k < arrayB->columns(); k++) { arrayB->p(i, k, (float) i); } } arrayB->printBuffer("arrayB is "); for (int i = 0; i < arrayC->rows(); i++) { for (int k = 0; k < arrayC->columns(); k++) { arrayC->p(i, k, (float) i+1); } } arrayC->printBuffer("arrayC is "); ASSERT_TRUE(arrayA->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayC->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayD->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayE->equalsTo(arrayB, 1e-5)); delete arrayA; delete arrayB; delete arrayC; delete arrayD; delete arrayE; } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test1) { NDArray x('c', {2,3,2}, 
{1.5,2,3,4,5,6,7.5,8,-1,-2,-3.5,-4,}, nd4j::DataType::FLOAT32); NDArray z1('c', {0}, {100}, nd4j::DataType::FLOAT32); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::FLOAT32); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); NDArray exp1('c', {0}, {26.5}, nd4j::DataType::FLOAT32); NDArray exp2('c', {2,2}, {9.5,12,3,2}, nd4j::DataType::FLOAT32); NDArray exp3('c', {3}, {19,4,3.5}, nd4j::DataType::FLOAT32); NDArray exp4('c', {3,2}, {9,10,2,2,1.5,2}, nd4j::DataType::FLOAT32); NDArray exp5('c', {2}, {21.5,5}, nd4j::DataType::FLOAT32); x.reduceAlongDimension(nd4j::reduce::Sum, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Sum, &z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::Sum, &z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::Sum, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Sum, &z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::Sum, &z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test2) { NDArray x('c', {2,3,2}, {1.5,2,3,4,5,6,7.5,8,-1,-2,-3.5,-4,}, nd4j::DataType::INT64); NDArray exp1('c', {0}, {26}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {9,12,3,2}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {18,4,4}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {8,10,2,2,2,2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {21,5}, nd4j::DataType::INT64); NDArray z1 = x.reduceAlongDims(nd4j::reduce::Sum, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDims(nd4j::reduce::Sum, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDims(nd4j::reduce::Sum, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDims(nd4j::reduce::Sum, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDims(nd4j::reduce::Sum, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDims(nd4j::reduce::Sum, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test1) { NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::DOUBLE); NDArray z1('c', {0}, {100}, nd4j::DataType::BOOL); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::BOOL); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::BOOL); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::BOOL); NDArray z5('c', {2}, {100,100}, nd4j::DataType::BOOL); NDArray exp1('c', {0}, {1}, nd4j::DataType::BOOL); NDArray exp2('c', {2,2}, {1,1,0,1}, nd4j::DataType::BOOL); NDArray exp3('c', {3}, {1,1,1}, nd4j::DataType::BOOL); NDArray exp4('c', {3,2}, {1,1,1,0,1,1}, nd4j::DataType::BOOL); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::BOOL); x.reduceAlongDimension(nd4j::reduce::IsPositive, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::IsPositive, &z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::IsPositive, &z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::IsPositive, &z1, {0,1,2}); 
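    // Editorial note (hedged): for the 'c'-ordered {2,3,2} input above, reducing over all dims {0,1,2}
    // yields a scalar-shaped result, {1} yields a {2,2} array, and {0,2} a length-3 vector; the
    // permutei({1,0,2}) just performed re-views the same buffer as 3x2x2, so the remaining
    // reductions below target {3,2} and {2} outputs (z4/exp4 and z5/exp5).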
ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::IsPositive, &z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::IsPositive, &z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test2) { NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); NDArray exp1('c', {0}, {1}, nd4j::DataType::BOOL); NDArray exp2('c', {2,2}, {1,1,0,1}, nd4j::DataType::BOOL); NDArray exp3('c', {3}, {1,1,1}, nd4j::DataType::BOOL); NDArray exp4('c', {3,2}, {0,1,1,0,1,1}, nd4j::DataType::BOOL); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::BOOL); NDArray z1 = x.reduceAlongDims(nd4j::reduce::IsPositive, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDims(nd4j::reduce::IsPositive, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDims(nd4j::reduce::IsPositive, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDims(nd4j::reduce::IsPositive, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDims(nd4j::reduce::IsPositive, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDims(nd4j::reduce::IsPositive, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test1) { NDArray x('c', {2,3,2}, {0.5,2,3,-0,5,6,-7.5,0,-1,-0.5,-3.5,4}, nd4j::DataType::FLOAT32); NDArray z1('c', {0}, {100}, nd4j::DataType::INT64); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::INT64); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::INT64); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::INT64); NDArray z5('c', {2}, {100,100}, nd4j::DataType::INT64); NDArray exp1('c', {0}, {2}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {0,1,0,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,1,0}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {0,1,0,1,0,0}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); x.reduceAlongDimension(nd4j::reduce::CountZero, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::CountZero, &z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::CountZero, &z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::CountZero, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::CountZero, &z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::CountZero, &z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test2) { NDArray x('c', {2,3,2}, {0.5,2,3,-0,5,6,-7.5,0,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); NDArray exp1('c', {0}, {4}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {1,1,0,2}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {2,2,0}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {1,1,0,2,0,0}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {2,2}, nd4j::DataType::INT64); NDArray z1 = x.reduceAlongDims(nd4j::reduce::CountZero, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDims(nd4j::reduce::CountZero, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = 
x.reduceAlongDims(nd4j::reduce::CountZero, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDims(nd4j::reduce::CountZero, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDims(nd4j::reduce::CountZero, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDims(nd4j::reduce::CountZero, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest1) { auto x = NDArrayFactory::create<float>('c', {5, 5}); auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, row, &z, nullptr); x += *row; ASSERT_TRUE(x.equalsTo(z)); //ASSERT_TRUE(z.equalsTo(&exp)); delete row; } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest2) { auto x = NDArrayFactory::create<float>('c', {5, 5}); //auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, row); ASSERT_TRUE(x.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestBroadcast_1) { NDArray exp('c', {2, 3, 2, 2}, {1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3., 1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3.}, nd4j::DataType::DOUBLE); auto input = NDArrayFactory::create<double>('c',{ 2, 3, 2, 2}); auto bias = NDArrayFactory::create<double>('c', {1, 3}); bias.linspace(1); input.applyBroadcast(broadcast::Add, {1}, &bias); ASSERT_TRUE(exp.equalsTo(&input)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_1) { auto x = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_2) { auto x = NDArrayFactory::create<float16>('c', {9}, {1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float16>('c', {9}, {1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(y)); //for (int e = 0; e < x.lengthOf(); e++) // ASSERT_NEAR(x.e<float16>(e), y.e<float16>(e), 1.e-5f); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_3) { auto x = NDArrayFactory::create<bfloat16>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<bfloat16>({1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_4) { auto x = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>({2,4,5,5,6,7,8,9}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_5) { auto x = NDArrayFactory::create<float>('c', {3,3}, {1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>('c', {3,3}, {2,4,5,5,6,7,8,9}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_6) { auto x = NDArrayFactory::create<float>('f', {3,3}, {1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>('f', {3,3}, {2,4,5,5,6,7,8,9}); ASSERT_FALSE(x.equalsTo(&y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_05) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); 
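    // Editorial note (hedged): this test exercises rank-preserving broadcast; y is created below as
    // {1, 8, 8} and added to the {8, 8, 8} array via applyTrueBroadcast(BroadcastOpsTuple::Add()),
    // so with x filled with 1 and y with 2 every element of res2 should equal 3.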
auto y = NDArrayFactory::create<float>('c', {1, 8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2 = NDArrayFactory::create<float>(expected.ordering(), expected.getShapeAsVector()); x = 1.; y = 2.; expected = 3.; res2 = 0.f; x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; ASSERT_TRUE(expected.isSameShape(&res2)); ASSERT_TRUE(expected.equalsTo(&res2)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_5) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 1, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected = 3.; //x.printBuffer("X="); //y.printBuffer("Y="); //expected.printBuffer("EXPECTED"); auto result = x + y; //result.printBuffer("1 + 2 ="); //res2.assign(x + y); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); //res2.printBuffer("Z="); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; //x.printBuffer("OutputX"); //res2.syncToHost(); //res2.printBuffer("OUputZ"); //x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_51) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected = 3.; //x.printBuffer("X="); //y.printBuffer("Y="); //expected.printBuffer("EXPECTED"); auto result = x + y; //result.printBuffer("1 + 2 ="); //res2.assign(x + y); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); //res2.printBuffer("Z="); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; //x.printBuffer("OutputX"); //res2.syncToHost(); //res2.printBuffer("OUputZ"); //x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_1) { auto x = NDArrayFactory::create<float>('c', {2, 1, 2}); x = 10.; auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('c', {2, 2, 2}); exp = 10.; // y.printShapeInfo("Output SHAPE"); // y.printBuffer("Output TILE"); // exp.printBuffer("Expect TILE"); ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_2) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; y.printShapeInfo("Output SHAPE"); y.printBuffer("Output TILE"); ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_3) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; x.p(1,0,1, 20); x.syncToDevice(); auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; exp.p(1,0,1, 20.); exp.p(1, 1, 1, 20.); exp.syncToDevice(); ASSERT_TRUE(exp.equalsTo(y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_2) { double expBuff[] = {2., 3, 3., 4., 4., 5, 5., 6., 6., 7, 7., 8.}; NDArray a('c', {4,4}, {1.,2,3,4,5,6,7,8,9,2,3,2,1,0,4,7.}, nd4j::DataType::FLOAT32); a.printBuffer(); auto x = NDArrayFactory::create<double>('c', {3, 2, 1}); auto y = NDArrayFactory::create<double>('c', {1, 
2}); auto expected = NDArrayFactory::create<double>(expBuff, 'c', {3, 2, 2}); x.linspace(1); y.linspace(1); x.printBuffer("X="); y.printBuffer("Y="); auto result = x + y; result.printIndexedBuffer("Result"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, assign_2) { NDArray x('c', {4}, {1.5,2.5,3.5,4.5}, nd4j::DataType::FLOAT32); NDArray y('c', {4}, nd4j::DataType::INT32); NDArray expected('c', {4}, {1,2,3,4}, nd4j::DataType::INT32); y.assign(x); // y.printBuffer("ASSIGN VECTOR"); ASSERT_TRUE(expected.equalsTo(&y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, subarray_1) { NDArray x('c', {2,3,4}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); NDArray y('f', {2,3,4}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); Nd4jLong shapeExpX0[] = {1, 2, 12, 8192, -1, 99}; float buffExpX0[] = {1.000000, 13.000000}; Nd4jLong shapeExpX1[] = {1, 2, 12, 8192, -1, 99}; float buffExpX1[] = {2.000000, 14.000000}; Nd4jLong shapeExpX2[] = {3, 2, 1, 1, 12, 4, 1, 8192, -1, 99}; float buffExpX2[] = {1.000000, 13.000000}; Nd4jLong shapeExpX3[] = {2, 2, 4, 12, 1, 8192, -1, 99}; float buffExpX3[] = {9.000000, 10.000000, 11.000000, 12.000000, 21.000000, 22.000000, 23.000000, 24.000000}; Nd4jLong shapeExpX4[] = {3, 2, 1, 4, 12, 4, 1, 8192, -1, 99}; float buffExpX4[] = {9.000000, 10.000000, 11.000000, 12.000000, 21.000000, 22.000000, 23.000000, 24.000000}; Nd4jLong shapeExpX5[] = {2, 2, 3, 12, 4, 8192, -1, 99}; float buffExpX5[] = {4.000000, 8.000000, 12.000000, 16.000000, 20.000000, 24.000000}; Nd4jLong shapeExpY0[] = {1, 2, 1, 8192, -1, 99}; float buffExpY0[] = {1.000000, 2.000000}; Nd4jLong shapeExpY1[] = {1, 2, 1, 8192, -1, 99}; float buffExpY1[] = {7.000000, 8.000000}; Nd4jLong shapeExpY2[] = {3, 2, 1, 1, 1, 2, 6, 8192, -1, 102}; float buffExpY2[] = {1.000000, 2.000000}; Nd4jLong shapeExpY3[] = {2, 2, 4, 1, 6, 8192, -1, 99}; float buffExpY3[] = {5.000000, 11.000000, 17.000000, 23.000000, 6.000000, 12.000000, 18.000000, 24.000000}; Nd4jLong shapeExpY4[] = {3, 2, 1, 4, 1, 2, 6, 8192, -1, 102}; float buffExpY4[] = {5.000000, 11.000000, 17.000000, 23.000000, 6.000000, 12.000000, 18.000000, 24.000000}; Nd4jLong shapeExpY5[] = {2, 2, 3, 1, 2, 8192, -1, 99}; float buffExpY5[] = {19.000000, 21.000000, 23.000000, 20.000000, 22.000000, 24.000000}; NDArray x0 = x(0, {1,2}); NDArray xExp(buffExpX0, shapeExpX0); ASSERT_TRUE(xExp.isSameShape(x0)); ASSERT_TRUE(xExp.equalsTo(x0)); // for(int i = 0; i < shape::shapeInfoLength(x0.rankOf()); ++i) // ASSERT_TRUE(x0.getShapeInfo()[i] == shapeExpX0[i]); // for(int i = 0; i < x0.lengthOf(); ++i) // ASSERT_TRUE(x0.e<float>(i) == buffExpX0[i]); NDArray x1 = x(1, {1,2}); NDArray x1Exp(buffExpX1, shapeExpX1); ASSERT_TRUE(x1Exp.isSameShape(x1)); ASSERT_TRUE(x1Exp.equalsTo(x1)); // for(int i = 0; i < shape::shapeInfoLength(x1.rankOf()); ++i) // ASSERT_TRUE(x1.getShapeInfo()[i] == shapeExpX1[i]); // for(int i = 0; i < x1.lengthOf(); ++i) // ASSERT_TRUE(x1.e<float>(i) == buffExpX1[i]); NDArray x2 = x(0, {1,2}, true); NDArray x2Exp(buffExpX2, shapeExpX2); ASSERT_TRUE(x2Exp.isSameShape(x2)); x2.printBuffer("X2"); x2Exp.printBuffer("X2 EXPECT"); ASSERT_TRUE(x2Exp.equalsTo(x2)); // for(int i = 0; i < shape::shapeInfoLength(x2.rankOf()); ++i) // ASSERT_TRUE(x2.getShapeInfo()[i] == shapeExpX2[i]); // for(int i = 
0; i < x2.lengthOf(); ++i) // ASSERT_TRUE(x2.e<float>(i) == buffExpX2[i]); NDArray x3 = x(2, {1}); NDArray x3Exp(buffExpX3, shapeExpX3); ASSERT_TRUE(x3Exp.isSameShape(x3)); ASSERT_TRUE(x3Exp.equalsTo(x3)); // for(int i = 0; i < shape::shapeInfoLength(x3.rankOf()); ++i) // ASSERT_TRUE(x3.getShapeInfo()[i] == shapeExpX3[i]); // for(int i = 0; i < x3.lengthOf(); ++i) // ASSERT_TRUE(x3.e<float>(i) == buffExpX3[i]); NDArray x4 = x(2, {1}, true); NDArray x4Exp(buffExpX4, shapeExpX4); ASSERT_TRUE(x4Exp.isSameShape(x4)); ASSERT_TRUE(x4Exp.equalsTo(x4)); // for(int i = 0; i < shape::shapeInfoLength(x4.rankOf()); ++i) // ASSERT_TRUE(x4.getShapeInfo()[i] == shapeExpX4[i]); // for(int i = 0; i < x4.lengthOf(); ++i) // ASSERT_TRUE(x4.e<float>(i) == buffExpX4[i]); NDArray x5 = x(3, {2}); NDArray x5Exp(buffExpX5, shapeExpX5); ASSERT_TRUE(x5Exp.isSameShape(x5)); ASSERT_TRUE(x5Exp.equalsTo(x5)); // for(int i = 0; i < shape::shapeInfoLength(x5.rankOf()); ++i) // ASSERT_TRUE(x5.getShapeInfo()[i] == shapeExpX5[i]); // for(int i = 0; i < x5.lengthOf(); ++i) // ASSERT_TRUE(x5.e<float>(i) == buffExpX5[i]); // ******************* // NDArray y0 = y(0, {1,2}); NDArray y0Exp(buffExpY0, shapeExpY0); ASSERT_TRUE(y0Exp.isSameShape(y0)); ASSERT_TRUE(y0Exp.equalsTo(y0)); // for(int i = 0; i < shape::shapeInfoLength(y0.rankOf()); ++i) // ASSERT_TRUE(y0.getShapeInfo()[i] == shapeExpY0[i]); // for(int i = 0; i < y0.lengthOf(); ++i) // ASSERT_TRUE(y0.e<float>(i) == buffExpY0[i]); NDArray y1 = y(1, {1,2}); for(int i = 0; i < shape::shapeInfoLength(y1.rankOf()); ++i) ASSERT_TRUE(y1.getShapeInfo()[i] == shapeExpY1[i]); for(int i = 0; i < y1.lengthOf(); ++i) ASSERT_TRUE(y1.e<float>(i) == buffExpY1[i]); NDArray y2 = y(0, {1,2}, true); for(int i = 0; i < shape::shapeInfoLength(y2.rankOf()); ++i) ASSERT_TRUE(y2.getShapeInfo()[i] == shapeExpY2[i]); for(int i = 0; i < y2.lengthOf(); ++i) ASSERT_TRUE(y2.e<float>(i) == buffExpY2[i]); NDArray y3 = y(2, {1}); for(int i = 0; i < shape::shapeInfoLength(y3.rankOf()); ++i) ASSERT_TRUE(y3.getShapeInfo()[i] == shapeExpY3[i]); for(int i = 0; i < y3.lengthOf(); ++i) ASSERT_TRUE(y3.e<float>(i) == buffExpY3[i]); NDArray y4 = y(2, {1}, true); for(int i = 0; i < shape::shapeInfoLength(y4.rankOf()); ++i) ASSERT_TRUE(y4.getShapeInfo()[i] == shapeExpY4[i]); for(int i = 0; i < y4.lengthOf(); ++i) ASSERT_TRUE(y4.e<float>(i) == buffExpY4[i]); NDArray y5 = y(3, {2}); for(int i = 0; i < shape::shapeInfoLength(y5.rankOf()); ++i) ASSERT_TRUE(y5.getShapeInfo()[i] == shapeExpY5[i]); for(int i = 0; i < y5.lengthOf(); ++i) ASSERT_TRUE(y5.e<float>(i) == buffExpY5[i]); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Test_diagonal_1) { auto x = NDArrayFactory::create<float>('c', {2, 3}, {1, 2, 3, 4, 5, 6}); auto exp = NDArrayFactory::create<float>('c', {2, 1}, {1, 5}); auto diag = x.diagonal('c'); //diag->syncToDevice(); for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { printf("VAL[%ld] = %f\n", e, diag->e<float>(e)); //, exp.e<float>(e), 1.e-5); } diag->printIndexedBuffer("DIAGONAL"); for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { ASSERT_NEAR(diag->e<float>(e), exp.e<float>(e), 1.e-5); } double eps(1.e-5); NDArray tmp(nd4j::DataType::FLOAT32, x.getContext()); // scalar = 0 ExtraArguments extras({eps}); NativeOpExecutioner::execReduce3Scalar(diag->getContext(), reduce3::EqualsWithEps, diag->getBuffer(), diag->getShapeInfo(), diag->getSpecialBuffer(), diag->getSpecialShapeInfo(), extras.argumentsAsT(nd4j::DataType::FLOAT32), exp.getBuffer(), exp.getShapeInfo(), 
exp.getSpecialBuffer(), exp.getSpecialShapeInfo(), tmp.buffer(), tmp.shapeInfo(), tmp.specialBuffer(), tmp.specialShapeInfo()); hipStream_t* stream = x.getContext()->getCudaStream(); auto res = hipStreamSynchronize(*stream); // tmp.printBuffer("Compare result is (expected 0)"); ASSERT_TRUE(exp.isSameShape(diag)); ASSERT_TRUE(exp.equalsTo(diag)); delete diag; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_02) { auto x = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); //x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); x->reshapei('c', {3, 4, 5}); x->permutei({0, 1, 2}); x->streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(x)); ASSERT_TRUE(exp.equalsTo(x)); delete x; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_0) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_1) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_2) { //auto x = NDArrayFactory::create<float>('c', {1, 60}); auto xx = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); // auto x = *xx; //x.linspace(1); // auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); // x.reshapei('c', {3, 4, 5}); // x.permutei({0, 1, 2}); // x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); // ASSERT_TRUE(exp.isSameShape(&x)); // ASSERT_TRUE(exp.equalsTo(&x)); delete xx; } TEST_F(NDArrayCudaBasicsTests, 
Test_PermuteEquality_3) { auto x = NDArrayFactory::create<float>('c', {1, 60}); //x.linspace(1); for (int l = 0; l < x.lengthOf(); l++) x.p(l, float(l + 1.f)); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_1) { auto x = NDArrayFactory::empty<float>(); ASSERT_TRUE(x.isActualOnHostSide()); ASSERT_TRUE(x.isEmpty()); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_2) { auto x = NDArrayFactory::empty_<float>(); ASSERT_TRUE(x->isEmpty()); delete x; } TEST_F(NDArrayCudaBasicsTests, Test_Empty_3) { auto x = NDArrayFactory::empty(nd4j::DataType::FLOAT32); ASSERT_TRUE(x.isEmpty()); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_4) { auto x = NDArrayFactory::empty_(nd4j::DataType::FLOAT32); ASSERT_TRUE(x->isEmpty()); delete x; } // printCudaGlobal<double><<<1,1,0,*stream>>>(dX, 6); // printCudaGlobal<Nd4jLong><<<1,1,0,*stream>>>(dXShapeInfo, 8); // printCudaGlobal<double><<<1,1,0,*stream>>>(dZ, 2); // printCudaGlobal<Nd4jLong><<<1,1,0,*stream>>>(dZShapeInfo, 6); // printCudaGlobal<int><<<1,1,0,*stream>>>(dimension, 1); // printCudaGlobal<Nd4jLong><<<1,1,0,*stream>>>(tadShapeInfo, 6); // printCudaGlobal<Nd4jLong><<<1,1,0,*stream>>>(tadOffsets, 2); // hipStreamSynchronize(*stream); TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_1) { auto x = NDArrayFactory::create<float>('c', {5,2}, {0,1,2,3,4,5,6,7,8,9}); NativeOps native; auto z = NDArrayFactory::create<float>('c', {5, 8}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); std::vector<void*> buffers(4); std::vector<Nd4jLong*> shapes(4); std::vector<Nd4jLong*> hostShapes(4); for (size_t i = 0; i < buffers.size(); i++) { buffers[i] = x.specialBuffer(); shapes[i] = x.specialShapeInfo(); hostShapes[i] = x.shapeInfo(); } Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 1, 4, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_2) { auto x = NDArrayFactory::create<float>('c', {5,2}, {0,1,2,3,4,5,6,7,8,9}); NativeOps native; auto z = NDArrayFactory::create<float>('f', {5, 8}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); std::vector<void*> buffers(4); std::vector<Nd4jLong*> shapes(4); std::vector<Nd4jLong*> hostShapes(4); for (size_t i = 0; i < buffers.size(); i++) { buffers[i] = x.specialBuffer(); shapes[i] = x.specialShapeInfo(); hostShapes[i] = x.shapeInfo(); } Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 1, 4, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); 
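    // Editorial note (hedged): the same {5,2} device buffer was passed four times to NativeOps::concat
    // along dimension 1, so the 'f'-ordered {5,8} target should now hold four copies of x side by side;
    // the print below shows that buffer in linear (column-major) order.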
z.printBuffer("F Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_3) { auto x = NDArrayFactory::create<float>('c', {2,3}, {1,2,3,4,5,6}); auto y = NDArrayFactory::create<float>('c', {1,3}, {7,8,9}); NativeOps native; auto z = NDArrayFactory::create<float>('f', {3, 3}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); std::vector<void*> buffers(2); std::vector<Nd4jLong*> shapes(2); std::vector<Nd4jLong*> hostShapes(2); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x.specialBuffer(); shapes[0] = x.specialShapeInfo(); hostShapes[0] = x.shapeInfo(); buffers[1] = y.specialBuffer(); shapes[1] = y.specialShapeInfo(); hostShapes[1] = y.shapeInfo(); //} Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 2, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("F Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_4) { auto x = NDArrayFactory::create<float>('c', {2,3}, {1,2,3,4,5,6}); auto y = NDArrayFactory::create<float>('c', {1,3}, {7,8,9}); NativeOps native; auto z = NDArrayFactory::create<float>('c', {3, 3}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); std::vector<void*> buffers(2); std::vector<Nd4jLong*> shapes(2); std::vector<Nd4jLong*> hostShapes(2); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x.specialBuffer(); shapes[0] = x.specialShapeInfo(); hostShapes[0] = x.shapeInfo(); buffers[1] = y.specialBuffer(); shapes[1] = y.specialShapeInfo(); hostShapes[1] = y.shapeInfo(); //} Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 2, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_5) { auto x = NDArrayFactory::create<float>('c', {1,2,3}, {1,2,3,4,5,6}); auto y = NDArrayFactory::create<float>('c', {1,2,3}, {7,8,9,10,11, 12}); NativeOps native; auto z = NDArrayFactory::create<float>('c', {2, 2, 3}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); std::vector<void*> buffers(2); std::vector<Nd4jLong*> shapes(2); std::vector<Nd4jLong*> hostShapes(2); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x.specialBuffer(); shapes[0] = x.specialShapeInfo(); hostShapes[0] = x.shapeInfo(); buffers[1] = y.specialBuffer(); shapes[1] = y.specialShapeInfo(); hostShapes[1] = y.shapeInfo(); //} Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 2, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_6) { auto x1 = NDArrayFactory::create<float>('c', {2,2,3}, {1,2,3,4,5,6,7,8, 9, 10,11,12}); auto x2 = NDArrayFactory::create<float>('c', {1,2,3}, {13,14,15,16,17, 18}); auto x3 = NDArrayFactory::create<float>('c', {1,2,3}, {19,20,21,22,23, 24}); NativeOps native; auto z = 
NDArrayFactory::create<float>('c', {4, 2, 3}); auto stream = x1.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); std::vector<void*> buffers(3); std::vector<Nd4jLong*> shapes(3); std::vector<Nd4jLong*> hostShapes(3); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x1.specialBuffer(); shapes[0] = x1.specialShapeInfo(); hostShapes[0] = x1.shapeInfo(); buffers[1] = x2.specialBuffer(); shapes[1] = x2.specialShapeInfo(); hostShapes[1] = x2.shapeInfo(); buffers[2] = x3.specialBuffer(); shapes[2] = x3.specialShapeInfo(); hostShapes[2] = x3.shapeInfo(); //} printf("The third array is %p\n", buffers[2]); Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 3, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat3D result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_7) { auto x1 = NDArrayFactory::create<float>(1); auto x2 = NDArrayFactory::create<float>(2); auto x3 = NDArrayFactory::create<float>(3); NativeOps native; auto z = NDArrayFactory::create<float>('c', {3}, {1,2,3}); auto stream = x1.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); std::vector<void*> buffers(3); std::vector<Nd4jLong*> shapes(3); std::vector<Nd4jLong*> hostShapes(3); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x1.specialBuffer(); shapes[0] = x1.specialShapeInfo(); hostShapes[0] = x1.shapeInfo(); buffers[1] = x2.specialBuffer(); shapes[1] = x2.specialShapeInfo(); hostShapes[1] = x2.shapeInfo(); buffers[2] = x3.specialBuffer(); shapes[2] = x3.specialShapeInfo(); hostShapes[2] = x3.shapeInfo(); //} printf("The third array is %p\n", buffers[2]); Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 3, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat scalar result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_8) { // public void testLargeConcat() { // val list = new ArrayList<INDArray>(); // // for (int e = 0; e < 100000; e++) // list.add(Nd4j.create(1, 300)); // // val result = Nd4j.concat(0, list.toArray(new INDArray[list.size()])); // } auto totalCount = 1000; auto width = 300; std::vector<NDArray> lx;//(totalCount); for (int i = 0; i < totalCount; i++) { lx.emplace_back(NDArrayFactory::create<float>('c', {1, width})); lx[i].assign(i); } NativeOps native; auto z = NDArrayFactory::create<float>('c', {totalCount, width}); auto stream = nd4j::LaunchContext ::defaultContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); std::vector<void*> buffers(totalCount); std::vector<Nd4jLong*> shapes(totalCount); std::vector<Nd4jLong*> hostShapes(totalCount); for (size_t i = 0; i < lx.size(); i++) { buffers[i] = lx[i].specialBuffer(); shapes[i] = lx[i].specialShapeInfo(); hostShapes[i] = lx[i].shapeInfo(); } printf("The third array is %p\n", buffers[2]); Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, totalCount, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); nd4j_printf("%f %f %f\n", 
z.e<float>(0), z.e<float>(width * totalCount / 2), z.e<float>(width * (totalCount - 1))); //z.printIndexedBuffer("Concat result"); //z.printBuffer("C Concat scalar result linear"); } TEST_F(NDArrayCudaBasicsTests, TestTear_1) { auto input = NDArrayFactory::create<float>('c', {1, 10, 10}); std::vector<NDArray> arrays; // = {NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10})}; int total = 151; for (int e = 0; e < total; e++) { input.assign(e); arrays.emplace_back(input); } auto z = NDArrayFactory::create<float>('c', {total, 10, 10}); NativeOps native; auto stream = input.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); Nd4jPointer extra[2]; extra[1] = *stream; std::vector<void*> buffers(total); std::vector<Nd4jLong*> shapes(total); std::vector<Nd4jLong*> hostShapes(total); for (size_t i = 0; i < buffers.size(); i++) { buffers[i] = arrays[i].specialBuffer(); shapes[i] = arrays[i].specialShapeInfo(); hostShapes[i] = arrays[i].shapeInfo(); } native.concat(extra, 0, total, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); nd4j::ops::tear op; auto result = op.execute({&z}, {}, {1, 2}); //ASSERT_EQ(10, result->size()); auto e = result->size() - 1; //for (size_t e = 0; e < result->size(); e++) { // arrays[e].printIndexedBuffer("Input list at 40"); // result->at(e)->printIndexedBuffer("OUtput TEAR at 40"); //} // ASSERT_TRUE(tads->at(e)->equalsTo(result->at(e))); delete result; // delete tads; } TEST_F(NDArrayCudaBasicsTests, TestTear_2) { auto input = NDArrayFactory::create<float>('c', {1, 10, 10}); std::vector<NDArray> arrays; // = {NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10})}; for (int e = 0; e < 10; e++) { input.assign(e); arrays.emplace_back(input); } auto z = NDArrayFactory::create<float>('c', {10, 10, 10}); NativeOps native; auto stream = input.getContext()->getCudaStream();//reinterpret_cast<hipStream_t *>(&nativeStream); Nd4jPointer extra[2]; extra[1] = *stream; std::vector<void*> buffers(10); std::vector<Nd4jLong*> shapes(10); std::vector<Nd4jLong*> hostShapes(10); for (size_t i = 0; i < buffers.size(); i++) { buffers[i] = arrays[i].specialBuffer(); shapes[i] = arrays[i].specialShapeInfo(); hostShapes[i] = arrays[i].shapeInfo(); } std::vector<int> dimsToExclude({1,2}); native.concat(extra, 0, 10, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); // z.syncToHost(); // z.printBuffer("Pile OK"); // z.printIndexedBuffer("Pile 10x10"); // z.printIndexedBuffer("Pile 10x10"); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimsToExclude); //std::vector<void*> arraysData(arrays.size()); Nd4jPointer* arraysData; hipError_t err = hipMalloc(&arraysData, arrays.size() * sizeof(void*)); if (err != 0) { printf("Cannot allocate device memory for targets due error %d\n", err); ASSERT_TRUE(false); } for (size_t i = 0; i < arrays.size(); i++) { Nd4jPointer target = 
arrays[i].specialBuffer(); hipMemcpy(&arraysData[i], &target, sizeof(Nd4jPointer), hipMemcpyHostToDevice); } native.tear(extra, z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), arraysData, input.specialShapeInfo(), packX.specialShapeInfo(), packX.specialOffsets()); // auto result = op.execute({&z}, {}, {1, 2}); // nd4j_printf("Result count is %lu\n", result->size()); //ASSERT_EQ(10, result->size()); err = hipFree(arraysData); if (err != 0) { printf("Cannot deallocate device memory for targets due error %d\n", err); ASSERT_TRUE(false); } for (size_t e = 0; e < arrays.size(); e++) { arrays[e].syncToHost(); arrays[e].printBuffer("Output list at"); //result->at(e)->printBuffer("OUtput TEAR at"); } // ASSERT_TRUE(tads->at(e)->equalsTo(result->at(e))); // delete result; // delete tads; }
e331cd4f162a6fb5d08ceaa9f9f1ec4f73cc9f78.cu
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// @author [email protected]
//

#include "testlayers.h"
#include <NDArray.h>
#include <NDArrayFactory.h>
#include <Context.h>
#include <Node.h>
#include <graph/Variable.h>
#include <graph/VariableSpace.h>
#include <execution/LaunchContext.h>
#include <specials_cuda.h>
#include <TAD.h>
#include <ops/declarable/CustomOperations.h>

#include <cuda.h>

using namespace nd4j;
using namespace nd4j::graph;

class NDArrayCudaBasicsTests : public testing::Test {
public:

};

//////////////////////////////////////////////////////////////////////////
static cudaError_t allocateDeviceMem(LaunchContext& lc, std::vector<void*>& devicePtrs, const std::vector<std::pair<void*,size_t>>& hostData) {

    if(devicePtrs.size() != hostData.size())
        throw std::invalid_argument("prepareDataForCuda: two input std::vectors should have the same sizes!");

    cudaError_t cudaResult;

    void* reductionPointer;
    cudaResult = cudaMalloc(reinterpret_cast<void **>(&reductionPointer), 1024*1024);
    if(cudaResult != 0) return cudaResult;
    int* allocationPointer;
    cudaResult = cudaMalloc(reinterpret_cast<void **>(&allocationPointer), 1024*1024);
    if(cudaResult != 0) return cudaResult;

    lc.setReductionPointer(reductionPointer);
    lc.setAllocationPointer(allocationPointer);
    cudaStream_t stream = *lc.getCudaStream();

    for(int i = 0; i < devicePtrs.size(); ++i) {
        cudaResult = cudaMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second);
        if(cudaResult != 0) return cudaResult;
        cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, stream);
    }
    return cudaResult;
}

TEST_F(NDArrayCudaBasicsTests, Test_Registration_1) {
    auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_TRUE(x.isActualOnHostSide());
}

TEST_F(NDArrayCudaBasicsTests, Test_Registration_2) {
    auto x = NDArrayFactory::create<int>('c', {5});
    auto y = NDArrayFactory::create<int>('c', {5});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_FALSE(x.isActualOnHostSide());
}

TEST_F(NDArrayCudaBasicsTests, Test_Registration_3) {
    auto x = NDArrayFactory::create<int>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<int>('c', {5}, {5, 4, 3, 2, 1});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_TRUE(x.isActualOnHostSide());

    NDArray::registerSpecialUse({&x}, {&y});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_FALSE(x.isActualOnHostSide());

    ASSERT_TRUE(y.isActualOnDeviceSide());
    ASSERT_TRUE(y.isActualOnHostSide());
}

TEST_F(NDArrayCudaBasicsTests, Test_Registration_01) {
    auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1});

    ASSERT_TRUE(x->isActualOnDeviceSide());
    ASSERT_TRUE(x->isActualOnHostSide());

    delete x;
    delete y;
}
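// Editorial note (hedged): the Registration tests document NDArray's host/device "actuality" flags:
// arrays built from host data start current on both sides, arrays built without data are device-current
// only, and after NDArray::registerSpecialUse({&x}, {&y}) in Test_Registration_3 the write target x is
// device-current but no longer host-current, while the read target y stays current on both sides.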
TEST_F(NDArrayCudaBasicsTests, Test_Registration_02) { auto x = NDArrayFactory::create_<int>('c', {5}); auto y = NDArrayFactory::create_<int>('c', {5}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_FALSE(x->isActualOnHostSide()); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Registration_03) { auto x = NDArrayFactory::create_<int>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<int>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_TRUE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Neg, y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //y->syncToHost(); // y->printBuffer("Negatives"); delete x; delete y; } TEST_F(NDArrayCudaBasicsTests, Test_Cosine_1) { auto x = NDArrayFactory::create_<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create_<double>('c', {5}, {5, 4, 3, 2, 1}); ASSERT_TRUE(x->isActualOnDeviceSide()); ASSERT_TRUE(x->isActualOnHostSide()); NDArray::registerSpecialUse({y}, {x}); x->applyTransform(transform::Cosine, y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //y->syncToHost(); y->printBuffer("Cosine"); delete x; delete y; } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_1) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t *>(&nativeStream); //cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream); //cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, *stream); LaunchContext lc(stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_2) { // allocating host-side arrays NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}); NDArray 
y('c', { 5 }, { 1, 2, 3, 4, 5}); NDArray z('c', { 5 }, nd4j::DataType::DOUBLE); NDArray exp('c', { 5 }, { 2, 4, 6, 8, 10 }); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t *>(&nativeStream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); for (int e = 0; e < z.lengthOf(); e++) ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_3) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }, {10, 10, 10, 10, 10}); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); cudaError_t dZ = cudaStreamCreate(reinterpret_cast<cudaStream_t *>(&nativeStream)); auto stream = reinterpret_cast<cudaStream_t *>(&nativeStream); //cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream); //cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, *stream); LaunchContext lc(stream, *stream, nullptr, nullptr); NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); z.tickWriteDevice(); auto res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); //double* localBuffer = ; cudaMemcpy(z.buffer(), z.specialBuffer(), z.lengthOf() * z.sizeOfT(), cudaMemcpyDeviceToHost); res = cudaStreamSynchronize(*stream); ASSERT_EQ(0, res); x.printBuffer("3X = "); y.printBuffer("3Y = "); z.printBuffer("3Result out"); // // cudaFree(devBufferPtrX); //cudaFree(devBufferPtrZ); //cudaFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_4) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = 
NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); // // cudaFree(devBufferPtrX); //cudaFree(devBufferPtrZ); //cudaFree(devShapePtrX); for (int e = 0; e < z.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_5) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 2, 4, 6, 8, 10 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += y; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); //y.printBuffer("3Y = "); //z.printBuffer("3Result out"); // // cudaFree(devBufferPtrX); //cudaFree(devBufferPtrZ); //cudaFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_6) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', { 5 }, { 3, 4, 5, 6, 7 }); // making raw buffers //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; //cudaError_t res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); //ASSERT_EQ(0, res); //res = cudaMalloc(reinterpret_cast<void **>(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); //ASSERT_EQ(0, res); x += y; //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); x.syncToHost(); x.printBuffer("6X = "); //y.printBuffer("3Y = "); //z.printBuffer("3Result out"); // // cudaFree(devBufferPtrX); //cudaFree(devBufferPtrZ); //cudaFree(devShapePtrX); for (int e = 0; e < x.lengthOf(); e++) { ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5); } } ////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestAdd_7) { // allocating host-side arrays auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5}); //auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); //auto z = NDArrayFactory::create<double>('c', { 5 }); auto exp = NDArrayFactory::create<double>('c', 
//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestAdd_7) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    //auto y = NDArrayFactory::create<double>(2); //.'c', { 5 }, { 1, 2, 3, 4, 5});
    //auto z = NDArrayFactory::create<double>('c', { 5 });
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 3, 4, 5, 6, 7 });

    x += 2.;
    //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr);
    x.syncToHost();
    x.printBuffer("7X = ");
    //y.printBuffer("3Y = ");
    //z.printBuffer("3Result out");

    for (int e = 0; e < x.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5);
    }
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestMultiply_1) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto z = NDArrayFactory::create<double>('c', { 5 });
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 });

    x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    // x.printBuffer("3X = ");
    // y.printBuffer("3Y = ");
    // z.printBuffer("3Result out");

    for (int e = 0; e < z.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
    }
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestMultiply_2) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    NDArray z('c', { 5 }, nd4j::DataType::DOUBLE);
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 });

    x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    x.printBuffer("3X = ");
    y.printBuffer("3Y = ");
    z.printBuffer("3Result out");

    for (int e = 0; e < z.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
    }
}
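// A minimal illustrative sketch (not part of the original suite): the same pairwise
// pattern used by the surrounding tests, written out once under a hypothetical test
// name. It relies only on calls already exercised above (NDArrayFactory::create,
// applyPairwiseTransform, e<double>()).
TEST_F(NDArrayCudaBasicsTests, TestMultiply_Sketch) {
    auto x   = NDArrayFactory::create<double>('c', { 3 }, { 1, 2, 3 });
    auto y   = NDArrayFactory::create<double>('c', { 3 }, { 10, 20, 30 });
    auto z   = NDArrayFactory::create<double>('c', { 3 });
    auto exp = NDArrayFactory::create<double>('c', { 3 }, { 10, 40, 90 });

    // the result is written into the separate target array z
    x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);

    for (int e = 0; e < z.lengthOf(); e++)
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
}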
//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestMultiply_3) {
    // allocating host-side arrays
    NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE);
    NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE);
    auto z = NDArrayFactory::create<double>('c', { 5 });
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 });

    x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    //x.printBuffer("23X = ");
    //y.printBuffer("23Y = ");
    // z.printBuffer("23Result out");

    for (int e = 0; e < z.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
    }
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestMultiply_4) {
    // allocating host-side arrays
    NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE);
    NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE);
    //auto z = NDArrayFactory::create<double>('c', { 5 });
    auto exp = NDArrayFactory::create<double>('c', { 5 }, { 1, 4, 9, 16, 25 });

    //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    //x.printBuffer("23X = ");
    //y.printBuffer("23Y = ");
    x *= y;
    //x.tickWriteDevice();
    // x.printBuffer("33Result out");

    for (int e = 0; e < x.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5);
    }
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestPrimitiveNeg_01) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<int>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<int>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto exp = NDArrayFactory::create<int>('c', { 5 }, { -1, -2, -3, -4, -5 });

    auto stream = x.getContext()->getCudaStream(); //reinterpret_cast<cudaStream_t *>(&nativeStream);

    NativeOpExecutioner::execTransformSame(x.getContext(), transform::Neg,
                                           x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
                                           y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
                                           nullptr, nullptr, nullptr);
    auto res = cudaStreamSynchronize(*stream);
    ASSERT_EQ(0, res);
    y.tickWriteDevice();

    // x.printBuffer("X = ");
    // y.printBuffer("Y = ");

    for (int e = 0; e < y.lengthOf(); e++) {
        ASSERT_NEAR(exp.e<int>(e), y.e<int>(e), 1e-5);
    }
}
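// Test_PrimitiveNeg_2 and the transform tests that follow go through the higher-level
// NDArray::applyTransform API, instead of calling NativeOpExecutioner directly as
// TestPrimitiveNeg_01 above does (host buffer/shapeInfo plus specialBuffer/specialShapeInfo
// for the device side, then cudaStreamSynchronize and tickWriteDevice before host reads).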
// y.printBuffer("Negatives2"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveSqrt_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Sqrt, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); ASSERT_TRUE(y.equalsTo(exp)); //y.printBuffer("SQRT output"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveAssign_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); //auto exp = NDArrayFactory::create<double>({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); //ASSERT_TRUE(x.isActualOnDeviceSide()); //ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Assign, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); // printf("Assigned to another array\n"); // y.printBuffer("OUput"); ASSERT_TRUE(y.equalsTo(x)); //y.syncToHost(); //y.printBuffer("IsMax output"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_1) { // strict auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); ASSERT_TRUE(exp.isSameShape(y)); ASSERT_TRUE(exp.dataType() == y.dataType()); //y.printBuffer("Cosine2"); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_2) { auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5}); auto y = NDArrayFactory::create<double>('c', {5}); auto exp = NDArrayFactory::create<double>('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); ASSERT_TRUE(x.isActualOnDeviceSide()); ASSERT_TRUE(x.isActualOnHostSide()); x.applyTransform(transform::Cosine, &y, nullptr); //ASSERT_TRUE(x->isActualOnDeviceSide()); //ASSERT_FALSE(x->isActualOnHostSide()); //ASSERT_TRUE(y->isActualOnDeviceSide()); //ASSERT_TRUE(y->isActualOnHostSide()); //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); //ASSERT_EQ(0, res); //exp.syncToHost(); //y.printBuffer("PrimitiveCosine2"); //exp.printBuffer("Primitive Cosine exp"); ASSERT_TRUE(exp.isSameShape(y)); ASSERT_TRUE(exp.dataType() == y.dataType()); //for (int e = 0; e < y.lengthOf(); e++) { // ASSERT_NEAR(exp.e<double>(e), y.e<double>(e), 1e-5); //} ASSERT_TRUE(exp.equalsTo(y)); //delete x; //delete y; } TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_3) { auto x = 
TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_3) {
    auto x = NDArrayFactory::create<double>('c', {5}, {1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>('c', {5});
    auto exp = NDArrayFactory::create<double>({0.540302, -0.416147, -0.989992, -0.653644, 0.283662});

    ASSERT_TRUE(x.isActualOnDeviceSide());
    ASSERT_TRUE(x.isActualOnHostSide());

    x.applyTransform(transform::Cosine, &y, nullptr);
    //exp.syncToHost();
    // y.printBuffer("PrimitiveCosine3");
    // exp.printBuffer("Primitive Cosine3 exp");
    // y.printShapeInfo("Y shape");
    // exp.printShapeInfo("Exp Shape");
    ASSERT_TRUE(exp.isSameShape(y));
    // for (int e = 0; e < y.lengthOf(); e++) {
    //     printf("%lf == %lf\n", exp.e<double>(e), y.e<double>(e));
    ////     ASSERT_NEAR(exp.e<double>(e), y.e<double>(e), 1e-5);
    // }
    ASSERT_TRUE(exp.equalsTo(y));
    //delete x;
    //delete y;
}

TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_2) {
    //if (!Environment::getInstance()->isExperimentalBuild())
    //    return;

    NDArray x = NDArrayFactory::create<double>('c', {2,3,4});
    NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE);
    NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
    // NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::DOUBLE);
    NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE);
    x.linspace(1); x.syncToDevice();

    std::vector<int> dimensions = {0,2};

    // evaluate xTad data
    shape::TAD xTad;
    xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
    xTad.createTadOnlyShapeInfo();
    xTad.createOffsets();

    // prepare input arrays for prepareDataForCuda function
    std::vector<std::pair<void*,size_t>> hostData;
    hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int));                          // 0 -- dimensions
    hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));    // 1 -- xTadShapeInfo
    hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong));                            // 2 -- xTadOffsets
    std::vector<void*> devicePtrs(hostData.size(), nullptr);

    // create cuda stream and LaunchContext
    cudaError_t cudaResult;
    cudaStream_t stream;
    cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
    LaunchContext lc(&stream);

    // allocate required amount of global device memory and copy host data to it
    cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);

    // call cuda kernel which calculates result
    NativeOpExecutioner::execBroadcast(&lc, nd4j::broadcast::Multiply,
                                       nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
                                       nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
                                       nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
                                       (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr);

    cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
    z.tickWriteDevice();
    z.printBuffer("Result with Broadcast2 (multiply)");
    exp.printBuffer("Expect with Broadcast2 (multiply)");

    // verify results
    for (int e = 0; e < z.lengthOf(); e++)
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);

    // free allocated global device memory
    for(int i = 0; i < devicePtrs.size(); ++i)
        cudaFree(devicePtrs[i]);

    // delete cuda stream
    cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
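// A minimal sketch of what a helper in the spirit of allocateDeviceMem (defined elsewhere
// in this suite) is assumed to do: one cudaMalloc per host blob plus an async copy on the
// context stream. The name and exact signature are illustrative only; TestRawBroadcast_3
// below performs the same steps inline. devicePtrs is expected to be pre-sized to
// hostData.size(), as in the surrounding tests.
inline cudaError_t allocateDeviceMemSketch(LaunchContext &lc, std::vector<void*> &devicePtrs,
                                           const std::vector<std::pair<void*, size_t>> &hostData) {
    for (size_t i = 0; i < hostData.size(); ++i) {
        cudaError_t err = cudaMalloc(&devicePtrs[i], hostData[i].second);
        if (err != cudaSuccess)
            return err;
        cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second,
                        cudaMemcpyHostToDevice, *lc.getCudaStream());
    }
    return cudaSuccess;
}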
TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_3) {
    //if (!Environment::getInstance()->isExperimentalBuild())
    //    return;

    NDArray x('c', {2,3,4}, nd4j::DataType::DOUBLE);
    NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE);
    NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE);
    // NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::DOUBLE);
    NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE);
    x.linspace(1); x.syncToDevice();

    std::vector<int> dimensions = {0,2};

    // evaluate xTad data
    shape::TAD xTad;
    xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
    xTad.createTadOnlyShapeInfo();
    xTad.createOffsets();

    // prepare input arrays for prepareDataForCuda function
    std::vector<std::pair<void*,size_t>> hostData;
    hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int));                          // 0 -- dimensions
    hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));    // 1 -- xTadShapeInfo
    hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong));                            // 2 -- xTadOffsets
    std::vector<void*> devicePtrs(hostData.size(), nullptr);

    // create cuda stream and LaunchContext
    cudaError_t cudaResult;
    //cudaStream_t stream;
    //cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult);
    LaunchContext* pLc = x.getContext(); //(&stream);
    cudaStream_t* stream = pLc->getCudaStream();

    // allocate required amount of global device memory and copy host data to it
    // cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
    for(int i = 0; i < devicePtrs.size(); ++i) {
        cudaResult = cudaMalloc(reinterpret_cast<void **>(&devicePtrs[i]), hostData[i].second); ASSERT_EQ(0, cudaResult);
        cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, *stream);
    }

    NDArray::registerSpecialUse({&z}, {&x, &y});

    // call cuda kernel which calculates result
    NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Multiply,
                                       nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
                                       nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
                                       nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
                                       (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr);

    //cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult);
    //z.syncToHost();
    z.printBuffer("Result with Broadcast3 (multiply)");

    // verify results
    for (int e = 0; e < z.lengthOf(); e++)
        ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);

    // free allocated global device memory
    for(int i = 0; i < devicePtrs.size(); ++i)
        cudaFree(devicePtrs[i]);

    ASSERT_TRUE(exp.equalsTo(z));

    // delete cuda stream
    //cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
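// TestRawBroadcast_3 above reuses the array's own LaunchContext and stream and copies the
// TAD data with inline cudaMalloc/cudaMemcpyAsync plus registerSpecialUse, whereas
// TestRawBroadcast_2 created its own stream, went through allocateDeviceMem, and destroyed
// the stream explicitly at the end.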
TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_1) {
    // allocating host-side arrays
    NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE);
    NDArray y = NDArrayFactory::create<double>(3.); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE);
    //auto z = NDArrayFactory::create<double>('c', { 5 });
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 });

    //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    //x.printBuffer("23X = ");
    //y.printBuffer("23Y = ");
    x *= y;
    //x.syncToHost();
    x.printBuffer("54Result out");

    ASSERT_TRUE(exp.equalsTo(x));
    // for (int e = 0; e < x.lengthOf(); e++) {
    //     ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5);
    // }
}

TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_01) {
    // allocating host-side arrays
    NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE);
    NDArray y = NDArrayFactory::create<double>(3.); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE);
    auto z = NDArrayFactory::create<double>('c', { 2, 3 });
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 });

    //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    //x.printBuffer("23X = ");
    //y.printBuffer("23Y = ");
    x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), &y, &z); // *= y;
    // z.printBuffer("53Result out");

    ASSERT_TRUE(exp.equalsTo(z));
    // for (int e = 0; e < x.lengthOf(); e++) {
    //     ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
    // }
}

TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_02) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}); //, nd4j::DataType::DOUBLE);
    auto y = NDArrayFactory::create<double>('c', {2,3}, {3, 3, 3, 3, 3, 3}); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE);
    auto z = NDArrayFactory::create<double>('c', { 2, 3 });
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 });
    //if (x.isActualOnHostSide() && !x.isActualOnDeviceSide())

    //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    //x.printBuffer("23X = ");
    //y.printBuffer("23Y = ");
    x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), &y, &z); // *= y;
    // z.printBuffer("52Result out");

    ASSERT_TRUE(exp.equalsTo(z));
    // for (int e = 0; e < x.lengthOf(); e++) {
    //     ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
    // }
}
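// TestBroadcastMultiply_002 below multiplies two arrays of identical shape, so it goes
// through applyPairwiseTransform rather than applyTrueBroadcast.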
TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_002) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}); //, nd4j::DataType::DOUBLE);
    auto y = NDArrayFactory::create<double>('c', {2, 3}, {2., 3., 3., 3., 3., 3.}); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE);
    auto z = NDArrayFactory::create<double>('c', { 2, 3 });
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 9, 12, 15, 18 });
    //if (x.isActualOnHostSide() && !x.isActualOnDeviceSide())

    //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    //x.printBuffer("23X = ");
    //y.printBuffer("23Y = ");
    x.applyPairwiseTransform(pairwise::Multiply, &y, &z); // *= y;
    // z.printBuffer("51Result out");

    ASSERT_TRUE(exp.equalsTo(z));
    // for (int e = 0; e < x.lengthOf(); e++) {
    //     ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);
    // }
}

////////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestBroadcastRaw_1) {
    //if (!Environment::getInstance()->isExperimentalBuild())
    //    return;

    NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
    NDArray y('c', {3}, {10, 20, 30}, nd4j::DataType::INT64);
    NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32);
    NDArray exp('c', {2,3,4}, {10, 11, 12, 13, 24, 25, 26, 27, 38, 39, 40, 41, 22, 23, 24, 25, 36, 37, 38, 39, 50, 51, 52, 53}, nd4j::DataType::INT32);
    //real output [10, 11, 12, 13, 4, 5, 6, 7, 28, 29, 30, 31, 22, 23, 24, 25, 16, 17, 18, 19, 40, 41, 42, 43]
    x.linspace(0); x.syncToDevice();

    std::vector<int> dimensions = {1};

    // evaluate xTad data
    shape::TAD xTad;
    xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size());
    xTad.createTadOnlyShapeInfo();
    xTad.createOffsets();

    // prepare input arrays for prepareDataForCuda function
    std::vector<std::pair<void*,size_t>> hostData;
    hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int));                          // 0 -- dimensions (int entries, hence sizeof(int))
    hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo));    // 1 -- xTadShapeInfo
    hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong));                            // 2 -- xTadOffsets
    std::vector<void*> devicePtrs(hostData.size(), nullptr);

    // create cuda stream and LaunchContext
    cudaError_t cudaResult;
    cudaStream_t* stream = x.getContext()->getCudaStream();
    LaunchContext* pLc = x.getContext();

    // allocate required amount of global device memory and copy host data to it
    //cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult);
    for(size_t i = 0; i < devicePtrs.size(); ++i) {
        nd4j_printf("Allocation of %i bytes with device\n", hostData[i].second)
        cudaResult = cudaMalloc(&devicePtrs[i], hostData[i].second);
        //if(cudaResult != 0) return cudaResult;
        ASSERT_EQ(cudaResult, 0);
        cudaMemcpy(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice);
    }

    // call cuda kernel which calculates result
    NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Add,
                                       nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
                                       nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(),
                                       nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
                                       (int*)devicePtrs[0], dimensions.size(), (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], nullptr, nullptr);

    cudaResult = cudaStreamSynchronize(*stream); ASSERT_EQ(0, cudaResult);

    // x.printIndexedBuffer(" X");
    // y.printIndexedBuffer("+Y");
    // z.printBuffer("ADD broadcasted output");

    // verify results
    // for (int e = 0; e < z.lengthOf(); e++)
    //     ASSERT_NEAR(exp.e<double>(e), z.e<double>(e), 1e-5);

    // free allocated global device memory
    for(int i = 0; i < devicePtrs.size(); ++i)
        cudaFree(devicePtrs[i]);

    // delete cuda stream
    //cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult);
}
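// Note: TestBroadcastRaw_1 above only asserts that the allocations, copies and stream
// synchronization succeed; its element-wise comparison against exp is left commented out,
// and the "//real output" note records the values actually produced.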
TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply) {
    // allocating host-side arrays
    NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE);
    NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE);
    //auto z = NDArrayFactory::create<double>('c', { 5 });
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 });

    //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    //x.printBuffer("23X = ");
    //y.printBuffer("23Y = ");
    x *= y;
    x.printBuffer("55Result out");

    //for (int e = 0; e < x.lengthOf(); e++) {
    //    ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5);
    //}
}

TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_2) {
    // allocating host-side arrays
    NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE);
    NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE);
    //auto z = NDArrayFactory::create<double>('c', { 5 });
    auto exp = NDArrayFactory::create<double>('c', { 2, 3 }, { 11,12, 13,14, 15, 16 });
    auto expZ = NDArrayFactory::create<double>('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 });

    //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr);
    //x.printBuffer("23X = ");
    //y.printBuffer("23Y = ");
    //void NDArray::applyTrueBroadcast(nd4j::BroadcastOpsTuple op, const NDArray* other, NDArray* target, const bool checkTargetShape, ExtraArguments *extraArgs)
    x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), &y, &exp);
    exp.printBuffer("56Result out");

    //for (int e = 0; e < x.lengthOf(); e++) {
    //    ASSERT_NEAR(exp.e<double>(e), x.e<double>(e), 1e-5);
    //}
    ASSERT_TRUE(exp.equalsTo(expZ));
}
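// TestReduceSum_1 below drives reduce::Sum through NativeOpExecutioner::execReduceSameScalar
// directly, synchronizes the stream, and syncs the scalar result back to the host before
// checking it.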
//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestReduceSum_1) {
    // allocating host-side arrays
    auto x = NDArrayFactory::create<double>('c', { 5 }, { 1, 2, 3, 4, 5});
    auto y = NDArrayFactory::create<double>(15);
    auto exp = NDArrayFactory::create<double>(15);

    auto stream = x.getContext()->getCudaStream(); //reinterpret_cast<cudaStream_t *>(&nativeStream);

    NativeOpExecutioner::execReduceSameScalar(x.getContext(), reduce::Sum,
                                              x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(),
                                              nullptr,
                                              y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo());
    auto res = cudaStreamSynchronize(*stream);
    ASSERT_EQ(0, res);
    y.syncToHost();

    x.printBuffer("X = ");
    y.printBuffer("Y = ");

    ASSERT_NEAR(y.e<double>(0), 15, 1e-5);
}

//////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, TestDup1) {
    NDArray array('c', {2,3}, {1,2,3,4,5,6});
    array.printBuffer("Array at start");
    auto arrC = array.dup('c');
    auto arrF = array.dup('f');
    // arrC->printBuffer("arrC");
    // arrF->printBuffer("arrF");
    //arrC->printShapeInfo("C shape");
    //arrF->printShapeInfo("F shape");

    ASSERT_TRUE(array.equalsTo(arrF));
    ASSERT_TRUE(array.equalsTo(arrC));
    ASSERT_TRUE(arrF->equalsTo(arrC));

    delete arrC;
    delete arrF;
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, equalsTo_1) {
    NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);
    NDArray y('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);

    ASSERT_TRUE(x.equalsTo(y));

    x.permutei({1,0});
    y.permutei({1,0});

    ASSERT_TRUE(x.equalsTo(y));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, equalsTo_2) {
    NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,10,10}, nd4j::DataType::DOUBLE);
    NDArray y('c', {2,5}, {1,2,5,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);

    ASSERT_FALSE(x.equalsTo(y));

    x.permutei({1,0});
    y.permutei({1,0});

    ASSERT_FALSE(x.equalsTo(y));
}

//////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, equalsTo_3) {
    NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE);
    NDArray y('c', {2,5}, {1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,9.f,10.f}, nd4j::DataType::FLOAT32);

    ASSERT_FALSE(x.equalsTo(y));

    x.permutei({1,0});
    y.permutei({1,0});

    ASSERT_FALSE(x.equalsTo(y));
}

////////////////////////////////////////////////////////////////////////////
TEST_F(NDArrayCudaBasicsTests, applyReduce3_1) {
    NDArray x('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32);
    NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32);
    NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::INT32);
    NDArray k('c', {2,3}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32);
    NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32);

    NDArray exp1('c', {3}, {4., 20., 36.}, nd4j::DataType::FLOAT32);
    NDArray exp2('c', {2,3}, {-10., -2., 6.,14., 22., 30.}, nd4j::DataType::FLOAT32);
    NDArray exp3('c', {4}, {38., 41., 44., 47.}, nd4j::DataType::FLOAT32);
    NDArray exp4('c', {4}, {114., 117., 120., 123.}, nd4j::DataType::FLOAT32);

    NDArray* z = x.applyReduce3(nd4j::reduce3::Dot, &y, {0,2});
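    // applyReduce3 allocates and returns a fresh result array; each block below compares it
    // with equalsTo and then deletes it.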
ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x.applyReduce3(nd4j::reduce3::Dot, &k, {0,1}); ASSERT_TRUE(z->equalsTo(&exp3)); delete z; x.permutei({0,2,1}); y.permutei({0,2,1}); z = y.applyReduce3(nd4j::reduce3::Dot, &x, {1}); ASSERT_TRUE(z->equalsTo(&exp2)); // printCudaGlobal<float><<<1,1,0, *y.getContext()->getCudaStream()>>>(z->specialBuffer(), 6); delete z; x2.permutei({1,0,2}); z = x2.applyReduce3(nd4j::reduce3::Dot, &k2, {0,1}); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_2) { NDArray x('c', {2,3,4}, {-10,-9,-8.5,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE); NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0.5,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE); NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2.5,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::DOUBLE); NDArray k('c', {2,3}, {-2,3,-4,5.5,-2,3}, nd4j::DataType::DOUBLE); NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3.5}, nd4j::DataType::DOUBLE); NDArray exp1('c', {3}, {5., 20., 36.}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,3}, {-8., -2., 6., 13., 22., 30.}, nd4j::DataType::DOUBLE); NDArray exp3('c', {4}, {39., 42.5, 47., 49.5}, nd4j::DataType::DOUBLE); NDArray exp4('c', {4}, {119., 122.5, 125., 129.5}, nd4j::DataType::DOUBLE); NDArray* z = x.applyReduce3(nd4j::reduce3::Dot, &y, {0,2}); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x.applyReduce3(nd4j::reduce3::Dot, &k, {0,1}); ASSERT_TRUE(z->equalsTo(&exp3)); delete z; x.permutei({0,2,1}); y.permutei({0,2,1}); z = y.applyReduce3(nd4j::reduce3::Dot, &x, {1}); ASSERT_TRUE(z->equalsTo(&exp2)); // printCudaGlobal<float><<<1,1,0, *y.getContext()->getCudaStream()>>>(z->specialBuffer(), 6); delete z; x2.permutei({1,0,2}); z = x2.applyReduce3(nd4j::reduce3::Dot, &k2, {0,1}); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyReduce3_3) { NDArray x1('c', {2,2,2}, {1,2,3,4,5,6,7,8}, nd4j::DataType::INT32); NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); NDArray exp1('c', {0}, {-204}, nd4j::DataType::FLOAT32); NDArray exp2('c', {0}, {31.5}, nd4j::DataType::DOUBLE); auto z = x1.applyReduce3(reduce3::Dot, &x2); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x3.applyReduce3(reduce3::Dot, &x4); ASSERT_TRUE(z->equalsTo(&exp2)); delete z; x1.permutei({2,1,0}); x2.permutei({2,1,0}); x3.permutei({1,0}); x4.permutei({1,0}); z = x1.applyReduce3(reduce3::Dot, &x2); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x3.applyReduce3(reduce3::Dot, &x4); ASSERT_TRUE(z->equalsTo(&exp2)); delete z; } //////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyAllReduce3_1) { NDArray x1('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); NDArray exp1('c', {3,2}, {-88., -124., 6., -2., 22., 14.}, nd4j::DataType::FLOAT32); NDArray exp2('c', {6,4}, {-36., -44., -52., -60.,-42., -52., -62., -72.,2., 0., -2., -4.,6., 4., 2., 0.,10., 8., 6., 4.,14., 12., 10., 8.}, nd4j::DataType::FLOAT32); 
NDArray exp3('c', {1,1}, {31.5}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,3}, {4.5, 10.5, 16.5,4.5, 10.5, 16.5,4.5, 10.5, 16.5}, nd4j::DataType::DOUBLE); auto z = x1.applyAllReduce3(reduce3::Dot, &x2, {0,2}); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x1.applyAllReduce3(reduce3::Dot, &x2, {0}); ASSERT_TRUE(z->equalsTo(&exp2)); delete z; z = x3.applyAllReduce3(reduce3::Dot, &x4, {0,1}); ASSERT_TRUE(z->equalsTo(&exp3)); delete z; z = x3.applyAllReduce3(reduce3::Dot, &x4, {1}); // z->syncToHost(); // z->printShapeInfo(); // z->printIndexedBuffer(); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; x1.permutei({2,1,0}); x2.permutei({2,1,0}); x3.permutei({1,0}); x4.permutei({1,0}); z = x1.applyAllReduce3(reduce3::Dot, &x2, {0,2}); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x3.applyAllReduce3(reduce3::Dot, &x4, {0}); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test1) { NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); NDArray scalar('c', {0}, {100}, nd4j::DataType::INT64); NDArray vec1('c', {2}, {100,100}, nd4j::DataType::INT64); NDArray vec2('c', {3}, {100,100,100}, nd4j::DataType::INT64); NDArray exp1('c', {0}, {1}, nd4j::DataType::INT64); NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); NDArray exp4('c', {0}, {2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &scalar, {0,1}); ASSERT_TRUE(scalar.equalsTo(&exp1)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &vec1, {1}); ASSERT_TRUE(vec1.equalsTo(&exp2)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &vec2, {0}); ASSERT_TRUE(vec2.equalsTo(&exp3)); x.permutei({1,0}); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &scalar, {0,1}); ASSERT_TRUE(scalar.equalsTo(&exp4)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &vec1, {0}); ASSERT_TRUE(vec1.equalsTo(&exp5)); x.applyIndexReduce(nd4j::indexreduce::IndexMax, &vec2, {1}); ASSERT_TRUE(vec2.equalsTo(&exp6)); } ////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test2) { NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); NDArray exp1('c', {0}, {1}, nd4j::DataType::INT64); NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); NDArray exp4('c', {0}, {2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); auto z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); ASSERT_TRUE(z->equalsTo(&exp1)); delete z; z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); ASSERT_TRUE(z->equalsTo(&exp2)); delete z; z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); ASSERT_TRUE(z->equalsTo(&exp3)); delete z; x.permutei({1,0}); z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); ASSERT_TRUE(z->equalsTo(&exp4)); delete z; z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); ASSERT_TRUE(z->equalsTo(&exp5)); delete z; z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); ASSERT_TRUE(z->equalsTo(&exp6)); delete z; } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test1) { NDArray x('c', {2,3,2}, 
{1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); NDArray z1('c', {0}, {100}, nd4j::DataType::DOUBLE); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::DOUBLE); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); NDArray exp1('c', {0}, {2.166667}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,2}, {3,4,1,0.666667}, nd4j::DataType::FLOAT32); NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::FLOAT32); NDArray exp5('c', {2}, {3.5,0.833333}, nd4j::DataType::FLOAT32); x.reduceAlongDimension(nd4j::reduce::Mean, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Mean, &z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::Mean, &z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::Mean, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Mean, &z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::Mean, &z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test2) { NDArray x('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::DOUBLE); NDArray exp1('c', {0}, {2.166667}, nd4j::DataType::DOUBLE); NDArray exp2('c', {2,2}, {3,4,1,0.666667}, nd4j::DataType::DOUBLE); NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::DOUBLE); NDArray exp5('c', {2}, {3.5,0.833333}, nd4j::DataType::DOUBLE); NDArray z1 = x.reduceAlongDims(nd4j::reduce::Mean, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDims(nd4j::reduce::Mean, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDims(nd4j::reduce::Mean, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDims(nd4j::reduce::Mean, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDims(nd4j::reduce::Mean, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDims(nd4j::reduce::Mean, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, EqualityTest1) { auto arrayA = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayB = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayC = NDArrayFactory::create_<float>('f', {3, 5}); auto arrayD = NDArrayFactory::create_<float>('f', {2, 4}); auto arrayE = NDArrayFactory::create_<float>('f', {1, 15}); for (int i = 0; i < arrayA->rows(); i++) { for (int k = 0; k < arrayA->columns(); k++) { arrayA->p(i, k, (float) i); } } arrayA->printBuffer("arrayA is "); for (int i = 0; i < arrayB->rows(); i++) { for (int k = 0; k < arrayB->columns(); k++) { arrayB->p(i, k, (float) i); } } arrayB->printBuffer("arrayB is "); for (int i = 0; i < arrayC->rows(); i++) { for (int k = 0; k < arrayC->columns(); k++) { arrayC->p(i, k, (float) i+1); } } arrayC->printBuffer("arrayC is "); ASSERT_TRUE(arrayA->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayC->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayD->equalsTo(arrayB, 1e-5)); ASSERT_FALSE(arrayE->equalsTo(arrayB, 1e-5)); delete arrayA; delete arrayB; delete arrayC; delete arrayD; delete arrayE; } 
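// The reduceAlongDimension / reduceAlongDims tests that follow cover Sum, IsPositive and
// CountZero reductions, both into preallocated target arrays and via returned results,
// repeating each check after permutei() reorders the input.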
//////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test1) { NDArray x('c', {2,3,2}, {1.5,2,3,4,5,6,7.5,8,-1,-2,-3.5,-4,}, nd4j::DataType::FLOAT32); NDArray z1('c', {0}, {100}, nd4j::DataType::FLOAT32); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::FLOAT32); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); NDArray exp1('c', {0}, {26.5}, nd4j::DataType::FLOAT32); NDArray exp2('c', {2,2}, {9.5,12,3,2}, nd4j::DataType::FLOAT32); NDArray exp3('c', {3}, {19,4,3.5}, nd4j::DataType::FLOAT32); NDArray exp4('c', {3,2}, {9,10,2,2,1.5,2}, nd4j::DataType::FLOAT32); NDArray exp5('c', {2}, {21.5,5}, nd4j::DataType::FLOAT32); x.reduceAlongDimension(nd4j::reduce::Sum, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Sum, &z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::Sum, &z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::Sum, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::Sum, &z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::Sum, &z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test2) { NDArray x('c', {2,3,2}, {1.5,2,3,4,5,6,7.5,8,-1,-2,-3.5,-4,}, nd4j::DataType::INT64); NDArray exp1('c', {0}, {26}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {9,12,3,2}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {18,4,4}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {8,10,2,2,2,2}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {21,5}, nd4j::DataType::INT64); NDArray z1 = x.reduceAlongDims(nd4j::reduce::Sum, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDims(nd4j::reduce::Sum, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDims(nd4j::reduce::Sum, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDims(nd4j::reduce::Sum, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDims(nd4j::reduce::Sum, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDims(nd4j::reduce::Sum, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test1) { NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::DOUBLE); NDArray z1('c', {0}, {100}, nd4j::DataType::BOOL); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::BOOL); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::BOOL); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::BOOL); NDArray z5('c', {2}, {100,100}, nd4j::DataType::BOOL); NDArray exp1('c', {0}, {1}, nd4j::DataType::BOOL); NDArray exp2('c', {2,2}, {1,1,0,1}, nd4j::DataType::BOOL); NDArray exp3('c', {3}, {1,1,1}, nd4j::DataType::BOOL); NDArray exp4('c', {3,2}, {1,1,1,0,1,1}, nd4j::DataType::BOOL); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::BOOL); x.reduceAlongDimension(nd4j::reduce::IsPositive, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::IsPositive, &z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); 
x.reduceAlongDimension(nd4j::reduce::IsPositive, &z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::IsPositive, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::IsPositive, &z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::IsPositive, &z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test2) { NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); NDArray exp1('c', {0}, {1}, nd4j::DataType::BOOL); NDArray exp2('c', {2,2}, {1,1,0,1}, nd4j::DataType::BOOL); NDArray exp3('c', {3}, {1,1,1}, nd4j::DataType::BOOL); NDArray exp4('c', {3,2}, {0,1,1,0,1,1}, nd4j::DataType::BOOL); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::BOOL); NDArray z1 = x.reduceAlongDims(nd4j::reduce::IsPositive, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDims(nd4j::reduce::IsPositive, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDims(nd4j::reduce::IsPositive, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDims(nd4j::reduce::IsPositive, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDims(nd4j::reduce::IsPositive, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDims(nd4j::reduce::IsPositive, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test1) { NDArray x('c', {2,3,2}, {0.5,2,3,-0,5,6,-7.5,0,-1,-0.5,-3.5,4}, nd4j::DataType::FLOAT32); NDArray z1('c', {0}, {100}, nd4j::DataType::INT64); NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::INT64); NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::INT64); NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::INT64); NDArray z5('c', {2}, {100,100}, nd4j::DataType::INT64); NDArray exp1('c', {0}, {2}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {0,1,0,1}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {1,1,0}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {0,1,0,1,0,0}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); x.reduceAlongDimension(nd4j::reduce::CountZero, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::CountZero, &z2, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); x.reduceAlongDimension(nd4j::reduce::CountZero, &z3, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 x.reduceAlongDimension(nd4j::reduce::CountZero, &z1, {0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); x.reduceAlongDimension(nd4j::reduce::CountZero, &z4, {1}); ASSERT_TRUE(z4.equalsTo(&exp4)); x.reduceAlongDimension(nd4j::reduce::CountZero, &z5, {0,2}); ASSERT_TRUE(z5.equalsTo(&exp5)); } //////////////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test2) { NDArray x('c', {2,3,2}, {0.5,2,3,-0,5,6,-7.5,0,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); NDArray exp1('c', {0}, {4}, nd4j::DataType::INT64); NDArray exp2('c', {2,2}, {1,1,0,2}, nd4j::DataType::INT64); NDArray exp3('c', {3}, {2,2,0}, nd4j::DataType::INT64); NDArray exp4('c', {3,2}, {1,1,0,2,0,0}, nd4j::DataType::INT64); NDArray exp5('c', {2}, {2,2}, nd4j::DataType::INT64); NDArray z1 = x.reduceAlongDims(nd4j::reduce::CountZero, 
{0,1,2}); ASSERT_TRUE(z1.equalsTo(&exp1)); NDArray z2 = x.reduceAlongDims(nd4j::reduce::CountZero, {1}); ASSERT_TRUE(z2.equalsTo(&exp2)); NDArray z3 = x.reduceAlongDims(nd4j::reduce::CountZero, {0,2}); ASSERT_TRUE(z3.equalsTo(&exp3)); x.permutei({1,0,2}); // 3x2x2 NDArray z4 = x.reduceAlongDims(nd4j::reduce::CountZero, {0,1,2}); ASSERT_TRUE(z4.equalsTo(&exp1)); NDArray z5 = x.reduceAlongDims(nd4j::reduce::CountZero, {1}); ASSERT_TRUE(z5.equalsTo(&exp4)); NDArray z6 = x.reduceAlongDims(nd4j::reduce::CountZero, {0,2}); ASSERT_TRUE(z6.equalsTo(&exp5)); } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest1) { auto x = NDArrayFactory::create<float>('c', {5, 5}); auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, row, &z, nullptr); x += *row; ASSERT_TRUE(x.equalsTo(z)); //ASSERT_TRUE(z.equalsTo(&exp)); delete row; } TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest2) { auto x = NDArrayFactory::create<float>('c', {5, 5}); //auto z = NDArrayFactory::create<float>('c', {5, 5}); auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); ASSERT_TRUE(row->equalsTo(&expRow)); x.applyBroadcast(broadcast::Add, {1}, row); ASSERT_TRUE(x.equalsTo(&exp)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, TestBroadcast_1) { NDArray exp('c', {2, 3, 2, 2}, {1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3., 1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3.}, nd4j::DataType::DOUBLE); auto input = NDArrayFactory::create<double>('c',{ 2, 3, 2, 2}); auto bias = NDArrayFactory::create<double>('c', {1, 3}); bias.linspace(1); input.applyBroadcast(broadcast::Add, {1}, &bias); ASSERT_TRUE(exp.equalsTo(&input)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_1) { auto x = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_2) { auto x = NDArrayFactory::create<float16>('c', {9}, {1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float16>('c', {9}, {1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(y)); //for (int e = 0; e < x.lengthOf(); e++) // ASSERT_NEAR(x.e<float16>(e), y.e<float16>(e), 1.e-5f); } TEST_F(NDArrayCudaBasicsTests, TestFloat16_3) { auto x = NDArrayFactory::create<bfloat16>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<bfloat16>({1,2,3,4,5,7,8,9}); ASSERT_TRUE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_4) { auto x = NDArrayFactory::create<float>({1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>({2,4,5,5,6,7,8,9}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_5) { auto x = NDArrayFactory::create<float>('c', {3,3}, {1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>('c', {3,3}, {2,4,5,5,6,7,8,9}); ASSERT_FALSE(x.equalsTo(&y)); } TEST_F(NDArrayCudaBasicsTests, TestFloat_6) { auto x = NDArrayFactory::create<float>('f', {3,3}, {1,2,3,4,5,7,8,9}); auto y = NDArrayFactory::create<float>('f', {3,3}, {2,4,5,5,6,7,8,9}); ASSERT_FALSE(x.equalsTo(&y)); } 
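// Illustrative sketch only (not part of the original suite): the broadcast operator+
// pattern exercised by the Operator_Plus tests below, under a hypothetical test name and
// using only calls those tests already rely on.
TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Sketch) {
    auto x = NDArrayFactory::create<float>('c', {4, 4});
    auto y = NDArrayFactory::create<float>('c', {1, 4});
    auto expected = NDArrayFactory::create<float>('c', {4, 4});

    x = 1.f;
    y = 2.f;
    expected = 3.f;

    // y is broadcast over the first dimension of x
    auto result = x + y;

    ASSERT_TRUE(expected.isSameShape(&result));
    ASSERT_TRUE(expected.equalsTo(&result));
}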
////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_05) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {1, 8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2 = NDArrayFactory::create<float>(expected.ordering(), expected.getShapeAsVector()); x = 1.; y = 2.; expected = 3.; res2 = 0.f; x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; ASSERT_TRUE(expected.isSameShape(&res2)); ASSERT_TRUE(expected.equalsTo(&res2)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_5) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 1, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected = 3.; //x.printBuffer("X="); //y.printBuffer("Y="); //expected.printBuffer("EXPECTED"); auto result = x + y; //result.printBuffer("1 + 2 ="); //res2.assign(x + y); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); //res2.printBuffer("Z="); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; //x.printBuffer("OutputX"); //res2.syncToHost(); //res2.printBuffer("OUputZ"); //x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_51) { auto x = NDArrayFactory::create<float>('c', {8, 8, 8}); auto y = NDArrayFactory::create<float>('c', {8, 8}); auto expected = NDArrayFactory::create<float>('c', {8, 8, 8}); NDArray res2(expected); x = 1.; y = 2.; expected = 3.; //x.printBuffer("X="); //y.printBuffer("Y="); //expected.printBuffer("EXPECTED"); auto result = x + y; //result.printBuffer("1 + 2 ="); //res2.assign(x + y); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); //res2.printBuffer("Z="); //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; // x += y; //x.printBuffer("OutputX"); //res2.syncToHost(); //res2.printBuffer("OUputZ"); //x.printIndexedBuffer("OUtputX"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_1) { auto x = NDArrayFactory::create<float>('c', {2, 1, 2}); x = 10.; auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('c', {2, 2, 2}); exp = 10.; // y.printShapeInfo("Output SHAPE"); // y.printBuffer("Output TILE"); // exp.printBuffer("Expect TILE"); ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_2) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; y.printShapeInfo("Output SHAPE"); y.printBuffer("Output TILE"); ASSERT_TRUE(exp.equalsTo(y)); } TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_3) { auto x = NDArrayFactory::create<float>('f', {2, 1, 2}); x = 10.; x.p(1,0,1, 20); x.syncToDevice(); auto y = x.tile({1,2,1}); auto exp = NDArrayFactory::create<float>('f', {2, 2, 2}); exp = 10.; exp.p(1,0,1, 20.); exp.p(1, 1, 1, 20.); exp.syncToDevice(); ASSERT_TRUE(exp.equalsTo(y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_2) { double expBuff[] = {2., 3, 3., 4., 4., 5, 5., 6., 6., 7, 7., 8.}; NDArray a('c', {4,4}, 
{1.,2,3,4,5,6,7,8,9,2,3,2,1,0,4,7.}, nd4j::DataType::FLOAT32); a.printBuffer(); auto x = NDArrayFactory::create<double>('c', {3, 2, 1}); auto y = NDArrayFactory::create<double>('c', {1, 2}); auto expected = NDArrayFactory::create<double>(expBuff, 'c', {3, 2, 2}); x.linspace(1); y.linspace(1); x.printBuffer("X="); y.printBuffer("Y="); auto result = x + y; result.printIndexedBuffer("Result"); ASSERT_TRUE(expected.isSameShape(&result)); ASSERT_TRUE(expected.equalsTo(&result)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, assign_2) { NDArray x('c', {4}, {1.5,2.5,3.5,4.5}, nd4j::DataType::FLOAT32); NDArray y('c', {4}, nd4j::DataType::INT32); NDArray expected('c', {4}, {1,2,3,4}, nd4j::DataType::INT32); y.assign(x); // y.printBuffer("ASSIGN VECTOR"); ASSERT_TRUE(expected.equalsTo(&y)); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, subarray_1) { NDArray x('c', {2,3,4}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); NDArray y('f', {2,3,4}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); Nd4jLong shapeExpX0[] = {1, 2, 12, 8192, -1, 99}; float buffExpX0[] = {1.000000, 13.000000}; Nd4jLong shapeExpX1[] = {1, 2, 12, 8192, -1, 99}; float buffExpX1[] = {2.000000, 14.000000}; Nd4jLong shapeExpX2[] = {3, 2, 1, 1, 12, 4, 1, 8192, -1, 99}; float buffExpX2[] = {1.000000, 13.000000}; Nd4jLong shapeExpX3[] = {2, 2, 4, 12, 1, 8192, -1, 99}; float buffExpX3[] = {9.000000, 10.000000, 11.000000, 12.000000, 21.000000, 22.000000, 23.000000, 24.000000}; Nd4jLong shapeExpX4[] = {3, 2, 1, 4, 12, 4, 1, 8192, -1, 99}; float buffExpX4[] = {9.000000, 10.000000, 11.000000, 12.000000, 21.000000, 22.000000, 23.000000, 24.000000}; Nd4jLong shapeExpX5[] = {2, 2, 3, 12, 4, 8192, -1, 99}; float buffExpX5[] = {4.000000, 8.000000, 12.000000, 16.000000, 20.000000, 24.000000}; Nd4jLong shapeExpY0[] = {1, 2, 1, 8192, -1, 99}; float buffExpY0[] = {1.000000, 2.000000}; Nd4jLong shapeExpY1[] = {1, 2, 1, 8192, -1, 99}; float buffExpY1[] = {7.000000, 8.000000}; Nd4jLong shapeExpY2[] = {3, 2, 1, 1, 1, 2, 6, 8192, -1, 102}; float buffExpY2[] = {1.000000, 2.000000}; Nd4jLong shapeExpY3[] = {2, 2, 4, 1, 6, 8192, -1, 99}; float buffExpY3[] = {5.000000, 11.000000, 17.000000, 23.000000, 6.000000, 12.000000, 18.000000, 24.000000}; Nd4jLong shapeExpY4[] = {3, 2, 1, 4, 1, 2, 6, 8192, -1, 102}; float buffExpY4[] = {5.000000, 11.000000, 17.000000, 23.000000, 6.000000, 12.000000, 18.000000, 24.000000}; Nd4jLong shapeExpY5[] = {2, 2, 3, 1, 2, 8192, -1, 99}; float buffExpY5[] = {19.000000, 21.000000, 23.000000, 20.000000, 22.000000, 24.000000}; NDArray x0 = x(0, {1,2}); NDArray xExp(buffExpX0, shapeExpX0); ASSERT_TRUE(xExp.isSameShape(x0)); ASSERT_TRUE(xExp.equalsTo(x0)); // for(int i = 0; i < shape::shapeInfoLength(x0.rankOf()); ++i) // ASSERT_TRUE(x0.getShapeInfo()[i] == shapeExpX0[i]); // for(int i = 0; i < x0.lengthOf(); ++i) // ASSERT_TRUE(x0.e<float>(i) == buffExpX0[i]); NDArray x1 = x(1, {1,2}); NDArray x1Exp(buffExpX1, shapeExpX1); ASSERT_TRUE(x1Exp.isSameShape(x1)); ASSERT_TRUE(x1Exp.equalsTo(x1)); // for(int i = 0; i < shape::shapeInfoLength(x1.rankOf()); ++i) // ASSERT_TRUE(x1.getShapeInfo()[i] == shapeExpX1[i]); // for(int i = 0; i < x1.lengthOf(); ++i) // ASSERT_TRUE(x1.e<float>(i) == buffExpX1[i]); NDArray x2 = x(0, {1,2}, true); NDArray x2Exp(buffExpX2, shapeExpX2); ASSERT_TRUE(x2Exp.isSameShape(x2)); x2.printBuffer("X2"); 
x2Exp.printBuffer("X2 EXPECT"); ASSERT_TRUE(x2Exp.equalsTo(x2)); // for(int i = 0; i < shape::shapeInfoLength(x2.rankOf()); ++i) // ASSERT_TRUE(x2.getShapeInfo()[i] == shapeExpX2[i]); // for(int i = 0; i < x2.lengthOf(); ++i) // ASSERT_TRUE(x2.e<float>(i) == buffExpX2[i]); NDArray x3 = x(2, {1}); NDArray x3Exp(buffExpX3, shapeExpX3); ASSERT_TRUE(x3Exp.isSameShape(x3)); ASSERT_TRUE(x3Exp.equalsTo(x3)); // for(int i = 0; i < shape::shapeInfoLength(x3.rankOf()); ++i) // ASSERT_TRUE(x3.getShapeInfo()[i] == shapeExpX3[i]); // for(int i = 0; i < x3.lengthOf(); ++i) // ASSERT_TRUE(x3.e<float>(i) == buffExpX3[i]); NDArray x4 = x(2, {1}, true); NDArray x4Exp(buffExpX4, shapeExpX4); ASSERT_TRUE(x4Exp.isSameShape(x4)); ASSERT_TRUE(x4Exp.equalsTo(x4)); // for(int i = 0; i < shape::shapeInfoLength(x4.rankOf()); ++i) // ASSERT_TRUE(x4.getShapeInfo()[i] == shapeExpX4[i]); // for(int i = 0; i < x4.lengthOf(); ++i) // ASSERT_TRUE(x4.e<float>(i) == buffExpX4[i]); NDArray x5 = x(3, {2}); NDArray x5Exp(buffExpX5, shapeExpX5); ASSERT_TRUE(x5Exp.isSameShape(x5)); ASSERT_TRUE(x5Exp.equalsTo(x5)); // for(int i = 0; i < shape::shapeInfoLength(x5.rankOf()); ++i) // ASSERT_TRUE(x5.getShapeInfo()[i] == shapeExpX5[i]); // for(int i = 0; i < x5.lengthOf(); ++i) // ASSERT_TRUE(x5.e<float>(i) == buffExpX5[i]); // ******************* // NDArray y0 = y(0, {1,2}); NDArray y0Exp(buffExpY0, shapeExpY0); ASSERT_TRUE(y0Exp.isSameShape(y0)); ASSERT_TRUE(y0Exp.equalsTo(y0)); // for(int i = 0; i < shape::shapeInfoLength(y0.rankOf()); ++i) // ASSERT_TRUE(y0.getShapeInfo()[i] == shapeExpY0[i]); // for(int i = 0; i < y0.lengthOf(); ++i) // ASSERT_TRUE(y0.e<float>(i) == buffExpY0[i]); NDArray y1 = y(1, {1,2}); for(int i = 0; i < shape::shapeInfoLength(y1.rankOf()); ++i) ASSERT_TRUE(y1.getShapeInfo()[i] == shapeExpY1[i]); for(int i = 0; i < y1.lengthOf(); ++i) ASSERT_TRUE(y1.e<float>(i) == buffExpY1[i]); NDArray y2 = y(0, {1,2}, true); for(int i = 0; i < shape::shapeInfoLength(y2.rankOf()); ++i) ASSERT_TRUE(y2.getShapeInfo()[i] == shapeExpY2[i]); for(int i = 0; i < y2.lengthOf(); ++i) ASSERT_TRUE(y2.e<float>(i) == buffExpY2[i]); NDArray y3 = y(2, {1}); for(int i = 0; i < shape::shapeInfoLength(y3.rankOf()); ++i) ASSERT_TRUE(y3.getShapeInfo()[i] == shapeExpY3[i]); for(int i = 0; i < y3.lengthOf(); ++i) ASSERT_TRUE(y3.e<float>(i) == buffExpY3[i]); NDArray y4 = y(2, {1}, true); for(int i = 0; i < shape::shapeInfoLength(y4.rankOf()); ++i) ASSERT_TRUE(y4.getShapeInfo()[i] == shapeExpY4[i]); for(int i = 0; i < y4.lengthOf(); ++i) ASSERT_TRUE(y4.e<float>(i) == buffExpY4[i]); NDArray y5 = y(3, {2}); for(int i = 0; i < shape::shapeInfoLength(y5.rankOf()); ++i) ASSERT_TRUE(y5.getShapeInfo()[i] == shapeExpY5[i]); for(int i = 0; i < y5.lengthOf(); ++i) ASSERT_TRUE(y5.e<float>(i) == buffExpY5[i]); } ////////////////////////////////////////////////////////////////////// TEST_F(NDArrayCudaBasicsTests, Test_diagonal_1) { auto x = NDArrayFactory::create<float>('c', {2, 3}, {1, 2, 3, 4, 5, 6}); auto exp = NDArrayFactory::create<float>('c', {2, 1}, {1, 5}); auto diag = x.diagonal('c'); //diag->syncToDevice(); for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { printf("VAL[%ld] = %f\n", e, diag->e<float>(e)); //, exp.e<float>(e), 1.e-5); } diag->printIndexedBuffer("DIAGONAL"); for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { ASSERT_NEAR(diag->e<float>(e), exp.e<float>(e), 1.e-5); } double eps(1.e-5); NDArray tmp(nd4j::DataType::FLOAT32, x.getContext()); // scalar = 0 ExtraArguments extras({eps}); NativeOpExecutioner::execReduce3Scalar(diag->getContext(), 
reduce3::EqualsWithEps, diag->getBuffer(), diag->getShapeInfo(), diag->getSpecialBuffer(), diag->getSpecialShapeInfo(), extras.argumentsAsT(nd4j::DataType::FLOAT32), exp.getBuffer(), exp.getShapeInfo(), exp.getSpecialBuffer(), exp.getSpecialShapeInfo(), tmp.buffer(), tmp.shapeInfo(), tmp.specialBuffer(), tmp.specialShapeInfo()); cudaStream_t* stream = x.getContext()->getCudaStream(); auto res = cudaStreamSynchronize(*stream); // tmp.printBuffer("Compare result is (expected 0)"); ASSERT_TRUE(exp.isSameShape(diag)); ASSERT_TRUE(exp.equalsTo(diag)); delete diag; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_02) { auto x = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); //x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); x->reshapei('c', {3, 4, 5}); x->permutei({0, 1, 2}); x->streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(x)); ASSERT_TRUE(exp.equalsTo(x)); delete x; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_0) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_1) { auto x = NDArrayFactory::create<float>('c', {1, 60}); x.linspace(1); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_2) { //auto x = NDArrayFactory::create<float>('c', {1, 60}); auto xx = NDArrayFactory::linspace<float>(1.f, 60.f, 60); //('c', {1, 60}); // auto x = *xx; //x.linspace(1); // auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); // x.reshapei('c', {3, 4, 5}); // x.permutei({0, 1, 2}); // 
x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); // ASSERT_TRUE(exp.isSameShape(&x)); // ASSERT_TRUE(exp.equalsTo(&x)); delete xx; } TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_3) { auto x = NDArrayFactory::create<float>('c', {1, 60}); //x.linspace(1); for (int l = 0; l < x.lengthOf(); l++) x.p(l, float(l + 1.f)); auto exp = NDArrayFactory::create<float>('c', {3, 4, 5}, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0}); x.reshapei('c', {3, 4, 5}); x.permutei({0, 1, 2}); x.streamline(); // x.printShapeInfo("{0, 1, 2} shape"); // x.printBuffer("{0, 1, 2} data"); ASSERT_TRUE(exp.isSameShape(&x)); ASSERT_TRUE(exp.equalsTo(&x)); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_1) { auto x = NDArrayFactory::empty<float>(); ASSERT_TRUE(x.isActualOnHostSide()); ASSERT_TRUE(x.isEmpty()); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_2) { auto x = NDArrayFactory::empty_<float>(); ASSERT_TRUE(x->isEmpty()); delete x; } TEST_F(NDArrayCudaBasicsTests, Test_Empty_3) { auto x = NDArrayFactory::empty(nd4j::DataType::FLOAT32); ASSERT_TRUE(x.isEmpty()); } TEST_F(NDArrayCudaBasicsTests, Test_Empty_4) { auto x = NDArrayFactory::empty_(nd4j::DataType::FLOAT32); ASSERT_TRUE(x->isEmpty()); delete x; } // printCudaGlobal<double><<<1,1,0,*stream>>>(dX, 6); // printCudaGlobal<Nd4jLong><<<1,1,0,*stream>>>(dXShapeInfo, 8); // printCudaGlobal<double><<<1,1,0,*stream>>>(dZ, 2); // printCudaGlobal<Nd4jLong><<<1,1,0,*stream>>>(dZShapeInfo, 6); // printCudaGlobal<int><<<1,1,0,*stream>>>(dimension, 1); // printCudaGlobal<Nd4jLong><<<1,1,0,*stream>>>(tadShapeInfo, 6); // printCudaGlobal<Nd4jLong><<<1,1,0,*stream>>>(tadOffsets, 2); // cudaStreamSynchronize(*stream); TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_1) { auto x = NDArrayFactory::create<float>('c', {5,2}, {0,1,2,3,4,5,6,7,8,9}); NativeOps native; auto z = NDArrayFactory::create<float>('c', {5, 8}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); std::vector<void*> buffers(4); std::vector<Nd4jLong*> shapes(4); std::vector<Nd4jLong*> hostShapes(4); for (size_t i = 0; i < buffers.size(); i++) { buffers[i] = x.specialBuffer(); shapes[i] = x.specialShapeInfo(); hostShapes[i] = x.shapeInfo(); } Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 1, 4, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_2) { auto x = NDArrayFactory::create<float>('c', {5,2}, {0,1,2,3,4,5,6,7,8,9}); NativeOps native; auto z = NDArrayFactory::create<float>('f', {5, 8}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); std::vector<void*> buffers(4); std::vector<Nd4jLong*> shapes(4); std::vector<Nd4jLong*> hostShapes(4); for (size_t i = 0; i < buffers.size(); i++) { buffers[i] = x.specialBuffer(); shapes[i] = x.specialShapeInfo(); hostShapes[i] = x.shapeInfo(); } Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 1, 4, nullptr, 
(Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("F Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_3) { auto x = NDArrayFactory::create<float>('c', {2,3}, {1,2,3,4,5,6}); auto y = NDArrayFactory::create<float>('c', {1,3}, {7,8,9}); NativeOps native; auto z = NDArrayFactory::create<float>('f', {3, 3}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); std::vector<void*> buffers(2); std::vector<Nd4jLong*> shapes(2); std::vector<Nd4jLong*> hostShapes(2); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x.specialBuffer(); shapes[0] = x.specialShapeInfo(); hostShapes[0] = x.shapeInfo(); buffers[1] = y.specialBuffer(); shapes[1] = y.specialShapeInfo(); hostShapes[1] = y.shapeInfo(); //} Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 2, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("F Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_4) { auto x = NDArrayFactory::create<float>('c', {2,3}, {1,2,3,4,5,6}); auto y = NDArrayFactory::create<float>('c', {1,3}, {7,8,9}); NativeOps native; auto z = NDArrayFactory::create<float>('c', {3, 3}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); std::vector<void*> buffers(2); std::vector<Nd4jLong*> shapes(2); std::vector<Nd4jLong*> hostShapes(2); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x.specialBuffer(); shapes[0] = x.specialShapeInfo(); hostShapes[0] = x.shapeInfo(); buffers[1] = y.specialBuffer(); shapes[1] = y.specialShapeInfo(); hostShapes[1] = y.shapeInfo(); //} Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 2, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_5) { auto x = NDArrayFactory::create<float>('c', {1,2,3}, {1,2,3,4,5,6}); auto y = NDArrayFactory::create<float>('c', {1,2,3}, {7,8,9,10,11, 12}); NativeOps native; auto z = NDArrayFactory::create<float>('c', {2, 2, 3}); auto stream = x.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); std::vector<void*> buffers(2); std::vector<Nd4jLong*> shapes(2); std::vector<Nd4jLong*> hostShapes(2); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x.specialBuffer(); shapes[0] = x.specialShapeInfo(); hostShapes[0] = x.shapeInfo(); buffers[1] = y.specialBuffer(); shapes[1] = y.specialShapeInfo(); hostShapes[1] = y.shapeInfo(); //} Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 2, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_6) { auto x1 = NDArrayFactory::create<float>('c', {2,2,3}, 
{1,2,3,4,5,6,7,8, 9, 10,11,12}); auto x2 = NDArrayFactory::create<float>('c', {1,2,3}, {13,14,15,16,17, 18}); auto x3 = NDArrayFactory::create<float>('c', {1,2,3}, {19,20,21,22,23, 24}); NativeOps native; auto z = NDArrayFactory::create<float>('c', {4, 2, 3}); auto stream = x1.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); std::vector<void*> buffers(3); std::vector<Nd4jLong*> shapes(3); std::vector<Nd4jLong*> hostShapes(3); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x1.specialBuffer(); shapes[0] = x1.specialShapeInfo(); hostShapes[0] = x1.shapeInfo(); buffers[1] = x2.specialBuffer(); shapes[1] = x2.specialShapeInfo(); hostShapes[1] = x2.shapeInfo(); buffers[2] = x3.specialBuffer(); shapes[2] = x3.specialShapeInfo(); hostShapes[2] = x3.shapeInfo(); //} printf("The third array is %p\n", buffers[2]); Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 3, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat3D result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_7) { auto x1 = NDArrayFactory::create<float>(1); auto x2 = NDArrayFactory::create<float>(2); auto x3 = NDArrayFactory::create<float>(3); NativeOps native; auto z = NDArrayFactory::create<float>('c', {3}, {1,2,3}); auto stream = x1.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); std::vector<void*> buffers(3); std::vector<Nd4jLong*> shapes(3); std::vector<Nd4jLong*> hostShapes(3); //for (size_t i = 0; i < buffers.size(); i++) { buffers[0] = x1.specialBuffer(); shapes[0] = x1.specialShapeInfo(); hostShapes[0] = x1.shapeInfo(); buffers[1] = x2.specialBuffer(); shapes[1] = x2.specialShapeInfo(); hostShapes[1] = x2.shapeInfo(); buffers[2] = x3.specialBuffer(); shapes[2] = x3.specialShapeInfo(); hostShapes[2] = x3.shapeInfo(); //} printf("The third array is %p\n", buffers[2]); Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, 3, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); z.printIndexedBuffer("Concat result"); z.printBuffer("C Concat scalar result linear"); } TEST_F(NDArrayCudaBasicsTests, Test_ConcatNative_8) { // public void testLargeConcat() { // val list = new ArrayList<INDArray>(); // // for (int e = 0; e < 100000; e++) // list.add(Nd4j.create(1, 300)); // // val result = Nd4j.concat(0, list.toArray(new INDArray[list.size()])); // } auto totalCount = 1000; auto width = 300; std::vector<NDArray> lx;//(totalCount); for (int i = 0; i < totalCount; i++) { lx.emplace_back(NDArrayFactory::create<float>('c', {1, width})); lx[i].assign(i); } NativeOps native; auto z = NDArrayFactory::create<float>('c', {totalCount, width}); auto stream = nd4j::LaunchContext ::defaultContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); std::vector<void*> buffers(totalCount); std::vector<Nd4jLong*> shapes(totalCount); std::vector<Nd4jLong*> hostShapes(totalCount); for (size_t i = 0; i < lx.size(); i++) { buffers[i] = lx[i].specialBuffer(); shapes[i] = lx[i].specialShapeInfo(); hostShapes[i] = lx[i].shapeInfo(); } printf("The third array is %p\n", buffers[2]); Nd4jPointer extra[2]; extra[1] = *stream; native.concat(extra, 0, totalCount, nullptr, 
(Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); z.syncToHost(); nd4j_printf("%f %f %f\n", z.e<float>(0), z.e<float>(width * totalCount / 2), z.e<float>(width * (totalCount - 1))); //z.printIndexedBuffer("Concat result"); //z.printBuffer("C Concat scalar result linear"); } TEST_F(NDArrayCudaBasicsTests, TestTear_1) { auto input = NDArrayFactory::create<float>('c', {1, 10, 10}); std::vector<NDArray> arrays; // = {NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10})}; int total = 151; for (int e = 0; e < total; e++) { input.assign(e); arrays.emplace_back(input); } auto z = NDArrayFactory::create<float>('c', {total, 10, 10}); NativeOps native; auto stream = input.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); Nd4jPointer extra[2]; extra[1] = *stream; std::vector<void*> buffers(total); std::vector<Nd4jLong*> shapes(total); std::vector<Nd4jLong*> hostShapes(total); for (size_t i = 0; i < buffers.size(); i++) { buffers[i] = arrays[i].specialBuffer(); shapes[i] = arrays[i].specialShapeInfo(); hostShapes[i] = arrays[i].shapeInfo(); } native.concat(extra, 0, total, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); nd4j::ops::tear op; auto result = op.execute({&z}, {}, {1, 2}); //ASSERT_EQ(10, result->size()); auto e = result->size() - 1; //for (size_t e = 0; e < result->size(); e++) { // arrays[e].printIndexedBuffer("Input list at 40"); // result->at(e)->printIndexedBuffer("OUtput TEAR at 40"); //} // ASSERT_TRUE(tads->at(e)->equalsTo(result->at(e))); delete result; // delete tads; } TEST_F(NDArrayCudaBasicsTests, TestTear_2) { auto input = NDArrayFactory::create<float>('c', {1, 10, 10}); std::vector<NDArray> arrays; // = {NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10}), NDArrayFactory::create<float>('c', {1, 10, 10})}; for (int e = 0; e < 10; e++) { input.assign(e); arrays.emplace_back(input); } auto z = NDArrayFactory::create<float>('c', {10, 10, 10}); NativeOps native; auto stream = input.getContext()->getCudaStream();//reinterpret_cast<cudaStream_t *>(&nativeStream); Nd4jPointer extra[2]; extra[1] = *stream; std::vector<void*> buffers(10); std::vector<Nd4jLong*> shapes(10); std::vector<Nd4jLong*> hostShapes(10); for (size_t i = 0; i < buffers.size(); i++) { buffers[i] = arrays[i].specialBuffer(); shapes[i] = arrays[i].specialShapeInfo(); hostShapes[i] = arrays[i].shapeInfo(); } std::vector<int> dimsToExclude({1,2}); native.concat(extra, 0, 10, nullptr, (Nd4jPointer*)hostShapes.data(), (Nd4jPointer*)buffers.data(), (Nd4jPointer*)shapes.data(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr, nullptr); // z.syncToHost(); // z.printBuffer("Pile OK"); // z.printIndexedBuffer("Pile 10x10"); // z.printIndexedBuffer("Pile 10x10"); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimsToExclude); //std::vector<void*> arraysData(arrays.size()); Nd4jPointer* arraysData; cudaError_t err = cudaMalloc(&arraysData, 
arrays.size() * sizeof(void*));
    if (err != 0) {
        printf("Cannot allocate device memory for targets due to error %d\n", err);
        ASSERT_TRUE(false);
    }
    for (size_t i = 0; i < arrays.size(); i++) {
        Nd4jPointer target = arrays[i].specialBuffer();
        cudaMemcpy(&arraysData[i], &target, sizeof(Nd4jPointer), cudaMemcpyHostToDevice);
    }
    native.tear(extra, z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(),
                arraysData, input.specialShapeInfo(), packX.specialShapeInfo(), packX.specialOffsets());
    // auto result = op.execute({&z}, {}, {1, 2});
    // nd4j_printf("Result count is %lu\n", result->size());
    //ASSERT_EQ(10, result->size());
    err = cudaFree(arraysData);
    if (err != 0) {
        printf("Cannot deallocate device memory for targets due to error %d\n", err);
        ASSERT_TRUE(false);
    }
    for (size_t e = 0; e < arrays.size(); e++) {
        arrays[e].syncToHost();
        arrays[e].printBuffer("Output list at");
        //result->at(e)->printBuffer("Output TEAR at");
    }
    // ASSERT_TRUE(tads->at(e)->equalsTo(result->at(e)));
    // delete result;
    // delete tads;
}
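// ----------------------------------------------------------------------------
// Hedged aside (not part of the test file above): TestTear_2 builds a device-
// resident table of device pointers by issuing one small cudaMemcpy per entry.
// The standalone CUDA sketch below illustrates the same pointer-table pattern,
// staged through a host vector so a single bulk copy suffices. All names here
// (fillFromPointerTable, NUM_ARRAYS, ARR_LEN, CHECK) are invented for this
// illustration and are not part of the libnd4j test suite.
// ----------------------------------------------------------------------------
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

#define CHECK(call) do { cudaError_t e_ = (call); if (e_ != cudaSuccess) { \
    printf("CUDA error %s at %s:%d\n", cudaGetErrorString(e_), __FILE__, __LINE__); return 1; } } while (0)

// Each block copies one source buffer, located through the device pointer table.
__global__ void fillFromPointerTable(float** table, float* out, int arrLen) {
    const float* src = table[blockIdx.x];
    for (int i = threadIdx.x; i < arrLen; i += blockDim.x)
        out[blockIdx.x * arrLen + i] = src[i];
}

int main() {
    const int NUM_ARRAYS = 4, ARR_LEN = 16;
    std::vector<float*> hostTable(NUM_ARRAYS);      // device pointers, staged on the host
    std::vector<float> hostData(ARR_LEN);
    for (int a = 0; a < NUM_ARRAYS; ++a) {
        CHECK(cudaMalloc(&hostTable[a], ARR_LEN * sizeof(float)));
        for (int i = 0; i < ARR_LEN; ++i) hostData[i] = float(a);
        CHECK(cudaMemcpy(hostTable[a], hostData.data(), ARR_LEN * sizeof(float), cudaMemcpyHostToDevice));
    }

    // One bulk copy of the pointer table instead of NUM_ARRAYS tiny copies.
    float** deviceTable;
    CHECK(cudaMalloc(&deviceTable, NUM_ARRAYS * sizeof(float*)));
    CHECK(cudaMemcpy(deviceTable, hostTable.data(), NUM_ARRAYS * sizeof(float*), cudaMemcpyHostToDevice));

    float* out;
    CHECK(cudaMalloc(&out, NUM_ARRAYS * ARR_LEN * sizeof(float)));
    fillFromPointerTable<<<NUM_ARRAYS, 32>>>(deviceTable, out, ARR_LEN);
    CHECK(cudaDeviceSynchronize());

    std::vector<float> result(NUM_ARRAYS * ARR_LEN);
    CHECK(cudaMemcpy(result.data(), out, result.size() * sizeof(float), cudaMemcpyDeviceToHost));
    printf("first element of each row: %f %f %f %f\n",
           result[0], result[ARR_LEN], result[2 * ARR_LEN], result[3 * ARR_LEN]);

    for (int a = 0; a < NUM_ARRAYS; ++a) CHECK(cudaFree(hostTable[a]));
    CHECK(cudaFree(deviceTable));
    CHECK(cudaFree(out));
    return 0;
}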
828c0fb7b276dfbfa96ec254b896a42f23b41530.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2018 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "xflow/ops.h" #include "xflow/cuda_helper.h" using namespace XFlow; void Element::map(void) { // create descriptors checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc)); // set descriptors helperSetTensorDescriptor(inputs[0], inputTensor); cudnnOpTensorOp_t opType; switch (type) { case OP_EW_ADD: opType = CUDNN_OP_TENSOR_ADD; break; case OP_EW_MUL: opType = CUDNN_OP_TENSOR_MUL; break; default: assert(false); } checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT, CUDNN_NOT_PROPAGATE_NAN)); // allocate tensors size_t outputSize = sizeof(DATATYPE); for (int i = 0; i < outputs[0].numDim; i++) outputSize *= outputs[0].dim[i]; checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize)); } void Element::unmap(void) { checkCUDNN(cudnnDestroyTensorDescriptor(inputTensor)); checkCUDNN(cudnnDestroyOpTensorDescriptor(opDesc)); checkCUDA(hipFree(outputs[0].data_ptr)); } void Element::forward(bool block) { const float alpha = 1.0f; const float beta = 0.0f; checkCUDNN(cudnnOpTensor(model->dnn, opDesc, &alpha, inputTensor, inputs[0].data_ptr, &alpha, inputTensor, inputs[1].data_ptr, &beta, inputTensor, outputs[0].data_ptr)); if (block) checkCUDA(hipDeviceSynchronize()); } void Model::measure_element_cost(Element* ele) { const float alpha = 1.0f; const float beta = 0.0f; helperSetTensorDescriptor(ele->inputs[0], inputTensor); //int inputN = ele->inputs[0].dim[0]; //int inputC = max(ele->inputs[0].dim[1], 1); //int inputH = max(ele->inputs[0].dim[2], 1); //int inputW = max(ele->inputs[0].dim[3], 1); //checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW, // CUDNN_DATA_FLOAT, inputN, inputC, inputH, inputW)); cudnnOpTensorOp_t opType; switch (ele->type) { case OP_EW_ADD: opType = CUDNN_OP_TENSOR_ADD; break; case OP_EW_MUL: opType = CUDNN_OP_TENSOR_MUL; break; default: assert(false); } checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT, CUDNN_NOT_PROPAGATE_NAN)); checkCUDA(hipDeviceSynchronize()); checkCUDA(hipEventRecord(startEvent)); for (int i = 0; i < REPEAT_TIMES; i++) { checkCUDNN(cudnnOpTensor(dnn, opDesc, &alpha, inputTensor, inputPtr, &alpha, inputTensor, filterPtr, &beta, inputTensor, outputPtr)); } checkCUDA(hipEventRecord(endEvent)); checkCUDA(hipEventSynchronize(endEvent)); float milliseconds; hipEventElapsedTime(&milliseconds, startEvent, endEvent); ele->runtime = milliseconds / REPEAT_TIMES; if (print_cost) printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n", ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2], ele->inputs[0].dim[3], ele->type, ele->runtime); }
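// ----------------------------------------------------------------------------
// Hedged sketch (not part of the XFlow sources): Model::measure_element_cost
// above times a cuDNN op by synchronizing, recording a start event, replaying
// the op REPEAT_TIMES times, recording an end event and averaging. The plain
// CUDA snippet below reproduces that timing pattern with a simple element-wise
// add kernel standing in for cudnnOpTensor; ewAdd, N and REPEAT are names
// invented for this illustration.
// ----------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__global__ void ewAdd(const float* a, const float* b, float* c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main() {
    const int N = 1 << 20, REPEAT = 100;
    float *a, *b, *c;
    cudaMalloc(&a, N * sizeof(float));
    cudaMalloc(&b, N * sizeof(float));
    cudaMalloc(&c, N * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Same structure as measure_element_cost: sync, record, repeat, record, average.
    cudaDeviceSynchronize();
    cudaEventRecord(start);
    for (int i = 0; i < REPEAT; ++i)
        ewAdd<<<(N + 255) / 256, 256>>>(a, b, c, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("average element-wise add time: %f ms\n", ms / REPEAT);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}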
828c0fb7b276dfbfa96ec254b896a42f23b41530.cu
/* Copyright 2018 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "xflow/ops.h" #include "xflow/cuda_helper.h" using namespace XFlow; void Element::map(void) { // create descriptors checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc)); // set descriptors helperSetTensorDescriptor(inputs[0], inputTensor); cudnnOpTensorOp_t opType; switch (type) { case OP_EW_ADD: opType = CUDNN_OP_TENSOR_ADD; break; case OP_EW_MUL: opType = CUDNN_OP_TENSOR_MUL; break; default: assert(false); } checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT, CUDNN_NOT_PROPAGATE_NAN)); // allocate tensors size_t outputSize = sizeof(DATATYPE); for (int i = 0; i < outputs[0].numDim; i++) outputSize *= outputs[0].dim[i]; checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize)); } void Element::unmap(void) { checkCUDNN(cudnnDestroyTensorDescriptor(inputTensor)); checkCUDNN(cudnnDestroyOpTensorDescriptor(opDesc)); checkCUDA(cudaFree(outputs[0].data_ptr)); } void Element::forward(bool block) { const float alpha = 1.0f; const float beta = 0.0f; checkCUDNN(cudnnOpTensor(model->dnn, opDesc, &alpha, inputTensor, inputs[0].data_ptr, &alpha, inputTensor, inputs[1].data_ptr, &beta, inputTensor, outputs[0].data_ptr)); if (block) checkCUDA(cudaDeviceSynchronize()); } void Model::measure_element_cost(Element* ele) { const float alpha = 1.0f; const float beta = 0.0f; helperSetTensorDescriptor(ele->inputs[0], inputTensor); //int inputN = ele->inputs[0].dim[0]; //int inputC = max(ele->inputs[0].dim[1], 1); //int inputH = max(ele->inputs[0].dim[2], 1); //int inputW = max(ele->inputs[0].dim[3], 1); //checkCUDNN(cudnnSetTensor4dDescriptor(inputTensor, CUDNN_TENSOR_NCHW, // CUDNN_DATA_FLOAT, inputN, inputC, inputH, inputW)); cudnnOpTensorOp_t opType; switch (ele->type) { case OP_EW_ADD: opType = CUDNN_OP_TENSOR_ADD; break; case OP_EW_MUL: opType = CUDNN_OP_TENSOR_MUL; break; default: assert(false); } checkCUDNN(cudnnSetOpTensorDescriptor(opDesc, opType, CUDNN_DATA_FLOAT, CUDNN_NOT_PROPAGATE_NAN)); checkCUDA(cudaDeviceSynchronize()); checkCUDA(cudaEventRecord(startEvent)); for (int i = 0; i < REPEAT_TIMES; i++) { checkCUDNN(cudnnOpTensor(dnn, opDesc, &alpha, inputTensor, inputPtr, &alpha, inputTensor, filterPtr, &beta, inputTensor, outputPtr)); } checkCUDA(cudaEventRecord(endEvent)); checkCUDA(cudaEventSynchronize(endEvent)); float milliseconds; cudaEventElapsedTime(&milliseconds, startEvent, endEvent); ele->runtime = milliseconds / REPEAT_TIMES; if (print_cost) printf(" measure[Element]: i(%d %d %d %d) type(%d) cost(%.4lf)\n", ele->inputs[0].dim[0], ele->inputs[0].dim[1], ele->inputs[0].dim[2], ele->inputs[0].dim[3], ele->type, ele->runtime); }
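// ----------------------------------------------------------------------------
// Hedged sketch (illustration only, not part of the file above): Element::forward
// drives cuDNN's op-tensor path. A minimal standalone use of that API computing
// C = alpha*A + alpha*B for a small NCHW tensor might look like the following;
// status checks are omitted for brevity and the extents n/c/h/w are arbitrary
// values chosen here. Compile with -lcudnn.
// ----------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>
#include <cudnn.h>

int main() {
    const int n = 1, c = 2, h = 3, w = 4, len = n * c * h * w;
    const float alpha = 1.0f, beta = 0.0f;

    cudnnHandle_t dnn;
    cudnnCreate(&dnn);

    cudnnTensorDescriptor_t desc;                 // one descriptor reused for A, B and C
    cudnnCreateTensorDescriptor(&desc);
    cudnnSetTensor4dDescriptor(desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w);

    cudnnOpTensorDescriptor_t opDesc;             // element-wise add, as for OP_EW_ADD
    cudnnCreateOpTensorDescriptor(&opDesc);
    cudnnSetOpTensorDescriptor(opDesc, CUDNN_OP_TENSOR_ADD, CUDNN_DATA_FLOAT, CUDNN_NOT_PROPAGATE_NAN);

    float *A, *B, *C;
    cudaMalloc(&A, len * sizeof(float));
    cudaMalloc(&B, len * sizeof(float));
    cudaMalloc(&C, len * sizeof(float));

    float host[len];
    for (int i = 0; i < len; ++i) host[i] = 1.0f;
    cudaMemcpy(A, host, sizeof(host), cudaMemcpyHostToDevice);
    for (int i = 0; i < len; ++i) host[i] = 2.0f;
    cudaMemcpy(B, host, sizeof(host), cudaMemcpyHostToDevice);

    // C = alpha*A + alpha*B + beta*C, mirroring the call in Element::forward.
    cudnnOpTensor(dnn, opDesc, &alpha, desc, A, &alpha, desc, B, &beta, desc, C);
    cudaDeviceSynchronize();

    cudaMemcpy(host, C, sizeof(host), cudaMemcpyDeviceToHost);
    printf("C[0] = %f (expected 3)\n", host[0]);

    cudnnDestroyOpTensorDescriptor(opDesc);
    cudnnDestroyTensorDescriptor(desc);
    cudnnDestroy(dnn);
    cudaFree(A); cudaFree(B); cudaFree(C);
    return 0;
}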
d5ce88d7da3fa59f5adfd835f8eeb7ad10b2c37d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> inline double gettime_ms() { struct timeval t; gettimeofday(&t,NULL); return (t.tv_sec+t.tv_usec*1e-6)*1000; } __global__ void mem_kernel(int *arr, int N, int *rdom_arr, int *result) { int tem=0; int random; int tid=blockIdx.x*blockDim.x+threadIdx.x; int totalthreads=blockDim.x*gridDim.x; int sum=0; __shared__ int sm[2048]; sm[threadIdx.x]=arr[tid]; for(int i=0;i<10;++i){ for(int j=threadIdx.x; j<N; j+=blockDim.x){ random = rdom_arr[j]; int idx=(j+random)%blockDim.x; tem = sm[idx]; sum=sum+tem; //__syncthreads(); sm[idx]=sum; } } //write result arr[tid] = sm[threadIdx.x]; } int main(int argc, char **argv){ //input: Size N, Utilization U, Affinity A, BlockNum B, ThreadsNum T, Randomizer X int N, U, A, B, T, X; if (argc!=7) { printf("\nInput arguments wrong!\n input: Size N, Utilization U, Affinity A, BlockNum B, ThreadsNum T, Random X \n "); return 0; } N=atoi(argv[1]); U=atof(argv[2]); A=atoi(argv[3]); B=atoi(argv[4]); T=atoi(argv[5]); X=atoi(argv[6]); printf("\ninput: Size N:%d, Utilization U:%d, Affinity A:%d, BlockNum B:%d, ThreadsNum T:%d, Random X:%d \n ",N,U,A,B,T,X); if(N%32!=0) { printf("\nArray size N has to be multiple of 32\n"); return 0; } hipSetDevice(1); srand(0); int *array_h=(int *)malloc(N*sizeof(int)); for (int i=0;i<N;++i) array_h[i]=1; int *array_d; hipMalloc(&array_d,N*sizeof(int)); hipMemcpy(array_d,array_h,N*sizeof(int),hipMemcpyHostToDevice); int *rdom_arr_h=(int *)malloc(N*sizeof(int)); for (int i=0;i<N;++i) rdom_arr_h[i]= rand() % X; //generate random number in range [0, X) int *rdom_arr_d; hipMalloc(&rdom_arr_d,N*sizeof(int)); hipMemcpy(rdom_arr_d,rdom_arr_h,N*sizeof(int),hipMemcpyHostToDevice); int *result_h=(int *)malloc(N*sizeof(int)); int *result_d; hipMalloc(&result_d,N*sizeof(int)); double ktime=gettime_ms();hipLaunchKernelGGL(( mem_kernel), dim3(B),dim3(T), 0, 0, array_d,N,rdom_arr_d,result_d); hipDeviceSynchronize(); ktime=gettime_ms()-ktime; FILE* fp=fopen("result.txt","a+"); fprintf(fp,"%f ",ktime); printf("Kernel time:%f \n",ktime); fclose(fp); hipMemcpy(result_h,result_d,N*sizeof(int),hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("results:\n"); for(int i=0;i<10;++i){ printf("%d ",result_h[i]); } printf("\n"); free(array_h); free(rdom_arr_h); free(result_h); hipFree(array_d); hipFree(rdom_arr_d); hipFree(result_d); return 0; }
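// ----------------------------------------------------------------------------
// Hedged aside (not part of the benchmark above): mem_kernel scatters into
// sm[(j + random) % blockDim.x] with __syncthreads() commented out, which is a
// data race unless the kernel is used purely as a latency/throughput probe. A
// race-free variant that keeps the randomized *reads* but has each thread
// accumulate into a private sum is sketched below; randomGatherKernel and its
// parameters are names invented for this illustration.
// ----------------------------------------------------------------------------
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

__global__ void randomGatherKernel(const int* data, const int* randomOffsets, int n, int* out) {
    extern __shared__ int tile[];                 // one slot per thread
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    tile[threadIdx.x] = data[tid];
    __syncthreads();                              // every slot is populated before any read

    int sum = 0;
    for (int j = threadIdx.x; j < n; j += blockDim.x) {
        int idx = (j + randomOffsets[j]) % blockDim.x;
        sum += tile[idx];                         // randomized read, no shared-memory write race
    }
    out[tid] = sum;
}

int main() {
    const int B = 4, T = 256, N = B * T, X = 97;
    int *data, *offsets, *out;
    cudaMallocManaged(&data, N * sizeof(int));
    cudaMallocManaged(&offsets, N * sizeof(int));
    cudaMallocManaged(&out, N * sizeof(int));
    for (int i = 0; i < N; ++i) { data[i] = 1; offsets[i] = rand() % X; }

    randomGatherKernel<<<B, T, T * sizeof(int)>>>(data, offsets, N, out);
    cudaDeviceSynchronize();
    printf("out[0] = %d (each thread reads N/T = %d ones)\n", out[0], N / T);

    cudaFree(data); cudaFree(offsets); cudaFree(out);
    return 0;
}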
d5ce88d7da3fa59f5adfd835f8eeb7ad10b2c37d.cu
#include "cuda.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> inline double gettime_ms() { struct timeval t; gettimeofday(&t,NULL); return (t.tv_sec+t.tv_usec*1e-6)*1000; } __global__ void mem_kernel(int *arr, int N, int *rdom_arr, int *result) { int tem=0; int random; int tid=blockIdx.x*blockDim.x+threadIdx.x; int totalthreads=blockDim.x*gridDim.x; int sum=0; __shared__ int sm[2048]; sm[threadIdx.x]=arr[tid]; for(int i=0;i<10;++i){ for(int j=threadIdx.x; j<N; j+=blockDim.x){ random = rdom_arr[j]; int idx=(j+random)%blockDim.x; tem = sm[idx]; sum=sum+tem; //__syncthreads(); sm[idx]=sum; } } //write result arr[tid] = sm[threadIdx.x]; } int main(int argc, char **argv){ //input: Size N, Utilization U, Affinity A, BlockNum B, ThreadsNum T, Randomizer X int N, U, A, B, T, X; if (argc!=7) { printf("\nInput arguments wrong!\n input: Size N, Utilization U, Affinity A, BlockNum B, ThreadsNum T, Random X \n "); return 0; } N=atoi(argv[1]); U=atof(argv[2]); A=atoi(argv[3]); B=atoi(argv[4]); T=atoi(argv[5]); X=atoi(argv[6]); printf("\ninput: Size N:%d, Utilization U:%d, Affinity A:%d, BlockNum B:%d, ThreadsNum T:%d, Random X:%d \n ",N,U,A,B,T,X); if(N%32!=0) { printf("\nArray size N has to be multiple of 32\n"); return 0; } cudaSetDevice(1); srand(0); int *array_h=(int *)malloc(N*sizeof(int)); for (int i=0;i<N;++i) array_h[i]=1; int *array_d; cudaMalloc(&array_d,N*sizeof(int)); cudaMemcpy(array_d,array_h,N*sizeof(int),cudaMemcpyHostToDevice); int *rdom_arr_h=(int *)malloc(N*sizeof(int)); for (int i=0;i<N;++i) rdom_arr_h[i]= rand() % X; //generate random number in range [0, X) int *rdom_arr_d; cudaMalloc(&rdom_arr_d,N*sizeof(int)); cudaMemcpy(rdom_arr_d,rdom_arr_h,N*sizeof(int),cudaMemcpyHostToDevice); int *result_h=(int *)malloc(N*sizeof(int)); int *result_d; cudaMalloc(&result_d,N*sizeof(int)); double ktime=gettime_ms(); mem_kernel<<<B,T>>>(array_d,N,rdom_arr_d,result_d); cudaDeviceSynchronize(); ktime=gettime_ms()-ktime; FILE* fp=fopen("result.txt","a+"); fprintf(fp,"%f ",ktime); printf("Kernel time:%f \n",ktime); fclose(fp); cudaMemcpy(result_h,result_d,N*sizeof(int),cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("results:\n"); for(int i=0;i<10;++i){ printf("%d ",result_h[i]); } printf("\n"); free(array_h); free(rdom_arr_h); free(result_h); cudaFree(array_d); cudaFree(rdom_arr_d); cudaFree(result_d); return 0; }
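// ----------------------------------------------------------------------------
// Hedged observation (not a change to the file above): mem_kernel writes its
// output into arr and never touches the result parameter, yet main() copies
// result_d back and prints it, so the printed "results" are uninitialized
// memory. If the intent was to read back the per-thread sums, a kernel variant
// along these lines would make that output meaningful; treating that as the
// intent is an assumption on my part.
// ----------------------------------------------------------------------------
__global__ void mem_kernel_with_result(int *arr, int N, int *rdom_arr, int *result) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    __shared__ int sm[2048];
    sm[threadIdx.x] = arr[tid];
    for (int i = 0; i < 10; ++i) {
        for (int j = threadIdx.x; j < N; j += blockDim.x) {
            int idx = (j + rdom_arr[j]) % blockDim.x;
            sum += sm[idx];                       // same access pattern as the original
            sm[idx] = sum;
        }
    }
    arr[tid] = sm[threadIdx.x];
    result[tid] = sum;                            // value copied back and printed by main()
}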
e85710632a3700fd9dae737df92e1c9f1ede55a3.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @precisions normal z -> c d s @author Weifeng Liu */ // CSR5 SpMV kernel // see paper by W. Liu and B. Vinter. (2015). // "CSR5: An Efficient Storage Format for Cross-Platform // Sparse Matrix-Vector Multiplication". // 29th ACM International Conference on Supercomputing (ICS15). pp. 339-350. #include "magmasparse_internal.h" #include "atomicopsmagmaDoubleComplex.h" #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #define MAGMA_CSR5_THREAD_GROUP 128 #define MAGMA_CSR5_THREAD_BUNCH 32 #if (defined( TORCH_HIP_VERSION ) && ( TORCH_HIP_VERSION >= 8000 )) __inline__ __device__ void sum_32( magmaDoubleComplex *s_sum, const int local_id) { if (local_id < 16) s_sum[local_id] += s_sum[local_id + 16]; if (local_id < 8) s_sum[local_id] += s_sum[local_id + 8]; if (local_id < 4) s_sum[local_id] += s_sum[local_id + 4]; if (local_id < 2) s_sum[local_id] += s_sum[local_id + 2]; if (local_id < 1) s_sum[local_id] += s_sum[local_id + 1]; } __inline__ __device__ void scan_32( magmaDoubleComplex *s_scan, const int local_id) { int ai, bi; const int baseai = 2 * local_id + 1; const int basebi = baseai + 1; magmaDoubleComplex temp; if (local_id < 16) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; } if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; } if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; } if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; } if (local_id == 0) { s_scan[31] = s_scan[15]; s_scan[15] = MAGMA_Z_ZERO; } if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; } if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; } if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; } if (local_id < 16) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; } } __inline__ __device__ magmaDoubleComplex candidate( magmaDoubleComplex *d_value_tile, magmaDoubleComplex *d_x, const magma_index_t *d_column_index_tile, const magma_index_t candidate_index, const magmaDoubleComplex alpha) { magmaDoubleComplex x = MAGMA_Z_ZERO; #if __CUDA_ARCH__ >= 350 x = __ldg(&d_x[d_column_index_tile[candidate_index]]); #else x = d_x[d_column_index_tile[candidate_index]]; #endif return d_value_tile[candidate_index] * x * alpha; } //template<typename vT> //__forceinline__ __device__ //vT segmented_sum_shfl(vT tmp_sum, // const int scansum_offset, // const int lane_id) //{ // vT sum = __shfl_down(tmp_sum, 1); // sum = lane_id == MAGMA_CSR5_OMEGA - 1 ? 0 : sum; // // inclusive scan // vT scan_sum = scan_32_shfl(sum); //scan_32_shfl<vT>(sum, lane_id); // tmp_sum = __shfl_down(scan_sum, scansum_offset); // tmp_sum = tmp_sum - scan_sum + sum; // // return tmp_sum; //} __forceinline__ __device__ magmaDoubleComplex segmented_sum( magmaDoubleComplex tmp_sum, magmaDoubleComplex *s_sum, const magma_index_t scansum_offset, const magma_index_t lane_id) { if (lane_id) s_sum[lane_id - 1] = tmp_sum; s_sum[lane_id] = lane_id == MAGMA_CSR5_OMEGA - 1 ? 
MAGMA_Z_ZERO : s_sum[lane_id]; magmaDoubleComplex sum = tmp_sum = s_sum[lane_id]; scan_32(s_sum, lane_id); // exclusive scan s_sum[lane_id] += tmp_sum; // inclusive scan (exclusive scan+original val) tmp_sum = s_sum[lane_id + scansum_offset]; tmp_sum = tmp_sum - s_sum[lane_id] + sum; return tmp_sum; } template<int c_sigma> __inline__ __device__ void tile_fast_track( magmaDoubleComplex *d_value_tile, magmaDoubleComplex *d_x, const magma_index_t *d_column_index_tile, magmaDoubleComplex *d_calibrator, //#if __CUDA_ARCH__ < 300 magmaDoubleComplex *s_sum, //#endif const int lane_id, const magma_index_t par_id, const magmaDoubleComplex alpha) { magmaDoubleComplex sum = MAGMA_Z_ZERO; #pragma unroll for (int i = 0; i < c_sigma; i++) { sum += candidate(d_value_tile, d_x, d_column_index_tile, i * MAGMA_CSR5_OMEGA + lane_id, alpha); } //#if __CUDA_ARCH__ >= 300 // use shfl intrinsic // sum = sum_32_shfl<vT>(sum); // if (!lane_id) // d_calibrator[par_id] = sum; //#else // use smem s_sum[lane_id] = sum; sum_32(s_sum, lane_id); if (!lane_id) { d_calibrator[par_id] = s_sum[0]; } //#endif } template<int c_sigma> __inline__ __device__ void tile_normal_track( const magma_index_t *d_column_index_tile, magmaDoubleComplex *d_value_tile, magmaDoubleComplex *d_x, const magma_uindex_t *d_tile_desc, const magma_index_t *d_tile_desc_offset_ptr, const magma_index_t *d_tile_desc_offset, magmaDoubleComplex *d_calibrator, magmaDoubleComplex *d_y, //#if __CUDA_ARCH__ < 300 magmaDoubleComplex *s_sum, volatile int *s_scan, //#endif const magma_index_t par_id, const int lane_id, const int bit_y_offset, const int bit_scansum_offset, const bool empty_rows, const magmaDoubleComplex alpha) { int start = 0; int stop = 0; bool local_bit; magmaDoubleComplex sum = MAGMA_Z_ZERO; magma_index_t offset_pointer = empty_rows ? d_tile_desc_offset_ptr[par_id] : 0; magma_uindex_t descriptor = d_tile_desc[lane_id]; magma_index_t y_offset = descriptor >> (32 - bit_y_offset); const int scansum_offset = (descriptor << bit_y_offset) >> (32 - bit_scansum_offset); const int bit_bitflag = 32 - bit_y_offset - bit_scansum_offset; bool direct = false; magmaDoubleComplex first_sum, last_sum; // step 1. thread-level seg sum int ly = 0; // extract the first bit-flag packet descriptor = descriptor << (bit_y_offset + bit_scansum_offset); descriptor = lane_id ? descriptor : descriptor | 0x80000000; local_bit = (descriptor >> 31) & 0x1; start = !local_bit; direct = local_bit & (bool)lane_id; sum = candidate(d_value_tile, d_x, d_column_index_tile, lane_id, alpha); #pragma unroll for (int i = 1; i < c_sigma; i++) { int norm_i = i - bit_bitflag; if (!(ly || norm_i) || (ly && !(31 & norm_i))) { ly++; descriptor = d_tile_desc[ly * MAGMA_CSR5_OMEGA + lane_id]; } norm_i = !ly ? 31 & i : 31 & norm_i; norm_i = 31 - norm_i; local_bit = (descriptor >> norm_i) & 0x1; if (local_bit) { if (direct) d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset] : y_offset] += sum; else first_sum = sum; } y_offset += local_bit & direct; direct |= local_bit; sum = local_bit ? MAGMA_Z_ZERO : sum; stop += local_bit; sum += candidate(d_value_tile, d_x, d_column_index_tile, i * MAGMA_CSR5_OMEGA + lane_id, alpha); } first_sum = direct ? first_sum : sum; last_sum = sum; // step 2. segmented sum sum = start ? first_sum : MAGMA_Z_ZERO; //#if __CUDA_ARCH__ >= 300 // sum = segmented_sum_shfl<vT>(sum, scansum_offset, lane_id); //#else sum = segmented_sum(sum, s_sum, scansum_offset, lane_id); //#endif // step 3-1. add s_sum to position stop last_sum += (start <= stop) ? 
sum : MAGMA_Z_ZERO; // step 3-2. write sums to result array if (direct) d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset] : y_offset] += last_sum; // the first/last value of the first thread goes to calibration if (!lane_id) d_calibrator[par_id] = direct ? first_sum : last_sum; } template<int c_sigma> __inline__ __device__ void spmv_tile( const magma_index_t *d_column_index_tile, magmaDoubleComplex *d_value_tile, const magma_index_t *d_row_pointer, magmaDoubleComplex *d_x, const magma_uindex_t *d_tile_ptr, const magma_uindex_t *d_tile_desc, const magma_index_t *d_tile_desc_offset_ptr, const magma_index_t *d_tile_desc_offset, magmaDoubleComplex *d_calibrator, magmaDoubleComplex *d_y, const magma_index_t par_id, const int lane_id, const int bunch_id, const int bit_y_offset, const int bit_scansum_offset, const magmaDoubleComplex alpha) { //#if __CUDA_ARCH__ < 300 __shared__ magmaDoubleComplex s_sum[MAGMA_CSR5_THREAD_GROUP]; volatile __shared__ int s_scan[(MAGMA_CSR5_OMEGA + 1) * (MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA)]; //#endif magma_uindex_t row_start, row_stop; //#if __CUDA_ARCH__ >= 350 // if (lane_id < 2) // row_start = __ldg(&d_tile_ptr[par_id + lane_id]); // row_stop = __shfl(row_start, 1); // row_start = __shfl(row_start, 0); // row_stop &= 0x7FFFFFFF; //#else volatile __shared__ magma_uindex_t s_row_start_stop[MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1]; if (threadIdx.x < MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1) { s_row_start_stop[threadIdx.x] = d_tile_ptr[par_id + threadIdx.x]; } __syncthreads(); row_start = s_row_start_stop[bunch_id]; row_stop = s_row_start_stop[bunch_id + 1] & 0x7FFFFFFF; //#endif if (row_start == row_stop) // fast track through reduction { tile_fast_track<c_sigma> (d_value_tile, d_x, d_column_index_tile, d_calibrator, //#if __CUDA_ARCH__ < 300 &s_sum[bunch_id * MAGMA_CSR5_OMEGA], //#endif lane_id, par_id, alpha); } else { const bool empty_rows = (row_start >> 31) & 0x1; row_start &= 0x7FFFFFFF; d_y = &d_y[row_start+1]; tile_normal_track<c_sigma> (d_column_index_tile, d_value_tile, d_x, d_tile_desc, d_tile_desc_offset_ptr, d_tile_desc_offset, d_calibrator, d_y, //#if __CUDA_ARCH__ < 300 &s_sum[bunch_id * MAGMA_CSR5_OMEGA], &s_scan[bunch_id * (MAGMA_CSR5_OMEGA + 1)], //#endif par_id, lane_id, bit_y_offset, bit_scansum_offset, empty_rows, alpha); } } template<int c_sigma> __global__ void spmv_csr5_compute_kernel( const magma_index_t *d_column_index, magmaDoubleComplex *d_value, const magma_index_t *d_row_pointer, magmaDoubleComplex *d_x, const magma_uindex_t *d_tile_ptr, const magma_uindex_t *d_tile_desc, const magma_index_t *d_tile_desc_offset_ptr, const magma_index_t *d_tile_desc_offset, magmaDoubleComplex *d_calibrator, magmaDoubleComplex *d_y, const magma_index_t p, const int num_packet, const int bit_y_offset, const int bit_scansum_offset, const magmaDoubleComplex alpha) { // warp lane id const int lane_id = 31 & threadIdx.x; //threadIdx.x % CSR5_OMEGA; // warp global id == par_id const magma_index_t par_id = (blockIdx.x * blockDim.x + threadIdx.x) / MAGMA_CSR5_OMEGA; const int bunch_id = threadIdx.x / MAGMA_CSR5_OMEGA; if (par_id >= p - 1) return; spmv_tile<c_sigma> (&d_column_index[par_id * MAGMA_CSR5_OMEGA * c_sigma], &d_value[par_id * MAGMA_CSR5_OMEGA * c_sigma], d_row_pointer, d_x, d_tile_ptr, &d_tile_desc[par_id * MAGMA_CSR5_OMEGA * num_packet], d_tile_desc_offset_ptr, d_tile_desc_offset, d_calibrator, d_y, par_id, lane_id, bunch_id, bit_y_offset, bit_scansum_offset, alpha); } __global__ void spmv_csr5_calibrate_kernel( const 
magma_uindex_t *d_tile_ptr, const magmaDoubleComplex *d_calibrator, magmaDoubleComplex *d_y, const magma_index_t p) { //const int lane_id = threadIdx.x % MAGMA_CSR5_THREAD_BUNCH; //const int bunch_id = threadIdx.x / MAGMA_CSR5_THREAD_BUNCH; const int local_id = threadIdx.x; const magma_index_t global_id = blockIdx.x * blockDim.x + threadIdx.x; magmaDoubleComplex sum; volatile __shared__ magma_index_t s_tile_ptr[MAGMA_CSR5_THREAD_GROUP+1]; __shared__ magmaDoubleComplex s_calibrator[MAGMA_CSR5_THREAD_GROUP]; //volatile __shared__ // magmaDoubleComplex s_sum[MAGMA_CSR5_THREAD_GROUP // / MAGMA_CSR5_THREAD_BUNCH]; s_tile_ptr[local_id] = global_id < p-1 ? (magma_index_t)(d_tile_ptr[global_id] & 0x7FFFFFFF) : -1; s_calibrator[local_id] = sum = global_id < p-1 ? d_calibrator[global_id] : MAGMA_Z_ZERO; __syncthreads(); // do a fast track if all s_tile_ptr are the same if (s_tile_ptr[0] == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP - 1]) { //sum = sum_32_shfl<vT>(sum); //if (!lane_id) // s_sum[bunch_id] = sum; //__syncthreads(); //if (!bunch_id) //{ // sum = lane_id < (MAGMA_CSR5_THREAD_GROUP // / MAGMA_CSR5_THREAD_BUNCH) ? s_sum[lane_id] : 0; // sum = sum_32_shfl<vT>(sum); //} if (local_id < 64) s_calibrator[local_id] += s_calibrator[local_id+64]; __syncthreads(); if (local_id < 32) s_calibrator[local_id] += s_calibrator[local_id+32]; if (local_id < 16) s_calibrator[local_id] += s_calibrator[local_id+16]; if (local_id < 8) s_calibrator[local_id] += s_calibrator[local_id+8]; if (local_id < 4) s_calibrator[local_id] += s_calibrator[local_id+4]; if (local_id < 2) s_calibrator[local_id] += s_calibrator[local_id+2]; if (local_id < 1) s_calibrator[local_id] += s_calibrator[local_id+1]; if (!local_id) { atomicAddmagmaDoubleComplex(&d_y[s_tile_ptr[0]], s_calibrator[0]); } return; } int local_par_id = local_id; magma_index_t row_start_current, row_start_target, row_start_previous; sum = MAGMA_Z_ZERO; // use (p - 1), due to the tail tile is dealt with CSR-vector method if (global_id < p - 1) { row_start_previous = local_id ? s_tile_ptr[local_id-1] : -1; row_start_current = s_tile_ptr[local_id]; if (row_start_previous != row_start_current) { row_start_target = row_start_current; while (row_start_target == row_start_current && local_par_id < blockDim.x) { sum += s_calibrator[local_par_id]; local_par_id++; row_start_current = s_tile_ptr[local_par_id]; } if (row_start_target == s_tile_ptr[0] || row_start_target == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP-1]) { atomicAddmagmaDoubleComplex(&d_y[row_start_target], sum); } else d_y[row_start_target] += sum; } } } __global__ void spmv_csr5_tail_tile_kernel( const magma_index_t *d_row_pointer, const magma_index_t *d_column_index, magmaDoubleComplex *d_value, magmaDoubleComplex *d_x, magmaDoubleComplex *d_y, const magma_index_t tail_tile_start, const magma_index_t p, const int sigma, const magmaDoubleComplex alpha) { const int local_id = threadIdx.x; const magma_index_t row_id = tail_tile_start + blockIdx.x; const magma_index_t row_start = !blockIdx.x ? 
(p - 1) * MAGMA_CSR5_OMEGA * sigma : d_row_pointer[row_id]; const magma_index_t row_stop = d_row_pointer[row_id + 1]; magmaDoubleComplex sum = MAGMA_Z_ZERO; for (magma_index_t idx = local_id + row_start; idx < row_stop; idx += MAGMA_CSR5_OMEGA) { sum += candidate(d_value, d_x, d_column_index, idx, alpha); } //#if __CUDA_ARCH__ >= 300 // use shfl intrinsic // sum = sum_32_shfl<vT>(sum); //#else __shared__ magmaDoubleComplex s_sum[MAGMA_CSR5_OMEGA]; s_sum[local_id] = sum; sum_32(s_sum, local_id); //#endif if (!local_id) d_y[row_id] += s_sum[0]; //= !blockIdx.x ? d_y[row_id] + sum : sum; } __global__ void zgecsr5mv_kernel_update_y(int num_rows, magmaDoubleComplex beta, magmaDoubleComplex * dy) { const magma_index_t row = blockIdx.x * blockDim.x + threadIdx.x; if (row < num_rows) { if (beta == MAGMA_Z_ZERO) dy[row] = MAGMA_Z_ZERO; else dy[row] *= beta; } } #endif /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR5 (val (tile-wise column-major), row_pointer, col (tile-wise column-major), tile_pointer, tile_desc). Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] p magma_int_t number of tiles in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] sigma magma_int_t sigma in A in CSR5 @param[in] bit_y_offset magma_int_t bit_y_offset in A in CSR5 @param[in] bit_scansum_offset magma_int_t bit_scansum_offset in A in CSR5 @param[in] num_packet magma_int_t num_packet in A in CSR5 @param[in] dtile_ptr magmaUIndex_ptr tilepointer of A in CSR5 @param[in] dtile_desc magmaUIndex_ptr tiledescriptor of A in CSR5 @param[in] dtile_desc_offset_ptr magmaIndex_ptr tiledescriptor_offsetpointer of A in CSR5 @param[in] dtile_desc_offset magmaIndex_ptr tiledescriptor_offsetpointer of A in CSR5 @param[in] dcalibrator magmaDoubleComplex_ptr calibrator of A in CSR5 @param[in] tail_tile_start magma_int_t start of the last tile in A @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsr5mv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t p, magmaDoubleComplex alpha, magma_int_t sigma, magma_int_t bit_y_offset, magma_int_t bit_scansum_offset, magma_int_t num_packet, magmaUIndex_ptr dtile_ptr, magmaUIndex_ptr dtile_desc, magmaIndex_ptr dtile_desc_offset_ptr, magmaIndex_ptr dtile_desc_offset, magmaDoubleComplex_ptr dcalibrator, magma_int_t tail_tile_start, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { int info = MAGMA_ERR_NOT_SUPPORTED; #if (defined( TORCH_HIP_VERSION ) && ( TORCH_HIP_VERSION >= 8000 )) magma_int_t arch = magma_getdevice_arch(); if ( arch >= 600 ) { //dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); //magma_int_t threads = BLOCK_SIZE; //zgecsrmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>> // (m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); // phase 1. update y: y = beta * y magma_int_t num_threads = MAGMA_CSR5_THREAD_GROUP; magma_int_t num_blocks = magma_ceildiv( m, num_threads ); //ceil ((double)m / (double)num_threads); hipLaunchKernelGGL(( zgecsr5mv_kernel_update_y) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , m, beta, dy); // phase 2. spmv: y += alpha * A * x num_threads = MAGMA_CSR5_THREAD_GROUP; num_blocks = magma_ceildiv( p-1, num_threads / MAGMA_CSR5_OMEGA ); // ceil ((double)(p-1) / (double)(num_threads / MAGMA_CSR5_OMEGA)); switch (sigma) { case 4: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<4>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 5: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<5>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 6: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<6>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 7: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<7>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 8: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<8>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 9: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<9>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 10: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<10>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, 
dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 11: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<11>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 12: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<12>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 13: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<13>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 14: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<14>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 15: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<15>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 16: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<16>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 17: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<17>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 18: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<18>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 19: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<19>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 20: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<20>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 21: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<21>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 22: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<22>) , dim3(num_blocks), dim3(num_threads), 0, 
queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 23: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<23>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 24: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<24>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 25: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<25>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 26: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<26>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 27: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<27>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 28: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<28>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 29: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<29>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 30: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<30>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 31: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<31>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 32: hipLaunchKernelGGL(( spmv_csr5_compute_kernel<32>) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; } num_threads = MAGMA_CSR5_THREAD_GROUP; num_blocks = ceil((double)(p-1)/(double)num_threads); hipLaunchKernelGGL(( spmv_csr5_calibrate_kernel) , dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , dtile_ptr, dcalibrator, dy, p); num_threads = MAGMA_CSR5_OMEGA; num_blocks = m - tail_tile_start; hipLaunchKernelGGL(( spmv_csr5_tail_tile_kernel) , 
dim3(num_blocks), dim3(num_threads), 0, queue->cuda_stream() , drowptr, dcolind, dval, dx, dy, tail_tile_start, p, sigma, alpha); info = MAGMA_SUCCESS; } else { info = MAGMA_ERR_NOT_SUPPORTED; } #endif return info; }
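Editor's note: the wrapper above dispatches to spmv_csr5_compute_kernel<c_sigma> through a 29-case switch because the tile height sigma is only known at run time, while the kernel needs it as a compile-time template argument. The self-contained sketch below illustrates the same runtime-to-compile-time dispatch with recursive template instantiation; toy_kernel, SigmaDispatch and the bounds [4, 32] are stand-ins for illustration, not MAGMA code.

#include <cstdio>
#include <cuda_runtime.h>

// Toy stand-in for spmv_csr5_compute_kernel<c_sigma>: it just records its
// compile-time sigma so the dispatch can be checked from the host.
template <int SIGMA>
__global__ void toy_kernel(int *out)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) *out = SIGMA;
}

// Recursively instantiate every SIGMA in [LO, HI] and launch the one that
// matches the runtime value; the partial specialization ends the recursion.
template <int LO, int HI>
struct SigmaDispatch {
    static void launch(int sigma, int *d_out) {
        if (sigma == LO) toy_kernel<LO><<<1, 32>>>(d_out);
        else             SigmaDispatch<LO + 1, HI>::launch(sigma, d_out);
    }
};
template <int HI>
struct SigmaDispatch<HI, HI> {
    static void launch(int sigma, int *d_out) {
        if (sigma == HI) toy_kernel<HI><<<1, 32>>>(d_out);
    }
};

int main()
{
    int *d_out = NULL, h_out = 0;
    cudaMalloc(&d_out, sizeof(int));
    SigmaDispatch<4, 32>::launch(9, d_out);   // same range the switch above covers
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("instantiated kernel with sigma = %d\n", h_out);
    cudaFree(d_out);
    return 0;
}

Collapsing the switch this way is purely a source-size and readability trade-off: the compiled binary still contains one kernel instantiation per sigma value either way.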
e85710632a3700fd9dae737df92e1c9f1ede55a3.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @precisions normal z -> c d s @author Weifeng Liu */ // CSR5 SpMV kernel // see paper by W. Liu and B. Vinter. (2015). // "CSR5: An Efficient Storage Format for Cross-Platform // Sparse Matrix-Vector Multiplication". // 29th ACM International Conference on Supercomputing (ICS15). pp. 339-350. #include "magmasparse_internal.h" #include "atomicopsmagmaDoubleComplex.h" #include <cuda.h> // for CUDA_VERSION #define MAGMA_CSR5_THREAD_GROUP 128 #define MAGMA_CSR5_THREAD_BUNCH 32 #if (defined( CUDA_VERSION ) && ( CUDA_VERSION >= 8000 )) __inline__ __device__ void sum_32( magmaDoubleComplex *s_sum, const int local_id) { if (local_id < 16) s_sum[local_id] += s_sum[local_id + 16]; if (local_id < 8) s_sum[local_id] += s_sum[local_id + 8]; if (local_id < 4) s_sum[local_id] += s_sum[local_id + 4]; if (local_id < 2) s_sum[local_id] += s_sum[local_id + 2]; if (local_id < 1) s_sum[local_id] += s_sum[local_id + 1]; } __inline__ __device__ void scan_32( magmaDoubleComplex *s_scan, const int local_id) { int ai, bi; const int baseai = 2 * local_id + 1; const int basebi = baseai + 1; magmaDoubleComplex temp; if (local_id < 16) { ai = baseai - 1; bi = basebi - 1; s_scan[bi] += s_scan[ai]; } if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; s_scan[bi] += s_scan[ai]; } if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; s_scan[bi] += s_scan[ai]; } if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; s_scan[bi] += s_scan[ai]; } if (local_id == 0) { s_scan[31] = s_scan[15]; s_scan[15] = MAGMA_Z_ZERO; } if (local_id < 2) { ai = 8 * baseai - 1; bi = 8 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; } if (local_id < 4) { ai = 4 * baseai - 1; bi = 4 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; } if (local_id < 8) { ai = 2 * baseai - 1; bi = 2 * basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; } if (local_id < 16) { ai = baseai - 1; bi = basebi - 1; temp = s_scan[ai]; s_scan[ai] = s_scan[bi]; s_scan[bi] += temp; } } __inline__ __device__ magmaDoubleComplex candidate( magmaDoubleComplex *d_value_tile, magmaDoubleComplex *d_x, const magma_index_t *d_column_index_tile, const magma_index_t candidate_index, const magmaDoubleComplex alpha) { magmaDoubleComplex x = MAGMA_Z_ZERO; #if __CUDA_ARCH__ >= 350 x = __ldg(&d_x[d_column_index_tile[candidate_index]]); #else x = d_x[d_column_index_tile[candidate_index]]; #endif return d_value_tile[candidate_index] * x * alpha; } //template<typename vT> //__forceinline__ __device__ //vT segmented_sum_shfl(vT tmp_sum, // const int scansum_offset, // const int lane_id) //{ // vT sum = __shfl_down(tmp_sum, 1); // sum = lane_id == MAGMA_CSR5_OMEGA - 1 ? 0 : sum; // // inclusive scan // vT scan_sum = scan_32_shfl(sum); //scan_32_shfl<vT>(sum, lane_id); // tmp_sum = __shfl_down(scan_sum, scansum_offset); // tmp_sum = tmp_sum - scan_sum + sum; // // return tmp_sum; //} __forceinline__ __device__ magmaDoubleComplex segmented_sum( magmaDoubleComplex tmp_sum, magmaDoubleComplex *s_sum, const magma_index_t scansum_offset, const magma_index_t lane_id) { if (lane_id) s_sum[lane_id - 1] = tmp_sum; s_sum[lane_id] = lane_id == MAGMA_CSR5_OMEGA - 1 ? 
MAGMA_Z_ZERO : s_sum[lane_id]; magmaDoubleComplex sum = tmp_sum = s_sum[lane_id]; scan_32(s_sum, lane_id); // exclusive scan s_sum[lane_id] += tmp_sum; // inclusive scan (exclusive scan+original val) tmp_sum = s_sum[lane_id + scansum_offset]; tmp_sum = tmp_sum - s_sum[lane_id] + sum; return tmp_sum; } template<int c_sigma> __inline__ __device__ void tile_fast_track( magmaDoubleComplex *d_value_tile, magmaDoubleComplex *d_x, const magma_index_t *d_column_index_tile, magmaDoubleComplex *d_calibrator, //#if __CUDA_ARCH__ < 300 magmaDoubleComplex *s_sum, //#endif const int lane_id, const magma_index_t par_id, const magmaDoubleComplex alpha) { magmaDoubleComplex sum = MAGMA_Z_ZERO; #pragma unroll for (int i = 0; i < c_sigma; i++) { sum += candidate(d_value_tile, d_x, d_column_index_tile, i * MAGMA_CSR5_OMEGA + lane_id, alpha); } //#if __CUDA_ARCH__ >= 300 // use shfl intrinsic // sum = sum_32_shfl<vT>(sum); // if (!lane_id) // d_calibrator[par_id] = sum; //#else // use smem s_sum[lane_id] = sum; sum_32(s_sum, lane_id); if (!lane_id) { d_calibrator[par_id] = s_sum[0]; } //#endif } template<int c_sigma> __inline__ __device__ void tile_normal_track( const magma_index_t *d_column_index_tile, magmaDoubleComplex *d_value_tile, magmaDoubleComplex *d_x, const magma_uindex_t *d_tile_desc, const magma_index_t *d_tile_desc_offset_ptr, const magma_index_t *d_tile_desc_offset, magmaDoubleComplex *d_calibrator, magmaDoubleComplex *d_y, //#if __CUDA_ARCH__ < 300 magmaDoubleComplex *s_sum, volatile int *s_scan, //#endif const magma_index_t par_id, const int lane_id, const int bit_y_offset, const int bit_scansum_offset, const bool empty_rows, const magmaDoubleComplex alpha) { int start = 0; int stop = 0; bool local_bit; magmaDoubleComplex sum = MAGMA_Z_ZERO; magma_index_t offset_pointer = empty_rows ? d_tile_desc_offset_ptr[par_id] : 0; magma_uindex_t descriptor = d_tile_desc[lane_id]; magma_index_t y_offset = descriptor >> (32 - bit_y_offset); const int scansum_offset = (descriptor << bit_y_offset) >> (32 - bit_scansum_offset); const int bit_bitflag = 32 - bit_y_offset - bit_scansum_offset; bool direct = false; magmaDoubleComplex first_sum, last_sum; // step 1. thread-level seg sum int ly = 0; // extract the first bit-flag packet descriptor = descriptor << (bit_y_offset + bit_scansum_offset); descriptor = lane_id ? descriptor : descriptor | 0x80000000; local_bit = (descriptor >> 31) & 0x1; start = !local_bit; direct = local_bit & (bool)lane_id; sum = candidate(d_value_tile, d_x, d_column_index_tile, lane_id, alpha); #pragma unroll for (int i = 1; i < c_sigma; i++) { int norm_i = i - bit_bitflag; if (!(ly || norm_i) || (ly && !(31 & norm_i))) { ly++; descriptor = d_tile_desc[ly * MAGMA_CSR5_OMEGA + lane_id]; } norm_i = !ly ? 31 & i : 31 & norm_i; norm_i = 31 - norm_i; local_bit = (descriptor >> norm_i) & 0x1; if (local_bit) { if (direct) d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset] : y_offset] += sum; else first_sum = sum; } y_offset += local_bit & direct; direct |= local_bit; sum = local_bit ? MAGMA_Z_ZERO : sum; stop += local_bit; sum += candidate(d_value_tile, d_x, d_column_index_tile, i * MAGMA_CSR5_OMEGA + lane_id, alpha); } first_sum = direct ? first_sum : sum; last_sum = sum; // step 2. segmented sum sum = start ? first_sum : MAGMA_Z_ZERO; //#if __CUDA_ARCH__ >= 300 // sum = segmented_sum_shfl<vT>(sum, scansum_offset, lane_id); //#else sum = segmented_sum(sum, s_sum, scansum_offset, lane_id); //#endif // step 3-1. add s_sum to position stop last_sum += (start <= stop) ? 
sum : MAGMA_Z_ZERO; // step 3-2. write sums to result array if (direct) d_y[empty_rows ? d_tile_desc_offset[offset_pointer + y_offset] : y_offset] += last_sum; // the first/last value of the first thread goes to calibration if (!lane_id) d_calibrator[par_id] = direct ? first_sum : last_sum; } template<int c_sigma> __inline__ __device__ void spmv_tile( const magma_index_t *d_column_index_tile, magmaDoubleComplex *d_value_tile, const magma_index_t *d_row_pointer, magmaDoubleComplex *d_x, const magma_uindex_t *d_tile_ptr, const magma_uindex_t *d_tile_desc, const magma_index_t *d_tile_desc_offset_ptr, const magma_index_t *d_tile_desc_offset, magmaDoubleComplex *d_calibrator, magmaDoubleComplex *d_y, const magma_index_t par_id, const int lane_id, const int bunch_id, const int bit_y_offset, const int bit_scansum_offset, const magmaDoubleComplex alpha) { //#if __CUDA_ARCH__ < 300 __shared__ magmaDoubleComplex s_sum[MAGMA_CSR5_THREAD_GROUP]; volatile __shared__ int s_scan[(MAGMA_CSR5_OMEGA + 1) * (MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA)]; //#endif magma_uindex_t row_start, row_stop; //#if __CUDA_ARCH__ >= 350 // if (lane_id < 2) // row_start = __ldg(&d_tile_ptr[par_id + lane_id]); // row_stop = __shfl(row_start, 1); // row_start = __shfl(row_start, 0); // row_stop &= 0x7FFFFFFF; //#else volatile __shared__ magma_uindex_t s_row_start_stop[MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1]; if (threadIdx.x < MAGMA_CSR5_THREAD_GROUP / MAGMA_CSR5_OMEGA + 1) { s_row_start_stop[threadIdx.x] = d_tile_ptr[par_id + threadIdx.x]; } __syncthreads(); row_start = s_row_start_stop[bunch_id]; row_stop = s_row_start_stop[bunch_id + 1] & 0x7FFFFFFF; //#endif if (row_start == row_stop) // fast track through reduction { tile_fast_track<c_sigma> (d_value_tile, d_x, d_column_index_tile, d_calibrator, //#if __CUDA_ARCH__ < 300 &s_sum[bunch_id * MAGMA_CSR5_OMEGA], //#endif lane_id, par_id, alpha); } else { const bool empty_rows = (row_start >> 31) & 0x1; row_start &= 0x7FFFFFFF; d_y = &d_y[row_start+1]; tile_normal_track<c_sigma> (d_column_index_tile, d_value_tile, d_x, d_tile_desc, d_tile_desc_offset_ptr, d_tile_desc_offset, d_calibrator, d_y, //#if __CUDA_ARCH__ < 300 &s_sum[bunch_id * MAGMA_CSR5_OMEGA], &s_scan[bunch_id * (MAGMA_CSR5_OMEGA + 1)], //#endif par_id, lane_id, bit_y_offset, bit_scansum_offset, empty_rows, alpha); } } template<int c_sigma> __global__ void spmv_csr5_compute_kernel( const magma_index_t *d_column_index, magmaDoubleComplex *d_value, const magma_index_t *d_row_pointer, magmaDoubleComplex *d_x, const magma_uindex_t *d_tile_ptr, const magma_uindex_t *d_tile_desc, const magma_index_t *d_tile_desc_offset_ptr, const magma_index_t *d_tile_desc_offset, magmaDoubleComplex *d_calibrator, magmaDoubleComplex *d_y, const magma_index_t p, const int num_packet, const int bit_y_offset, const int bit_scansum_offset, const magmaDoubleComplex alpha) { // warp lane id const int lane_id = 31 & threadIdx.x; //threadIdx.x % CSR5_OMEGA; // warp global id == par_id const magma_index_t par_id = (blockIdx.x * blockDim.x + threadIdx.x) / MAGMA_CSR5_OMEGA; const int bunch_id = threadIdx.x / MAGMA_CSR5_OMEGA; if (par_id >= p - 1) return; spmv_tile<c_sigma> (&d_column_index[par_id * MAGMA_CSR5_OMEGA * c_sigma], &d_value[par_id * MAGMA_CSR5_OMEGA * c_sigma], d_row_pointer, d_x, d_tile_ptr, &d_tile_desc[par_id * MAGMA_CSR5_OMEGA * num_packet], d_tile_desc_offset_ptr, d_tile_desc_offset, d_calibrator, d_y, par_id, lane_id, bunch_id, bit_y_offset, bit_scansum_offset, alpha); } __global__ void spmv_csr5_calibrate_kernel( const 
magma_uindex_t *d_tile_ptr, const magmaDoubleComplex *d_calibrator, magmaDoubleComplex *d_y, const magma_index_t p) { //const int lane_id = threadIdx.x % MAGMA_CSR5_THREAD_BUNCH; //const int bunch_id = threadIdx.x / MAGMA_CSR5_THREAD_BUNCH; const int local_id = threadIdx.x; const magma_index_t global_id = blockIdx.x * blockDim.x + threadIdx.x; magmaDoubleComplex sum; volatile __shared__ magma_index_t s_tile_ptr[MAGMA_CSR5_THREAD_GROUP+1]; __shared__ magmaDoubleComplex s_calibrator[MAGMA_CSR5_THREAD_GROUP]; //volatile __shared__ // magmaDoubleComplex s_sum[MAGMA_CSR5_THREAD_GROUP // / MAGMA_CSR5_THREAD_BUNCH]; s_tile_ptr[local_id] = global_id < p-1 ? (magma_index_t)(d_tile_ptr[global_id] & 0x7FFFFFFF) : -1; s_calibrator[local_id] = sum = global_id < p-1 ? d_calibrator[global_id] : MAGMA_Z_ZERO; __syncthreads(); // do a fast track if all s_tile_ptr are the same if (s_tile_ptr[0] == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP - 1]) { //sum = sum_32_shfl<vT>(sum); //if (!lane_id) // s_sum[bunch_id] = sum; //__syncthreads(); //if (!bunch_id) //{ // sum = lane_id < (MAGMA_CSR5_THREAD_GROUP // / MAGMA_CSR5_THREAD_BUNCH) ? s_sum[lane_id] : 0; // sum = sum_32_shfl<vT>(sum); //} if (local_id < 64) s_calibrator[local_id] += s_calibrator[local_id+64]; __syncthreads(); if (local_id < 32) s_calibrator[local_id] += s_calibrator[local_id+32]; if (local_id < 16) s_calibrator[local_id] += s_calibrator[local_id+16]; if (local_id < 8) s_calibrator[local_id] += s_calibrator[local_id+8]; if (local_id < 4) s_calibrator[local_id] += s_calibrator[local_id+4]; if (local_id < 2) s_calibrator[local_id] += s_calibrator[local_id+2]; if (local_id < 1) s_calibrator[local_id] += s_calibrator[local_id+1]; if (!local_id) { atomicAddmagmaDoubleComplex(&d_y[s_tile_ptr[0]], s_calibrator[0]); } return; } int local_par_id = local_id; magma_index_t row_start_current, row_start_target, row_start_previous; sum = MAGMA_Z_ZERO; // use (p - 1), due to the tail tile is dealt with CSR-vector method if (global_id < p - 1) { row_start_previous = local_id ? s_tile_ptr[local_id-1] : -1; row_start_current = s_tile_ptr[local_id]; if (row_start_previous != row_start_current) { row_start_target = row_start_current; while (row_start_target == row_start_current && local_par_id < blockDim.x) { sum += s_calibrator[local_par_id]; local_par_id++; row_start_current = s_tile_ptr[local_par_id]; } if (row_start_target == s_tile_ptr[0] || row_start_target == s_tile_ptr[MAGMA_CSR5_THREAD_GROUP-1]) { atomicAddmagmaDoubleComplex(&d_y[row_start_target], sum); } else d_y[row_start_target] += sum; } } } __global__ void spmv_csr5_tail_tile_kernel( const magma_index_t *d_row_pointer, const magma_index_t *d_column_index, magmaDoubleComplex *d_value, magmaDoubleComplex *d_x, magmaDoubleComplex *d_y, const magma_index_t tail_tile_start, const magma_index_t p, const int sigma, const magmaDoubleComplex alpha) { const int local_id = threadIdx.x; const magma_index_t row_id = tail_tile_start + blockIdx.x; const magma_index_t row_start = !blockIdx.x ? 
(p - 1) * MAGMA_CSR5_OMEGA * sigma : d_row_pointer[row_id]; const magma_index_t row_stop = d_row_pointer[row_id + 1]; magmaDoubleComplex sum = MAGMA_Z_ZERO; for (magma_index_t idx = local_id + row_start; idx < row_stop; idx += MAGMA_CSR5_OMEGA) { sum += candidate(d_value, d_x, d_column_index, idx, alpha); } //#if __CUDA_ARCH__ >= 300 // use shfl intrinsic // sum = sum_32_shfl<vT>(sum); //#else __shared__ magmaDoubleComplex s_sum[MAGMA_CSR5_OMEGA]; s_sum[local_id] = sum; sum_32(s_sum, local_id); //#endif if (!local_id) d_y[row_id] += s_sum[0]; //= !blockIdx.x ? d_y[row_id] + sum : sum; } __global__ void zgecsr5mv_kernel_update_y(int num_rows, magmaDoubleComplex beta, magmaDoubleComplex * dy) { const magma_index_t row = blockIdx.x * blockDim.x + threadIdx.x; if (row < num_rows) { if (beta == MAGMA_Z_ZERO) dy[row] = MAGMA_Z_ZERO; else dy[row] *= beta; } } #endif /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. The input format is CSR5 (val (tile-wise column-major), row_pointer, col (tile-wise column-major), tile_pointer, tile_desc). Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] p magma_int_t number of tiles in A @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] sigma magma_int_t sigma in A in CSR5 @param[in] bit_y_offset magma_int_t bit_y_offset in A in CSR5 @param[in] bit_scansum_offset magma_int_t bit_scansum_offset in A in CSR5 @param[in] num_packet magma_int_t num_packet in A in CSR5 @param[in] dtile_ptr magmaUIndex_ptr tilepointer of A in CSR5 @param[in] dtile_desc magmaUIndex_ptr tiledescriptor of A in CSR5 @param[in] dtile_desc_offset_ptr magmaIndex_ptr tiledescriptor_offsetpointer of A in CSR5 @param[in] dtile_desc_offset magmaIndex_ptr tiledescriptor_offset of A in CSR5 @param[in] dcalibrator magmaDoubleComplex_ptr calibrator of A in CSR5 @param[in] tail_tile_start magma_int_t start of the last tile in A @param[in] dval magmaDoubleComplex_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in.
@ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgecsr5mv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t p, magmaDoubleComplex alpha, magma_int_t sigma, magma_int_t bit_y_offset, magma_int_t bit_scansum_offset, magma_int_t num_packet, magmaUIndex_ptr dtile_ptr, magmaUIndex_ptr dtile_desc, magmaIndex_ptr dtile_desc_offset_ptr, magmaIndex_ptr dtile_desc_offset, magmaDoubleComplex_ptr dcalibrator, magma_int_t tail_tile_start, magmaDoubleComplex_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { int info = MAGMA_ERR_NOT_SUPPORTED; #if (defined( CUDA_VERSION ) && ( CUDA_VERSION >= 8000 )) magma_int_t arch = magma_getdevice_arch(); if ( arch >= 600 ) { //dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); //magma_int_t threads = BLOCK_SIZE; //zgecsrmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>> // (m, n, alpha, dval, drowptr, dcolind, dx, beta, dy); // phase 1. update y: y = beta * y magma_int_t num_threads = MAGMA_CSR5_THREAD_GROUP; magma_int_t num_blocks = magma_ceildiv( m, num_threads ); //ceil ((double)m / (double)num_threads); zgecsr5mv_kernel_update_y <<< num_blocks, num_threads, 0, queue->cuda_stream() >>>(m, beta, dy); // phase 2. spmv: y += alpha * A * x num_threads = MAGMA_CSR5_THREAD_GROUP; num_blocks = magma_ceildiv( p-1, num_threads / MAGMA_CSR5_OMEGA ); // ceil ((double)(p-1) / (double)(num_threads / MAGMA_CSR5_OMEGA)); switch (sigma) { case 4: spmv_csr5_compute_kernel<4> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 5: spmv_csr5_compute_kernel<5> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 6: spmv_csr5_compute_kernel<6> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 7: spmv_csr5_compute_kernel<7> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 8: spmv_csr5_compute_kernel<8> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 9: spmv_csr5_compute_kernel<9> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 10: spmv_csr5_compute_kernel<10> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 11: spmv_csr5_compute_kernel<11> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> 
(dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 12: spmv_csr5_compute_kernel<12> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 13: spmv_csr5_compute_kernel<13> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 14: spmv_csr5_compute_kernel<14> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 15: spmv_csr5_compute_kernel<15> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 16: spmv_csr5_compute_kernel<16> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 17: spmv_csr5_compute_kernel<17> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 18: spmv_csr5_compute_kernel<18> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 19: spmv_csr5_compute_kernel<19> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 20: spmv_csr5_compute_kernel<20> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 21: spmv_csr5_compute_kernel<21> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 22: spmv_csr5_compute_kernel<22> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 23: spmv_csr5_compute_kernel<23> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 24: spmv_csr5_compute_kernel<24> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, 
dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 25: spmv_csr5_compute_kernel<25> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 26: spmv_csr5_compute_kernel<26> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 27: spmv_csr5_compute_kernel<27> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 28: spmv_csr5_compute_kernel<28> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 29: spmv_csr5_compute_kernel<29> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 30: spmv_csr5_compute_kernel<30> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 31: spmv_csr5_compute_kernel<31> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; case 32: spmv_csr5_compute_kernel<32> <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dcolind, dval, drowptr, dx, dtile_ptr, dtile_desc, dtile_desc_offset_ptr, dtile_desc_offset, dcalibrator, dy, p, num_packet, bit_y_offset, bit_scansum_offset, alpha); break; } num_threads = MAGMA_CSR5_THREAD_GROUP; num_blocks = ceil((double)(p-1)/(double)num_threads); spmv_csr5_calibrate_kernel <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (dtile_ptr, dcalibrator, dy, p); num_threads = MAGMA_CSR5_OMEGA; num_blocks = m - tail_tile_start; spmv_csr5_tail_tile_kernel <<< num_blocks, num_threads, 0, queue->cuda_stream() >>> (drowptr, dcolind, dval, dx, dy, tail_tile_start, p, sigma, alpha); info = MAGMA_SUCCESS; } else { info = MAGMA_ERR_NOT_SUPPORTED; } #endif return info; }
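Editor's note: as the documentation block above states, this routine computes y = alpha * A * x + beta * y; the CSR5 tile descriptors, calibrator and tail-tile kernels only change how the per-row sums are parallelized, not the result. A plain CSR host reference such as the sketch below is a convenient way to validate dy after the kernels complete. Here std::complex<double> stands in for magmaDoubleComplex, and csr_spmv_ref is a hypothetical helper, not part of MAGMA.

#include <complex>
#include <vector>
#include <cstdio>

typedef std::complex<double> cplx;

// Reference y = alpha*A*x + beta*y from plain CSR arrays (row-major row pointer,
// column indices, values), one dot product per row.
void csr_spmv_ref(int m, cplx alpha, const std::vector<int>& rowptr,
                  const std::vector<int>& colind, const std::vector<cplx>& val,
                  const std::vector<cplx>& x, cplx beta, std::vector<cplx>& y)
{
    for (int row = 0; row < m; ++row) {
        cplx sum(0.0, 0.0);
        for (int k = rowptr[row]; k < rowptr[row + 1]; ++k)
            sum += val[k] * x[colind[k]];
        y[row] = alpha * sum + beta * y[row];
    }
}

int main()
{
    // 2x2 example: A = [[1, 2], [0, 3]], x = [1, 1], y = [1, 1], alpha = 1, beta = 0
    std::vector<int>  rowptr = {0, 2, 3};
    std::vector<int>  colind = {0, 1, 1};
    std::vector<cplx> val    = {cplx(1), cplx(2), cplx(3)};
    std::vector<cplx> x(2, cplx(1)), y(2, cplx(1));
    csr_spmv_ref(2, cplx(1), rowptr, colind, val, x, cplx(0), y);
    printf("y = (%g, %g)\n", y[0].real(), y[1].real());   // expected: y = (3, 3)
    return 0;
}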
99ac4892deda1000ef89e9d2309e394634a660c7.hip
// !!! This is a file automatically generated by hipify!!! // // Written by Allan Engsig-Karup, October 20, 2010. // // Included C libraries #include <stdio.h> #include <ctime> #include <math.h> #include <assert.h> // Included CUDA libraries #include <cutil_inline.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> // Simple utility function to check for CUDA runtime errors void checkCUDAError(const char* msg); // Included C subroutines #include "fdcoeffF.c" // Macro defining maximum stencil array size #define STENCILARRAYSIZE_MAX 81 // number of times timed calculation is done for averaging #define ITERATIONS 100 // Global scope __constant__ float weightsconstant[STENCILARRAYSIZE_MAX]; // FIX linear memory size and choose maximum size here (FIXME: define maximum size somewhere else!!!) // Included CUDA C subroutines #include "FlexFDM1D_kernel.cu" #include "FlexFDM1D_Gold.c" // External routine for computing finite difference stencils extern void fdcoeffF(int k, float xbar, float x[], float c[], int n); // external function prototype int main(int argc, char *argv[]) { // Screen output printf("FlexFDM1D\n"); printf("Approximation of first derivative in 1D using the finite difference method.\n"); printf(" ./FlexFDM1D <Nx:default=1000> <alpha:default=3> <THREADS_PR_BLOCK:default=MaxOnDevice>\n\n"); // Check limitation of available device int dev = 0; // assumed only one device hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("Device 0: Maximum number of threads per block is %d.\n", deviceProp.maxThreadsPerBlock); int MAX_THREADS_PR_BLOCK = deviceProp.maxThreadsPerBlock; int MAX_BLOCKS = deviceProp.maxGridSize[0]; // Assume default parameters or define based on input argument list int Nx, alpha, THREADS_PR_BLOCK; if (argc>1 ? Nx = strtol(argv[1],NULL,10) : Nx = 1000); printf("Number of points in x-direction, Nx = %d. \n",Nx); if (argc>2 ? alpha = strtol(argv[2],NULL,10) : alpha = 3); printf("Halfwidth of finite difference stencil, alpha = %d. \n",alpha); if (argc>3 ? THREADS_PR_BLOCK = strtol(argv[3],NULL,10) : THREADS_PR_BLOCK = MAX_THREADS_PR_BLOCK); printf("Threads per block = %d. 
\n\n",THREADS_PR_BLOCK); // Compute useful flops to be computed int flops = Nx*2*(2*alpha+1); /************************************************** * Create timers **************************************************/ unsigned timer_cpu; unsigned timer_gpu1; unsigned timer_gpu2; unsigned timer_gpu3; unsigned timer_mem1; unsigned timer_mem2; unsigned timer_mem3; CUT_SAFE_CALL(cutCreateTimer(&timer_cpu)); CUT_SAFE_CALL(cutCreateTimer(&timer_gpu1)); CUT_SAFE_CALL(cutCreateTimer(&timer_gpu2)); CUT_SAFE_CALL(cutCreateTimer(&timer_gpu3)); CUT_SAFE_CALL(cutCreateTimer(&timer_mem1)); CUT_SAFE_CALL(cutCreateTimer(&timer_mem2)); CUT_SAFE_CALL(cutCreateTimer(&timer_mem3)); /************************************************** * Pre-processing **************************************************/ // define order of derivative int q = 1; // define total stencil size int rank = 2*alpha+1; // Setup mesh float* grid = (float*) malloc(sizeof(float)*Nx); for (int i=0; i<Nx; ++i) grid[i] = (float) (i) / (float) (Nx-1); // Setup finite difference weights table float xbar; float* weights_h = (float*) malloc(sizeof(float)*STENCILARRAYSIZE_MAX); // stencil 2d-array float* c = (float*) malloc(sizeof(float)*rank); float* x = (float*) malloc(sizeof(float)*rank); // local grid for use in computing stencils for (int i=0; i<rank; ++i) x[i] = grid[i]; for (int i=0; i<rank; ++i) { xbar = grid[i]; fdcoeffF(q,xbar,x,c,rank); for (int j=0; j<rank; ++j) weights_h[i*rank+j] = c[j]; // array storage according to row major order } /************************************************** * Host execution **************************************************/ printf("Average timings per kernel invocation: \n\n"); // Function and derivative definitions float* U_h = (float*) malloc(sizeof(float)*Nx); float* Ux_h = (float*) malloc(sizeof(float)*Nx); float* UxCPU = (float*) malloc(sizeof(float)*Nx); // Initialize input array for (int i=0; i<Nx; ++i) { U_h[i] = 0.5*grid[i]*grid[i]; // u(x) = 0.5*x^2 Ux_h[i] = 0.0; UxCPU[i] = 0.0; } CUT_SAFE_CALL(cutStartTimer(timer_cpu)); for (unsigned i = 0; i < ITERATIONS; i++) { FlexFDM1D_Gold(U_h, Ux_h, Nx, alpha, weights_h, rank); } // Transfer data to CPU array to be used for comparisons with GPU results for (int i=0; i<Nx; ++i) UxCPU[i] = Ux_h[i]; CUT_SAFE_CALL(cutStopTimer(timer_cpu)); // output a few of the calculated gradients... 
// printf("A few gradients computed by the CPU version\n"); // for (int i=Nx-10; i<Nx; ++i) // printf("x[%d]=%f, Ux[%d]=%f \n",i,grid[i],i,Ux_h[i]); // printf("\n"); /************************************************** * Allocate GPU memory **************************************************/ float *stencils_d; float *U_d; float *Ux_d; // Transfer stencil weights to device memory int size; size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(hipMalloc((void**) &stencils_d, size)); // Allocate memory for result on device size = Nx*sizeof(float); CUDA_SAFE_CALL(hipMalloc((void**) &U_d, size)); CUDA_SAFE_CALL(hipMalloc((void**) &Ux_d, size)); /************************************************** * Device execution **************************************************/ /************************************************** * GPU execution v1 **************************************************/ // make sure that we have at least one block int blocksPerGrid = (Nx + THREADS_PR_BLOCK)/THREADS_PR_BLOCK; blocksPerGrid = min(blocksPerGrid,MAX_BLOCKS); // printf("blocksPerGrid=%d \n",blocksPerGrid); CUT_SAFE_CALL(cutStartTimer(timer_mem1)); size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(hipMemcpy(stencils_d,weights_h,size,hipMemcpyHostToDevice)); size = Nx*sizeof(float); CUDA_SAFE_CALL(hipMemcpy(U_d,U_h,size,hipMemcpyHostToDevice)); CUT_SAFE_CALL(cutStopTimer(timer_mem1)); CUT_SAFE_CALL(cutStartTimer(timer_gpu1)); for (unsigned i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( FlexFDM1D_naive), dim3(blocksPerGrid), dim3(THREADS_PR_BLOCK), 0, 0, U_d, Ux_d, Nx, alpha, stencils_d); } hipDeviceSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation v1"); CUT_SAFE_CALL(cutStopTimer(timer_gpu1)); CUT_SAFE_CALL(cutStartTimer(timer_mem1)); size = Nx*sizeof(float); CUDA_SAFE_CALL(hipMemcpy(Ux_h,Ux_d,size,hipMemcpyDeviceToHost)); CUT_SAFE_CALL(cutStopTimer(timer_mem1)); // output a few of the calculated gradients... 
// printf("A few gradients computed by the GPU version 1\n"); // for (int i=Nx-10; i<Nx; ++i) // printf("x[%d]=%f, Ux[%d]=%f \n",i,grid[i],i,Ux_h[i]); // printf("\n"); // Verification float sum1 = 0.0f; for (int n=0; n<Nx; ++n) sum1 = max(sum1,abs(Ux_h[n]-UxCPU[n])); // Reset device vector CUDA_SAFE_CALL(hipMemset(Ux_d, 0, size)); /************************************************** * GPU execution v2 **************************************************/ // make sure that we have at least one block blocksPerGrid = (Nx + THREADS_PR_BLOCK - 2*alpha)/(THREADS_PR_BLOCK - 2*alpha); blocksPerGrid = min(blocksPerGrid,MAX_BLOCKS); // printf("blocksPerGrid=%d \n",blocksPerGrid); CUT_SAFE_CALL(cutStartTimer(timer_mem2)); size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(hipMemcpy(stencils_d,weights_h,size,hipMemcpyHostToDevice)); size = Nx*sizeof(float); CUDA_SAFE_CALL(hipMemcpy(U_d,U_h,size,hipMemcpyHostToDevice)); CUT_SAFE_CALL(cutStopTimer(timer_mem2)); CUT_SAFE_CALL(cutStartTimer(timer_gpu2)); printf("THREADS_PR_BLOCK = %d\n",THREADS_PR_BLOCK); size = THREADS_PR_BLOCK*sizeof(float)*2; // FIXME: factor of two too much for (unsigned i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( FlexFDM1D_v2), dim3(blocksPerGrid), dim3(THREADS_PR_BLOCK), size, 0, U_d, Ux_d, Nx, alpha, stencils_d); } hipDeviceSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation v2"); CUT_SAFE_CALL(cutStopTimer(timer_gpu2)); CUT_SAFE_CALL(cutStartTimer(timer_mem2)); size = Nx*sizeof(float); CUDA_SAFE_CALL(hipMemcpy(Ux_h,Ux_d,size,hipMemcpyDeviceToHost)); CUT_SAFE_CALL(cutStopTimer(timer_mem2)); // output a few of the calculated gradients... // printf("A few gradients computed by the GPU version 2\n"); // for (int i=0; i<Nx; ++i) // printf("x[%d]=%f, Ux[%d]=%f \n",i,grid[i],i,Ux_h[i]); // printf("\n"); // Verification float sum2 = 0.0f; for (int n=0; n<Nx; ++n) sum2 = max(sum2,abs(Ux_h[n]-UxCPU[n])); // Reset device vector CUDA_SAFE_CALL(hipMemset(Ux_d, 0, size)); /************************************************** * GPU execution v3 **************************************************/ // make sure that we have at least one block blocksPerGrid = (Nx + THREADS_PR_BLOCK - 2*alpha)/(THREADS_PR_BLOCK - 2*alpha); blocksPerGrid = min(blocksPerGrid,MAX_BLOCKS); // printf("blocksPerGrid=%d \n",blocksPerGrid); CUT_SAFE_CALL(cutStartTimer(timer_mem3)); size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(hipMemcpy(stencils_d,weights_h,size,hipMemcpyHostToDevice)); size = Nx*sizeof(float); CUDA_SAFE_CALL(hipMemcpy(U_d,U_h,size,hipMemcpyHostToDevice)); size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(hipMemcpyToSymbol(weightsconstant,weights_h,size,0)); // block data transfer CUT_SAFE_CALL(cutStopTimer(timer_mem3)); CUT_SAFE_CALL(cutStartTimer(timer_gpu3)); size = THREADS_PR_BLOCK*sizeof(float); for (unsigned i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( FlexFDM1D_v3), dim3(blocksPerGrid), dim3(THREADS_PR_BLOCK), size, 0, U_d, Ux_d, Nx, alpha, stencils_d); } hipDeviceSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation v3"); CUT_SAFE_CALL(cutStopTimer(timer_gpu3)); CUT_SAFE_CALL(cutStartTimer(timer_mem3)); size = Nx*sizeof(float); CUDA_SAFE_CALL(hipMemcpy(Ux_h,Ux_d,size,hipMemcpyDeviceToHost)); CUT_SAFE_CALL(cutStopTimer(timer_mem3)); // output a few of the calculated gradients... 
// printf("A few gradients computed by the GPU version 3\n"); // for (int i=0; i<Nx; ++i) // printf("x[%d]=%f, Ux[%d]=%f \n",i,grid[i],i,Ux_h[i]); // printf("\n"); // Verification float sum3 = 0.0f; for (int n=0; n<Nx; ++n) sum3 = max(sum3,abs(Ux_h[n]-UxCPU[n])); // Reset device vector CUDA_SAFE_CALL(hipMemset(Ux_d, 0, size)); /************************************************** * Print timing results **************************************************/ printf(" CPU time : %.4f (ms)\n", cutGetTimerValue(timer_cpu)/ITERATIONS); printf(" CPU flops : %.4f (Gflops) \n\n",flops/(cutGetTimerValue(timer_cpu)/ITERATIONS*(float)(1 << 30))); printf(" GPU v1 time compute: %.4f (ms)\n", cutGetTimerValue(timer_gpu1)/ITERATIONS); printf(" GPU v1 time memory : %.4f (ms)\n", cutGetTimerValue(timer_mem1)); printf(" GPU v1 time total : %.4f (ms): speedup %.2fx\n", cutGetTimerValue(timer_gpu1)/ITERATIONS + cutGetTimerValue(timer_mem1), cutGetTimerValue(timer_cpu)/ITERATIONS/(cutGetTimerValue(timer_gpu1)/ITERATIONS + cutGetTimerValue(timer_mem1))); printf(" GPU v1 flops : %.4f (Gflops) \n",flops/(cutGetTimerValue(timer_gpu1)/ITERATIONS*(float)(1 << 30))); if (sum1<1e-2) { printf(" PASSED\n\n"); } else { printf(" FAILED %.4f \n\n",sum1); } printf(" GPU v2 time compute: %.4f (ms)\n", cutGetTimerValue(timer_gpu2)/ITERATIONS); printf(" GPU v2 time memory : %.4f (ms)\n", cutGetTimerValue(timer_mem2)); printf(" GPU v2 time total : %.4f (ms): speedup %.2fx\n", cutGetTimerValue(timer_gpu2)/ITERATIONS + cutGetTimerValue(timer_mem2), cutGetTimerValue(timer_cpu)/ITERATIONS/(cutGetTimerValue(timer_gpu2)/ITERATIONS + cutGetTimerValue(timer_mem2))); printf(" GPU v2 flops : %.4f (Gflops) \n",flops/(cutGetTimerValue(timer_gpu2)/ITERATIONS*(float)(1 << 30))); if (sum2<1e-2) { printf(" PASSED\n\n"); } else { printf(" FAILED %.4f \n\n",sum2); } printf(" GPU v3 time compute: %.4f (ms)\n", cutGetTimerValue(timer_gpu3)/ITERATIONS); printf(" GPU v3 time memory : %.4f (ms)\n", cutGetTimerValue(timer_mem3)); printf(" GPU v3 time total : %.4f (ms): speedup %.2fx\n", cutGetTimerValue(timer_gpu3)/ITERATIONS + cutGetTimerValue(timer_mem3), cutGetTimerValue(timer_cpu)/ITERATIONS/(cutGetTimerValue(timer_gpu3)/ITERATIONS + cutGetTimerValue(timer_mem3))); printf(" GPU v3 flops : %.4f (Gflops) \n",flops/(cutGetTimerValue(timer_gpu3)/ITERATIONS*(float)(1 << 30))); if (sum3<1e-2) { printf(" PASSED\n\n"); } else { printf(" FAILED %.4f \n\n",sum3); } /************************************************** * Free data structures **************************************************/ CUDA_SAFE_CALL(hipFree(U_d)); CUDA_SAFE_CALL(hipFree(Ux_d)); CUDA_SAFE_CALL(hipFree(stencils_d)); return 0; } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } }
99ac4892deda1000ef89e9d2309e394634a660c7.cu
// // Written by Allan Engsig-Karup, October 20, 2010. // // Included C libraries #include <stdio.h> #include <ctime> #include <math.h> #include <assert.h> // Included CUDA libraries #include <cutil_inline.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> // Simple utility function to check for CUDA runtime errors void checkCUDAError(const char* msg); // Included C subroutines #include "fdcoeffF.c" // Macro defining maximum stencil array size #define STENCILARRAYSIZE_MAX 81 // number of times timed calculation is done for averaging #define ITERATIONS 100 // Global scope __constant__ float weightsconstant[STENCILARRAYSIZE_MAX]; // FIX linear memory size and choose maximum size here (FIXME: define maximum size somewhere else!!!) // Included CUDA C subroutines #include "FlexFDM1D_kernel.cu" #include "FlexFDM1D_Gold.c" // External routine for computing finite difference stencils extern void fdcoeffF(int k, float xbar, float x[], float c[], int n); // external function prototype int main(int argc, char *argv[]) { // Screen output printf("FlexFDM1D\n"); printf("Approximation of first derivative in 1D using the finite difference method.\n"); printf(" ./FlexFDM1D <Nx:default=1000> <alpha:default=3> <THREADS_PR_BLOCK:default=MaxOnDevice>\n\n"); // Check limitation of available device int dev = 0; // assumed only one device cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("Device 0: Maximum number of threads per block is %d.\n", deviceProp.maxThreadsPerBlock); int MAX_THREADS_PR_BLOCK = deviceProp.maxThreadsPerBlock; int MAX_BLOCKS = deviceProp.maxGridSize[0]; // Assume default parameters or define based on input argument list int Nx, alpha, THREADS_PR_BLOCK; if (argc>1 ? Nx = strtol(argv[1],NULL,10) : Nx = 1000); printf("Number of points in x-direction, Nx = %d. \n",Nx); if (argc>2 ? alpha = strtol(argv[2],NULL,10) : alpha = 3); printf("Halfwidth of finite difference stencil, alpha = %d. \n",alpha); if (argc>3 ? THREADS_PR_BLOCK = strtol(argv[3],NULL,10) : THREADS_PR_BLOCK = MAX_THREADS_PR_BLOCK); printf("Threads per block = %d. 
\n\n",THREADS_PR_BLOCK); // Compute useful flops to be computed int flops = Nx*2*(2*alpha+1); /************************************************** * Create timers **************************************************/ unsigned timer_cpu; unsigned timer_gpu1; unsigned timer_gpu2; unsigned timer_gpu3; unsigned timer_mem1; unsigned timer_mem2; unsigned timer_mem3; CUT_SAFE_CALL(cutCreateTimer(&timer_cpu)); CUT_SAFE_CALL(cutCreateTimer(&timer_gpu1)); CUT_SAFE_CALL(cutCreateTimer(&timer_gpu2)); CUT_SAFE_CALL(cutCreateTimer(&timer_gpu3)); CUT_SAFE_CALL(cutCreateTimer(&timer_mem1)); CUT_SAFE_CALL(cutCreateTimer(&timer_mem2)); CUT_SAFE_CALL(cutCreateTimer(&timer_mem3)); /************************************************** * Pre-processing **************************************************/ // define order of derivative int q = 1; // define total stencil size int rank = 2*alpha+1; // Setup mesh float* grid = (float*) malloc(sizeof(float)*Nx); for (int i=0; i<Nx; ++i) grid[i] = (float) (i) / (float) (Nx-1); // Setup finite difference weights table float xbar; float* weights_h = (float*) malloc(sizeof(float)*STENCILARRAYSIZE_MAX); // stencil 2d-array float* c = (float*) malloc(sizeof(float)*rank); float* x = (float*) malloc(sizeof(float)*rank); // local grid for use in computing stencils for (int i=0; i<rank; ++i) x[i] = grid[i]; for (int i=0; i<rank; ++i) { xbar = grid[i]; fdcoeffF(q,xbar,x,c,rank); for (int j=0; j<rank; ++j) weights_h[i*rank+j] = c[j]; // array storage according to row major order } /************************************************** * Host execution **************************************************/ printf("Average timings per kernel invocation: \n\n"); // Function and derivative definitions float* U_h = (float*) malloc(sizeof(float)*Nx); float* Ux_h = (float*) malloc(sizeof(float)*Nx); float* UxCPU = (float*) malloc(sizeof(float)*Nx); // Initialize input array for (int i=0; i<Nx; ++i) { U_h[i] = 0.5*grid[i]*grid[i]; // u(x) = 0.5*x^2 Ux_h[i] = 0.0; UxCPU[i] = 0.0; } CUT_SAFE_CALL(cutStartTimer(timer_cpu)); for (unsigned i = 0; i < ITERATIONS; i++) { FlexFDM1D_Gold(U_h, Ux_h, Nx, alpha, weights_h, rank); } // Transfer data to CPU array to be used for comparisons with GPU results for (int i=0; i<Nx; ++i) UxCPU[i] = Ux_h[i]; CUT_SAFE_CALL(cutStopTimer(timer_cpu)); // output a few of the calculated gradients... 
// printf("A few gradients computed by the CPU version\n"); // for (int i=Nx-10; i<Nx; ++i) // printf("x[%d]=%f, Ux[%d]=%f \n",i,grid[i],i,Ux_h[i]); // printf("\n"); /************************************************** * Allocate GPU memory **************************************************/ float *stencils_d; float *U_d; float *Ux_d; // Transfer stencil weights to device memory int size; size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(cudaMalloc((void**) &stencils_d, size)); // Allocate memory for result on device size = Nx*sizeof(float); CUDA_SAFE_CALL(cudaMalloc((void**) &U_d, size)); CUDA_SAFE_CALL(cudaMalloc((void**) &Ux_d, size)); /************************************************** * Device execution **************************************************/ /************************************************** * GPU execution v1 **************************************************/ // make sure that we have at least one block int blocksPerGrid = (Nx + THREADS_PR_BLOCK)/THREADS_PR_BLOCK; blocksPerGrid = min(blocksPerGrid,MAX_BLOCKS); // printf("blocksPerGrid=%d \n",blocksPerGrid); CUT_SAFE_CALL(cutStartTimer(timer_mem1)); size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(cudaMemcpy(stencils_d,weights_h,size,cudaMemcpyHostToDevice)); size = Nx*sizeof(float); CUDA_SAFE_CALL(cudaMemcpy(U_d,U_h,size,cudaMemcpyHostToDevice)); CUT_SAFE_CALL(cutStopTimer(timer_mem1)); CUT_SAFE_CALL(cutStartTimer(timer_gpu1)); for (unsigned i = 0; i < ITERATIONS; i++) { FlexFDM1D_naive<<< blocksPerGrid, THREADS_PR_BLOCK>>>(U_d, Ux_d, Nx, alpha, stencils_d); } cudaThreadSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation v1"); CUT_SAFE_CALL(cutStopTimer(timer_gpu1)); CUT_SAFE_CALL(cutStartTimer(timer_mem1)); size = Nx*sizeof(float); CUDA_SAFE_CALL(cudaMemcpy(Ux_h,Ux_d,size,cudaMemcpyDeviceToHost)); CUT_SAFE_CALL(cutStopTimer(timer_mem1)); // output a few of the calculated gradients... 
// printf("A few gradients computed by the GPU version 1\n"); // for (int i=Nx-10; i<Nx; ++i) // printf("x[%d]=%f, Ux[%d]=%f \n",i,grid[i],i,Ux_h[i]); // printf("\n"); // Verification float sum1 = 0.0f; for (int n=0; n<Nx; ++n) sum1 = max(sum1,abs(Ux_h[n]-UxCPU[n])); // Reset device vector CUDA_SAFE_CALL(cudaMemset(Ux_d, 0, size)); /************************************************** * GPU execution v2 **************************************************/ // make sure that we have at least one block blocksPerGrid = (Nx + THREADS_PR_BLOCK - 2*alpha)/(THREADS_PR_BLOCK - 2*alpha); blocksPerGrid = min(blocksPerGrid,MAX_BLOCKS); // printf("blocksPerGrid=%d \n",blocksPerGrid); CUT_SAFE_CALL(cutStartTimer(timer_mem2)); size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(cudaMemcpy(stencils_d,weights_h,size,cudaMemcpyHostToDevice)); size = Nx*sizeof(float); CUDA_SAFE_CALL(cudaMemcpy(U_d,U_h,size,cudaMemcpyHostToDevice)); CUT_SAFE_CALL(cutStopTimer(timer_mem2)); CUT_SAFE_CALL(cutStartTimer(timer_gpu2)); printf("THREADS_PR_BLOCK = %d\n",THREADS_PR_BLOCK); size = THREADS_PR_BLOCK*sizeof(float)*2; // FIXME: factor of two too much for (unsigned i = 0; i < ITERATIONS; i++) { FlexFDM1D_v2<<< blocksPerGrid, THREADS_PR_BLOCK, size>>>(U_d, Ux_d, Nx, alpha, stencils_d); } cudaThreadSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation v2"); CUT_SAFE_CALL(cutStopTimer(timer_gpu2)); CUT_SAFE_CALL(cutStartTimer(timer_mem2)); size = Nx*sizeof(float); CUDA_SAFE_CALL(cudaMemcpy(Ux_h,Ux_d,size,cudaMemcpyDeviceToHost)); CUT_SAFE_CALL(cutStopTimer(timer_mem2)); // output a few of the calculated gradients... // printf("A few gradients computed by the GPU version 2\n"); // for (int i=0; i<Nx; ++i) // printf("x[%d]=%f, Ux[%d]=%f \n",i,grid[i],i,Ux_h[i]); // printf("\n"); // Verification float sum2 = 0.0f; for (int n=0; n<Nx; ++n) sum2 = max(sum2,abs(Ux_h[n]-UxCPU[n])); // Reset device vector CUDA_SAFE_CALL(cudaMemset(Ux_d, 0, size)); /************************************************** * GPU execution v3 **************************************************/ // make sure that we have at least one block blocksPerGrid = (Nx + THREADS_PR_BLOCK - 2*alpha)/(THREADS_PR_BLOCK - 2*alpha); blocksPerGrid = min(blocksPerGrid,MAX_BLOCKS); // printf("blocksPerGrid=%d \n",blocksPerGrid); CUT_SAFE_CALL(cutStartTimer(timer_mem3)); size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(cudaMemcpy(stencils_d,weights_h,size,cudaMemcpyHostToDevice)); size = Nx*sizeof(float); CUDA_SAFE_CALL(cudaMemcpy(U_d,U_h,size,cudaMemcpyHostToDevice)); size = (rank*rank)*sizeof(float); CUDA_SAFE_CALL(cudaMemcpyToSymbol(weightsconstant,weights_h,size,0)); // block data transfer CUT_SAFE_CALL(cutStopTimer(timer_mem3)); CUT_SAFE_CALL(cutStartTimer(timer_gpu3)); size = THREADS_PR_BLOCK*sizeof(float); for (unsigned i = 0; i < ITERATIONS; i++) { FlexFDM1D_v3<<< blocksPerGrid, THREADS_PR_BLOCK, size>>>(U_d, Ux_d, Nx, alpha, stencils_d); } cudaThreadSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation v3"); CUT_SAFE_CALL(cutStopTimer(timer_gpu3)); CUT_SAFE_CALL(cutStartTimer(timer_mem3)); size = Nx*sizeof(float); CUDA_SAFE_CALL(cudaMemcpy(Ux_h,Ux_d,size,cudaMemcpyDeviceToHost)); CUT_SAFE_CALL(cutStopTimer(timer_mem3)); // output a few of the calculated gradients... 
// printf("A few gradients computed by the GPU version 3\n"); // for (int i=0; i<Nx; ++i) // printf("x[%d]=%f, Ux[%d]=%f \n",i,grid[i],i,Ux_h[i]); // printf("\n"); // Verification float sum3 = 0.0f; for (int n=0; n<Nx; ++n) sum3 = max(sum3,abs(Ux_h[n]-UxCPU[n])); // Reset device vector CUDA_SAFE_CALL(cudaMemset(Ux_d, 0, size)); /************************************************** * Print timing results **************************************************/ printf(" CPU time : %.4f (ms)\n", cutGetTimerValue(timer_cpu)/ITERATIONS); printf(" CPU flops : %.4f (Gflops) \n\n",flops/(cutGetTimerValue(timer_cpu)/ITERATIONS*(float)(1 << 30))); printf(" GPU v1 time compute: %.4f (ms)\n", cutGetTimerValue(timer_gpu1)/ITERATIONS); printf(" GPU v1 time memory : %.4f (ms)\n", cutGetTimerValue(timer_mem1)); printf(" GPU v1 time total : %.4f (ms): speedup %.2fx\n", cutGetTimerValue(timer_gpu1)/ITERATIONS + cutGetTimerValue(timer_mem1), cutGetTimerValue(timer_cpu)/ITERATIONS/(cutGetTimerValue(timer_gpu1)/ITERATIONS + cutGetTimerValue(timer_mem1))); printf(" GPU v1 flops : %.4f (Gflops) \n",flops/(cutGetTimerValue(timer_gpu1)/ITERATIONS*(float)(1 << 30))); if (sum1<1e-2) { printf(" PASSED\n\n"); } else { printf(" FAILED %.4f \n\n",sum1); } printf(" GPU v2 time compute: %.4f (ms)\n", cutGetTimerValue(timer_gpu2)/ITERATIONS); printf(" GPU v2 time memory : %.4f (ms)\n", cutGetTimerValue(timer_mem2)); printf(" GPU v2 time total : %.4f (ms): speedup %.2fx\n", cutGetTimerValue(timer_gpu2)/ITERATIONS + cutGetTimerValue(timer_mem2), cutGetTimerValue(timer_cpu)/ITERATIONS/(cutGetTimerValue(timer_gpu2)/ITERATIONS + cutGetTimerValue(timer_mem2))); printf(" GPU v2 flops : %.4f (Gflops) \n",flops/(cutGetTimerValue(timer_gpu2)/ITERATIONS*(float)(1 << 30))); if (sum2<1e-2) { printf(" PASSED\n\n"); } else { printf(" FAILED %.4f \n\n",sum2); } printf(" GPU v3 time compute: %.4f (ms)\n", cutGetTimerValue(timer_gpu3)/ITERATIONS); printf(" GPU v3 time memory : %.4f (ms)\n", cutGetTimerValue(timer_mem3)); printf(" GPU v3 time total : %.4f (ms): speedup %.2fx\n", cutGetTimerValue(timer_gpu3)/ITERATIONS + cutGetTimerValue(timer_mem3), cutGetTimerValue(timer_cpu)/ITERATIONS/(cutGetTimerValue(timer_gpu3)/ITERATIONS + cutGetTimerValue(timer_mem3))); printf(" GPU v3 flops : %.4f (Gflops) \n",flops/(cutGetTimerValue(timer_gpu3)/ITERATIONS*(float)(1 << 30))); if (sum3<1e-2) { printf(" PASSED\n\n"); } else { printf(" FAILED %.4f \n\n",sum3); } /************************************************** * Free data structures **************************************************/ CUDA_SAFE_CALL(cudaFree(U_d)); CUDA_SAFE_CALL(cudaFree(Ux_d)); CUDA_SAFE_CALL(cudaFree(stencils_d)); return 0; } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } }
dab9bbd7408ac093467c3a009e812347794a5cd8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <class T> __global__ void im2col(const T* data_im, int num_outs, int im_height, int im_width, int dilation_h, int dilation_w, int filter_height, int filter_width, int stride_height, int stride_width, int padding_height, int padding_width, int col_height, int col_width, T* data_col) { const int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < num_outs) { int w_out = index % col_width; int h_out = (index / col_width) % col_height; int channel_in = index / col_width / col_height; int channel_out = channel_in * filter_height * filter_width; int h_in = h_out * stride_height - padding_height; int w_in = w_out * stride_width - padding_width; data_col += (channel_out * col_height + h_out) * col_width + w_out; data_im += (channel_in * im_height + h_in) * im_width + w_in; for (int i = 0; i < filter_height; ++i) { for (int j = 0; j < filter_width; ++j) { int rIdx = h_in + i * dilation_h; int cIdx = w_in + j * dilation_w; *data_col = (rIdx >= im_height || rIdx < 0 || cIdx >= im_width || cIdx < 0) ? 
0 : data_im[i * dilation_h * im_width + j * dilation_w]; data_col += col_height * col_width; } } } } /* * im = [input_channels, input_height, input_width] * col = * [input_channels, filter_height, filter_width, output_height, output_width] */ template <class T> class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& im, const std::vector<int>& dilation, const std::vector<int>& stride, const std::vector<int>& padding, framework::Tensor* col) { PADDLE_ENFORCE(im.dims().size() == 3); PADDLE_ENFORCE(col->dims().size() == 5); int im_channels = im.dims()[0]; int im_height = im.dims()[1]; int im_width = im.dims()[2]; int filter_height = col->dims()[1]; int filter_width = col->dims()[2]; int col_height = col->dims()[3]; int col_width = col->dims()[4]; int num_outputs = im_channels * col_height * col_width; int blocks = (num_outputs + 1024 - 1) / 1024; int block_x = 512; int block_y = (blocks + 512 - 1) / 512; dim3 threads(1024, 1); dim3 grid(block_x, block_y); hipLaunchKernelGGL(( im2col<T>), dim3(grid), dim3(threads), 0, context.stream(), im.data<T>(), num_outputs, im_height, im_width, dilation[0], dilation[1], filter_height, filter_width, stride[0], stride[1], padding[0], padding[1], col_height, col_width, col->data<T>()); } }; template <class T> __global__ void col2im(int n, const T* data_col, int im_height, int im_width, int dilation_h, int dilation_w, int filter_height, int filter_width, int stride_height, int stride_width, int padding_height, int padding_width, int col_height, int col_width, T* data_im) { const int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; const int d_filter_height = dilation_h * (filter_height - 1) + 1; const int d_filter_width = dilation_w * (filter_width - 1) + 1; if (index < n) { T val = 0; int w = index % im_width + padding_width; int h = (index / im_width) % im_height + padding_height; int c = index / (im_width * im_height); // compute the start and end of the output int w_col_start = (w < d_filter_width) ? 0 : (w - d_filter_width) / stride_width + 1; int w_col_end = min(w / stride_width + 1, col_width); int h_col_start = (h < d_filter_height) ? 
0 : (h - d_filter_height) / stride_height + 1; int h_col_end = min(h / stride_height + 1, col_height); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_off = (h - h_col * stride_height); int w_off = (w - w_col * stride_width); if (h_off % dilation_h == 0 && w_off % dilation_w == 0) { h_off /= dilation_h; w_off /= dilation_w; int data_col_index = (((c * filter_height + h_off) * filter_width + w_off) * col_height + h_col) * col_width + w_col; val += data_col[data_col_index]; } } } data_im[index] = val; } } /* * im = [input_channels, input_height, input_width] * col = * [input_channels, filter_height, filter_width, output_height, output_width] */ template <class T> class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& col, const std::vector<int>& dilation, const std::vector<int>& stride, const std::vector<int>& padding, framework::Tensor* im) { PADDLE_ENFORCE(im->dims().size() == 3); PADDLE_ENFORCE(col.dims().size() == 5); int im_channels = im->dims()[0]; int im_height = im->dims()[1]; int im_width = im->dims()[2]; int filter_height = col.dims()[1]; int filter_width = col.dims()[2]; int col_height = col.dims()[3]; int col_width = col.dims()[4]; PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - (dilation[0] * (filter_height - 1) + 1)) / stride[0] + 1, col_height, "Output_height and padding(padding_up, padding_down) are " "inconsistent."); PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - (dilation[1] * (filter_width - 1) + 1)) / stride[1] + 1, col_width, "col_width and padding(padding_left, padding_right) are " "inconsistent."); size_t num_kernels = im_channels * im_height * im_width; size_t blocks = (num_kernels + 1024 - 1) / 1024; size_t block_x = 512; size_t block_y = (blocks + 512 - 1) / 512; dim3 threads(1024, 1); dim3 grid(block_x, block_y); // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
hipLaunchKernelGGL(( col2im<T>), dim3(grid), dim3(threads), 0, context.stream(), num_kernels, col.data<T>(), im_height, im_width, dilation[0], dilation[1], filter_height, filter_width, stride[0], stride[1], padding[0], padding[2], col_height, col_width, im->data<T>()); } }; template class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, float>; template class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, double>; template class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, float>; template class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, double>; template <class T> __global__ void im2colOCF(const T* im_data, int im_channels, int im_height, int im_width, int filter_height, int filter_width, int stride_height, int stride_width, int padding_height, int padding_width, int col_height, int col_width, T* col_data) { int swid = blockIdx.x; int shid = blockIdx.y; for (int channelid = threadIdx.z; channelid < im_channels; channelid += blockDim.z) { for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) { for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) { int width_offset = idx + swid * stride_width - padding_width; int height_offset = idy + shid * stride_height - padding_height; int im_offset = width_offset + height_offset * im_width + channelid * im_height * im_width; int col_offset = idx + idy * filter_width + channelid * filter_height * filter_width + (shid * col_width + swid) * (im_channels * filter_height * filter_width); col_data[col_offset] = (height_offset >= im_height || height_offset < 0 || width_offset >= im_width || width_offset < 0) ? T(0) : im_data[im_offset]; } } } } /* * im = [input_channels, input_height, input_width] * col = * [output_height, output_width, input_channels, filter_height, filter_width] */ template <class T> class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& im, const std::vector<int>& dilation, const std::vector<int>& stride, const std::vector<int>& padding, framework::Tensor* col) { PADDLE_ENFORCE(im.dims().size() == 3); PADDLE_ENFORCE(col->dims().size() == 5); int im_channels = im.dims()[0]; int im_height = im.dims()[1]; int im_width = im.dims()[2]; int filter_height = col->dims()[3]; int filter_width = col->dims()[4]; int col_height = col->dims()[0]; int col_width = col->dims()[1]; int block_dim_x = 0; int block_dim_y = 0; if (filter_height <= 4 && filter_width <= 4) { block_dim_x = 4; block_dim_y = 4; } else if (filter_height <= 8 && filter_width <= 8) { block_dim_x = 8; block_dim_y = 8; } else if (filter_height <= 16 && filter_width <= 16) { block_dim_x = 16; block_dim_y = 16; } else { block_dim_x = 32; block_dim_y = 32; } int block_dim_z = 1024 / block_dim_x / block_dim_y; dim3 threads(block_dim_x, block_dim_y, ::min(block_dim_z, im_channels)); dim3 grid(col_width, col_height); hipLaunchKernelGGL(( im2colOCF<T>), dim3(grid), dim3(threads), 0, context.stream(), im.data<T>(), im_channels, im_height, im_width, filter_height, filter_width, stride[0], stride[1], padding[0], padding[1], col_height, col_width, col->data<T>()); } }; template <class T> __global__ void col2imOCF(const T* col_data, int im_channels, int im_height, int im_width, int filter_height, int filter_width, int stride_height, int stride_width, int padding_height, int 
padding_width, int col_height, int col_width, T* im_data) { int swid = blockIdx.x; int shid = blockIdx.y; for (int channelid = threadIdx.z; channelid < im_channels; channelid += blockDim.z) { for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) { for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) { int width_offset = idx + swid * stride_width - padding_width; int height_offset = idy + shid * stride_height - padding_height; int im_offset = width_offset + height_offset * im_width + channelid * im_height * im_width; int col_offset = idx + idy * filter_width + channelid * filter_height * filter_width + (shid * col_width + swid) * (im_channels * filter_height * filter_width); if (height_offset >= 0 && height_offset < im_height && width_offset >= 0 && width_offset < im_width) { paddle::platform::CudaAtomicAdd(im_data + im_offset, col_data[col_offset]); } } } } } /* * im = [input_channels, input_height, input_width] * col = * [output_height, output_width, input_channels, filter_height, filter_width] */ template <class T> class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& col, const std::vector<int>& dilation, const std::vector<int>& stride, const std::vector<int>& padding, framework::Tensor* im) { PADDLE_ENFORCE(im->dims().size() == 3); PADDLE_ENFORCE(col.dims().size() == 5); int im_channels = im->dims()[0]; int im_height = im->dims()[1]; int im_width = im->dims()[2]; int filter_height = col.dims()[3]; int filter_width = col.dims()[4]; int col_height = col.dims()[0]; int col_width = col.dims()[1]; PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - (dilation[0] * (filter_height - 1) + 1)) / stride[0] + 1, col_height, "Output_height and padding(padding_up, padding_down) are " "inconsistent."); PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - (dilation[1] * (filter_width - 1) + 1)) / stride[1] + 1, col_width, "col_width and padding(padding_left, padding_right) are " "inconsistent."); int block_dim_x = 0; int block_dim_y = 0; if (filter_height <= 4 && filter_width <= 4) { block_dim_x = 4; block_dim_y = 4; } else if (filter_height <= 8 && filter_width <= 8) { block_dim_x = 8; block_dim_y = 8; } else if (filter_height <= 16 && filter_width <= 16) { block_dim_x = 16; block_dim_y = 16; } else { block_dim_x = 32; block_dim_y = 32; } int block_dim_z = 1024 / block_dim_x / block_dim_y; dim3 threads(block_dim_x, block_dim_y, ::min(block_dim_z, im_channels)); dim3 grid(col_width, col_height); hipLaunchKernelGGL(( col2imOCF<T>), dim3(grid), dim3(threads), 0, context.stream(), col.data<T>(), im_channels, im_height, im_width, filter_height, filter_width, stride[0], stride[1], padding[0], padding[1], col_height, col_width, im->data<T>()); } }; template class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, float>; template class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, double>; template class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, float>; template class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
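Both im2col and col2im above flatten a (gridDim.x, gridDim.y) grid of 1024-thread blocks into a single linear element index. The small sketch below walks through that launch-shape arithmetic with a made-up element count; it only demonstrates the index mapping and is not part of the Paddle sources.

// Illustrative sketch of the launch shape used by the im2col/col2im kernels:
// the grid is (512, ceil(ceil(n/1024)/512)) blocks of 1024 threads, and each
// thread recovers a flat element index from its block coordinates.
#include <cstdio>
#include <cuda_runtime.h>

// Same flattening the kernels above use:
//   index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x
__global__ void flatIndexDemo(int n, int* out) {
    const int index =
        (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
    if (index < n) out[index] = index;
}

int main() {
    const int n = 3 * 1000 * 1000;                 // e.g. channels * col_height * col_width
    const int blocks  = (n + 1024 - 1) / 1024;     // total 1024-thread blocks needed
    const int block_x = 512;                       // grid.x is capped at 512 blocks
    const int block_y = (blocks + 512 - 1) / 512;  // remaining blocks spill into grid.y
    dim3 threads(1024, 1);
    dim3 grid(block_x, block_y);
    printf("grid = (%d, %d), threads/block = 1024, covered = %lld (need %d)\n",
           block_x, block_y, (long long)block_x * block_y * 1024, n);

    int* d_out;
    cudaMalloc(&d_out, n * sizeof(int));
    flatIndexDemo<<<grid, threads>>>(n, d_out);
    cudaDeviceSynchronize();
    cudaFree(d_out);
    return 0;
}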
dab9bbd7408ac093467c3a009e812347794a5cd8.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <vector> #include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { template <class T> __global__ void im2col(const T* data_im, int num_outs, int im_height, int im_width, int dilation_h, int dilation_w, int filter_height, int filter_width, int stride_height, int stride_width, int padding_height, int padding_width, int col_height, int col_width, T* data_col) { const int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < num_outs) { int w_out = index % col_width; int h_out = (index / col_width) % col_height; int channel_in = index / col_width / col_height; int channel_out = channel_in * filter_height * filter_width; int h_in = h_out * stride_height - padding_height; int w_in = w_out * stride_width - padding_width; data_col += (channel_out * col_height + h_out) * col_width + w_out; data_im += (channel_in * im_height + h_in) * im_width + w_in; for (int i = 0; i < filter_height; ++i) { for (int j = 0; j < filter_width; ++j) { int rIdx = h_in + i * dilation_h; int cIdx = w_in + j * dilation_w; *data_col = (rIdx >= im_height || rIdx < 0 || cIdx >= im_width || cIdx < 0) ? 
0 : data_im[i * dilation_h * im_width + j * dilation_w]; data_col += col_height * col_width; } } } } /* * im = [input_channels, input_height, input_width] * col = * [input_channels, filter_height, filter_width, output_height, output_width] */ template <class T> class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& im, const std::vector<int>& dilation, const std::vector<int>& stride, const std::vector<int>& padding, framework::Tensor* col) { PADDLE_ENFORCE(im.dims().size() == 3); PADDLE_ENFORCE(col->dims().size() == 5); int im_channels = im.dims()[0]; int im_height = im.dims()[1]; int im_width = im.dims()[2]; int filter_height = col->dims()[1]; int filter_width = col->dims()[2]; int col_height = col->dims()[3]; int col_width = col->dims()[4]; int num_outputs = im_channels * col_height * col_width; int blocks = (num_outputs + 1024 - 1) / 1024; int block_x = 512; int block_y = (blocks + 512 - 1) / 512; dim3 threads(1024, 1); dim3 grid(block_x, block_y); im2col<T><<<grid, threads, 0, context.stream()>>>( im.data<T>(), num_outputs, im_height, im_width, dilation[0], dilation[1], filter_height, filter_width, stride[0], stride[1], padding[0], padding[1], col_height, col_width, col->data<T>()); } }; template <class T> __global__ void col2im(int n, const T* data_col, int im_height, int im_width, int dilation_h, int dilation_w, int filter_height, int filter_width, int stride_height, int stride_width, int padding_height, int padding_width, int col_height, int col_width, T* data_im) { const int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; const int d_filter_height = dilation_h * (filter_height - 1) + 1; const int d_filter_width = dilation_w * (filter_width - 1) + 1; if (index < n) { T val = 0; int w = index % im_width + padding_width; int h = (index / im_width) % im_height + padding_height; int c = index / (im_width * im_height); // compute the start and end of the output int w_col_start = (w < d_filter_width) ? 0 : (w - d_filter_width) / stride_width + 1; int w_col_end = min(w / stride_width + 1, col_width); int h_col_start = (h < d_filter_height) ? 
0 : (h - d_filter_height) / stride_height + 1; int h_col_end = min(h / stride_height + 1, col_height); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { int h_off = (h - h_col * stride_height); int w_off = (w - w_col * stride_width); if (h_off % dilation_h == 0 && w_off % dilation_w == 0) { h_off /= dilation_h; w_off /= dilation_w; int data_col_index = (((c * filter_height + h_off) * filter_width + w_off) * col_height + h_col) * col_width + w_col; val += data_col[data_col_index]; } } } data_im[index] = val; } } /* * im = [input_channels, input_height, input_width] * col = * [input_channels, filter_height, filter_width, output_height, output_width] */ template <class T> class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& col, const std::vector<int>& dilation, const std::vector<int>& stride, const std::vector<int>& padding, framework::Tensor* im) { PADDLE_ENFORCE(im->dims().size() == 3); PADDLE_ENFORCE(col.dims().size() == 5); int im_channels = im->dims()[0]; int im_height = im->dims()[1]; int im_width = im->dims()[2]; int filter_height = col.dims()[1]; int filter_width = col.dims()[2]; int col_height = col.dims()[3]; int col_width = col.dims()[4]; PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - (dilation[0] * (filter_height - 1) + 1)) / stride[0] + 1, col_height, "Output_height and padding(padding_up, padding_down) are " "inconsistent."); PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - (dilation[1] * (filter_width - 1) + 1)) / stride[1] + 1, col_width, "col_width and padding(padding_left, padding_right) are " "inconsistent."); size_t num_kernels = im_channels * im_height * im_width; size_t blocks = (num_kernels + 1024 - 1) / 1024; size_t block_x = 512; size_t block_y = (blocks + 512 - 1) / 512; dim3 threads(1024, 1); dim3 grid(block_x, block_y); // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
col2im<T><<<grid, threads, 0, context.stream()>>>( num_kernels, col.data<T>(), im_height, im_width, dilation[0], dilation[1], filter_height, filter_width, stride[0], stride[1], padding[0], padding[2], col_height, col_width, im->data<T>()); } }; template class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, float>; template class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, double>; template class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, float>; template class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO, platform::CUDADeviceContext, double>; template <class T> __global__ void im2colOCF(const T* im_data, int im_channels, int im_height, int im_width, int filter_height, int filter_width, int stride_height, int stride_width, int padding_height, int padding_width, int col_height, int col_width, T* col_data) { int swid = blockIdx.x; int shid = blockIdx.y; for (int channelid = threadIdx.z; channelid < im_channels; channelid += blockDim.z) { for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) { for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) { int width_offset = idx + swid * stride_width - padding_width; int height_offset = idy + shid * stride_height - padding_height; int im_offset = width_offset + height_offset * im_width + channelid * im_height * im_width; int col_offset = idx + idy * filter_width + channelid * filter_height * filter_width + (shid * col_width + swid) * (im_channels * filter_height * filter_width); col_data[col_offset] = (height_offset >= im_height || height_offset < 0 || width_offset >= im_width || width_offset < 0) ? T(0) : im_data[im_offset]; } } } } /* * im = [input_channels, input_height, input_width] * col = * [output_height, output_width, input_channels, filter_height, filter_width] */ template <class T> class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& im, const std::vector<int>& dilation, const std::vector<int>& stride, const std::vector<int>& padding, framework::Tensor* col) { PADDLE_ENFORCE(im.dims().size() == 3); PADDLE_ENFORCE(col->dims().size() == 5); int im_channels = im.dims()[0]; int im_height = im.dims()[1]; int im_width = im.dims()[2]; int filter_height = col->dims()[3]; int filter_width = col->dims()[4]; int col_height = col->dims()[0]; int col_width = col->dims()[1]; int block_dim_x = 0; int block_dim_y = 0; if (filter_height <= 4 && filter_width <= 4) { block_dim_x = 4; block_dim_y = 4; } else if (filter_height <= 8 && filter_width <= 8) { block_dim_x = 8; block_dim_y = 8; } else if (filter_height <= 16 && filter_width <= 16) { block_dim_x = 16; block_dim_y = 16; } else { block_dim_x = 32; block_dim_y = 32; } int block_dim_z = 1024 / block_dim_x / block_dim_y; dim3 threads(block_dim_x, block_dim_y, std::min(block_dim_z, im_channels)); dim3 grid(col_width, col_height); im2colOCF<T><<<grid, threads, 0, context.stream()>>>( im.data<T>(), im_channels, im_height, im_width, filter_height, filter_width, stride[0], stride[1], padding[0], padding[1], col_height, col_width, col->data<T>()); } }; template <class T> __global__ void col2imOCF(const T* col_data, int im_channels, int im_height, int im_width, int filter_height, int filter_width, int stride_height, int stride_width, int padding_height, int padding_width, int col_height, int col_width, T* im_data) { int swid = 
blockIdx.x; int shid = blockIdx.y; for (int channelid = threadIdx.z; channelid < im_channels; channelid += blockDim.z) { for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) { for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) { int width_offset = idx + swid * stride_width - padding_width; int height_offset = idy + shid * stride_height - padding_height; int im_offset = width_offset + height_offset * im_width + channelid * im_height * im_width; int col_offset = idx + idy * filter_width + channelid * filter_height * filter_width + (shid * col_width + swid) * (im_channels * filter_height * filter_width); if (height_offset >= 0 && height_offset < im_height && width_offset >= 0 && width_offset < im_width) { paddle::platform::CudaAtomicAdd(im_data + im_offset, col_data[col_offset]); } } } } } /* * im = [input_channels, input_height, input_width] * col = * [output_height, output_width, input_channels, filter_height, filter_width] */ template <class T> class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& col, const std::vector<int>& dilation, const std::vector<int>& stride, const std::vector<int>& padding, framework::Tensor* im) { PADDLE_ENFORCE(im->dims().size() == 3); PADDLE_ENFORCE(col.dims().size() == 5); int im_channels = im->dims()[0]; int im_height = im->dims()[1]; int im_width = im->dims()[2]; int filter_height = col.dims()[3]; int filter_width = col.dims()[4]; int col_height = col.dims()[0]; int col_width = col.dims()[1]; PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - (dilation[0] * (filter_height - 1) + 1)) / stride[0] + 1, col_height, "Output_height and padding(padding_up, padding_down) are " "inconsistent."); PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - (dilation[1] * (filter_width - 1) + 1)) / stride[1] + 1, col_width, "col_width and padding(padding_left, padding_right) are " "inconsistent."); int block_dim_x = 0; int block_dim_y = 0; if (filter_height <= 4 && filter_width <= 4) { block_dim_x = 4; block_dim_y = 4; } else if (filter_height <= 8 && filter_width <= 8) { block_dim_x = 8; block_dim_y = 8; } else if (filter_height <= 16 && filter_width <= 16) { block_dim_x = 16; block_dim_y = 16; } else { block_dim_x = 32; block_dim_y = 32; } int block_dim_z = 1024 / block_dim_x / block_dim_y; dim3 threads(block_dim_x, block_dim_y, std::min(block_dim_z, im_channels)); dim3 grid(col_width, col_height); col2imOCF<T><<<grid, threads, 0, context.stream()>>>( col.data<T>(), im_channels, im_height, im_width, filter_height, filter_width, stride[0], stride[1], padding[0], padding[1], col_height, col_width, im->data<T>()); } }; template class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, float>; template class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, double>; template class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, float>; template class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF, platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
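For reference, the kCFO column layout produced by the kernels above can be written as a plain nested loop on the host. This is a minimal sketch assuming a single image in row-major storage; it mirrors the index math of the im2col kernel but is not code from the Paddle tree.

// Host-side reference for the kCFO layout:
//   col = [channels, filter_h, filter_w, out_h, out_w], flattened row-major.
#include <vector>

void im2col_cfo_reference(const std::vector<float>& im, int C, int H, int W,
                          int KH, int KW, int stride_h, int stride_w,
                          int pad_h, int pad_w, int dil_h, int dil_w,
                          std::vector<float>& col, int OH, int OW) {
    for (int c = 0; c < C; ++c)
        for (int kh = 0; kh < KH; ++kh)
            for (int kw = 0; kw < KW; ++kw)
                for (int oh = 0; oh < OH; ++oh)
                    for (int ow = 0; ow < OW; ++ow) {
                        // input coordinate sampled by this (filter tap, output pixel) pair
                        int h = oh * stride_h - pad_h + kh * dil_h;
                        int w = ow * stride_w - pad_w + kw * dil_w;
                        float v = (h >= 0 && h < H && w >= 0 && w < W)
                                      ? im[(c * H + h) * W + w]
                                      : 0.0f;                      // zero padding
                        // [c][kh][kw][oh][ow], flattened row-major
                        col[(((c * KH + kh) * KW + kw) * OH + oh) * OW + ow] = v;
                    }
}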
d92e800a84f20b522a76979e1b4b2e2aa1337975.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>

int main(void){
    int count;
    hipGetDeviceCount(&count);
    printf("There are %d GPU devices on this machine\n", count);

    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, 0);

    printf( " --- General Information for device %d ---\n", 0);
    printf( "Name: %s\n", prop.name );
    printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
    printf( "Clock rate: %d\n", prop.clockRate );
    printf( "Device copy overlap: " );
    if (prop.deviceOverlap)
        printf( "Enabled\n" );
    else
        printf( "Disabled\n" );
    printf( "Kernel execution timeout : " );
    if (prop.kernelExecTimeoutEnabled)
        printf( "Enabled\n" );
    else
        printf( "Disabled\n" );

    printf( " --- Memory Information for device %d ---\n", 0);
    printf( "Total global mem: %ld\n", prop.totalGlobalMem );
    printf( "Total constant Mem: %ld\n", prop.totalConstMem );
    printf( "Max mem pitch: %ld\n", prop.memPitch );
    printf( "Texture Alignment: %ld\n", prop.textureAlignment );

    printf( " --- MP Information for device %d ---\n", 0);
    printf( "Multiprocessor count: %d\n", prop.multiProcessorCount );
    printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
    printf( "Registers per mp: %d\n", prop.regsPerBlock );
    printf( "Threads in warp: %d\n", prop.warpSize );
    printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
    printf( "Max thread dimensions: (%d, %d, %d)\n",
            prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
    printf( "Max grid dimensions: (%d, %d, %d)\n",
            prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
    printf( "\n" );

    return 0;
}

d92e800a84f20b522a76979e1b4b2e2aa1337975.cu
#include <stdio.h>

int main(void){
    int count;
    cudaGetDeviceCount(&count);
    printf("There are %d GPU devices on this machine\n", count);

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);

    printf( " --- General Information for device %d ---\n", 0);
    printf( "Name: %s\n", prop.name );
    printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
    printf( "Clock rate: %d\n", prop.clockRate );
    printf( "Device copy overlap: " );
    if (prop.deviceOverlap)
        printf( "Enabled\n" );
    else
        printf( "Disabled\n" );
    printf( "Kernel execution timeout : " );
    if (prop.kernelExecTimeoutEnabled)
        printf( "Enabled\n" );
    else
        printf( "Disabled\n" );

    printf( " --- Memory Information for device %d ---\n", 0);
    printf( "Total global mem: %ld\n", prop.totalGlobalMem );
    printf( "Total constant Mem: %ld\n", prop.totalConstMem );
    printf( "Max mem pitch: %ld\n", prop.memPitch );
    printf( "Texture Alignment: %ld\n", prop.textureAlignment );

    printf( " --- MP Information for device %d ---\n", 0);
    printf( "Multiprocessor count: %d\n", prop.multiProcessorCount );
    printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
    printf( "Registers per mp: %d\n", prop.regsPerBlock );
    printf( "Threads in warp: %d\n", prop.warpSize );
    printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
    printf( "Max thread dimensions: (%d, %d, %d)\n",
            prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
    printf( "Max grid dimensions: (%d, %d, %d)\n",
            prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
    printf( "\n" );

    return 0;
}
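The raw properties printed above are often turned into a couple of derived figures. Below is a short companion sketch that estimates peak memory bandwidth from memoryClockRate and memoryBusWidth; the 2x factor assumes double-data-rate memory, so treat the result as an estimate rather than a measured number.

// derived_props_sketch.cu -- illustrative companion to the query above.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);

    // memoryClockRate is reported in kHz, memoryBusWidth in bits.
    double peak_bw_gbs =
        2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8.0) / 1.0e6;
    double global_mem_gib = prop.totalGlobalMem / (1024.0 * 1024.0 * 1024.0);

    printf("Estimated peak memory bandwidth: %.1f GB/s\n", peak_bw_gbs);
    printf("Total global memory:             %.2f GiB\n", global_mem_gib);
    return 0;
}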
fe52be4bded43f764fa6f0d8a20c32cf866ac5d7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/

#include <cstdio>
#include <cstdlib>
#include <vector>

#include <rocblas.h>
#include <hip/hip_runtime.h>

#include "cublas_utils.h"

using data_type = double;

int main(int argc, char *argv[]) {
    hipblasHandle_t cublasH = NULL;
    hipStream_t stream = NULL;

    /*
     *   A = | 1.0 2.0 3.0 4.0 |
     */

    const std::vector<data_type> A = {1.0, 2.0, 3.0, 4.0};
    const int incx = 1;

    data_type result = 0.0;

    data_type *d_A = nullptr;

    printf("A\n");
    print_vector(A.size(), A.data());
    printf("=====\n");

    /* step 1: create cublas handle, bind a stream */
    CUBLAS_CHECK(hipblasCreate(&cublasH));

    CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking));
    CUBLAS_CHECK(hipblasSetStream(cublasH, stream));

    /* step 2: copy data to device */
    CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));

    CUDA_CHECK(hipMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(),
                              hipMemcpyHostToDevice, stream));

    /* step 3: compute */
    CUBLAS_CHECK(hipblasDasum(cublasH, A.size(), d_A, incx, &result));

    CUDA_CHECK(hipStreamSynchronize(stream));

    /*
     *   result = 10.00
     */

    printf("result\n");
    std::printf("%0.2f\n", result);
    printf("=====\n");

    /* free resources */
    CUDA_CHECK(hipFree(d_A));

    CUBLAS_CHECK(hipblasDestroy(cublasH));

    CUDA_CHECK(hipStreamDestroy(stream));

    CUDA_CHECK(hipDeviceReset());

    return EXIT_SUCCESS;
}
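The sample expects result = 10.00. Here is a trivial host-only sketch that computes the same absolute-value sum as a cross-check; it is illustrative and independent of the GPU code above.

// asum_reference.cpp -- expected value of the asum call above, on the host.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const std::vector<double> A = {1.0, 2.0, 3.0, 4.0};
    double expected = 0.0;
    for (double v : A) expected += std::fabs(v);   // sum of |A_i|
    std::printf("expected asum = %0.2f\n", expected);  // prints 10.00
    return 0;
}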
fe52be4bded43f764fa6f0d8a20c32cf866ac5d7.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. 
*/

#include <cstdio>
#include <cstdlib>
#include <vector>

#include <cublas_v2.h>
#include <cuda_runtime.h>

#include "cublas_utils.h"

using data_type = double;

int main(int argc, char *argv[]) {
    cublasHandle_t cublasH = NULL;
    cudaStream_t stream = NULL;

    /*
     *   A = | 1.0 2.0 3.0 4.0 |
     */

    const std::vector<data_type> A = {1.0, 2.0, 3.0, 4.0};
    const int incx = 1;

    data_type result = 0.0;

    data_type *d_A = nullptr;

    printf("A\n");
    print_vector(A.size(), A.data());
    printf("=====\n");

    /* step 1: create cublas handle, bind a stream */
    CUBLAS_CHECK(cublasCreate(&cublasH));

    CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
    CUBLAS_CHECK(cublasSetStream(cublasH, stream));

    /* step 2: copy data to device */
    CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * A.size()));

    CUDA_CHECK(cudaMemcpyAsync(d_A, A.data(), sizeof(data_type) * A.size(),
                               cudaMemcpyHostToDevice, stream));

    /* step 3: compute */
    CUBLAS_CHECK(cublasDasum(cublasH, A.size(), d_A, incx, &result));

    CUDA_CHECK(cudaStreamSynchronize(stream));

    /*
     *   result = 10.00
     */

    printf("result\n");
    std::printf("%0.2f\n", result);
    printf("=====\n");

    /* free resources */
    CUDA_CHECK(cudaFree(d_A));

    CUBLAS_CHECK(cublasDestroy(cublasH));

    CUDA_CHECK(cudaStreamDestroy(stream));

    CUDA_CHECK(cudaDeviceReset());

    return EXIT_SUCCESS;
}
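Both versions of this sample include "cublas_utils.h" for CUDA_CHECK, CUBLAS_CHECK and print_vector, which is not reproduced in this collection. A hypothetical minimal stand-in is sketched below; the real helper header in the NVIDIA samples may differ.

// cublas_utils_sketch.h -- assumed, minimal equivalents of the helpers used above.
#include <cstdio>
#include <cstdlib>
#include <cublas_v2.h>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            std::fprintf(stderr, "CUDA error %d at %s:%d\n", (int)err_,       \
                         __FILE__, __LINE__);                                 \
            std::exit(EXIT_FAILURE);                                          \
        }                                                                     \
    } while (0)

#define CUBLAS_CHECK(call)                                                    \
    do {                                                                      \
        cublasStatus_t st_ = (call);                                          \
        if (st_ != CUBLAS_STATUS_SUCCESS) {                                   \
            std::fprintf(stderr, "cuBLAS error %d at %s:%d\n", (int)st_,      \
                         __FILE__, __LINE__);                                 \
            std::exit(EXIT_FAILURE);                                          \
        }                                                                     \
    } while (0)

// Prints n values separated by spaces, as the sample's "A" printout suggests.
template <typename T>
void print_vector(size_t n, const T* v) {
    for (size_t i = 0; i < n; ++i) std::printf("%0.2f ", static_cast<double>(v[i]));
    std::printf("\n");
}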
c734af1aee5380b65e2c1c4e47e69aaa9bea38f2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "utils/utils.h"

__global__ void kernel_gflops(float* a, float* b, int N) {
    int row = threadIdx.y;
    int col = threadIdx.x;
    int idx = row * N + col;

    // ~TODO~
    // Execute at least two floating point operations (e.g. +, -, *, /)
    // on the value from array a at index idx and
    // store the result in array b at index idx.
    // NOTE: Do not change value directly in array a.
    for(int i = 0; i < 1000; i++) {
        b[idx] = a[idx] * 3.f + a[idx] / 2.f + 1.f;
    }
}

int main(void) {
    int nDevices;

    // Get the number of CUDA-capable GPU(s)
    hipGetDeviceCount(&nDevices);
    hipSetDevice(0);

    float *device_a = 0;
    float *device_b = 0;
    float *host_a = 0;
    float *host_b = 0;

    int N = 4096;
    int size = N * N;

    // Arrays a and b are of size N * N
    host_a = (float *) malloc(size * sizeof(float));
    host_b = (float *) malloc(size * sizeof(float));

    hipMalloc((void **) &device_a, size * sizeof(float));
    hipMalloc((void **) &device_b, size * sizeof(float));

    if (host_a == 0 || host_b == 0 || device_a == 0 || device_b == 0) {
        printf("[HOST] Couldn't allocate memory\n");
        return 1;
    }

    // Populate array a randomly
    fill_array_random(host_a, size);

    hipMemcpy(device_a, host_a, size * sizeof(float), hipMemcpyHostToDevice);

    hipEvent_t start, stop;

    // ~TODO~
    // Create two cuda events (start and stop)
    // by using the hipEventCreate function.
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start);
    hipLaunchKernelGGL(( kernel_gflops), dim3(size / 256), dim3(256), 0, 0, device_a, device_b, N);
    hipEventRecord(stop);

    hipEventSynchronize(stop);

    float ms = 0;
    hipEventElapsedTime(&ms, start, stop);

    float seconds = ms / pow((float) 10, 3);
    printf("SECONDS: %.3f\n", seconds);

    // ~TODO~
    // Set num_ops to the number of floating point operations
    // done in the kernel multiplied with the size of the matrix.
    long num_ops = 4000 * (long)size;

    float gflops = (float) num_ops / seconds / 1e+9;
    printf("GFLOPS: %.3f\n", gflops);

    free(host_a);
    free(host_b);

    hipFree(device_a);
    hipFree(device_b);

    return 0;
}
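The TODO above fixes num_ops at 4000 * size. Here is a short worked count of where that factor comes from, assuming the division is counted as one floating-point operation, matching the file's own choice.

// num_ops_worked.cu -- illustrative arithmetic only.
// b[idx] = a[idx]*3.f + a[idx]/2.f + 1.f  is 1 mul + 1 div + 2 adds = 4 flops,
// the loop repeats it 1000 times, and the matrix holds size = N*N elements.
#include <cstdio>

int main() {
    const long N = 4096;
    const long size = N * N;                        // 16,777,216 elements
    const long flops_per_element = 4L * 1000L;      // 4 flops/iteration * 1000 iterations
    const long num_ops = flops_per_element * size;  // = 67,108,864,000
    std::printf("num_ops = %ld\n", num_ops);
    return 0;
}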
c734af1aee5380b65e2c1c4e47e69aaa9bea38f2.cu
#include <stdio.h>
#include "utils/utils.h"

__global__ void kernel_gflops(float* a, float* b, int N) {
    int row = threadIdx.y;
    int col = threadIdx.x;
    int idx = row * N + col;

    // ~TODO~
    // Execute at least two floating point operations (e.g. +, -, *, /)
    // on the value from array a at index idx and
    // store the result in array b at index idx.
    // NOTE: Do not change value directly in array a.
    for(int i = 0; i < 1000; i++) {
        b[idx] = a[idx] * 3.f + a[idx] / 2.f + 1.f;
    }
}

int main(void) {
    int nDevices;

    // Get the number of CUDA-capable GPU(s)
    cudaGetDeviceCount(&nDevices);
    cudaSetDevice(0);

    float *device_a = 0;
    float *device_b = 0;
    float *host_a = 0;
    float *host_b = 0;

    int N = 4096;
    int size = N * N;

    // Arrays a and b are of size N * N
    host_a = (float *) malloc(size * sizeof(float));
    host_b = (float *) malloc(size * sizeof(float));

    cudaMalloc((void **) &device_a, size * sizeof(float));
    cudaMalloc((void **) &device_b, size * sizeof(float));

    if (host_a == 0 || host_b == 0 || device_a == 0 || device_b == 0) {
        printf("[HOST] Couldn't allocate memory\n");
        return 1;
    }

    // Populate array a randomly
    fill_array_random(host_a, size);

    cudaMemcpy(device_a, host_a, size * sizeof(float), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;

    // ~TODO~
    // Create two cuda events (start and stop)
    // by using the cudaEventCreate function.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    kernel_gflops<<<size / 256, 256>>> (device_a, device_b, N);
    cudaEventRecord(stop);

    cudaEventSynchronize(stop);

    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);

    float seconds = ms / pow((float) 10, 3);
    printf("SECONDS: %.3f\n", seconds);

    // ~TODO~
    // Set num_ops to the number of floating point operations
    // done in the kernel multiplied with the size of the matrix.
    long num_ops = 4000 * (long)size;

    float gflops = (float) num_ops / seconds / 1e+9;
    printf("GFLOPS: %.3f\n", gflops);

    free(host_a);
    free(host_b);

    cudaFree(device_a);
    cudaFree(device_b);

    return 0;
}
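Note that the kernel above derives idx from threadIdx only, while the launch is a 1D grid of 256-thread blocks, so threadIdx.y is always 0 and every block rewrites elements 0..255. Below is a sketch of an indexing variant that matches that launch shape; it is offered as an observation, not as the exercise's intended solution.

// Alternative indexing sketch matching the 1D launch used above
// (<<<size / 256, 256>>>): each thread gets a distinct element of the matrix.
__global__ void kernel_gflops_flat(float* a, float* b, int size) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    for (int i = 0; i < 1000; i++) {
        b[idx] = a[idx] * 3.f + a[idx] / 2.f + 1.f;
    }
}

// Launched with the same configuration as the original file:
//   kernel_gflops_flat<<<size / 256, 256>>>(device_a, device_b, size);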
833810c618042901790160c008bed2048c9ce03d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/activation/relu_layer.hpp" namespace caffe { template <typename Dtype> static __global__ void ReLUForward(const int n, const int negative_slope, const Dtype* in, bool* flag, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { flag[index] = in[index] > 0; out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } //template <typename Dtype> //static __global__ void ReLUForward_test(const int n, const int negative_slope, const Dtype* in, Dtype* out) //{ // CUDA_KERNEL_LOOP(index, n) // { // out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; // } //} template <typename Dtype> static __global__ void ReLUBackward(const int n, const int negative_slope, const Dtype* in_diff,const bool* flag, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { if (flag[index]) out_diff[index] = in_diff[index]; else out_diff[index] = in_diff[index] * negative_slope; } } template <typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); bool* flag_data = flag.mutable_gpu_data(); hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, negative_slope, bottom_data, flag_data,top_data); CUDA_POST_KERNEL_CHECK; //ReLUForward_test<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> //(count, negative_slope, bottom_data,top_data); //CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); const bool* flag_data = flag.gpu_data(); hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, negative_slope, top_diff, flag_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void ReLULayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const bool* flag_data = flag.gpu_data(); hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, negative_slope, bottom[0]->gpu_sec_diff(), flag_data, top[0]->mutable_gpu_sec_diff()); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
833810c618042901790160c008bed2048c9ce03d.cu
#include <algorithm> #include <vector> #include "caffe/layers/activation/relu_layer.hpp" namespace caffe { template <typename Dtype> static __global__ void ReLUForward(const int n, const int negative_slope, const Dtype* in, bool* flag, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { flag[index] = in[index] > 0; out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } //template <typename Dtype> //static __global__ void ReLUForward_test(const int n, const int negative_slope, const Dtype* in, Dtype* out) //{ // CUDA_KERNEL_LOOP(index, n) // { // out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; // } //} template <typename Dtype> static __global__ void ReLUBackward(const int n, const int negative_slope, const Dtype* in_diff,const bool* flag, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { if (flag[index]) out_diff[index] = in_diff[index]; else out_diff[index] = in_diff[index] * negative_slope; } } template <typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); bool* flag_data = flag.mutable_gpu_data(); ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> (count, negative_slope, bottom_data, flag_data,top_data); CUDA_POST_KERNEL_CHECK; //ReLUForward_test<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> //(count, negative_slope, bottom_data,top_data); //CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); const bool* flag_data = flag.gpu_data(); ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> (count, negative_slope, top_diff, flag_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void ReLULayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const bool* flag_data = flag.gpu_data(); ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> (count, negative_slope, bottom[0]->gpu_sec_diff(), flag_data, top[0]->mutable_gpu_sec_diff()); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
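CUDA_KERNEL_LOOP and CAFFE_GET_BLOCKS come from Caffe's common CUDA headers and are not shown in this collection. Below is a standalone sketch of the underlying grid-stride pattern, written out explicitly for a plain ReLU forward pass; it illustrates the looping idiom only and is not Caffe code.

// Grid-stride ReLU forward sketch: every thread strides over the array by the
// total number of launched threads, so any grid size covers all n elements.
__global__ void relu_forward_plain(int n, float negative_slope,
                                   const float* in, float* out) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = in[i] > 0.f ? in[i] : in[i] * negative_slope;
    }
}

// Example launch (512 threads per block, enough blocks to cover n once):
//   int threads = 512;
//   int blocks = (n + threads - 1) / threads;
//   relu_forward_plain<<<blocks, threads>>>(n, 0.f, d_in, d_out);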
a43c64c339fb9ea0f09eb4e6b985284e070c7fda.hip
// !!! This is a file automatically generated by hipify!!! /* * Implementing the FFT algorithm for general input * Input should be fp32 vectors with size equals to the power of 4 * Number of vectors is given by BATCH (B) * Recursive algorithm * Base case is fft4 * Combine all components in one file */ // C includes #include <stdio.h> #include <assert.h> #include <math.h> #include <stdlib.h> #include <string.h> // CUDA includes #include <hip/hip_runtime.h> #include <rocblas.h> #include <hip/hip_fp16.h> #include "nvidia_helper/checkCudaErrors.h" // Matrix and vector #include "helper/my_vector.h" #include "helper/my_matrix.h" #include "helper/my_const.h" #define PI 3.14159265 #define EPS 0.0000001192f const float UPPER_BOUND = 1.0f; const int BATCH = 16; const int SIZE = 256; // Utility function declaration __global__ void mySplit(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N, int B); FFT_S init_F4(); __global__ void myAccumulate(int N, float* X1, float* X2, float* alpha, float* R1, float* R2, int B); FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im); __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im); FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im); // Global variables fft::MatrixH F4_re; fft::MatrixH F4_im; float* buffer; float* X_temp; int main() { int mem_size; // Set device heap size hipDeviceSetLimit(hipLimitMallocHeapSize, 1024 * 1024 * 64); // allocate unified memory for input matrix fft::MatrixF input_re; input_re.width = BATCH; input_re.height = SIZE; mem_size = input_re.width * input_re.height * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &(input_re.array), mem_size)); fft::MatrixF input_im; input_im.width = BATCH; input_im.height = SIZE; mem_size = input_im.width * input_im.height * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &(input_im.array), mem_size)); // Initialize the input matrix srand(time(NULL)); printf("The input is: \n"); for (int j = 1; j <= BATCH; j++){ printf("Vector %d: \n", j); for (int i = 1; i <= SIZE; i++){ input_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_re.element(i, j) = (float)i; input_im.element(i, j) = 0.0f; printf("X[%d] = (%.10f, %.10f) \n", i, input_re.element(i, j), input_im.element(i, j)); } printf("\n"); } // allocate unified memory for output matrix fft::MatrixF output_re; output_re.width = BATCH; output_re.height = SIZE; mem_size = output_re.width * output_re.height * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &(output_re.array), mem_size)); fft::MatrixF output_im; output_im.width = BATCH; output_im.height = SIZE; mem_size = output_im.width * output_im.height * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &(output_im.array), mem_size)); // allocate unified memory for the buffer (array of float) mem_size = SIZE * BATCH * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &buffer, mem_size)); checkCudaErrors(hipMallocManaged((void **) &X_temp, mem_size)); FFT_S status; // Initialize Fourier matrix status = init_F4(); if (status != FFT_SUCCESS){ fprintf(stderr, "!!!!! 
Matrix initialization error (init Fourier matrix).\n"); return FFT_FAILURE; } // Call gfft function status = gfft(SIZE, BATCH, input_re, input_im, output_re, output_im); if (status != FFT_SUCCESS){ printf("Error in running fft algorithm\n"); exit(1); } printf("Result: \n"); for (int j = 1; j <= BATCH; j++){ printf("Resulting vector %d: \n", j); for (int i = 1; i <= SIZE; i++){ printf("FX[%d] = (%.10f, %.10f) \n", i, output_re.element(i, j), output_im.element(i, j)); } } checkCudaErrors(hipFree(input_re.array)); checkCudaErrors(hipFree(input_im.array)); checkCudaErrors(hipFree(output_re.array)); checkCudaErrors(hipFree(output_im.array)); return 0; } __global__ void mySplit(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N, int B, float* Xtemp) { /* * fft::MatrixF X (N*B), fft::MatrixH Xhi (N*B), fft::MatrixH Xlo (N*B) * fft::VectorF s1, fft::VectorF s2 * int N, int B. N is always 4 * Grid and dim size should be 1D, total size = B * All data should be in unified memory or device memory * */ int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < B){ // Calculate scaling factor 1 float scale1 = 0.0f; for (int i = 0; i < N; i++){ float norm = (float) fabs(X[i + idx * N]); if (norm > scale1) scale1 = norm; } // If all number are zero, skip if (scale1 == 0.0f){ s1[idx] = 0.0f; s2[idx] = 0.0f; for (int i = 0; i < N; i++){ Xhi[i + idx * N] = Xlo[i + idx * N] = 0.0f; } } else { // Restrict scale range if (scale1 < EPS) scale1 = EPS; if (scale1 > 1.0f/EPS) scale1 = 1.0f/EPS; s1[idx] = scale1; // Scale the high half for (int i = 0; i < N; i++){ Xtemp[i + idx * N] = X[i + idx * N]/scale1; Xhi[i + idx * N] = (half)(Xtemp[i + idx * N]); // Use Xtemp to store the residual Xtemp[i + idx * N] = X[i + idx * N] - scale1 * (float)(Xhi[i + idx * N]); } // Calculate the lower scaling factor float scale2 = 0.0f; for (int i = 0; i < N; i++){ float norm = (float) fabs(Xtemp[i + idx * N]); if (norm > scale2) scale2 = norm; } // If all number are zero, skip if (scale2 == 0.0f){ s2[idx] = 0.0f; for (int i = 0; i < N; i++){ Xlo[i + idx * N] = 0.0f; } } else { // Restrict scale range if (scale2 < EPS) scale2 = EPS; if (scale2 > 1.0f/EPS) scale2 = 1.0f/EPS; s2[idx] = scale2; for (int i = 0; i < N; i++){ Xlo[i + idx * N] = (half) (Xtemp[i + idx * N] / scale2); } } } } } FFT_S init_F4() { // Allocate unified memory for Fourier Matrix int mem_size; F4_re.width = 4; F4_re.height = 4; mem_size = F4_re.width * F4_re.height * sizeof(half); checkCudaErrors(hipMallocManaged((void **) &(F4_re.array), mem_size)); F4_im.width = 4; F4_im.height = 4; mem_size = F4_im.width * F4_im.height * sizeof(half); checkCudaErrors(hipMallocManaged((void **) &(F4_im.array), mem_size)); F4_re.element(1, 1) = 1.0f; F4_re.element(2, 1) = 1.0f; F4_re.element(3, 1) = 1.0f; F4_re.element(4, 1) = 1.0f; F4_re.element(1, 2) = 1.0f; F4_re.element(2, 2) = 0.0f; F4_re.element(3, 2) =-1.0f; F4_re.element(4, 2) = 0.0f; F4_re.element(1, 3) = 1.0f; F4_re.element(2, 3) =-1.0f; F4_re.element(3, 3) = 1.0f; F4_re.element(4, 3) =-1.0f; F4_re.element(1, 4) = 1.0f; F4_re.element(2, 4) = 0.0f; F4_re.element(3, 4) =-1.0f; F4_re.element(4, 4) = 0.0f; F4_im.element(1, 1) = 0.0f; F4_im.element(2, 1) = 0.0f; F4_im.element(3, 1) = 0.0f; F4_im.element(4, 1) = 0.0f; F4_im.element(1, 2) = 0.0f; F4_im.element(2, 2) =-1.0f; F4_im.element(3, 2) = 0.0f; F4_im.element(4, 2) = 1.0f; F4_im.element(1, 3) = 0.0f; F4_im.element(2, 3) = 0.0f; F4_im.element(3, 3) = 0.0f; F4_im.element(4, 3) = 0.0f; F4_im.element(1, 4) = 0.0f; F4_im.element(2, 4) = 1.0f; F4_im.element(3, 4) = 
0.0f; F4_im.element(4, 4) =-1.0f; return FFT_SUCCESS; } __global__ void myAccumulate(int N, float* X1, float* X2, float* alpha, float* R1, float* R2, int B) { /* * N is number of elements (always 4) * X1, X2 are 4 * (B * 4) column-major matrix. Inner order is by batch. Outer order is Re_hi, Re_lo, Im_hi, Im_lo * alpha is B * 4 array. Inner order is by batch. Outer order is re_s1, re_s2, im_s1, im_s2 * R1, R2 are 4 * B matrix * B is batch size * */ int i = blockIdx.y * blockDim.y + threadIdx.y; // row number int j = blockIdx.x * blockDim.x + threadIdx.x; // column number if (i < N && j < B){ R1[i + j * N] = R2[i + j * N] = 0.0f; R1[i + j * N] += alpha[j] * X1[i + j * N]; R1[i + j * N] += alpha[j + B] * X1[i + j * N + N * B]; R1[i + j * N] += -1.0f * alpha[j + 2*B] * X2[i + j * N + N * 2 * B]; R1[i + j * N] += -1.0f * alpha[j + 3*B] * X2[i + j * N + N * 3 * B]; R2[i + j * N] += alpha[j] * X2[i + j * N]; R2[i + j * N] += alpha[j + B] * X2[i + j * N + N * B]; R2[i + j * N] += alpha[j + 2*B] * X1[i + j * N + N * 2 * B]; R2[i + j * N] += alpha[j + 3*B] * X1[i + j * N + N * 3 * B]; } } FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im) { // Variable declaration hipblasStatus_t status; hipblasHandle_t handle; //// Unified variables float *scales; // = *re_s1, *re_s2, *im_s1, *im_s2; half *X_split; // = *X_re_hi, *X_re_lo, *X_im_hi, *X_im_lo; float *result1, *result2; // Store the intermediate result //// Scaling variables float alpha = 1.0f, beta = 0.0f; // Initialize cublas status = hipblasCreate(&handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS initialization error\n"); return FFT_FAILURE; } status = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); // allow Tensor Core if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
CUBLAS setting math mode error\n"); return FFT_FAILURE; } // Allocate unified memory with 0 initialization checkCudaErrors(hipMallocManaged((void **) &scales, B * 4 * sizeof(float))); checkCudaErrors(hipMemset(scales, 0.0f, B * 4 * sizeof(float))); checkCudaErrors(hipMallocManaged((void **) &X_split, 4 * B * 4 * sizeof(half))); checkCudaErrors(hipMemset(X_split, 0.0f, 4 * B * 4 * sizeof(half))); checkCudaErrors(hipMallocManaged((void **) &result1, 4 * B * 4 * sizeof(result1[0]))); checkCudaErrors(hipMemset(result1, 0.0f, 4 * B * 4 * sizeof(result1[0]))); checkCudaErrors(hipMallocManaged((void **) &result2, 4 * 4 * sizeof(result2[0]))); checkCudaErrors(hipMemset(result2, 0.0f, 4 * B * 4 * sizeof(result2[0]))); // Split input //// Initialize Matrix and Vector data structure to store split result fft::MatrixH X_re_hi; X_re_hi.width = B; X_re_hi.height = 4; X_re_hi.array = X_split + 4 * B * 0; fft::MatrixH X_re_lo; X_re_lo.width = B; X_re_lo.height = 4; X_re_lo.array = X_split + 4 * B * 1; fft::MatrixH X_im_hi; X_im_hi.width = B; X_im_hi.height = 4; X_im_hi.array = X_split + 4 * B * 2; fft::MatrixH X_im_lo; X_im_lo.width = B; X_im_lo.height = 4; X_im_lo.array = X_split + 4 * B * 3; fft::VectorF re_s1; re_s1.size = B; re_s1.array = scales + B * 0; fft::VectorF re_s2; re_s2.size = B; re_s2.array = scales + B * 1; fft::VectorF im_s1; im_s1.size = B; im_s1.array = scales + B * 2; fft::VectorF im_s2; im_s2.size = B; im_s2.array = scales + B * 3; //// Call splitting function int numThreads = 64; int numBlocks = (B + 63) / 64; hipLaunchKernelGGL(( mySplit), dim3(numBlocks), dim3(numThreads), 0, 0, X_re.array, X_re_hi.array, X_re_lo.array, re_s1.array, re_s2.array, 4, B, X_temp); hipLaunchKernelGGL(( mySplit), dim3(numBlocks), dim3(numThreads), 0, 0, X_im.array, X_im_hi.array, X_im_lo.array, im_s1.array, im_s2.array, 4, B, X_temp); hipError_t cerror = hipGetLastError(); if (cerror != hipSuccess) { printf("CUDA error: %s during splitting\n", hipGetErrorString(cerror)); return FFT_FAILURE; } // Call cublas function and finish Matrix multiplication calculation //// Call cublas gemm on F4_re status = hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B * 4, 4, &alpha, F4_re.array, HIP_R_16F, 4, X_split, HIP_R_16F, 4, &beta, result1, HIP_R_32F, 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (a * (c, d)).\n"); return FFT_FAILURE; } //// Call cublas gemm on F4_im status = hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, 4, B * 4, 4, &alpha, F4_im.array, HIP_R_16F, 4, X_split, HIP_R_16F, 4, &beta, result2, HIP_R_32F, 4, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (b * (c, d)).\n"); return FFT_FAILURE; } // Scale, combine and get result, add to output //// Set grid and block size dim3 threadsPerBlock(16, 4); dim3 BlocksPerGrid((B+15)/16, 1); //// call kernel function (buffer is zero-initialized inside) hipLaunchKernelGGL(( myAccumulate), dim3(BlocksPerGrid), dim3(threadsPerBlock), 0, 0, 4, result1, result2, scales, FX_re.array, FX_im.array, B); // Deallocate unified memory if (hipFree(scales) != hipSuccess) { fprintf(stderr, "!!!! unified memory free error (free scales vector)\n"); return FFT_FAILURE; } if (hipFree(X_split) != hipSuccess) { fprintf(stderr, "!!!! unified memory free error (free split result matrix)\n"); return FFT_FAILURE; } if (hipFree(result1) != hipSuccess) { fprintf(stderr, "!!!! 
unified memory free error (free result 1 Matrix)\n"); return FFT_FAILURE; } if (hipFree(result2) != hipSuccess) { fprintf(stderr, "!!!! unified memory free error (free result 2 Matrix)\n"); return FFT_FAILURE; } // Shutdown cublas status = hipblasDestroy(handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! shutdown error (A)\n"); return FFT_FAILURE; } hipDeviceSynchronize(); return FFT_SUCCESS; } __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im) { /* * Multifly every element of the input matrix with twiddle factor * Block and thread layout should be 2D * Re.element(i, j) [0 based] = xre * cos(2pi/N * i * j) + xim * sin(2pi/N * i * j) * Im.element(i, j) [0 based] = -xre * sin(2pi/N * i * j) + xim * cos(2pi/N * i * j) * */ // Calculate position (0 based) int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i < m && j < n){ // Per-thread local variables int index = j * m + i; float tw_re = cos(2 * PI / N * i * j); float tw_im = sin(2 * PI / N * i * j); float result_re = matrix_re[index] * tw_re + matrix_im[index] * tw_im; float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re; matrix_re[index] = result_re; matrix_im[index] = result_im; } } FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im) { FFT_S fft_status; if (N == 4) { return fft4(B, X_re, X_im, FX_re, FX_im); } // cublas variable declaration hipblasStatus_t status; hipblasHandle_t handle; // Scaling variables float alpha = 1.0f, beta = 0.0f; // Temporary variables for intermediate result swapping float* temp; // Initialize cublas status = hipblasCreate(&handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS initialization error\n"); return FFT_FAILURE; } // Reshape the output matrix: (N -(Reshape)->4*(N/4)) * B FX_re.width = N / 4 * B; FX_re.height = 4; FX_im.width = N / 4 * B; FX_im.height = 4; // Transpose input matrix: (4*(N/4) -(Transpose)-> (N/4)*4) * B // Store temporary result first in buffer, then in FX_re.array and FX_im.array //// Real matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, X_re.array + j * N, 4, &beta, X_re.array + j * N, 4, buffer + j * N, N/4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose real input).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension (Note that the transpose happens batch-wisely) FX_re.height = N / 4; FX_re.width = B * 4; //// Imaginary for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, X_im.array + j * N, 4, &beta, X_im.array + j * N, 4, buffer + j * N, N/4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose imaginary input).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_im.height = N / 4; FX_im.width = B * 4; hipDeviceSynchronize(); // Recursively call gfft function, not! using buffer matrix //// Call gfft, store result in buffer matrix fft_status = gfft(N / 4, 4 * B, FX_re, FX_im, FX_re, FX_im); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! 
Execution error (recursively call gfft).\n"); return FFT_FAILURE; } // Multiplication with twiddle factors //// Set grid and block size dim3 threadsPerBlock(4, 16); dim3 numBlocks(1, (N + 63)/64); // Make sure blocks are enough //// Call kernel function for (int j = 0; j < B; j++){ hipLaunchKernelGGL(( multiply_twiddle), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, N, N/4, 4, FX_re.array + j * N, FX_im.array + j * N); } hipDeviceSynchronize(); // Transpose the matrix again // Store temporary result first in buffer, then in FX_re.array and FX_im.array //// Real matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, 4, N/4, &alpha, FX_re.array + j * N, N/4, &beta, FX_re.array + j * N, N/4, buffer + j * N, 4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose real).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension, note that the transpose happens per batch FX_re.height = 4; FX_re.width = N / 4 * B; //// Imaginary matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, 4, N/4, &alpha, FX_im.array + j * N, N/4, &beta, FX_im.array + j * N, N/4, buffer + j * N, 4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose imaginary).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_im.height = 4; FX_im.width = N / 4 * B; hipDeviceSynchronize(); // Call fft4, not! using buffer matrix //// Call fft4, store result in buffer matrix fft_status = fft4(N / 4 * B, FX_re, FX_im, FX_re, FX_im); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Execution error (combine step calling fft4).\n"); return FFT_FAILURE; } // Do the final transpose to get the output //// Real matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, FX_re.array + j * N, 4, &beta, FX_re.array + j * N, 4, buffer + j * N, N/4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose real).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension FX_re.height = N / 4; FX_re.width = 4 * B; //// Imaginary matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, FX_im.array + j * N, 4, &beta, FX_im.array + j * N, 4, buffer + j * N, N/4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose imaginary).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_re.height = N / 4; FX_re.width = 4 * B; hipDeviceSynchronize(); // Reshape back input and output matrix: (4*(N/4) --Reshape--> N) * B FX_re.width = B; FX_re.height = N; FX_im.width = B; FX_im.height = N; // Shutdown cublas status = hipblasDestroy(handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! shutdown error (A)\n"); return FFT_FAILURE; } return FFT_SUCCESS; }
a43c64c339fb9ea0f09eb4e6b985284e070c7fda.cu
/* * Implementing the FFT algorithm for general input * Input should be fp32 vectors with size equals to the power of 4 * Number of vectors is given by BATCH (B) * Recursive algorithm * Base case is fft4 * Combine all components in one file */ // C includes #include <stdio.h> #include <assert.h> #include <math.h> #include <stdlib.h> #include <string.h> // CUDA includes #include <cuda_runtime.h> #include <cublas_v2.h> #include <cuda_fp16.h> #include "nvidia_helper/checkCudaErrors.h" // Matrix and vector #include "helper/my_vector.h" #include "helper/my_matrix.h" #include "helper/my_const.h" #define PI 3.14159265 #define EPS 0.0000001192f const float UPPER_BOUND = 1.0f; const int BATCH = 16; const int SIZE = 256; // Utility function declaration __global__ void mySplit(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N, int B); FFT_S init_F4(); __global__ void myAccumulate(int N, float* X1, float* X2, float* alpha, float* R1, float* R2, int B); FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im); __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im); FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im); // Global variables fft::MatrixH F4_re; fft::MatrixH F4_im; float* buffer; float* X_temp; int main() { int mem_size; // Set device heap size cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024 * 1024 * 64); // allocate unified memory for input matrix fft::MatrixF input_re; input_re.width = BATCH; input_re.height = SIZE; mem_size = input_re.width * input_re.height * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &(input_re.array), mem_size)); fft::MatrixF input_im; input_im.width = BATCH; input_im.height = SIZE; mem_size = input_im.width * input_im.height * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &(input_im.array), mem_size)); // Initialize the input matrix srand(time(NULL)); printf("The input is: \n"); for (int j = 1; j <= BATCH; j++){ printf("Vector %d: \n", j); for (int i = 1; i <= SIZE; i++){ input_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_re.element(i, j) = (float)i; input_im.element(i, j) = 0.0f; printf("X[%d] = (%.10f, %.10f) \n", i, input_re.element(i, j), input_im.element(i, j)); } printf("\n"); } // allocate unified memory for output matrix fft::MatrixF output_re; output_re.width = BATCH; output_re.height = SIZE; mem_size = output_re.width * output_re.height * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &(output_re.array), mem_size)); fft::MatrixF output_im; output_im.width = BATCH; output_im.height = SIZE; mem_size = output_im.width * output_im.height * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &(output_im.array), mem_size)); // allocate unified memory for the buffer (array of float) mem_size = SIZE * BATCH * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &buffer, mem_size)); checkCudaErrors(cudaMallocManaged((void **) &X_temp, mem_size)); FFT_S status; // Initialize Fourier matrix status = init_F4(); if (status != FFT_SUCCESS){ fprintf(stderr, "!!!!! 
Matrix initialization error (init Fourier matrix).\n"); return FFT_FAILURE; } // Call gfft function status = gfft(SIZE, BATCH, input_re, input_im, output_re, output_im); if (status != FFT_SUCCESS){ printf("Error in running fft algorithm\n"); exit(1); } printf("Result: \n"); for (int j = 1; j <= BATCH; j++){ printf("Resulting vector %d: \n", j); for (int i = 1; i <= SIZE; i++){ printf("FX[%d] = (%.10f, %.10f) \n", i, output_re.element(i, j), output_im.element(i, j)); } } checkCudaErrors(cudaFree(input_re.array)); checkCudaErrors(cudaFree(input_im.array)); checkCudaErrors(cudaFree(output_re.array)); checkCudaErrors(cudaFree(output_im.array)); return 0; } __global__ void mySplit(float* X, half* Xhi, half* Xlo, float* s1, float* s2, int N, int B, float* Xtemp) { /* * fft::MatrixF X (N*B), fft::MatrixH Xhi (N*B), fft::MatrixH Xlo (N*B) * fft::VectorF s1, fft::VectorF s2 * int N, int B. N is always 4 * Grid and dim size should be 1D, total size = B * All data should be in unified memory or device memory * */ int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < B){ // Calculate scaling factor 1 float scale1 = 0.0f; for (int i = 0; i < N; i++){ float norm = (float) fabs(X[i + idx * N]); if (norm > scale1) scale1 = norm; } // If all number are zero, skip if (scale1 == 0.0f){ s1[idx] = 0.0f; s2[idx] = 0.0f; for (int i = 0; i < N; i++){ Xhi[i + idx * N] = Xlo[i + idx * N] = 0.0f; } } else { // Restrict scale range if (scale1 < EPS) scale1 = EPS; if (scale1 > 1.0f/EPS) scale1 = 1.0f/EPS; s1[idx] = scale1; // Scale the high half for (int i = 0; i < N; i++){ Xtemp[i + idx * N] = X[i + idx * N]/scale1; Xhi[i + idx * N] = (half)(Xtemp[i + idx * N]); // Use Xtemp to store the residual Xtemp[i + idx * N] = X[i + idx * N] - scale1 * (float)(Xhi[i + idx * N]); } // Calculate the lower scaling factor float scale2 = 0.0f; for (int i = 0; i < N; i++){ float norm = (float) fabs(Xtemp[i + idx * N]); if (norm > scale2) scale2 = norm; } // If all number are zero, skip if (scale2 == 0.0f){ s2[idx] = 0.0f; for (int i = 0; i < N; i++){ Xlo[i + idx * N] = 0.0f; } } else { // Restrict scale range if (scale2 < EPS) scale2 = EPS; if (scale2 > 1.0f/EPS) scale2 = 1.0f/EPS; s2[idx] = scale2; for (int i = 0; i < N; i++){ Xlo[i + idx * N] = (half) (Xtemp[i + idx * N] / scale2); } } } } } FFT_S init_F4() { // Allocate unified memory for Fourier Matrix int mem_size; F4_re.width = 4; F4_re.height = 4; mem_size = F4_re.width * F4_re.height * sizeof(half); checkCudaErrors(cudaMallocManaged((void **) &(F4_re.array), mem_size)); F4_im.width = 4; F4_im.height = 4; mem_size = F4_im.width * F4_im.height * sizeof(half); checkCudaErrors(cudaMallocManaged((void **) &(F4_im.array), mem_size)); F4_re.element(1, 1) = 1.0f; F4_re.element(2, 1) = 1.0f; F4_re.element(3, 1) = 1.0f; F4_re.element(4, 1) = 1.0f; F4_re.element(1, 2) = 1.0f; F4_re.element(2, 2) = 0.0f; F4_re.element(3, 2) =-1.0f; F4_re.element(4, 2) = 0.0f; F4_re.element(1, 3) = 1.0f; F4_re.element(2, 3) =-1.0f; F4_re.element(3, 3) = 1.0f; F4_re.element(4, 3) =-1.0f; F4_re.element(1, 4) = 1.0f; F4_re.element(2, 4) = 0.0f; F4_re.element(3, 4) =-1.0f; F4_re.element(4, 4) = 0.0f; F4_im.element(1, 1) = 0.0f; F4_im.element(2, 1) = 0.0f; F4_im.element(3, 1) = 0.0f; F4_im.element(4, 1) = 0.0f; F4_im.element(1, 2) = 0.0f; F4_im.element(2, 2) =-1.0f; F4_im.element(3, 2) = 0.0f; F4_im.element(4, 2) = 1.0f; F4_im.element(1, 3) = 0.0f; F4_im.element(2, 3) = 0.0f; F4_im.element(3, 3) = 0.0f; F4_im.element(4, 3) = 0.0f; F4_im.element(1, 4) = 0.0f; F4_im.element(2, 4) = 1.0f; F4_im.element(3, 
4) = 0.0f; F4_im.element(4, 4) =-1.0f; return FFT_SUCCESS; } __global__ void myAccumulate(int N, float* X1, float* X2, float* alpha, float* R1, float* R2, int B) { /* * N is number of elements (always 4) * X1, X2 are 4 * (B * 4) column-major matrix. Inner order is by batch. Outer order is Re_hi, Re_lo, Im_hi, Im_lo * alpha is B * 4 array. Inner order is by batch. Outer order is re_s1, re_s2, im_s1, im_s2 * R1, R2 are 4 * B matrix * B is batch size * */ int i = blockIdx.y * blockDim.y + threadIdx.y; // row number int j = blockIdx.x * blockDim.x + threadIdx.x; // column number if (i < N && j < B){ R1[i + j * N] = R2[i + j * N] = 0.0f; R1[i + j * N] += alpha[j] * X1[i + j * N]; R1[i + j * N] += alpha[j + B] * X1[i + j * N + N * B]; R1[i + j * N] += -1.0f * alpha[j + 2*B] * X2[i + j * N + N * 2 * B]; R1[i + j * N] += -1.0f * alpha[j + 3*B] * X2[i + j * N + N * 3 * B]; R2[i + j * N] += alpha[j] * X2[i + j * N]; R2[i + j * N] += alpha[j + B] * X2[i + j * N + N * B]; R2[i + j * N] += alpha[j + 2*B] * X1[i + j * N + N * 2 * B]; R2[i + j * N] += alpha[j + 3*B] * X1[i + j * N + N * 3 * B]; } } FFT_S fft4(int B, fft::MatrixF X_re, fft::MatrixF X_im, fft::MatrixF FX_re, fft::MatrixF FX_im) { // Variable declaration cublasStatus_t status; cublasHandle_t handle; //// Unified variables float *scales; // = *re_s1, *re_s2, *im_s1, *im_s2; half *X_split; // = *X_re_hi, *X_re_lo, *X_im_hi, *X_im_lo; float *result1, *result2; // Store the intermediate result //// Scaling variables float alpha = 1.0f, beta = 0.0f; // Initialize cublas status = cublasCreate(&handle); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS initialization error\n"); return FFT_FAILURE; } status = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); // allow Tensor Core if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
CUBLAS setting math mode error\n"); return FFT_FAILURE; } // Allocate unified memory with 0 initialization checkCudaErrors(cudaMallocManaged((void **) &scales, B * 4 * sizeof(float))); checkCudaErrors(cudaMemset(scales, 0.0f, B * 4 * sizeof(float))); checkCudaErrors(cudaMallocManaged((void **) &X_split, 4 * B * 4 * sizeof(half))); checkCudaErrors(cudaMemset(X_split, 0.0f, 4 * B * 4 * sizeof(half))); checkCudaErrors(cudaMallocManaged((void **) &result1, 4 * B * 4 * sizeof(result1[0]))); checkCudaErrors(cudaMemset(result1, 0.0f, 4 * B * 4 * sizeof(result1[0]))); checkCudaErrors(cudaMallocManaged((void **) &result2, 4 * 4 * sizeof(result2[0]))); checkCudaErrors(cudaMemset(result2, 0.0f, 4 * B * 4 * sizeof(result2[0]))); // Split input //// Initialize Matrix and Vector data structure to store split result fft::MatrixH X_re_hi; X_re_hi.width = B; X_re_hi.height = 4; X_re_hi.array = X_split + 4 * B * 0; fft::MatrixH X_re_lo; X_re_lo.width = B; X_re_lo.height = 4; X_re_lo.array = X_split + 4 * B * 1; fft::MatrixH X_im_hi; X_im_hi.width = B; X_im_hi.height = 4; X_im_hi.array = X_split + 4 * B * 2; fft::MatrixH X_im_lo; X_im_lo.width = B; X_im_lo.height = 4; X_im_lo.array = X_split + 4 * B * 3; fft::VectorF re_s1; re_s1.size = B; re_s1.array = scales + B * 0; fft::VectorF re_s2; re_s2.size = B; re_s2.array = scales + B * 1; fft::VectorF im_s1; im_s1.size = B; im_s1.array = scales + B * 2; fft::VectorF im_s2; im_s2.size = B; im_s2.array = scales + B * 3; //// Call splitting function int numThreads = 64; int numBlocks = (B + 63) / 64; mySplit<<<numBlocks, numThreads>>>(X_re.array, X_re_hi.array, X_re_lo.array, re_s1.array, re_s2.array, 4, B, X_temp); mySplit<<<numBlocks, numThreads>>>(X_im.array, X_im_hi.array, X_im_lo.array, im_s1.array, im_s2.array, 4, B, X_temp); cudaError_t cerror = cudaGetLastError(); if (cerror != cudaSuccess) { printf("CUDA error: %s during splitting\n", cudaGetErrorString(cerror)); return FFT_FAILURE; } // Call cublas function and finish Matrix multiplication calculation //// Call cublas gemm on F4_re status = cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B * 4, 4, &alpha, F4_re.array, CUDA_R_16F, 4, X_split, CUDA_R_16F, 4, &beta, result1, CUDA_R_32F, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (a * (c, d)).\n"); return FFT_FAILURE; } //// Call cublas gemm on F4_im status = cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, 4, B * 4, 4, &alpha, F4_im.array, CUDA_R_16F, 4, X_split, CUDA_R_16F, 4, &beta, result2, CUDA_R_32F, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (b * (c, d)).\n"); return FFT_FAILURE; } // Scale, combine and get result, add to output //// Set grid and block size dim3 threadsPerBlock(16, 4); dim3 BlocksPerGrid((B+15)/16, 1); //// call kernel function (buffer is zero-initialized inside) myAccumulate<<<BlocksPerGrid, threadsPerBlock>>>(4, result1, result2, scales, FX_re.array, FX_im.array, B); // Deallocate unified memory if (cudaFree(scales) != cudaSuccess) { fprintf(stderr, "!!!! unified memory free error (free scales vector)\n"); return FFT_FAILURE; } if (cudaFree(X_split) != cudaSuccess) { fprintf(stderr, "!!!! unified memory free error (free split result matrix)\n"); return FFT_FAILURE; } if (cudaFree(result1) != cudaSuccess) { fprintf(stderr, "!!!! unified memory free error (free result 1 Matrix)\n"); return FFT_FAILURE; } if (cudaFree(result2) != cudaSuccess) { fprintf(stderr, "!!!! 
unified memory free error (free result 2 Matrix)\n"); return FFT_FAILURE; } // Shutdown cublas status = cublasDestroy(handle); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! shutdown error (A)\n"); return FFT_FAILURE; } cudaDeviceSynchronize(); return FFT_SUCCESS; } __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im) { /* * Multifly every element of the input matrix with twiddle factor * Block and thread layout should be 2D * Re.element(i, j) [0 based] = xre * cos(2pi/N * i * j) + xim * sin(2pi/N * i * j) * Im.element(i, j) [0 based] = -xre * sin(2pi/N * i * j) + xim * cos(2pi/N * i * j) * */ // Calculate position (0 based) int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i < m && j < n){ // Per-thread local variables int index = j * m + i; float tw_re = cos(2 * PI / N * i * j); float tw_im = sin(2 * PI / N * i * j); float result_re = matrix_re[index] * tw_re + matrix_im[index] * tw_im; float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re; matrix_re[index] = result_re; matrix_im[index] = result_im; } } FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im) { FFT_S fft_status; if (N == 4) { return fft4(B, X_re, X_im, FX_re, FX_im); } // cublas variable declaration cublasStatus_t status; cublasHandle_t handle; // Scaling variables float alpha = 1.0f, beta = 0.0f; // Temporary variables for intermediate result swapping float* temp; // Initialize cublas status = cublasCreate(&handle); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS initialization error\n"); return FFT_FAILURE; } // Reshape the output matrix: (N -(Reshape)->4*(N/4)) * B FX_re.width = N / 4 * B; FX_re.height = 4; FX_im.width = N / 4 * B; FX_im.height = 4; // Transpose input matrix: (4*(N/4) -(Transpose)-> (N/4)*4) * B // Store temporary result first in buffer, then in FX_re.array and FX_im.array //// Real matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, X_re.array + j * N, 4, &beta, X_re.array + j * N, 4, buffer + j * N, N/4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose real input).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension (Note that the transpose happens batch-wisely) FX_re.height = N / 4; FX_re.width = B * 4; //// Imaginary for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, X_im.array + j * N, 4, &beta, X_im.array + j * N, 4, buffer + j * N, N/4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose imaginary input).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_im.height = N / 4; FX_im.width = B * 4; cudaDeviceSynchronize(); // Recursively call gfft function, not! using buffer matrix //// Call gfft, store result in buffer matrix fft_status = gfft(N / 4, 4 * B, FX_re, FX_im, FX_re, FX_im); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! 
Execution error (recursively call gfft).\n"); return FFT_FAILURE; } // Multiplication with twiddle factors //// Set grid and block size dim3 threadsPerBlock(4, 16); dim3 numBlocks(1, (N + 63)/64); // Make sure blocks are enough //// Call kernel function for (int j = 0; j < B; j++){ multiply_twiddle<<<numBlocks, threadsPerBlock>>>(N, N/4, 4, FX_re.array + j * N, FX_im.array + j * N); } cudaDeviceSynchronize(); // Transpose the matrix again // Store temporary result first in buffer, then in FX_re.array and FX_im.array //// Real matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, 4, N/4, &alpha, FX_re.array + j * N, N/4, &beta, FX_re.array + j * N, N/4, buffer + j * N, 4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose real).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension, note that the transpose happens per batch FX_re.height = 4; FX_re.width = N / 4 * B; //// Imaginary matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, 4, N/4, &alpha, FX_im.array + j * N, N/4, &beta, FX_im.array + j * N, N/4, buffer + j * N, 4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose imaginary).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_im.height = 4; FX_im.width = N / 4 * B; cudaDeviceSynchronize(); // Call fft4, not! using buffer matrix //// Call fft4, store result in buffer matrix fft_status = fft4(N / 4 * B, FX_re, FX_im, FX_re, FX_im); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Execution error (combine step calling fft4).\n"); return FFT_FAILURE; } // Do the final transpose to get the output //// Real matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, FX_re.array + j * N, 4, &beta, FX_re.array + j * N, 4, buffer + j * N, N/4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose real).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension FX_re.height = N / 4; FX_re.width = 4 * B; //// Imaginary matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, FX_im.array + j * N, 4, &beta, FX_im.array + j * N, 4, buffer + j * N, N/4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose imaginary).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_re.height = N / 4; FX_re.width = 4 * B; cudaDeviceSynchronize(); // Reshape back input and output matrix: (4*(N/4) --Reshape--> N) * B FX_re.width = B; FX_re.height = N; FX_im.width = B; FX_im.height = N; // Shutdown cublas status = cublasDestroy(handle); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! shutdown error (A)\n"); return FFT_FAILURE; } return FFT_SUCCESS; }
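The fft4/gfft pair above factors an N-point transform into 4-point stages, half-precision GEMMs against the F4 matrices, and a twiddle multiplication that follows the e^(-2*pi*i*k*t/N) sign convention. A hypothetical, stand-alone O(N^2) reference DFT such as the sketch below (all names illustrative; single vector, plain arrays instead of fft::MatrixF) is one way to validate the combined result:

// Naive reference DFT with the same sign convention as multiply_twiddle:
// X[k] = sum_t x[t] * (cos(2*pi*k*t/N) - i*sin(2*pi*k*t/N)).
#include <math.h>
#include <stdio.h>

static void reference_dft(const float *x_re, const float *x_im,
                          float *X_re, float *X_im, int n)
{
    const double two_pi = 2.0 * 3.14159265358979323846;
    for (int k = 0; k < n; k++) {
        double acc_re = 0.0, acc_im = 0.0;
        for (int t = 0; t < n; t++) {
            double ang = two_pi * k * t / n;
            double c = cos(ang), s = sin(ang);
            acc_re +=  x_re[t] * c + x_im[t] * s;
            acc_im += -x_re[t] * s + x_im[t] * c;
        }
        X_re[k] = (float)acc_re;
        X_im[k] = (float)acc_im;
    }
}

int main()
{
    // Same test signal as the driver above: x[i] = i (1-based), zero imaginary part.
    const int n = 256;
    static float x_re[n], x_im[n], X_re[n], X_im[n];
    for (int i = 0; i < n; i++) { x_re[i] = (float)(i + 1); x_im[i] = 0.0f; }

    reference_dft(x_re, x_im, X_re, X_im, n);
    for (int k = 0; k < 4; k++)
        printf("X[%d] = (%.6f, %.6f)\n", k + 1, X_re[k], X_im[k]);
    return 0;
}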
f3bc94ec0746d20cabe380cdd554c3469e9b4af6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while(0) #define Mask_width 5 #define Mask_radius Mask_width/2 //@@ INSERT CODE HERE #define OTILE_SIZE 12 #define ITILE_SIZE (OTILE_SIZE+(Mask_width-1)) #define NCHANNELS 3 __global__ void conv2D(float *img, float *out, const float *__restrict__ mask, int W, int H) { int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int ro = blockIdx.y*OTILE_SIZE + ty; int co = blockIdx.x*OTILE_SIZE + tx; int ri = ro - Mask_radius; int ci = co - Mask_radius; int i,j; float acc = 0.0f; __shared__ float iTile[ITILE_SIZE*ITILE_SIZE*NCHANNELS]; if(ri >= 0 && ci >= 0 && ri < H && ci < W){ iTile[(ty*ITILE_SIZE + tx)*NCHANNELS + tz] = img[(ri*W + ci)*NCHANNELS + tz]; } else{ iTile[(ty*ITILE_SIZE + tx)*NCHANNELS + tz] = 0.0f; } __syncthreads(); if( tx < OTILE_SIZE && ty < OTILE_SIZE){ for(i=0; i<Mask_width; i++){ for(j=0; j<Mask_width; j++){ acc += ( mask[i*Mask_width + j]*iTile[((ty+i)*ITILE_SIZE + tx + j)*NCHANNELS + tz] ); } } if(ro < H && co < W){ out[(ro*W + co)*NCHANNELS + tz] = acc; } } } int main(int argc, char* argv[]) { wbArg_t args; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char * inputImageFile; char * inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; float * hostMaskData; float * deviceInputImageData; float * deviceOutputImageData; float * deviceMaskData; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); inputMaskFile = wbArg_getInputFile(args, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *) wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == 5); /* mask height is fixed to 5 in this mp */ assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); assert(imageChannels == NCHANNELS); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); hipMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); hipMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), hipMemcpyHostToDevice); wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ INSERT CODE HERE dim3 numThreads(ITILE_SIZE, ITILE_SIZE, NCHANNELS); dim3 numBlocks( ((imageWidth-1)/OTILE_SIZE)+1, ((imageHeight-1)/OTILE_SIZE)+1, 1); hipLaunchKernelGGL(( conv2D), dim3(numBlocks), dim3(numThreads) , 0, 0, deviceInputImageData, deviceOutputImageData, 
deviceMaskData, imageWidth, imageHeight); hipDeviceSynchronize(); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); hipMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(args, outputImage); hipFree(deviceInputImageData); hipFree(deviceOutputImageData); hipFree(deviceMaskData); free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
f3bc94ec0746d20cabe380cdd554c3469e9b4af6.cu
#include <wb.h> #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while(0) #define Mask_width 5 #define Mask_radius Mask_width/2 //@@ INSERT CODE HERE #define OTILE_SIZE 12 #define ITILE_SIZE (OTILE_SIZE+(Mask_width-1)) #define NCHANNELS 3 __global__ void conv2D(float *img, float *out, const float *__restrict__ mask, int W, int H) { int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int ro = blockIdx.y*OTILE_SIZE + ty; int co = blockIdx.x*OTILE_SIZE + tx; int ri = ro - Mask_radius; int ci = co - Mask_radius; int i,j; float acc = 0.0f; __shared__ float iTile[ITILE_SIZE*ITILE_SIZE*NCHANNELS]; if(ri >= 0 && ci >= 0 && ri < H && ci < W){ iTile[(ty*ITILE_SIZE + tx)*NCHANNELS + tz] = img[(ri*W + ci)*NCHANNELS + tz]; } else{ iTile[(ty*ITILE_SIZE + tx)*NCHANNELS + tz] = 0.0f; } __syncthreads(); if( tx < OTILE_SIZE && ty < OTILE_SIZE){ for(i=0; i<Mask_width; i++){ for(j=0; j<Mask_width; j++){ acc += ( mask[i*Mask_width + j]*iTile[((ty+i)*ITILE_SIZE + tx + j)*NCHANNELS + tz] ); } } if(ro < H && co < W){ out[(ro*W + co)*NCHANNELS + tz] = acc; } } } int main(int argc, char* argv[]) { wbArg_t args; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char * inputImageFile; char * inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float * hostInputImageData; float * hostOutputImageData; float * hostMaskData; float * deviceInputImageData; float * deviceOutputImageData; float * deviceMaskData; args = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(args, 0); inputMaskFile = wbArg_getInputFile(args, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *) wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == 5); /* mask height is fixed to 5 in this mp */ assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); assert(imageChannels == NCHANNELS); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float)); cudaMalloc((void **) &deviceMaskData, maskRows * maskColumns * sizeof(float)); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); cudaMemcpy(deviceInputImageData, hostInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(deviceMaskData, hostMaskData, maskRows * maskColumns * sizeof(float), cudaMemcpyHostToDevice); wbTime_stop(Copy, "Copying data to the GPU"); wbTime_start(Compute, "Doing the computation on the GPU"); //@@ INSERT CODE HERE dim3 numThreads(ITILE_SIZE, ITILE_SIZE, NCHANNELS); dim3 numBlocks( ((imageWidth-1)/OTILE_SIZE)+1, ((imageHeight-1)/OTILE_SIZE)+1, 1); conv2D<<< numBlocks, numThreads >>>(deviceInputImageData, deviceOutputImageData, deviceMaskData, imageWidth, imageHeight); cudaDeviceSynchronize(); wbTime_stop(Compute, "Doing the computation on 
the GPU"); wbTime_start(Copy, "Copying data from the GPU"); cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(float), cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(args, outputImage); cudaFree(deviceInputImageData); cudaFree(deviceOutputImageData); cudaFree(deviceMaskData); free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); return 0; }
8fe727557ad466bfe45ec0c05bc7428b669af708.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> #include <stdio.h> #define TYPE1 double #define TYPE2 float #define TYPE3 float #define TYPE4 double double fun_ref( double x){ int k, n = 5; double t1; double d1 = 1.0; t1 = x; for ( k = 1; k <= n; k++ ){ d1 = 2.0 * d1; t1 = t1+ sin(d1 * x)/d1; } return t1; } __global__ void fun_gpu(double x[], double y[], int nthreads){ //y = fun(x) int tid = blockDim.x * blockIdx.x + threadIdx.x; int k, n = 5; if (tid < nthreads) { TYPE1 t1; TYPE2 d1 = 1.0; TYPE3 x_temp = x[tid]; t1 = x[tid]; for ( k = 1; k <= n; k++ ) { d1 = 2.0 * d1; TYPE4 sin_res = sin(d1 * x_temp); t1 = t1 + sin_res/d1; } y[tid] = t1; } } int main( int argc, char **argv) { int i,n = 1000000; double h, t1, t2, dppi; double s1; //cuda def hipEvent_t start, stop; float elapsedTime; double *d_x, *d_y, *h_x, *h_y ; size_t size = n*sizeof(double); h_x = (double*) malloc(size); h_y = (double*) malloc(size); hipMalloc(&d_x, size); hipMalloc(&d_y, size); t1 = -1.0; dppi = acos(t1); s1 = 0.0; t1 = 0.0; h = dppi / n; for ( i = 1; i <= n; i++){ h_x[i-1] = i * h; } /* Copy vectors from host memory to device memory */ hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice); int threads_per_block = 256; int block_count = (n + threads_per_block - 1)/threads_per_block; hipEventCreate(&start); hipEventRecord(start,0); for (int i =0;i < 10; i ++) hipLaunchKernelGGL(( fun_gpu), dim3(block_count), dim3(threads_per_block), 0, 0, d_x, d_y, n); hipDeviceSynchronize(); hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); printf("Elapsed time : %f ms\n" ,elapsedTime); hipMemcpy(h_y, d_y, size, hipMemcpyDeviceToHost); for ( i = 1; i <= n; i++) { t2 = h_y[i-1]; s1 = s1 + sqrt(h*h + (t2 - t1) * (t2 - t1)); t1 = t2; } double ref_value = 5.7957763224; printf("%.10f\n",s1); printf("abs err %.8f rel err %.8f\n", fabs(s1-ref_value), fabs((s1-ref_value)/ref_value) ); return 0; }
8fe727557ad466bfe45ec0c05bc7428b669af708.cu
#include <iostream> #include <math.h> #include <stdio.h> #define TYPE1 double #define TYPE2 float #define TYPE3 float #define TYPE4 double double fun_ref( double x){ int k, n = 5; double t1; double d1 = 1.0; t1 = x; for ( k = 1; k <= n; k++ ){ d1 = 2.0 * d1; t1 = t1+ sin(d1 * x)/d1; } return t1; } __global__ void fun_gpu(double x[], double y[], int nthreads){ //y = fun(x) int tid = blockDim.x * blockIdx.x + threadIdx.x; int k, n = 5; if (tid < nthreads) { TYPE1 t1; TYPE2 d1 = 1.0; TYPE3 x_temp = x[tid]; t1 = x[tid]; for ( k = 1; k <= n; k++ ) { d1 = 2.0 * d1; TYPE4 sin_res = sin(d1 * x_temp); t1 = t1 + sin_res/d1; } y[tid] = t1; } } int main( int argc, char **argv) { int i,n = 1000000; double h, t1, t2, dppi; double s1; //cuda def cudaEvent_t start, stop; float elapsedTime; double *d_x, *d_y, *h_x, *h_y ; size_t size = n*sizeof(double); h_x = (double*) malloc(size); h_y = (double*) malloc(size); cudaMalloc(&d_x, size); cudaMalloc(&d_y, size); t1 = -1.0; dppi = acos(t1); s1 = 0.0; t1 = 0.0; h = dppi / n; for ( i = 1; i <= n; i++){ h_x[i-1] = i * h; } /* Copy vectors from host memory to device memory */ cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice); int threads_per_block = 256; int block_count = (n + threads_per_block - 1)/threads_per_block; cudaEventCreate(&start); cudaEventRecord(start,0); for (int i =0;i < 10; i ++) fun_gpu<<<block_count, threads_per_block>>>(d_x, d_y, n); cudaDeviceSynchronize(); cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); printf("Elapsed time : %f ms\n" ,elapsedTime); cudaMemcpy(h_y, d_y, size, cudaMemcpyDeviceToHost); for ( i = 1; i <= n; i++) { t2 = h_y[i-1]; s1 = s1 + sqrt(h*h + (t2 - t1) * (t2 - t1)); t1 = t2; } double ref_value = 5.7957763224; printf("%.10f\n",s1); printf("abs err %.8f rel err %.8f\n", fabs(s1-ref_value), fabs((s1-ref_value)/ref_value) ); return 0; }
b40b1237895d005c3eebda6110c8c452f0dcdfdd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void callOperation(int *a, int *b, int *c, int n)
{
    int tidx = blockDim.x * blockIdx.x + threadIdx.x;
    int tidy = blockDim.y * blockIdx.y + threadIdx.y;
    if (tidx >= n || tidy >= n) {
        return;
    }
    int tid = tidx * n + tidy;
    if (a[tid] >= b[tid]) {
        c[tid] = a[tid];
    } else {
        c[tid] = b[tid];
    }
}
b40b1237895d005c3eebda6110c8c452f0dcdfdd.cu
#include "includes.h" __global__ void callOperation(int *a, int *b, int *c, int n) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; if (tidx >= n || tidy >= n) { return; } int tid = tidx * n + tidy; if (a[tid] >= b[tid]) { c[tid] = a[tid]; } else { c[tid] = b[tid]; } }
710e016f115b5cb403efe142cfa90b0d9b2776ff.hip
// !!! This is a file automatically generated by hipify!!! // // Created by lidan on 23/10/2020. // #include <helper_cuda.h> #include <hip/hip_runtime.h> #include <time.h> #include <stdint.h> #include <iostream> #include <omp.h> #include <random> #include <cxxopts.hpp> #include <sys/timeb.h> #include <math.h> #include <book.cuh> #define min(a, b) (a < b ? a : b) template<typename T> void swap(T* f,T* s) { T temp = *f ; *f = *s ; *s = temp ; } template<typename T> void reverse(T *arr,T n) // { T i=0,j=n-1; while(i<j) { std::swap(arr[i],arr[j]); i++; j--; } } template<typename T> void exchange(T *arr,int n,int i) { reverse(arr,i); reverse(arr+i,n-i); reverse(arr,n); } template<typename T> void merge(T* first, T* second, int size) { T* end = first+size ; while(first<second & second<end) { if(*first<=*second) { first++ ; }else{ T* s = second+1 ; while(*s < *first){ s++ ;} exchange(first,s-first,second-first); first += (s-second+1) ; second = s ; } } } template<typename T> void mergesort(T* t,unsigned int size) { if(size>2) { int split = size/2 ; mergesort(t,split) ; mergesort(t+split,size-split) ; merge(t,t+split,size) ; }else{ if(t[0]>t[1]) { swap(t,t+1) ; } } } __device__ unsigned int getIdx(dim3* threads, dim3* blocks) { int x; return threadIdx.x + threadIdx.y * (x = threads->x) + threadIdx.z * (x *= threads->y) + blockIdx.x * (x *= threads->z) + blockIdx.y * (x *= blocks->z) + blockIdx.z * (x *= blocks->y); } __device__ void gpu_bottomUpMerge(int * from ,int* to ,int start,int mid,int end) { int i = start ; int j = mid ; for(int k = start;k<end;k++) { if(i<mid && (j>= end || from[i]<from[j])) { to[k] = from[i] ; i++ ; }else{ to[k] = from[j] ; j++; } } } __global__ void gpu_mergesort(int *from ,int * to ,int size ,int width ,int slices ,dim3* Dthreads,dim3* Dblocks ) { unsigned int idx = getIdx(Dthreads,Dblocks) ; long start = idx * slices * width ; long mid , end ; for( long slice = 0 ; slice< slices;slice++) { if(start >= size) { break ; } mid = min(start + (width>>1),size) ; end = min(start + width,size) ; gpu_bottomUpMerge(from,to,start,mid,end) ; start += width ; } } void mergesort(int* array, int size,dim3 threadBlock,dim3 blockGrid) { int* Tdata ; int* Wdata ; // all threads for a block less than 1024 dim3* Dthreads ; // all block for a grid less than long*long dim3* Dblocks ; tm() ; HANDLE_ERROR(hipMalloc( (void**) &Tdata, size * sizeof(int) ) ) ; HANDLE_ERROR(hipMalloc( (void**) &Wdata , size * sizeof(int) ) ) ; HANDLE_ERROR(hipMemcpy( Tdata,array,size * sizeof(int),hipMemcpyHostToDevice ) ) ; // HandleError(hipMemcpy(Wdata,array,size * sizeof(int),hipMemcpyHostToDevice)) ; HANDLE_ERROR(hipMalloc( (void**) &Dthreads,sizeof(dim3) ) ) ; HANDLE_ERROR(hipMalloc( (void**) &Dblocks,sizeof(dim3) ) ); HANDLE_ERROR(hipMemcpy( Dthreads,&threadBlock, sizeof(dim3),hipMemcpyHostToDevice )) ; HANDLE_ERROR(hipMemcpy( Dblocks,&blockGrid, sizeof(dim3),hipMemcpyHostToDevice )) ; int* first_T_data = Tdata ; int* first_W_data = Wdata ; long n_threads = threadBlock.x * threadBlock.y * threadBlock.z * blockGrid.x * blockGrid.y * blockGrid.z ; int width = 2 ; while( width < (size<<1)) { int slice = size /((n_threads) * width ) + 1 ; hipLaunchKernelGGL(( gpu_mergesort), dim3(blockGrid),dim3(threadBlock), 0, 0, first_T_data,first_W_data,size,width,slice,Dthreads,Dblocks) ; first_T_data = (first_T_data==Tdata)?Wdata:Tdata ; first_W_data = (first_W_data==Wdata)?Tdata:Wdata ; width <<= 1 ; } tm() ; HANDLE_ERROR(hipMemcpy(array,first_T_data,size * sizeof(int),hipMemcpyDeviceToHost)) ; hipFree(first_T_data) ; 
hipFree(first_W_data) ; } int main(void ) { int t[10] = {10,7,7,3,8,8,2,3,9,10} ; // mergesort(t,10) ; mergesort(t,10,dim3(10,10),dim3(1)) ; for(int i = 0;i < 10 ;i++) { std::cout<<t[i] ; } }
710e016f115b5cb403efe142cfa90b0d9b2776ff.cu
// // Created by lidan on 23/10/2020. // #include <helper_cuda.h> #include <cuda_runtime.h> #include <time.h> #include <stdint.h> #include <iostream> #include <omp.h> #include <random> #include <cxxopts.hpp> #include <sys/timeb.h> #include <math.h> #include <book.cuh> #define min(a, b) (a < b ? a : b) template<typename T> void swap(T* f,T* s) { T temp = *f ; *f = *s ; *s = temp ; } template<typename T> void reverse(T *arr,T n) //逆序操作 { T i=0,j=n-1; while(i<j) { std::swap(arr[i],arr[j]); i++; j--; } } template<typename T> void exchange(T *arr,int n,int i) { reverse(arr,i); reverse(arr+i,n-i); reverse(arr,n); } template<typename T> void merge(T* first, T* second, int size) { T* end = first+size ; while(first<second & second<end) { if(*first<=*second) { first++ ; }else{ T* s = second+1 ; while(*s < *first){ s++ ;} exchange(first,s-first,second-first); first += (s-second+1) ; second = s ; } } } template<typename T> void mergesort(T* t,unsigned int size) { if(size>2) { int split = size/2 ; mergesort(t,split) ; mergesort(t+split,size-split) ; merge(t,t+split,size) ; }else{ if(t[0]>t[1]) { swap(t,t+1) ; } } } __device__ unsigned int getIdx(dim3* threads, dim3* blocks) { int x; return threadIdx.x + threadIdx.y * (x = threads->x) + threadIdx.z * (x *= threads->y) + blockIdx.x * (x *= threads->z) + blockIdx.y * (x *= blocks->z) + blockIdx.z * (x *= blocks->y); } __device__ void gpu_bottomUpMerge(int * from ,int* to ,int start,int mid,int end) { int i = start ; int j = mid ; for(int k = start;k<end;k++) { if(i<mid && (j>= end || from[i]<from[j])) { to[k] = from[i] ; i++ ; }else{ to[k] = from[j] ; j++; } } } __global__ void gpu_mergesort(int *from ,int * to ,int size ,int width ,int slices ,dim3* Dthreads,dim3* Dblocks ) { unsigned int idx = getIdx(Dthreads,Dblocks) ; long start = idx * slices * width ; long mid , end ; for( long slice = 0 ; slice< slices;slice++) { if(start >= size) { break ; } mid = min(start + (width>>1),size) ; end = min(start + width,size) ; gpu_bottomUpMerge(from,to,start,mid,end) ; start += width ; } } void mergesort(int* array, int size,dim3 threadBlock,dim3 blockGrid) { int* Tdata ; int* Wdata ; // all threads for a block less than 1024 dim3* Dthreads ; // all block for a grid less than long*long dim3* Dblocks ; tm() ; HANDLE_ERROR(cudaMalloc( (void**) &Tdata, size * sizeof(int) ) ) ; HANDLE_ERROR(cudaMalloc( (void**) &Wdata , size * sizeof(int) ) ) ; HANDLE_ERROR(cudaMemcpy( Tdata,array,size * sizeof(int),cudaMemcpyHostToDevice ) ) ; // HandleError(cudaMemcpy(Wdata,array,size * sizeof(int),cudaMemcpyHostToDevice)) ; HANDLE_ERROR(cudaMalloc( (void**) &Dthreads,sizeof(dim3) ) ) ; HANDLE_ERROR(cudaMalloc( (void**) &Dblocks,sizeof(dim3) ) ); HANDLE_ERROR(cudaMemcpy( Dthreads,&threadBlock, sizeof(dim3),cudaMemcpyHostToDevice )) ; HANDLE_ERROR(cudaMemcpy( Dblocks,&blockGrid, sizeof(dim3),cudaMemcpyHostToDevice )) ; int* first_T_data = Tdata ; int* first_W_data = Wdata ; long n_threads = threadBlock.x * threadBlock.y * threadBlock.z * blockGrid.x * blockGrid.y * blockGrid.z ; int width = 2 ; while( width < (size<<1)) { int slice = size /((n_threads) * width ) + 1 ; gpu_mergesort<<<blockGrid,threadBlock>>>(first_T_data,first_W_data,size,width,slice,Dthreads,Dblocks) ; first_T_data = (first_T_data==Tdata)?Wdata:Tdata ; first_W_data = (first_W_data==Wdata)?Tdata:Wdata ; width <<= 1 ; } tm() ; HANDLE_ERROR(cudaMemcpy(array,first_T_data,size * sizeof(int),cudaMemcpyDeviceToHost)) ; cudaFree(first_T_data) ; cudaFree(first_W_data) ; } int main(void ) { int t[10] = {10,7,7,3,8,8,2,3,9,10} 
; // mergesort(t,10) ; mergesort(t,10,dim3(10,10),dim3(1)) ; for(int i = 0;i < 10 ;i++) { std::cout<<t[i] ; } }
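The GPU sort above repeatedly doubles `width` and lets each thread merge `slices` adjacent runs with gpu_bottomUpMerge, ping-ponging between Tdata and Wdata. A hypothetical serial reference that performs the same bottom-up merging, handy for validating the device output, might look like this:

// CPU bottom-up mergesort mirroring the width-doubling scheme used on the GPU;
// std::merge plays the role of gpu_bottomUpMerge.
#include <algorithm>
#include <iostream>
#include <vector>

static void bottom_up_mergesort_ref(int *a, int n)
{
    std::vector<int> buf(n);
    int *from = a, *to = buf.data();
    for (int width = 1; width < n; width <<= 1) {
        for (int start = 0; start < n; start += 2 * width) {
            int mid = std::min(start + width, n);
            int end = std::min(start + 2 * width, n);
            std::merge(from + start, from + mid, from + mid, from + end, to + start);
        }
        std::swap(from, to);   // merged runs now live in the other buffer
    }
    if (from != a) std::copy(from, from + n, a);
}

int main()
{
    int t[10] = {10, 7, 7, 3, 8, 8, 2, 3, 9, 10};   // same test data as above
    bottom_up_mergesort_ref(t, 10);
    for (int i = 0; i < 10; i++) std::cout << t[i] << " ";
    std::cout << "\n";
    return 0;
}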
efc6f1bf671b53392d7756d8978a54711b65c5a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include <stdlib.h> #include <sys/time.h> double get_useconds() { struct timeval tp; gettimeofday(&tp, NULL); return (double) ((tp.tv_sec * (1e+6)) + tp.tv_usec); } __global__ void fillmem(int *index,int hwsize,int unit,int mode,int param) { int tx = blockIdx.x*blockDim.x+threadIdx.x; int nb = tx/hwsize; int base = nb*unit; int offset =base ; if(mode==0){ offset += tx%hwsize; } else if(mode==1){ offset += 0;// * (unit/hwsize); } else if(mode==2){ offset += tx%hwsize + param; } index[tx]=offset; } const int elesize = 16; const int kernelloop = 10; struct udata { char s[elesize]; }; __global__ void bandwidth_kernel(unsigned int *time,int *index,char *data,int round,int rowsize,int zero) { int tx = blockIdx.x*blockDim.x+threadIdx.x; udata ud; int pos; __syncthreads(); volatile unsigned int start_time = clock(); #pragma unroll for(int j=0;j<kernelloop;j++){ pos = index[tx]; for(int i=0;i<round;i++){ ud = ((udata*)(data+pos))[0]; pos += rowsize + ((int)(ud.s) & zero); // pos += rowsize ; } } __syncthreads(); volatile unsigned int end_time = clock(); time[tx] = end_time - start_time; if(zero > 0) index[0]+=pos; } struct cu16 { char s[16]; }; struct cu8 { char s[8]; }; template<int mode> __global__ void memcpy_kernel(char *src,char *dst,int step) { int tx = blockIdx.x*blockDim.x+threadIdx.x; double2 *d2src=(double2*)src,*d2dst=(double2*)dst; double *dsrc = (double*)src, *ddst = (double*)dst; switch(mode) { case 0: dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; break; case 1: d2dst[tx]=d2src[tx]; break; case 2: ddst[tx]=dsrc[tx]; tx+=step;ddst[tx]=dsrc[tx]; } } __global__ void memcpy_kernel2(double2 *src,double2 *dst) { int tx = blockIdx.x*blockDim.x+threadIdx.x; dst[tx]=src[tx]; } void testbandwidth(int zero) { int nMode = 4; const int loop = 10; unsigned int blocksize = 1024; unsigned int gridsize = 15*128*16; unsigned int totalmem = 16*gridsize*blocksize+1024; hipDeviceProp_t props; CUDA_SAFE_CALL(hipSetDevice(0)); CUDA_SAFE_CALL(hipGetDeviceProperties(&props, 0)); unsigned int gmem_total = props.totalGlobalMem; if(totalmem*2 > gmem_total){ fprintf(stderr,"Memory exceed!\n"); return; } /* unsigned int gridsize_x = 1024; unsigned int gridsize_y = totalmem/(gridsize_x*blocksize); dim3 gridsize(gridsize_x,gridsize_y); */ char *src_gpu,*dst_gpu; CUDA_SAFE_CALL(hipMalloc((void**)&src_gpu, totalmem)); CUDA_SAFE_CALL(hipMemset(src_gpu, zero, totalmem)); CUDA_SAFE_CALL(hipMalloc((void**)&dst_gpu, totalmem)); printf("gridsize=%d,blocksize=%d\n",gridsize,blocksize); // printf("gridsize=(%d,%d),blocksize=%d\n",gridsize_x,gridsize_y,blocksize); for(int mode=0;mode<nMode;mode++){ printf("\nmode#%d\n",mode); printf("offset(B)\ttime(ms)\tmemsize(MB)\tbandwidth(GB/s)\n"); for(int offset=0;offset<512;offset+=16){ char *src = src_gpu+offset; char *dst = dst_gpu+offset; hipDeviceSynchronize(); double t0 = get_useconds(); switch(mode){ case 0: for(int i=0;i<loop;i++) hipMemcpy(dst,src,totalmem,hipMemcpyDeviceToDevice); break; case 1: for(int i=0;i<loop;i++) hipLaunchKernelGGL(( memcpy_kernel<0>), dim3(gridsize),dim3(blocksize), 0, 0, src,dst,gridsize*blocksize); break; case 2: for(int i=0;i<loop;i++) hipLaunchKernelGGL(( memcpy_kernel<1>), dim3(gridsize),dim3(blocksize), 0, 0, src,dst,gridsize*blocksize); break; case 3: for(int i=0;i<loop;i++) 
hipLaunchKernelGGL(( memcpy_kernel<2>), dim3(gridsize),dim3(blocksize), 0, 0, src,dst,gridsize*blocksize); break; } hipDeviceSynchronize(); double tt = get_useconds()-t0; char pos = 0; CUDA_SAFE_CALL(hipMemcpy(&pos,dst,sizeof(char),hipMemcpyDeviceToHost)); totalmem+=pos; if(pos!=0){ fprintf(stderr,"Error!\n"); exit(-1); } printf("%d\t%.2f\t%d\t%.2f\n",offset,tt/(loop*1000),totalmem/(1024*1024),2.0*loop*totalmem/(1000*tt)); } } CUDA_SAFE_CALL(hipFree(src_gpu)); CUDA_SAFE_CALL(hipFree(dst_gpu)); } int main(int argc,char *argv[]) { testbandwidth(argc>10); return 0; } int testThroughput(int argc, char ** argv) { const int blocksize = 512; const int data_size_unit = 1024; const int loop = 3; const int nmode = 3; hipDeviceProp_t props; CUDA_SAFE_CALL(hipSetDevice(0)); CUDA_SAFE_CALL(hipGetDeviceProperties(&props, 0)); size_t gmem_total = props.totalGlobalMem; int n_sm = props.multiProcessorCount; int n_hwsize = props.warpSize/2; int freq = props.clockRate; for(int gridsize = n_sm;gridsize<320*n_sm;gridsize*=2){ char * data_gpu; int *index_gpu; unsigned int *time_gpu; int data_size_max = gmem_total*3/4; CUDA_SAFE_CALL(hipMalloc((void**)&data_gpu, data_size_max)); CUDA_SAFE_CALL(hipMemset(data_gpu, 0, data_size_max)); CUDA_SAFE_CALL(hipMalloc((void**)&index_gpu, sizeof(int)*gridsize*blocksize)); CUDA_SAFE_CALL(hipMemset(index_gpu, 0, sizeof(int)*gridsize*blocksize)); CUDA_SAFE_CALL(hipMalloc((void**)&time_gpu, sizeof(int)*gridsize*blocksize)); CUDA_SAFE_CALL(hipMemset(time_gpu, 0, sizeof(int)*gridsize*blocksize)); int data_rowsize = data_size_unit*(gridsize*blocksize/n_hwsize); int round = data_size_max / data_rowsize; if(round <= 0) break; printf("sm=%d,half warpsize=%d,gridsize=%d,blocksize=%d\n",n_sm,n_hwsize,gridsize,blocksize); printf("data unit size=%d,data_rowsize=%d,data_size_max=%d,round=%d\n",data_size_unit,data_rowsize,data_size_max,round); for(int mode=0;mode<nmode-1;mode++){ hipLaunchKernelGGL(( fillmem), dim3(gridsize),dim3(blocksize), 0, 0, index_gpu,n_hwsize,data_size_unit,mode,0); hipDeviceSynchronize(); double t0 = get_useconds(); for(int i=0;i<loop;i++){ hipLaunchKernelGGL(( bandwidth_kernel), dim3(gridsize),dim3(blocksize), 0, 0, time_gpu,index_gpu,data_gpu,round,data_rowsize,0); } hipDeviceSynchronize(); double tt = get_useconds()-t0; unsigned int blocktime = 0; CUDA_SAFE_CALL(hipMemcpy(&blocktime,time_gpu,sizeof(int),hipMemcpyDeviceToHost)); double nstime = (double)blocktime / freq * 1000000.; unsigned long long totalsize= loop*kernelloop*elesize*blocksize*gridsize; printf("mode#%d: time %.0f ms, throughoutput=%.1f GB/s, time %.0f ms,bandwith=%.1f GB/s\n",mode,tt/1000,1.0*totalsize/tt,nstime/1000000,1000.0*kernelloop*elesize*blocksize*gridsize/nstime); } for(int param = 0;param<256;param+=10){ hipLaunchKernelGGL(( fillmem), dim3(gridsize),dim3(blocksize), 0, 0, index_gpu,n_hwsize,data_size_unit,nmode-1,param); hipDeviceSynchronize(); double t0 = get_useconds(); for(int i=0;i<loop;i++){ hipLaunchKernelGGL(( bandwidth_kernel), dim3(gridsize),dim3(blocksize), 0, 0, time_gpu,index_gpu,data_gpu,round,data_rowsize,0); } hipDeviceSynchronize(); double tt = get_useconds()-t0; unsigned int blocktime = 0; CUDA_SAFE_CALL(hipMemcpy(&blocktime,time_gpu,sizeof(int),hipMemcpyDeviceToHost)); double nstime = (double)blocktime / freq * 1000000.; unsigned long long totalsize= loop*kernelloop*elesize*blocksize*gridsize; printf("offset=%d,gridsize=%d: time %.0f ms, throughoutput=%.1f GB/s, time %.0f ms,bandwith=%.1f 
GB/s\n",param,gridsize,tt/1000,1.0*totalsize/tt,nstime/1000000,1000.0*kernelloop*elesize*blocksize*gridsize/nstime); } CUDA_SAFE_CALL(hipFree(index_gpu)); CUDA_SAFE_CALL(hipFree(data_gpu)); CUDA_SAFE_CALL(hipFree(time_gpu)); } return 0; }
efc6f1bf671b53392d7756d8978a54711b65c5a4.cu
#include "utils.h" #include <stdlib.h> #include <sys/time.h> double get_useconds() { struct timeval tp; gettimeofday(&tp, NULL); return (double) ((tp.tv_sec * (1e+6)) + tp.tv_usec); } __global__ void fillmem(int *index,int hwsize,int unit,int mode,int param) { int tx = blockIdx.x*blockDim.x+threadIdx.x; int nb = tx/hwsize; int base = nb*unit; int offset =base ; if(mode==0){ offset += tx%hwsize; } else if(mode==1){ offset += 0;// * (unit/hwsize); } else if(mode==2){ offset += tx%hwsize + param; } index[tx]=offset; } const int elesize = 16; const int kernelloop = 10; struct udata { char s[elesize]; }; __global__ void bandwidth_kernel(unsigned int *time,int *index,char *data,int round,int rowsize,int zero) { int tx = blockIdx.x*blockDim.x+threadIdx.x; udata ud; int pos; __syncthreads(); volatile unsigned int start_time = clock(); #pragma unroll for(int j=0;j<kernelloop;j++){ pos = index[tx]; for(int i=0;i<round;i++){ ud = ((udata*)(data+pos))[0]; pos += rowsize + ((int)(ud.s) & zero); // pos += rowsize ; } } __syncthreads(); volatile unsigned int end_time = clock(); time[tx] = end_time - start_time; if(zero > 0) index[0]+=pos; } struct cu16 { char s[16]; }; struct cu8 { char s[8]; }; template<int mode> __global__ void memcpy_kernel(char *src,char *dst,int step) { int tx = blockIdx.x*blockDim.x+threadIdx.x; double2 *d2src=(double2*)src,*d2dst=(double2*)dst; double *dsrc = (double*)src, *ddst = (double*)dst; switch(mode) { case 0: dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; tx+=step;dst[tx] = src[tx]; break; case 1: d2dst[tx]=d2src[tx]; break; case 2: ddst[tx]=dsrc[tx]; tx+=step;ddst[tx]=dsrc[tx]; } } __global__ void memcpy_kernel2(double2 *src,double2 *dst) { int tx = blockIdx.x*blockDim.x+threadIdx.x; dst[tx]=src[tx]; } void testbandwidth(int zero) { int nMode = 4; const int loop = 10; unsigned int blocksize = 1024; unsigned int gridsize = 15*128*16; unsigned int totalmem = 16*gridsize*blocksize+1024; cudaDeviceProp props; CUDA_SAFE_CALL(cudaSetDevice(0)); CUDA_SAFE_CALL(cudaGetDeviceProperties(&props, 0)); unsigned int gmem_total = props.totalGlobalMem; if(totalmem*2 > gmem_total){ fprintf(stderr,"Memory exceed!\n"); return; } /* unsigned int gridsize_x = 1024; unsigned int gridsize_y = totalmem/(gridsize_x*blocksize); dim3 gridsize(gridsize_x,gridsize_y); */ char *src_gpu,*dst_gpu; CUDA_SAFE_CALL(cudaMalloc((void**)&src_gpu, totalmem)); CUDA_SAFE_CALL(cudaMemset(src_gpu, zero, totalmem)); CUDA_SAFE_CALL(cudaMalloc((void**)&dst_gpu, totalmem)); printf("gridsize=%d,blocksize=%d\n",gridsize,blocksize); // printf("gridsize=(%d,%d),blocksize=%d\n",gridsize_x,gridsize_y,blocksize); for(int mode=0;mode<nMode;mode++){ printf("\nmode#%d\n",mode); printf("offset(B)\ttime(ms)\tmemsize(MB)\tbandwidth(GB/s)\n"); for(int offset=0;offset<512;offset+=16){ char *src = src_gpu+offset; char *dst = dst_gpu+offset; cudaThreadSynchronize(); double t0 = get_useconds(); switch(mode){ case 0: for(int i=0;i<loop;i++) cudaMemcpy(dst,src,totalmem,cudaMemcpyDeviceToDevice); break; case 1: for(int i=0;i<loop;i++) memcpy_kernel<0><<<gridsize,blocksize>>>(src,dst,gridsize*blocksize); break; case 2: for(int i=0;i<loop;i++) memcpy_kernel<1><<<gridsize,blocksize>>>(src,dst,gridsize*blocksize); break; case 3: for(int i=0;i<loop;i++) memcpy_kernel<2><<<gridsize,blocksize>>>(src,dst,gridsize*blocksize); break; } cudaThreadSynchronize(); double tt = get_useconds()-t0; char pos = 0; 
CUDA_SAFE_CALL(cudaMemcpy(&pos,dst,sizeof(char),cudaMemcpyDeviceToHost)); totalmem+=pos; if(pos!=0){ fprintf(stderr,"Error!\n"); exit(-1); } printf("%d\t%.2f\t%d\t%.2f\n",offset,tt/(loop*1000),totalmem/(1024*1024),2.0*loop*totalmem/(1000*tt)); } } CUDA_SAFE_CALL(cudaFree(src_gpu)); CUDA_SAFE_CALL(cudaFree(dst_gpu)); } int main(int argc,char *argv[]) { testbandwidth(argc>10); return 0; } int testThroughput(int argc, char ** argv) { const int blocksize = 512; const int data_size_unit = 1024; const int loop = 3; const int nmode = 3; cudaDeviceProp props; CUDA_SAFE_CALL(cudaSetDevice(0)); CUDA_SAFE_CALL(cudaGetDeviceProperties(&props, 0)); size_t gmem_total = props.totalGlobalMem; int n_sm = props.multiProcessorCount; int n_hwsize = props.warpSize/2; int freq = props.clockRate; for(int gridsize = n_sm;gridsize<320*n_sm;gridsize*=2){ char * data_gpu; int *index_gpu; unsigned int *time_gpu; int data_size_max = gmem_total*3/4; CUDA_SAFE_CALL(cudaMalloc((void**)&data_gpu, data_size_max)); CUDA_SAFE_CALL(cudaMemset(data_gpu, 0, data_size_max)); CUDA_SAFE_CALL(cudaMalloc((void**)&index_gpu, sizeof(int)*gridsize*blocksize)); CUDA_SAFE_CALL(cudaMemset(index_gpu, 0, sizeof(int)*gridsize*blocksize)); CUDA_SAFE_CALL(cudaMalloc((void**)&time_gpu, sizeof(int)*gridsize*blocksize)); CUDA_SAFE_CALL(cudaMemset(time_gpu, 0, sizeof(int)*gridsize*blocksize)); int data_rowsize = data_size_unit*(gridsize*blocksize/n_hwsize); int round = data_size_max / data_rowsize; if(round <= 0) break; printf("sm=%d,half warpsize=%d,gridsize=%d,blocksize=%d\n",n_sm,n_hwsize,gridsize,blocksize); printf("data unit size=%d,data_rowsize=%d,data_size_max=%d,round=%d\n",data_size_unit,data_rowsize,data_size_max,round); for(int mode=0;mode<nmode-1;mode++){ fillmem<<<gridsize,blocksize>>>(index_gpu,n_hwsize,data_size_unit,mode,0); cudaThreadSynchronize(); double t0 = get_useconds(); for(int i=0;i<loop;i++){ bandwidth_kernel<<<gridsize,blocksize>>>(time_gpu,index_gpu,data_gpu,round,data_rowsize,0); } cudaThreadSynchronize(); double tt = get_useconds()-t0; unsigned int blocktime = 0; CUDA_SAFE_CALL(cudaMemcpy(&blocktime,time_gpu,sizeof(int),cudaMemcpyDeviceToHost)); double nstime = (double)blocktime / freq * 1000000.; unsigned long long totalsize= loop*kernelloop*elesize*blocksize*gridsize; printf("mode#%d: time %.0f ms, throughput=%.1f GB/s, time %.0f ms,bandwidth=%.1f GB/s\n",mode,tt/1000,1.0*totalsize/tt,nstime/1000000,1000.0*kernelloop*elesize*blocksize*gridsize/nstime); } for(int param = 0;param<256;param+=10){ fillmem<<<gridsize,blocksize>>>(index_gpu,n_hwsize,data_size_unit,nmode-1,param); cudaThreadSynchronize(); double t0 = get_useconds(); for(int i=0;i<loop;i++){ bandwidth_kernel<<<gridsize,blocksize>>>(time_gpu,index_gpu,data_gpu,round,data_rowsize,0); } cudaThreadSynchronize(); double tt = get_useconds()-t0; unsigned int blocktime = 0; CUDA_SAFE_CALL(cudaMemcpy(&blocktime,time_gpu,sizeof(int),cudaMemcpyDeviceToHost)); double nstime = (double)blocktime / freq * 1000000.; unsigned long long totalsize= loop*kernelloop*elesize*blocksize*gridsize; printf("offset=%d,gridsize=%d: time %.0f ms, throughput=%.1f GB/s, time %.0f ms,bandwidth=%.1f GB/s\n",param,gridsize,tt/1000,1.0*totalsize/tt,nstime/1000000,1000.0*kernelloop*elesize*blocksize*gridsize/nstime); } CUDA_SAFE_CALL(cudaFree(index_gpu)); CUDA_SAFE_CALL(cudaFree(data_gpu)); CUDA_SAFE_CALL(cudaFree(time_gpu)); } return 0; }
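Note on the timing method used above: the benchmark brackets cudaMemcpy and the memcpy_kernel launches with gettimeofday() plus the long-deprecated cudaThreadSynchronize(). A minimal, self-contained sketch of the same device-to-device measurement using CUDA events is shown below; the buffer size, iteration count, and variable names are illustrative and not taken from the file.

// Minimal sketch: time a device-to-device copy with CUDA events.
// Buffer size and iteration count are arbitrary choices for illustration.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const size_t bytes = 256u << 20;   // 256 MiB
    const int iters = 10;
    char *src = nullptr, *dst = nullptr;
    cudaMalloc((void**)&src, bytes);
    cudaMalloc((void**)&dst, bytes);
    cudaMemset(src, 0, bytes);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i)
        cudaMemcpy(dst, src, bytes, cudaMemcpyDeviceToDevice);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);          // elapsed time in milliseconds
    // Each copy both reads and writes `bytes`, hence the factor of 2.
    double gbps = 2.0 * bytes * iters / (ms * 1e6);
    printf("avg %.3f ms/copy, ~%.1f GB/s\n", ms / iters, gbps);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(src);
    cudaFree(dst);
    return 0;
}

Because cudaEventElapsedTime reports the time between the two recorded events on the device timeline, no host-side clock or extra synchronization around the timed region is needed.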
81c4303617600185046285bd34ba380dec57d8ff.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> inline double seconds() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp, &tzp); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } int total_size = 1024 * 1024; // 1MB void test(int size) { double iStart, iElaps; int num = total_size / size; float *d[num]; iStart = seconds(); for (int i = 0; i < num; i++) { hipMalloc((float **)&d[i], size); } iElaps = seconds() - iStart; printf("hipMalloc(%d) x %d Time elapsed %f sec\n", size, num, iElaps); iStart = seconds(); for (int i = 0; i < num; i++) { hipFree(d[i]); } iElaps = seconds() - iStart; printf("hipFree(%d) x %d Time elapsed %f sec\n", size, num, iElaps); } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("Using Device %d: %s\n", dev, deviceProp.name); hipSetDevice(dev); int size = atoi(argv[1]); test(size); return (0); }
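The HIP version above differs from the CUDA original that follows only in the header and the runtime-call prefixes. A small compile-time shim along the following lines is one common way to keep a single source building against either runtime; the gpu* names are my own, and the HIP platform macros shown here have varied between HIP releases, so treat this as a sketch rather than a drop-in header.

// gpu_compat.h -- illustrative shim covering only the calls used in this test.
#if defined(__HIP_PLATFORM_AMD__) || defined(__HIP_PLATFORM_NVIDIA__)
  #include <hip/hip_runtime.h>
  #define gpuMalloc               hipMalloc
  #define gpuFree                 hipFree
  #define gpuSetDevice            hipSetDevice
  #define gpuGetDeviceProperties  hipGetDeviceProperties
  #define gpuDeviceProp           hipDeviceProp_t
#else
  #include <cuda_runtime.h>
  #define gpuMalloc               cudaMalloc
  #define gpuFree                 cudaFree
  #define gpuSetDevice            cudaSetDevice
  #define gpuGetDeviceProperties  cudaGetDeviceProperties
  #define gpuDeviceProp           cudaDeviceProp
#endif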
81c4303617600185046285bd34ba380dec57d8ff.cu
#include <cuda_runtime.h> #include <stdio.h> #include <sys/time.h> inline double seconds() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp, &tzp); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } int total_size = 1024 * 1024; // 1MB void test(int size) { double iStart, iElaps; int num = total_size / size; float *d[num]; iStart = seconds(); for (int i = 0; i < num; i++) { cudaMalloc((float **)&d[i], size); } iElaps = seconds() - iStart; printf("cudaMalloc(%d) x %d Time elapsed %f sec\n", size, num, iElaps); iStart = seconds(); for (int i = 0; i < num; i++) { cudaFree(d[i]); } iElaps = seconds() - iStart; printf("cudaFree(%d) x %d Time elapsed %f sec\n", size, num, iElaps); } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("Using Device %d: %s\n", dev, deviceProp.name); cudaSetDevice(dev); int size = atoi(argv[1]); test(size); return (0); }
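Neither version of test() checks the status returned by the allocation calls, so an out-of-memory condition or an invalid size argument is silently included in the timing. A hedged sketch of the same loop with basic checking follows; CHECK_CUDA and test_checked are my own names, and the timing is omitted for brevity.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <vector>

#define CHECK_CUDA(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,              \
              cudaGetErrorString(err_));                              \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

// Allocate and free `total_size / size` blocks like test(), but fail loudly
// on any runtime error instead of timing silent failures.
void test_checked(int size, int total_size) {
    if (size <= 0 || size > total_size) { fprintf(stderr, "bad size\n"); return; }
    int num = total_size / size;
    std::vector<float*> d(num, nullptr);   // avoids the variable-length array
    for (int i = 0; i < num; ++i)
        CHECK_CUDA(cudaMalloc(&d[i], size));
    for (int i = 0; i < num; ++i)
        CHECK_CUDA(cudaFree(d[i]));
}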
4ae423edc7d54ab2703b8d77d996620023dc5732.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/magma_zmcsrcompressor_gpu.cu, normal z -> d, Tue Aug 30 09:38:46 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // copy nonzeros into new structure __global__ void magma_dmcsrgpu_kernel1( int num_rows, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double zero = MAGMA_D_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correctr rowpointer! this is nn_z in this row! B_rowptr[ row ] = new_location-start; } } // generate a valid rowpointer __global__ void magma_dmcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_dmcsrgpu_kernel3( int num_rows, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; double zero = MAGMA_D_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param[in,out] A magma_d_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_dmcsrcompressor_gpu( magma_d_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; magma_d_matrix B={Magma_CSR}, B2={Magma_CSR}; magma_d_matrix dA={Magma_CSR}, CSRA={Magma_CSR}; magma_index_t *cputmp = NULL; if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) { CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 )); CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 )); magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue ); dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) ); // copying the nonzeros into B and write in B.drow how many there are hipLaunchKernelGGL(( magma_dmcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() , A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol ); // correct the row pointer dim3 grid2( 1, 1, 1); hipLaunchKernelGGL(( magma_dmcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue->cuda_stream() , A->num_rows, B.drow, A->drow ); // access the true number of nonzeros CHECK( magma_index_malloc_cpu( &cputmp, 1 )); magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1, queue ); A->nnz = (magma_int_t) cputmp[0]; // reallocate with right size CHECK( magma_dmalloc( &B.dval, A->nnz )); CHECK( magma_index_malloc( &B.dcol, A->nnz )); // copy correct values back hipLaunchKernelGGL(( magma_dmcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() , A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol ); magma_free( A->dcol ); magma_free( A->dval ); A->dcol = B.dcol; A->dval = B.dval; } else { magma_storage_t A_storage = A->storage_type; magma_location_t A_location = A->memory_location; CHECK( magma_dmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue )); CHECK( magma_dmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue )); CHECK( magma_dmcsrcompressor_gpu( &dA, queue )); magma_dmfree( &dA, queue ); magma_dmfree( A, queue ); CHECK( magma_dmtransfer( dA, &CSRA, Magma_DEV, A_location, queue )); CHECK( magma_dmconvert( CSRA, A, Magma_CSR, A_storage, queue )); magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); } cleanup: magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); magma_free( B2.drow ); magma_free( B.drow ); return info; }
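Apart from the header and type renames, the main rewrite hipify performed in this file is the kernel-launch syntax: kernel<<<grid, block, shmem, stream>>>(args) becomes hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args). A trivial, self-contained illustration of that correspondence follows; the scale kernel and launch configuration are my own, not MAGMA code.

#include <hip/hip_runtime.h>

__global__ void scale(double *x, double a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(double *d_x, double a, int n, hipStream_t stream) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form:  scale<<<grid, block, 0, stream>>>(d_x, a, n);
    // Equivalent HIP form produced by hipify:
    hipLaunchKernelGGL(scale, grid, block, 0, stream, d_x, a, n);
}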
4ae423edc7d54ab2703b8d77d996620023dc5732.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/magma_zmcsrcompressor_gpu.cu, normal z -> d, Tue Aug 30 09:38:46 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // copy nonzeros into new structure __global__ void magma_dmcsrgpu_kernel1( int num_rows, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double zero = MAGMA_D_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correctr rowpointer! this is nn_z in this row! B_rowptr[ row ] = new_location-start; } } // generate a valid rowpointer __global__ void magma_dmcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_dmcsrgpu_kernel3( int num_rows, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; double zero = MAGMA_D_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param[in,out] A magma_d_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_dmcsrcompressor_gpu( magma_d_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; magma_d_matrix B={Magma_CSR}, B2={Magma_CSR}; magma_d_matrix dA={Magma_CSR}, CSRA={Magma_CSR}; magma_index_t *cputmp = NULL; if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) { CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 )); CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 )); magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue ); dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) ); // copying the nonzeros into B and write in B.drow how many there are magma_dmcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>> ( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol ); // correct the row pointer dim3 grid2( 1, 1, 1); magma_dmcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue->cuda_stream() >>> ( A->num_rows, B.drow, A->drow ); // access the true number of nonzeros CHECK( magma_index_malloc_cpu( &cputmp, 1 )); magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1, queue ); A->nnz = (magma_int_t) cputmp[0]; // reallocate with right size CHECK( magma_dmalloc( &B.dval, A->nnz )); CHECK( magma_index_malloc( &B.dcol, A->nnz )); // copy correct values back magma_dmcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>> ( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol ); magma_free( A->dcol ); magma_free( A->dval ); A->dcol = B.dcol; A->dval = B.dval; } else { magma_storage_t A_storage = A->storage_type; magma_location_t A_location = A->memory_location; CHECK( magma_dmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue )); CHECK( magma_dmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue )); CHECK( magma_dmcsrcompressor_gpu( &dA, queue )); magma_dmfree( &dA, queue ); magma_dmfree( A, queue ); CHECK( magma_dmtransfer( dA, &CSRA, Magma_DEV, A_location, queue )); CHECK( magma_dmconvert( CSRA, A, Magma_CSR, A_storage, queue )); magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); } cleanup: magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); magma_free( B2.drow ); magma_free( B.drow ); return info; }
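For reference, the three kernels above implement: (1) count the nonzeros of each row, (2) turn those counts into a valid row pointer with a serial prefix sum, and (3) copy the surviving values and column indices into the reallocated arrays. The single-threaded host sketch below performs the same steps and can be used to check the GPU path on small matrices; plain std::vector types stand in for MAGMA's device arrays, and none of these names are MAGMA API.

#include <vector>

// Host reference: drop explicit zeros from a CSR matrix in place.
void csr_compress_host(int num_rows,
                       std::vector<double> &val,
                       std::vector<int> &rowptr,
                       std::vector<int> &colind) {
    std::vector<double> new_val;
    std::vector<int> new_col;
    std::vector<int> new_rowptr(num_rows + 1, 0);
    for (int r = 0; r < num_rows; ++r) {
        new_rowptr[r] = (int)new_val.size();           // step 2: running prefix sum
        for (int j = rowptr[r]; j < rowptr[r + 1]; ++j) {
            if (val[j] != 0.0) {                        // step 1: keep only nonzeros
                new_val.push_back(val[j]);              // step 3: copy surviving entries
                new_col.push_back(colind[j]);
            }
        }
    }
    new_rowptr[num_rows] = (int)new_val.size();
    val.swap(new_val);
    colind.swap(new_col);
    rowptr.swap(new_rowptr);
}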
b0fb0cd407d8c9bb7df3c616f688980e3fc7a3fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches zlacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread copies one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zlacpy_batched_kernel( int m, int n, const magmaDoubleComplex * const *dAarray, int ldda, magmaDoubleComplex **dBarray, int lddb ) { // dA and dB iterate across row i const magmaDoubleComplex *dA = dAarray[ blockIdx.y ]; magmaDoubleComplex *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const magmaDoubleComplex *dAend = dA + n*ldda; while( dA < dAend ) { *dB = *dA; dA += ldda; dB += lddb; } } } /* ===================================================================== */ /** Note -------- - UPLO Parameter is disabled - Do we want to provide a generic function to the user with all the options? Purpose ------- ZLACPY copies all or part of a set of two-dimensional matrices dAarray[i] to another set of matrices dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of each matrix dAarray[i] to be copied to dBarray[i]. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part Otherwise: All of each matrix dAarray[i] @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX_16 array, dimension (LDDA,N) The m by n matrices dAarray[i]. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). @param[out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX_16 array, dimension (LDDB,N) The m by n matrices dBarray[i]. On exit, matrix dBarray[i] = matrix dAarray[i] in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. 
@ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zlacpy_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, const magmaDoubleComplex * const *dAarray, magma_int_t ldda, magmaDoubleComplex **dBarray, magma_int_t lddb, magma_int_t batchCount ) { magma_int_t info = 0; if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); if ( uplo == MagmaUpper ) { fprintf(stderr, "lacpy upper is not implemented\n"); } else if ( uplo == MagmaLower ) { fprintf(stderr, "lacpy lower is not implemented\n"); } else { hipLaunchKernelGGL(( zlacpy_batched_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, dAarray, ldda, dBarray, lddb ); } }
b0fb0cd407d8c9bb7df3c616f688980e3fc7a3fa.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches zlacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread copies one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void zlacpy_batched_kernel( int m, int n, const magmaDoubleComplex * const *dAarray, int ldda, magmaDoubleComplex **dBarray, int lddb ) { // dA and dB iterate across row i const magmaDoubleComplex *dA = dAarray[ blockIdx.y ]; magmaDoubleComplex *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const magmaDoubleComplex *dAend = dA + n*ldda; while( dA < dAend ) { *dB = *dA; dA += ldda; dB += lddb; } } } /* ===================================================================== */ /** Note -------- - UPLO Parameter is disabled - Do we want to provide a generic function to the user with all the options? Purpose ------- ZLACPY copies all or part of a set of two-dimensional matrices dAarray[i] to another set of matrices dBarray[i], for i = 0, ..., batchCount-1. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of each matrix dAarray[i] to be copied to dBarray[i]. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part Otherwise: All of each matrix dAarray[i] @param[in] m INTEGER The number of rows of each matrix dAarray[i]. M >= 0. @param[in] n INTEGER The number of columns of each matrix dAarray[i]. N >= 0. @param[in] dAarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX_16 array, dimension (LDDA,N) The m by n matrices dAarray[i]. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). @param[out] dBarray array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX_16 array, dimension (LDDB,N) The m by n matrices dBarray[i]. On exit, matrix dBarray[i] = matrix dAarray[i] in the locations specified by UPLO. @param[in] lddb INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). @param[in] batchCount INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. 
@ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zlacpy_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, const magmaDoubleComplex * const *dAarray, magma_int_t ldda, magmaDoubleComplex **dBarray, magma_int_t lddb, magma_int_t batchCount ) { magma_int_t info = 0; if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); if ( uplo == MagmaUpper ) { fprintf(stderr, "lacpy upper is not implemented\n"); } else if ( uplo == MagmaLower ) { fprintf(stderr, "lacpy lower is not implemented\n"); } else { zlacpy_batched_kernel<<< grid, threads, 0, magma_stream >>>( m, n, dAarray, ldda, dBarray, lddb ); } }
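The batched interface above takes arrays of device pointers (dAarray, dBarray) that must themselves reside in device memory. The sketch below shows that calling convention on a stripped-down float copy kernel of my own; it is not the MAGMA API, and the pointers in h_Aptrs/h_Bptrs are assumed to already point at column-major matrices in device memory.

#include <cuda_runtime.h>
#include <vector>

// Each y-block of the grid handles one matrix of the batch, as in the MAGMA kernel.
__global__ void copy_batched(int m, int n,
                             const float * const *dA, int ldda,
                             float **dB, int lddb) {
    const float *A = dA[blockIdx.y];
    float       *B = dB[blockIdx.y];
    int i = blockIdx.x * blockDim.x + threadIdx.x;     // row index
    if (i < m)
        for (int j = 0; j < n; ++j)
            B[i + j * lddb] = A[i + j * ldda];         // column-major copy
}

void run_batch(const std::vector<float*> &h_Aptrs,
               const std::vector<float*> &h_Bptrs,
               int m, int n, int ldda, int lddb) {
    int batch = (int)h_Aptrs.size();
    float **dAarray = nullptr, **dBarray = nullptr;
    cudaMalloc(&dAarray, batch * sizeof(float*));
    cudaMalloc(&dBarray, batch * sizeof(float*));
    // The pointer tables themselves must live in device memory.
    cudaMemcpy(dAarray, h_Aptrs.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);
    cudaMemcpy(dBarray, h_Bptrs.data(), batch * sizeof(float*), cudaMemcpyHostToDevice);
    dim3 threads(64);
    dim3 grid((m + 63) / 64, batch);                   // y-dimension indexes the batch
    copy_batched<<<grid, threads>>>(m, n, dAarray, ldda, dBarray, lddb);
    cudaDeviceSynchronize();                           // make sure the launch finished
    cudaFree(dAarray);
    cudaFree(dBarray);
}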
b9ceeee106b73b1ebb0ee27c3562291258ae352a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <cstdlib> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> void initWith(float num, float *a, int N) { for (int i = 0; i < N; ++i) { a[i] = num; } } __global__ void addArraysInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *array, int N) { for (int i = 0; i < N; i++) { if (array[i] != target) { printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { int deviceId; int numberOfSMs; hipGetDevice(&deviceId); hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId); printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs); const int N = 2 << 24; size_t size = N * sizeof(float); float *a; float *b; float *c; hipMallocManaged(&a, size); hipMallocManaged(&b, size); hipMallocManaged(&c, size); int threadsPerBlock; int numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; hipError_t addArraysErr; hipError_t asyncErr; initWith(3, a, N); initWith(4, b, N); initWith(0, c, N); hipDeviceSynchronize(); addArraysInto << <numberOfBlocks, threadsPerBlock >> > (c, a, b, N); addArraysErr = hipGetLastError(); if (addArraysErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addArraysErr)); asyncErr = hipDeviceSynchronize(); if (asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr)); checkElementsAre(7, c, N); hipFree(a); hipFree(b); hipFree(c); }
b9ceeee106b73b1ebb0ee27c3562291258ae352a.cu
#include <stdio.h> #include <cstdlib> #include <device_launch_parameters.h> #include <cuda_runtime.h> void initWith(float num, float *a, int N) { for (int i = 0; i < N; ++i) { a[i] = num; } } __global__ void addArraysInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *array, int N) { for (int i = 0; i < N; i++) { if (array[i] != target) { printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { int deviceId; int numberOfSMs; cudaGetDevice(&deviceId); cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId); printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs); const int N = 2 << 24; size_t size = N * sizeof(float); float *a; float *b; float *c; cudaMallocManaged(&a, size); cudaMallocManaged(&b, size); cudaMallocManaged(&c, size); int threadsPerBlock; int numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; cudaError_t addArraysErr; cudaError_t asyncErr; initWith(3, a, N); initWith(4, b, N); initWith(0, c, N); cudaDeviceSynchronize(); addArraysInto << <numberOfBlocks, threadsPerBlock >> > (c, a, b, N); addArraysErr = cudaGetLastError(); if (addArraysErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addArraysErr)); asyncErr = cudaDeviceSynchronize(); if (asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr)); checkElementsAre(7, c, N); cudaFree(a); cudaFree(b); cudaFree(c); }
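addArraysInto uses a grid-stride loop over cudaMallocManaged buffers, so the kernel's first touch of a, b, and c is serviced by demand paging. On devices that support concurrentManagedAccess, one optional refinement (not part of the file) is to prefetch the managed buffers to the GPU before the launch and back to the host before validation, roughly as in this standalone sketch with its own buffer and kernel.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill(float *x, float v, int n) {
    // Same grid-stride pattern as addArraysInto.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        x[i] = v;
}

int main() {
    const int n = 1 << 20;
    float *x = nullptr;
    cudaMallocManaged(&x, n * sizeof(float));

    int dev = 0;
    cudaGetDevice(&dev);
    // Prefetch to the GPU before the launch, then back to the host before reading.
    cudaMemPrefetchAsync(x, n * sizeof(float), dev, 0);
    fill<<<256, 256>>>(x, 1.0f, n);
    cudaMemPrefetchAsync(x, n * sizeof(float), cudaCpuDeviceId, 0);
    cudaDeviceSynchronize();

    printf("x[0] = %f\n", x[0]);
    cudaFree(x);
    return 0;
}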
40de1e32e4cafea714d4324e38c54805eb2d1d6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // @file pooling_gpu.cu // @brief Pooling block implementation (GPU) // @author Andrea Vedaldi // @author Karel Lenc /* Copyright (C) 2014-15 Andrea Vedaldi and Karel Lenc. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "maxout.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* pooling_max_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void maxout_kernel (T* pooled, const T* data, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int numunit, const int numpiece) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int thx = pooledIndex % (pooledWidth*pooledHeight); // which element in pooled int ut = (pooledIndex / (pooledWidth*pooledHeight)) % numunit; //which unit int ntr = pooledIndex / (pooledWidth*pooledHeight*numunit); // which trial T bestValue = data[thx + pooledWidth*pooledHeight*(ut + ntr*numunit*numpiece)]; // GET vlaue in data //for (int k = 0; k < numpiece ; ++k) { // bestValue = max(bestValue, data[thx + pooledWidth*pooledHeight*(ut*numpiece+k)]) ; // } for (int k = 0; k < numpiece ; ++k) { bestValue = max(bestValue, data[thx + pooledWidth*pooledHeight*(ut + k*numunit + ntr*numunit*numpiece)]) ; } pooled[pooledIndex] = bestValue ; } } template<> vl::Error vl::impl::maxout_forward<vl::GPU, float>(float* pooled, float const* data, size_t height, size_t width, size_t depth, size_t numunit, size_t numpiece) { int pooledWidth = width; int pooledHeight = height; int pooledVolume = pooledWidth * pooledHeight * depth / numpiece ; hipLaunchKernelGGL(( maxout_kernel<float>) , dim3(divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, pooled, data, pooledHeight, pooledWidth, pooledVolume, numunit, numpiece); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ; } /* ---------------------------------------------------------------- */ /* pooling_max_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void maxout_backward_kernel (T* derData, const T* data, const T* derPooled, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int numunit, const int numpiece) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int thx = pooledIndex % (pooledWidth*pooledHeight); // which element in pooled int ut = (pooledIndex / (pooledWidth*pooledHeight)) % numunit; //which unit int ntr = pooledIndex / (pooledWidth*pooledHeight*numunit); // which trial //T bestValue = data[thx + pooledWidth*pooledHeight*(ut*numpiece)]; // GET vlaue in data T bestValue = data[thx + pooledWidth*pooledHeight*(ut + ntr*numunit*numpiece)]; // GET vlaue in data int bestindex = 0; for (int k = 0; k < numpiece ; ++k) { //T value = data[thx + pooledWidth*pooledHeight*(ut*numpiece+k)]; T value = data[thx + pooledWidth*pooledHeight*(ut + k*numunit + ntr*numunit*numpiece)]; if (value > bestValue) { bestValue = value ; bestindex = k; } } /* This is bad, but required to eliminate a race condition when writing to bottom_diff. 
Caffe goes the other way around, but requrires remembering the layer output, or the maximal indexes. atomicAdd(add, val) */ //int dain = thx + pooledWidth*pooledHeight*(ut*numpiece+bestindex); int dain = thx + pooledWidth*pooledHeight*(ut + bestindex*numunit + ntr*numunit*numpiece); atomicAdd(derData + dain, derPooled[pooledIndex]) ; //derData[dain] = derPooled[pooledIndex]; } } template<> vl::Error vl::impl::maxout_backward<vl::GPU, float>(float* derData, float const* data, float const* derPooled, size_t height, size_t width, size_t depth, size_t numunit, size_t numpiece) { int pooledWidth = width; int pooledHeight = height; int pooledVolume = pooledWidth * pooledHeight * depth / numpiece; hipLaunchKernelGGL(( maxout_backward_kernel<float>) , dim3(divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, derData, data, derPooled, pooledHeight, pooledWidth, pooledVolume, numunit, numpiece); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ; }
40de1e32e4cafea714d4324e38c54805eb2d1d6b.cu
// @file pooling_gpu.cu // @brief Pooling block implementation (GPU) // @author Andrea Vedaldi // @author Karel Lenc /* Copyright (C) 2014-15 Andrea Vedaldi and Karel Lenc. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "maxout.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* pooling_max_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void maxout_kernel (T* pooled, const T* data, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int numunit, const int numpiece) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int thx = pooledIndex % (pooledWidth*pooledHeight); // which element in pooled int ut = (pooledIndex / (pooledWidth*pooledHeight)) % numunit; //which unit int ntr = pooledIndex / (pooledWidth*pooledHeight*numunit); // which trial T bestValue = data[thx + pooledWidth*pooledHeight*(ut + ntr*numunit*numpiece)]; // GET vlaue in data //for (int k = 0; k < numpiece ; ++k) { // bestValue = max(bestValue, data[thx + pooledWidth*pooledHeight*(ut*numpiece+k)]) ; // } for (int k = 0; k < numpiece ; ++k) { bestValue = max(bestValue, data[thx + pooledWidth*pooledHeight*(ut + k*numunit + ntr*numunit*numpiece)]) ; } pooled[pooledIndex] = bestValue ; } } template<> vl::Error vl::impl::maxout_forward<vl::GPU, float>(float* pooled, float const* data, size_t height, size_t width, size_t depth, size_t numunit, size_t numpiece) { int pooledWidth = width; int pooledHeight = height; int pooledVolume = pooledWidth * pooledHeight * depth / numpiece ; maxout_kernel<float> <<< divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (pooled, data, pooledHeight, pooledWidth, pooledVolume, numunit, numpiece); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ; } /* ---------------------------------------------------------------- */ /* pooling_max_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void maxout_backward_kernel (T* derData, const T* data, const T* derPooled, const int pooledWidth, const int pooledHeight, const int pooledVolume, const int numunit, const int numpiece) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < pooledVolume) { int thx = pooledIndex % (pooledWidth*pooledHeight); // which element in pooled int ut = (pooledIndex / (pooledWidth*pooledHeight)) % numunit; //which unit int ntr = pooledIndex / (pooledWidth*pooledHeight*numunit); // which trial //T bestValue = data[thx + pooledWidth*pooledHeight*(ut*numpiece)]; // GET vlaue in data T bestValue = data[thx + pooledWidth*pooledHeight*(ut + ntr*numunit*numpiece)]; // GET vlaue in data int bestindex = 0; for (int k = 0; k < numpiece ; ++k) { //T value = data[thx + pooledWidth*pooledHeight*(ut*numpiece+k)]; T value = data[thx + pooledWidth*pooledHeight*(ut + k*numunit + ntr*numunit*numpiece)]; if (value > bestValue) { bestValue = value ; bestindex = k; } } /* This is bad, but required to eliminate a race condition when writing to bottom_diff. Caffe goes the other way around, but requrires remembering the layer output, or the maximal indexes. 
atomicAdd(add, val) */ //int dain = thx + pooledWidth*pooledHeight*(ut*numpiece+bestindex); int dain = thx + pooledWidth*pooledHeight*(ut + bestindex*numunit + ntr*numunit*numpiece); atomicAdd(derData + dain, derPooled[pooledIndex]) ; //derData[dain] = derPooled[pooledIndex]; } } template<> vl::Error vl::impl::maxout_backward<vl::GPU, float>(float* derData, float const* data, float const* derPooled, size_t height, size_t width, size_t depth, size_t numunit, size_t numpiece) { int pooledWidth = width; int pooledHeight = height; int pooledVolume = pooledWidth * pooledHeight * depth / numpiece; maxout_backward_kernel<float> <<< divideUpwards(pooledVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derData, data, derPooled, pooledHeight, pooledWidth, pooledVolume, numunit, numpiece); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ; }
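The index arithmetic in maxout_kernel assumes the input is laid out as (trial, piece, unit, pixel), with depth = numunit * numpiece * numtrials. Below is a single-threaded host reference with the same indexing, handy for validating both kernels on small tensors; the function name and the explicit numtrial parameter are my own, since the kernels derive the trial count from depth.

#include <vector>
#include <algorithm>

// CPU reference for the forward pass: keep the per-pixel maximum over the
// `numpiece` linear pieces of each unit. Input index mirrors the kernel:
//   thx + plane*(ut + k*numunit + ntr*numunit*numpiece)
void maxout_forward_ref(std::vector<float> &pooled, const std::vector<float> &data,
                        int height, int width, int numunit, int numpiece, int numtrial) {
    int plane = width * height;
    pooled.assign((size_t)plane * numunit * numtrial, 0.0f);
    for (int ntr = 0; ntr < numtrial; ++ntr)
        for (int ut = 0; ut < numunit; ++ut)
            for (int thx = 0; thx < plane; ++thx) {
                float best = data[thx + plane * (ut + ntr * numunit * numpiece)];
                for (int k = 0; k < numpiece; ++k)
                    best = std::max(best,
                        data[thx + plane * (ut + k * numunit + ntr * numunit * numpiece)]);
                pooled[thx + plane * (ut + ntr * numunit)] = best;
            }
}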
354f66b3adcae1418f4a4910b5235a6f5bb83c34.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013-2015 Guoguo Chen // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cudamatrix/cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. 
template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = j * mat_dim.stride + i, index2 = j * mat2_row_stride + i * mat2_col_stride; if (i < mat_dim.cols && j < mat_dim.rows) { mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dmat.cols && j < dmat.rows) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = j * dmat.stride + i; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { // we interpret these indexes oppositely from normal, but it doesn't // matter as it's invoked in a symmetric way. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index. int32_cuda index_out = i + j * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.cols && j < d_out.rows) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<int TileDim, typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { // Use shared meme to achieve both coalesced memory reading and writing // '+1' to avoid bank conflict when reading sbuf __shared__ Real sbuf[TileDim][TileDim + 1]; const int32_cuda i_in = blockIdx.y * TileDim + threadIdx.y; // row-index const int32_cuda j_in = blockIdx.x * TileDim + threadIdx.x; // col-index const int32_cuda tile_stride_in = CU1DBLOCK / TileDim * d_in.stride; int32_cuda index_in = i_in * d_in.stride + j_in; # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_in + i < d_in.rows && j_in < d_in.cols) { sbuf[threadIdx.y + i][threadIdx.x] = static_cast<Real>(mat_in[index_in]); } index_in += tile_stride_in; } __syncthreads(); // Grid is transposed, but block is not yet. // Warp (blockDim.x) is always along the row-dim. 
const int32_cuda i_out = blockIdx.x * TileDim + threadIdx.y; const int32_cuda j_out = blockIdx.y * TileDim + threadIdx.x; const int32_cuda tile_stride_out = CU1DBLOCK / TileDim * d_out.stride; int32_cuda index_out = i_out * d_out.stride + j_out; # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_out + i < d_out.rows && j_out < d_out.cols) { // block is tranposed when reading sbuf mat_out[index_out] = sbuf[threadIdx.x][threadIdx.y + i]; } index_out += tile_stride_out; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].row * d_out.stride + smat_in[smat_index].column; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_trans(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].column * d_out.stride + smat_in[smat_index].row; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat_trans(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].row * mat_d_in.stride + smat_in[smat_index].column; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].column * mat_d_in.stride + smat_in[smat_index].row; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i*d.stride; if ( i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void 
_set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] / A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if ( i < dst_d.cols && j < dst_d.rows ) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = (a > b ? 
a : b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } /// y is the derivative we will output; vec is the input we're computing /// the group p-norm on, "norm" is the previously computed group p-norm. template<typename Real> __global__ static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. norm_element = norm[src_index]; // this is the pnorm Real vec_element_sign = (vec_element > 0 ? 1 : -1); Real ans; if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin. else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) * pow(norm_element, 1 - power); deriv[dst_index] = ans; } } /// deriv is the derivative we will output; vec is the input we're computing /// the group max on, "maxv" is the previously computed group max. template<typename Real> __global__ static void _calc_group_max_deriv(Real *deriv, const Real *vec, const Real *maxv, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. max_element = maxv[src_index]; // this is the max value Real ans = (max_element == vec_element ? 1.0 : 0.0); deriv[dst_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). 
template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { const int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; if (i < d.rows) { const int32_cuda start = i * d.stride; const Real scale = Real(1) / vec_div[i]; const int32_cuda grid_stride = blockDim.x * gridDim.x; for (int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; j < d.cols; j += grid_stride) { mat[start + j] *= scale; } } } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha * src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_blocks(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.rows + q * d.cols] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_blocks_trans(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.cols + q * d.rows] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, a_index = i + j*stride_a, b_index = i + j*stride_b, c_index = i + j*stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, // this function does S = beta S + alpha * L^T L. This is used in PSD matrix inversion. // The i index is the row of the destination S and the j the column (although of // course the output is symmetric so it doesn't matter in a sense). 
The main point // of this is to make use of various symmetries and zero-ness. template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*col[j] + beta*dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*dmat.stride; int32_cuda index2 = i + j*dmask.stride; if ( i < dmat.cols && j < dmat.rows ) if(mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * mat_dim.stride, index2 = i * mat2_col_stride + j * mat2_row_stride; if (j < mat_dim.rows && i < mat_dim.cols) mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index]; } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index] ; } } /* * CuVector */ // very limited application! template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { Real ratio = a[i] / param_3; if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) { *flag = 1; return; } if ( ratio < param_1 ) { Real factor = ((param_1/ratio) > param_2) ? 
param_2 : (param_1/ratio); v[i] = v[i] / factor; } else if ( ratio > param_1 ) { Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1); v[i] = v[i] * factor; } } } template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". the dimension of v_in should be equal to the #columns of m_out. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index. int j = blockIdx.y * blockDim.y + threadIdx.y; // row index. if (i < d.cols && j < d.rows) { int index = i + j * d.stride; m_out[index] = v_in[i]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (float) v_in[i]; } } // _trace_mat_mat reduce the partial sum to value[blockIdx.y * gridDim.x + blockIdx.x] // It use shared mem to transpose matrix B to ensure coalesced memory access template<int TileDim, typename Real> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { // Reuse shared mem and make indexing easier. "+1" to avoid bank conflict __shared__ union { Real trans[TileDim][TileDim + 1]; Real sum[CU1DBLOCK]; } smem; const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; // linear thread id; const int32_cuda grid_height = gridDim.y * TileDim; const int32_cuda ja = blockIdx.x * TileDim + threadIdx.x; const int32_cuda ib = blockIdx.x * TileDim + threadIdx.y; int32_cuda ia = blockIdx.y * TileDim + threadIdx.y; int32_cuda jb = blockIdx.y * TileDim + threadIdx.x; // Grid reduce Real tsum = Real(0); for (int32_cuda i0 = 0; i0 < dA.rows; i0 += grid_height) { // Load from B, transpose the block and store in shared mem if (jb < dA.rows) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (ib + i < dA.cols) { smem.trans[threadIdx.x][threadIdx.y + i] = B[(ib + i) * B_stride + jb]; } } } __syncthreads(); // Load from A, sum up the product. if (ja < dA.cols) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (ia + i < dA.rows) { tsum += A[(ia + i) * dA.stride + ja] * smem.trans[threadIdx.y + i][threadIdx.x]; } } } __syncthreads(); ia += grid_height; jb += grid_height; } smem.sum[tid] = tsum; __syncthreads(); // Block reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) smem.sum[tid] += smem.sum[tid + shift]; __syncthreads(); } // Warp reduce. Implicitly synchronized within a warp. 
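// (Sketch of the step below, assuming CU1DBLOCK is a power of two and at least
// 2 * warpSize: the block-level loop above stops once shift == warpSize, so the
// remaining partial sums live in smem.sum[0 .. 2*warpSize-1].  Each thread with
// tid < warpSize then folds smem.sum[tid + warpSize], smem.sum[tid + warpSize/2],
// ..., smem.sum[tid + 1] into smem.sum[tid], e.g. offsets 32, 16, 8, 4, 2, 1 when
// warpSize == 32, leaving the block total in smem.sum[0].)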
if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { smem.sum[tid] += smem.sum[tid + shift]; } } // output 1 sum per thread block if (tid == 0) { value[blockIdx.y * gridDim.x + blockIdx.x] = smem.sum[0]; } } // _trace_mat_mat_trans reduce the partial sum to value[blockIdx.y * gridDim.x + blockIdx.x] template<typename Real> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { __shared__ Real ssum[CU1DBLOCK]; const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; // linear thread id; const int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; const int32_cuda grid_height = gridDim.y * blockDim.y; int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; // Grid reduce Real tsum = Real(0); if (j < dA.cols) { while (i < dA.rows) { tsum += A[i * dA.stride + j] * B[i * B_stride + j]; i += grid_height; } } ssum[tid] = tsum; __syncthreads(); // Block reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) ssum[tid] += ssum[tid + shift]; __syncthreads(); } // Warp reduce. Implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // output 1 sum per thread block if (tid == 0) { value[blockIdx.y * gridDim.x + blockIdx.x] = ssum[0]; } } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. if (v_idx < v_dim) { Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; } // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". 
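// (Worked example, assuming threads_per_element == 8: the loop below runs with
// half_point = 4, then 2, then 1; at each step the threads with sub_idx < half_point
// add temp_data[threadIdx.x + half_point] into temp_data[threadIdx.x], and the
// range check skips partners that do not exist when the count is odd.  After the
// last step the thread with sub_idx == 0 holds the sum of all 8 partial sums.)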
__syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0 && v_idx < v_dim) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j< loop_end; j++) { int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) ) + 2 * sizeof(int)); tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here.... 
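// (Presumably the concern noted above is that this "+=" is not atomic: if two
// entries of x refer to the same (row, column) cell, they can be handled by
// different threads, and concurrent updates to the same z2 element may lose a
// contribution.)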
} tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t+1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int num_elements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_elements) return; data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight; } template<typename Real> __global__ static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha, const Int32Pair* indices, const Real* x, int s, Real* data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= s) return; int data_i = indices[i].first * dim.stride + indices[i].second; data[data_i] += alpha * x[i]; } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index_mat1 = i + j*mat1_dim.stride; int32_cuda index_mat2 = i + j*mat2_stride; int32_cuda index_mask = i + j*mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0); } enum EnumReduceType { MAX, MIN, SUM }; template<EnumReduceType ReduceType, typename Real> struct ReduceOperation { __device__ __forceinline__ Real InitValue() { return Real(0); } __device__ __forceinline__ Real operator()(const Real& a, const Real& b) { return Real(0); } }; template<typename Real> struct ReduceOperation<MAX, Real> { __device__ __forceinline__ Real InitValue() { return Real(-1.0 / 0.0); } __device__ __forceinline__ Real operator()(const Real& a, const Real& b) { return max(a, b); } }; template<typename Real> struct ReduceOperation<MIN, Real> { __device__ __forceinline__ Real InitValue() { return Real(1.0 / 0.0); } __device__ __forceinline__ Real operator()(const Real& a, const Real& b) { return min(a, b); } }; template<typename Real> struct ReduceOperation<SUM, Real> { __device__ __forceinline__ Real InitValue() { return Real(0); } __device__ __forceinline__ Real operator()(const Real& a, const Real& b) { return a + b; } }; // Vector reduce. template<EnumReduceType ReduceType, typename Real> __global__ static void _vec_reduce(const Real* v, Real* result, const int dim, const int inc) { ReduceOperation<ReduceType, Real> reduce; __shared__ Real sdata[CU1DBLOCK]; Real tdata = reduce.InitValue(); const int vec_len = dim * inc; const int grid_stride = gridDim.x * blockDim.x * inc; int i = (blockIdx.x * blockDim.x + threadIdx.x) * inc; // Grid reduce. Loop over the whole vector v. for (; i < vec_len; i += grid_stride) tdata = reduce(tdata, v[i]); sdata[threadIdx.x] = tdata; __syncthreads(); // Tree reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (threadIdx.x < shift) sdata[threadIdx.x] = reduce(sdata[threadIdx.x], sdata[threadIdx.x + shift]); __syncthreads(); } // Reduce last warp. Threads implicitly synchronized within a warp. 
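// (Note that every sdata entry was written above; threads whose start index lay
// beyond the vector still hold reduce.InitValue(), which is the identity of the
// operation (0 for SUM, +infinity for MIN, -infinity for MAX), so folding those
// entries in the steps below does not perturb the result.)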
if (threadIdx.x < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) sdata[threadIdx.x] = reduce(sdata[threadIdx.x], sdata[threadIdx.x + shift]); } // Output to vector result. if (threadIdx.x == 0) result[blockIdx.x] = sdata[0]; } // Reduce a matrix 'mat' to a column vector 'result' template<EnumReduceType ReduceType, typename Real> __global__ static void _reduce_mat_cols(Real *result, const Real *mat, const MatrixDim d) { ReduceOperation<ReduceType, Real> reduce; __shared__ Real sdata[CU1DBLOCK]; const int i = blockIdx.x; const int row_start = i * d.stride; Real tdata = reduce.InitValue(); for (int j = threadIdx.x; j < d.cols; j += CU1DBLOCK) { tdata = reduce(tdata, mat[row_start + j]); } sdata[threadIdx.x] = tdata; __syncthreads(); // Tree reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (threadIdx.x < shift) sdata[threadIdx.x] = reduce(sdata[threadIdx.x], sdata[threadIdx.x + shift]); __syncthreads(); } // Reduce last warp. Threads implicitly synchronized within a warp. if (threadIdx.x < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) sdata[threadIdx.x] = reduce(sdata[threadIdx.x], sdata[threadIdx.x + shift]); } // Output to vector result. if (threadIdx.x == 0) result[i] = sdata[0]; } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] > ceiling_val) { v[i] = ceiling_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = (mat[index] > 0.0 ? 
1.0 : 0.0); } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + reorder[i]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _add_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + index; Real val = src[src_index]; dst[dst_index] += val; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[j], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = reorder[j] * src_stride + i; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; const Real *pointer = src[j]; if (pointer != NULL) { dst[dst_index] = pointer[i]; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_to_rows(Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { Real *pointer = dst[j]; if (pointer != NULL) { pointer[i] = src[j * src_dim.stride + i]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (reorder[j] >= 0) { int src_index = reorder[j] * src_stride + i; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (src[j] != NULL) 
{ dst[dst_index] += alpha * src[j][i]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { if (dst[j] != NULL) { dst[j][i] += alpha * src[j * src_dim.stride + i]; } } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows ) { if (mat[index] > ceiling_val) mat[index] = ceiling_val; } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j*d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0/data[index]; } // matrix-wise, do data = alpha * data + beta * A * B^T, // where B is a block matrix. template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). 
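// (In matrix notation the loop below computes, for each column k of this block,
// data(i, B_col_start + k) =
//   alpha * sum_l A(i, B_row_start + l) * Block(l, k) + beta * data(i, B_col_start + k),
// i.e. row i of A times the j'th block of B, written into the columns of "data"
// that this block occupies.)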
for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } template<typename Real> __global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. 
sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indexes) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col; int src_index_start = indexes[row].first, src_index_end = indexes[row].second; for (int row_index = src_index_start; row_index < src_index_end; row_index++) data[dst_index] += src_data[row_index * src_dim.stride + col]; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; // compute the function y[index] = log(1 + exp(x[index])) if(i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index ++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; Real max_abs_value = (max_value > -min_value ? 
max_value : -min_value); // let max_value be the // largest abs(value) if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } template<typename Real> __global__ static void _group_max(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; int src_begin_index = i * group_size + j * src_stride; Real max_value = -1e20; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { if (!isnan(x[src_index]) && x[src_index] > max_value) max_value = x[src_index]; } y[dst_index] = max_value; } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = (x[src_index] > 0.0 ? 
1.0 : 0.0); y[dst_index] = res; } } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _log_softmax_reduce(Real *y, const Real *x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; // Maximum step 1: loads input data to <aux>. If <d.cols> is larger than // <blockDim.x>, then we do a first pass filtering and only // keep a <blockDim.x> size array. aux[threadIdx.x] = x[threadIdx.x + j * d.stride]; for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x + i * THREADS + j * d.stride]) aux[threadIdx.x] = x[threadIdx.x + i * THREADS + j * d.stride]; } // Maximum step 2: the standard max reduce. int nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x + halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real max = aux[0]; __syncthreads(); // Log sum step 1: substracts max, and takes exponentials. 
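// (The kernel as a whole evaluates the numerically stable identity
//   log softmax(x)_i = (x_i - max) - log(sum_j exp(x_j - max)):
// step 1 below stores x_i - max into y and accumulates exp(x_i - max) in aux,
// step 2 reduces the sum and subtracts its logarithm from every element of y.)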
y[threadIdx.x + j * d.stride] = x[threadIdx.x + j * d.stride] - max; aux[threadIdx.x] = exp(y[threadIdx.x + j * d.stride]); for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] = x[threadIdx.x + i * THREADS + j * d.stride] - max; aux[threadIdx.x] += exp(y[threadIdx.x + i * THREADS + j * d.stride]); } } // Log sum step 2: comptes summation and then takes logarithm. nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real log_sum = log(aux[0]); __syncthreads(); // Computes log softmax. for (int i = 0; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] -= log_sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // if (i < dim.cols && j < dim.rows) { int dst_index = i + j * dim.stride, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. 
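// (Packed symmetric storage keeps only the lower triangle: element (r, c) with
// r >= c is stored at linear index r*(r+1)/2 + c.  The output entry at row j,
// column i is therefore read from packed position (max(i,j), min(i,j)); this
// branch handles j > i, the branch above j <= i.)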
src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, MatrixDim d) { const int32_cuda i = blockIdx.x; const int32_cuda base = i * d.stride; const int32_cuda tid = threadIdx.x; __shared__ Real smax[CU1DBLOCK]; __shared__ int32_cuda sidx[CU1DBLOCK]; Real tmax = -1e20; int32_cuda tidx = -1; // Loop over blocks for coalesced memory access. 
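// (Each thread scans columns tid, tid + CU1DBLOCK, tid + 2*CU1DBLOCK, ... of row
// i, keeping a running maximum and its column index, so in every iteration
// consecutive threads read consecutive elements of the row and the global memory
// accesses coalesce.)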
for (int32_cuda j = tid; j < d.cols; j += CU1DBLOCK) { const Real val = mat[base + j]; if (val > tmax) { tmax = val; tidx = j; } } smax[tid] = tmax; sidx[tid] = tidx; // Parallel reduce #pragma unroll for (int32_cuda num_working_threads = CU1DBLOCK / 2; num_working_threads >= warpSize; num_working_threads >>= 1) { __syncthreads(); if (tid < num_working_threads) { if (smax[tid + num_working_threads] > smax[tid]) { smax[tid] = smax[tid + num_working_threads]; sidx[tid] = sidx[tid + num_working_threads]; } } } // Warp reduce without __syncthreads() // (note.: synchronizes implicitly within a warp at the multiprocessor) if (tid < warpSize / 2) { #pragma unroll for (int32_cuda num_working_threads = warpSize / 2; num_working_threads > 0; num_working_threads >>= 1) { if (smax[tid + num_working_threads] > smax[tid]) { smax[tid] = smax[tid + num_working_threads]; sidx[tid] = sidx[tid + num_working_threads]; } } } if (tid == 0) { if (vec_val) { vec_val[i] = smax[0]; } vec_id[i] = sidx[0]; } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cuda_int32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cuda_int32_add(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* 
mat, float power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst, const float* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim); } void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const* dst, const float* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim); } void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim); } void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const* dst, const float* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, 
float value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, 
alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_max_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { hipLaunchKernelGGL(( _reduce_mat_cols<MAX>) , dim3(Gr),dim3(Bl), 0, 0, result,mat,d); } void cudaF_min_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { hipLaunchKernelGGL(( _reduce_mat_cols<MIN>) , dim3(Gr),dim3(Bl), 0, 0, result,mat,d); } void cudaF_sum_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { hipLaunchKernelGGL(( _reduce_mat_cols<SUM>) , dim3(Gr),dim3(Bl), 0, 0, result,mat,d); } void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaF_vec_min(int Gr, int Bl, const float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_reduce<MIN>), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc); } void cudaF_vec_max(int Gr, int Bl, const float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_reduce<MAX>), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc); } void cudaF_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(dim3 Gr, dim3 Bl, const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat<32>), dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int 
N_col_stride, int threads_per_element, float beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_reduce<SUM>), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int num_elements) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements); } void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) { hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v, ceiling_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, 
num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _group_max), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_heaviside (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_log_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaF_one(int Gr, int Bl, float* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* 
grad, float l1, float lr, MatrixDim d, int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); } void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) { hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void 
cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim); } void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const* dst, const double* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim); } void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim); } void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const* dst, const double* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 
0, mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power); } void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d,stride_a,stride_b,stride_c); } void 
cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_max_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { hipLaunchKernelGGL(( _reduce_mat_cols<MAX>) , dim3(Gr),dim3(Bl), 0, 0, result,mat,d); } void cudaD_min_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { hipLaunchKernelGGL(( _reduce_mat_cols<MIN>) , dim3(Gr),dim3(Bl), 0, 0, result,mat,d); } void cudaD_sum_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { hipLaunchKernelGGL(( _reduce_mat_cols<SUM>) , dim3(Gr),dim3(Bl), 0, 0, result,mat,d); } void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaD_vec_min(int Gr, int Bl, const double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_reduce<MIN>), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc); } void cudaD_vec_max(int Gr, int Bl, const double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_reduce<MAX>), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc); } void cudaD_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans), 
dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(dim3 Gr, dim3 Bl, const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat<32>), dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_reduce<SUM>), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int num_elements) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, double alpha, const Int32Pair* indices, const double* x, int s, double* data) { hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, 
A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _group_max), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_heaviside (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_log_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x,y,d_out); } void cudaD_copy(dim3 Gr, 
dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); } void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. 
*/ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans<32>), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans<32>), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans<32>), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans<32>), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat_out, const 
MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); }
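// ---------------------------------------------------------------------------
// Illustrative usage sketch (added; not part of the original sources): a host
// caller typically derives a 2D grid from the matrix dimensions and then calls
// one of the C wrappers above, e.g. for the float sigmoid:
//
//   MatrixDim d;      // d.rows, d.cols, d.stride describe the destination
//   dim3 Bl(16, 16);  // assumed block shape, for illustration only
//   dim3 Gr((d.cols + Bl.x - 1) / Bl.x, (d.rows + Bl.y - 1) / Bl.y);
//   cudaF_sigmoid(Gr, Bl, y_data, x_data, d, src_stride);
//
// The 16x16 block shape and the round-up formula are assumptions of this
// sketch; the real callers in Kaldi's CuMatrix code pick their own block
// constants and grid sizes.
// ---------------------------------------------------------------------------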
354f66b3adcae1418f4a4910b5235a6f5bb83c34.cu
// cudamatrix/cu-kernels.cu

// Copyright 2009-2012  Karel Vesely
//           2013  Ehsan Variani
//           2013  Johns Hopkins University (author: Daniel Povey)
//           2013  Hainan Xu
//           2013  Xiaohui Zhang
//           2013-2015  Guoguo Chen

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//  http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.

// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers

#include <cfloat>
#include "cudamatrix/cu-kernels-ansi.h"

/***********************************************************************
 * Generic __device__ functions
 */
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
  // Total number of active threads
  int32_cuda nTotalThreads = blockDim.x;
  __syncthreads();
  // perform tree-based reduction (sum)
  while(nTotalThreads > 1) {
    int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two
    // only the first half of the threads will be active.
    if (threadIdx.x >= halfPoint) { // was <
      // Get the shared value stored by another thread
      Real temp = 0.0;
      if(threadIdx.x < nTotalThreads) { // was +halfPoint
        temp = buffer[threadIdx.x]; // was +halfPoint
      }
      buffer[threadIdx.x - halfPoint] += temp;
    }
    __syncthreads();
    nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two.
  }
  // the result
  return buffer[0];
}

/***********************************************************************
 * CUDA kernels
 * the functions are templated to have the float/double operations
 */

/*
 * CuMatrix
 */

template<typename Real>
__global__
static void _copy_low_upp(Real* A, MatrixDim dimA) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i <= j || i >= dimA.rows) return;
  int index_1 = i * dimA.stride + j;
  int index_2 = j * dimA.stride + i;
  A[index_2] = A[index_1];
}

template<typename Real>
__global__
static void _copy_upp_low(Real* A, MatrixDim dimA) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (j <= i || j >= dimA.rows) return;
  int index_1 = i * dimA.stride + j;
  int index_2 = j * dimA.stride + i;
  A[index_2] = A[index_1];
}

// mat += diag(vec) * mat2.
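// Element-wise: mat(j, i) = beta * mat(j, i) + alpha * vec(j) * mat2(j, i);
// mat2 is addressed via explicit row/col strides, so a transposed view of
// mat2 can be passed in simply by swapping mat2_row_stride and mat2_col_stride.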
template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = j * mat_dim.stride + i, index2 = j * mat2_row_stride + i * mat2_col_stride; if (i < mat_dim.cols && j < mat_dim.rows) { mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dmat.cols && j < dmat.rows) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = j * dmat.stride + i; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { // we interpret these indexes oppositely from normal, but it doesn't // matter as it's invoked in a symmetric way. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index. int32_cuda index_out = i + j * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.cols && j < d_out.rows) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<int TileDim, typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { // Use shared meme to achieve both coalesced memory reading and writing // '+1' to avoid bank conflict when reading sbuf __shared__ Real sbuf[TileDim][TileDim + 1]; const int32_cuda i_in = blockIdx.y * TileDim + threadIdx.y; // row-index const int32_cuda j_in = blockIdx.x * TileDim + threadIdx.x; // col-index const int32_cuda tile_stride_in = CU1DBLOCK / TileDim * d_in.stride; int32_cuda index_in = i_in * d_in.stride + j_in; # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_in + i < d_in.rows && j_in < d_in.cols) { sbuf[threadIdx.y + i][threadIdx.x] = static_cast<Real>(mat_in[index_in]); } index_in += tile_stride_in; } __syncthreads(); // Grid is transposed, but block is not yet. // Warp (blockDim.x) is always along the row-dim. 
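// Swapping threadIdx.x / threadIdx.y in the output indices below transposes
// the tile as it is written back, so consecutive threads of a warp still
// write consecutive elements of a row of mat_out and the stores stay coalesced.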
const int32_cuda i_out = blockIdx.x * TileDim + threadIdx.y; const int32_cuda j_out = blockIdx.y * TileDim + threadIdx.x; const int32_cuda tile_stride_out = CU1DBLOCK / TileDim * d_out.stride; int32_cuda index_out = i_out * d_out.stride + j_out; # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_out + i < d_out.rows && j_out < d_out.cols) { // block is tranposed when reading sbuf mat_out[index_out] = sbuf[threadIdx.x][threadIdx.y + i]; } index_out += tile_stride_out; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].row * d_out.stride + smat_in[smat_index].column; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_trans(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].column * d_out.stride + smat_in[smat_index].row; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat_trans(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].row * mat_d_in.stride + smat_in[smat_index].column; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].column * mat_d_in.stride + smat_in[smat_index].row; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i*d.stride; if ( i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void 
_set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] / A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if ( i < dst_d.cols && j < dst_d.rows ) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = (a > b ? 
a : b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } /// y is the derivative we will output; vec is the input we're computing /// the group p-norm on, "norm" is the previously computed group p-norm. template<typename Real> __global__ static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. norm_element = norm[src_index]; // this is the pnorm Real vec_element_sign = (vec_element > 0 ? 1 : -1); Real ans; if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin. else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) * pow(norm_element, 1 - power); deriv[dst_index] = ans; } } /// deriv is the derivative we will output; vec is the input we're computing /// the group max on, "maxv" is the previously computed group max. template<typename Real> __global__ static void _calc_group_max_deriv(Real *deriv, const Real *vec, const Real *maxv, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. max_element = maxv[src_index]; // this is the max value Real ans = (max_element == vec_element ? 1.0 : 0.0); deriv[dst_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). 
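/// Note that the comparison below is exact floating-point equality, so this is
/// presumably intended for sentinel values stored verbatim rather than values
/// produced by arithmetic.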
template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { const int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; if (i < d.rows) { const int32_cuda start = i * d.stride; const Real scale = Real(1) / vec_div[i]; const int32_cuda grid_stride = blockDim.x * gridDim.x; for (int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; j < d.cols; j += grid_stride) { mat[start + j] *= scale; } } } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha * src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_blocks(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.rows + q * d.cols] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_blocks_trans(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.cols + q * d.rows] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, a_index = i + j*stride_a, b_index = i + j*stride_b, c_index = i + j*stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, // this function does S = beta S + alpha * L^T L. This is used in PSD matrix inversion. // The i index is the row of the destination S and the j the column (although of // course the output is symmetric so it doesn't matter in a sense). 
The main point // of this is to make use of various symmetries and zero-ness. template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*col[j] + beta*dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*dmat.stride; int32_cuda index2 = i + j*dmask.stride; if ( i < dmat.cols && j < dmat.rows ) if(mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * mat_dim.stride, index2 = i * mat2_col_stride + j * mat2_row_stride; if (j < mat_dim.rows && i < mat_dim.cols) mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index]; } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index] ; } } /* * CuVector */ // very limited application! template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { Real ratio = a[i] / param_3; if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) { *flag = 1; return; } if ( ratio < param_1 ) { Real factor = ((param_1/ratio) > param_2) ? 
param_2 : (param_1/ratio); v[i] = v[i] / factor; } else if ( ratio > param_1 ) { Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1); v[i] = v[i] * factor; } } } template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". the dimension of v_in should be equal to the #columns of m_out. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index. int j = blockIdx.y * blockDim.y + threadIdx.y; // row index. if (i < d.cols && j < d.rows) { int index = i + j * d.stride; m_out[index] = v_in[i]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (float) v_in[i]; } } // _trace_mat_mat reduce the partial sum to value[blockIdx.y * gridDim.x + blockIdx.x] // It use shared mem to transpose matrix B to ensure coalesced memory access template<int TileDim, typename Real> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { // Reuse shared mem and make indexing easier. "+1" to avoid bank conflict __shared__ union { Real trans[TileDim][TileDim + 1]; Real sum[CU1DBLOCK]; } smem; const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; // linear thread id; const int32_cuda grid_height = gridDim.y * TileDim; const int32_cuda ja = blockIdx.x * TileDim + threadIdx.x; const int32_cuda ib = blockIdx.x * TileDim + threadIdx.y; int32_cuda ia = blockIdx.y * TileDim + threadIdx.y; int32_cuda jb = blockIdx.y * TileDim + threadIdx.x; // Grid reduce Real tsum = Real(0); for (int32_cuda i0 = 0; i0 < dA.rows; i0 += grid_height) { // Load from B, transpose the block and store in shared mem if (jb < dA.rows) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (ib + i < dA.cols) { smem.trans[threadIdx.x][threadIdx.y + i] = B[(ib + i) * B_stride + jb]; } } } __syncthreads(); // Load from A, sum up the product. if (ja < dA.cols) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (ia + i < dA.rows) { tsum += A[(ia + i) * dA.stride + ja] * smem.trans[threadIdx.y + i][threadIdx.x]; } } } __syncthreads(); ia += grid_height; jb += grid_height; } smem.sum[tid] = tsum; __syncthreads(); // Block reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) smem.sum[tid] += smem.sum[tid + shift]; __syncthreads(); } // Warp reduce. Implicitly synchronized within a warp. 
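// (The unrolled loop below reads smem.sum[tid + shift] without __syncthreads(),
// relying on the lockstep execution within a warp that this code assumes; on
// architectures with independent thread scheduling this pattern would need
// __syncwarp() or volatile shared memory, which the original code does not use.)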
  if (tid < warpSize) {
# pragma unroll
    for (int shift = warpSize; shift > 0; shift >>= 1) {
      smem.sum[tid] += smem.sum[tid + shift];
    }
  }

  // output 1 sum per thread block
  if (tid == 0) {
    value[blockIdx.y * gridDim.x + blockIdx.x] = smem.sum[0];
  }
}

// _trace_mat_mat_trans reduce the partial sum to value[blockIdx.y * gridDim.x + blockIdx.x]
template<typename Real>
__global__
static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA,
                                 int B_stride, Real* value) {
  __shared__ Real ssum[CU1DBLOCK];
  const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; // linear thread id;
  const int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
  const int32_cuda grid_height = gridDim.y * blockDim.y;
  int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;

  // Grid reduce
  Real tsum = Real(0);
  if (j < dA.cols) {
    while (i < dA.rows) {
      tsum += A[i * dA.stride + j] * B[i * B_stride + j];
      i += grid_height;
    }
  }
  ssum[tid] = tsum;
  __syncthreads();

  // Block reduce
# pragma unroll
  for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
    if (tid < shift)
      ssum[tid] += ssum[tid + shift];
    __syncthreads();
  }

  // Warp reduce. Implicitly synchronized within a warp.
  if (tid < warpSize) {
# pragma unroll
    for (int shift = warpSize; shift > 0; shift >>= 1) {
      ssum[tid] += ssum[tid + shift];
    }
  }

  // output 1 sum per thread block
  if (tid == 0) {
    value[blockIdx.y * gridDim.x + blockIdx.x] = ssum[0];
  }
}

// Adds diag(M N) to v, where M and N are matrices. We supply row_stride and
// col_stride arguments for M and N, and swapping them allows us to transpose
// those matrices. Note: we imagine row-major indexing here, just like Kaldi
// and CBLAS (but unlike CUBLAS).
// This kernel expects the blockDim to be (CU1DBLOCK, 1) and the
// gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element.
// threads_per_element should be a power of 2.
template<typename Real>
__global__
static void _add_diag_mat_mat(
    Real alpha, Real* v, int v_dim, const Real* M, int M_cols,
    int M_row_stride, int M_col_stride, const Real *N, int N_row_stride,
    int N_col_stride, int threads_per_element, Real beta) {
  // we actually assume blockDim.x == CU1DBLOCK here.
  // Each diagonal element of v is processed by "threads_per_element" threads.
  __shared__ Real temp_data[CU1DBLOCK];
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int v_idx = i / threads_per_element,   // v_idx is the index into v that we are supposed to
      sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells
                                         // us which block of elements we sum up.
  if (v_idx < v_dim) {
    Real sum = 0.0;
    for (int j = sub_idx; j < M_cols; j += threads_per_element) {
      int M_index = v_idx * M_row_stride + j * M_col_stride,
          N_index = j * N_row_stride + v_idx * N_col_stride;
      sum += M[M_index] * N[N_index];
    }
    temp_data[threadIdx.x] = sum;
  }
  // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data
  // that we want to sum up.
  // The following is a tree-based reduction of the elements of temp_data from
  // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx".
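  // For example, with threads_per_element == 4 the loop below first has
  // sub_idx 0 and 1 add in the partial sums held by sub_idx 2 and 3, then
  // sub_idx 0 adds in sub_idx 1, leaving the full dot-product at temp_data[start_idx].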
__syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0 && v_idx < v_dim) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j< loop_end; j++) { int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) ) + 2 * sizeof(int)); tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here.... 
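    // Worked example of the partitioning above (added note): with s == 1000
    // and CU1DBLOCK == 256, size == 3 and threshold == 1000 - 3 * 256 == 232,
    // so threads 0..231 each handle 4 consecutive MatrixElements and threads
    // 232..255 handle 3, covering all s elements exactly once.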
} tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t+1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int num_elements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_elements) return; data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight; } template<typename Real> __global__ static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha, const Int32Pair* indices, const Real* x, int s, Real* data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= s) return; int data_i = indices[i].first * dim.stride + indices[i].second; data[data_i] += alpha * x[i]; } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index_mat1 = i + j*mat1_dim.stride; int32_cuda index_mat2 = i + j*mat2_stride; int32_cuda index_mask = i + j*mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0); } enum EnumReduceType { MAX, MIN, SUM }; template<EnumReduceType ReduceType, typename Real> struct ReduceOperation { __device__ __forceinline__ Real InitValue() { return Real(0); } __device__ __forceinline__ Real operator()(const Real& a, const Real& b) { return Real(0); } }; template<typename Real> struct ReduceOperation<MAX, Real> { __device__ __forceinline__ Real InitValue() { return Real(-1.0 / 0.0); } __device__ __forceinline__ Real operator()(const Real& a, const Real& b) { return max(a, b); } }; template<typename Real> struct ReduceOperation<MIN, Real> { __device__ __forceinline__ Real InitValue() { return Real(1.0 / 0.0); } __device__ __forceinline__ Real operator()(const Real& a, const Real& b) { return min(a, b); } }; template<typename Real> struct ReduceOperation<SUM, Real> { __device__ __forceinline__ Real InitValue() { return Real(0); } __device__ __forceinline__ Real operator()(const Real& a, const Real& b) { return a + b; } }; // Vector reduce. template<EnumReduceType ReduceType, typename Real> __global__ static void _vec_reduce(const Real* v, Real* result, const int dim, const int inc) { ReduceOperation<ReduceType, Real> reduce; __shared__ Real sdata[CU1DBLOCK]; Real tdata = reduce.InitValue(); const int vec_len = dim * inc; const int grid_stride = gridDim.x * blockDim.x * inc; int i = (blockIdx.x * blockDim.x + threadIdx.x) * inc; // Grid reduce. Loop over the whole vector v. for (; i < vec_len; i += grid_stride) tdata = reduce(tdata, v[i]); sdata[threadIdx.x] = tdata; __syncthreads(); // Tree reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (threadIdx.x < shift) sdata[threadIdx.x] = reduce(sdata[threadIdx.x], sdata[threadIdx.x + shift]); __syncthreads(); } // Reduce last warp. Threads implicitly synchronized within a warp. 
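  // Added note: _vec_reduce writes one partial result per thread block into
  // result[blockIdx.x] (see the end of this kernel), so the caller is
  // presumably expected to finish the reduction over the gridDim.x partials.
  // The ReduceOperation specializations above only swap the identity element
  // and the binary operator, so MAX, MIN and SUM share this code path.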
if (threadIdx.x < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) sdata[threadIdx.x] = reduce(sdata[threadIdx.x], sdata[threadIdx.x + shift]); } // Output to vector result. if (threadIdx.x == 0) result[blockIdx.x] = sdata[0]; } // Reduce a matrix 'mat' to a column vector 'result' template<EnumReduceType ReduceType, typename Real> __global__ static void _reduce_mat_cols(Real *result, const Real *mat, const MatrixDim d) { ReduceOperation<ReduceType, Real> reduce; __shared__ Real sdata[CU1DBLOCK]; const int i = blockIdx.x; const int row_start = i * d.stride; Real tdata = reduce.InitValue(); for (int j = threadIdx.x; j < d.cols; j += CU1DBLOCK) { tdata = reduce(tdata, mat[row_start + j]); } sdata[threadIdx.x] = tdata; __syncthreads(); // Tree reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (threadIdx.x < shift) sdata[threadIdx.x] = reduce(sdata[threadIdx.x], sdata[threadIdx.x + shift]); __syncthreads(); } // Reduce last warp. Threads implicitly synchronized within a warp. if (threadIdx.x < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) sdata[threadIdx.x] = reduce(sdata[threadIdx.x], sdata[threadIdx.x + shift]); } // Output to vector result. if (threadIdx.x == 0) result[i] = sdata[0]; } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] > ceiling_val) { v[i] = ceiling_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = (mat[index] > 0.0 ? 
1.0 : 0.0); } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + reorder[i]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _add_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + index; Real val = src[src_index]; dst[dst_index] += val; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[j], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = reorder[j] * src_stride + i; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; const Real *pointer = src[j]; if (pointer != NULL) { dst[dst_index] = pointer[i]; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_to_rows(Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { Real *pointer = dst[j]; if (pointer != NULL) { pointer[i] = src[j * src_dim.stride + i]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (reorder[j] >= 0) { int src_index = reorder[j] * src_stride + i; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (src[j] != NULL) 
{ dst[dst_index] += alpha * src[j][i]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { if (dst[j] != NULL) { dst[j][i] += alpha * src[j * src_dim.stride + i]; } } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows ) { if (mat[index] > ceiling_val) mat[index] = ceiling_val; } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j*d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0/data[index]; } // matrix-wise, do data = alpha * data + beta * A * B^T, // where B is a block matrix. template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). 
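  // Added note: the loops below compute, for this row i of A and block j of B,
  //   data[i][B_col_start + k] = alpha * sum_l A[i][B_row_start + l] * B_j[l][k]
  //                              + beta * data[i][B_col_start + k],
  // i.e. block j's contribution to row i of A * B, written into the column
  // range that block j occupies within the full block matrix B.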
for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } template<typename Real> __global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. 
sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indexes) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col; int src_index_start = indexes[row].first, src_index_end = indexes[row].second; for (int row_index = src_index_start; row_index < src_index_end; row_index++) data[dst_index] += src_data[row_index * src_dim.stride + col]; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; // compute the function y[index] = log(1 + exp(x[index])) if(i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index ++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; Real max_abs_value = (max_value > -min_value ? 
max_value : -min_value); // let max_value be the // largest abs(value) if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } template<typename Real> __global__ static void _group_max(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; int src_begin_index = i * group_size + j * src_stride; Real max_value = -1e20; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { if (!isnan(x[src_index]) && x[src_index] > max_value) max_value = x[src_index]; } y[dst_index] = max_value; } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = (x[src_index] > 0.0 ? 
1.0 : 0.0); y[dst_index] = res; } } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _log_softmax_reduce(Real *y, const Real *x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; // Maximum step 1: loads input data to <aux>. If <d.cols> is larger than // <blockDim.x>, then we do a first pass filtering and only // keep a <blockDim.x> size array. aux[threadIdx.x] = x[threadIdx.x + j * d.stride]; for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x + i * THREADS + j * d.stride]) aux[threadIdx.x] = x[threadIdx.x + i * THREADS + j * d.stride]; } // Maximum step 2: the standard max reduce. int nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x + halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real max = aux[0]; __syncthreads(); // Log sum step 1: substracts max, and takes exponentials. 
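  // Added note: steps 1 and 2 below implement the numerically stable identity
  //   log_softmax(x)_i = (x_i - max) - log(sum_j exp(x_j - max)),
  // which is why y is first filled with the shifted values x - max and only
  // the exponentials of those shifted values are accumulated in aux.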
y[threadIdx.x + j * d.stride] = x[threadIdx.x + j * d.stride] - max; aux[threadIdx.x] = exp(y[threadIdx.x + j * d.stride]); for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] = x[threadIdx.x + i * THREADS + j * d.stride] - max; aux[threadIdx.x] += exp(y[threadIdx.x + i * THREADS + j * d.stride]); } } // Log sum step 2: comptes summation and then takes logarithm. nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real log_sum = log(aux[0]); __syncthreads(); // Computes log softmax. for (int i = 0; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] -= log_sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // if (i < dim.cols && j < dim.rows) { int dst_index = i + j * dim.stride, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. 
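      // Added note: the packed ("sp") layout stores the lower triangle row by
      // row, so element (r, c) with c <= r lives at index r * (r + 1) / 2 + c;
      // e.g. (r, c) == (2, 1) maps to 2 * 3 / 2 + 1 == 4. The branch below
      // applies the same formula with (j, i) playing the roles of (r, c).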
src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, MatrixDim d) { const int32_cuda i = blockIdx.x; const int32_cuda base = i * d.stride; const int32_cuda tid = threadIdx.x; __shared__ Real smax[CU1DBLOCK]; __shared__ int32_cuda sidx[CU1DBLOCK]; Real tmax = -1e20; int32_cuda tidx = -1; // Loop over blocks for coalesced memory access. 
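  // Added note: each thread scans columns j = tid, tid + CU1DBLOCK,
  // tid + 2 * CU1DBLOCK, ... of row i, keeping its running (max value, column)
  // pair in (tmax, tidx); the reductions below then merge the per-thread pairs
  // so that smax[0] and sidx[0] hold the row maximum and its column index.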
for (int32_cuda j = tid; j < d.cols; j += CU1DBLOCK) { const Real val = mat[base + j]; if (val > tmax) { tmax = val; tidx = j; } } smax[tid] = tmax; sidx[tid] = tidx; // Parallel reduce #pragma unroll for (int32_cuda num_working_threads = CU1DBLOCK / 2; num_working_threads >= warpSize; num_working_threads >>= 1) { __syncthreads(); if (tid < num_working_threads) { if (smax[tid + num_working_threads] > smax[tid]) { smax[tid] = smax[tid + num_working_threads]; sidx[tid] = sidx[tid + num_working_threads]; } } } // Warp reduce without __syncthreads() // (note.: synchronizes implicitly within a warp at the multiprocessor) if (tid < warpSize / 2) { #pragma unroll for (int32_cuda num_working_threads = warpSize / 2; num_working_threads > 0; num_working_threads >>= 1) { if (smax[tid + num_working_threads] > smax[tid]) { smax[tid] = smax[tid + num_working_threads]; sidx[tid] = sidx[tid + num_working_threads]; } } } if (tid == 0) { if (vec_val) { vec_val[i] = smax[0]; } vec_id[i] = sidx[0]; } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cuda_int32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cuda_int32_add(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); } void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, 
dst_dim, src_stride); } void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst, const float* const* src, MatrixDim dst_dim) { _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim); } void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const* dst, const float* src, MatrixDim src_dim) { _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim); } void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* const* src, MatrixDim dst_dim) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim); } void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const* dst, const float* src, MatrixDim src_dim) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, 
float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_max_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { _reduce_mat_cols<MAX> <<<Gr,Bl>>>(result,mat,d); } void cudaF_min_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { _reduce_mat_cols<MIN> <<<Gr,Bl>>>(result,mat,d); } void cudaF_sum_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { _reduce_mat_cols<SUM> <<<Gr,Bl>>>(result,mat,d); } void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaF_vec_min(int Gr, int Bl, const float* v, float* value, int dim, int inc) { _vec_reduce<MIN><<<Gr,Bl>>>(v, value, dim, inc); } void cudaF_vec_max(int Gr, int Bl, const float* v, float* value, int dim, int inc) { _vec_reduce<MAX><<<Gr,Bl>>>(v, value, dim, inc); } void cudaF_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat_trans<<<Gr,Bl>>>(A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(dim3 Gr, dim3 Bl, const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat<32><<<Gr,Bl>>>(A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { _vec_reduce<SUM><<<Gr,Bl>>>(v, value, dim, inc); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int num_elements) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements); } void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const 
float *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v, ceiling_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _group_max<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_heaviside (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _heaviside<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_log_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaF_one(int Gr, int Bl, float* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } 
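/*
 * Illustrative host-side usage sketch (added; not part of this file's API).
 * The block shape and the helpers CU2DBLOCK, n_blocks(), mat.Dim() and
 * mat.Data() are assumptions standing in for a typical caller:
 *
 *   MatrixDim d = mat.Dim();                    // rows, cols, stride
 *   dim3 block(CU2DBLOCK, CU2DBLOCK);           // e.g. 16 x 16 threads
 *   dim3 grid(n_blocks(d.cols, CU2DBLOCK),      // ceil(cols / CU2DBLOCK)
 *             n_blocks(d.rows, CU2DBLOCK));     // ceil(rows / CU2DBLOCK)
 *   cudaF_apply_floor(grid, block, mat.Data(), 0.0f, d);
 *
 * Each wrapper above simply forwards its arguments to the corresponding
 * templated kernel, so the float and double entry points share one kernel
 * definition.
 */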
void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { _copy_from_sp<<<Gr,Bl>>>(x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); } void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } 
void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const* src, MatrixDim dst_dim) { _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim); } void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const* dst, const double* src, MatrixDim src_dim) { _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim); } void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const* src, MatrixDim dst_dim) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim); } void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const* dst, const double* src, MatrixDim src_dim) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { 
_apply_log<<<Gr,Bl>>>(mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, 
beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_max_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { _reduce_mat_cols<MAX> <<<Gr,Bl>>>(result,mat,d); } void cudaD_min_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { _reduce_mat_cols<MIN> <<<Gr,Bl>>>(result,mat,d); } void cudaD_sum_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { _reduce_mat_cols<SUM> <<<Gr,Bl>>>(result,mat,d); } void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaD_vec_min(int Gr, int Bl, const double* v, double* value, int dim, int inc) { _vec_reduce<MIN><<<Gr,Bl>>>(v, value, dim, inc); } void cudaD_vec_max(int Gr, int Bl, const double* v, double* value, int dim, int inc) { _vec_reduce<MAX><<<Gr,Bl>>>(v, value, dim, inc); } void cudaD_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat_trans<<<Gr,Bl>>>(A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(dim3 Gr, dim3 Bl, const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat<32><<<Gr,Bl>>>(A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { _vec_reduce<SUM><<<Gr,Bl>>>(v,value,dim,inc); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int num_elements) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, double alpha, const Int32Pair* indices, const double* x, int s, double* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaD_vec_copy_diag_from_packed(int Gr, int 
Bl, double *dst, const double *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _group_max<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_heaviside (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _heaviside<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_log_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void 
cudaD_one(int Gr, int Bl, double* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { _copy_from_sp<<<Gr,Bl>>>(x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. 
*/ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<32><<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<32><<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<32><<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<32><<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { _trace_mat_smat<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, 
float* trace_vec_out) { _trace_mat_smat_trans<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { _trace_mat_smat<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { _trace_mat_smat_trans<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); }
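/*
 * Editorial sketch, not part of the original file: it shows how a host caller might
 * derive the Gr/Bl arguments that the wrappers above expect from a MatrixDim. The
 * 16x16 tile and the rows/cols field names are assumptions made for illustration,
 * not guarantees taken from this file.
 */
static void example_launch_scale(double *mat, double value, MatrixDim d) {
  const int tile = 16;                               // assumed 2D block edge
  dim3 Bl(tile, tile);                               // one thread per matrix element
  dim3 Gr((d.cols + tile - 1) / tile,                // blocks across the columns
          (d.rows + tile - 1) / tile);               // blocks across the rows
  cudaD_scale(Gr, Bl, mat, value, d);                // wrapper defined above
}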
03a54be84ffa2156baaa7affff84f2edf6df9ce4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define DRAW_WINDOW 0 #define _CRT_SECURE_NO_WARNINGS #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <Windows.h> typedef char unsigned uint8_t; typedef struct rgb { uint8_t r, g, b; } rgb_t; __global__ void calculate_mandelbrot(int width, int height, double scale, rgb_t **row_ptrs) { int const num_shades = 17; rgb_t const mapping[num_shades] = { { 66, 30, 15 },{ 25, 7, 26 },{ 9, 1, 47 },{ 4, 4, 73 },{ 0, 7, 100 }, { 12, 44, 138 },{ 24, 82, 177 },{ 57, 125, 209 },{ 134, 181, 229 },{ 211, 236, 248 }, { 241, 233, 191 },{ 248, 201, 95 },{ 255, 170, 0 },{ 204, 128, 0 },{ 153, 87, 0 }, { 106, 52, 3 } }; int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; double cx = -.6, cy = 0; double y = (i - height / 2) * scale + cy; rgb_t *px = row_ptrs[i]; px += j; double x = (j - width / 2) * scale + cx; double zx, zy, zx2, zy2; uint8_t iter = 0; zx = hypot(x - .25, y); if (x < zx - 2 * zx * zx + .25) {iter = 0xFF;} if ((x + 1)*(x + 1) + y * y < 1 / 16) {iter = 0xFF;} zx = zy = zx2 = zy2 = 0; do { zy = 2 * zx * zy + y; zx = zx2 - zy2 + x; zx2 = zx * zx; zy2 = zy * zy; } while ((iter++ < 0xFF) && (zx2 + zy2 < 4)); px->r = iter; px->g = iter; px->b = iter; if (px->r == 0xFF || px->r == 0) { px->r = 0; px->g = 0; px->b = 0; } else { uint8_t uc = px->r % num_shades; *px = mapping[uc]; } } __global__ void setRows(rgb_t *img_data, rgb_t **row_ptrs, int height, int width) { row_ptrs[0] = img_data; for (int i = 1; i < height; ++i) { row_ptrs[i] = row_ptrs[i - 1] + width; } } #if DRAW_WINDOW int __stdcall WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) { int const width = 1024; int const height = 1024; #else int main(int argc, char **argv) { int const width = (argc > 1) ? atoi(argv[1]) : 4096 / 4; int const height = (argc > 2) ? atoi(argv[2]) : 4096 / 4; #endif rgb_t **cpu_row_ptrs, *cpu_img_data; hipError_t err; double scale = 1.0 / (width / 4); size_t img_data_size = sizeof(rgb_t) * width *height; size_t row_ptrs_size = sizeof(rgb_t *) * height; // // Setup memory. // { cpu_img_data = (rgb_t *)malloc(img_data_size); cpu_row_ptrs = (rgb_t **)malloc(row_ptrs_size); cpu_row_ptrs[0] = cpu_img_data; for (int i = 1; i < height; ++i) { cpu_row_ptrs[i] = cpu_row_ptrs[i - 1] + width; } } // // Calculate mandelbrot. // { LARGE_INTEGER cpu_start; QueryPerformanceCounter(&cpu_start); hipEvent_t start, stop; err = hipEventCreate(&start); assert(err == hipSuccess); err = hipEventCreate(&stop); assert(err == hipSuccess); err = hipEventRecord(start); assert(err == hipSuccess); rgb_t *gpu_img_data, **gpu_row_ptrs; err = hipMalloc((void **)&gpu_img_data, img_data_size); assert(err == hipSuccess); err = hipMalloc((void **)&gpu_row_ptrs, row_ptrs_size); assert(err == hipSuccess); // Set the row information on GPU. 
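		// The table of row pointers has to hold *device* addresses, so it is filled in
		// by a single-thread kernel against gpu_img_data rather than by copying
		// cpu_row_ptrs (whose entries point into host memory) up to the GPU.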
setRows << <1, 1 >> >(gpu_img_data, gpu_row_ptrs, height, width); int minGridSize = 0, blockSize = 0; int dataLength = width * height; err = hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, calculate_mandelbrot, 0, 0); assert(err == hipSuccess); int blocks = (int)(pow(2, ceil(log(sqrt(blockSize)) / log(2)))); int grid = (int)((sqrt(dataLength) + blocks - 1) / blocks); calculate_mandelbrot << <dim3(grid, grid), dim3(blocks, blocks) >> >(width, height, scale, gpu_row_ptrs); err = hipMemcpy(cpu_img_data, gpu_img_data, sizeof(rgb_t) * width * height, hipMemcpyDeviceToHost); assert(err == hipSuccess); err = hipFree(gpu_row_ptrs); assert(err == hipSuccess); err = hipFree(gpu_img_data); assert(err == hipSuccess); err = hipEventRecord(stop); assert(err == hipSuccess); err = hipEventSynchronize(stop); assert(err == hipSuccess); float cuda_ms = 0; err = hipEventElapsedTime(&cuda_ms, start, stop); assert(err == hipSuccess); LARGE_INTEGER cpu_end; QueryPerformanceCounter(&cpu_end); printf("Cuda Timer: %fms\nCPU timer: %llums", cuda_ms, (cpu_end.QuadPart - cpu_start.QuadPart) / 1000); } // // Write to file. // { FILE *fp; fp = fopen("out_mandelbrot.ppm", "w"); fprintf(fp, "P6\n%d %d\n255\n", width, height); for (int i = height - 1; (i >= 0); --i) { fwrite(cpu_row_ptrs[i], 1, width * sizeof(rgb_t), fp); } } #if DRAW_WINDOW // // Draw window. // { WNDCLASS wc; memset(&wc, 0, sizeof(wc)); wc.lpfnWndProc = DefWindowProc; wc.hInstance = hInstance; wc.lpszClassName = "Mandelbrot"; RegisterClass(&wc); HWND win = CreateWindowA(wc.lpszClassName, "Mandelbrot", WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, width, height, 0, 0, hInstance, 0); ShowWindow(win, nCmdShow); HDC hdc = GetDC(win); for (int i = 0; i < width; ++i) { for (int j = 0; j < height; ++j) { rgb_t *cur = cpu_row_ptrs[i] + j; SetPixel(hdc, i, j, RGB(cur->r, cur->g, cur->b)); } } ReleaseDC(win, hdc); bool quit = false; while (!quit) { MSG msg; while (PeekMessageA(&msg, win, 0, 0, PM_REMOVE)) { if (msg.message == WM_QUIT) quit = true; TranslateMessage(&msg); DispatchMessage(&msg); } } } #endif return(0); }
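/*
 * Editorial sketch, not part of the original program: the launch above derives a
 * square block edge by rounding sqrt(blockSize) up to the next power of two, which
 * can exceed the occupancy suggestion. A hedged alternative that rounds down so the
 * 2D block never uses more threads than suggested; the helper name is invented here.
 */
static int square_block_edge(int suggested_block_size) {
	int edge = 1;
	// grow while the next power-of-two edge still fits within the suggestion
	while ((edge * 2) * (edge * 2) <= suggested_block_size) {
		edge *= 2;
	}
	return edge;   // e.g. 768 -> 16 (256 threads/block), 1024 -> 32 (1024 threads/block)
}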
03a54be84ffa2156baaa7affff84f2edf6df9ce4.cu
#define DRAW_WINDOW 0 #define _CRT_SECURE_NO_WARNINGS #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <Windows.h> typedef char unsigned uint8_t; typedef struct rgb { uint8_t r, g, b; } rgb_t; __global__ void calculate_mandelbrot(int width, int height, double scale, rgb_t **row_ptrs) { int const num_shades = 17; rgb_t const mapping[num_shades] = { { 66, 30, 15 },{ 25, 7, 26 },{ 9, 1, 47 },{ 4, 4, 73 },{ 0, 7, 100 }, { 12, 44, 138 },{ 24, 82, 177 },{ 57, 125, 209 },{ 134, 181, 229 },{ 211, 236, 248 }, { 241, 233, 191 },{ 248, 201, 95 },{ 255, 170, 0 },{ 204, 128, 0 },{ 153, 87, 0 }, { 106, 52, 3 } }; int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; double cx = -.6, cy = 0; double y = (i - height / 2) * scale + cy; rgb_t *px = row_ptrs[i]; px += j; double x = (j - width / 2) * scale + cx; double zx, zy, zx2, zy2; uint8_t iter = 0; zx = hypot(x - .25, y); if (x < zx - 2 * zx * zx + .25) {iter = 0xFF;} if ((x + 1)*(x + 1) + y * y < 1 / 16) {iter = 0xFF;} zx = zy = zx2 = zy2 = 0; do { zy = 2 * zx * zy + y; zx = zx2 - zy2 + x; zx2 = zx * zx; zy2 = zy * zy; } while ((iter++ < 0xFF) && (zx2 + zy2 < 4)); px->r = iter; px->g = iter; px->b = iter; if (px->r == 0xFF || px->r == 0) { px->r = 0; px->g = 0; px->b = 0; } else { uint8_t uc = px->r % num_shades; *px = mapping[uc]; } } __global__ void setRows(rgb_t *img_data, rgb_t **row_ptrs, int height, int width) { row_ptrs[0] = img_data; for (int i = 1; i < height; ++i) { row_ptrs[i] = row_ptrs[i - 1] + width; } } #if DRAW_WINDOW int __stdcall WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) { int const width = 1024; int const height = 1024; #else int main(int argc, char **argv) { int const width = (argc > 1) ? atoi(argv[1]) : 4096 / 4; int const height = (argc > 2) ? atoi(argv[2]) : 4096 / 4; #endif rgb_t **cpu_row_ptrs, *cpu_img_data; cudaError_t err; double scale = 1.0 / (width / 4); size_t img_data_size = sizeof(rgb_t) * width *height; size_t row_ptrs_size = sizeof(rgb_t *) * height; // // Setup memory. // { cpu_img_data = (rgb_t *)malloc(img_data_size); cpu_row_ptrs = (rgb_t **)malloc(row_ptrs_size); cpu_row_ptrs[0] = cpu_img_data; for (int i = 1; i < height; ++i) { cpu_row_ptrs[i] = cpu_row_ptrs[i - 1] + width; } } // // Calculate mandelbrot. // { LARGE_INTEGER cpu_start; QueryPerformanceCounter(&cpu_start); cudaEvent_t start, stop; err = cudaEventCreate(&start); assert(err == cudaSuccess); err = cudaEventCreate(&stop); assert(err == cudaSuccess); err = cudaEventRecord(start); assert(err == cudaSuccess); rgb_t *gpu_img_data, **gpu_row_ptrs; err = cudaMalloc((void **)&gpu_img_data, img_data_size); assert(err == cudaSuccess); err = cudaMalloc((void **)&gpu_row_ptrs, row_ptrs_size); assert(err == cudaSuccess); // Set the row information on GPU. 
setRows << <1, 1 >> >(gpu_img_data, gpu_row_ptrs, height, width); int minGridSize = 0, blockSize = 0; int dataLength = width * height; err = cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, calculate_mandelbrot, 0, 0); assert(err == cudaSuccess); int blocks = (int)(pow(2, ceil(log(sqrt(blockSize)) / log(2)))); int grid = (int)((sqrt(dataLength) + blocks - 1) / blocks); calculate_mandelbrot << <dim3(grid, grid), dim3(blocks, blocks) >> >(width, height, scale, gpu_row_ptrs); err = cudaMemcpy(cpu_img_data, gpu_img_data, sizeof(rgb_t) * width * height, cudaMemcpyDeviceToHost); assert(err == cudaSuccess); err = cudaFree(gpu_row_ptrs); assert(err == cudaSuccess); err = cudaFree(gpu_img_data); assert(err == cudaSuccess); err = cudaEventRecord(stop); assert(err == cudaSuccess); err = cudaEventSynchronize(stop); assert(err == cudaSuccess); float cuda_ms = 0; err = cudaEventElapsedTime(&cuda_ms, start, stop); assert(err == cudaSuccess); LARGE_INTEGER cpu_end; QueryPerformanceCounter(&cpu_end); printf("Cuda Timer: %fms\nCPU timer: %llums", cuda_ms, (cpu_end.QuadPart - cpu_start.QuadPart) / 1000); } // // Write to file. // { FILE *fp; fp = fopen("out_mandelbrot.ppm", "w"); fprintf(fp, "P6\n%d %d\n255\n", width, height); for (int i = height - 1; (i >= 0); --i) { fwrite(cpu_row_ptrs[i], 1, width * sizeof(rgb_t), fp); } } #if DRAW_WINDOW // // Draw window. // { WNDCLASS wc; memset(&wc, 0, sizeof(wc)); wc.lpfnWndProc = DefWindowProc; wc.hInstance = hInstance; wc.lpszClassName = "Mandelbrot"; RegisterClass(&wc); HWND win = CreateWindowA(wc.lpszClassName, "Mandelbrot", WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, width, height, 0, 0, hInstance, 0); ShowWindow(win, nCmdShow); HDC hdc = GetDC(win); for (int i = 0; i < width; ++i) { for (int j = 0; j < height; ++j) { rgb_t *cur = cpu_row_ptrs[i] + j; SetPixel(hdc, i, j, RGB(cur->r, cur->g, cur->b)); } } ReleaseDC(win, hdc); bool quit = false; while (!quit) { MSG msg; while (PeekMessageA(&msg, win, 0, 0, PM_REMOVE)) { if (msg.message == WM_QUIT) quit = true; TranslateMessage(&msg); DispatchMessage(&msg); } } } #endif return(0); }
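/*
 * Editorial sketch, not part of the original program: neither kernel launch above is
 * checked for a launch-time error. A minimal, hedged pattern for surfacing launch and
 * execution errors; the helper name is invented for illustration.
 */
static void check_kernel(const char *label) {
	cudaError_t e = cudaGetLastError();                   // error from the launch itself
	if (e == cudaSuccess) e = cudaDeviceSynchronize();    // errors raised while running
	if (e != cudaSuccess) {
		fprintf(stderr, "%s: %s\n", label, cudaGetErrorString(e));
		exit(EXIT_FAILURE);
	}
}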
e8cbb216ae0a216958a904639eb42504301171b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include "scanLargeArray_kernel.cu" #include <assert.h> #include "GPUPrimitive_Def.cu" //#include <cutil.h> #include "scan.cuh" # define CUT_CHECK_ERROR(errorMessage) { \ hipError_t err = hipGetLastError(); \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ } inline bool isPowerOfTwo(int n) { return ((n&(n-1))==0) ; } inline int floorPow2(int n) { #ifdef WIN32 // method 2 return 1 << (int)logb((float)n); #else // method 1 // int nf = (int)n; // return 1 << (((*(int*)&nf) >> 23) - 127); int exp; frexp((float)n, &exp); return 1 << (exp - 1); #endif } //#define BLOCK_SIZE 256 int** g_scanBlockSums; unsigned int g_numEltsAllocated = 0; unsigned int g_numLevelsAllocated = 0; void preallocBlockSums(unsigned int maxNumElements) { printf( "numThread: %d\n", BLOCK_SIZE ); assert(g_numEltsAllocated == 0); // shouldn't be called g_numEltsAllocated = maxNumElements; unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numElts = maxNumElements; int level = 0; do { unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize))); if (numBlocks > 1) { level++; } numElts = numBlocks; } while (numElts > 1); g_scanBlockSums = (int**) malloc(level * sizeof(int*)); g_numLevelsAllocated = level; numElts = maxNumElements; level = 0; do { unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize))); if (numBlocks > 1) { GPUMALLOC((void**) &g_scanBlockSums[level++], numBlocks * sizeof(int)); } numElts = numBlocks; } while (numElts > 1); CUT_CHECK_ERROR("preallocBlockSums"); } void deallocBlockSums() { for (int i = 0; i < g_numLevelsAllocated; i++) { hipFree(g_scanBlockSums[i]); } CUT_CHECK_ERROR("deallocBlockSums"); free((void**)g_scanBlockSums); g_scanBlockSums = 0; g_numEltsAllocated = 0; g_numLevelsAllocated = 0; } void saven_initialPrefixSum(unsigned int maxNumElements) { 
if(g_numEltsAllocated == 0) preallocBlockSums(maxNumElements); else if(g_numEltsAllocated<maxNumElements) { deallocBlockSums(); preallocBlockSums(maxNumElements); } } void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level) { unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize))); unsigned int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = floorPow2(numElements); unsigned int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2); unsigned int np2LastBlock = 0; unsigned int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts unsigned int extraSpace = numEltsPerBlock / NUM_BANKS; unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); #ifdef DEBUG if (numBlocks > 1) { assert(g_numEltsAllocated >= numElements); } #endif // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); // make sure there are no CUDA errors before we start CUT_CHECK_ERROR("prescanArrayRecursive before kernels"); int* d_data; // execute the scan if (numBlocks > 1) { #ifdef SHARED_MEM hipLaunchKernelGGL(( prescan<true, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0); #else GPUMALLOC( (void**)&d_data, sharedMemSize*grid.x ); hipLaunchKernelGGL(( prescan<true, false>), dim3(grid), dim3(threads), 0, 0, d_data, outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0, sharedMemSize); GPUFREE( d_data ); #endif CUT_CHECK_ERROR("prescanWithBlockSums"); if (np2LastBlock) { #ifdef SHARED_MEM hipLaunchKernelGGL(( prescan<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock , 0, outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); #else GPUMALLOC( (void**)&d_data, sharedMemLastBlock*1 ); hipLaunchKernelGGL(( prescan<true, true>), dim3(1), dim3(numThreadsLastBlock), 0, 0, d_data, outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock, sharedMemLastBlock ); GPUFREE( d_data ); #endif CUT_CHECK_ERROR("prescanNP2WithBlockSums"); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. 
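	/*
	 * Editorial illustration (comments only, not original code): with eight inputs
	 * split as [3 1 7 0 | 4 1 6 3], the per-block exclusive scans are [0 3 4 11] and
	 * [0 4 5 11], the per-block totals collected in g_scanBlockSums are [11 14], the
	 * recursive scan of those totals is [0 11], and uniformAdd() then adds 0 to the
	 * first block and 11 to the second, giving the full scan [0 3 4 11 11 15 16 22].
	 */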
// recursive (CPU) call prescanArrayRecursive(g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); hipLaunchKernelGGL(( uniformAdd), dim3(grid), dim3(threads) , 0, 0, outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); CUT_CHECK_ERROR("uniformAdd"); if (np2LastBlock) { hipLaunchKernelGGL(( uniformAdd), dim3(1), dim3(numThreadsLastBlock) , 0, 0, outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); CUT_CHECK_ERROR("uniformAdd"); } } else if (isPowerOfTwo(numElements)) { #ifdef SHARED_MEM hipLaunchKernelGGL(( prescan<false, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numThreads * 2, 0, 0); #else GPUMALLOC( (void**)&d_data, sharedMemSize*grid.x ); hipLaunchKernelGGL(( prescan<false, false>), dim3(grid), dim3(threads) , 0, 0, d_data, outArray, inArray, 0, numThreads * 2, 0, 0, sharedMemSize); GPUFREE( d_data ); #endif CUT_CHECK_ERROR("prescan"); } else { #ifdef SHARED_MEM hipLaunchKernelGGL(( prescan<false, true>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numElements, 0, 0); #else GPUMALLOC( (void**)&d_data, sharedMemSize*grid.x ); hipLaunchKernelGGL(( prescan<false, true>), dim3(grid), dim3(threads), 0, 0, d_data, outArray, inArray, 0, numElements, 0, 0, sharedMemSize ); GPUFREE( d_data ); #endif CUT_CHECK_ERROR("prescanNP2"); } } void prescanArray(int *outArray, int *inArray, int numElements) { #ifdef OUTPUT_INFO #ifdef COALESCED printf( "YES, COALESCED, SCAN\n" ); #else printf( "NO COALESCED, SCAN\n" ); #endif #ifdef SHARED_MEM printf( "YES, SHARED MEMORY, SCAN\n" ); #else printf( "NO SHARED MEMORY, SCAN\n" ); #endif #endif prescanArrayRecursive(outArray, inArray, numElements, 0); } #endif // _PRESCAN_CU_
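/*
 * Editorial sketch, not part of the original source: a plain host-side exclusive
 * prefix sum ("prescan"), useful as a reference when checking small GPU results.
 */
static void reference_exclusive_scan(const int *in, int *out, int n)
{
    int running = 0;
    for (int i = 0; i < n; ++i) {
        out[i] = running;   // exclusive: sum of everything before in[i]
        running += in[i];
    }
}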
e8cbb216ae0a216958a904639eb42504301171b3.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include "scanLargeArray_kernel.cu" #include <assert.h> #include "GPUPrimitive_Def.cu" //#include <cutil.h> #include "scan.cuh" # define CUT_CHECK_ERROR(errorMessage) { \ cudaError_t err = cudaGetLastError(); \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ } inline bool isPowerOfTwo(int n) { return ((n&(n-1))==0) ; } inline int floorPow2(int n) { #ifdef WIN32 // method 2 return 1 << (int)logb((float)n); #else // method 1 // int nf = (int)n; // return 1 << (((*(int*)&nf) >> 23) - 127); int exp; frexp((float)n, &exp); return 1 << (exp - 1); #endif } //#define BLOCK_SIZE 256 int** g_scanBlockSums; unsigned int g_numEltsAllocated = 0; unsigned int g_numLevelsAllocated = 0; void preallocBlockSums(unsigned int maxNumElements) { printf( "numThread: %d\n", BLOCK_SIZE ); assert(g_numEltsAllocated == 0); // shouldn't be called g_numEltsAllocated = maxNumElements; unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numElts = maxNumElements; int level = 0; do { unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize))); if (numBlocks > 1) { level++; } numElts = numBlocks; } while (numElts > 1); g_scanBlockSums = (int**) malloc(level * sizeof(int*)); g_numLevelsAllocated = level; numElts = maxNumElements; level = 0; do { unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize))); if (numBlocks > 1) { GPUMALLOC((void**) &g_scanBlockSums[level++], numBlocks * sizeof(int)); } numElts = numBlocks; } while (numElts > 1); CUT_CHECK_ERROR("preallocBlockSums"); } void deallocBlockSums() { for (int i = 0; i < g_numLevelsAllocated; i++) { cudaFree(g_scanBlockSums[i]); } CUT_CHECK_ERROR("deallocBlockSums"); free((void**)g_scanBlockSums); g_scanBlockSums = 0; g_numEltsAllocated = 0; g_numLevelsAllocated = 0; } void saven_initialPrefixSum(unsigned int maxNumElements) { if(g_numEltsAllocated == 0) preallocBlockSums(maxNumElements); else 
if(g_numEltsAllocated<maxNumElements) { deallocBlockSums(); preallocBlockSums(maxNumElements); } } void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level) { unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize))); unsigned int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = floorPow2(numElements); unsigned int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2); unsigned int np2LastBlock = 0; unsigned int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts unsigned int extraSpace = numEltsPerBlock / NUM_BANKS; unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); #ifdef DEBUG if (numBlocks > 1) { assert(g_numEltsAllocated >= numElements); } #endif // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); // make sure there are no CUDA errors before we start CUT_CHECK_ERROR("prescanArrayRecursive before kernels"); int* d_data; // execute the scan if (numBlocks > 1) { #ifdef SHARED_MEM prescan<true, false><<< grid, threads, sharedMemSize >>>(outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0); #else GPUMALLOC( (void**)&d_data, sharedMemSize*grid.x ); prescan<true, false><<< grid, threads>>>(d_data, outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0, sharedMemSize); GPUFREE( d_data ); #endif CUT_CHECK_ERROR("prescanWithBlockSums"); if (np2LastBlock) { #ifdef SHARED_MEM prescan<true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>> (outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); #else GPUMALLOC( (void**)&d_data, sharedMemLastBlock*1 ); prescan<true, true><<< 1, numThreadsLastBlock>>> (d_data, outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock, sharedMemLastBlock ); GPUFREE( d_data ); #endif CUT_CHECK_ERROR("prescanNP2WithBlockSums"); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. 
// recursive (CPU) call prescanArrayRecursive(g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); uniformAdd<<< grid, threads >>>(outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); CUT_CHECK_ERROR("uniformAdd"); if (np2LastBlock) { uniformAdd<<< 1, numThreadsLastBlock >>>(outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); CUT_CHECK_ERROR("uniformAdd"); } } else if (isPowerOfTwo(numElements)) { #ifdef SHARED_MEM prescan<false, false><<< grid, threads, sharedMemSize >>>(outArray, inArray, 0, numThreads * 2, 0, 0); #else GPUMALLOC( (void**)&d_data, sharedMemSize*grid.x ); prescan<false, false><<< grid, threads >>>(d_data, outArray, inArray, 0, numThreads * 2, 0, 0, sharedMemSize); GPUFREE( d_data ); #endif CUT_CHECK_ERROR("prescan"); } else { #ifdef SHARED_MEM prescan<false, true><<< grid, threads, sharedMemSize >>>(outArray, inArray, 0, numElements, 0, 0); #else GPUMALLOC( (void**)&d_data, sharedMemSize*grid.x ); prescan<false, true><<< grid, threads>>>(d_data, outArray, inArray, 0, numElements, 0, 0, sharedMemSize ); GPUFREE( d_data ); #endif CUT_CHECK_ERROR("prescanNP2"); } } void prescanArray(int *outArray, int *inArray, int numElements) { #ifdef OUTPUT_INFO #ifdef COALESCED printf( "YES, COALESCED, SCAN\n" ); #else printf( "NO COALESCED, SCAN\n" ); #endif #ifdef SHARED_MEM printf( "YES, SHARED MEMORY, SCAN\n" ); #else printf( "NO SHARED MEMORY, SCAN\n" ); #endif #endif prescanArrayRecursive(outArray, inArray, numElements, 0); } #endif // _PRESCAN_CU_
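/*
 * Editorial sketch, not part of the original source: one hedged way a caller might
 * drive prescanArray() end to end. GPUMALLOC/GPUFREE are used the same way as in the
 * code above; their exact definitions live in GPUPrimitive_Def.cu and are assumed
 * here to behave like cudaMalloc/cudaFree.
 */
static void example_prescan(const int *host_in, int *host_out, int n)
{
    int *d_in, *d_out;
    GPUMALLOC((void**)&d_in, n * sizeof(int));
    GPUMALLOC((void**)&d_out, n * sizeof(int));
    cudaMemcpy(d_in, host_in, n * sizeof(int), cudaMemcpyHostToDevice);
    saven_initialPrefixSum(n);        // sizes the per-level block-sum buffers
    prescanArray(d_out, d_in, n);     // prefix sum of d_in written to d_out
    cudaMemcpy(host_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);
    GPUFREE(d_in);
    GPUFREE(d_out);
}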
1d6799a6614ebd0873675827fce304372824f2df.hip
// !!! This is a file automatically generated by hipify!!! const char* QN_cuvec_rcsid = "$Header: /u/drspeech/repos/quicknet2/QN_cuvec.cu,v 1.1 2011/03/10 00:27:57 davidj Exp $"; /* Must include the config.h file first */ #include <QN_config.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "QN_types.h" #include "QN_cuvec.h" #include <hip/hip_runtime.h> #include <rocblas.h> __global__ void qn_devnv_copy_vf_vf(size_t len, const float* from, float* to) { size_t i; for (i=len; i!=0; i--) *to++ = *from++; } __global__ void qn_devnv_sub_vfvf_vf(size_t n, const float *in_vec1, const float *in_vec2, float *res_vec) { size_t i; for (i=n; i!=0; i--) (*res_vec++) = *(in_vec1++) - (*in_vec2++); } __global__ void qn_devnv_mul_vfvf_vf(size_t n, const float *in_vec1, const float *in_vec2, float *res_vec) { size_t i; for (i=n; i!=0; i--) (*res_vec++) = (*in_vec1++) * (*in_vec2++); } __global__ void qn_devnv_mulacc_vff_vf(size_t n, const float *in, float scale, float *acc) { size_t i; for (i=n; i!=0; i--) (*acc++) += scale * (*in++); } __global__ void qn_devnv_dsigmoid_vf_vf(size_t n, const float* in_vec, float* out_vec) { size_t i; for (i=n; i!=0; i--) { const float y = *in_vec++; *out_vec++ = (1.0f - y) * y; } } __global__ void qn_devnv_sigmoid_vf_vf(int n, const float* in_vec, float* out_vec) { size_t i; for (i=0; i<n; i++) { *out_vec++ = qn_devin_sigmoid_f_f(*in_vec++); } } __global__ void qn_kern_sub_vfvf_vf(int n, const float* in_vec1, const float* in_vec2, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) out_vec[i] = in_vec1[i] - in_vec2[i]; } __global__ void qn_kern_mul_vfvf_vf(int n, const float* in_vec1, const float* in_vec2, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) out_vec[i] = in_vec1[i] * in_vec2[i]; } __global__ void qn_kern_sigmoid_vf_vf(int n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) out_vec[i] = 1.0f/(1.0f + __expf(-in_vec[i])); } __global__ void qn_kern_tanh_vf_vf(int n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) out_vec[i] = tanh(in_vec[i]); } __global__ void qn_kern_dsigmoid_vf_vf(int n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) { const float y = in_vec[i]; out_vec[i] = (1.0f - y) * y; } } __global__ void qn_kern_dtanh_vf_vf(int n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) { const float y = in_vec[i]; out_vec[i] = (1.0f - y) * (1.0f + y); } } //cz277 - criteria, in_vec1 = y_i, in_vec2 = t_i __global__ void qn_kern_dxentropy_vf_vf(size_t n, const float* in_vec1, const float* in_vec2, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) { float yi = in_vec1[i]; if (yi == 0) yi = QN_MinProb; out_vec[i] = - in_vec2[i] / yi; //out_vec[i] = in_vec2[i]; } } //cz277 - nonlinearity __global__ void qn_kern_dsoftmax_vf_vf(size_t n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) { const float yi = in_vec[i]; out_vec[i] = (1 - yi) * yi; } } //cz277 - nonlinearity, x > 0 -> y > 0; x = 0 -> y = 0; x < 0 -> y < 0; __global__ void qn_kern_dsoftsign_vf_vf(size_t n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) { float temp; const float yi = in_vec[i]; if (yi > 0) temp = yi - 1; else temp = yi + 1; out_vec[i] = temp * temp; } } //cz277 - nonlinearity __global__ void 
qn_kern_softsign_vf_vf(size_t n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) { out_vec[i] = in_vec[i] / (1 + abs(in_vec[i])); } } __global__ void qn_kern_copy_vf_mf(int mat_height, int vec_len, const float*vec, float* mat) { int col = (blockIdx.x * blockDim.x) + threadIdx.x; if (col<vec_len) { int j; float val = vec[col]; float* top = &mat[col]; for (j=mat_height; j!=0; j--) { *top = val; top += vec_len; } } } //cz277 - fast softmax __global__ void qn_kern_fastcopy_vf_mf(int mat_height, int vec_len, const float*vec, float* mat, int matsize) { int matidx = (blockIdx.x * blockDim.x) + threadIdx.x; if (matidx < matsize) { int vecidx = matidx % vec_len; float val = vec[vecidx]; float *ptr = &mat[matidx]; *ptr = val; } } // Kernel to sum columns in a matrix // Do each column sum in its own thread __global__ void qn_kern_sumcol_mf_vf(int rows, int cols, const float* in, float* res) { int col = (blockIdx.x * blockDim.x) + threadIdx.x; if (col < cols) { int j; const float* fromp = &in[col]; float* top = &res[col]; (*top) = (*fromp); fromp+=cols; for (j=rows-1; j!=0; j--) { (*top) += (*fromp); fromp+=cols; } } } //cz277 - fast softmax __global__ void qn_kern_sumrow_vf_f(int rows, int cols, const float* in, float* res) { int row = (blockIdx.x * blockDim.x) + threadIdx.x; if (row < rows) { int off = row * cols; const float *inptr = &in[off]; float *outptr = &res[off]; float sum = 0.0; for (int i = 0; i != cols; ++i) { sum += (*inptr); ++inptr; } (*outptr) = 1.0f / sum; } } __global__ void qn_kern_maxrow_vf_f(int rows, int cols, const float* in, float* res) { int row = (blockIdx.x * blockDim.x) + threadIdx.x; if (row < rows) { int off = row * cols; const float *inptr = &in[off]; float *outptr = &res[off]; float max = *inptr; ++inptr; for (int i = 1; i != cols; ++i) { if ((*inptr) > max) { max = (*inptr); } ++inptr; } (*outptr) = max; } } //cz277 - fast softmax __global__ void qn_kern_dualmax_ff_f(int cols, int size, int incr, const float *in_vec, float *out_vec) { int absidx = (blockIdx.x * blockDim.x) + threadIdx.x; if (absidx < size) { int row = absidx / cols; int off = row * cols; int index = absidx - off; int den = 2 * incr; float *outptr = &out_vec[absidx]; const float *inptr = &in_vec[absidx]; if (index % den == 0) { float lhval = (*inptr); if (index + incr >= cols) { *outptr = lhval; } else { inptr += incr; float rhval = (*inptr); if (lhval < rhval) { *outptr = rhval; } else { *outptr = lhval; } } } } } //cz277 - fast softmax __global__ void qn_kern_exp_f_f(int rows, int cols, int size, const float* in_vec, const float *comp_cache, float* out_vec) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < size) { int row = index / cols; int off = row * cols; float *outptr = &out_vec[index]; const float *inptr = &in_vec[index]; const float *compptr = &comp_cache[off]; (*outptr) = exp((*inptr) - (*compptr)); } } //cz277 - fast softmax __global__ void qn_kern_scale_f_f(int rows, int cols, int size, const float* in, const float *cache, float* res) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < size) { int row = index / cols; int off = row * cols; float *outptr = &res[index]; const float *compptr = &cache[off]; const float *inptr = &in[index]; (*outptr) = (*compptr) * (*inptr); } } //cz277 - fast softmax __global__ void qn_kern_div_f_f(int rows, int cols, int size, const float* in, const float *cache, float* res) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < size) { int row = index / cols; int off 
= row * cols; float *outptr = &res[index]; const float *compptr = &cache[off]; const float *inptr = &in[index]; (*outptr) = (*inptr) / (*compptr); } } //cz277 - fast softmax __global__ void qn_kern_tempsoftmax_mf_mf(int rows, int cols, float* in_vec, float* out_vec) { int row = (blockIdx.x * blockDim.x) + threadIdx.x; if (row < rows) { int i; const int index = row * cols; float* invec = &in_vec[index]; float* outvec = &out_vec[index]; float* inptr; float* outptr; // Now put exp(in-max) in out inptr = invec; //outptr = outvec; float sumexp = 0; for (i=cols; i!=0; i--) { sumexp += (*inptr); ++inptr; } // Now scale the output float scale = 1.0f/sumexp; outptr = outvec; *outptr = scale; /*inptr = invec; for (i=cols; i!=0; i--) { *inptr = (*inptr) * scale; ++inptr; }*/ } } //cz277 - fast softmax //cz277 - fast softmax __global__ void qn_kern_dualsum_ff_f(int cols, int size, int incr, const float *in_vec, float *out_vec) { int absidx = (blockIdx.x * blockDim.x) + threadIdx.x; if (absidx < size) { int row = absidx / cols; int off = row * cols; int index = absidx - off; int den = 2 * incr; float *outptr = &out_vec[absidx]; const float *inptr = &in_vec[absidx]; if (index % den == 0) { float lhval = (*inptr); if (index + incr >= cols) { *outptr = lhval; } else { inptr += incr; //float rhval = (*inptr); *outptr = lhval + (*inptr); } } } } // Kernel for multisoftmax - several softmaxes at once // Do each softmax row in its own thread __global__ void qn_kern_multisoftmax_mf_mf(int rows, int cols, const float* in_vec, float* out_vec) { int row = (blockIdx.x * blockDim.x) + threadIdx.x; if (row < rows) { int i; const int index = row * cols; const float* invec = &in_vec[index]; float* outvec = &out_vec[index]; const float* inptr; float* outptr; // First find the max of each vector float max; inptr = invec; max = *inptr++; for (i=cols-1; i!=0; i--) { float val; val = *inptr++; if (val>max) max = val; } // Now put exp(in-max) in out inptr = invec; outptr = outvec; float sumexp = 0; for (i=cols; i!=0; i--) { float f, e; f = *inptr++; e = expf(f - max); *outptr++ = e; sumexp += e; } // Now scale the output float scale = 1.0f/sumexp; outptr = outvec; for (i=cols; i!=0; i--) { *outptr = (*outptr) * scale; outptr++; } } } __global__ void qn_devnv_softmax_vf_vf(int n, const float* in_vec, float* out_vec) { float max; float min; float sumexp = 0.0f; /* Sum of exponents */ float scale; /* 1/sum of exponents */ size_t i; qn_devin_maxmin_vf_ff(n, in_vec, &max, &min); /* Find constant bias. */ for (i=0; i<n; i++) { float f; /* Input value. */ float e; /* Exponent of current value. 
*/ f = in_vec[i]; e = expf(f - max); out_vec[i] = e; sumexp += e; } scale = 1.0f/sumexp; for (i=0; i<n; i++) { out_vec[i] = out_vec[i] * scale; } } __global__ void qn_devnv_copy_vf_mf(size_t mat_height, size_t vec_len, const float* vec, float* mat) { size_t i, j; const float* vec_ptr; float* mat_ptr = mat; for (i=mat_height; i!=0; i--) { vec_ptr = vec; for (j=vec_len; j!=0; j--) { (*mat_ptr++) = (*vec_ptr++); } } } __global__ void qn_devnv_sumcol_mf_vf(size_t rows, size_t cols, const float* in, float* res) { const float *const res_end_b8p = res + (cols & ~7); const float *const res_end_p = res + cols; float* res_p; const float* in_p = in; size_t i; /* Initialize the result */ res_p = res; while(res_p != res_end_b8p) { res_p[0] = in_p[0]; res_p[1] = in_p[1]; res_p[2] = in_p[2]; res_p[3] = in_p[3]; res_p[4] = in_p[4]; res_p[5] = in_p[5]; res_p[6] = in_p[6]; res_p[7] = in_p[7]; res_p += 8; in_p += 8; } while (res_p != res_end_p) { (*res_p++) = (*in_p++); } /* The main loop */ for (i=1; i!=rows; i++) { res_p = res; while(res_p != res_end_b8p) { res_p[0] += in_p[0]; res_p[1] += in_p[1]; res_p[2] += in_p[2]; res_p[3] += in_p[3]; res_p[4] += in_p[4]; res_p[5] += in_p[5]; res_p[6] += in_p[6]; res_p[7] += in_p[7]; res_p += 8; in_p += 8; } while (res_p != res_end_p) { (*res_p++) += (*in_p++); } } } __global__ void qn_devnv_mul_mfmf_mf(size_t Sm,size_t Sk,size_t Sn, const float *A,const float *B,float *C) { size_t i,j,k; for (i=0;i<Sm;i++) { for (j=0;j<Sn;j++) { C[i*Sn+j] = 0.0f; for (k=0;k<Sk;k++) C[i*Sn+j] += A[i*Sk+k]*B[k*Sn+j]; } } } __global__ void qn_devnv_mulntacc_mfmf_mf(size_t Sm,size_t Sk,size_t Sn, const float *A,const float *B,float *C) { size_t i,j,k; for (i=0;i<Sm;i++) for (j=0;j<Sn;j++) for (k=0;k<Sk;k++) C[i*Sn+j] += A[i*Sk+k]*B[j*Sk+k]; } __global__ void qn_devnv_multnacc_fmfmf_mf(size_t Sk,size_t Sm,size_t Sn, float scale, const float *A,const float *B,float *C) { size_t i,j,k; float acc; for (i=0;i<Sm;i++) { for (j=0;j<Sn;j++) { acc = 0.0f; for (k=0;k<Sk;k++) acc += A[k*Sm+i]*B[k*Sn+j]; C[i*Sn+j] += acc * scale; } } } //cz277 - momentum __global__ void qn_kern_selfadd_vfvf_vf(int n, const float* in_vec1, float* in_vec2) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) in_vec2[i] += in_vec1[i]; }
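/*
 * Editorial sketch, not part of the original file: the element-wise kernels above all
 * assume a 1D launch covering n elements. One hedged way to size such a launch; the
 * block size of 256 is an illustrative choice, not something this file prescribes.
 */
static void example_launch_sigmoid(int n, const float* dev_in, float* dev_out)
{
    const int threads = 256;                         // assumed threads per block
    const int blocks = (n + threads - 1) / threads;  // enough blocks to cover n
    hipLaunchKernelGGL(qn_kern_sigmoid_vf_vf, dim3(blocks), dim3(threads), 0, 0,
                       n, dev_in, dev_out);
}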
1d6799a6614ebd0873675827fce304372824f2df.cu
const char* QN_cuvec_rcsid = "$Header: /u/drspeech/repos/quicknet2/QN_cuvec.cu,v 1.1 2011/03/10 00:27:57 davidj Exp $"; /* Must include the config.h file first */ #include <QN_config.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "QN_types.h" #include "QN_cuvec.h" #include <cuda.h> #include <cublas.h> __global__ void qn_devnv_copy_vf_vf(size_t len, const float* from, float* to) { size_t i; for (i=len; i!=0; i--) *to++ = *from++; } __global__ void qn_devnv_sub_vfvf_vf(size_t n, const float *in_vec1, const float *in_vec2, float *res_vec) { size_t i; for (i=n; i!=0; i--) (*res_vec++) = *(in_vec1++) - (*in_vec2++); } __global__ void qn_devnv_mul_vfvf_vf(size_t n, const float *in_vec1, const float *in_vec2, float *res_vec) { size_t i; for (i=n; i!=0; i--) (*res_vec++) = (*in_vec1++) * (*in_vec2++); } __global__ void qn_devnv_mulacc_vff_vf(size_t n, const float *in, float scale, float *acc) { size_t i; for (i=n; i!=0; i--) (*acc++) += scale * (*in++); } __global__ void qn_devnv_dsigmoid_vf_vf(size_t n, const float* in_vec, float* out_vec) { size_t i; for (i=n; i!=0; i--) { const float y = *in_vec++; *out_vec++ = (1.0f - y) * y; } } __global__ void qn_devnv_sigmoid_vf_vf(int n, const float* in_vec, float* out_vec) { size_t i; for (i=0; i<n; i++) { *out_vec++ = qn_devin_sigmoid_f_f(*in_vec++); } } __global__ void qn_kern_sub_vfvf_vf(int n, const float* in_vec1, const float* in_vec2, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) out_vec[i] = in_vec1[i] - in_vec2[i]; } __global__ void qn_kern_mul_vfvf_vf(int n, const float* in_vec1, const float* in_vec2, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) out_vec[i] = in_vec1[i] * in_vec2[i]; } __global__ void qn_kern_sigmoid_vf_vf(int n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) out_vec[i] = 1.0f/(1.0f + __expf(-in_vec[i])); } __global__ void qn_kern_tanh_vf_vf(int n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) out_vec[i] = tanh(in_vec[i]); } __global__ void qn_kern_dsigmoid_vf_vf(int n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) { const float y = in_vec[i]; out_vec[i] = (1.0f - y) * y; } } __global__ void qn_kern_dtanh_vf_vf(int n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i<n) { const float y = in_vec[i]; out_vec[i] = (1.0f - y) * (1.0f + y); } } //cz277 - criteria, in_vec1 = y_i, in_vec2 = t_i __global__ void qn_kern_dxentropy_vf_vf(size_t n, const float* in_vec1, const float* in_vec2, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) { float yi = in_vec1[i]; if (yi == 0) yi = QN_MinProb; out_vec[i] = - in_vec2[i] / yi; //out_vec[i] = in_vec2[i]; } } //cz277 - nonlinearity __global__ void qn_kern_dsoftmax_vf_vf(size_t n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) { const float yi = in_vec[i]; out_vec[i] = (1 - yi) * yi; } } //cz277 - nonlinearity, x > 0 -> y > 0; x = 0 -> y = 0; x < 0 -> y < 0; __global__ void qn_kern_dsoftsign_vf_vf(size_t n, const float* in_vec, float* out_vec) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) { float temp; const float yi = in_vec[i]; if (yi > 0) temp = yi - 1; else temp = yi + 1; out_vec[i] = temp * temp; } } //cz277 - nonlinearity __global__ void qn_kern_softsign_vf_vf(size_t n, const float* in_vec, float* out_vec) { int i = 
(blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) { out_vec[i] = in_vec[i] / (1 + abs(in_vec[i])); } } __global__ void qn_kern_copy_vf_mf(int mat_height, int vec_len, const float*vec, float* mat) { int col = (blockIdx.x * blockDim.x) + threadIdx.x; if (col<vec_len) { int j; float val = vec[col]; float* top = &mat[col]; for (j=mat_height; j!=0; j--) { *top = val; top += vec_len; } } } //cz277 - fast softmax __global__ void qn_kern_fastcopy_vf_mf(int mat_height, int vec_len, const float*vec, float* mat, int matsize) { int matidx = (blockIdx.x * blockDim.x) + threadIdx.x; if (matidx < matsize) { int vecidx = matidx % vec_len; float val = vec[vecidx]; float *ptr = &mat[matidx]; *ptr = val; } } // Kernel to sum columns in a matrix // Do each column sum in its own thread __global__ void qn_kern_sumcol_mf_vf(int rows, int cols, const float* in, float* res) { int col = (blockIdx.x * blockDim.x) + threadIdx.x; if (col < cols) { int j; const float* fromp = &in[col]; float* top = &res[col]; (*top) = (*fromp); fromp+=cols; for (j=rows-1; j!=0; j--) { (*top) += (*fromp); fromp+=cols; } } } //cz277 - fast softmax __global__ void qn_kern_sumrow_vf_f(int rows, int cols, const float* in, float* res) { int row = (blockIdx.x * blockDim.x) + threadIdx.x; if (row < rows) { int off = row * cols; const float *inptr = &in[off]; float *outptr = &res[off]; float sum = 0.0; for (int i = 0; i != cols; ++i) { sum += (*inptr); ++inptr; } (*outptr) = 1.0f / sum; } } __global__ void qn_kern_maxrow_vf_f(int rows, int cols, const float* in, float* res) { int row = (blockIdx.x * blockDim.x) + threadIdx.x; if (row < rows) { int off = row * cols; const float *inptr = &in[off]; float *outptr = &res[off]; float max = *inptr; ++inptr; for (int i = 1; i != cols; ++i) { if ((*inptr) > max) { max = (*inptr); } ++inptr; } (*outptr) = max; } } //cz277 - fast softmax __global__ void qn_kern_dualmax_ff_f(int cols, int size, int incr, const float *in_vec, float *out_vec) { int absidx = (blockIdx.x * blockDim.x) + threadIdx.x; if (absidx < size) { int row = absidx / cols; int off = row * cols; int index = absidx - off; int den = 2 * incr; float *outptr = &out_vec[absidx]; const float *inptr = &in_vec[absidx]; if (index % den == 0) { float lhval = (*inptr); if (index + incr >= cols) { *outptr = lhval; } else { inptr += incr; float rhval = (*inptr); if (lhval < rhval) { *outptr = rhval; } else { *outptr = lhval; } } } } } //cz277 - fast softmax __global__ void qn_kern_exp_f_f(int rows, int cols, int size, const float* in_vec, const float *comp_cache, float* out_vec) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < size) { int row = index / cols; int off = row * cols; float *outptr = &out_vec[index]; const float *inptr = &in_vec[index]; const float *compptr = &comp_cache[off]; (*outptr) = exp((*inptr) - (*compptr)); } } //cz277 - fast softmax __global__ void qn_kern_scale_f_f(int rows, int cols, int size, const float* in, const float *cache, float* res) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < size) { int row = index / cols; int off = row * cols; float *outptr = &res[index]; const float *compptr = &cache[off]; const float *inptr = &in[index]; (*outptr) = (*compptr) * (*inptr); } } //cz277 - fast softmax __global__ void qn_kern_div_f_f(int rows, int cols, int size, const float* in, const float *cache, float* res) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < size) { int row = index / cols; int off = row * cols; float *outptr = &res[index]; const float *compptr = &cache[off]; 
const float *inptr = &in[index]; (*outptr) = (*inptr) / (*compptr); } } //cz277 - fast softmax __global__ void qn_kern_tempsoftmax_mf_mf(int rows, int cols, float* in_vec, float* out_vec) { int row = (blockIdx.x * blockDim.x) + threadIdx.x; if (row < rows) { int i; const int index = row * cols; float* invec = &in_vec[index]; float* outvec = &out_vec[index]; float* inptr; float* outptr; // Now put exp(in-max) in out inptr = invec; //outptr = outvec; float sumexp = 0; for (i=cols; i!=0; i--) { sumexp += (*inptr); ++inptr; } // Now scale the output float scale = 1.0f/sumexp; outptr = outvec; *outptr = scale; /*inptr = invec; for (i=cols; i!=0; i--) { *inptr = (*inptr) * scale; ++inptr; }*/ } } //cz277 - fast softmax //cz277 - fast softmax __global__ void qn_kern_dualsum_ff_f(int cols, int size, int incr, const float *in_vec, float *out_vec) { int absidx = (blockIdx.x * blockDim.x) + threadIdx.x; if (absidx < size) { int row = absidx / cols; int off = row * cols; int index = absidx - off; int den = 2 * incr; float *outptr = &out_vec[absidx]; const float *inptr = &in_vec[absidx]; if (index % den == 0) { float lhval = (*inptr); if (index + incr >= cols) { *outptr = lhval; } else { inptr += incr; //float rhval = (*inptr); *outptr = lhval + (*inptr); } } } } // Kernel for multisoftmax - several softmaxes at once // Do each softmax row in its own thread __global__ void qn_kern_multisoftmax_mf_mf(int rows, int cols, const float* in_vec, float* out_vec) { int row = (blockIdx.x * blockDim.x) + threadIdx.x; if (row < rows) { int i; const int index = row * cols; const float* invec = &in_vec[index]; float* outvec = &out_vec[index]; const float* inptr; float* outptr; // First find the max of each vector float max; inptr = invec; max = *inptr++; for (i=cols-1; i!=0; i--) { float val; val = *inptr++; if (val>max) max = val; } // Now put exp(in-max) in out inptr = invec; outptr = outvec; float sumexp = 0; for (i=cols; i!=0; i--) { float f, e; f = *inptr++; e = expf(f - max); *outptr++ = e; sumexp += e; } // Now scale the output float scale = 1.0f/sumexp; outptr = outvec; for (i=cols; i!=0; i--) { *outptr = (*outptr) * scale; outptr++; } } } __global__ void qn_devnv_softmax_vf_vf(int n, const float* in_vec, float* out_vec) { float max; float min; float sumexp = 0.0f; /* Sum of exponents */ float scale; /* 1/sum of exponents */ size_t i; qn_devin_maxmin_vf_ff(n, in_vec, &max, &min); /* Find constant bias. */ for (i=0; i<n; i++) { float f; /* Input value. */ float e; /* Exponent of current value. 
*/ f = in_vec[i]; e = expf(f - max); out_vec[i] = e; sumexp += e; } scale = 1.0f/sumexp; for (i=0; i<n; i++) { out_vec[i] = out_vec[i] * scale; } } __global__ void qn_devnv_copy_vf_mf(size_t mat_height, size_t vec_len, const float* vec, float* mat) { size_t i, j; const float* vec_ptr; float* mat_ptr = mat; for (i=mat_height; i!=0; i--) { vec_ptr = vec; for (j=vec_len; j!=0; j--) { (*mat_ptr++) = (*vec_ptr++); } } } __global__ void qn_devnv_sumcol_mf_vf(size_t rows, size_t cols, const float* in, float* res) { const float *const res_end_b8p = res + (cols & ~7); const float *const res_end_p = res + cols; float* res_p; const float* in_p = in; size_t i; /* Initialize the result */ res_p = res; while(res_p != res_end_b8p) { res_p[0] = in_p[0]; res_p[1] = in_p[1]; res_p[2] = in_p[2]; res_p[3] = in_p[3]; res_p[4] = in_p[4]; res_p[5] = in_p[5]; res_p[6] = in_p[6]; res_p[7] = in_p[7]; res_p += 8; in_p += 8; } while (res_p != res_end_p) { (*res_p++) = (*in_p++); } /* The main loop */ for (i=1; i!=rows; i++) { res_p = res; while(res_p != res_end_b8p) { res_p[0] += in_p[0]; res_p[1] += in_p[1]; res_p[2] += in_p[2]; res_p[3] += in_p[3]; res_p[4] += in_p[4]; res_p[5] += in_p[5]; res_p[6] += in_p[6]; res_p[7] += in_p[7]; res_p += 8; in_p += 8; } while (res_p != res_end_p) { (*res_p++) += (*in_p++); } } } __global__ void qn_devnv_mul_mfmf_mf(size_t Sm,size_t Sk,size_t Sn, const float *A,const float *B,float *C) { size_t i,j,k; for (i=0;i<Sm;i++) { for (j=0;j<Sn;j++) { C[i*Sn+j] = 0.0f; for (k=0;k<Sk;k++) C[i*Sn+j] += A[i*Sk+k]*B[k*Sn+j]; } } } __global__ void qn_devnv_mulntacc_mfmf_mf(size_t Sm,size_t Sk,size_t Sn, const float *A,const float *B,float *C) { size_t i,j,k; for (i=0;i<Sm;i++) for (j=0;j<Sn;j++) for (k=0;k<Sk;k++) C[i*Sn+j] += A[i*Sk+k]*B[j*Sk+k]; } __global__ void qn_devnv_multnacc_fmfmf_mf(size_t Sk,size_t Sm,size_t Sn, float scale, const float *A,const float *B,float *C) { size_t i,j,k; float acc; for (i=0;i<Sm;i++) { for (j=0;j<Sn;j++) { acc = 0.0f; for (k=0;k<Sk;k++) acc += A[k*Sm+i]*B[k*Sn+j]; C[i*Sn+j] += acc * scale; } } } //cz277 - momentum __global__ void qn_kern_selfadd_vfvf_vf(int n, const float* in_vec1, float* in_vec2) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < n) in_vec2[i] += in_vec1[i]; }
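The element-wise kernels in this file (qn_kern_sigmoid_vf_vf, qn_kern_sub_vfvf_vf, and so on) all use a flat one-thread-per-element index with an "if (i < n)" guard. A minimal host-side launch sketch follows; the wrapper name and the block size of 256 are assumptions, not part of QN_cuvec.cu.

// Minimal launch sketch (assumed wrapper, not part of QN_cuvec.cu):
// one thread per element, grid size rounded up to cover all n elements.
static void launch_qn_sigmoid(int n, const float* d_in, float* d_out)
{
    const int threads = 256;                         // assumed block size
    const int blocks  = (n + threads - 1) / threads; // integer ceil(n / threads)
    qn_kern_sigmoid_vf_vf<<<blocks, threads>>>(n, d_in, d_out);
}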
bc842c69f12ee56a3ab391a2e53a9c2506731f56.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void transposeCoalesced(float *odata, const float *idata)
{
    __shared__ float tile[TILE_DIM][TILE_DIM];

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;

    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];

    __syncthreads();

    x = blockIdx.y * TILE_DIM + threadIdx.x;  // transpose block offset
    y = blockIdx.x * TILE_DIM + threadIdx.y;

    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
bc842c69f12ee56a3ab391a2e53a9c2506731f56.cu
#include "includes.h" __global__ void transposeCoalesced(float *odata, const float *idata) { __shared__ float tile[TILE_DIM][TILE_DIM]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x]; __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j]; }
eb492617f38d509a035eb885fb5f352e9f003908.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

__global__ void vecadd(int *A, int *B, int *C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

int main()
{
    int n;
    printf("Enter the no. of elements\n");
    scanf("%i", &n);

    // host buffers (hC is allocated but unused; the result is copied back into hA below)
    int *hA = (int *)calloc(n, sizeof(int));
    int *hB = (int *)calloc(n, sizeof(int));
    int *hC = (int *)calloc(n, sizeof(int));

    printf("Enter array 1\n");
    for (int i = 0; i < n; i++) {
        scanf("%i", &hA[i]);
    }
    printf("Enter array 2\n");
    for (int i = 0; i < n; i++) {
        scanf("%i", &hB[i]);
    }

    int *dA, *dB, *dC;
    hipMalloc(&dA, n * sizeof(int));
    hipMalloc(&dB, n * sizeof(int));
    hipMalloc(&dC, n * sizeof(int));

    hipMemcpy(dA, hA, n * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dB, hB, n * sizeof(int), hipMemcpyHostToDevice);

    // one block of n threads; each thread adds one element
    hipLaunchKernelGGL(( vecadd), dim3(1), dim3(n), 0, 0, dA, dB, dC);

    hipError_t err = hipGetLastError();
    if(err != hipSuccess) {
        printf("%s\n", hipGetErrorString(err));
    }

    // copy the result back (reusing hA as the output buffer) and print it
    hipMemcpy(hA, dC, n * sizeof(int), hipMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) {
        printf("%i ", hA[i]);
    }
    printf("\n");

    hipFree(dA);
    hipFree(dB);
    hipFree(dC);
    return 0;
}
eb492617f38d509a035eb885fb5f352e9f003908.cu
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

__global__ void vecadd(int *A, int *B, int *C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

int main()
{
    int n;
    printf("Enter the no. of elements\n");
    scanf("%i", &n);

    // host buffers (hC is allocated but unused; the result is copied back into hA below)
    int *hA = (int *)calloc(n, sizeof(int));
    int *hB = (int *)calloc(n, sizeof(int));
    int *hC = (int *)calloc(n, sizeof(int));

    printf("Enter array 1\n");
    for (int i = 0; i < n; i++) {
        scanf("%i", &hA[i]);
    }
    printf("Enter array 2\n");
    for (int i = 0; i < n; i++) {
        scanf("%i", &hB[i]);
    }

    int *dA, *dB, *dC;
    cudaMalloc(&dA, n * sizeof(int));
    cudaMalloc(&dB, n * sizeof(int));
    cudaMalloc(&dC, n * sizeof(int));

    cudaMemcpy(dA, hA, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, n * sizeof(int), cudaMemcpyHostToDevice);

    // one block of n threads; each thread adds one element
    vecadd<<<1, n>>>(dA, dB, dC);

    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess) {
        printf("%s\n", cudaGetErrorString(err));
    }

    // copy the result back (reusing hA as the output buffer) and print it
    cudaMemcpy(hA, dC, n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) {
        printf("%i ", hA[i]);
    }
    printf("\n");

    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    return 0;
}
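The launch vecadd<<<1, n>>> uses a single block of n threads, so it only works while n does not exceed the device's maximum threads per block (1024 on most current GPUs) and performs no bounds check. A bounds-checked sketch for larger n follows; it is an illustrative addition, not part of the original file.

// Sketch of a bounds-checked variant that scales past one block
// (assumed addition, kept separate from the original vecadd kernel):
__global__ void vecadd_n(const int *A, const int *B, int *C, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        C[i] = A[i] + B[i];
}
// launch: vecadd_n<<<(n + 255) / 256, 256>>>(dA, dB, dC, n);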
d54762f355f050b0892c0715ab120edb33c870a3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void sweep_columns_%(name)s(float* X,   /** matrix to sweep in place **/
                                       float* y,   /** column vector to remove **/
                                       int rows,
                                       int cols)
{
    unsigned int thidx = threadIdx.x;
    unsigned int thidy = threadIdx.y;
    unsigned int bid = blockIdx.x;
    unsigned int bdx = blockDim.x;  // assumed equal to blockDim.y .. 16 or 32 ..

    int currow = bdx*bid;  // flexible block size

    extern __shared__ float shared_data[];

    if(currow+thidx < rows){
        shared_data[thidx] = y[currow+thidx];
    }
    __syncthreads();

    for(int chunk = 0; chunk < cols; chunk+=bdx){
        // get some values chunking across rows ...
        if(currow + thidy < rows && chunk + thidx < cols){
            X[(currow + thidy)*cols + chunk + thidx] = \
                l_%(name)s(X[(currow + thidy)*cols + chunk + thidx], shared_data[thidy]);
        }
    }
}
d54762f355f050b0892c0715ab120edb33c870a3.cu
__global__ void sweep_columns_%(name)s(float* X,   /** matrix to sweep in place **/
                                       float* y,   /** column vector to remove **/
                                       int rows,
                                       int cols)
{
    unsigned int thidx = threadIdx.x;
    unsigned int thidy = threadIdx.y;
    unsigned int bid = blockIdx.x;
    unsigned int bdx = blockDim.x;  // assumed equal to blockDim.y .. 16 or 32 ..

    int currow = bdx*bid;  // flexible block size

    extern __shared__ float shared_data[];

    if(currow+thidx < rows){
        shared_data[thidx] = y[currow+thidx];
    }
    __syncthreads();

    for(int chunk = 0; chunk < cols; chunk+=bdx){
        // get some values chunking across rows ...
        if(currow + thidy < rows && chunk + thidx < cols){
            X[(currow + thidy)*cols + chunk + thidx] = \
                l_%(name)s(X[(currow + thidy)*cols + chunk + thidx], shared_data[thidy]);
        }
    }
}
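The %(name)s and l_%(name)s tokens are mapping-key placeholders for Python %-style string substitution, so this source is a kernel template that is presumably filled in on the host (for example in a PyCUDA-style workflow) before compilation. As a purely hypothetical illustration, substituting name = "sub" would produce a kernel sweep_columns_sub whose update line calls an elementwise helper such as the one sketched below; l_sub is an assumption, not part of the original source.

// Hypothetical instantiation detail (l_sub is assumed, not in the source):
__device__ float l_sub(float x, float yi) { return x - yi; }
// sweep_columns_sub would then compute X[i*cols + j] = l_sub(X[i*cols + j], y[i])
// for every element of row i, sweeping the column vector y out of the matrix.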
69e03fefc5650279b900d0c456142822af9aabf3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cmath> #include <hip/hip_runtime.h> #include "blur_kernel.h" __global__ void cuda_blur (unsigned char* in_image, unsigned char* out_image, int width, int height, int radius) { const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x; int x = offset % width; int y = (offset-x) / width; int fsize = radius; if (offset < width*height) { float output_r = 0; float output_g = 0; float output_b = 0; float output_a = 0; int hits = 0; for (int ox = -fsize; ox < fsize + 1; ++ox) { for (int oy = -fsize; oy < fsize + 1; ++oy) { if ((x+ox) > -1 && (x+ox) < width && (y+oy) > -1 && (y+oy) < height) { const int currentoffset = (offset + ox + oy * width) * 4; output_r += in_image[currentoffset + 0]; output_g += in_image[currentoffset + 1]; output_b += in_image[currentoffset + 2]; output_a += in_image[currentoffset + 3]; hits++; } } } out_image[offset * 4 + 0] = output_r / hits; out_image[offset * 4 + 1] = output_g / hits; out_image[offset * 4 + 2] = output_b / hits; out_image[offset * 4 + 3] = output_a / hits; } } void print_device_info() { int ndevices; char star[] = "***************************************\n"; hipGetDeviceCount(&ndevices); fprintf(stdout, "number of devices: %d\n", ndevices); fprintf(stdout, "%s", star); for (int i = 0; i < ndevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); fprintf(stdout, "device number: %d\n", i); fprintf(stdout, "device name: %s\n", prop.name); fprintf(stdout, "clock rate: %d Mhz\n", prop.clockRate / 1000); fprintf(stdout, "global memory: %lu MB\n", prop.totalGlobalMem / 1024 / 1024); fprintf(stdout, "cpu count: %d\n", prop.multiProcessorCount); fprintf(stdout, "bus width: %d bit\n", prop.memoryBusWidth); fprintf(stdout, "max t block: %d\n", prop.maxThreadsPerBlock); fprintf(stdout, "dim x max: %d\n", prop.maxThreadsDim[0]); fprintf(stdout, "dim y max: %d\n", prop.maxThreadsDim[1]); fprintf(stdout, "dim z max: %d\n", prop.maxThreadsDim[2]); fprintf(stdout, "compute cap.: %d.%d\n", prop.major, prop.minor); fprintf(stdout, "asyncEngines: %d\n", prop.asyncEngineCount); fprintf(stdout, "warpSize: %d\n", prop.warpSize); fprintf(stdout, "is unified: %d\n", prop.unifiedAddressing); fprintf(stdout, "%s", star); } } void cuda_blur_prepare (unsigned char* in_image, unsigned char* out_image, int width, int height, int radius, int testmode) { unsigned char* dev_input; // device char array input image unsigned char* dev_output; // device char array output image int blockSize; int minGridSize; int gridSize; const int N = 1000000; hipEvent_t start; hipEvent_t stop; float time; hipEventCreate(&start); hipEventCreate(&stop); int malsize = width * height * 4 * sizeof(unsigned char); fprintf(stdout, "Doing cuda blur...\n"); hipError_t mallocstatus = hipMalloc( (void**) &dev_input, malsize); if (mallocstatus != hipSuccess) { fprintf(stderr, "Malloc went wrong: %s\n", hipGetErrorString(mallocstatus)); } hipError_t memcpystatus = hipMemcpy( dev_input, in_image, malsize, hipMemcpyHostToDevice ); if (memcpystatus != hipSuccess) { fprintf(stderr, "Memcpy went wrong: %s\n", hipGetErrorString(memcpystatus)); } hipError_t mallocoutputstatus = hipMalloc( (void**) &dev_output, malsize); if (mallocoutputstatus != hipSuccess) { fprintf(stderr, "Malloc went wrong: %s\n", hipGetErrorString(mallocoutputstatus)); } if (testmode > 0) { fprintf(stdout, "Test mode detected...\n"); print_device_info(); hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cuda_blur, 
0, N); gridSize = (N + blockSize - 1) / blockSize; } else { blockSize = 256; gridSize = (unsigned int) ceil( (double)(width * height * 4 / blockSize)); } fprintf(stdout, "gridSize: %i\n", gridSize); fprintf(stdout, "blockSize: %i\n", blockSize); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_blur), dim3(gridSize), dim3(blockSize), 0, 0, dev_input, dev_output, width, height, radius); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); fprintf(stdout, "Kernel elapsed time: %3.3f ms\n", time); hipError_t copybackstatus = hipMemcpy(out_image, dev_output, malsize, hipMemcpyDeviceToHost); hipFree(dev_input); hipFree(dev_output); hipDeviceReset(); }
69e03fefc5650279b900d0c456142822af9aabf3.cu
#include <stdio.h> #include <stdlib.h> #include <cmath> #include <cuda.h> #include "blur_kernel.h" __global__ void cuda_blur (unsigned char* in_image, unsigned char* out_image, int width, int height, int radius) { const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x; int x = offset % width; int y = (offset-x) / width; int fsize = radius; if (offset < width*height) { float output_r = 0; float output_g = 0; float output_b = 0; float output_a = 0; int hits = 0; for (int ox = -fsize; ox < fsize + 1; ++ox) { for (int oy = -fsize; oy < fsize + 1; ++oy) { if ((x+ox) > -1 && (x+ox) < width && (y+oy) > -1 && (y+oy) < height) { const int currentoffset = (offset + ox + oy * width) * 4; output_r += in_image[currentoffset + 0]; output_g += in_image[currentoffset + 1]; output_b += in_image[currentoffset + 2]; output_a += in_image[currentoffset + 3]; hits++; } } } out_image[offset * 4 + 0] = output_r / hits; out_image[offset * 4 + 1] = output_g / hits; out_image[offset * 4 + 2] = output_b / hits; out_image[offset * 4 + 3] = output_a / hits; } } void print_device_info() { int ndevices; char star[] = "***************************************\n"; cudaGetDeviceCount(&ndevices); fprintf(stdout, "number of devices: %d\n", ndevices); fprintf(stdout, "%s", star); for (int i = 0; i < ndevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); fprintf(stdout, "device number: %d\n", i); fprintf(stdout, "device name: %s\n", prop.name); fprintf(stdout, "clock rate: %d Mhz\n", prop.clockRate / 1000); fprintf(stdout, "global memory: %lu MB\n", prop.totalGlobalMem / 1024 / 1024); fprintf(stdout, "cpu count: %d\n", prop.multiProcessorCount); fprintf(stdout, "bus width: %d bit\n", prop.memoryBusWidth); fprintf(stdout, "max t block: %d\n", prop.maxThreadsPerBlock); fprintf(stdout, "dim x max: %d\n", prop.maxThreadsDim[0]); fprintf(stdout, "dim y max: %d\n", prop.maxThreadsDim[1]); fprintf(stdout, "dim z max: %d\n", prop.maxThreadsDim[2]); fprintf(stdout, "compute cap.: %d.%d\n", prop.major, prop.minor); fprintf(stdout, "asyncEngines: %d\n", prop.asyncEngineCount); fprintf(stdout, "warpSize: %d\n", prop.warpSize); fprintf(stdout, "is unified: %d\n", prop.unifiedAddressing); fprintf(stdout, "%s", star); } } void cuda_blur_prepare (unsigned char* in_image, unsigned char* out_image, int width, int height, int radius, int testmode) { unsigned char* dev_input; // device char array input image unsigned char* dev_output; // device char array output image int blockSize; int minGridSize; int gridSize; const int N = 1000000; cudaEvent_t start; cudaEvent_t stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); int malsize = width * height * 4 * sizeof(unsigned char); fprintf(stdout, "Doing cuda blur...\n"); cudaError_t mallocstatus = cudaMalloc( (void**) &dev_input, malsize); if (mallocstatus != cudaSuccess) { fprintf(stderr, "Malloc went wrong: %s\n", cudaGetErrorString(mallocstatus)); } cudaError_t memcpystatus = cudaMemcpy( dev_input, in_image, malsize, cudaMemcpyHostToDevice ); if (memcpystatus != cudaSuccess) { fprintf(stderr, "Memcpy went wrong: %s\n", cudaGetErrorString(memcpystatus)); } cudaError_t mallocoutputstatus = cudaMalloc( (void**) &dev_output, malsize); if (mallocoutputstatus != cudaSuccess) { fprintf(stderr, "Malloc went wrong: %s\n", cudaGetErrorString(mallocoutputstatus)); } if (testmode > 0) { fprintf(stdout, "Test mode detected...\n"); print_device_info(); cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cuda_blur, 0, N); gridSize = (N + blockSize - 1) / blockSize; } 
else { blockSize = 256; gridSize = (unsigned int) ceil( (double)(width * height * 4 / blockSize)); } fprintf(stdout, "gridSize: %i\n", gridSize); fprintf(stdout, "blockSize: %i\n", blockSize); cudaEventRecord(start, 0); cuda_blur<<<gridSize, blockSize>>> (dev_input, dev_output, width, height, radius); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); fprintf(stdout, "Kernel elapsed time: %3.3f ms\n", time); cudaError_t copybackstatus = cudaMemcpy(out_image, dev_output, malsize, cudaMemcpyDeviceToHost); cudaFree(dev_input); cudaFree(dev_output); cudaDeviceReset(); }
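In cuda_blur_prepare, the fallback grid size is computed as ceil((double)(width * height * 4 / blockSize)); the division is performed in integer arithmetic before the cast, so the ceil has no effect, and the factor of 4 launches one thread per byte even though cuda_blur guards with offset < width*height, i.e. one thread per pixel. A per-pixel sketch of that computation follows, assuming the same 256-thread blocks; it is an illustrative alternative, not the original code.

// Per-pixel grid sizing sketch (replacement idea, not the original code):
int blockSize = 256;
int gridSize  = (width * height + blockSize - 1) / blockSize;  // integer ceiling
cuda_blur<<<gridSize, blockSize>>>(dev_input, dev_output, width, height, radius);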
04dc216cd159b05d91e6eb772e4651ac009326bd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> __global__ void process_kernel1(const float* input1, const float* input2, float* output, int numElements){ int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x; int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum; if(globalThreadId < numElements) output[globalThreadId] = (float)sin(input1[globalThreadId]) + (float)cos(input2[globalThreadId]); } // Device Function for process_kernel2 __global__ void process_kernel2(const float* input, float* output, int numElements){ int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x; int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum; if(globalThreadId < numElements) output[globalThreadId] = (float)log(fabs(input[globalThreadId])); } // Device Function for process_kernel3 __global__ void process_kernel3(const float* input, float* output, int numElements){ int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x; int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum; if(globalThreadId < numElements) output[globalThreadId] = (float)sqrt(input[globalThreadId]); } int main(void) { // Size of Array elements int numElements = 32*32*4*2*2; size_t size = numElements*sizeof(float); printf("[Operations on Kernel 1 on %d elements] \n", numElements); // Allocate the host input arrays float *h_A = (float*)malloc(size); float *h_B = (float*)malloc(size); // Allocate the host array for result float *h_outKernel_1 = (float*)malloc(size); // Verify that the allocations are successful if(h_A == NULL || h_B == NULL || h_outKernel_1 == NULL){ fprintf(stderr, "Failed to allocate size for Host arrays\n"); exit(EXIT_FAILURE); } // Initialize the host vectors with random values for(int i=0; i < numElements; i++){ h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input arrays float *d_A = NULL; hipMalloc((void **)&d_A, size); float *d_B = NULL; hipMalloc((void **)&d_B, size); float *d_outKernel_1 = NULL; hipMalloc((void **)&d_outKernel_1, size); // copy data from host input arrays to device memory printf("Copy data from host arrays to device arrays\n"); hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); dim3 blocksPerGrid_1(4, 2, 2); dim3 threadsPerBlock_1(32, 32, 1); hipLaunchKernelGGL(( process_kernel1), dim3(blocksPerGrid_1), dim3(threadsPerBlock_1), 0, 0, d_A, d_B, d_outKernel_1, numElements); // copy the device result from device memory to host memory printf("Copy data output of kernel1 from device memory to host memory\n"); hipMemcpy(h_outKernel_1, d_outKernel_1, size, hipMemcpyDeviceToHost); // Launch Kernel 2 // Allocate host memory for result of process_kernel2 float *h_outKernel_2 = (float*)malloc(size); // Allocate device memory for output of process_kernel2 float *d_outKernel_2 = NULL; hipMalloc((void **)&d_outKernel_2, size); // Dimensions for process_kernel2 dim3 blocksPerGrid_2(2, 8, 1); dim3 threadsPerBlock_2(8, 8, 16); hipLaunchKernelGGL(( process_kernel2), 
dim3(blocksPerGrid_2), dim3(threadsPerBlock_2), 0, 0, d_outKernel_1, d_outKernel_2,numElements); printf("Copy data output of kernel2 from device memory to host memory\n"); hipMemcpy(h_outKernel_2, d_outKernel_2, size, hipMemcpyDeviceToHost); // Launching Kernel 3 // Allocate host memory for result of process_kernel3 float *h_outKernel_3 = (float*)malloc(size); // Allocate device memory for output of process_kernel3 float *d_outKernel_3 = NULL; hipMalloc((void **)&d_outKernel_3, size); // Dimensions for process_kernel3 dim3 blocksPerGrid_3(32, 1, 1); dim3 threadsPerBlock_3(128, 4, 1); hipLaunchKernelGGL(( process_kernel3), dim3(blocksPerGrid_3), dim3(threadsPerBlock_3) , 0, 0, d_outKernel_2, d_outKernel_3, numElements); printf("Copy data output of kernel3 from device memory to host memory\n"); hipMemcpy(h_outKernel_3, d_outKernel_3, size, hipMemcpyDeviceToHost); // Free device global memory hipFree(d_A); hipFree(d_B); hipFree(d_outKernel_1); hipFree(d_outKernel_2); hipFree(d_outKernel_3); printf("All Device Memory Freed\n"); // Free Host memory free(h_A); free(h_B); free(h_outKernel_1); free(h_outKernel_2); free(h_outKernel_3); printf("All Host Memory Freed\n"); // Reset the Device and Exit hipDeviceReset(); printf("Done\n"); return 0; }
04dc216cd159b05d91e6eb772e4651ac009326bd.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> __global__ void process_kernel1(const float* input1, const float* input2, float* output, int numElements){ int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x; int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum; if(globalThreadId < numElements) output[globalThreadId] = (float)sin(input1[globalThreadId]) + (float)cos(input2[globalThreadId]); } // Device Function for process_kernel2 __global__ void process_kernel2(const float* input, float* output, int numElements){ int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x; int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum; if(globalThreadId < numElements) output[globalThreadId] = (float)log(fabs(input[globalThreadId])); } // Device Function for process_kernel3 __global__ void process_kernel3(const float* input, float* output, int numElements){ int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x; int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum; if(globalThreadId < numElements) output[globalThreadId] = (float)sqrt(input[globalThreadId]); } int main(void) { // Size of Array elements int numElements = 32*32*4*2*2; size_t size = numElements*sizeof(float); printf("[Operations on Kernel 1 on %d elements] \n", numElements); // Allocate the host input arrays float *h_A = (float*)malloc(size); float *h_B = (float*)malloc(size); // Allocate the host array for result float *h_outKernel_1 = (float*)malloc(size); // Verify that the allocations are successful if(h_A == NULL || h_B == NULL || h_outKernel_1 == NULL){ fprintf(stderr, "Failed to allocate size for Host arrays\n"); exit(EXIT_FAILURE); } // Initialize the host vectors with random values for(int i=0; i < numElements; i++){ h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input arrays float *d_A = NULL; cudaMalloc((void **)&d_A, size); float *d_B = NULL; cudaMalloc((void **)&d_B, size); float *d_outKernel_1 = NULL; cudaMalloc((void **)&d_outKernel_1, size); // copy data from host input arrays to device memory printf("Copy data from host arrays to device arrays\n"); cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); dim3 blocksPerGrid_1(4, 2, 2); dim3 threadsPerBlock_1(32, 32, 1); process_kernel1<<< blocksPerGrid_1, threadsPerBlock_1>>>(d_A, d_B, d_outKernel_1, numElements); // copy the device result from device memory to host memory printf("Copy data output of kernel1 from device memory to host memory\n"); cudaMemcpy(h_outKernel_1, d_outKernel_1, size, cudaMemcpyDeviceToHost); // Launch Kernel 2 // Allocate host memory for result of process_kernel2 float *h_outKernel_2 = (float*)malloc(size); // Allocate device memory for output of process_kernel2 float *d_outKernel_2 = NULL; cudaMalloc((void **)&d_outKernel_2, size); // Dimensions for process_kernel2 dim3 blocksPerGrid_2(2, 8, 1); dim3 threadsPerBlock_2(8, 8, 16); process_kernel2<<< blocksPerGrid_2, threadsPerBlock_2>>>(d_outKernel_1, d_outKernel_2,numElements); printf("Copy data output of kernel2 from device 
memory to host memory\n"); cudaMemcpy(h_outKernel_2, d_outKernel_2, size, cudaMemcpyDeviceToHost); // Launching Kernel 3 // Allocate host memory for result of process_kernel3 float *h_outKernel_3 = (float*)malloc(size); // Allocate device memory for output of process_kernel3 float *d_outKernel_3 = NULL; cudaMalloc((void **)&d_outKernel_3, size); // Dimensions for process_kernel3 dim3 blocksPerGrid_3(32, 1, 1); dim3 threadsPerBlock_3(128, 4, 1); process_kernel3<<< blocksPerGrid_3, threadsPerBlock_3 >>>(d_outKernel_2, d_outKernel_3, numElements); printf("Copy data output of kernel3 from device memory to host memory\n"); cudaMemcpy(h_outKernel_3, d_outKernel_3, size, cudaMemcpyDeviceToHost); // Free device global memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_outKernel_1); cudaFree(d_outKernel_2); cudaFree(d_outKernel_3); printf("All Device Memory Freed\n"); // Free Host memory free(h_A); free(h_B); free(h_outKernel_1); free(h_outKernel_2); free(h_outKernel_3); printf("All Host Memory Freed\n"); // Reset the Device and Exit cudaDeviceReset(); printf("Done\n"); return 0; }
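The three launches use different 3-D grid and block shapes, but each covers exactly numElements threads, which is why the flattened globalThreadId guard in the kernels suffices. Checking the arithmetic:

// Thread-count check for the launches above (numElements = 32*32*4*2*2 = 16384):
//   kernel1: 4*2*2 = 16 blocks  x 32*32*1 = 1024 threads/block -> 16384
//   kernel2: 2*8*1 = 16 blocks  x 8*8*16  = 1024 threads/block -> 16384
//   kernel3: 32*1*1 = 32 blocks x 128*4*1 = 512  threads/block -> 16384
// globalThreadId = blockNum * (blockDim.x*blockDim.y*blockDim.z) + threadNum
// therefore ranges over exactly [0, numElements) in all three cases.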
9b51765e092c31875b73c9e264700f90f9df7120.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <cmath> #include <ctime> #include <iostream> #include "cudakernels.h" #define CUDA_CALL( call ) \ { \ hipError_t err = call; \ if ( hipSuccess != err) \ fprintf(stderr, "CUDA error for %s in %d of %s : %s.\n", #call , __LINE__ , __FILE__ ,hipGetErrorString(err));\ } using namespace std; // Self-defined double-precision atomicAdd function for nvidia GPUs with Compute Capability 6 and below. // Pre-defined atomicAdd() with double-precision does not work for pre-CC7 nvidia GPUs. __device__ double atomicAdd_double(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } // Determines 1-dimensional CUDA block and grid sizes based on the number of rows N __host__ void calculateDimensions(size_t N, dim3 &gridDim, dim3 &blockDim) { if ( N <= 1024 ) { blockDim.x = 1024; blockDim.y = 1; blockDim.z = 1; gridDim.x = 1; gridDim.y = 1; gridDim.z = 1; } else { blockDim.x = 1024; blockDim.y = 1; blockDim.z = 1; gridDim.x = (int)ceil(N/blockDim.x)+1; gridDim.y = 1; gridDim.z = 1; } } // Determines 2-dimensional CUDA block and grid sizes based on the number of rows N __host__ void calculateDimensions2D(size_t Nx, size_t Ny, dim3 &gridDim, dim3 &blockDim) { if ( Nx <= 32 && Ny <= 32) { blockDim.x = 32; blockDim.y = 32; blockDim.z = 1; gridDim.x = 1; gridDim.y = 1; gridDim.z = 1; } else { blockDim.x = 32; blockDim.y = 32; blockDim.z = 1; gridDim.x = (int)ceil(Nx/blockDim.x)+1; gridDim.y = (int)ceil(Ny/blockDim.y)+1; gridDim.z = 1; } } // TODO: this is for 2D only, need 3D later // calculates the DOF of a grid with dimensions __host__ size_t calcDOF(size_t Nx, size_t Ny, size_t dim) { return (Nx + 1) * (Ny + 1) * dim; } // returns value of an ELLPack matrix A at (x,y) __device__ double valueAt(size_t x, size_t y, double* vValue, size_t* vIndex, size_t max_row_size) { for(size_t k = 0; k < max_row_size; ++k) { if(vIndex[x * max_row_size + k] == y) return vValue[x * max_row_size + k]; } return 0.0; } // adds the value to an ELLPack matrix A at (x,y) __device__ void addAt( size_t x, size_t y, double* vValue, size_t* vIndex, size_t max_row_size, double value ) { for(size_t k = 0; k < max_row_size; ++k) { if(vIndex[y * max_row_size + k] == x) { vValue[y * max_row_size + k] += value; // printf("%f \n", vValue[x * max_row_size + k]); k = max_row_size; // to exit for loop } } } // sets the value of an ELLPack matrix A at (x,y) __device__ void setAt( size_t x, size_t y, double* vValue, size_t* vIndex, size_t max_row_size, double value ) { for(size_t k = 0; k < max_row_size; ++k) { if(vIndex[y * max_row_size + k] == x ) { vValue[y * max_row_size + k] = value; k = max_row_size; // to exit for loop } } } __device__ void setAt_( size_t x, size_t y, double* vValue, size_t* vIndex, size_t num_cols, size_t max_row_size, double value ) { for(size_t k = 0; k < max_row_size; ++k) { if(vIndex[y * max_row_size + k] == x && k < num_cols) { vValue[y * max_row_size + k] = value; k = max_row_size; // to exit for loop } } } __global__ void setToZero(double* a, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) a[id] = 0.0; } // norm = x.norm() __global__ void norm_GPU(double* 
norm, double* x, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; // TODO: if (id < num) if ( id == 0 ) *norm = 0; __syncthreads(); if ( id < num_rows ) { atomicAdd_double( norm, x[id]*x[id] ); } __syncthreads(); if ( id == 0 ) *norm = sqrt(*norm); } // a[] = 0 // a[] = 0, size_t __global__ void setToZero(size_t* a, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) a[id] = 0.0; } //TODO: to delete // bool = true __global__ void setToTrue( bool *foo ) { *foo = true; } // DEBUG: TEST !!!!!!!!!!!!!!!!!!!!!!!!!! __global__ void sqrt_GPU(double *x) { *x = sqrt(*x); } // sum = sum( x[n]*x[n] ) __global__ void sumOfSquare_GPU(double* sum, double* x, size_t n) { int id = threadIdx.x + blockDim.x*blockIdx.x; int stride = blockDim.x*gridDim.x; __shared__ double cache[1024]; double temp = 0.0; while(id < n) { temp += x[id]*x[id]; id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } // reset id id = threadIdx.x + blockDim.x*blockIdx.x; // reduce sum from all blocks' cache if(threadIdx.x == 0) atomicAdd_double(sum, cache[0]); } __global__ void LastBlockSumOfSquare_GPU(double* sum, double* x, size_t n, size_t counter) { int id = threadIdx.x + blockDim.x*blockIdx.x; // if ( id >= counter*blockDim.x && id < ( ( counter*blockDim.x ) + lastBlockSize ) ) if ( id >= counter*blockDim.x && id < n ) atomicAdd_double(sum, x[id]*x[id]); } __host__ void norm_GPU(double* d_norm, double* d_x, size_t N, dim3 gridDim, dim3 blockDim) { hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0, d_norm, 1); // getting the last block's size size_t lastBlockSize = N; size_t counter = 0; if ( N % gridDim.x == 0 ) {} else { while ( lastBlockSize >= gridDim.x) { counter++; lastBlockSize -= gridDim.x; } } // sum of squares for the full blocks //hipLaunchKernelGGL(( sumOfSquare_GPU), dim3(gridDim.x - 1), dim3(blockDim), 0, 0, d_norm, d_x, N); // TODO: check, this is the original hipLaunchKernelGGL(( sumOfSquare_GPU), dim3(gridDim.x - 1), dim3(blockDim), 0, 0, d_norm, d_x, (gridDim.x - 1)*blockDim.x); // sum of squares for the last incomplete block hipLaunchKernelGGL(( LastBlockSumOfSquare_GPU), dim3(1), dim3(lastBlockSize), 0, 0, d_norm, d_x, N, counter); // hipDeviceSynchronize(); hipLaunchKernelGGL(( sqrt_GPU), dim3(1),dim3(1), 0, 0, d_norm ); // hipDeviceSynchronize(); } /// Helper functions for debugging __global__ void print_GPU(double* x) { printf("[GPU] x = %e\n", *x); } __global__ void print_GPU(int* x) { printf("[GPU] x = %d\n", *x); } __global__ void print_GPU(size_t* x) { printf("[GPU] x = %lu\n", *x); } __global__ void print_GPU(bool* x) { printf("[GPU] x = %d\n", *x); } __global__ void printLinearVector_GPU(size_t* x, size_t i, size_t num_rows, size_t num_cols) { for ( int j = 0 ; j < num_cols ; j++ ) printf("%lu ", x[j+i*num_cols]); printf("\n"); } __global__ void printLinearVector_GPU(double* x, size_t i, size_t num_rows, size_t num_cols) { for ( int j = 0 ; j < num_cols ; j++ ) printf("%f ", x[j+i*num_cols]); printf("\n"); } __host__ void printLinearVector(size_t* x, size_t num_rows, size_t num_cols) { for(int i = 0 ; i < num_rows ; i++ ) { hipLaunchKernelGGL(( printLinearVector_GPU), dim3(1),dim3(1), 0, 0, x, i, num_rows, num_cols); hipDeviceSynchronize(); } } __host__ void printLinearVector(double* x, size_t num_rows, size_t num_cols) { for(int i = 0 ; i < num_rows ; i++ ) { hipLaunchKernelGGL(( 
printLinearVector_GPU), dim3(1),dim3(1), 0, 0, x, i, num_rows, num_cols); hipDeviceSynchronize(); } } __global__ void printVector_GPU(double* x) { int id = blockDim.x * blockIdx.x + threadIdx.x; printf("[GPU] x[%d] = %e\n", id, x[id]); } __global__ void printVector_GPU(double* x, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) printf("%d %e\n", id, x[id]); } __global__ void printVector_GPU(std::size_t* x, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) printf("%d %lu\n", id, x[id]); } __global__ void printVector_GPU(int* x) { int id = blockDim.x * blockIdx.x + threadIdx.x; printf("[GPU] x[%d] = %d\n", id, x[id]); } __global__ void printELL_GPU(double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols) { for ( int i = 0 ; i < num_rows ; i++) { for ( int j = 0 ; j < num_cols ; j++) printf("%f ", valueAt(i, j, value, index, max_row_size) ); printf("\n"); } } __global__ void printELLrow_GPU(size_t row, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols) { for ( int j = 0 ; j < num_cols ; j++) printf("%f ", valueAt(row, j, value, index, max_row_size) ); printf("\n"); } __host__ void printELLrow(size_t lev, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols) { for ( size_t i = 0 ; i < num_rows ; i++ ) { hipLaunchKernelGGL(( printELLrow_GPU), dim3(1),dim3(1), 0, 0, i, value, index, max_row_size, num_rows, num_cols); hipDeviceSynchronize(); } } // (scalar) a = b __global__ void equals_GPU(double* a, double* b) { *a = *b; } // x = a * b __global__ void dotProduct_GPU(double* x, double* a, double* b, size_t num_rows) { unsigned int id = threadIdx.x + blockDim.x*blockIdx.x; unsigned int stride = blockDim.x*gridDim.x; __shared__ double cache[1024]; double temp = 0.0; // filling in the shared variable while(id < num_rows){ temp += a[id]*b[id]; id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ atomicAdd_double(x, cache[0]); } __syncthreads(); } __global__ void LastBlockDotProduct(double* dot, double* x, double* y, size_t starting_index) { int id = threadIdx.x + blockDim.x*blockIdx.x + starting_index; atomicAdd_double(dot, x[id]*y[id]); } // dot = a[] * b[] __host__ void dotProduct(double* dot, double* a, double* b, size_t N, dim3 gridDim, dim3 blockDim) { hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0, dot, 1 ); // getting the last block's size size_t lastBlockSize = blockDim.x - ( (gridDim.x * blockDim.x ) - N ); if ( N < blockDim.x) { hipLaunchKernelGGL(( LastBlockDotProduct), dim3(1), dim3(N), 0, 0, dot, a, b, 0 ); } else { // dot products for the full blocks hipLaunchKernelGGL(( dotProduct_GPU), dim3(gridDim.x - 1), dim3(blockDim), 0, 0, dot, a, b, (gridDim.x - 1)*blockDim.x ); // dot products for the last incomplete block hipLaunchKernelGGL(( LastBlockDotProduct), dim3(1), dim3(lastBlockSize), 0, 0, dot, a, b, ( (gridDim.x - 1) * blockDim.x ) ); } } // x = y / z __global__ void divide_GPU(double *x, double *y, double *z) { *x = *y / *z; } // x += y __global__ void add_GPU(double *x, double *y) { *x += *y; } // x -= y __global__ void minus_GPU(double *x, double *y) { *x -= *y; } // x += c __global__ void addVector_GPU(double *x, double *c, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) x[id] += 
c[id]; } __global__ void transformToELL_GPU(double *array, double *value, size_t *index, size_t max_row_size, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { size_t counter = id*max_row_size; size_t nnz = 0; // printf("array = %e\n", array [ 1 ]); for ( int j = 0 ; nnz < max_row_size ; j++ ) { if ( array [ j + id*num_rows ] != 0 ) { // printf("array = %e\n", array [ j + id*num_rows ]); value [counter] = array [ j + id*num_rows ]; index [counter] = j; // printf("value = %e\n", value[counter]); counter++; nnz++; } if ( j == num_rows - 1 ) { for ( ; nnz < max_row_size ; counter++ && nnz++ ) { value [counter] = 0.0; index [counter] = num_rows; } } } } } std::size_t getMaxRowSize(vector<vector<double>> &array, size_t num_rows, size_t num_cols) { std::size_t max_row_size = 0; for ( int i = 0; i < num_rows ; i++ ) { std::size_t max_in_row = 0; for ( int j = 0 ; j < num_cols ; j++ ) { if ( array[i][j] < -1.0e-8 || array[i][j] > 1.0e-8 ) max_in_row++; } if ( max_in_row >= max_row_size ) max_row_size = max_in_row; } return max_row_size; } // transforms a 2D array into ELLPACK's vectors value and index // max_row_size has to be determined prior to this void transformToELL(vector<vector<double>> &array, vector<double> &value, vector<size_t> &index, size_t max_row_size, size_t num_rows, size_t num_cols ) { size_t nnz; for ( int i = 0 ; i < num_rows ; i++) { nnz = 0; // printf("array = %e\n", array [ 1 ]); for ( int j = 0 ; nnz < max_row_size ; j++ ) { if ( array[i][j] < -1.0e-8 || array[i][j] > 1.0e-8 ) { // printf("array = %e\n", array [ j + id*num_rows ]); value.push_back(array[i][j]); index.push_back(j); nnz++; } if ( j == num_cols - 1 ) { for ( ; nnz < max_row_size ; nnz++ ) { value.push_back(0.0); index.push_back(num_rows); } } } } } //TEMP: // sets identity rows and columns of the DOF in which a BC is applied void applyMatrixBC(vector<vector<double>> &array, size_t index, size_t num_rows, size_t dim) { // index *= dim; // for ( int j = 0 ; j < dim ; j++ ) // { for ( int i = 0 ; i < num_rows ; i++ ) { array[i][index] = 0.0; array[index][i] = 0.0; } array[index][index] = 1.0; // } } __host__ void applyMatrixBC(double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols, size_t dim, size_t bc_index) { } // a = b __global__ void vectorEquals_GPU(double* a, double* b, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) a[id] = b[id]; } //////////////////////////////////////////// // ASSEMBLER //////////////////////////////////////////// __host__ vector<vector<size_t>> applyBC(vector<size_t> N, size_t numLevels, size_t bc_case, size_t dim) { vector<vector<size_t>> bc_index(numLevels); vector<size_t> nodesPerDim; // nodesPerDim.push_back(N[0]+1); // nodesPerDim.push_back(N[1]+1); for( int i = 0 ; i < N.size() ; i++ ) nodesPerDim.push_back(N[i]+1); // base level size_t totalNodes2D = nodesPerDim[0]*nodesPerDim[1]; for ( int i = 0 ; i < nodesPerDim[1] ; i++ ) { bc_index[0].push_back(i*nodesPerDim[0]*dim); if ( dim == 3 ) { for ( int j = 1 ; j < nodesPerDim[2] ; j++ ) bc_index[0].push_back(i*nodesPerDim[0]*dim + totalNodes2D*3*j); } } // y-direction boundary condition at bottom right node bc_index[0].push_back(dim*N[0] + 1 ); if ( dim == 3 ) { for ( int j = 1 ; j < nodesPerDim[2] ; j++ ) bc_index[0].push_back(dim*N[0] + 1 + totalNodes2D*3*j); } // finer levels for ( int lev = 1 ; lev < numLevels ; lev++ ) { for( int i = 0 ; i < N.size() ; i++ ) nodesPerDim[i] = 2*nodesPerDim[i] - 1; totalNodes2D = 
nodesPerDim[0]*nodesPerDim[1]; for ( int i = 0 ; i < nodesPerDim[1] ; i++ ) { bc_index[lev].push_back(i*nodesPerDim[0]*dim); if ( dim == 3 ) { for ( int j = 1 ; j < nodesPerDim[2] ; j++ ) bc_index[lev].push_back(i*nodesPerDim[0]*dim + totalNodes2D*3*j); } } // y-direction boundary condition at bottom right node bc_index[lev].push_back(nodesPerDim[0]*dim - (dim-1)); if ( dim == 3 ) { for ( int j = 1 ; j < nodesPerDim[2] ; j++ ) bc_index[lev].push_back(dim*nodesPerDim[0] - (dim-1) + totalNodes2D*3*j); } } return bc_index; } __host__ void applyLoad(vector<double> &b, vector<size_t> N, size_t numLevels, size_t bc_case, size_t dim, double force) { vector<size_t> nodesPerDim; for ( int i = 0 ; i < N.size() ; i++) nodesPerDim.push_back(N[i]+1); size_t index = 0; for ( int lev = 0 ; lev < numLevels - 1 ; lev++) { for ( int i = 0 ; i < N.size() ; i++) nodesPerDim[i] = 2*nodesPerDim[i] - 1; } index = dim * nodesPerDim[0] * ( nodesPerDim[1] - 1 ) + 1; b[index] = force; if ( dim == 3 ) { for ( int i = 1 ; i < nodesPerDim[2] ; i++ ) { index = index + (nodesPerDim[0]*nodesPerDim[1])*dim; b[index] = force; } } } __global__ void assembleGrid2D_GPU( size_t N, // number of elements per row size_t dim, // dimension double* chi, // the updated design variable value of each element double* A_local, // local stiffness matrix double* value, // global element's ELLPACK value vector size_t* index, // global element's ELLPACK index vector size_t max_row_size, // global element's ELLPACK maximum row size size_t num_rows, // global element's ELLPACK number of rows size_t* node_index, // vector that contains the corresponding global indices of the node's local indices size_t p ) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx < num_rows && idy < num_rows ) { size_t local_num_cols = pow(2,dim) * dim; addAt( dim*node_index[ idx/dim ] + ( idx % dim ), dim*node_index[idy/dim] + ( idy % dim ), value, index, max_row_size, pow(*chi,p)*A_local[ ( idx + idy*local_num_cols ) ] ); } // addAt( 2*node_index[ idx/2 ] + ( idx % 2 ), 2*node_index[idy/2] + ( idy % 2 ), value, index, max_row_size, pow(*chi,p)*A_local[ ( idx + idy * ( 4 * dim ) ) ] ); // addAt( 2*node_index[ idx/2 ] + ( idx % 2 ), 2*node_index[idy/2] + ( idy % 2 ), value, index, max_row_size, A_local[ ( idx + idy * ( 4 * dim ) ) ] ); // if ( idx == 0 && idy == 0 ) // printf("%e\n", *chi); } __global__ void applyMatrixBC_GPU(double* value, size_t* index, size_t max_row_size, size_t bc_index, size_t num_rows) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx == bc_index && idy == bc_index ) setAt( idx, idy, value, index, max_row_size, 1.0 ); } // CHECK: overkill to use this many threads? 
__global__ void applyMatrixBC_GPU_test(double* value, size_t* index, size_t max_row_size, size_t bc_index, size_t num_rows, size_t num_cols) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; // printf("(%d, %d) = %lu, %d, %d\n", idx, idy, bc_index, num_rows, num_cols); if ( idx < num_cols && idy < num_rows ) { if ( idx == bc_index && idy == bc_index ) { for ( int i = 0 ; i < num_rows ; i++ ) setAt( i, idy, value, index, max_row_size, 0.0 ); for ( int j = 0 ; j < num_cols ; j++ ) setAt( idx, j, value, index, max_row_size, 0.0 ); setAt( idx, idy, value, index, max_row_size, 1.0 ); } } } // obtain a node's corresponding fine node index __host__ size_t getFineNode(size_t index, vector<size_t> N, size_t dim) { // check for error size_t num_nodes = N[0] + 1; for ( int i = 1 ; i < dim ; i++ ) num_nodes *= (N[i] + 1); if ( index > num_nodes - 1 ) throw(runtime_error("Error : Index does not exist on this level")); if ( dim == 3 ) { size_t twoDimSize = (N[0]+1)*(N[1]+1); size_t baseindex = index % twoDimSize; size_t base_idx = baseindex % (N[0]+1); size_t fine2Dsize = (2*N[0]+1)*(2*N[1]+1); size_t multiplier = index/twoDimSize; // return 2*multiplier*fine2Dsize + (2*( baseindex % twoDimSize ) + (ceil)(baseindex/2)*2) ; return 2*base_idx + (baseindex/(N[0]+1))*2*(2*N[0] + 1) + 2*fine2Dsize*multiplier; } else return (2 * (ceil)(index / (N[0] + 1)) * (2*N[0] + 1) + 2*( index % (N[0]+1)) ); } // input the coarse node's "index" to obtain the node's corresponding fine node index __device__ size_t getFineNode_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz, size_t dim) { // size_t num_nodes = (Nx + 1)*(Ny + 1)*(Nz + 1); if ( dim == 3 ) { size_t twoDimSize = (Nx+1)*(Ny+1); size_t baseindex = index % twoDimSize; size_t base_idx = baseindex % (Nx+1); size_t fine2Dsize = (2*Nx+1)*(2*Ny+1); size_t multiplier = index/twoDimSize; return 2*base_idx + (baseindex/(Nx+1))*2*(2*Nx + 1) + 2*fine2Dsize*multiplier; // return 2*multiplier*fine2Dsize + (2*( baseindex ) + (baseindex/2)*2) ; // return 2*multiplier*fine2Dsize + (2*( baseindex % twoDimSize ) + (baseindex/2)*2) ; } else return (2 * (index / (Nx + 1)) * (2*Nx + 1) + 2*( index % (Nx+1)) ); } __global__ void fillRestMatrix(double* r_value, size_t* r_index, size_t r_max_row_size, double* p_value, size_t* p_index, size_t p_max_row_size, size_t num_rows, size_t num_cols) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx < num_cols && idy < num_rows ) setAt_( r_index[idx + idy*r_max_row_size], idy, r_value, r_index, num_cols, r_max_row_size, valueAt(r_index[idx + idy*r_max_row_size], idy, p_value, p_index, p_max_row_size)); } // //////////////////////////////////////////// // // SMOOTHERS // //////////////////////////////////////////// __global__ void Jacobi_Precond_GPU(double* c, double* value, size_t* index, size_t max_row_size, double* r, size_t num_rows, double damp){ int id = blockDim.x * blockIdx.x + threadIdx.x; // B = damp / diag(A); if ( id < num_rows ) c[id] = r[id] * damp / valueAt(id, id, value, index, max_row_size); } // //////////////////////////////////////////// // // SOLVER // //////////////////////////////////////////// __global__ void checkIterationConditions(bool* foo, size_t* step, double* res, double* res0, double* m_minRes, double* m_minRed, size_t m_maxIter){ if ( *res > *m_minRes && *res > *m_minRed*(*res0) && (*step) <= m_maxIter ) *foo = true; else { // printf("false\n"); // printf("res = %f\n",*res); // printf("m_minRes = %f\n",*m_minRes); 
// printf("m_minRed = %f\n",*m_minRed); // printf("step = %lu\n",(*step)); *foo = false; } } __global__ void printInitialResult_GPU(double* res0, double* m_minRes, double* m_minRed) { printf(" 0 %e %9.3e ----- -------- %9.3e \n", *res0, *m_minRes, *m_minRed); } /// r = b - A*x __global__ void ComputeResiduum_GPU( const std::size_t num_rows, const std::size_t num_cols_per_row, const double* value, const std::size_t* index, const double* x, double* r, double* b) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { double dot = 0.0; for ( int n = 0; n < num_cols_per_row; n++ ) { int col = index [ num_cols_per_row * id + n ]; double val = value [ num_cols_per_row * id + n ]; dot += val * x [ col ]; } r[id] = b[id] - dot; } } /// r = r - A*x __global__ void UpdateResiduum_GPU( const std::size_t num_rows, const std::size_t num_cols_per_row, const double* value, const std::size_t* index, const double* x, double* r) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { double dot = 0.0; for ( int n = 0; n < num_cols_per_row; n++ ) { std::size_t col = index [ num_cols_per_row * id + n ]; double val = value [ num_cols_per_row * id + n ]; dot += val * x [ col ]; } r[id] = r[id] - dot; } } /// r = A*x __global__ void Apply_GPU( const std::size_t num_rows, const std::size_t num_cols_per_row, const double* value, const std::size_t* index, const double* x, double* r) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { double dot = 0; for ( int n = 0; n < num_cols_per_row; n++ ) { int col = index [ num_cols_per_row * id + n ]; double val = value [ num_cols_per_row * id + n ]; dot += val * x [ col ]; } r[id] = dot; } } /// r = A^T * x /// NOTE: This kernel should be run with A's number of rows as the number of threads /// e.g., r's size = 9, A's size = 25 x 9, x's size = 25 /// ApplyTransposed_GPU<<<1, 25>>>() __global__ void ApplyTransposed_GPU( const std::size_t num_rows, const std::size_t num_cols_per_row, const double* value, const std::size_t* index, const double* x, double* r) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { for ( int n = 0; n < num_cols_per_row; n++ ) { int col = index [ num_cols_per_row * id + n ]; double val = value [ num_cols_per_row * id + n ]; atomicAdd_double( &r[col], val*x[id] ); } } } __global__ void printResult_GPU(size_t* step, double* res, double* m_minRes, double* lastRes, double* res0, double* m_minRed) { if(*step < 10) printf(" %d %e %9.3e %9.3e %e %9.3e \n", *step, *res, *m_minRes, (*res)/(*lastRes), (*res)/(*res0), *m_minRed); else printf(" %d %e %9.3e %9.3e %e %9.3e \n", *step, *res, *m_minRes, (*res)/(*lastRes), (*res)/(*res0), *m_minRed); } __global__ void addStep(size_t* step){ ++(*step); } // BASE SOLVER // p = z + p * beta; __global__ void calculateDirectionVector( size_t* d_step, double* d_p, double* d_z, double* d_rho, double* d_rho_old, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { // if(step == 1) p = z; if(*d_step == 1) { d_p[id] = d_z[id]; } else { // p *= (rho / rho_old) d_p[id] = d_p[id] * ( *d_rho / (*d_rho_old) ); // __syncthreads(); // p += z; d_p[id] = d_p[id] + d_z[id]; } } } // A_ = P^T * A * P __host__ void PTAP(vector<vector<double>> &A_, vector<vector<double>> &A, vector<vector<double>> &P, size_t num_rows, size_t num_rows_) { // temp vectors std::vector<std::vector<double>> foo ( num_rows, std::vector <double> (num_rows_, 0.0)); // foo = A * P for ( int i = 0 ; i < num_rows ; i++ ) { for( int j = 0 ; j < num_rows_ ; 
j++ ) { for ( int k = 0 ; k < num_rows ; k++) { // cout << "PTAP-ijk = " << i << " " << j << " " << k << endl; foo[i][j] += A[i][k] * P[k][j]; } } } // A_ = P^T * foo for ( int i = 0 ; i < num_rows_ ; i++ ) { for( int j = 0 ; j < num_rows_ ; j++ ) { for ( int k = 0 ; k < num_rows ; k++) A_[i][j] += P[k][i] * foo[k][j]; } } // temp vectors // std::vector<std::vector<double>> foo ( num_rows, std::vector <double> (num_rows_, 0.0)); // double** foo = new double*[num_rows]; // for(int i = 0; i < num_rows; i++) // { // foo[i] = new double[num_rows_]; // } // for ( int i = 0 ; i < num_rows ; i++ ) // { // for( int j = 0 ; j < num_rows_ ; j++ ) // { // foo[i][j] = 0; // } // } // // foo = A * P // for ( int i = 0 ; i < num_rows ; i++ ) // { // for( int j = 0 ; j < num_rows_ ; j++ ) // { // for ( int k = 0 ; k < num_rows ; k++) // { // // cout << "PTAP-ijk = " << i << " " << j << " " << k << endl; // foo[i][j] += A[i][k] * P[k][j]; // } // } // } // // A_ = P^T * foo // for ( int i = 0 ; i < num_rows_ ; i++ ) // { // for( int j = 0 ; j < num_rows_ ; j++ ) // { // for ( int k = 0 ; k < num_rows ; k++) // A_[i][j] += P[k][i] * foo[k][j]; // } // } // for ( int i = 0 ; i < num_rows ; i++ ) // { // for( int j = 0 ; j < num_rows_ ; j++ ) // cout << foo[i][j] << " "; // cout << endl; // } // for(int i = 0; i < num_rows; i++) // { // delete [] foo[i]; // } // delete [] foo; } __host__ void calculateAlpha( double* d_alpha, double* d_rho, double* d_p, double* d_z, double* d_alpha_temp, size_t num_rows, dim3 gridDim, dim3 blockDim) { hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0, d_alpha_temp, 1); // alpha_temp = () p * z ) dotProduct(d_alpha_temp, d_p, d_z, num_rows, gridDim, blockDim); // d_alpha = *d_rho / (*alpha_temp) hipLaunchKernelGGL(( divide_GPU), dim3(1),dim3(1), 0, 0, d_alpha, d_rho, d_alpha_temp); } // x = x + alpha * p __global__ void axpy_GPU(double* d_x, double* d_alpha, double* d_p, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) d_x[id] += (*d_alpha * d_p[id]); } // x = x - alpha * p __global__ void axpy_neg_GPU(double* d_x, double* d_alpha, double* d_p, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) d_x[id] = d_x[id] - (*d_alpha * d_p[id]); } //// TDO // df = ( 1/2*omega ) * p * chi^(p-1) * sum(local stiffness matrices) __global__ void UpdateDrivingForce(double *df, double* uTau, double p, double *chi, double local_volume, size_t N) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < N ) df[id] = uTau[id] * ( local_volume / (2*local_volume) ) * p * pow(chi[id], p - 1); // df[id] = uTKu[id] * ( 1 / (2*local_volume) ) * p * pow(chi[id], p - 1); } // x[] = u[]^T * A * u[] __global__ void calcDrivingForce_GPU(double *x, double *u, double* chi, double p, size_t *node_index, double* d_A_local, size_t num_rows, size_t dim, double local_volume) { double temp[24]; //CHECK: *x = 0; for ( int n = 0; n < num_rows; n++ ) { temp[n]=0; for ( int m = 0; m < num_rows; m++) { // converts local node to global node int global_col = ( node_index [ m / dim ] * dim ) + ( m % dim ); // printf("u[%d] = %f\n", global_col, u[global_col]); temp[n] += u[global_col] * d_A_local[ n + m*num_rows ]; } } for ( int n = 0; n < num_rows; n++ ) { int global_col = ( node_index [ n / dim ] * dim ) + ( n % dim ); *x += temp[n] * u[global_col]; } *x *= 0.5 * p * pow(*chi, p-1) / local_volume; } // calculate the driving force per element __host__ void calcDrivingForce( double *df, // driving force double *chi, // design variable double 
p, // penalization parameter double *uTAu, // dummy/temp vector double *u, // elemental displacement vector vector<size_t*> node_index, double* d_A_local, size_t num_rows, // local ELLPack stiffness matrix's number of rows dim3 gridDim, // grid and dim3 blockDim, const size_t dim, size_t numElements, // block sizes needed for running CUDA kernels double local_volume ) { // calculate the driving force in each element ( 1 element per thread ) // df[] = (0.5/local_volume) * p * pow(chi,p-1) - u[]^T * A_local * u[] for ( int i = 0 ; i < numElements; i++ ) hipLaunchKernelGGL(( calcDrivingForce_GPU), dim3(1), dim3(1), 0, 0, &df[i], u, &chi[i], p, node_index[i], d_A_local, num_rows, dim, local_volume); hipDeviceSynchronize(); } __global__ void sumOfVector_GPU(double* sum, double* x, size_t n) { int id = blockDim.x * blockIdx.x + threadIdx.x; int stride = blockDim.x*gridDim.x; // if ( id < n ) // printf("%d : %e\n", id, x[id]); __shared__ double cache[1024]; cache[threadIdx.x] = 0; double temp = 0.0; while(id < n) { temp += x[id]; id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } // reduce sum from all blocks' cache if(threadIdx.x == 0) atomicAdd_double(sum, cache[0]); } __global__ void calcDrivingForce_( double *df, // driving force double *chi, // design variable double p, // penalization parameter double *u, // elemental displacement vector size_t* node_index, double* d_A_local, size_t num_rows, // local ELLPack stiffness matrix's number of rows size_t dim) { int id = blockDim.x * blockIdx.x + threadIdx.x; double uTAu; if ( id < num_rows ) { uTAu = 0; // uTAu = uT * A for ( int n = 0; n < num_rows; n++ ) { // converts local node to global node int global_col = ( node_index [ n / dim ] * dim ) + ( n % dim ); uTAu += u[global_col] * d_A_local[ id + n*num_rows ]; } // uTAu *= u uTAu *= u[ ( node_index [ id / dim ] * dim ) + ( id % dim ) ]; df[id] = uTAu * (p) * pow(chi[id], (p-1)); } } __device__ double laplacian_GPU( double *array, size_t ind, size_t Nx, size_t Ny, size_t Nz, double h ) { bool east = ( (ind + 1) % Nx != 0 ); bool north = ( ind + Nx < Nx*Ny ); bool west = ( ind % Nx != 0 ); bool south = ( ind >= Nx ); double value = -4.0 * array[ind]; // east element if ( east ) value += 1.0 * array[ind + 1]; else value += 1.0 * array[ind]; // north element if ( north ) value += 1.0 * array[ind + Nx]; else value += 1.0 * array[ind]; // west element if ( west ) value += 1.0 * array[ind - 1]; else value += 1.0 * array[ind]; // south element if ( south ) value += 1.0 * array[ind - Nx]; else value += 1.0 * array[ind]; return value/(h*h); } __global__ void calcLambdaUpper(double *df_array, double *max, int *mutex, double* beta, double *chi, double* eta, int Nx, int Ny, int Nz, unsigned int numElements, double h) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ double cache[1024]; *max = -1.0e9; *mutex = 0; double temp = -1.0e9; while(index + offset < numElements){ //TODO:DEBUG: temp = fmaxf(temp, ( df_array[index + offset] + ( *beta * laplacian_GPU( chi, index, Nx, Ny, Nz, h ) ) ) ); // temp = fmaxf(temp, ( df_array[index + offset] + *eta ) ); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + 
i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *max = fmaxf(*max, cache[0]); atomicExch(mutex, 0); //unlock } } __global__ void calcLambdaLower(double *df_array, double *min, int *mutex, double* beta, double *chi, double* eta, int Nx, int Ny, int Nz, unsigned int numElements, double h) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ double cache[1024]; *min = 1.0e9; *mutex = 0; double temp = 1.0e9; if ( index < numElements ) { while(index + offset < numElements){ temp = fminf(temp, ( df_array[index + offset] + ( *beta * laplacian_GPU( chi, index, Nx, Ny, Nz, h ) ) ) ); // temp = fminf(temp, ( df_array[index + offset] - *eta ) ); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fminf(cache[threadIdx.x], cache[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *min = fminf(*min, cache[0]); atomicExch(mutex, 0); //unlock } } } __global__ void calcChiTrial( double *chi, double *df, double *lambda_trial, double del_t, double* eta, double* beta, double* chi_trial, size_t Nx, size_t Ny, size_t Nz, size_t numElements, double h ) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if ( id < numElements ) { double del_chi; del_chi = ( del_t / *eta ) * ( df[id] - *lambda_trial + (*beta)*( laplacian_GPU( chi, id, Nx, Ny, Nz, h ) ) ); if ( del_chi + chi[id] > 1 ) chi_trial[id] = 1; else if ( del_chi + chi[id] < 1e-9 ) chi_trial[id] = 1e-9; else chi_trial[id] = del_chi + chi[id]; } } __global__ void calcLambdaTrial(double *rho_trial, double rho, double *lambda_l, double *lambda_u, double *lambda_trial) { if ( *rho_trial > rho ) *lambda_l = *lambda_trial; else *lambda_u = *lambda_trial; *lambda_trial = 0.5 * ( *lambda_l + *lambda_u ); } __global__ void calcRhoTrial(double* rho_tr, double local_volume, size_t numElements) { double total_volume = local_volume * numElements; *rho_tr *= local_volume; *rho_tr /= total_volume; } // NOTE: shelved for now __global__ void int_g_p(double* d_temp, double* d_df, double local_volume, size_t numElements) { // unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; // if( id < numElements) // { // // calculate g of element // d_temp[id] = (d_chi[id] - 1e-9)*(1-d_chi[id]) * d_df[id] * local_volume; // } } // calculate the average weighted driving force, p_w __global__ void calcP_w_GPU(double* p_w, double* df, double* uTAu, double* chi, int p, double local_volume, size_t numElements) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; __shared__ double int_g_p[1024]; __shared__ double int_g[1024]; if( id < numElements) { df[id] = uTAu[id] * ( local_volume / (2*local_volume) ) * p * pow(chi[id], p - 1); int_g_p[id] = (chi[id] - 1e-9)*(1-chi[id]) * df[id] * local_volume; int_g[id] = (chi[id] - 1e-9)*(1-chi[id]) * local_volume; __syncthreads(); // atomicAdd_double(&d_temp[0], int_g_p[id]); // atomicAdd_double(&d_temp[1], int_g[id]); if ( id == 0 ) { for ( int i = 1 ; i < numElements ; ++i ) int_g_p[0] += int_g_p[i]; } if ( id == 1 ) { for ( int i = 1 ; i < numElements ; ++i ) int_g[0] += int_g[i]; } __syncthreads(); if ( id == 0 ) *p_w = int_g_p[0] / int_g[0]; } } __global__ void calc_g_GPU(double*g, double* chi, size_t numElements, double local_volume) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if (id < numElements) { g[id] = (chi[id] - 
1e-9)*(1-chi[id]) * local_volume; // if ( id == 0 ) // printf("%f\n", g[id]); } } // sum = sum ( df * g * local_volume) __global__ void calcSum_df_g_GPU(double* sum, double* df, double* g, size_t numElements) { int id = blockDim.x * blockIdx.x + threadIdx.x; int stride = blockDim.x*gridDim.x; // if ( id < n ) // printf("%d : %e\n", id, x[id]); __shared__ double cache[1024]; cache[threadIdx.x] = 0; double temp = 0.0; while(id < numElements) { temp += df[id]*g[id]; // local volume is already included in g, i.e. g = g*local_volume id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } // reduce sum from all blocks' cache if(threadIdx.x == 0) atomicAdd_double(sum, cache[0]); } __host__ void calcP_w(double* p_w, double* sum_g, double* sum_df_g, double* df, double* chi, double* g, double* df_g, size_t numElements, double local_volume) { dim3 gridDim; dim3 blockDim; calculateDimensions(numElements, gridDim, blockDim); // calculate g of each element * local_volume hipLaunchKernelGGL(( calc_g_GPU), dim3(gridDim), dim3(blockDim), 0, 0, g, chi, numElements, local_volume); // calculate sum_g = sum(g) hipLaunchKernelGGL(( sumOfVector_GPU), dim3(gridDim), dim3(blockDim), 0, 0, sum_g, g, numElements); // sum_df_g = sum( g[i]*df[i]*local_volume ) hipLaunchKernelGGL(( calcSum_df_g_GPU), dim3(gridDim), dim3(blockDim), 0, 0, sum_df_g, df, g, numElements); // p_w = sum_df_g / sum_g hipLaunchKernelGGL(( divide_GPU), dim3(1),dim3(1), 0, 0, p_w, sum_df_g, sum_g); } __global__ void calcEtaBeta( double* eta, double* beta, double etastar, double betastar, double* p_w ) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if ( id == 0 ) *eta = etastar * (*p_w); if ( id == 1 ) *beta = betastar * (*p_w); } __global__ void RA( double* r_value, // restriction matrix's size_t* r_index, // ELLPACK vectors size_t r_max_row_size, double* value, // global stiffness matrix's size_t* index, // ELLPACK vectors size_t max_row_size, double* temp_matrix, // empty temp matrix size_t num_rows, // no. of rows of temp matrix size_t num_cols // no. of cols of temp matrix ) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; unsigned int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx < num_cols && idy < num_rows ) { for ( int j = 0 ; j < num_cols ; ++j ) temp_matrix[idx + idy*num_cols] += valueAt(idy, j, r_value, r_index, r_max_row_size) * valueAt(j, idx, value, index, max_row_size); //TODO: R matrix, no need valueAt, direct lookup } } __global__ void AP( double* value, // coarse global stiffness matrix's size_t* index, // ELLPACK vectors size_t max_row_size, double* p_value, // prolongation matrix's size_t* p_index, // ELLPACK vectors size_t p_max_row_size, double* temp_matrix, // temp_matrix = R*A size_t num_rows, // no. of rows of temp matrix size_t num_cols // no. 
of cols of temp matrix ) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; unsigned int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx < num_cols && idy < num_rows ) { for ( int j = 0 ; j < num_cols ; ++j ) addAt( idx, idy, value, index, max_row_size, temp_matrix[j + idy*num_cols] * valueAt(j, idx, p_value, p_index, p_max_row_size) ); } // if ( num_cols == 42 ) // { // for ( int j = 0 ; j < num_cols ; ++j ) // addAt( idx, idy, value, index, max_row_size, temp_matrix[j + idy*num_cols] * valueAt(j, idx, p_value, p_index, p_max_row_size) ); // if (idx==1 && idy==1) // { // addAt( idx, idy, value, index, max_row_size, 20.0 ); // printf("%f\n", valueAt(idx, idx, value, index, max_row_size)); // } // } // else // { // } // if ( idx == 0 && idy == 0 ) // printf("%f\n", valueAt(1, 1, value, index, max_row_size)); } // A_coarse = R * A_fine * P // TODO: not optimized yet __host__ void RAP( vector<double*> value, vector<size_t*> index, vector<size_t> max_row_size, vector<double*> r_value, vector<size_t*> r_index, vector<size_t> r_max_row_size, vector<double*> p_value, vector<size_t*> p_index, vector<size_t> p_max_row_size, double* temp_matrix, vector<size_t> num_rows, size_t lev) { // dim3 gridDim(2,2,1); // dim3 blockDim(32,32,1); dim3 gridDim; dim3 blockDim; calculateDimensions2D( num_rows[lev], num_rows[lev], gridDim, blockDim); // temp_matrix = R * A_fine hipLaunchKernelGGL(( RA), dim3(gridDim),dim3(blockDim), 0, 0, r_value[lev-1], r_index[lev-1], r_max_row_size[lev-1], value[lev], index[lev], max_row_size[lev], temp_matrix, num_rows[lev-1], num_rows[lev]); hipDeviceSynchronize(); // calculateDimensions2D( num_rows[0] * num_rows[0], gridDim, blockDim); hipLaunchKernelGGL(( AP), dim3(gridDim),dim3(blockDim), 0, 0, value[lev-1], index[lev-1], max_row_size[lev-1], p_value[lev-1], p_index[lev-1], p_max_row_size[lev-1], temp_matrix, num_rows[lev-1], num_rows[lev]); hipDeviceSynchronize(); } // TODO: CHECK: // returns value at (row, col) of matrix multiplication A*B __device__ double matMul(size_t row, size_t col, double* A_value, size_t* A_index, size_t A_max_row_size, size_t A_num_rows, double* B_value, size_t* B_index, size_t B_max_row_size, size_t b_num_rows ) { __shared__ double value; value = 0; for(int i = 0 ; i < A_max_row_size ; i++ ) { value += valueAt(row, A_index[i+A_max_row_size*row], A_value, A_index, A_max_row_size) * valueAt(A_index[i+A_max_row_size*row], col, B_value, B_index, B_max_row_size); // printf("%f %f\n ", valueAt(row, A_index[i], A_value, A_index, A_max_row_size), valueAt(A_index[i], col, B_value, B_index, B_max_row_size) ); // printf("%f\n ", valueAt(B_index[i], col, B_value, B_index, B_max_row_size) ); } // printf("%f\n ", value ); return value; } // A_coarse = R * A_fine * P __global__ void RAP_( double* value, size_t* index, size_t max_row_size, size_t num_rows, double* value_, size_t* index_, size_t max_row_size_, size_t num_rows_, double* r_value, size_t* r_index, size_t r_max_row_size, double* p_value, size_t* p_index, size_t p_max_row_size, size_t lev) { double RAP = 0; unsigned int col = threadIdx.x + blockIdx.x*blockDim.x; unsigned int row = threadIdx.y + blockIdx.y*blockDim.y; if ( row < num_rows_ && col < num_rows_ ) { for ( int i = 0 ; i < r_max_row_size ; i++ ) RAP += matMul(row, r_index[i + col*r_max_row_size], r_value, r_index, r_max_row_size, num_rows_, value, index, max_row_size, num_rows ) * valueAt(r_index[i+col*r_max_row_size], col, p_value, p_index, p_max_row_size); setAt( col, row, value_, index_, max_row_size_, RAP ); } } __global__ void 
checkTDOConvergence(bool* foo, double rho, double* rho_trial) { if ( abs(rho - *rho_trial) < 1e-7 ) *foo = false; } __global__ void fillIndexVector2D_GPU(size_t* index, size_t Nx, size_t Ny, size_t max_row_size, size_t num_rows) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; int counter = 0; int dim = 2; if ( id < num_rows ) { int base_id = (id - id%dim); // south-west if ( id >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim - dim + i; counter++; } } // south if ( id >= (Nx + 1)*dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + i; counter++; } } // south-east if ( id >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + dim + i; counter++; } } // west if ( (id) % ((Nx + 1)*dim) >= dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - dim + i; counter++; } } // origin for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + i; counter++; } // east if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + dim + i; counter++; } } // north-west if ( id < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim - dim + i; counter++; } } // north if ( id < (Nx+1)*(Ny)*dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + i; counter++; } } // north-east if ( base_id == 0 || id < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + dim + i; counter++; } } } } __global__ void fillIndexVector3D_GPU(size_t* index, size_t Nx, size_t Ny, size_t Nz, size_t max_row_size, size_t num_rows) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; int counter = 0; int dim = 3; if ( id < num_rows ) { size_t base_id = (id - id%dim); size_t baseid_2D = (id) % ((Nx + 1)*(Ny + 1)*dim); size_t gridsize_2D = (Nx+1)*(Ny+1)*dim; bool prev_layer = (id >= (Nx+1)*(Ny+1)*dim); bool next_layer = (id < (Nx+1)*(Ny+1)*(Nz)*dim); bool south = ((id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim); bool north = ((id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim); bool west = ((id) % ((Nx + 1)*dim) >= dim); bool east = ((base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0); //// previous layer // south-west // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( prev_layer && south && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim - dim + i - gridsize_2D; counter++; } } // south // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) if ( prev_layer && south ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + i - gridsize_2D; counter++; } } // south-east // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) if ( prev_layer && south && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = 
(id - id%dim) - (Nx+1)*dim + dim + i - gridsize_2D; counter++; } } // west // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( prev_layer && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - dim + i - gridsize_2D; counter++; } } // origin // if ( id >= (Nx+1)*(Ny+1)*dim ) if ( prev_layer ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + i - gridsize_2D; counter++; } } // east // if ( (base_id == 0 && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) || id >= (Nx+1)*(Ny+1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0) if ( prev_layer && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + dim + i - gridsize_2D; counter++; } } // north-west // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( prev_layer && north && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim - dim + i - gridsize_2D; counter++; } } // north // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) if ( prev_layer && north ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + i - gridsize_2D; counter++; } } // north-east // if ( ((id) % ((Nx + 1)*(Ny + 1)*dim) && base_id == 0 && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) || ( (id) % ((Nx + 1)*(Ny + 1)*dim) && id >= (Nx+1)*(Ny+1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0) ) if ( prev_layer && north && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + dim + i - gridsize_2D; counter++; } } //// current layer // south-west // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( south && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim - dim + i; counter++; } } // south // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) if ( south ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + i; counter++; } } // south-east // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) if ( south && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + dim + i; counter++; } } // west // if ( (id) % ((Nx + 1)*dim) >= dim ) if ( west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - dim + i; counter++; } } // origin for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + i; counter++; } // east // if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) if ( base_id == 0 || east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + dim + i; counter++; } } // north-west // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( north && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim - dim + i; counter++; } } // north // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) if ( north ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + i; counter++; } } // north-east // if ( (base_id == 0 ) || ( 
(id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) ) if ( base_id == 0 || (north && east ) ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + dim + i; counter++; } } //// next layer // south-west // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( next_layer && south && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim - dim + i + gridsize_2D; counter++; } } // south // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) if ( next_layer && south ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + i + gridsize_2D; counter++; } } // south-east // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) if ( next_layer && south && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + dim + i + gridsize_2D; counter++; } } // west // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( next_layer && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - dim + i + gridsize_2D; counter++; } } // origin // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim ) if ( next_layer ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + i + gridsize_2D; counter++; } } // east // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 || base_id == 0 ) if ( base_id == 0 || ( next_layer && east ) ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + dim + i + gridsize_2D; counter++; } } // north-west // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( next_layer && north && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim - dim + i + gridsize_2D; counter++; } } // north // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) if ( next_layer && north ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + i + gridsize_2D; counter++; } } // north-east // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 || base_id == 0 ) if ( base_id == 0 || (next_layer && north && east ) ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + dim + i + gridsize_2D; counter++; } } for ( int i = counter ; i < max_row_size; i++) { index[i + id*max_row_size] = num_rows; } // if ( id == 0 ) // { // for ( int i = 0 ; i < max_row_size ; i++ ) // printf( "%lu ", index[i + id*max_row_size] ); // printf("\n"); // } } } __global__ void fillProlMatrix2D_GPU(double* p_value, size_t* p_index, size_t Nx, size_t Ny, size_t p_max_row_size, size_t num_rows, size_t num_cols) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if ( id < num_rows ) { int counter = 0; int dim = 2; // coarse grid size_t Nx_ = Nx / 2; size_t Ny_ = Ny / 2; size_t base_id = (id - id%dim); size_t node_index = base_id / dim; int coarse_node_index = getCoarseNode_GPU(node_index, Nx, Ny, 0, dim); // if node is even 
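// Worked example of the lookup just performed (illustrative numbers only,
// assuming a fine grid with Nx = 4, i.e. 5 nodes per row, so the coarse grid
// has 3 nodes per row): fine node 12 = (x=2, y=2) sits on an even row and an
// even column, so getCoarseNode_GPU returns 12/2 - (12/10)*2 = 4, which is
// coarse node (1,1). Fine nodes on odd rows or odd columns have no coarse
// counterpart (the lookup returns -1) and are interpolated below with the
// usual weights: 1.0 where a coarse node coincides, 0.5 from each neighbour
// along a coarse edge, and 0.25 from each of the four diagonal neighbours.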
numbered bool condition1 = (node_index % 2 == 0 ); // if node exists in the coarse grid bool condition2 = ( node_index % ((Nx+1)*2) < (Nx + 1) ); bool south = ( id >= (Nx + 1)*dim ); bool west = ( (id) % ((Nx + 1)*dim) >= dim ); bool east = ( (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ); bool north = ( id < (Nx+1)*(Ny)*dim ); // if there exists a coarse node in the same location if ( getFineNode_GPU(coarse_node_index, Nx_, Ny_, 0, dim) == node_index ) { p_index[counter + id*p_max_row_size] = coarse_node_index*dim + id%dim; p_value[counter + id*p_max_row_size] = 1; counter++; } else { // south-west if ( south && condition1 && !condition2 && west ) { size_t south_west_fine_node = (node_index - (Nx+1) - 1); size_t south_west_coarse_node = getCoarseNode_GPU(south_west_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = south_west_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.25 ; counter++; } // south if ( south && !condition1 && !condition2 ) { size_t south_fine_node = (node_index - (Nx+1) ); size_t south_coarse_node = getCoarseNode_GPU(south_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = south_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.5 ; counter++; } // south-east if ( south && condition1 && !condition2 && east ) { size_t south_east_fine_node = (node_index - (Nx+1) + 1); size_t south_east_coarse_node = getCoarseNode_GPU(south_east_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = south_east_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.25 ; counter++; } // west if ( west && condition2 ) { size_t west_fine_node = (node_index - 1); size_t west_coarse_node = getCoarseNode_GPU(west_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = west_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.5 ; counter++; } // east if ( east && condition2 ) { size_t east_fine_node = (node_index + 1); size_t east_coarse_node = getCoarseNode_GPU(east_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = east_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.5 ; counter++; } // north-west if ( north && condition1 && !condition2 && west ) { size_t north_west_fine_node = (node_index + (Nx+1) - 1); size_t north_west_coarse_node = getCoarseNode_GPU(north_west_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = north_west_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.25 ; counter++; } // north if ( north && !condition1 && !condition2 ) { size_t north_fine_node = (node_index + (Nx+1) ); size_t north_coarse_node = getCoarseNode_GPU(north_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = north_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.5 ; counter++; } // north-east if ( north && condition1 && !condition2 && east ) { size_t north_east_fine_node = (node_index + (Nx+1) + 1); size_t north_east_coarse_node = getCoarseNode_GPU(north_east_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = north_east_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.25 ; counter++; } } for ( int i = counter ; i < p_max_row_size; i++) { p_index[i + id*p_max_row_size] = num_cols; } } } __device__ int getCoarseNode_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz, size_t dim) { // get coarse grid dimensions size_t Nx_ = Nx / 2; size_t Ny_ = Ny / 2; size_t Nz_ = Nz / 2; // if node is even numbered bool condition1 = (index % 2 == 0 ); // if 
node exists in the coarse grid bool condition2 = ( index % ((Nx+1)*2) < (Nx + 1) ); // printf("aps = %d\n", ((Nx+1)*2) ); if ( condition1 && condition2 ) { return index/2 - (index/((Nx+1)*2 ))*(Nx_); } // -1 means the node in the coarse grid does not exist else return -1; } // __global__ void fillIndexVectorProl2D_GPU(size_t* p_index, size_t Nx, size_t Ny, size_t p_max_row_size, size_t num_rows, size_t num_cols) // { // unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; // if ( id < num_rows ) // { // int counter = 0; // int dim = 2; // // coarse grid // size_t Nx_ = Nx / 2; // size_t Ny_ = Ny / 2; // size_t base_id = (id - id%dim); // size_t node_index = base_id / dim; // int coarse_node_index = getCoarseNode_GPU(node_index, Nx, Ny, 0, dim); // // if node is even numbered // bool condition1 = (node_index % 2 == 0 ); // // if node exists in the coarse grid // bool condition2 = ( node_index % ((Nx+1)*2) < (Nx + 1) ); // bool south = ( id >= (Nx + 1)*dim ); // bool west = ( (id) % ((Nx + 1)*dim) >= dim ); // bool east = ( (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ); // bool north = ( id < (Nx+1)*(Ny)*dim ); // // if there exists a coarse node in the same location // if ( getFineNode_GPU(coarse_node_index, Nx_, Ny_, 0, dim) == node_index ) // { // p_index[counter + id*p_max_row_size] = coarse_node_index*dim + id%dim; // counter++; // } // else // { // // south-west // if ( south && condition1 && !condition2 && west ) // { // size_t south_west_fine_node = (node_index - (Nx+1) - 1); // size_t south_west_coarse_node = getCoarseNode_GPU(south_west_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = south_west_coarse_node*dim + id%dim ; // counter++; // } // // south // if ( south && !condition1 && !condition2 ) // { // size_t south_fine_node = (node_index - (Nx+1) ); // size_t south_coarse_node = getCoarseNode_GPU(south_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = south_coarse_node*dim + id%dim ; // counter++; // } // // south-east // if ( south && condition1 && !condition2 && east ) // { // size_t south_east_fine_node = (node_index - (Nx+1) + 1); // size_t south_east_coarse_node = getCoarseNode_GPU(south_east_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = south_east_coarse_node*dim + id%dim ; // counter++; // } // // west // if ( west && condition2 ) // { // size_t west_fine_node = (node_index - 1); // size_t west_coarse_node = getCoarseNode_GPU(west_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = west_coarse_node*dim + id%dim ; // counter++; // } // // east // if ( east && condition2 ) // { // size_t east_fine_node = (node_index + 1); // size_t east_coarse_node = getCoarseNode_GPU(east_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = east_coarse_node*dim + id%dim ; // counter++; // } // // north-west // if ( north && condition1 && !condition2 && west ) // { // size_t north_west_fine_node = (node_index + (Nx+1) - 1); // size_t north_west_coarse_node = getCoarseNode_GPU(north_west_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = north_west_coarse_node*dim + id%dim ; // counter++; // } // // north // if ( north && !condition1 && !condition2 ) // { // size_t north_fine_node = (node_index + (Nx+1) ); // size_t north_coarse_node = getCoarseNode_GPU(north_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = north_coarse_node*dim + id%dim ; // counter++; // } // // north-east // if ( north && condition1 && !condition2 && east ) // { 
// size_t north_east_fine_node = (node_index + (Nx+1) + 1); // size_t north_east_coarse_node = getCoarseNode_GPU(north_east_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = north_east_coarse_node*dim + id%dim ; // counter++; // } // } // // else if ( coarse_node_index == -1 ) // // { // // print // // } // // bool origin = ( id ) // // // // // if ( id == getFineNode_GPU(id, Nx, Ny, 0, dim) ) // // { // // p_index[counter + id*p_max_row_size] = getFineNode_GPU(id, Nx, Ny, 0, dim); // // counter++; // // } // // else // // { // // } // for ( int i = counter ; i < p_max_row_size; i++) // { // p_index[i + id*p_max_row_size] = num_cols; // } // } // } __global__ void fillIndexVectorRest2D_GPU(size_t* r_index, size_t Nx, size_t Ny, size_t r_max_row_size, size_t num_rows, size_t num_cols) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; int counter = 0; int dim = 2; if ( id < num_rows ) { size_t coarse_node_index = id / dim; size_t fine_id = getFineNode_GPU(id, Nx, Ny, 0, dim); size_t base_id = (id - id%dim); // all on fine grid // base : dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) = (id - id%dim) // s : - ((Nx)*dim + 1)*2 = - (Nx+1)*dim // w : - dim // south-west if ( id >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) - ((Nx)*dim + 1)*2 - dim + id%dim; counter++; } // south if ( id >= (Nx + 1)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) - ((Nx)*dim + 1)*2 + id%dim; counter++; } // south-east if ( id >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) - ((Nx)*dim + 1)*2 + dim + id%dim; counter++; } // west if ( (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) - dim + id%dim; counter++; } // origin r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + id%dim; counter++; // east if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + dim + id%dim; counter++; } // north-west if ( id < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + ((Nx)*dim + 1)*2 - dim + id%dim; counter++; } // north if ( id < (Nx+1)*(Ny)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + ((Nx)*dim + 1)*2 + id%dim; counter++; } // north-east if ( base_id == 0 || id < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + ((Nx)*dim + 1)*2 + dim + id%dim; counter++; } for ( int i = counter ; i < r_max_row_size; i++) { r_index[i + id*r_max_row_size] = num_cols; } } } __global__ void fillIndexVectorRest3D_GPU(size_t* r_index, size_t Nx, size_t Ny, size_t Nz, size_t r_max_row_size, size_t num_rows, size_t num_cols) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; int counter = 0; int dim = 3; if ( id < num_rows ) { size_t coarse_node_index = id / dim; size_t fine_id = getFineNode_GPU(id, Nx, Ny, 0, dim); size_t base_id = (id - id%dim); size_t baseid_2D = (id) % ((Nx + 1)*(Ny + 1)*dim); // all 
on fine grid // base : dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) = (id - id%dim) // w : - dim // n : ((Nx)*2 + 1)*3 // s : - ((Nx)*2 + 1)*3 // previous layer // id >= (Nx+1)*(Ny+1) // TODO: take above index's || base ... //// previous layer // south-west if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 - dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // south if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // south-east if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // west if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // origin if ( id >= (Nx+1)*(Ny+1)*dim && id != 0) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // east if ( id >= (Nx+1)*(Ny+1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // north-west if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 - dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // north if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // north-east if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } //// current layer // south-west if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 - dim + id%dim; counter++; } // south if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + id%dim; counter++; } // south-east if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + dim + id%dim; counter++; } // west if ( (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - dim + id%dim; 
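// Offset bookkeeping for the 27-point restriction stencil built below
// (dim == 3 is hard-coded in this kernel):
//   +/- dim                         -> east / west fine neighbour
//   +/- (2*Nx + 1)*dim              -> north / south fine neighbour
//                                      (one fine-grid row of 2*Nx+1 nodes)
//   +/- (2*Nx + 1)*(2*Ny + 1)*dim   -> next / previous fine-grid layer
// Every column index written into r_index is one of these offsets applied to
// dim*getFineNode_GPU(coarse_node_index, ...), the fine DOF that coincides
// with this coarse DOF.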
counter++; } // origin r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + id%dim; counter++; // east if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + dim + id%dim; counter++; } // north-west if ( (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 - dim + id%dim; counter++; } // north if ( (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + id%dim; counter++; } // north-east if ( base_id == 0 || (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + dim + id%dim; counter++; } //// next layer // south-west if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 - dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // south if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // south-east if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // west if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // origin if ( id < (Nx+1)*(Ny+1)*(Nz)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // CHECK: // east if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 || base_id == 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // north-west if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 - dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // north if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // north-east if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 || base_id == 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + dim + id%dim + 
(2*Nx+1)*(2*Ny+1)*3; counter++; } for ( int i = counter ; i < r_max_row_size; i++) { r_index[i + id*r_max_row_size] = num_cols; } } // if ( id == 9 ) // { // for ( int i = 0 ; i < r_max_row_size ; i++) // printf("%lu ", r_index[i + id*r_max_row_size]); // printf("\n"); // } } // DEBUG: __global__ void checkMassConservation(double* chi, double local_volume, size_t numElements) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; __shared__ double temp[1024]; if ( id < numElements) { // sum of chi * local_volume temp[id] = chi[id] * local_volume; } __syncthreads(); if ( id == 0 ) { for ( int i = 1 ; i < numElements ; i++ ) { temp[0] += temp[i]; } // total volume double vol = local_volume * numElements; printf("chi_trial %f\n", temp[0] / vol); } } // // TODO: to delete __global__ void checkLaplacian(double* laplacian, double* chi, size_t Nx, size_t Ny, size_t Nz, size_t numElements, double h) { // laplacian_GPU( double *array, size_t ind, size_t Nx, size_t Ny, size_t Nz ) unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if ( id < numElements) { laplacian[id] = laplacian_GPU( chi, id, Nx, Ny, Nz, h ); } }
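// CPU reference sketch of the density update that the kernels above
// (calcLambdaUpper/Lower, calcChiTrial, calcRhoTrial, calcLambdaTrial,
// checkTDOConvergence) implement on the device. This is an illustration only:
// the function names, the fixed iteration cap and the uniform local_volume
// assumption are ours and are not part of the solver.
#include <vector>
#include <cmath>
#include <cstddef>

// forward-Euler update of one element's design variable, clamped to [1e-9, 1]
// (mirrors calcChiTrial)
static double chiTrialHost(double chi, double df, double lambda,
                           double del_t, double eta, double beta,
                           double laplacian_chi)
{
    double del_chi = (del_t / eta) * (df - lambda + beta * laplacian_chi);
    double chi_new = chi + del_chi;
    if (chi_new > 1.0)  return 1.0;
    if (chi_new < 1e-9) return 1e-9;
    return chi_new;
}

// bisection on the Lagrange multiplier lambda until the trial volume fraction
// matches rho (mirrors the lambda_l / lambda_u / lambda_trial kernels)
static std::vector<double> updateChiHost(const std::vector<double>& chi,
                                         const std::vector<double>& df,
                                         const std::vector<double>& lap_chi,
                                         double rho, double del_t,
                                         double eta, double beta,
                                         double lambda_l, double lambda_u)
{
    std::vector<double> chi_trial(chi.size());
    for (int it = 0; it < 50; ++it)
    {
        double lambda = 0.5 * (lambda_l + lambda_u);
        double rho_trial = 0.0;
        for (std::size_t e = 0; e < chi.size(); ++e)
        {
            chi_trial[e] = chiTrialHost(chi[e], df[e], lambda,
                                        del_t, eta, beta, lap_chi[e]);
            rho_trial += chi_trial[e];
        }
        // with a uniform mesh, local_volume cancels (cf. calcRhoTrial)
        rho_trial /= static_cast<double>(chi.size());
        if (std::fabs(rho_trial - rho) < 1e-7) break;   // cf. checkTDOConvergence
        if (rho_trial > rho) lambda_l = lambda;         // cf. calcLambdaTrial
        else                 lambda_u = lambda;
    }
    return chi_trial;
}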
9b51765e092c31875b73c9e264700f90f9df7120.cu
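// Worked example (illustration only; the 3x3 matrix, max_row_size = 2 and the
// helper name valueAtHost are made up for this sketch) of the ELLPACK layout
// that valueAt / addAt / setAt and transformToELL in this file rely on: each
// row stores exactly max_row_size (value, column-index) pairs, padded with
// value 0.0 and the out-of-range column index num_rows.
//
//        | 4 0 1 |                 value = { 4, 1,   3, 0,   2, 5 }
//    A = | 0 3 0 |  max_row = 2    index = { 0, 2,   1, 3,   0, 2 }
//        | 2 0 5 |                           row 0   row 1   row 2
//
// (row 1 has a single nonzero, so its second slot is the 0.0 / num_rows pad)
#include <cstddef>

// host-side twin of the device valueAt() lookup, for the example above;
// not used by the solver itself
inline double valueAtHost(std::size_t row, std::size_t col,
                          const double* value, const std::size_t* index,
                          std::size_t max_row_size)
{
    for (std::size_t k = 0; k < max_row_size; ++k)
        if (index[row * max_row_size + k] == col)
            return value[row * max_row_size + k];
    return 0.0;   // structural zero
}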
#include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <cmath> #include <ctime> #include <iostream> #include "cudakernels.h" #define CUDA_CALL( call ) \ { \ cudaError_t err = call; \ if ( cudaSuccess != err) \ fprintf(stderr, "CUDA error for %s in %d of %s : %s.\n", #call , __LINE__ , __FILE__ ,cudaGetErrorString(err));\ } using namespace std; // Self-defined double-precision atomicAdd function for nvidia GPUs with Compute Capability 6 and below. // Pre-defined atomicAdd() with double-precision does not work for pre-CC7 nvidia GPUs. __device__ double atomicAdd_double(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } // Determines 1-dimensional CUDA block and grid sizes based on the number of rows N __host__ void calculateDimensions(size_t N, dim3 &gridDim, dim3 &blockDim) { if ( N <= 1024 ) { blockDim.x = 1024; blockDim.y = 1; blockDim.z = 1; gridDim.x = 1; gridDim.y = 1; gridDim.z = 1; } else { blockDim.x = 1024; blockDim.y = 1; blockDim.z = 1; gridDim.x = (int)ceil(N/blockDim.x)+1; gridDim.y = 1; gridDim.z = 1; } } // Determines 2-dimensional CUDA block and grid sizes based on the number of rows N __host__ void calculateDimensions2D(size_t Nx, size_t Ny, dim3 &gridDim, dim3 &blockDim) { if ( Nx <= 32 && Ny <= 32) { blockDim.x = 32; blockDim.y = 32; blockDim.z = 1; gridDim.x = 1; gridDim.y = 1; gridDim.z = 1; } else { blockDim.x = 32; blockDim.y = 32; blockDim.z = 1; gridDim.x = (int)ceil(Nx/blockDim.x)+1; gridDim.y = (int)ceil(Ny/blockDim.y)+1; gridDim.z = 1; } } // TODO: this is for 2D only, need 3D later // calculates the DOF of a grid with dimensions __host__ size_t calcDOF(size_t Nx, size_t Ny, size_t dim) { return (Nx + 1) * (Ny + 1) * dim; } // returns value of an ELLPack matrix A at (x,y) __device__ double valueAt(size_t x, size_t y, double* vValue, size_t* vIndex, size_t max_row_size) { for(size_t k = 0; k < max_row_size; ++k) { if(vIndex[x * max_row_size + k] == y) return vValue[x * max_row_size + k]; } return 0.0; } // adds the value to an ELLPack matrix A at (x,y) __device__ void addAt( size_t x, size_t y, double* vValue, size_t* vIndex, size_t max_row_size, double value ) { for(size_t k = 0; k < max_row_size; ++k) { if(vIndex[y * max_row_size + k] == x) { vValue[y * max_row_size + k] += value; // printf("%f \n", vValue[x * max_row_size + k]); k = max_row_size; // to exit for loop } } } // sets the value of an ELLPack matrix A at (x,y) __device__ void setAt( size_t x, size_t y, double* vValue, size_t* vIndex, size_t max_row_size, double value ) { for(size_t k = 0; k < max_row_size; ++k) { if(vIndex[y * max_row_size + k] == x ) { vValue[y * max_row_size + k] = value; k = max_row_size; // to exit for loop } } } __device__ void setAt_( size_t x, size_t y, double* vValue, size_t* vIndex, size_t num_cols, size_t max_row_size, double value ) { for(size_t k = 0; k < max_row_size; ++k) { if(vIndex[y * max_row_size + k] == x && k < num_cols) { vValue[y * max_row_size + k] = value; k = max_row_size; // to exit for loop } } } __global__ void setToZero(double* a, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) a[id] = 0.0; } // norm = x.norm() __global__ void norm_GPU(double* norm, double* x, size_t num_rows) { int id = blockDim.x * blockIdx.x + 
threadIdx.x; // TODO: if (id < num) if ( id == 0 ) *norm = 0; __syncthreads(); if ( id < num_rows ) { atomicAdd_double( norm, x[id]*x[id] ); } __syncthreads(); if ( id == 0 ) *norm = sqrt(*norm); } // a[] = 0 // a[] = 0, size_t __global__ void setToZero(size_t* a, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) a[id] = 0.0; } //TODO: to delete // bool = true __global__ void setToTrue( bool *foo ) { *foo = true; } // DEBUG: TEST !!!!!!!!!!!!!!!!!!!!!!!!!! __global__ void sqrt_GPU(double *x) { *x = sqrt(*x); } // sum = sum( x[n]*x[n] ) __global__ void sumOfSquare_GPU(double* sum, double* x, size_t n) { int id = threadIdx.x + blockDim.x*blockIdx.x; int stride = blockDim.x*gridDim.x; __shared__ double cache[1024]; double temp = 0.0; while(id < n) { temp += x[id]*x[id]; id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } // reset id id = threadIdx.x + blockDim.x*blockIdx.x; // reduce sum from all blocks' cache if(threadIdx.x == 0) atomicAdd_double(sum, cache[0]); } __global__ void LastBlockSumOfSquare_GPU(double* sum, double* x, size_t n, size_t counter) { int id = threadIdx.x + blockDim.x*blockIdx.x; // if ( id >= counter*blockDim.x && id < ( ( counter*blockDim.x ) + lastBlockSize ) ) if ( id >= counter*blockDim.x && id < n ) atomicAdd_double(sum, x[id]*x[id]); } __host__ void norm_GPU(double* d_norm, double* d_x, size_t N, dim3 gridDim, dim3 blockDim) { setToZero<<<1,1>>>( d_norm, 1); // getting the last block's size size_t lastBlockSize = N; size_t counter = 0; if ( N % gridDim.x == 0 ) {} else { while ( lastBlockSize >= gridDim.x) { counter++; lastBlockSize -= gridDim.x; } } // sum of squares for the full blocks // sumOfSquare_GPU<<<gridDim.x - 1, blockDim>>>(d_norm, d_x, N); // TODO: check, this is the original sumOfSquare_GPU<<<gridDim.x - 1, blockDim>>>(d_norm, d_x, (gridDim.x - 1)*blockDim.x); // sum of squares for the last incomplete block LastBlockSumOfSquare_GPU<<<1, lastBlockSize>>>(d_norm, d_x, N, counter); // cudaDeviceSynchronize(); sqrt_GPU<<<1,1>>>( d_norm ); // cudaDeviceSynchronize(); } /// Helper functions for debugging __global__ void print_GPU(double* x) { printf("[GPU] x = %e\n", *x); } __global__ void print_GPU(int* x) { printf("[GPU] x = %d\n", *x); } __global__ void print_GPU(size_t* x) { printf("[GPU] x = %lu\n", *x); } __global__ void print_GPU(bool* x) { printf("[GPU] x = %d\n", *x); } __global__ void printLinearVector_GPU(size_t* x, size_t i, size_t num_rows, size_t num_cols) { for ( int j = 0 ; j < num_cols ; j++ ) printf("%lu ", x[j+i*num_cols]); printf("\n"); } __global__ void printLinearVector_GPU(double* x, size_t i, size_t num_rows, size_t num_cols) { for ( int j = 0 ; j < num_cols ; j++ ) printf("%f ", x[j+i*num_cols]); printf("\n"); } __host__ void printLinearVector(size_t* x, size_t num_rows, size_t num_cols) { for(int i = 0 ; i < num_rows ; i++ ) { printLinearVector_GPU<<<1,1>>>(x, i, num_rows, num_cols); cudaDeviceSynchronize(); } } __host__ void printLinearVector(double* x, size_t num_rows, size_t num_cols) { for(int i = 0 ; i < num_rows ; i++ ) { printLinearVector_GPU<<<1,1>>>(x, i, num_rows, num_cols); cudaDeviceSynchronize(); } } __global__ void printVector_GPU(double* x) { int id = blockDim.x * blockIdx.x + threadIdx.x; printf("[GPU] x[%d] = %e\n", id, x[id]); } __global__ void printVector_GPU(double* x, size_t num_rows) { int id = blockDim.x * 
blockIdx.x + threadIdx.x; if ( id < num_rows ) printf("%d %e\n", id, x[id]); } __global__ void printVector_GPU(std::size_t* x, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) printf("%d %lu\n", id, x[id]); } __global__ void printVector_GPU(int* x) { int id = blockDim.x * blockIdx.x + threadIdx.x; printf("[GPU] x[%d] = %d\n", id, x[id]); } __global__ void printELL_GPU(double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols) { for ( int i = 0 ; i < num_rows ; i++) { for ( int j = 0 ; j < num_cols ; j++) printf("%f ", valueAt(i, j, value, index, max_row_size) ); printf("\n"); } } __global__ void printELLrow_GPU(size_t row, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols) { for ( int j = 0 ; j < num_cols ; j++) printf("%f ", valueAt(row, j, value, index, max_row_size) ); printf("\n"); } __host__ void printELLrow(size_t lev, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols) { for ( size_t i = 0 ; i < num_rows ; i++ ) { printELLrow_GPU<<<1,1>>> (i, value, index, max_row_size, num_rows, num_cols); cudaDeviceSynchronize(); } } // (scalar) a = b __global__ void equals_GPU(double* a, double* b) { *a = *b; } // x = a * b __global__ void dotProduct_GPU(double* x, double* a, double* b, size_t num_rows) { unsigned int id = threadIdx.x + blockDim.x*blockIdx.x; unsigned int stride = blockDim.x*gridDim.x; __shared__ double cache[1024]; double temp = 0.0; // filling in the shared variable while(id < num_rows){ temp += a[id]*b[id]; id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ atomicAdd_double(x, cache[0]); } __syncthreads(); } __global__ void LastBlockDotProduct(double* dot, double* x, double* y, size_t starting_index) { int id = threadIdx.x + blockDim.x*blockIdx.x + starting_index; atomicAdd_double(dot, x[id]*y[id]); } // dot = a[] * b[] __host__ void dotProduct(double* dot, double* a, double* b, size_t N, dim3 gridDim, dim3 blockDim) { setToZero<<<1,1>>>( dot, 1 ); // getting the last block's size size_t lastBlockSize = blockDim.x - ( (gridDim.x * blockDim.x ) - N ); if ( N < blockDim.x) { LastBlockDotProduct<<<1, N>>>( dot, a, b, 0 ); } else { // dot products for the full blocks dotProduct_GPU<<<gridDim.x - 1, blockDim>>>(dot, a, b, (gridDim.x - 1)*blockDim.x ); // dot products for the last incomplete block LastBlockDotProduct<<<1, lastBlockSize>>>(dot, a, b, ( (gridDim.x - 1) * blockDim.x ) ); } } // x = y / z __global__ void divide_GPU(double *x, double *y, double *z) { *x = *y / *z; } // x += y __global__ void add_GPU(double *x, double *y) { *x += *y; } // x -= y __global__ void minus_GPU(double *x, double *y) { *x -= *y; } // x += c __global__ void addVector_GPU(double *x, double *c, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) x[id] += c[id]; } __global__ void transformToELL_GPU(double *array, double *value, size_t *index, size_t max_row_size, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { size_t counter = id*max_row_size; size_t nnz = 0; // printf("array = %e\n", array [ 1 ]); for ( int j = 0 ; nnz < max_row_size ; j++ ) { if ( array [ j + id*num_rows ] != 0 ) { // printf("array = %e\n", array [ j + id*num_rows ]); value [counter] = array [ j + id*num_rows ]; index [counter] = j; // 
printf("value = %e\n", value[counter]); counter++; nnz++; } if ( j == num_rows - 1 ) { for ( ; nnz < max_row_size ; counter++ && nnz++ ) { value [counter] = 0.0; index [counter] = num_rows; } } } } } std::size_t getMaxRowSize(vector<vector<double>> &array, size_t num_rows, size_t num_cols) { std::size_t max_row_size = 0; for ( int i = 0; i < num_rows ; i++ ) { std::size_t max_in_row = 0; for ( int j = 0 ; j < num_cols ; j++ ) { if ( array[i][j] < -1.0e-8 || array[i][j] > 1.0e-8 ) max_in_row++; } if ( max_in_row >= max_row_size ) max_row_size = max_in_row; } return max_row_size; } // transforms a 2D array into ELLPACK's vectors value and index // max_row_size has to be determined prior to this void transformToELL(vector<vector<double>> &array, vector<double> &value, vector<size_t> &index, size_t max_row_size, size_t num_rows, size_t num_cols ) { size_t nnz; for ( int i = 0 ; i < num_rows ; i++) { nnz = 0; // printf("array = %e\n", array [ 1 ]); for ( int j = 0 ; nnz < max_row_size ; j++ ) { if ( array[i][j] < -1.0e-8 || array[i][j] > 1.0e-8 ) { // printf("array = %e\n", array [ j + id*num_rows ]); value.push_back(array[i][j]); index.push_back(j); nnz++; } if ( j == num_cols - 1 ) { for ( ; nnz < max_row_size ; nnz++ ) { value.push_back(0.0); index.push_back(num_rows); } } } } } //TEMP: // sets identity rows and columns of the DOF in which a BC is applied void applyMatrixBC(vector<vector<double>> &array, size_t index, size_t num_rows, size_t dim) { // index *= dim; // for ( int j = 0 ; j < dim ; j++ ) // { for ( int i = 0 ; i < num_rows ; i++ ) { array[i][index] = 0.0; array[index][i] = 0.0; } array[index][index] = 1.0; // } } __host__ void applyMatrixBC(double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols, size_t dim, size_t bc_index) { } // a = b __global__ void vectorEquals_GPU(double* a, double* b, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) a[id] = b[id]; } //////////////////////////////////////////// // ASSEMBLER //////////////////////////////////////////// __host__ vector<vector<size_t>> applyBC(vector<size_t> N, size_t numLevels, size_t bc_case, size_t dim) { vector<vector<size_t>> bc_index(numLevels); vector<size_t> nodesPerDim; // nodesPerDim.push_back(N[0]+1); // nodesPerDim.push_back(N[1]+1); for( int i = 0 ; i < N.size() ; i++ ) nodesPerDim.push_back(N[i]+1); // base level size_t totalNodes2D = nodesPerDim[0]*nodesPerDim[1]; for ( int i = 0 ; i < nodesPerDim[1] ; i++ ) { bc_index[0].push_back(i*nodesPerDim[0]*dim); if ( dim == 3 ) { for ( int j = 1 ; j < nodesPerDim[2] ; j++ ) bc_index[0].push_back(i*nodesPerDim[0]*dim + totalNodes2D*3*j); } } // y-direction boundary condition at bottom right node bc_index[0].push_back(dim*N[0] + 1 ); if ( dim == 3 ) { for ( int j = 1 ; j < nodesPerDim[2] ; j++ ) bc_index[0].push_back(dim*N[0] + 1 + totalNodes2D*3*j); } // finer levels for ( int lev = 1 ; lev < numLevels ; lev++ ) { for( int i = 0 ; i < N.size() ; i++ ) nodesPerDim[i] = 2*nodesPerDim[i] - 1; totalNodes2D = nodesPerDim[0]*nodesPerDim[1]; for ( int i = 0 ; i < nodesPerDim[1] ; i++ ) { bc_index[lev].push_back(i*nodesPerDim[0]*dim); if ( dim == 3 ) { for ( int j = 1 ; j < nodesPerDim[2] ; j++ ) bc_index[lev].push_back(i*nodesPerDim[0]*dim + totalNodes2D*3*j); } } // y-direction boundary condition at bottom right node bc_index[lev].push_back(nodesPerDim[0]*dim - (dim-1)); if ( dim == 3 ) { for ( int j = 1 ; j < nodesPerDim[2] ; j++ ) bc_index[lev].push_back(dim*nodesPerDim[0] - (dim-1) + totalNodes2D*3*j); } } 
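// Summary of what ends up in bc_index on every level:
//  - the x-DOF of every node on the left edge (x == 0), repeated for each
//    z-layer when dim == 3, and
//  - the y-DOF of the bottom-right corner node (again per z-layer in 3D).
// These are the DOFs whose matrix rows/columns are later fixed by
// applyMatrixBC_GPU / applyMatrixBC_GPU_test.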
return bc_index; } __host__ void applyLoad(vector<double> &b, vector<size_t> N, size_t numLevels, size_t bc_case, size_t dim, double force) { vector<size_t> nodesPerDim; for ( int i = 0 ; i < N.size() ; i++) nodesPerDim.push_back(N[i]+1); size_t index = 0; for ( int lev = 0 ; lev < numLevels - 1 ; lev++) { for ( int i = 0 ; i < N.size() ; i++) nodesPerDim[i] = 2*nodesPerDim[i] - 1; } index = dim * nodesPerDim[0] * ( nodesPerDim[1] - 1 ) + 1; b[index] = force; if ( dim == 3 ) { for ( int i = 1 ; i < nodesPerDim[2] ; i++ ) { index = index + (nodesPerDim[0]*nodesPerDim[1])*dim; b[index] = force; } } } __global__ void assembleGrid2D_GPU( size_t N, // number of elements per row size_t dim, // dimension double* chi, // the updated design variable value of each element double* A_local, // local stiffness matrix double* value, // global element's ELLPACK value vector size_t* index, // global element's ELLPACK index vector size_t max_row_size, // global element's ELLPACK maximum row size size_t num_rows, // global element's ELLPACK number of rows size_t* node_index, // vector that contains the corresponding global indices of the node's local indices size_t p ) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx < num_rows && idy < num_rows ) { size_t local_num_cols = pow(2,dim) * dim; addAt( dim*node_index[ idx/dim ] + ( idx % dim ), dim*node_index[idy/dim] + ( idy % dim ), value, index, max_row_size, pow(*chi,p)*A_local[ ( idx + idy*local_num_cols ) ] ); } // addAt( 2*node_index[ idx/2 ] + ( idx % 2 ), 2*node_index[idy/2] + ( idy % 2 ), value, index, max_row_size, pow(*chi,p)*A_local[ ( idx + idy * ( 4 * dim ) ) ] ); // addAt( 2*node_index[ idx/2 ] + ( idx % 2 ), 2*node_index[idy/2] + ( idy % 2 ), value, index, max_row_size, A_local[ ( idx + idy * ( 4 * dim ) ) ] ); // if ( idx == 0 && idy == 0 ) // printf("%e\n", *chi); } __global__ void applyMatrixBC_GPU(double* value, size_t* index, size_t max_row_size, size_t bc_index, size_t num_rows) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx == bc_index && idy == bc_index ) setAt( idx, idy, value, index, max_row_size, 1.0 ); } // CHECK: overkill to use this many threads? 
__global__ void applyMatrixBC_GPU_test(double* value, size_t* index, size_t max_row_size, size_t bc_index, size_t num_rows, size_t num_cols) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; // printf("(%d, %d) = %lu, %d, %d\n", idx, idy, bc_index, num_rows, num_cols); if ( idx < num_cols && idy < num_rows ) { if ( idx == bc_index && idy == bc_index ) { for ( int i = 0 ; i < num_rows ; i++ ) setAt( i, idy, value, index, max_row_size, 0.0 ); for ( int j = 0 ; j < num_cols ; j++ ) setAt( idx, j, value, index, max_row_size, 0.0 ); setAt( idx, idy, value, index, max_row_size, 1.0 ); } } } // obtain a node's corresponding fine node index __host__ size_t getFineNode(size_t index, vector<size_t> N, size_t dim) { // check for error size_t num_nodes = N[0] + 1; for ( int i = 1 ; i < dim ; i++ ) num_nodes *= (N[i] + 1); if ( index > num_nodes - 1 ) throw(runtime_error("Error : Index does not exist on this level")); if ( dim == 3 ) { size_t twoDimSize = (N[0]+1)*(N[1]+1); size_t baseindex = index % twoDimSize; size_t base_idx = baseindex % (N[0]+1); size_t fine2Dsize = (2*N[0]+1)*(2*N[1]+1); size_t multiplier = index/twoDimSize; // return 2*multiplier*fine2Dsize + (2*( baseindex % twoDimSize ) + (ceil)(baseindex/2)*2) ; return 2*base_idx + (baseindex/(N[0]+1))*2*(2*N[0] + 1) + 2*fine2Dsize*multiplier; } else return (2 * (ceil)(index / (N[0] + 1)) * (2*N[0] + 1) + 2*( index % (N[0]+1)) ); } // input the coarse node's "index" to obtain the node's corresponding fine node index __device__ size_t getFineNode_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz, size_t dim) { // size_t num_nodes = (Nx + 1)*(Ny + 1)*(Nz + 1); if ( dim == 3 ) { size_t twoDimSize = (Nx+1)*(Ny+1); size_t baseindex = index % twoDimSize; size_t base_idx = baseindex % (Nx+1); size_t fine2Dsize = (2*Nx+1)*(2*Ny+1); size_t multiplier = index/twoDimSize; return 2*base_idx + (baseindex/(Nx+1))*2*(2*Nx + 1) + 2*fine2Dsize*multiplier; // return 2*multiplier*fine2Dsize + (2*( baseindex ) + (baseindex/2)*2) ; // return 2*multiplier*fine2Dsize + (2*( baseindex % twoDimSize ) + (baseindex/2)*2) ; } else return (2 * (index / (Nx + 1)) * (2*Nx + 1) + 2*( index % (Nx+1)) ); } __global__ void fillRestMatrix(double* r_value, size_t* r_index, size_t r_max_row_size, double* p_value, size_t* p_index, size_t p_max_row_size, size_t num_rows, size_t num_cols) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx < num_cols && idy < num_rows ) setAt_( r_index[idx + idy*r_max_row_size], idy, r_value, r_index, num_cols, r_max_row_size, valueAt(r_index[idx + idy*r_max_row_size], idy, p_value, p_index, p_max_row_size)); } // //////////////////////////////////////////// // // SMOOTHERS // //////////////////////////////////////////// __global__ void Jacobi_Precond_GPU(double* c, double* value, size_t* index, size_t max_row_size, double* r, size_t num_rows, double damp){ int id = blockDim.x * blockIdx.x + threadIdx.x; // B = damp / diag(A); if ( id < num_rows ) c[id] = r[id] * damp / valueAt(id, id, value, index, max_row_size); } // //////////////////////////////////////////// // // SOLVER // //////////////////////////////////////////// __global__ void checkIterationConditions(bool* foo, size_t* step, double* res, double* res0, double* m_minRes, double* m_minRed, size_t m_maxIter){ if ( *res > *m_minRes && *res > *m_minRed*(*res0) && (*step) <= m_maxIter ) *foo = true; else { // printf("false\n"); // printf("res = %f\n",*res); // printf("m_minRes = %f\n",*m_minRes); 
// printf("m_minRed = %f\n",*m_minRed); // printf("step = %lu\n",(*step)); *foo = false; } } __global__ void printInitialResult_GPU(double* res0, double* m_minRes, double* m_minRed) { printf(" 0 %e %9.3e ----- -------- %9.3e \n", *res0, *m_minRes, *m_minRed); } /// r = b - A*x __global__ void ComputeResiduum_GPU( const std::size_t num_rows, const std::size_t num_cols_per_row, const double* value, const std::size_t* index, const double* x, double* r, double* b) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { double dot = 0.0; for ( int n = 0; n < num_cols_per_row; n++ ) { int col = index [ num_cols_per_row * id + n ]; double val = value [ num_cols_per_row * id + n ]; dot += val * x [ col ]; } r[id] = b[id] - dot; } } /// r = r - A*x __global__ void UpdateResiduum_GPU( const std::size_t num_rows, const std::size_t num_cols_per_row, const double* value, const std::size_t* index, const double* x, double* r) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { double dot = 0.0; for ( int n = 0; n < num_cols_per_row; n++ ) { std::size_t col = index [ num_cols_per_row * id + n ]; double val = value [ num_cols_per_row * id + n ]; dot += val * x [ col ]; } r[id] = r[id] - dot; } } /// r = A*x __global__ void Apply_GPU( const std::size_t num_rows, const std::size_t num_cols_per_row, const double* value, const std::size_t* index, const double* x, double* r) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { double dot = 0; for ( int n = 0; n < num_cols_per_row; n++ ) { int col = index [ num_cols_per_row * id + n ]; double val = value [ num_cols_per_row * id + n ]; dot += val * x [ col ]; } r[id] = dot; } } /// r = A^T * x /// NOTE: This kernel should be run with A's number of rows as the number of threads /// e.g., r's size = 9, A's size = 25 x 9, x's size = 25 /// ApplyTransposed_GPU<<<1, 25>>>() __global__ void ApplyTransposed_GPU( const std::size_t num_rows, const std::size_t num_cols_per_row, const double* value, const std::size_t* index, const double* x, double* r) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { for ( int n = 0; n < num_cols_per_row; n++ ) { int col = index [ num_cols_per_row * id + n ]; double val = value [ num_cols_per_row * id + n ]; atomicAdd_double( &r[col], val*x[id] ); } } } __global__ void printResult_GPU(size_t* step, double* res, double* m_minRes, double* lastRes, double* res0, double* m_minRed) { if(*step < 10) printf(" %d %e %9.3e %9.3e %e %9.3e \n", *step, *res, *m_minRes, (*res)/(*lastRes), (*res)/(*res0), *m_minRed); else printf(" %d %e %9.3e %9.3e %e %9.3e \n", *step, *res, *m_minRes, (*res)/(*lastRes), (*res)/(*res0), *m_minRed); } __global__ void addStep(size_t* step){ ++(*step); } // BASE SOLVER // p = z + p * beta; __global__ void calculateDirectionVector( size_t* d_step, double* d_p, double* d_z, double* d_rho, double* d_rho_old, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) { // if(step == 1) p = z; if(*d_step == 1) { d_p[id] = d_z[id]; } else { // p *= (rho / rho_old) d_p[id] = d_p[id] * ( *d_rho / (*d_rho_old) ); // __syncthreads(); // p += z; d_p[id] = d_p[id] + d_z[id]; } } } // A_ = P^T * A * P __host__ void PTAP(vector<vector<double>> &A_, vector<vector<double>> &A, vector<vector<double>> &P, size_t num_rows, size_t num_rows_) { // temp vectors std::vector<std::vector<double>> foo ( num_rows, std::vector <double> (num_rows_, 0.0)); // foo = A * P for ( int i = 0 ; i < num_rows ; i++ ) { for( int j = 0 ; j < num_rows_ ; 
j++ ) { for ( int k = 0 ; k < num_rows ; k++) { // cout << "PTAP-ijk = " << i << " " << j << " " << k << endl; foo[i][j] += A[i][k] * P[k][j]; } } } // A_ = P^T * foo for ( int i = 0 ; i < num_rows_ ; i++ ) { for( int j = 0 ; j < num_rows_ ; j++ ) { for ( int k = 0 ; k < num_rows ; k++) A_[i][j] += P[k][i] * foo[k][j]; } } // temp vectors // std::vector<std::vector<double>> foo ( num_rows, std::vector <double> (num_rows_, 0.0)); // double** foo = new double*[num_rows]; // for(int i = 0; i < num_rows; i++) // { // foo[i] = new double[num_rows_]; // } // for ( int i = 0 ; i < num_rows ; i++ ) // { // for( int j = 0 ; j < num_rows_ ; j++ ) // { // foo[i][j] = 0; // } // } // // foo = A * P // for ( int i = 0 ; i < num_rows ; i++ ) // { // for( int j = 0 ; j < num_rows_ ; j++ ) // { // for ( int k = 0 ; k < num_rows ; k++) // { // // cout << "PTAP-ijk = " << i << " " << j << " " << k << endl; // foo[i][j] += A[i][k] * P[k][j]; // } // } // } // // A_ = P^T * foo // for ( int i = 0 ; i < num_rows_ ; i++ ) // { // for( int j = 0 ; j < num_rows_ ; j++ ) // { // for ( int k = 0 ; k < num_rows ; k++) // A_[i][j] += P[k][i] * foo[k][j]; // } // } // for ( int i = 0 ; i < num_rows ; i++ ) // { // for( int j = 0 ; j < num_rows_ ; j++ ) // cout << foo[i][j] << " "; // cout << endl; // } // for(int i = 0; i < num_rows; i++) // { // delete [] foo[i]; // } // delete [] foo; } __host__ void calculateAlpha( double* d_alpha, double* d_rho, double* d_p, double* d_z, double* d_alpha_temp, size_t num_rows, dim3 gridDim, dim3 blockDim) { setToZero<<<1,1>>>( d_alpha_temp, 1); // alpha_temp = () p * z ) dotProduct(d_alpha_temp, d_p, d_z, num_rows, gridDim, blockDim); // d_alpha = *d_rho / (*alpha_temp) divide_GPU<<<1,1>>>(d_alpha, d_rho, d_alpha_temp); } // x = x + alpha * p __global__ void axpy_GPU(double* d_x, double* d_alpha, double* d_p, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) d_x[id] += (*d_alpha * d_p[id]); } // x = x - alpha * p __global__ void axpy_neg_GPU(double* d_x, double* d_alpha, double* d_p, size_t num_rows) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < num_rows ) d_x[id] = d_x[id] - (*d_alpha * d_p[id]); } //// TDO // df = ( 1/2*omega ) * p * chi^(p-1) * sum(local stiffness matrices) __global__ void UpdateDrivingForce(double *df, double* uTau, double p, double *chi, double local_volume, size_t N) { int id = blockDim.x * blockIdx.x + threadIdx.x; if ( id < N ) df[id] = uTau[id] * ( local_volume / (2*local_volume) ) * p * pow(chi[id], p - 1); // df[id] = uTKu[id] * ( 1 / (2*local_volume) ) * p * pow(chi[id], p - 1); } // x[] = u[]^T * A * u[] __global__ void calcDrivingForce_GPU(double *x, double *u, double* chi, double p, size_t *node_index, double* d_A_local, size_t num_rows, size_t dim, double local_volume) { double temp[24]; //CHECK: *x = 0; for ( int n = 0; n < num_rows; n++ ) { temp[n]=0; for ( int m = 0; m < num_rows; m++) { // converts local node to global node int global_col = ( node_index [ m / dim ] * dim ) + ( m % dim ); // printf("u[%d] = %f\n", global_col, u[global_col]); temp[n] += u[global_col] * d_A_local[ n + m*num_rows ]; } } for ( int n = 0; n < num_rows; n++ ) { int global_col = ( node_index [ n / dim ] * dim ) + ( n % dim ); *x += temp[n] * u[global_col]; } *x *= 0.5 * p * pow(*chi, p-1) / local_volume; } // calculate the driving force per element __host__ void calcDrivingForce( double *df, // driving force double *chi, // design variable double p, // penalization parameter double *uTAu, // dummy/temp vector double 
*u, // elemental displacement vector vector<size_t*> node_index, double* d_A_local, size_t num_rows, // local ELLPack stiffness matrix's number of rows dim3 gridDim, // grid and dim3 blockDim, const size_t dim, size_t numElements, // block sizes needed for running CUDA kernels double local_volume ) { // calculate the driving force in each element ( 1 element per thread ) // df[] = (0.5/local_volume) * p * pow(chi,p-1) - u[]^T * A_local * u[] for ( int i = 0 ; i < numElements; i++ ) calcDrivingForce_GPU<<<1, 1>>>(&df[i], u, &chi[i], p, node_index[i], d_A_local, num_rows, dim, local_volume); cudaDeviceSynchronize(); } __global__ void sumOfVector_GPU(double* sum, double* x, size_t n) { int id = blockDim.x * blockIdx.x + threadIdx.x; int stride = blockDim.x*gridDim.x; // if ( id < n ) // printf("%d : %e\n", id, x[id]); __shared__ double cache[1024]; cache[threadIdx.x] = 0; double temp = 0.0; while(id < n) { temp += x[id]; id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } // reduce sum from all blocks' cache if(threadIdx.x == 0) atomicAdd_double(sum, cache[0]); } __global__ void calcDrivingForce_( double *df, // driving force double *chi, // design variable double p, // penalization parameter double *u, // elemental displacement vector size_t* node_index, double* d_A_local, size_t num_rows, // local ELLPack stiffness matrix's number of rows size_t dim) { int id = blockDim.x * blockIdx.x + threadIdx.x; double uTAu; if ( id < num_rows ) { uTAu = 0; // uTAu = uT * A for ( int n = 0; n < num_rows; n++ ) { // converts local node to global node int global_col = ( node_index [ n / dim ] * dim ) + ( n % dim ); uTAu += u[global_col] * d_A_local[ id + n*num_rows ]; } // uTAu *= u uTAu *= u[ ( node_index [ id / dim ] * dim ) + ( id % dim ) ]; df[id] = uTAu * (p) * pow(chi[id], (p-1)); } } __device__ double laplacian_GPU( double *array, size_t ind, size_t Nx, size_t Ny, size_t Nz, double h ) { bool east = ( (ind + 1) % Nx != 0 ); bool north = ( ind + Nx < Nx*Ny ); bool west = ( ind % Nx != 0 ); bool south = ( ind >= Nx ); double value = -4.0 * array[ind]; // east element if ( east ) value += 1.0 * array[ind + 1]; else value += 1.0 * array[ind]; // north element if ( north ) value += 1.0 * array[ind + Nx]; else value += 1.0 * array[ind]; // west element if ( west ) value += 1.0 * array[ind - 1]; else value += 1.0 * array[ind]; // south element if ( south ) value += 1.0 * array[ind - Nx]; else value += 1.0 * array[ind]; return value/(h*h); } __global__ void calcLambdaUpper(double *df_array, double *max, int *mutex, double* beta, double *chi, double* eta, int Nx, int Ny, int Nz, unsigned int numElements, double h) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ double cache[1024]; *max = -1.0e9; *mutex = 0; double temp = -1.0e9; while(index + offset < numElements){ //TODO:DEBUG: temp = fmaxf(temp, ( df_array[index + offset] + ( *beta * laplacian_GPU( chi, index, Nx, Ny, Nz, h ) ) ) ); // temp = fmaxf(temp, ( df_array[index + offset] + *eta ) ); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *max = 
fmaxf(*max, cache[0]); atomicExch(mutex, 0); //unlock } } __global__ void calcLambdaLower(double *df_array, double *min, int *mutex, double* beta, double *chi, double* eta, int Nx, int Ny, int Nz, unsigned int numElements, double h) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ double cache[1024]; *min = 1.0e9; *mutex = 0; double temp = 1.0e9; if ( index < numElements ) { while(index + offset < numElements){ temp = fminf(temp, ( df_array[index + offset] + ( *beta * laplacian_GPU( chi, index, Nx, Ny, Nz, h ) ) ) ); // temp = fminf(temp, ( df_array[index + offset] - *eta ) ); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fminf(cache[threadIdx.x], cache[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *min = fminf(*min, cache[0]); atomicExch(mutex, 0); //unlock } } } __global__ void calcChiTrial( double *chi, double *df, double *lambda_trial, double del_t, double* eta, double* beta, double* chi_trial, size_t Nx, size_t Ny, size_t Nz, size_t numElements, double h ) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if ( id < numElements ) { double del_chi; del_chi = ( del_t / *eta ) * ( df[id] - *lambda_trial + (*beta)*( laplacian_GPU( chi, id, Nx, Ny, Nz, h ) ) ); if ( del_chi + chi[id] > 1 ) chi_trial[id] = 1; else if ( del_chi + chi[id] < 1e-9 ) chi_trial[id] = 1e-9; else chi_trial[id] = del_chi + chi[id]; } } __global__ void calcLambdaTrial(double *rho_trial, double rho, double *lambda_l, double *lambda_u, double *lambda_trial) { if ( *rho_trial > rho ) *lambda_l = *lambda_trial; else *lambda_u = *lambda_trial; *lambda_trial = 0.5 * ( *lambda_l + *lambda_u ); } __global__ void calcRhoTrial(double* rho_tr, double local_volume, size_t numElements) { double total_volume = local_volume * numElements; *rho_tr *= local_volume; *rho_tr /= total_volume; } // NOTE: shelved for now __global__ void int_g_p(double* d_temp, double* d_df, double local_volume, size_t numElements) { // unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; // if( id < numElements) // { // // calculate g of element // d_temp[id] = (d_chi[id] - 1e-9)*(1-d_chi[id]) * d_df[id] * local_volume; // } } // calculate the average weighted driving force, p_w __global__ void calcP_w_GPU(double* p_w, double* df, double* uTAu, double* chi, int p, double local_volume, size_t numElements) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; __shared__ double int_g_p[1024]; __shared__ double int_g[1024]; if( id < numElements) { df[id] = uTAu[id] * ( local_volume / (2*local_volume) ) * p * pow(chi[id], p - 1); int_g_p[id] = (chi[id] - 1e-9)*(1-chi[id]) * df[id] * local_volume; int_g[id] = (chi[id] - 1e-9)*(1-chi[id]) * local_volume; __syncthreads(); // atomicAdd_double(&d_temp[0], int_g_p[id]); // atomicAdd_double(&d_temp[1], int_g[id]); if ( id == 0 ) { for ( int i = 1 ; i < numElements ; ++i ) int_g_p[0] += int_g_p[i]; } if ( id == 1 ) { for ( int i = 1 ; i < numElements ; ++i ) int_g[0] += int_g[i]; } __syncthreads(); if ( id == 0 ) *p_w = int_g_p[0] / int_g[0]; } } __global__ void calc_g_GPU(double*g, double* chi, size_t numElements, double local_volume) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if (id < numElements) { g[id] = (chi[id] - 1e-9)*(1-chi[id]) * local_volume; // if ( id == 0 ) // printf("%f\n", g[id]); } } // sum = sum ( df * g * 
local_volume) __global__ void calcSum_df_g_GPU(double* sum, double* df, double* g, size_t numElements) { int id = blockDim.x * blockIdx.x + threadIdx.x; int stride = blockDim.x*gridDim.x; // if ( id < n ) // printf("%d : %e\n", id, x[id]); __shared__ double cache[1024]; cache[threadIdx.x] = 0; double temp = 0.0; while(id < numElements) { temp += df[id]*g[id]; // local volume is already included in g, i.e. g = g*local_volume id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } // reduce sum from all blocks' cache if(threadIdx.x == 0) atomicAdd_double(sum, cache[0]); } __host__ void calcP_w(double* p_w, double* sum_g, double* sum_df_g, double* df, double* chi, double* g, double* df_g, size_t numElements, double local_volume) { dim3 gridDim; dim3 blockDim; calculateDimensions(numElements, gridDim, blockDim); // calculate g of each element * local_volume calc_g_GPU<<<gridDim, blockDim>>>(g, chi, numElements, local_volume); // calculate sum_g = sum(g) sumOfVector_GPU<<<gridDim, blockDim>>>(sum_g, g, numElements); // sum_df_g = sum( g[i]*df[i]*local_volume ) calcSum_df_g_GPU<<<gridDim, blockDim>>>(sum_df_g, df, g, numElements); // p_w = sum_df_g / sum_g divide_GPU<<<1,1>>>(p_w, sum_df_g, sum_g); } __global__ void calcEtaBeta( double* eta, double* beta, double etastar, double betastar, double* p_w ) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if ( id == 0 ) *eta = etastar * (*p_w); if ( id == 1 ) *beta = betastar * (*p_w); } __global__ void RA( double* r_value, // restriction matrix's size_t* r_index, // ELLPACK vectors size_t r_max_row_size, double* value, // global stiffness matrix's size_t* index, // ELLPACK vectors size_t max_row_size, double* temp_matrix, // empty temp matrix size_t num_rows, // no. of rows of temp matrix size_t num_cols // no. of cols of temp matrix ) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; unsigned int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx < num_cols && idy < num_rows ) { for ( int j = 0 ; j < num_cols ; ++j ) temp_matrix[idx + idy*num_cols] += valueAt(idy, j, r_value, r_index, r_max_row_size) * valueAt(j, idx, value, index, max_row_size); //TODO: R matrix, no need valueAt, direct lookup } } __global__ void AP( double* value, // coarse global stiffness matrix's size_t* index, // ELLPACK vectors size_t max_row_size, double* p_value, // prolongation matrix's size_t* p_index, // ELLPACK vectors size_t p_max_row_size, double* temp_matrix, // temp_matrix = R*A size_t num_rows, // no. of rows of temp matrix size_t num_cols // no. 
of cols of temp matrix ) { unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x; unsigned int idy = threadIdx.y + blockIdx.y*blockDim.y; if ( idx < num_cols && idy < num_rows ) { for ( int j = 0 ; j < num_cols ; ++j ) addAt( idx, idy, value, index, max_row_size, temp_matrix[j + idy*num_cols] * valueAt(j, idx, p_value, p_index, p_max_row_size) ); } // if ( num_cols == 42 ) // { // for ( int j = 0 ; j < num_cols ; ++j ) // addAt( idx, idy, value, index, max_row_size, temp_matrix[j + idy*num_cols] * valueAt(j, idx, p_value, p_index, p_max_row_size) ); // if (idx==1 && idy==1) // { // addAt( idx, idy, value, index, max_row_size, 20.0 ); // printf("%f\n", valueAt(idx, idx, value, index, max_row_size)); // } // } // else // { // } // if ( idx == 0 && idy == 0 ) // printf("%f\n", valueAt(1, 1, value, index, max_row_size)); } // A_coarse = R * A_fine * P // TODO: not optimized yet __host__ void RAP( vector<double*> value, vector<size_t*> index, vector<size_t> max_row_size, vector<double*> r_value, vector<size_t*> r_index, vector<size_t> r_max_row_size, vector<double*> p_value, vector<size_t*> p_index, vector<size_t> p_max_row_size, double* temp_matrix, vector<size_t> num_rows, size_t lev) { // dim3 gridDim(2,2,1); // dim3 blockDim(32,32,1); dim3 gridDim; dim3 blockDim; calculateDimensions2D( num_rows[lev], num_rows[lev], gridDim, blockDim); // temp_matrix = R * A_fine RA<<<gridDim,blockDim>>>(r_value[lev-1], r_index[lev-1], r_max_row_size[lev-1], value[lev], index[lev], max_row_size[lev], temp_matrix, num_rows[lev-1], num_rows[lev]); cudaDeviceSynchronize(); // calculateDimensions2D( num_rows[0] * num_rows[0], gridDim, blockDim); AP<<<gridDim,blockDim>>>( value[lev-1], index[lev-1], max_row_size[lev-1], p_value[lev-1], p_index[lev-1], p_max_row_size[lev-1], temp_matrix, num_rows[lev-1], num_rows[lev]); cudaDeviceSynchronize(); } // TODO: CHECK: // returns value at (row, col) of matrix multiplication A*B __device__ double matMul(size_t row, size_t col, double* A_value, size_t* A_index, size_t A_max_row_size, size_t A_num_rows, double* B_value, size_t* B_index, size_t B_max_row_size, size_t b_num_rows ) { __shared__ double value; value = 0; for(int i = 0 ; i < A_max_row_size ; i++ ) { value += valueAt(row, A_index[i+A_max_row_size*row], A_value, A_index, A_max_row_size) * valueAt(A_index[i+A_max_row_size*row], col, B_value, B_index, B_max_row_size); // printf("%f %f\n ", valueAt(row, A_index[i], A_value, A_index, A_max_row_size), valueAt(A_index[i], col, B_value, B_index, B_max_row_size) ); // printf("%f\n ", valueAt(B_index[i], col, B_value, B_index, B_max_row_size) ); } // printf("%f\n ", value ); return value; } // A_coarse = R * A_fine * P __global__ void RAP_( double* value, size_t* index, size_t max_row_size, size_t num_rows, double* value_, size_t* index_, size_t max_row_size_, size_t num_rows_, double* r_value, size_t* r_index, size_t r_max_row_size, double* p_value, size_t* p_index, size_t p_max_row_size, size_t lev) { double RAP = 0; unsigned int col = threadIdx.x + blockIdx.x*blockDim.x; unsigned int row = threadIdx.y + blockIdx.y*blockDim.y; if ( row < num_rows_ && col < num_rows_ ) { for ( int i = 0 ; i < r_max_row_size ; i++ ) RAP += matMul(row, r_index[i + col*r_max_row_size], r_value, r_index, r_max_row_size, num_rows_, value, index, max_row_size, num_rows ) * valueAt(r_index[i+col*r_max_row_size], col, p_value, p_index, p_max_row_size); setAt( col, row, value_, index_, max_row_size_, RAP ); } } __global__ void checkTDOConvergence(bool* foo, double rho, double* rho_trial) { if ( 
abs(rho - *rho_trial) < 1e-7 ) *foo = false; } __global__ void fillIndexVector2D_GPU(size_t* index, size_t Nx, size_t Ny, size_t max_row_size, size_t num_rows) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; int counter = 0; int dim = 2; if ( id < num_rows ) { int base_id = (id - id%dim); // south-west if ( id >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim - dim + i; counter++; } } // south if ( id >= (Nx + 1)*dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + i; counter++; } } // south-east if ( id >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + dim + i; counter++; } } // west if ( (id) % ((Nx + 1)*dim) >= dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - dim + i; counter++; } } // origin for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + i; counter++; } // east if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + dim + i; counter++; } } // north-west if ( id < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim - dim + i; counter++; } } // north if ( id < (Nx+1)*(Ny)*dim ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + i; counter++; } } // north-east if ( base_id == 0 || id < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + dim + i; counter++; } } } } __global__ void fillIndexVector3D_GPU(size_t* index, size_t Nx, size_t Ny, size_t Nz, size_t max_row_size, size_t num_rows) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; int counter = 0; int dim = 3; if ( id < num_rows ) { size_t base_id = (id - id%dim); size_t baseid_2D = (id) % ((Nx + 1)*(Ny + 1)*dim); size_t gridsize_2D = (Nx+1)*(Ny+1)*dim; bool prev_layer = (id >= (Nx+1)*(Ny+1)*dim); bool next_layer = (id < (Nx+1)*(Ny+1)*(Nz)*dim); bool south = ((id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim); bool north = ((id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim); bool west = ((id) % ((Nx + 1)*dim) >= dim); bool east = ((base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0); //// previous layer // south-west // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( prev_layer && south && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim - dim + i - gridsize_2D; counter++; } } // south // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) if ( prev_layer && south ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + i - gridsize_2D; counter++; } } // south-east // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) if ( prev_layer && south && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + dim + i - gridsize_2D; counter++; } } // 
west // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( prev_layer && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - dim + i - gridsize_2D; counter++; } } // origin // if ( id >= (Nx+1)*(Ny+1)*dim ) if ( prev_layer ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + i - gridsize_2D; counter++; } } // east // if ( (base_id == 0 && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) || id >= (Nx+1)*(Ny+1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0) if ( prev_layer && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + dim + i - gridsize_2D; counter++; } } // north-west // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( prev_layer && north && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim - dim + i - gridsize_2D; counter++; } } // north // if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) if ( prev_layer && north ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + i - gridsize_2D; counter++; } } // north-east // if ( ((id) % ((Nx + 1)*(Ny + 1)*dim) && base_id == 0 && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) || ( (id) % ((Nx + 1)*(Ny + 1)*dim) && id >= (Nx+1)*(Ny+1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0) ) if ( prev_layer && north && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + dim + i - gridsize_2D; counter++; } } //// current layer // south-west // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( south && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim - dim + i; counter++; } } // south // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) if ( south ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + i; counter++; } } // south-east // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) if ( south && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + dim + i; counter++; } } // west // if ( (id) % ((Nx + 1)*dim) >= dim ) if ( west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - dim + i; counter++; } } // origin for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + i; counter++; } // east // if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) if ( base_id == 0 || east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + dim + i; counter++; } } // north-west // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( north && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim - dim + i; counter++; } } // north // if ( (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) if ( north ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + i; counter++; } } // north-east // if ( (base_id == 0 ) || ( (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % 
((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) ) if ( base_id == 0 || (north && east ) ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + dim + i; counter++; } } //// next layer // south-west // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( next_layer && south && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim - dim + i + gridsize_2D; counter++; } } // south // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) if ( next_layer && south ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + i + gridsize_2D; counter++; } } // south-east // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) if ( next_layer && south && east ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - (Nx+1)*dim + dim + i + gridsize_2D; counter++; } } // west // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( next_layer && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) - dim + i + gridsize_2D; counter++; } } // origin // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim ) if ( next_layer ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + i + gridsize_2D; counter++; } } // east // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 || base_id == 0 ) if ( base_id == 0 || ( next_layer && east ) ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + dim + i + gridsize_2D; counter++; } } // north-west // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) if ( next_layer && north && west ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim - dim + i + gridsize_2D; counter++; } } // north // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) if ( next_layer && north ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + i + gridsize_2D; counter++; } } // north-east // if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 || base_id == 0 ) if ( base_id == 0 || (next_layer && north && east ) ) { for(int i = 0 ; i < dim ; i++) { index[counter + id*max_row_size] = (id - id%dim) + (Nx+1)*dim + dim + i + gridsize_2D; counter++; } } for ( int i = counter ; i < max_row_size; i++) { index[i + id*max_row_size] = num_rows; } // if ( id == 0 ) // { // for ( int i = 0 ; i < max_row_size ; i++ ) // printf( "%lu ", index[i + id*max_row_size] ); // printf("\n"); // } } } __global__ void fillProlMatrix2D_GPU(double* p_value, size_t* p_index, size_t Nx, size_t Ny, size_t p_max_row_size, size_t num_rows, size_t num_cols) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if ( id < num_rows ) { int counter = 0; int dim = 2; // coarse grid size_t Nx_ = Nx / 2; size_t Ny_ = Ny / 2; size_t base_id = (id - id%dim); size_t node_index = base_id / dim; int coarse_node_index = getCoarseNode_GPU(node_index, Nx, Ny, 0, dim); // if node is even numbered bool condition1 = (node_index % 2 == 0 ); // if node 
exists in the coarse grid bool condition2 = ( node_index % ((Nx+1)*2) < (Nx + 1) ); bool south = ( id >= (Nx + 1)*dim ); bool west = ( (id) % ((Nx + 1)*dim) >= dim ); bool east = ( (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ); bool north = ( id < (Nx+1)*(Ny)*dim ); // if there exists a coarse node in the same location if ( getFineNode_GPU(coarse_node_index, Nx_, Ny_, 0, dim) == node_index ) { p_index[counter + id*p_max_row_size] = coarse_node_index*dim + id%dim; p_value[counter + id*p_max_row_size] = 1; counter++; } else { // south-west if ( south && condition1 && !condition2 && west ) { size_t south_west_fine_node = (node_index - (Nx+1) - 1); size_t south_west_coarse_node = getCoarseNode_GPU(south_west_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = south_west_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.25 ; counter++; } // south if ( south && !condition1 && !condition2 ) { size_t south_fine_node = (node_index - (Nx+1) ); size_t south_coarse_node = getCoarseNode_GPU(south_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = south_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.5 ; counter++; } // south-east if ( south && condition1 && !condition2 && east ) { size_t south_east_fine_node = (node_index - (Nx+1) + 1); size_t south_east_coarse_node = getCoarseNode_GPU(south_east_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = south_east_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.25 ; counter++; } // west if ( west && condition2 ) { size_t west_fine_node = (node_index - 1); size_t west_coarse_node = getCoarseNode_GPU(west_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = west_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.5 ; counter++; } // east if ( east && condition2 ) { size_t east_fine_node = (node_index + 1); size_t east_coarse_node = getCoarseNode_GPU(east_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = east_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.5 ; counter++; } // north-west if ( north && condition1 && !condition2 && west ) { size_t north_west_fine_node = (node_index + (Nx+1) - 1); size_t north_west_coarse_node = getCoarseNode_GPU(north_west_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = north_west_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.25 ; counter++; } // north if ( north && !condition1 && !condition2 ) { size_t north_fine_node = (node_index + (Nx+1) ); size_t north_coarse_node = getCoarseNode_GPU(north_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = north_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.5 ; counter++; } // north-east if ( north && condition1 && !condition2 && east ) { size_t north_east_fine_node = (node_index + (Nx+1) + 1); size_t north_east_coarse_node = getCoarseNode_GPU(north_east_fine_node, Nx, Ny, 0, dim); p_index[counter + id*p_max_row_size] = north_east_coarse_node*dim + id%dim ; p_value[counter + id*p_max_row_size] = 0.25 ; counter++; } } for ( int i = counter ; i < p_max_row_size; i++) { p_index[i + id*p_max_row_size] = num_cols; } } } __device__ int getCoarseNode_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz, size_t dim) { // get coarse grid dimensions size_t Nx_ = Nx / 2; size_t Ny_ = Ny / 2; size_t Nz_ = Nz / 2; // if node is even numbered bool condition1 = (index % 2 == 0 ); // if node exists in the coarse grid bool condition2 = ( index % 
((Nx+1)*2) < (Nx + 1) ); // printf("aps = %d\n", ((Nx+1)*2) ); if ( condition1 && condition2 ) { return index/2 - (index/((Nx+1)*2 ))*(Nx_); } // -1 means the node in the coarse grid does not exist else return -1; } // __global__ void fillIndexVectorProl2D_GPU(size_t* p_index, size_t Nx, size_t Ny, size_t p_max_row_size, size_t num_rows, size_t num_cols) // { // unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; // if ( id < num_rows ) // { // int counter = 0; // int dim = 2; // // coarse grid // size_t Nx_ = Nx / 2; // size_t Ny_ = Ny / 2; // size_t base_id = (id - id%dim); // size_t node_index = base_id / dim; // int coarse_node_index = getCoarseNode_GPU(node_index, Nx, Ny, 0, dim); // // if node is even numbered // bool condition1 = (node_index % 2 == 0 ); // // if node exists in the coarse grid // bool condition2 = ( node_index % ((Nx+1)*2) < (Nx + 1) ); // bool south = ( id >= (Nx + 1)*dim ); // bool west = ( (id) % ((Nx + 1)*dim) >= dim ); // bool east = ( (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ); // bool north = ( id < (Nx+1)*(Ny)*dim ); // // if there exists a coarse node in the same location // if ( getFineNode_GPU(coarse_node_index, Nx_, Ny_, 0, dim) == node_index ) // { // p_index[counter + id*p_max_row_size] = coarse_node_index*dim + id%dim; // counter++; // } // else // { // // south-west // if ( south && condition1 && !condition2 && west ) // { // size_t south_west_fine_node = (node_index - (Nx+1) - 1); // size_t south_west_coarse_node = getCoarseNode_GPU(south_west_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = south_west_coarse_node*dim + id%dim ; // counter++; // } // // south // if ( south && !condition1 && !condition2 ) // { // size_t south_fine_node = (node_index - (Nx+1) ); // size_t south_coarse_node = getCoarseNode_GPU(south_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = south_coarse_node*dim + id%dim ; // counter++; // } // // south-east // if ( south && condition1 && !condition2 && east ) // { // size_t south_east_fine_node = (node_index - (Nx+1) + 1); // size_t south_east_coarse_node = getCoarseNode_GPU(south_east_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = south_east_coarse_node*dim + id%dim ; // counter++; // } // // west // if ( west && condition2 ) // { // size_t west_fine_node = (node_index - 1); // size_t west_coarse_node = getCoarseNode_GPU(west_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = west_coarse_node*dim + id%dim ; // counter++; // } // // east // if ( east && condition2 ) // { // size_t east_fine_node = (node_index + 1); // size_t east_coarse_node = getCoarseNode_GPU(east_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = east_coarse_node*dim + id%dim ; // counter++; // } // // north-west // if ( north && condition1 && !condition2 && west ) // { // size_t north_west_fine_node = (node_index + (Nx+1) - 1); // size_t north_west_coarse_node = getCoarseNode_GPU(north_west_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = north_west_coarse_node*dim + id%dim ; // counter++; // } // // north // if ( north && !condition1 && !condition2 ) // { // size_t north_fine_node = (node_index + (Nx+1) ); // size_t north_coarse_node = getCoarseNode_GPU(north_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = north_coarse_node*dim + id%dim ; // counter++; // } // // north-east // if ( north && condition1 && !condition2 && east ) // { // size_t north_east_fine_node = (node_index + (Nx+1) + 1); 
// size_t north_east_coarse_node = getCoarseNode_GPU(north_east_fine_node, Nx, Ny, 0, dim); // p_index[counter + id*p_max_row_size] = north_east_coarse_node*dim + id%dim ; // counter++; // } // } // // else if ( coarse_node_index == -1 ) // // { // // print // // } // // bool origin = ( id ) // // // // // if ( id == getFineNode_GPU(id, Nx, Ny, 0, dim) ) // // { // // p_index[counter + id*p_max_row_size] = getFineNode_GPU(id, Nx, Ny, 0, dim); // // counter++; // // } // // else // // { // // } // for ( int i = counter ; i < p_max_row_size; i++) // { // p_index[i + id*p_max_row_size] = num_cols; // } // } // } __global__ void fillIndexVectorRest2D_GPU(size_t* r_index, size_t Nx, size_t Ny, size_t r_max_row_size, size_t num_rows, size_t num_cols) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; int counter = 0; int dim = 2; if ( id < num_rows ) { size_t coarse_node_index = id / dim; size_t fine_id = getFineNode_GPU(id, Nx, Ny, 0, dim); size_t base_id = (id - id%dim); // all on fine grid // base : dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) = (id - id%dim) // s : - ((Nx)*dim + 1)*2 = - (Nx+1)*dim // w : - dim // south-west if ( id >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) - ((Nx)*dim + 1)*2 - dim + id%dim; counter++; } // south if ( id >= (Nx + 1)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) - ((Nx)*dim + 1)*2 + id%dim; counter++; } // south-east if ( id >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) - ((Nx)*dim + 1)*2 + dim + id%dim; counter++; } // west if ( (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) - dim + id%dim; counter++; } // origin r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + id%dim; counter++; // east if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + dim + id%dim; counter++; } // north-west if ( id < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + ((Nx)*dim + 1)*2 - dim + id%dim; counter++; } // north if ( id < (Nx+1)*(Ny)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + ((Nx)*dim + 1)*2 + id%dim; counter++; } // north-east if ( base_id == 0 || id < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, 0, dim) + ((Nx)*dim + 1)*2 + dim + id%dim; counter++; } for ( int i = counter ; i < r_max_row_size; i++) { r_index[i + id*r_max_row_size] = num_cols; } } } __global__ void fillIndexVectorRest3D_GPU(size_t* r_index, size_t Nx, size_t Ny, size_t Nz, size_t r_max_row_size, size_t num_rows, size_t num_cols) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; int counter = 0; int dim = 3; if ( id < num_rows ) { size_t coarse_node_index = id / dim; size_t fine_id = getFineNode_GPU(id, Nx, Ny, 0, dim); size_t base_id = (id - id%dim); size_t baseid_2D = (id) % ((Nx + 1)*(Ny + 1)*dim); // all on fine grid // base : dim*getFineNode_GPU(coarse_node_index, 
Nx, Ny, 0, dim) = (id - id%dim) // w : - dim // n : ((Nx)*2 + 1)*3 // s : - ((Nx)*2 + 1)*3 // previous layer // id >= (Nx+1)*(Ny+1) // TODO: take above index's || base ... //// previous layer // south-west if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 - dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // south if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // south-east if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // west if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // origin if ( id >= (Nx+1)*(Ny+1)*dim && id != 0) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // east if ( id >= (Nx+1)*(Ny+1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // north-west if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 - dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // north if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } // north-east if ( id >= (Nx+1)*(Ny+1)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + dim + id%dim - (2*Nx+1)*(2*Ny+1)*3; counter++; } //// current layer // south-west if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 - dim + id%dim; counter++; } // south if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + id%dim; counter++; } // south-east if ( (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + dim + id%dim; counter++; } // west if ( (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - dim + id%dim; counter++; } // origin r_index[counter + id*r_max_row_size] = 
dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + id%dim; counter++; // east if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + dim + id%dim; counter++; } // north-west if ( (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 - dim + id%dim; counter++; } // north if ( (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + id%dim; counter++; } // north-east if ( base_id == 0 || (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + dim + id%dim; counter++; } //// next layer // south-west if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 - dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // south if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // south-east if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - ((Nx)*2 + 1)*3 + dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // west if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) - dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // origin if ( id < (Nx+1)*(Ny+1)*(Nz)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // CHECK: // east if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 || base_id == 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // north-west if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 - dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // north if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } // north-east if ( id < (Nx+1)*(Ny+1)*(Nz)*dim && (id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0 || base_id == 0 ) { r_index[counter + id*r_max_row_size] = dim*getFineNode_GPU(coarse_node_index, Nx, Ny, Nz, dim) + ((Nx)*2 + 1)*3 + dim + id%dim + (2*Nx+1)*(2*Ny+1)*3; counter++; } for ( int i = counter ; i < 
r_max_row_size; i++) { r_index[i + id*r_max_row_size] = num_cols; } } // if ( id == 9 ) // { // for ( int i = 0 ; i < r_max_row_size ; i++) // printf("%lu ", r_index[i + id*r_max_row_size]); // printf("\n"); // } } // DEBUG: __global__ void checkMassConservation(double* chi, double local_volume, size_t numElements) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; __shared__ double temp[1024]; if ( id < numElements) { // sum of chi * local_volume temp[id] = chi[id] * local_volume; } __syncthreads(); if ( id == 0 ) { for ( int i = 1 ; i < numElements ; i++ ) { temp[0] += temp[i]; } // total volume double vol = local_volume * numElements; printf("chi_trial %f\n", temp[0] / vol); } } // // TODO: to delete __global__ void checkLaplacian(double* laplacian, double* chi, size_t Nx, size_t Ny, size_t Nz, size_t numElements, double h) { // laplacian_GPU( double *array, size_t ind, size_t Nx, size_t Ny, size_t Nz ) unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; if ( id < numElements) { laplacian[id] = laplacian_GPU( chi, id, Nx, Ny, Nz, h ); } }
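// ---------------------------------------------------------------------------
// Hedged addition (not part of the original solver): a minimal host-side
// reference SpMV for the ELLPACK layout used by Apply_GPU / ComputeResiduum_GPU
// above, i.e. value[max_row_size*row + n] and index[max_row_size*row + n],
// where padded entries carry value 0.0 and column index == num_rows (see
// transformToELL). Useful for cross-checking the kernels on small grids.
// The function name ell_spmv_host is illustrative and does not exist elsewhere
// in this code base.
// ---------------------------------------------------------------------------
__host__ void ell_spmv_host(
	const vector<double>& value,
	const vector<size_t>& index,
	size_t max_row_size,
	size_t num_rows,
	const vector<double>& x,
	vector<double>& r)              // r = A*x
{
	for ( size_t row = 0 ; row < num_rows ; row++ )
	{
		double dot = 0.0;
		for ( size_t n = 0 ; n < max_row_size ; n++ )
		{
			size_t col = index[ max_row_size * row + n ];
			if ( col < num_rows )   // skip the zero-padding entries
				dot += value[ max_row_size * row + n ] * x[col];
		}
		r[row] = dot;
	}
}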
2d4698842b4a13c902a96dcac6320c73b31a4fcc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <stdio.h>
#include "batch_erf_gpu.h"

// NOTE: batchPairwiseDistanceKernel, input2, embedDim and vocSize are not defined in this
// translation unit; they are assumed to be declared in batch_erf_gpu.h. The wrapper is named
// batchERF but launches a pairwise-distance kernel, which looks like a copy-paste leftover.
extern "C" void batchERF(float* input1, float* output, int batchSize)
{
	int threadPerBlocks = 256;
	// one thread per output element, rounded up to a full block
	int nBlocks = (vocSize + threadPerBlocks - 1) / threadPerBlocks;
	hipLaunchKernelGGL((batchPairwiseDistanceKernel), dim3(nBlocks), dim3(threadPerBlocks), 0, 0,
	                   input1, input2, output, batchSize, embedDim, vocSize);
}
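// ---------------------------------------------------------------------------
// Hedged sketch (not from the original source): since the wrapper above is
// named batchERF yet launches a pairwise-distance kernel, this is what a plain
// elementwise erf kernel could look like for reference. The kernel name
// erfKernel and the numElements parameter are illustrative assumptions.
// ---------------------------------------------------------------------------
__global__ void erfKernel(const float* input, float* output, int numElements)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < numElements)
		output[idx] = erff(input[idx]);   // device error-function intrinsic
}

// Possible launch, mirroring the block-size convention used above:
//   int threads = 256;
//   int blocks  = (numElements + threads - 1) / threads;
//   hipLaunchKernelGGL(erfKernel, dim3(blocks), dim3(threads), 0, 0, d_in, d_out, numElements);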
2d4698842b4a13c902a96dcac6320c73b31a4fcc.cu
#include <iostream>
#include <math.h>
#include <stdio.h>
#include "batch_erf_gpu.h"

// NOTE: as in the hipified version above, batchPairwiseDistanceKernel, input2, embedDim and
// vocSize are not defined in this translation unit; they are assumed to come from
// batch_erf_gpu.h.
extern "C" void batchERF(float* input1, float* output, int batchSize)
{
	int threadPerBlocks = 256;
	// one thread per output element, rounded up to a full block
	int nBlocks = (vocSize + threadPerBlocks - 1) / threadPerBlocks;
	batchPairwiseDistanceKernel<<<nBlocks, threadPerBlocks>>>(input1, input2, output,
	                                                          batchSize, embedDim, vocSize);
}
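// ---------------------------------------------------------------------------
// Hedged addition (not part of the original file): the launch above has no
// error checking. A wrapper that adds the standard CUDA runtime checks is
// sketched here; the name batchERFChecked is an assumption and only calls
// runtime functions that exist in the CUDA API.
// ---------------------------------------------------------------------------
extern "C" void batchERFChecked(float* input1, float* output, int batchSize)
{
	batchERF(input1, output, batchSize);

	cudaError_t err = cudaGetLastError();            // launch-configuration errors
	if (err != cudaSuccess)
		fprintf(stderr, "batchERF launch failed: %s\n", cudaGetErrorString(err));

	err = cudaDeviceSynchronize();                   // errors raised during execution
	if (err != cudaSuccess)
		fprintf(stderr, "batchERF execution failed: %s\n", cudaGetErrorString(err));
}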
342811c383bfe5ccd2355b69f570a8e4fd7a12c6.hip
// !!! This is a file automatically generated by hipify!!! // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Winter Semester 2013/2014, March 3 - April 4 // ### // ### // ### Evgeny Strekalovskiy, Maria Klodt, Jan Stuehmer, Mohamed Souiai // ### // ### // ### // ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED // ### // ### #include "aux.h" #include <cstdlib> #include <iostream> using std::stringstream; using std::cerr; using std::cout; using std::endl; using std::string; // parameter processing: template specialization for T=bool template<> bool getParam<bool>(std::string param, bool &var, int argc, char **argv) { const char *c_param = param.c_str(); for(int i=argc-1; i>=1; i--) { if (argv[i][0]!='-') continue; if (strcmp(argv[i]+1, c_param)==0) { if (!(i+1<argc) || argv[i+1][0]=='-') { var = true; return true; } std::stringstream ss; ss << argv[i+1]; ss >> var; return (bool)ss; } } return false; } // opencv helpers void convert_layered_to_interleaved(float *aOut, const float *aIn, int w, int h, int nc) { if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; } size_t nOmega = (size_t)w*h; for (int y=0; y<h; y++) { for (int x=0; x<w; x++) { for (int c=0; c<nc; c++) { aOut[(nc-1-c) + nc*(x + (size_t)w*y)] = aIn[x + (size_t)w*y + nOmega*c]; } } } } void convert_layered_to_mat(cv::Mat &mOut, const float *aIn) { convert_layered_to_interleaved((float*)mOut.data, aIn, mOut.cols, mOut.rows, mOut.channels()); } void convert_interleaved_to_layered(float *aOut, const float *aIn, int w, int h, int nc) { if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; } size_t nOmega = (size_t)w*h; for (int y=0; y<h; y++) { for (int x=0; x<w; x++) { for (int c=0; c<nc; c++) { aOut[x + (size_t)w*y + nOmega*c] = aIn[(nc-1-c) + nc*(x + (size_t)w*y)]; } } } } void convert_mat_to_layered(float *aOut, const cv::Mat &mIn) { convert_interleaved_to_layered(aOut, (float*)mIn.data, mIn.cols, mIn.rows, mIn.channels()); } void showImage(string title, const cv::Mat &mat, int x, int y) { const char *wTitle = title.c_str(); cv::namedWindow(wTitle, CV_WINDOW_AUTOSIZE); cvMoveWindow(wTitle, x, y); cv::imshow(wTitle, mat); } // adding Gaussian noise float noise(float sigma) { float x1 = (float)rand()/RAND_MAX; float x2 = (float)rand()/RAND_MAX; return sigma * sqrtf(-2*log(::max(x1,0.000001f)))*cosf(2*M_PI*x2); } void addNoise(cv::Mat &m, float sigma) { float *data = (float*)m.data; int w = m.cols; int h = m.rows; int nc = m.channels(); size_t n = (size_t)w*h*nc; for(size_t i=0; i<n; i++) { data[i] += noise(sigma); } } // cuda error checking string prev_file = ""; int prev_line = 0; void cuda_check(string file, int line) { hipError_t e = hipGetLastError(); if (e != hipSuccess) { cout << endl << file << ", line " << line << ": " << hipGetErrorString(e) << " (" << e << ")" << endl; if (prev_line>0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl; exit(1); } prev_file = file; prev_line = line; }
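// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original helper file): a typical flow
// for these helpers in the course exercises. addNoise and showImage are the
// functions defined above; the image path and sigma value are placeholders,
// and the code assumes the image has been converted to float in [0,1] first.
// ---------------------------------------------------------------------------
// cv::Mat mIn = cv::imread("input.png", -1);      // load image as-is
// mIn.convertTo(mIn, CV_32F, 1.0f / 255.0f);      // float in [0,1], as addNoise expects
// addNoise(mIn, 0.1f);                            // additive Gaussian noise, sigma = 0.1
// showImage("noisy input", mIn, 100, 100);
// cv::waitKey(0);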
342811c383bfe5ccd2355b69f570a8e4fd7a12c6.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Winter Semester 2013/2014, March 3 - April 4 // ### // ### // ### Evgeny Strekalovskiy, Maria Klodt, Jan Stuehmer, Mohamed Souiai // ### // ### // ### // ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED // ### // ### #include "aux.h" #include <cstdlib> #include <iostream> using std::stringstream; using std::cerr; using std::cout; using std::endl; using std::string; // parameter processing: template specialization for T=bool template<> bool getParam<bool>(std::string param, bool &var, int argc, char **argv) { const char *c_param = param.c_str(); for(int i=argc-1; i>=1; i--) { if (argv[i][0]!='-') continue; if (strcmp(argv[i]+1, c_param)==0) { if (!(i+1<argc) || argv[i+1][0]=='-') { var = true; return true; } std::stringstream ss; ss << argv[i+1]; ss >> var; return (bool)ss; } } return false; } // opencv helpers void convert_layered_to_interleaved(float *aOut, const float *aIn, int w, int h, int nc) { if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; } size_t nOmega = (size_t)w*h; for (int y=0; y<h; y++) { for (int x=0; x<w; x++) { for (int c=0; c<nc; c++) { aOut[(nc-1-c) + nc*(x + (size_t)w*y)] = aIn[x + (size_t)w*y + nOmega*c]; } } } } void convert_layered_to_mat(cv::Mat &mOut, const float *aIn) { convert_layered_to_interleaved((float*)mOut.data, aIn, mOut.cols, mOut.rows, mOut.channels()); } void convert_interleaved_to_layered(float *aOut, const float *aIn, int w, int h, int nc) { if (nc==1) { memcpy(aOut, aIn, w*h*sizeof(float)); return; } size_t nOmega = (size_t)w*h; for (int y=0; y<h; y++) { for (int x=0; x<w; x++) { for (int c=0; c<nc; c++) { aOut[x + (size_t)w*y + nOmega*c] = aIn[(nc-1-c) + nc*(x + (size_t)w*y)]; } } } } void convert_mat_to_layered(float *aOut, const cv::Mat &mIn) { convert_interleaved_to_layered(aOut, (float*)mIn.data, mIn.cols, mIn.rows, mIn.channels()); } void showImage(string title, const cv::Mat &mat, int x, int y) { const char *wTitle = title.c_str(); cv::namedWindow(wTitle, CV_WINDOW_AUTOSIZE); cvMoveWindow(wTitle, x, y); cv::imshow(wTitle, mat); } // adding Gaussian noise float noise(float sigma) { float x1 = (float)rand()/RAND_MAX; float x2 = (float)rand()/RAND_MAX; return sigma * sqrtf(-2*log(std::max(x1,0.000001f)))*cosf(2*M_PI*x2); } void addNoise(cv::Mat &m, float sigma) { float *data = (float*)m.data; int w = m.cols; int h = m.rows; int nc = m.channels(); size_t n = (size_t)w*h*nc; for(size_t i=0; i<n; i++) { data[i] += noise(sigma); } } // cuda error checking string prev_file = ""; int prev_line = 0; void cuda_check(string file, int line) { cudaError_t e = cudaGetLastError(); if (e != cudaSuccess) { cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl; if (prev_line>0) cout << "Previous CUDA call:" << endl << prev_file << ", line " << prev_line << endl; exit(1); } prev_file = file; prev_line = line; }
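cuda_check() above is designed to be called immediately after every CUDA call; it remembers the location of the previous check so that, on error, the failing call can be narrowed down to the span between the two. A minimal usage sketch, assuming aux.h provides a convenience macro of this shape (the macro name is an assumption, not a quote from aux.h):

#include "aux.h"   // assumed to declare cuda_check(std::string file, int line)

// Assumed convenience macro, typically defined next to cuda_check().
#define CUDA_CHECK cuda_check(__FILE__, __LINE__)

__global__ void fillOnes(float *d, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) d[i] = 1.0f;
}

void exampleLaunch(float *d_data, int n)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    fillOnes<<<grid, block>>>(d_data, n);
    CUDA_CHECK;   // on error, prints this location and the previously checked one
}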
1c42a7244b0ecd6ca42fa7e804c698a0be2af6c6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include "SyncedMemory.h"
#include "Timer.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHECK {\
    auto e = hipDeviceSynchronize();\
    if (e != hipSuccess) {\
        printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
        abort();\
    }\
}

//__device__ int a[3] = { 0, 1, 2 };

__global__ void SomeTransform(char *input_gpu, int fsize) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    //if (idx < fsize && input_gpu[idx] != '\n') {
    //    input_gpu[idx] = '!';
    //}
    //** convert all letters to uppercase **//
    if (idx < fsize && input_gpu[idx] != '\n'){
        if (input_gpu[idx] >= 'a' && input_gpu[idx] <= 'z'){
            input_gpu[idx] -= 32;
        }
    }
}

int main(int argc, char **argv)
{
    // init, and check
    if (argc != 2) {
        printf("Usage %s <input text file>\n", argv[0]);
        abort();
    }
    FILE *fp = fopen(argv[1], "r");
    if (!fp) {
        printf("Cannot open %s", argv[1]);
        abort();
    }

    // get file size
    fseek(fp, 0, SEEK_END);   // move the read position to the end of the file first
    size_t fsize = ftell(fp); // the current position (the end) gives the file length
    fseek(fp, 0, SEEK_SET);   // rewind to the beginning before reading

    // read files
    MemoryBuffer<char> text(fsize+1);
    auto text_smem = text.CreateSync(fsize);
    CHECK;

    fread(text_smem.get_cpu_wo(), 1, fsize, fp);
    text_smem.get_cpu_wo()[fsize] = '\0';
    fclose(fp);

    // TODO: do your transform here
    char *input_gpu = text_smem.get_gpu_rw();

    // An example: transform the first 64 characters to '!'
    // Don't transform over the tail
    // And don't transform the line breaks
    hipLaunchKernelGGL(( SomeTransform), dim3(102), dim3(32), 0, 0, input_gpu, fsize); // gridDim.x = 102, blockDim.x = 32
    puts(text_smem.get_cpu_ro());

    return 0;
}
1c42a7244b0ecd6ca42fa7e804c698a0be2af6c6.cu
#include <cstdio>
#include <cstdlib>
#include "SyncedMemory.h"
#include "Timer.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHECK {\
    auto e = cudaDeviceSynchronize();\
    if (e != cudaSuccess) {\
        printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
        abort();\
    }\
}

//__device__ int a[3] = { 0, 1, 2 };

__global__ void SomeTransform(char *input_gpu, int fsize) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    //if (idx < fsize && input_gpu[idx] != '\n') {
    //    input_gpu[idx] = '!';
    //}
    //** convert all letters to uppercase **//
    if (idx < fsize && input_gpu[idx] != '\n'){
        if (input_gpu[idx] >= 'a' && input_gpu[idx] <= 'z'){
            input_gpu[idx] -= 32;
        }
    }
}

int main(int argc, char **argv)
{
    // init, and check
    if (argc != 2) {
        printf("Usage %s <input text file>\n", argv[0]);
        abort();
    }
    FILE *fp = fopen(argv[1], "r");
    if (!fp) {
        printf("Cannot open %s", argv[1]);
        abort();
    }

    // get file size
    fseek(fp, 0, SEEK_END);   // move the read position to the end of the file first
    size_t fsize = ftell(fp); // the current position (the end) gives the file length
    fseek(fp, 0, SEEK_SET);   // rewind to the beginning before reading

    // read files
    MemoryBuffer<char> text(fsize+1);
    auto text_smem = text.CreateSync(fsize);
    CHECK;

    fread(text_smem.get_cpu_wo(), 1, fsize, fp);
    text_smem.get_cpu_wo()[fsize] = '\0';
    fclose(fp);

    // TODO: do your transform here
    char *input_gpu = text_smem.get_gpu_rw();

    // An example: transform the first 64 characters to '!'
    // Don't transform over the tail
    // And don't transform the line breaks
    SomeTransform<<<102, 32>>>(input_gpu, fsize); // gridDim.x = 102, blockDim.x = 32
    puts(text_smem.get_cpu_ro());

    return 0;
}
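The launch above hard-codes 102 blocks of 32 threads, so only the first 3264 characters are ever visited. A sketch of a size-derived launch for the same kernel (an alternative illustration, not the assignment's code):

// Covers the whole buffer regardless of file size; SomeTransform and CHECK
// are the kernel and macro defined in the files above.
void launchSomeTransform(char *input_gpu, size_t fsize)
{
    const int threadsPerBlock = 256;
    const int numBlocks = (int)((fsize + threadsPerBlock - 1) / threadsPerBlock);
    SomeTransform<<<numBlocks, threadsPerBlock>>>(input_gpu, (int)fsize);
    CHECK;
}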
a99778bf2ce6d550eb856123a80bd02fc7d8a1c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include "colour-convert.h" #define THREADS_PER_BLOCK 700 #define BLOCK_SIZE 1000 //EXPERIMENTING WITH THREADS AND BLOCK-SIZES: //THREADS_PER_BLOCK ARE 10 50 100 250 500 750 1000 //BLOCK_SIZE 100 - SMALL IMAGE //BLOCK_SIZE 1000 - LARGE IMAGE __host__ __device__ unsigned char clip_rgb(int x) { if(x > 255) return 255; if(x < 0) return 0; return (unsigned char)x; } __global__ void rgb2yuvKernel(PPM_IMG ppm, YUV_IMG yuv) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; unsigned char r, g, b; unsigned char y, cb, cr; r = ppm.img_r[threadId]; g = ppm.img_g[threadId]; b = ppm.img_b[threadId]; y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b); cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128); cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128); yuv.img_y[threadId] = y; yuv.img_u[threadId] = cb; yuv.img_v[threadId] = cr; } __global__ void yuv2rgbKernel(YUV_IMG yuv, PPM_IMG ppm) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; int rt, gt, bt; int y, cb, cr; y = (int)yuv.img_y[threadId]; cb = (int)yuv.img_u[threadId] - 128; cr = (int)yuv.img_v[threadId] - 128; rt = (int)( y + 1.402*cr); gt = (int)( y - 0.344*cb - 0.714*cr); bt = (int)( y + 1.772*cb); ppm.img_r[threadId] = clip_rgb(rt); ppm.img_g[threadId] = clip_rgb(gt); ppm.img_b[threadId] = clip_rgb(bt); } __global__ void emptyKernel(void) {} void launchEmptyKernel(void) { hipLaunchKernelGGL(( emptyKernel), dim3(1),dim3(1), 0, 0, ); } YUV_IMG rgb2yuvGPU(PPM_IMG img_in) { YUV_IMG img_out; PPM_IMG device_ppm; YUV_IMG device_yuv; int size = sizeof(unsigned char) * img_in.w * img_in.h; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(size); img_out.img_u = (unsigned char *)malloc(size); img_out.img_v = (unsigned char *)malloc(size); hipMalloc((void **)&(device_ppm.img_r), size); hipMalloc((void **)&(device_ppm.img_g), size); hipMalloc((void **)&(device_ppm.img_b), size); hipMemcpy(device_ppm.img_r, img_in.img_r, size, hipMemcpyHostToDevice); hipMemcpy(device_ppm.img_g, img_in.img_g, size, hipMemcpyHostToDevice); hipMemcpy(device_ppm.img_b, img_in.img_b, size, hipMemcpyHostToDevice); hipMalloc((void **)&(device_yuv.img_y), size); hipMalloc((void **)&(device_yuv.img_u), size); hipMalloc((void **)&(device_yuv.img_v), size); int N = img_in.w * img_in.h; int BLOCKS_IN_GRID = N / BLOCK_SIZE / THREADS_PER_BLOCK; dim3 BLOCKS_PER_GRID(BLOCK_SIZE, BLOCKS_IN_GRID); hipLaunchKernelGGL(( rgb2yuvKernel), dim3(BLOCKS_PER_GRID), dim3(THREADS_PER_BLOCK), 0, 0, device_ppm, device_yuv); hipMemcpy(img_out.img_y, device_yuv.img_y, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_u, device_yuv.img_u, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_v, device_yuv.img_v, size, hipMemcpyDeviceToHost); return img_out; } PPM_IMG yuv2rgbGPU(YUV_IMG img_in) { PPM_IMG img_out; PPM_IMG device_ppm; YUV_IMG device_yuv; int size = sizeof(unsigned char) * img_in.w * img_in.h; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(size); img_out.img_g = (unsigned char *)malloc(size); img_out.img_b = (unsigned char *)malloc(size); hipMalloc((void **)&(device_yuv.img_y), size); hipMalloc((void **)&(device_yuv.img_u), size); hipMalloc((void **)&(device_yuv.img_v), size); hipMemcpy(device_yuv.img_y, img_in.img_y, size, hipMemcpyHostToDevice); 
hipMemcpy(device_yuv.img_u, img_in.img_u, size, hipMemcpyHostToDevice); hipMemcpy(device_yuv.img_v, img_in.img_v, size, hipMemcpyHostToDevice); hipMalloc((void **)&(device_ppm.img_r), size); hipMalloc((void **)&(device_ppm.img_g), size); hipMalloc((void **)&(device_ppm.img_b), size); int N = img_in.w * img_in.h; int BLOCKS_IN_GRID = N / BLOCK_SIZE / THREADS_PER_BLOCK; dim3 BLOCKS_PER_GRID(BLOCK_SIZE, BLOCKS_IN_GRID); hipLaunchKernelGGL(( yuv2rgbKernel), dim3(BLOCKS_PER_GRID), dim3(THREADS_PER_BLOCK), 0, 0, device_yuv, device_ppm); hipMemcpy(img_out.img_r, device_ppm.img_r, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_g, device_ppm.img_g, size, hipMemcpyDeviceToHost); hipMemcpy(img_out.img_b, device_ppm.img_b, size, hipMemcpyDeviceToHost); return img_out; } YUV_IMG rgb2yuv(PPM_IMG img_in) { YUV_IMG img_out; int i; unsigned char r, g, b; unsigned char y, cb, cr; int size = sizeof(unsigned char) * img_in.w * img_in.h; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(size); img_out.img_u = (unsigned char *)malloc(size); img_out.img_v = (unsigned char *)malloc(size); for(i = 0; i < img_out.w*img_out.h; i ++){ r = img_in.img_r[i]; g = img_in.img_g[i]; b = img_in.img_b[i]; y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b); cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128); cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128); img_out.img_y[i] = y; img_out.img_u[i] = cb; img_out.img_v[i] = cr; } return img_out; } PPM_IMG yuv2rgb(YUV_IMG img_in) { PPM_IMG img_out; int i; int rt, gt, bt; int y, cb, cr; int size = sizeof(unsigned char) * img_in.w * img_in.h; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(size); img_out.img_g = (unsigned char *)malloc(size); img_out.img_b = (unsigned char *)malloc(size); for(i = 0; i < img_out.w*img_out.h; i ++){ y = (int)img_in.img_y[i]; cb = (int)img_in.img_u[i] - 128; cr = (int)img_in.img_v[i] - 128; rt = (int)( y + 1.402*cr); gt = (int)( y - 0.344*cb - 0.714*cr); bt = (int)( y + 1.772*cb); img_out.img_r[i] = clip_rgb(rt); img_out.img_g[i] = clip_rgb(gt); img_out.img_b[i] = clip_rgb(bt); } return img_out; }
a99778bf2ce6d550eb856123a80bd02fc7d8a1c8.cu
#include <stdio.h> #include <string.h> #include <stdlib.h> #include "colour-convert.h" #define THREADS_PER_BLOCK 700 #define BLOCK_SIZE 1000 //EXPERIMENTING WITH THREADS AND BLOCK-SIZES: //THREADS_PER_BLOCK ARE 10 50 100 250 500 750 1000 //BLOCK_SIZE 100 - SMALL IMAGE //BLOCK_SIZE 1000 - LARGE IMAGE __host__ __device__ unsigned char clip_rgb(int x) { if(x > 255) return 255; if(x < 0) return 0; return (unsigned char)x; } __global__ void rgb2yuvKernel(PPM_IMG ppm, YUV_IMG yuv) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; unsigned char r, g, b; unsigned char y, cb, cr; r = ppm.img_r[threadId]; g = ppm.img_g[threadId]; b = ppm.img_b[threadId]; y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b); cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128); cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128); yuv.img_y[threadId] = y; yuv.img_u[threadId] = cb; yuv.img_v[threadId] = cr; } __global__ void yuv2rgbKernel(YUV_IMG yuv, PPM_IMG ppm) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; int rt, gt, bt; int y, cb, cr; y = (int)yuv.img_y[threadId]; cb = (int)yuv.img_u[threadId] - 128; cr = (int)yuv.img_v[threadId] - 128; rt = (int)( y + 1.402*cr); gt = (int)( y - 0.344*cb - 0.714*cr); bt = (int)( y + 1.772*cb); ppm.img_r[threadId] = clip_rgb(rt); ppm.img_g[threadId] = clip_rgb(gt); ppm.img_b[threadId] = clip_rgb(bt); } __global__ void emptyKernel(void) {} void launchEmptyKernel(void) { emptyKernel<<<1,1>>>(); } YUV_IMG rgb2yuvGPU(PPM_IMG img_in) { YUV_IMG img_out; PPM_IMG device_ppm; YUV_IMG device_yuv; int size = sizeof(unsigned char) * img_in.w * img_in.h; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(size); img_out.img_u = (unsigned char *)malloc(size); img_out.img_v = (unsigned char *)malloc(size); cudaMalloc((void **)&(device_ppm.img_r), size); cudaMalloc((void **)&(device_ppm.img_g), size); cudaMalloc((void **)&(device_ppm.img_b), size); cudaMemcpy(device_ppm.img_r, img_in.img_r, size, cudaMemcpyHostToDevice); cudaMemcpy(device_ppm.img_g, img_in.img_g, size, cudaMemcpyHostToDevice); cudaMemcpy(device_ppm.img_b, img_in.img_b, size, cudaMemcpyHostToDevice); cudaMalloc((void **)&(device_yuv.img_y), size); cudaMalloc((void **)&(device_yuv.img_u), size); cudaMalloc((void **)&(device_yuv.img_v), size); int N = img_in.w * img_in.h; int BLOCKS_IN_GRID = N / BLOCK_SIZE / THREADS_PER_BLOCK; dim3 BLOCKS_PER_GRID(BLOCK_SIZE, BLOCKS_IN_GRID); rgb2yuvKernel<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(device_ppm, device_yuv); cudaMemcpy(img_out.img_y, device_yuv.img_y, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_u, device_yuv.img_u, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_v, device_yuv.img_v, size, cudaMemcpyDeviceToHost); return img_out; } PPM_IMG yuv2rgbGPU(YUV_IMG img_in) { PPM_IMG img_out; PPM_IMG device_ppm; YUV_IMG device_yuv; int size = sizeof(unsigned char) * img_in.w * img_in.h; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(size); img_out.img_g = (unsigned char *)malloc(size); img_out.img_b = (unsigned char *)malloc(size); cudaMalloc((void **)&(device_yuv.img_y), size); cudaMalloc((void **)&(device_yuv.img_u), size); cudaMalloc((void **)&(device_yuv.img_v), size); cudaMemcpy(device_yuv.img_y, img_in.img_y, size, cudaMemcpyHostToDevice); cudaMemcpy(device_yuv.img_u, img_in.img_u, size, cudaMemcpyHostToDevice); cudaMemcpy(device_yuv.img_v, img_in.img_v, size, cudaMemcpyHostToDevice); 
cudaMalloc((void **)&(device_ppm.img_r), size); cudaMalloc((void **)&(device_ppm.img_g), size); cudaMalloc((void **)&(device_ppm.img_b), size); int N = img_in.w * img_in.h; int BLOCKS_IN_GRID = N / BLOCK_SIZE / THREADS_PER_BLOCK; dim3 BLOCKS_PER_GRID(BLOCK_SIZE, BLOCKS_IN_GRID); yuv2rgbKernel<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(device_yuv, device_ppm); cudaMemcpy(img_out.img_r, device_ppm.img_r, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_g, device_ppm.img_g, size, cudaMemcpyDeviceToHost); cudaMemcpy(img_out.img_b, device_ppm.img_b, size, cudaMemcpyDeviceToHost); return img_out; } YUV_IMG rgb2yuv(PPM_IMG img_in) { YUV_IMG img_out; int i; unsigned char r, g, b; unsigned char y, cb, cr; int size = sizeof(unsigned char) * img_in.w * img_in.h; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_y = (unsigned char *)malloc(size); img_out.img_u = (unsigned char *)malloc(size); img_out.img_v = (unsigned char *)malloc(size); for(i = 0; i < img_out.w*img_out.h; i ++){ r = img_in.img_r[i]; g = img_in.img_g[i]; b = img_in.img_b[i]; y = (unsigned char)( 0.299*r + 0.587*g + 0.114*b); cb = (unsigned char)(-0.169*r - 0.331*g + 0.499*b + 128); cr = (unsigned char)( 0.499*r - 0.418*g - 0.0813*b + 128); img_out.img_y[i] = y; img_out.img_u[i] = cb; img_out.img_v[i] = cr; } return img_out; } PPM_IMG yuv2rgb(YUV_IMG img_in) { PPM_IMG img_out; int i; int rt, gt, bt; int y, cb, cr; int size = sizeof(unsigned char) * img_in.w * img_in.h; img_out.w = img_in.w; img_out.h = img_in.h; img_out.img_r = (unsigned char *)malloc(size); img_out.img_g = (unsigned char *)malloc(size); img_out.img_b = (unsigned char *)malloc(size); for(i = 0; i < img_out.w*img_out.h; i ++){ y = (int)img_in.img_y[i]; cb = (int)img_in.img_u[i] - 128; cr = (int)img_in.img_v[i] - 128; rt = (int)( y + 1.402*cr); gt = (int)( y - 0.344*cb - 0.714*cr); bt = (int)( y + 1.772*cb); img_out.img_r[i] = clip_rgb(rt); img_out.img_g[i] = clip_rgb(gt); img_out.img_b[i] = clip_rgb(bt); } return img_out; }
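The grid size above, N / BLOCK_SIZE / THREADS_PER_BLOCK, truncates whenever the pixel count is not an exact multiple of BLOCK_SIZE * THREADS_PER_BLOCK, and the kernels write without a bounds check. A sketch of a guarded variant with a ceiling-division grid, shown for the RGB-to-YUV direction only (an alternative illustration, not the original code):

// PPM_IMG and YUV_IMG are the structs declared in colour-convert.h above.
__global__ void rgb2yuvKernelGuarded(PPM_IMG ppm, YUV_IMG yuv, int n)
{
    int blockId  = blockIdx.y * gridDim.x + blockIdx.x;
    int threadId = blockId * blockDim.x + threadIdx.x;
    if (threadId >= n) return;                         // guard the tail block
    unsigned char r = ppm.img_r[threadId];
    unsigned char g = ppm.img_g[threadId];
    unsigned char b = ppm.img_b[threadId];
    yuv.img_y[threadId] = (unsigned char)( 0.299f*r + 0.587f*g + 0.114f*b);
    yuv.img_u[threadId] = (unsigned char)(-0.169f*r - 0.331f*g + 0.499f*b + 128);
    yuv.img_v[threadId] = (unsigned char)( 0.499f*r - 0.418f*g - 0.0813f*b + 128);
}

// Launch with enough blocks to cover every pixel:
//   int N = img_in.w * img_in.h;
//   int blocksInGrid = (N + BLOCK_SIZE * THREADS_PER_BLOCK - 1) / (BLOCK_SIZE * THREADS_PER_BLOCK);
//   dim3 blocksPerGrid(BLOCK_SIZE, blocksInGrid);
//   rgb2yuvKernelGuarded<<<blocksPerGrid, THREADS_PER_BLOCK>>>(device_ppm, device_yuv, N);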
ed816e15b3f6d03648b6e2c0e83c4a6760d8c8f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* .optix.cu - Copyright 2019/2020 Utrecht University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This file contains a minimal set of Optix functions. From here we will dispatch program flow to our own functions that implement the path tracer. */ #include "../kernels/noerrors.h" #include "helper_math.h" // global include files #include "../../RenderSystem/common_settings.h" #include "../../RenderSystem/common_types.h" #define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here #include "../core_settings.h" // global path tracing parameters extern "C" { __constant__ Params params; } // tools __device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; } __device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; } __device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; } static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension ) { // Adapated from E. Heitz. Arguments: // sampleIndex: 0..255 // sampleDimension: 0..255 x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255; // xor index based on optimized ranking int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255; // fetch value in sequence int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256]; // if the dimension is optimized, xor sequence value based on optimized scrambling value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536]; // convert to float and return return (0.5f + value) * (1.0f / 256.0f); } static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 ) { const float blade = (int)(r0 * 9); float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f; float x1, y1, x2, y2; __sincosf( blade * PI / 4.5f, &x1, &y1 ); __sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 ); if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2; const float xr = x1 * r1 + x2 * r2; const float yr = y1 * r1 + y2 * r2; float4 posLens = params.posLensSize; return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr); } static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed ) { // random point on pixel and lens int sx = pixelIdx % params.scrsize.x; int sy = pixelIdx / params.scrsize.x; float r0, r1, r2, r3; if (params.j0 == -5.0f) { if (sampleIdx < 256) r0 = blueNoiseSampler( sx, sy, sampleIdx, 0 ), r1 = blueNoiseSampler( sx, sy, sampleIdx, 1 ), r2 = blueNoiseSampler( sx, sy, sampleIdx, 2 ), r3 = blueNoiseSampler( sx, sy, sampleIdx, 3 ); else r0 = RandomFloat( seed ), r1 = RandomFloat( seed ), r2 = RandomFloat( seed ), r3 = RandomFloat( seed ); O = RandomPointOnLens( r2, r3 ); } else { r0 = r1 = 0; O = make_float3( params.posLensSize ); } float3 posOnPixel; if (params.distortion == 0) 
{ const float u = ((float)sx + r0) * (1.0f / params.scrsize.x); const float v = ((float)sy + r1) * (1.0f / params.scrsize.y); posOnPixel = params.p1 + u * params.right + v * params.up; } else { const float tx = sx / (float)params.scrsize.x - 0.5f, ty = sy / (float)params.scrsize.y - 0.5f; const float rr = tx * tx + ty * ty; const float rq = sqrtf( rr ) * (1.0f + params.distortion * rr + params.distortion * rr * rr); const float theta = atan2f( tx, ty ); const float bx = (sinf( theta ) * rq + 0.5f) * params.scrsize.x; const float by = (cosf( theta ) * rq + 0.5f) * params.scrsize.y; posOnPixel = params.p1 + (bx + r0) * (params.right / (float)params.scrsize.x) + (by + r1) * (params.up / (float)params.scrsize.y); } D = normalize( posOnPixel - O ); } #if __CUDA_ARCH__ >= 700 #define THREADMASK __activemask() // volta, turing #else #define THREADMASK 0xffffffff // pascal, kepler, fermi #endif __device__ void setupPrimaryRay( const uint pathIdx, const uint stride ) { const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y); const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass; uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 ); // generate eye ray float3 O, D; generateEyeRay( O, D, pixelIdx, sampleIdx, seed ); // populate path state array params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 6) + 1 /* S_SPECULAR in CUDA code */ ) ); params.pathStates[pathIdx + stride] = make_float4( D, 0 ); // trace eye ray uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f ); optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 ); if (pixelIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */ params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) ); } __device__ void setupSecondaryRay( const uint rayIdx, const uint stride ) { const float4 O4 = params.pathStates[rayIdx]; const float4 D4 = params.pathStates[rayIdx + stride]; uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f ); optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 ); if (rayIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */ params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) ); } __device__ void generateShadowRay( const uint rayIdx, const uint stride ) { const float4 O4 = params.connectData[rayIdx]; // O4 const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4 // launch shadow ray uint u0 = 1; optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 ); if (u0) return; const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4 const int pixelIdx = __float_as_int( E4.w ); if (pixelIdx < stride /* OptiX bug workaround? 
*/) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 ); } extern "C" __global__ void __raygen__rg() { const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z; const uint3 idx = optixGetLaunchIndex(); const uint rayIdx = idx.x + idx.y * params.scrsize.x; switch (params.phase) { case Params::SPAWN_PRIMARY: /* primary rays */ setupPrimaryRay( rayIdx, stride ); break; case Params::SPAWN_SECONDARY: /* secondary rays */ setupSecondaryRay( rayIdx, stride ); break; case Params::SPAWN_SHADOW: /* shadow rays */ generateShadowRay( rayIdx, stride ); break; } } extern "C" __global__ void __miss__occlusion() { optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io. } extern "C" __global__ void __closesthit__radiance() { const uint prim_idx = optixGetPrimitiveIndex(); const uint inst_idx = optixGetInstanceIndex(); const float2 bary = optixGetTriangleBarycentrics(); const float tmin = optixGetRayTmax(); optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) ); optixSetPayload_1( inst_idx ); optixSetPayload_2( prim_idx ); optixSetPayload_3( __float_as_uint( tmin ) ); } // EOF
ed816e15b3f6d03648b6e2c0e83c4a6760d8c8f6.cu
/* .optix.cu - Copyright 2019/2020 Utrecht University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This file contains a minimal set of Optix functions. From here we will dispatch program flow to our own functions that implement the path tracer. */ #include "../kernels/noerrors.h" #include "helper_math.h" // global include files #include "../../RenderSystem/common_settings.h" #include "../../RenderSystem/common_types.h" #define OPTIX_CU // skip CUDAMaterial definition in core_settings.h; not needed here #include "../core_settings.h" // global path tracing parameters extern "C" { __constant__ Params params; } // tools __device__ __inline__ uint WangHash( uint s ) { s = (s ^ 61) ^ (s >> 16), s *= 9, s = s ^ (s >> 4), s *= 0x27d4eb2d, s = s ^ (s >> 15); return s; } __device__ __inline__ uint RandomInt( uint& s ) { s ^= s << 13, s ^= s >> 17, s ^= s << 5; return s; } __device__ __inline__ float RandomFloat( uint& s ) { return RandomInt( s ) * 2.3283064365387e-10f; } static __inline __device__ float blueNoiseSampler( int x, int y, int sampleIndex, int sampleDimension ) { // Adapated from E. Heitz. Arguments: // sampleIndex: 0..255 // sampleDimension: 0..255 x &= 127, y &= 127, sampleIndex &= 255, sampleDimension &= 255; // xor index based on optimized ranking int rankedSampleIndex = (sampleIndex ^ params.blueNoise[sampleDimension + (x + y * 128) * 8 + 65536 * 3]) & 255; // fetch value in sequence int value = params.blueNoise[sampleDimension + rankedSampleIndex * 256]; // if the dimension is optimized, xor sequence value based on optimized scrambling value ^= params.blueNoise[(sampleDimension & 7) + (x + y * 128) * 8 + 65536]; // convert to float and return return (0.5f + value) * (1.0f / 256.0f); } static __inline __device__ float3 RandomPointOnLens( const float r0, float r1 ) { const float blade = (int)(r0 * 9); float r2 = (r0 - blade * (1.0f / 9.0f)) * 9.0f; float x1, y1, x2, y2; __sincosf( blade * PI / 4.5f, &x1, &y1 ); __sincosf( (blade + 1.0f) * PI / 4.5f, &x2, &y2 ); if ((r1 + r2) > 1) r1 = 1.0f - r1, r2 = 1.0f - r2; const float xr = x1 * r1 + x2 * r2; const float yr = y1 * r1 + y2 * r2; float4 posLens = params.posLensSize; return make_float3( posLens ) + posLens.w * (params.right * xr + params.up * yr); } static __inline __device__ void generateEyeRay( float3& O, float3& D, const uint pixelIdx, const uint sampleIdx, uint& seed ) { // random point on pixel and lens int sx = pixelIdx % params.scrsize.x; int sy = pixelIdx / params.scrsize.x; float r0, r1, r2, r3; if (params.j0 == -5.0f) { if (sampleIdx < 256) r0 = blueNoiseSampler( sx, sy, sampleIdx, 0 ), r1 = blueNoiseSampler( sx, sy, sampleIdx, 1 ), r2 = blueNoiseSampler( sx, sy, sampleIdx, 2 ), r3 = blueNoiseSampler( sx, sy, sampleIdx, 3 ); else r0 = RandomFloat( seed ), r1 = RandomFloat( seed ), r2 = RandomFloat( seed ), r3 = RandomFloat( seed ); O = RandomPointOnLens( r2, r3 ); } else { r0 = r1 = 0; O = make_float3( params.posLensSize ); } float3 posOnPixel; if (params.distortion == 0) { const float u = ((float)sx + r0) * (1.0f / params.scrsize.x); const float v = 
((float)sy + r1) * (1.0f / params.scrsize.y); posOnPixel = params.p1 + u * params.right + v * params.up; } else { const float tx = sx / (float)params.scrsize.x - 0.5f, ty = sy / (float)params.scrsize.y - 0.5f; const float rr = tx * tx + ty * ty; const float rq = sqrtf( rr ) * (1.0f + params.distortion * rr + params.distortion * rr * rr); const float theta = atan2f( tx, ty ); const float bx = (sinf( theta ) * rq + 0.5f) * params.scrsize.x; const float by = (cosf( theta ) * rq + 0.5f) * params.scrsize.y; posOnPixel = params.p1 + (bx + r0) * (params.right / (float)params.scrsize.x) + (by + r1) * (params.up / (float)params.scrsize.y); } D = normalize( posOnPixel - O ); } #if __CUDA_ARCH__ >= 700 #define THREADMASK __activemask() // volta, turing #else #define THREADMASK 0xffffffff // pascal, kepler, fermi #endif __device__ void setupPrimaryRay( const uint pathIdx, const uint stride ) { const uint pixelIdx = pathIdx % (params.scrsize.x * params.scrsize.y); const uint sampleIdx = pathIdx / (params.scrsize.x * params.scrsize.y) + params.pass; uint seed = WangHash( pathIdx * 16789 + params.pass * 1791 ); // generate eye ray float3 O, D; generateEyeRay( O, D, pixelIdx, sampleIdx, seed ); // populate path state array params.pathStates[pathIdx] = make_float4( O, __uint_as_float( (pathIdx << 6) + 1 /* S_SPECULAR in CUDA code */ ) ); params.pathStates[pathIdx + stride] = make_float4( D, 0 ); // trace eye ray uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f ); optixTrace( params.bvhRoot, O, D, params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 ); if (pixelIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */ params.hitData[pathIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) ); } __device__ void setupSecondaryRay( const uint rayIdx, const uint stride ) { const float4 O4 = params.pathStates[rayIdx]; const float4 D4 = params.pathStates[rayIdx + stride]; uint u0, u1 = 0, u2 = 0xffffffff, u3 = __float_as_uint( 1e34f ); optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, 1e34f, 0.0f /* ray time */, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, 0, 2, 0, u0, u1, u2, u3 ); if (rayIdx < stride /* OptiX bug workaround? */) if (u2 != 0xffffffff) /* bandwidth reduction */ params.hitData[rayIdx] = make_float4( __uint_as_float( u0 ), __uint_as_float( u1 ), __uint_as_float( u2 ), __uint_as_float( u3 ) ); } __device__ void generateShadowRay( const uint rayIdx, const uint stride ) { const float4 O4 = params.connectData[rayIdx]; // O4 const float4 D4 = params.connectData[rayIdx + stride * 2]; // D4 // launch shadow ray uint u0 = 1; optixTrace( params.bvhRoot, make_float3( O4 ), make_float3( D4 ), params.geometryEpsilon, D4.w, 0.0f /* ray time */, OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, 1, 2, 1, u0 ); if (u0) return; const float4 E4 = params.connectData[rayIdx + stride * 2 * 2]; // E4 const int pixelIdx = __float_as_int( E4.w ); if (pixelIdx < stride /* OptiX bug workaround? 
*/) params.accumulator[pixelIdx] += make_float4( E4.x, E4.y, E4.z, 1 ); } extern "C" __global__ void __raygen__rg() { const uint stride = params.scrsize.x * params.scrsize.y * params.scrsize.z; const uint3 idx = optixGetLaunchIndex(); const uint rayIdx = idx.x + idx.y * params.scrsize.x; switch (params.phase) { case Params::SPAWN_PRIMARY: /* primary rays */ setupPrimaryRay( rayIdx, stride ); break; case Params::SPAWN_SECONDARY: /* secondary rays */ setupSecondaryRay( rayIdx, stride ); break; case Params::SPAWN_SHADOW: /* shadow rays */ generateShadowRay( rayIdx, stride ); break; } } extern "C" __global__ void __miss__occlusion() { optixSetPayload_0( 0u ); // instead of any hit. suggested by WillUsher.io. } extern "C" __global__ void __closesthit__radiance() { const uint prim_idx = optixGetPrimitiveIndex(); const uint inst_idx = optixGetInstanceIndex(); const float2 bary = optixGetTriangleBarycentrics(); const float tmin = optixGetRayTmax(); optixSetPayload_0( (uint)(65535.0f * bary.x) + ((uint)(65535.0f * bary.y) << 16) ); optixSetPayload_1( inst_idx ); optixSetPayload_2( prim_idx ); optixSetPayload_3( __float_as_uint( tmin ) ); } // EOF
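__closesthit__radiance packs both triangle barycentrics into payload 0 as 16-bit fixed point, and the ray-generation programs store the four payload registers into hitData through __uint_as_float. A sketch of the matching decode on the shading side, as an assumption about code that lives outside this file (not the project's actual implementation):

// Hypothetical decode of a hitData record written by setupPrimaryRay / setupSecondaryRay.
// x: packed 16-bit barycentrics, y: instance index, z: primitive index, w: hit distance.
__device__ void decodeHit( const float4 hit, float2& bary, unsigned int& instIdx,
                           unsigned int& primIdx, float& hitT )
{
    const unsigned int packed = __float_as_uint( hit.x );
    bary.x  = (packed & 0xffff) * (1.0f / 65535.0f);
    bary.y  = (packed >> 16)    * (1.0f / 65535.0f);
    instIdx = __float_as_uint( hit.y );
    primIdx = __float_as_uint( hit.z );
    hitT    = hit.w;   // the distance was stored bit-exact, so it can be read back directly
}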
92b36a5692a092306a89ff48d4bdc4b0d1af8783.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Mark Gates @generated from magmablas/zlanhe.cu, normal z -> s, Wed Jan 2 14:18:51 2019 */ #include "magma_internal.h" #include "magma_templates.h" #define inf_bs 32 #define max_bs 64 #define PRECISION_s #define REAL // ============================================================================= // inf-norm /******************************************************************************/ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */ __global__ void slansy_inf_kernel_lower( int n, const float * __restrict__ A, int lda, float * __restrict__ dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block rows A += ind; A += ty * lda; // ---------- // loop over all blocks left of the diagonal block for (int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy lower triangle to upper triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for (int i=ty*8; i < ty*8 + 8; i++) { if ( i < tx ) { la[i][tx] = la[tx][i]; } #ifdef COMPLEX else if ( i == tx ) { la[i][i] = MAGMA_S_MAKE( MAGMA_S_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks below diagonal block A += inf_bs; for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; } A += inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is (n_mod_bs by inf_bs) if ( n_mod_bs > 0 ) { // load block (transposed), with zeros for rows outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } else { la[ty+j][tx] = MAGMA_S_ZERO; } } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_S_MAKE( res, 0. 
); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block row // Threads past end of matrix (i.e., ind >= n) are redundantly assigned // the last row (n-1). At the end, those results are ignored -- only // results for ind < n are saved into dwork. if ( tx < n_mod_bs ) { A += ind; } else { A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; // ---------- // loop over all blocks left of the diagonal block // each is (n_mod_bs by inf_bs) for (int i=0; i < diag; i += inf_bs ) { // load block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for (int j=0; j < 8; j++) { res += MAGMA_S_ABS( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // sum rows left of diagonal for (int j=0; j < tx; j++) { res += MAGMA_S_ABS( *A ); A += lda; } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_S_REAL( *A )); A += 1; // sum column below diagonal for (int j=tx+1; j < n_mod_bs; j++) { res += MAGMA_S_ABS( *A ); A += 1; } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /******************************************************************************/ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). * The upper implementation is similar to lower, but processes blocks * in the transposed order: * lower goes from left over to diagonal, then down to bottom; * upper goes from top down to diagonal, then over to right. * Differences are noted with # in comments. 
*/ __global__ void slansy_inf_kernel_upper( int n, const float * __restrict__ A, int lda, float * __restrict__ dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block #columns A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block for (int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block (#transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; //# } A += inf_bs; //# __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy #upper triangle to #lower triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for (int i=ty*8; i < ty*8 + 8; i++) { if ( i > tx ) { //# la[i][tx] = la[tx][i]; } #ifdef COMPLEX else if ( i == tx ) { la[i][i] = MAGMA_S_MAKE( MAGMA_S_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks #right of diagonal block A += inf_bs*lda; //# for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (#non-transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; //# } A += inf_bs*lda; //# __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is #(inf_bs by n_mod_bs) if ( n_mod_bs > 0 ) { // load block (#non-transposed), with zeros for #cols outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { //# la[tx][ty+j] = A[j*lda]; //# } else { la[tx][ty+j] = MAGMA_S_ZERO; //# } } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block #column // Instead of assigning threads ind >= n to the last row (n-1), as in Lower, // Upper simply adjusts loop bounds to avoid loading columns outside the matrix. // Again, at the end, those results are ignored -- only // results for ind < n are saved into dwork. 
A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block // each is #(inf_bs by n_mod_bs) for (int i=0; i < diag; i += inf_bs ) { // load block (#transposed), #ignoring columns outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } } A += inf_bs; //# __syncthreads(); // partial row sums #pragma unroll 8 for (int j=0; j < 8; j++) { res += MAGMA_S_ABS( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // #transpose pointer within diagonal block // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx). A = A - tx - ty*lda + tx*lda + ty; // sum #column above diagonal for (int j=0; j < tx; j++) { res += MAGMA_S_ABS( *A ); A += 1; //# } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_S_REAL( *A )); A += lda; //# // sum #row right of diagonal for (int j=tx+1; j < n_mod_bs; j++) { res += MAGMA_S_ABS( *A ); A += lda; //# } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /******************************************************************************/ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void slansy_inf( magma_uplo_t uplo, magma_int_t n, magmaFloat_const_ptr A, magma_int_t lda, magmaFloat_ptr dwork, magma_queue_t queue ) { dim3 threads( inf_bs, 4 ); dim3 grid( magma_ceildiv( n, inf_bs ), 1 ); magma_int_t n_full_block = (n - n % inf_bs) / inf_bs; magma_int_t n_mod_bs = n % inf_bs; if ( uplo == MagmaLower) { hipLaunchKernelGGL(( slansy_inf_kernel_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, dwork, n_full_block, n_mod_bs ); } else { hipLaunchKernelGGL(( slansy_inf_kernel_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, dwork, n_full_block, n_mod_bs ); } } // ============================================================================= // max-norm /******************************************************************************/ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void slansy_max_kernel_lower( int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; for (int j=0; j < ind; ++j) { res = max_nan( res, MAGMA_S_ABS( *A )); A += lda; } // diagonal element (ignoring imaginary part) res = max_nan( res, MAGMA_D_ABS( MAGMA_S_REAL( *A ))); dwork[ind] = res; } } /******************************************************************************/ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. 
*/ __global__ void slansy_max_kernel_upper( int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; A += (n-1)*lda; for (int j=n-1; j > ind; j--) { res = max_nan( res, MAGMA_S_ABS( *A )); A -= lda; } // diagonal element (ignoring imaginary part) res = max_nan( res, MAGMA_D_ABS( MAGMA_S_REAL( *A ))); dwork[ind] = res; } } /******************************************************************************/ /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void slansy_max( magma_uplo_t uplo, magma_int_t n, magmaFloat_const_ptr A, magma_int_t lda, magmaFloat_ptr dwork, magma_queue_t queue ) { dim3 threads( max_bs ); dim3 grid( magma_ceildiv( n, max_bs ) ); if ( uplo == MagmaLower ) { hipLaunchKernelGGL(( slansy_max_kernel_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, dwork ); } else { hipLaunchKernelGGL(( slansy_max_kernel_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, dwork ); } } /***************************************************************************//** Purpose ------- SLANSY returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric matrix A. SLANSY = ( max(abs(A(i,j))), NORM = MagmaMaxNorm ( ( norm1(A), NORM = MagmaOneNorm ( ( normI(A), NORM = MagmaInfNorm ( ( normF(A), NORM = MagmaFrobeniusNorm ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. On error, returns SLANSY < 0: if SLANSY = -i, the i-th argument had an illegal value. Arguments: ---------- @param[in] norm magma_norm_t Specifies the value to be returned in SLANSY as described above. @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced. - = MagmaUpper: Upper triangular part of A is referenced - = MagmaLower: Lower triangular part of A is referenced @param[in] n INTEGER The order of the matrix A. N >= 0. When N = 0, SLANSY is set to zero. @param[in] dA REAL array on the GPU, dimension (LDDA,N) The symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(N,1). @param dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is required only for norm1 and normI. Here max-norm also requires WORK. @param[in] lwork INTEGER The dimension of the array DWORK. LWORK >= max( 1, N ). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lanhe *******************************************************************************/ extern "C" float magmablas_slansy( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dwork, magma_int_t lwork, magma_queue_t queue ) { magma_int_t info = 0; // 1-norm == inf-norm since A is symmetric bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm); bool max_norm = (norm == MagmaMaxNorm); // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200) #if defined(PRECISION_z) const bool inf_implemented = (magma_getdevice_arch() >= 200); #else const bool inf_implemented = true; #endif if ( ! (max_norm || (inf_norm && inf_implemented)) ) info = -1; else if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < n ) info = -5; else if ( lwork < n ) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; float res = 0; if ( inf_norm ) { slansy_inf( uplo, n, dA, ldda, dwork, queue ); } else { slansy_max( uplo, n, dA, ldda, dwork, queue ); } hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , n, dwork ); magma_sgetvector( 1, &dwork[0], 1, &res, 1, queue ); return res; }
92b36a5692a092306a89ff48d4bdc4b0d1af8783.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Mark Gates @generated from magmablas/zlanhe.cu, normal z -> s, Wed Jan 2 14:18:51 2019 */ #include "magma_internal.h" #include "magma_templates.h" #define inf_bs 32 #define max_bs 64 #define PRECISION_s #define REAL // ============================================================================= // inf-norm /******************************************************************************/ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */ __global__ void slansy_inf_kernel_lower( int n, const float * __restrict__ A, int lda, float * __restrict__ dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block rows A += ind; A += ty * lda; // ---------- // loop over all blocks left of the diagonal block for (int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy lower triangle to upper triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for (int i=ty*8; i < ty*8 + 8; i++) { if ( i < tx ) { la[i][tx] = la[tx][i]; } #ifdef COMPLEX else if ( i == tx ) { la[i][i] = MAGMA_S_MAKE( MAGMA_S_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks below diagonal block A += inf_bs; for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; } A += inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is (n_mod_bs by inf_bs) if ( n_mod_bs > 0 ) { // load block (transposed), with zeros for rows outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } else { la[ty+j][tx] = MAGMA_S_ZERO; } } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_S_MAKE( res, 0. 
); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block row // Threads past end of matrix (i.e., ind >= n) are redundantly assigned // the last row (n-1). At the end, those results are ignored -- only // results for ind < n are saved into dwork. if ( tx < n_mod_bs ) { A += ind; } else { A += (blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; // ---------- // loop over all blocks left of the diagonal block // each is (n_mod_bs by inf_bs) for (int i=0; i < diag; i += inf_bs ) { // load block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } A += lda*inf_bs; __syncthreads(); // partial row sums #pragma unroll 8 for (int j=0; j < 8; j++) { res += MAGMA_S_ABS( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // sum rows left of diagonal for (int j=0; j < tx; j++) { res += MAGMA_S_ABS( *A ); A += lda; } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_S_REAL( *A )); A += 1; // sum column below diagonal for (int j=tx+1; j < n_mod_bs; j++) { res += MAGMA_S_ABS( *A ); A += 1; } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /******************************************************************************/ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32). * z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). * The upper implementation is similar to lower, but processes blocks * in the transposed order: * lower goes from left over to diagonal, then down to bottom; * upper goes from top down to diagonal, then over to right. * Differences are noted with # in comments. 
*/ __global__ void slansy_inf_kernel_upper( int n, const float * __restrict__ A, int lda, float * __restrict__ dwork, int n_full_block, int n_mod_bs ) { #if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int diag = blockIdx.x*inf_bs; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; if ( blockIdx.x < n_full_block ) { // ------------------------------ // All full block #columns A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block for (int i=0; i < diag; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block (#transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[j*lda]; //# } A += inf_bs; //# __syncthreads(); // compute 4 partial sums of each row, i.e., // for ty=0: res = sum( la[tx, 0: 7] ) // for ty=1: res = sum( la[tx, 8:15] ) // for ty=2: res = sum( la[tx,16:23] ) // for ty=3: res = sum( la[tx,24:31] ) #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // load diagonal block #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // copy #upper triangle to #lower triangle, and // make diagonal real (zero imaginary part) #pragma unroll 8 for (int i=ty*8; i < ty*8 + 8; i++) { if ( i > tx ) { //# la[i][tx] = la[tx][i]; } #ifdef COMPLEX else if ( i == tx ) { la[i][i] = MAGMA_S_MAKE( MAGMA_S_REAL( la[i][i] ), 0 ); } #endif } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); // ---------- // loop over all 32x32 blocks #right of diagonal block A += inf_bs*lda; //# for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) { // load block (#non-transposed) #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; //# } A += inf_bs*lda; //# __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // last partial block, which is #(inf_bs by n_mod_bs) if ( n_mod_bs > 0 ) { // load block (#non-transposed), with zeros for #cols outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { //# la[tx][ty+j] = A[j*lda]; //# } else { la[tx][ty+j] = MAGMA_S_ZERO; //# } } __syncthreads(); // partial row sums #pragma unroll 8 for (int j=ty*8; j < ty*8 + 8; j++) { res += MAGMA_S_ABS( la[tx][j] ); } __syncthreads(); } // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } else { // ------------------------------ // Last, partial block #column // Instead of assigning threads ind >= n to the last row (n-1), as in Lower, // Upper simply adjusts loop bounds to avoid loading columns outside the matrix. // Again, at the end, those results are ignored -- only // results for ind < n are saved into dwork. 
A += blockIdx.x*inf_bs*lda + tx; //# A += ty * lda; // ---------- // loop over all blocks #above the diagonal block // each is #(inf_bs by n_mod_bs) for (int i=0; i < diag; i += inf_bs ) { // load block (#transposed), #ignoring columns outside matrix #pragma unroll 8 for (int j=0; j < inf_bs; j += 4) { if ( ty+j < n_mod_bs ) { la[ty+j][tx] = A[j*lda]; } } A += inf_bs; //# __syncthreads(); // partial row sums #pragma unroll 8 for (int j=0; j < 8; j++) { res += MAGMA_S_ABS( la[tx][j+ty*8] ); } __syncthreads(); } // ---------- // partial diagonal block if ( ty == 0 && tx < n_mod_bs ) { // #transpose pointer within diagonal block // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx). A = A - tx - ty*lda + tx*lda + ty; // sum #column above diagonal for (int j=0; j < tx; j++) { res += MAGMA_S_ABS( *A ); A += 1; //# } // sum diagonal (ignoring imaginary part) res += MAGMA_D_ABS( MAGMA_S_REAL( *A )); A += lda; //# // sum #row right of diagonal for (int j=tx+1; j < n_mod_bs; j++) { res += MAGMA_S_ABS( *A ); A += lda; //# } } __syncthreads(); // ---------- // 32x4 threads store partial sums into shared memory la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); // first column of 32x1 threads computes final sum of each row // rows outside matrix are ignored if ( ty == 0 && tx < n_mod_bs ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */ } /******************************************************************************/ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void slansy_inf( magma_uplo_t uplo, magma_int_t n, magmaFloat_const_ptr A, magma_int_t lda, magmaFloat_ptr dwork, magma_queue_t queue ) { dim3 threads( inf_bs, 4 ); dim3 grid( magma_ceildiv( n, inf_bs ), 1 ); magma_int_t n_full_block = (n - n % inf_bs) / inf_bs; magma_int_t n_mod_bs = n % inf_bs; if ( uplo == MagmaLower) { slansy_inf_kernel_lower<<< grid, threads, 0, queue->cuda_stream() >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } else { slansy_inf_kernel_upper<<< grid, threads, 0, queue->cuda_stream() >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } } // ============================================================================= // max-norm /******************************************************************************/ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void slansy_max_kernel_lower( int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; for (int j=0; j < ind; ++j) { res = max_nan( res, MAGMA_S_ABS( *A )); A += lda; } // diagonal element (ignoring imaginary part) res = max_nan( res, MAGMA_D_ABS( MAGMA_S_REAL( *A ))); dwork[ind] = res; } } /******************************************************************************/ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. 
*/ __global__ void slansy_max_kernel_upper( int n, const float * __restrict__ A, int lda, float * __restrict__ dwork ) { int ind = blockIdx.x*max_bs + threadIdx.x; float res = 0; if (ind < n) { A += ind; A += (n-1)*lda; for (int j=n-1; j > ind; j--) { res = max_nan( res, MAGMA_S_ABS( *A )); A -= lda; } // diagonal element (ignoring imaginary part) res = max_nan( res, MAGMA_D_ABS( MAGMA_S_REAL( *A ))); dwork[ind] = res; } } /******************************************************************************/ /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void slansy_max( magma_uplo_t uplo, magma_int_t n, magmaFloat_const_ptr A, magma_int_t lda, magmaFloat_ptr dwork, magma_queue_t queue ) { dim3 threads( max_bs ); dim3 grid( magma_ceildiv( n, max_bs ) ); if ( uplo == MagmaLower ) { slansy_max_kernel_lower<<< grid, threads, 0, queue->cuda_stream() >>> ( n, A, lda, dwork ); } else { slansy_max_kernel_upper<<< grid, threads, 0, queue->cuda_stream() >>> ( n, A, lda, dwork ); } } /***************************************************************************//** Purpose ------- SLANSY returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric matrix A. SLANSY = ( max(abs(A(i,j))), NORM = MagmaMaxNorm ( ( norm1(A), NORM = MagmaOneNorm ( ( normI(A), NORM = MagmaInfNorm ( ( normF(A), NORM = MagmaFrobeniusNorm ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. On error, returns SLANSY < 0: if SLANSY = -i, the i-th argument had an illegal value. Arguments: ---------- @param[in] norm magma_norm_t Specifies the value to be returned in SLANSY as described above. @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced. - = MagmaUpper: Upper triangular part of A is referenced - = MagmaLower: Lower triangular part of A is referenced @param[in] n INTEGER The order of the matrix A. N >= 0. When N = 0, SLANSY is set to zero. @param[in] dA REAL array on the GPU, dimension (LDDA,N) The symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(N,1). @param dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is required only for norm1 and normI. Here max-norm also requires WORK. @param[in] lwork INTEGER The dimension of the array DWORK. LWORK >= max( 1, N ). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lanhe *******************************************************************************/ extern "C" float magmablas_slansy( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda, magmaFloat_ptr dwork, magma_int_t lwork, magma_queue_t queue ) { magma_int_t info = 0; // 1-norm == inf-norm since A is symmetric bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm); bool max_norm = (norm == MagmaMaxNorm); // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200) #if defined(PRECISION_z) const bool inf_implemented = (magma_getdevice_arch() >= 200); #else const bool inf_implemented = true; #endif if ( ! (max_norm || (inf_norm && inf_implemented)) ) info = -1; else if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < n ) info = -5; else if ( lwork < n ) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; float res = 0; if ( inf_norm ) { slansy_inf( uplo, n, dA, ldda, dwork, queue ); } else { slansy_max( uplo, n, dA, ldda, dwork, queue ); } magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( n, dwork ); magma_sgetvector( 1, &dwork[0], 1, &res, 1, queue ); return res; }
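// ----------------------------------------------------------------------------
// Added note (hedged): a minimal host-side sketch, not part of the original file,
// of how magmablas_slansy above might be called to obtain the infinity norm of a
// lower-stored symmetric matrix. The argument order follows the Purpose section;
// `n`, `dA`, and `ldda` are assumed to describe a matrix already resident on the
// GPU, and the MAGMA helpers used here (magma_smalloc, magma_free,
// magma_queue_create/destroy) are standard MAGMA utilities, not defined in this file.
extern "C" float example_slansy_inf_norm( magma_int_t n, magmaFloat_const_ptr dA, magma_int_t ldda )
{
    magma_queue_t queue;
    magma_queue_create( 0, &queue );

    magmaFloat_ptr dwork;
    magma_smalloc( &dwork, n );                        // workspace, LWORK >= max(1, N)

    float norm = magmablas_slansy( MagmaInfNorm, MagmaLower,
                                   n, dA, ldda,
                                   dwork, n, queue );  // maximum row sum of A

    magma_free( dwork );
    magma_queue_destroy( queue );
    return norm;
}
// ----------------------------------------------------------------------------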
f23aace643c63302deb5fa15514ff086b56b5188.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Extracts a surface mesh from the internal volume using the Marching Cubes algorithm // This is CUDA code; compile with nvcc // Author: Christian Diller, [email protected] // Thrust, for prefix scanning #include <thrust/device_ptr.h> #include <thrust/scan.h> // Internals #include "include/common.h" #include "include/mc_tables.h" using cv::cuda::GpuMat; namespace kinectfusion { namespace internal { namespace cuda { __device__ int global_count = 0; __device__ int output_count; __device__ unsigned int blocks_done = 0; //##### HELPERS ##### static __device__ __forceinline__ unsigned int lane_ID() { unsigned int ret; asm("mov.u32 %0, %laneid;" : "=r"(ret)); return ret; } static __device__ __forceinline__ int laneMaskLt() { unsigned int ret; asm("mov.u32 %0, %lanemask_lt;" : "=r"(ret)); return ret; } static __device__ __forceinline__ int binaryExclScan(int ballot_mask) { return __popc(laneMaskLt() & ballot_mask); } __device__ __forceinline__ float read_tsdf(const PtrStep<short2> tsdf_volume, const int3 volume_size, const int x, const int y, const int z, short& weight) { short2 voxel_tuple = tsdf_volume.ptr(z * volume_size.y + y)[x]; weight = voxel_tuple.y; return static_cast<float>(voxel_tuple.x) * DIVSHORTMAX; } __device__ __forceinline__ int compute_cube_index(const PtrStep<short2> tsdf_volume, const int3 volume_size, const int x, const int y, const int z, float tsdf_values[8]) { short weight; int cube_index = 0; // calculate flag indicating if each vertex is inside or outside isosurface cube_index += static_cast<int>(tsdf_values[0] = read_tsdf(tsdf_volume, volume_size, x, y, z, weight) < 0.f); if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[1] = read_tsdf(tsdf_volume, volume_size, x + 1, y, z, weight) < 0.f) << 1; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[2] = read_tsdf(tsdf_volume, volume_size, x + 1, y + 1, z, weight) < 0.f) << 2; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[3] = read_tsdf(tsdf_volume, volume_size, x, y + 1, z, weight) < 0.f) << 3; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[4] = read_tsdf(tsdf_volume, volume_size, x, y, z + 1, weight) < 0.f) << 4; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[5] = read_tsdf(tsdf_volume, volume_size, x + 1, y, z + 1, weight) < 0.f) << 5; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[6] = read_tsdf(tsdf_volume, volume_size, x + 1, y + 1, z + 1, weight) < 0.f) << 6; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[7] = read_tsdf(tsdf_volume, volume_size, x, y + 1, z + 1, weight) < 0.f) << 7; if (weight == 0) return 0; return cube_index; } __device__ __forceinline__ float3 get_node_coordinates(const int x, const int y, const int z, const float voxel_size) { float3 position; position.x = (x + 0.5f) * voxel_size; position.y = (y + 0.5f) * voxel_size; position.z = (z + 0.5f) * voxel_size; return position; } __device__ __forceinline__ float3 vertex_interpolate(const float3 p0, const float3 p1, const float f0, const float f1) { float t = (0.f - f0) / (f1 - f0 + 1e-15f); return make_float3(p0.x + t * (p1.x - p0.x), p0.y + t * (p1.y - p0.y), p0.z + t * (p1.z - p0.z)); } //##### KERNELS ##### __global__ void get_occupied_voxels_kernel(const PtrStep<short2> volume, const int3 volume_size, PtrStepSz<int> occupied_voxel_indices, PtrStepSz<int> number_vertices, const PtrStepSz<int> 
number_vertices_table) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; if (__all_sync(0xFFFFFFFF, x >= volume_size.x) || __all_sync(0xFFFFFFFF, y >= volume_size.y)) return; const auto flattened_tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; const auto warp_id = flattened_tid >> 5; const auto lane_id = lane_ID(); volatile __shared__ int warps_buffer[32]; // Number of threads / Warp size for (int z = 0; z < volume_size.z - 1; ++z) { int n_vertices = 0; if (x + 1 < volume_size.x && y + 1 < volume_size.y) { float tsdf_values[8]; const int cube_index = compute_cube_index(volume, volume_size, x, y, z, tsdf_values); n_vertices = (cube_index == 0 || cube_index == 255) ? 0 : number_vertices_table.ptr(0)[cube_index]; } const int total = __popc(__ballot_sync(0xFFFFFFFF, n_vertices > 0)); if (total == 0) continue; if (lane_id == 0) { const int old = atomicAdd(&global_count, total); warps_buffer[warp_id] = old; } const int old_global_voxels_count = warps_buffer[warp_id]; const int offset = binaryExclScan(__ballot_sync(0xFFFFFFFF, n_vertices > 0)); const int max_size = occupied_voxel_indices.cols; if (old_global_voxels_count + offset < max_size && n_vertices > 0) { const int current_voxel_index = volume_size.y * volume_size.x * z + volume_size.x * y + x; occupied_voxel_indices.ptr(0)[old_global_voxels_count + offset] = current_voxel_index; number_vertices.ptr(0)[old_global_voxels_count + offset] = n_vertices; } bool full = old_global_voxels_count + total >= max_size; if (full) break; } if (flattened_tid == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc(&blocks_done, total_blocks); if (value == total_blocks - 1) { output_count = min(occupied_voxel_indices.cols, global_count); blocks_done = 0; global_count = 0; } } } __global__ void generate_triangles_kernel(const PtrStep<short2> tsdf_volume, const int3 volume_size, const float voxel_size, const PtrStepSz<int> occupied_voxels, const PtrStepSz<int> vertex_offsets, const PtrStep<int> number_vertices_table, const PtrStep<int> triangle_table, PtrStep<float3> triangle_buffer) { const int idx = (blockIdx.y * 65536 + blockIdx.x) * 256 + threadIdx.x; if (idx >= occupied_voxels.cols) return; const int voxel = occupied_voxels.ptr(0)[idx]; const int z = voxel / (volume_size.x * volume_size.y); const int y = (voxel - z * volume_size.x * volume_size.y) / volume_size.x; const int x = (voxel - z * volume_size.x * volume_size.y) - y * volume_size.x; float tsdf_values[8]; const int cube_index = compute_cube_index(tsdf_volume, volume_size, x, y, z, tsdf_values); float3 v[8]; v[0] = get_node_coordinates(x, y, z, voxel_size); v[1] = get_node_coordinates(x + 1, y, z, voxel_size); v[2] = get_node_coordinates(x + 1, y + 1, z, voxel_size); v[3] = get_node_coordinates(x, y + 1, z, voxel_size); v[4] = get_node_coordinates(x, y, z + 1, voxel_size); v[5] = get_node_coordinates(x + 1, y, z + 1, voxel_size); v[6] = get_node_coordinates(x + 1, y + 1, z + 1, voxel_size); v[7] = get_node_coordinates(x, y + 1, z + 1, voxel_size); __shared__ float3 vertex_list[12][256]; vertex_list[0][threadIdx.x] = vertex_interpolate(v[0], v[1], tsdf_values[0], tsdf_values[1]); vertex_list[1][threadIdx.x] = vertex_interpolate(v[1], v[2], tsdf_values[1], tsdf_values[2]); vertex_list[2][threadIdx.x] = vertex_interpolate(v[2], v[3], tsdf_values[2], tsdf_values[3]); vertex_list[3][threadIdx.x] = vertex_interpolate(v[3], v[0], tsdf_values[3], tsdf_values[0]); 
vertex_list[4][threadIdx.x] = vertex_interpolate(v[4], v[5], tsdf_values[4], tsdf_values[5]); vertex_list[5][threadIdx.x] = vertex_interpolate(v[5], v[6], tsdf_values[5], tsdf_values[6]); vertex_list[6][threadIdx.x] = vertex_interpolate(v[6], v[7], tsdf_values[6], tsdf_values[7]); vertex_list[7][threadIdx.x] = vertex_interpolate(v[7], v[4], tsdf_values[7], tsdf_values[4]); vertex_list[8][threadIdx.x] = vertex_interpolate(v[0], v[4], tsdf_values[0], tsdf_values[4]); vertex_list[9][threadIdx.x] = vertex_interpolate(v[1], v[5], tsdf_values[1], tsdf_values[5]); vertex_list[10][threadIdx.x] = vertex_interpolate(v[2], v[6], tsdf_values[2], tsdf_values[6]); vertex_list[11][threadIdx.x] = vertex_interpolate(v[3], v[7], tsdf_values[3], tsdf_values[7]); __syncthreads(); const int n_vertices = number_vertices_table.ptr(0)[cube_index]; for (int i = 0; i < n_vertices; i += 3) { const int index = vertex_offsets.ptr(0)[idx] + i; const int v1 = triangle_table.ptr(0)[(cube_index * 16) + i + 0]; const int v2 = triangle_table.ptr(0)[(cube_index * 16) + i + 1]; const int v3 = triangle_table.ptr(0)[(cube_index * 16) + i + 2]; triangle_buffer.ptr(0)[index + 0] = make_float3(vertex_list[v1][threadIdx.x].x, vertex_list[v1][threadIdx.x].y, vertex_list[v1][threadIdx.x].z); triangle_buffer.ptr(0)[index + 1] = make_float3(vertex_list[v2][threadIdx.x].x, vertex_list[v2][threadIdx.x].y, vertex_list[v2][threadIdx.x].z); triangle_buffer.ptr(0)[index + 2] = make_float3(vertex_list[v3][threadIdx.x].x, vertex_list[v3][threadIdx.x].y, vertex_list[v3][threadIdx.x].z); } } __global__ void get_color_values_kernel(const PtrStep<uchar3> color_volume, const int3 volume_size, const float voxel_scale, const PtrStep<float3> vertices, PtrStepSz<uchar3> vertex_colors) { const auto thread_id = blockDim.x * blockIdx.x + threadIdx.x; if (thread_id >= vertex_colors.cols) return; const float3 vertex = vertices.ptr(0)[thread_id]; const int3 location_in_grid{static_cast<int>(vertex.x / voxel_scale), static_cast<int>(vertex.y / voxel_scale), static_cast<int>(vertex.z / voxel_scale)}; uchar3 color_value = color_volume.ptr( location_in_grid.z * volume_size.y + location_in_grid.y)[location_in_grid.x]; vertex_colors.ptr(0)[thread_id] = color_value; } //##### HOST FUNCTIONS ##### SurfaceMesh marching_cubes(const VolumeData& volume, const int triangles_buffer_size) { MeshData mesh_data(triangles_buffer_size / 3); // ### PREPARATION : Upload lookup tables ### GpuMat number_vertices_table, triangle_table; number_vertices_table = cv::cuda::createContinuous(256, 1, CV_32SC1); number_vertices_table.upload(cv::Mat(256, 1, CV_32SC1, number_vertices_table_host, cv::Mat::AUTO_STEP)); triangle_table = cv::cuda::createContinuous(256, 16, CV_32SC1); triangle_table.upload(cv::Mat(256, 16, CV_32SC1, triangle_table_host, cv::Mat::AUTO_STEP)); // ### ### //### KERNEL ONE : Get occupied voxels ### dim3 threads(32, 32); dim3 blocks(static_cast<unsigned>(::ceil(volume.volume_size.x / threads.x)), static_cast<unsigned>(::ceil(volume.volume_size.y / threads.y))); hipLaunchKernelGGL(( get_occupied_voxels_kernel), dim3(blocks), dim3(threads), 0, 0, volume.tsdf_volume, volume.volume_size, mesh_data.occupied_voxel_ids_buffer, mesh_data.number_vertices_buffer, number_vertices_table); hipDeviceSynchronize(); int active_voxels = 0; hipMemcpyFromSymbol(&active_voxels, output_count, sizeof(active_voxels)); // ### ### //### THRUST PART : Do an exclusive scan on the GPU ### mesh_data.create_view(active_voxels); thrust::device_ptr<int> beg = 
thrust::device_pointer_cast(mesh_data.number_vertices.ptr<int>(0)); thrust::device_ptr<int> end = beg + active_voxels; thrust::device_ptr<int> out = thrust::device_pointer_cast(mesh_data.vertex_offsets.ptr<int>(0)); thrust::exclusive_scan(beg, end, out); int last_element, last_scan_element; hipMemcpy(&last_element, mesh_data.number_vertices.ptr<int>(0) + active_voxels - 1, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&last_scan_element, mesh_data.vertex_offsets.ptr<int>(0) + active_voxels - 1, sizeof(int), hipMemcpyDeviceToHost); const int total_vertices = last_element + last_scan_element; // ### ### //### KERNEL TWO ### const int n_threads = 256; dim3 block(n_threads); unsigned blocks_num = static_cast<unsigned>(::ceil(active_voxels / n_threads)); dim3 grid(min(blocks_num, 65536), static_cast<unsigned>(::ceil(blocks_num / 65536))); grid.y = 1; hipLaunchKernelGGL(( generate_triangles_kernel), dim3(grid), dim3(block), 0, 0, volume.tsdf_volume, volume.volume_size, volume.voxel_scale, mesh_data.occupied_voxel_ids, mesh_data.vertex_offsets, number_vertices_table, triangle_table, mesh_data.triangle_buffer); hipDeviceSynchronize(); // ### ### // Get triangle vertex colors GpuMat triangles_output(mesh_data.triangle_buffer, cv::Range::all(), cv::Range(0, total_vertices)); GpuMat vertex_colors = cv::cuda::createContinuous(1, total_vertices, CV_8UC3); int n_blocks = static_cast<int>(::ceil(total_vertices / 1024)); hipLaunchKernelGGL(( get_color_values_kernel), dim3(n_blocks), dim3(1024), 0, 0, volume.color_volume, volume.volume_size, volume.voxel_scale, triangles_output, vertex_colors); hipDeviceSynchronize(); // Download triangles cv::Mat vertex_output {}; triangles_output.download(vertex_output); cv::Mat color_output {}; vertex_colors.download(color_output); return SurfaceMesh { vertex_output, color_output, total_vertices, total_vertices / 3 }; } } } }
f23aace643c63302deb5fa15514ff086b56b5188.cu
// Extracts a surface mesh from the internal volume using the Marching Cubes algorithm // This is CUDA code; compile with nvcc // Author: Christian Diller, [email protected] // Thrust, for prefix scanning #include <thrust/device_ptr.h> #include <thrust/scan.h> // Internals #include "include/common.h" #include "include/mc_tables.h" using cv::cuda::GpuMat; namespace kinectfusion { namespace internal { namespace cuda { __device__ int global_count = 0; __device__ int output_count; __device__ unsigned int blocks_done = 0; //##### HELPERS ##### static __device__ __forceinline__ unsigned int lane_ID() { unsigned int ret; asm("mov.u32 %0, %laneid;" : "=r"(ret)); return ret; } static __device__ __forceinline__ int laneMaskLt() { unsigned int ret; asm("mov.u32 %0, %lanemask_lt;" : "=r"(ret)); return ret; } static __device__ __forceinline__ int binaryExclScan(int ballot_mask) { return __popc(laneMaskLt() & ballot_mask); } __device__ __forceinline__ float read_tsdf(const PtrStep<short2> tsdf_volume, const int3 volume_size, const int x, const int y, const int z, short& weight) { short2 voxel_tuple = tsdf_volume.ptr(z * volume_size.y + y)[x]; weight = voxel_tuple.y; return static_cast<float>(voxel_tuple.x) * DIVSHORTMAX; } __device__ __forceinline__ int compute_cube_index(const PtrStep<short2> tsdf_volume, const int3 volume_size, const int x, const int y, const int z, float tsdf_values[8]) { short weight; int cube_index = 0; // calculate flag indicating if each vertex is inside or outside isosurface cube_index += static_cast<int>(tsdf_values[0] = read_tsdf(tsdf_volume, volume_size, x, y, z, weight) < 0.f); if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[1] = read_tsdf(tsdf_volume, volume_size, x + 1, y, z, weight) < 0.f) << 1; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[2] = read_tsdf(tsdf_volume, volume_size, x + 1, y + 1, z, weight) < 0.f) << 2; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[3] = read_tsdf(tsdf_volume, volume_size, x, y + 1, z, weight) < 0.f) << 3; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[4] = read_tsdf(tsdf_volume, volume_size, x, y, z + 1, weight) < 0.f) << 4; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[5] = read_tsdf(tsdf_volume, volume_size, x + 1, y, z + 1, weight) < 0.f) << 5; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[6] = read_tsdf(tsdf_volume, volume_size, x + 1, y + 1, z + 1, weight) < 0.f) << 6; if (weight == 0) return 0; cube_index += static_cast<int>(tsdf_values[7] = read_tsdf(tsdf_volume, volume_size, x, y + 1, z + 1, weight) < 0.f) << 7; if (weight == 0) return 0; return cube_index; } __device__ __forceinline__ float3 get_node_coordinates(const int x, const int y, const int z, const float voxel_size) { float3 position; position.x = (x + 0.5f) * voxel_size; position.y = (y + 0.5f) * voxel_size; position.z = (z + 0.5f) * voxel_size; return position; } __device__ __forceinline__ float3 vertex_interpolate(const float3 p0, const float3 p1, const float f0, const float f1) { float t = (0.f - f0) / (f1 - f0 + 1e-15f); return make_float3(p0.x + t * (p1.x - p0.x), p0.y + t * (p1.y - p0.y), p0.z + t * (p1.z - p0.z)); } //##### KERNELS ##### __global__ void get_occupied_voxels_kernel(const PtrStep<short2> volume, const int3 volume_size, PtrStepSz<int> occupied_voxel_indices, PtrStepSz<int> number_vertices, const PtrStepSz<int> number_vertices_table) { const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + 
blockIdx.y * blockDim.y; if (__all_sync(0xFFFFFFFF, x >= volume_size.x) || __all_sync(0xFFFFFFFF, y >= volume_size.y)) return; const auto flattened_tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; const auto warp_id = flattened_tid >> 5; const auto lane_id = lane_ID(); volatile __shared__ int warps_buffer[32]; // Number of threads / Warp size for (int z = 0; z < volume_size.z - 1; ++z) { int n_vertices = 0; if (x + 1 < volume_size.x && y + 1 < volume_size.y) { float tsdf_values[8]; const int cube_index = compute_cube_index(volume, volume_size, x, y, z, tsdf_values); n_vertices = (cube_index == 0 || cube_index == 255) ? 0 : number_vertices_table.ptr(0)[cube_index]; } const int total = __popc(__ballot_sync(0xFFFFFFFF, n_vertices > 0)); if (total == 0) continue; if (lane_id == 0) { const int old = atomicAdd(&global_count, total); warps_buffer[warp_id] = old; } const int old_global_voxels_count = warps_buffer[warp_id]; const int offset = binaryExclScan(__ballot_sync(0xFFFFFFFF, n_vertices > 0)); const int max_size = occupied_voxel_indices.cols; if (old_global_voxels_count + offset < max_size && n_vertices > 0) { const int current_voxel_index = volume_size.y * volume_size.x * z + volume_size.x * y + x; occupied_voxel_indices.ptr(0)[old_global_voxels_count + offset] = current_voxel_index; number_vertices.ptr(0)[old_global_voxels_count + offset] = n_vertices; } bool full = old_global_voxels_count + total >= max_size; if (full) break; } if (flattened_tid == 0) { unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z; unsigned int value = atomicInc(&blocks_done, total_blocks); if (value == total_blocks - 1) { output_count = min(occupied_voxel_indices.cols, global_count); blocks_done = 0; global_count = 0; } } } __global__ void generate_triangles_kernel(const PtrStep<short2> tsdf_volume, const int3 volume_size, const float voxel_size, const PtrStepSz<int> occupied_voxels, const PtrStepSz<int> vertex_offsets, const PtrStep<int> number_vertices_table, const PtrStep<int> triangle_table, PtrStep<float3> triangle_buffer) { const int idx = (blockIdx.y * 65536 + blockIdx.x) * 256 + threadIdx.x; if (idx >= occupied_voxels.cols) return; const int voxel = occupied_voxels.ptr(0)[idx]; const int z = voxel / (volume_size.x * volume_size.y); const int y = (voxel - z * volume_size.x * volume_size.y) / volume_size.x; const int x = (voxel - z * volume_size.x * volume_size.y) - y * volume_size.x; float tsdf_values[8]; const int cube_index = compute_cube_index(tsdf_volume, volume_size, x, y, z, tsdf_values); float3 v[8]; v[0] = get_node_coordinates(x, y, z, voxel_size); v[1] = get_node_coordinates(x + 1, y, z, voxel_size); v[2] = get_node_coordinates(x + 1, y + 1, z, voxel_size); v[3] = get_node_coordinates(x, y + 1, z, voxel_size); v[4] = get_node_coordinates(x, y, z + 1, voxel_size); v[5] = get_node_coordinates(x + 1, y, z + 1, voxel_size); v[6] = get_node_coordinates(x + 1, y + 1, z + 1, voxel_size); v[7] = get_node_coordinates(x, y + 1, z + 1, voxel_size); __shared__ float3 vertex_list[12][256]; vertex_list[0][threadIdx.x] = vertex_interpolate(v[0], v[1], tsdf_values[0], tsdf_values[1]); vertex_list[1][threadIdx.x] = vertex_interpolate(v[1], v[2], tsdf_values[1], tsdf_values[2]); vertex_list[2][threadIdx.x] = vertex_interpolate(v[2], v[3], tsdf_values[2], tsdf_values[3]); vertex_list[3][threadIdx.x] = vertex_interpolate(v[3], v[0], tsdf_values[3], tsdf_values[0]); vertex_list[4][threadIdx.x] = vertex_interpolate(v[4], v[5], tsdf_values[4], tsdf_values[5]); 
vertex_list[5][threadIdx.x] = vertex_interpolate(v[5], v[6], tsdf_values[5], tsdf_values[6]); vertex_list[6][threadIdx.x] = vertex_interpolate(v[6], v[7], tsdf_values[6], tsdf_values[7]); vertex_list[7][threadIdx.x] = vertex_interpolate(v[7], v[4], tsdf_values[7], tsdf_values[4]); vertex_list[8][threadIdx.x] = vertex_interpolate(v[0], v[4], tsdf_values[0], tsdf_values[4]); vertex_list[9][threadIdx.x] = vertex_interpolate(v[1], v[5], tsdf_values[1], tsdf_values[5]); vertex_list[10][threadIdx.x] = vertex_interpolate(v[2], v[6], tsdf_values[2], tsdf_values[6]); vertex_list[11][threadIdx.x] = vertex_interpolate(v[3], v[7], tsdf_values[3], tsdf_values[7]); __syncthreads(); const int n_vertices = number_vertices_table.ptr(0)[cube_index]; for (int i = 0; i < n_vertices; i += 3) { const int index = vertex_offsets.ptr(0)[idx] + i; const int v1 = triangle_table.ptr(0)[(cube_index * 16) + i + 0]; const int v2 = triangle_table.ptr(0)[(cube_index * 16) + i + 1]; const int v3 = triangle_table.ptr(0)[(cube_index * 16) + i + 2]; triangle_buffer.ptr(0)[index + 0] = make_float3(vertex_list[v1][threadIdx.x].x, vertex_list[v1][threadIdx.x].y, vertex_list[v1][threadIdx.x].z); triangle_buffer.ptr(0)[index + 1] = make_float3(vertex_list[v2][threadIdx.x].x, vertex_list[v2][threadIdx.x].y, vertex_list[v2][threadIdx.x].z); triangle_buffer.ptr(0)[index + 2] = make_float3(vertex_list[v3][threadIdx.x].x, vertex_list[v3][threadIdx.x].y, vertex_list[v3][threadIdx.x].z); } } __global__ void get_color_values_kernel(const PtrStep<uchar3> color_volume, const int3 volume_size, const float voxel_scale, const PtrStep<float3> vertices, PtrStepSz<uchar3> vertex_colors) { const auto thread_id = blockDim.x * blockIdx.x + threadIdx.x; if (thread_id >= vertex_colors.cols) return; const float3 vertex = vertices.ptr(0)[thread_id]; const int3 location_in_grid{static_cast<int>(vertex.x / voxel_scale), static_cast<int>(vertex.y / voxel_scale), static_cast<int>(vertex.z / voxel_scale)}; uchar3 color_value = color_volume.ptr( location_in_grid.z * volume_size.y + location_in_grid.y)[location_in_grid.x]; vertex_colors.ptr(0)[thread_id] = color_value; } //##### HOST FUNCTIONS ##### SurfaceMesh marching_cubes(const VolumeData& volume, const int triangles_buffer_size) { MeshData mesh_data(triangles_buffer_size / 3); // ### PREPARATION : Upload lookup tables ### GpuMat number_vertices_table, triangle_table; number_vertices_table = cv::cuda::createContinuous(256, 1, CV_32SC1); number_vertices_table.upload(cv::Mat(256, 1, CV_32SC1, number_vertices_table_host, cv::Mat::AUTO_STEP)); triangle_table = cv::cuda::createContinuous(256, 16, CV_32SC1); triangle_table.upload(cv::Mat(256, 16, CV_32SC1, triangle_table_host, cv::Mat::AUTO_STEP)); // ### ### //### KERNEL ONE : Get occupied voxels ### dim3 threads(32, 32); dim3 blocks(static_cast<unsigned>(std::ceil(volume.volume_size.x / threads.x)), static_cast<unsigned>(std::ceil(volume.volume_size.y / threads.y))); get_occupied_voxels_kernel<<<blocks, threads>>>(volume.tsdf_volume, volume.volume_size, mesh_data.occupied_voxel_ids_buffer, mesh_data.number_vertices_buffer, number_vertices_table); cudaDeviceSynchronize(); int active_voxels = 0; cudaMemcpyFromSymbol(&active_voxels, output_count, sizeof(active_voxels)); // ### ### //### THRUST PART : Do an exclusive scan on the GPU ### mesh_data.create_view(active_voxels); thrust::device_ptr<int> beg = thrust::device_pointer_cast(mesh_data.number_vertices.ptr<int>(0)); thrust::device_ptr<int> end = beg + active_voxels; thrust::device_ptr<int> out = 
thrust::device_pointer_cast(mesh_data.vertex_offsets.ptr<int>(0)); thrust::exclusive_scan(beg, end, out); int last_element, last_scan_element; cudaMemcpy(&last_element, mesh_data.number_vertices.ptr<int>(0) + active_voxels - 1, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&last_scan_element, mesh_data.vertex_offsets.ptr<int>(0) + active_voxels - 1, sizeof(int), cudaMemcpyDeviceToHost); const int total_vertices = last_element + last_scan_element; // ### ### //### KERNEL TWO ### const int n_threads = 256; dim3 block(n_threads); unsigned blocks_num = static_cast<unsigned>(std::ceil(active_voxels / n_threads)); dim3 grid(min(blocks_num, 65536), static_cast<unsigned>(std::ceil(blocks_num / 65536))); grid.y = 1; generate_triangles_kernel<<<grid, block>>> (volume.tsdf_volume, volume.volume_size, volume.voxel_scale, mesh_data.occupied_voxel_ids, mesh_data.vertex_offsets, number_vertices_table, triangle_table, mesh_data.triangle_buffer); cudaDeviceSynchronize(); // ### ### // Get triangle vertex colors GpuMat triangles_output(mesh_data.triangle_buffer, cv::Range::all(), cv::Range(0, total_vertices)); GpuMat vertex_colors = cv::cuda::createContinuous(1, total_vertices, CV_8UC3); int n_blocks = static_cast<int>(std::ceil(total_vertices / 1024)); get_color_values_kernel<<<n_blocks, 1024>>> (volume.color_volume, volume.volume_size, volume.voxel_scale, triangles_output, vertex_colors); cudaDeviceSynchronize(); // Download triangles cv::Mat vertex_output {}; triangles_output.download(vertex_output); cv::Mat color_output {}; vertex_colors.download(color_output); return SurfaceMesh { vertex_output, color_output, total_vertices, total_vertices / 3 }; } } } }
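// ----------------------------------------------------------------------------
// Added note (hedged): a small standalone Thrust example, not part of the original
// file, illustrating the bookkeeping used in the host function above -- after an
// exclusive scan of the per-voxel vertex counts, the total number of vertices is
// the last count plus the last scan value, which is exactly how total_vertices
// (last_element + last_scan_element) is formed.
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main()
{
    int h_counts[] = { 3, 6, 0, 9, 3 };                  // vertices per occupied voxel
    thrust::device_vector<int> counts(h_counts, h_counts + 5);
    thrust::device_vector<int> offsets(5);

    // exclusive scan of the counts: offsets = { 0, 3, 9, 9, 18 }
    thrust::exclusive_scan(counts.begin(), counts.end(), offsets.begin());

    int total_vertices = counts.back() + offsets.back(); // 3 + 18 = 21
    printf("total vertices = %d\n", total_vertices);
    return 0;
}
// ----------------------------------------------------------------------------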
4faa5172acf9b323e77437c5df02e25d04b1f489.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows ) { return; } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows ) { return; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize( 1, 1, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize( 1, 1, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( seperateChannels), dim3(gridSize), dim3(blockSize), 0, 0, ...); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. 
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); }
4faa5172acf9b323e77437c5df02e25d04b1f489.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows ) { return; } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // if ( absolute_image_position_x >= numCols || absolute_image_position_y >= numRows ) { return; } } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. 
__global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize( 1, 1, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize( 1, 1, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels seperateChannels<<<gridSize, blockSize>>>(...); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. 
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); }
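// ----------------------------------------------------------------------------
// Added note (hedged): the homework above deliberately leaves gaussian_blur as a
// TODO, so the following is only one possible sketch of the weighted-average blur
// described in the header comments (bounds check first, clamped neighbours, float
// accumulation) -- it is not the course's reference solution.
__global__
void gaussian_blur_sketch(const unsigned char* const inputChannel,
                          unsigned char* const outputChannel,
                          int numRows, int numCols,
                          const float* const filter, const int filterWidth)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= numCols || y >= numRows) return;             // thread maps outside the image

  const int half = filterWidth / 2;
  float result = 0.f;                                   // accumulate in floating point
  for (int fy = -half; fy <= half; ++fy) {
    for (int fx = -half; fx <= half; ++fx) {
      // Clamp neighbour coordinates to the image bounds, as the notes above require.
      const int ny = min(max(y + fy, 0), numRows - 1);
      const int nx = min(max(x + fx, 0), numCols - 1);
      const float weight = filter[(fy + half) * filterWidth + (fx + half)];
      result += weight * static_cast<float>(inputChannel[ny * numCols + nx]);
    }
  }
  outputChannel[y * numCols + x] = static_cast<unsigned char>(result);
}
// ----------------------------------------------------------------------------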
e8ad1a71663aacad2551e52d84ee6ff504e631a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * calcAdvectionWHA.cu * * Created on: 16-04-2013 * Author: Kamil Szewc ([email protected]) */ #include "../../sph.h" __global__ void calcAdvectionSTM(Particle *p, Parameters *par) { uint tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < par->N) { p[tid].vel.x += par->DT * (p[tid].rh_vel.x + par->G_X); p[tid].vel.y += par->DT * (p[tid].rh_vel.y + par->G_Y); p[tid].pos.x += par->DT * p[tid].vel.x; p[tid].pos.y += par->DT * p[tid].vel.y; if (par->T_BOUNDARY_PERIODICITY == 0) { if (p[tid].pos.x > par->XCV) { p[tid].vel.x = -p[tid].vel.x; p[tid].pos.x = 2.0 * par->XCV - p[tid].pos.x; }; if (p[tid].pos.x <= 0.0) { p[tid].vel.x = -p[tid].vel.x; p[tid].pos.x = -p[tid].pos.x; }; } else { if (p[tid].pos.x >= par->XCV) { p[tid].pos.x = p[tid].pos.x - par->XCV; }; if (p[tid].pos.x < 0.0) { p[tid].pos.x = p[tid].pos.x + par->XCV; }; } if (par->T_BOUNDARY_PERIODICITY != 1){ if (p[tid].pos.y > par->YCV) { p[tid].vel.y = -p[tid].vel.y; p[tid].pos.y = 2.0 * par->YCV - p[tid].pos.y; }; if (p[tid].pos.y <= 0.0) { p[tid].vel.y = -p[tid].vel.y; p[tid].pos.y = -p[tid].pos.y; }; } else { if (p[tid].pos.y >= par->YCV) { p[tid].pos.y = p[tid].pos.y - par->YCV; }; if (p[tid].pos.y < 0.0) { p[tid].pos.y = p[tid].pos.y + par->YCV; }; } tid += blockDim.x * gridDim.x; } }
e8ad1a71663aacad2551e52d84ee6ff504e631a4.cu
/* * calcAdvectionWHA.cu * * Created on: 16-04-2013 * Author: Kamil Szewc ([email protected]) */ #include "../../sph.h" __global__ void calcAdvectionSTM(Particle *p, Parameters *par) { uint tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < par->N) { p[tid].vel.x += par->DT * (p[tid].rh_vel.x + par->G_X); p[tid].vel.y += par->DT * (p[tid].rh_vel.y + par->G_Y); p[tid].pos.x += par->DT * p[tid].vel.x; p[tid].pos.y += par->DT * p[tid].vel.y; if (par->T_BOUNDARY_PERIODICITY == 0) { if (p[tid].pos.x > par->XCV) { p[tid].vel.x = -p[tid].vel.x; p[tid].pos.x = 2.0 * par->XCV - p[tid].pos.x; }; if (p[tid].pos.x <= 0.0) { p[tid].vel.x = -p[tid].vel.x; p[tid].pos.x = -p[tid].pos.x; }; } else { if (p[tid].pos.x >= par->XCV) { p[tid].pos.x = p[tid].pos.x - par->XCV; }; if (p[tid].pos.x < 0.0) { p[tid].pos.x = p[tid].pos.x + par->XCV; }; } if (par->T_BOUNDARY_PERIODICITY != 1){ if (p[tid].pos.y > par->YCV) { p[tid].vel.y = -p[tid].vel.y; p[tid].pos.y = 2.0 * par->YCV - p[tid].pos.y; }; if (p[tid].pos.y <= 0.0) { p[tid].vel.y = -p[tid].vel.y; p[tid].pos.y = -p[tid].pos.y; }; } else { if (p[tid].pos.y >= par->YCV) { p[tid].pos.y = p[tid].pos.y - par->YCV; }; if (p[tid].pos.y < 0.0) { p[tid].pos.y = p[tid].pos.y + par->YCV; }; } tid += blockDim.x * gridDim.x; } }
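// ----------------------------------------------------------------------------
// Added note (hedged): the reflective branch above is a mirror update. If a
// particle overshoots the right wall by d = pos.x - XCV, reflecting it about the
// wall places it at
//     pos.x' = XCV - d = 2*XCV - pos.x,
// with the velocity component flipped (vel.x' = -vel.x); the left-wall case
// pos.x <= 0 gives pos.x' = -pos.x in the same way. The periodic branch instead
// wraps the coordinate by +/- XCV (or YCV) without touching the velocity.
// ----------------------------------------------------------------------------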
1fd343c92f08f8f91ce527b3c519783f8990bf2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sigmoid.h" #include <cstdio> Sigmoid::Sigmoid(int inputDim) { this->N = inputDim; hipMalloc(&output, sizeof(float) * N); hipMalloc(&d_output, sizeof(float) * N); printf("Sigmoid Layer, input_size=%d\n", N); } Sigmoid::~Sigmoid() { hipFree(output); hipFree(d_output); } void Sigmoid::forward(float* prev_output) { hipLaunchKernelGGL(( cudaSigmoidForward), dim3(64),dim3(64), 0, 0, prev_output, output, N); } //IMPORTANT: make sure forward output vec is kept before run backward void Sigmoid::backward(float* prev_d_output) { hipLaunchKernelGGL(( backward_gradient_prev), dim3(64),dim3(64), 0, 0, prev_d_output, d_output, output, N); } void Sigmoid::forward_reset() { hipMemset(output, 0x00, sizeof(float)*N); } void Sigmoid::backward_reset() { hipMemset(d_output, 0x00, sizeof(float)*N); } __global__ void cudaSigmoidForward(float* prev_output, float* output, int N) { int pos = blockIdx.x * blockDim.x + threadIdx.x; int size = blockDim.x * gridDim.x; for (int i = N*pos/size; i<N*(pos+1)/size; ++i) { output[i] = 1/(1+exp(-prev_output[i])); } } __global__ void backward_gradient_prev(float* prev_d_output, float* d_output, float* output,int N) { int pos = blockIdx.x * blockDim.x + threadIdx.x; int size = blockDim.x * gridDim.x; for (int i = N*pos/size; i<N*(pos+1)/size; ++i) { prev_d_output[i] = d_output[i]*output[i]*(1-output[i]); } }
1fd343c92f08f8f91ce527b3c519783f8990bf2a.cu
#include "sigmoid.h"
#include <cstdio>

Sigmoid::Sigmoid(int inputDim) {
    this->N = inputDim;
    cudaMalloc(&output, sizeof(float) * N);
    cudaMalloc(&d_output, sizeof(float) * N);
    printf("Sigmoid Layer, input_size=%d\n", N);
}

Sigmoid::~Sigmoid() {
    cudaFree(output);
    cudaFree(d_output);
}

void Sigmoid::forward(float* prev_output) {
    cudaSigmoidForward<<<64,64>>>(prev_output, output, N);
}

//IMPORTANT: make sure forward output vec is kept before run backward
void Sigmoid::backward(float* prev_d_output) {
    backward_gradient_prev<<<64,64>>>(prev_d_output, d_output, output, N);
}

void Sigmoid::forward_reset() {
    cudaMemset(output, 0x00, sizeof(float)*N);
}

void Sigmoid::backward_reset() {
    cudaMemset(d_output, 0x00, sizeof(float)*N);
}

__global__ void cudaSigmoidForward(float* prev_output, float* output, int N) {
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int size = blockDim.x * gridDim.x;
    for (int i = N*pos/size; i<N*(pos+1)/size; ++i) {
        output[i] = 1/(1+exp(-prev_output[i]));
    }
}

__global__ void backward_gradient_prev(float* prev_d_output, float* d_output, float* output,int N) {
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    int size = blockDim.x * gridDim.x;
    for (int i = N*pos/size; i<N*(pos+1)/size; ++i) {
        prev_d_output[i] = d_output[i]*output[i]*(1-output[i]);
    }
}
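The Sigmoid pair above isolates the core rewrite hipify performs throughout this corpus: runtime entry points are renamed one-for-one (cudaMalloc -> hipMalloc, cudaMemset -> hipMemset, cudaFree -> hipFree) and the triple-chevron launch is replaced by an explicit hipLaunchKernelGGL call whose grid, block, shared-memory, and stream arguments are spelled out. Below is a minimal, self-contained sketch of that mapping; the scale kernel and the sizes are hypothetical stand-ins, not code taken from either file.

// Hedged sketch of the CUDA -> HIP launch mapping shown by the pair above.
// The "scale" kernel is illustrative only, not part of the dataset.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float *x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main()
{
    const int n = 1024;
    float *d_x;
    hipMalloc((void **)&d_x, n * sizeof(float));   // CUDA: cudaMalloc
    hipMemset(d_x, 0x00, n * sizeof(float));       // CUDA: cudaMemset
    // CUDA: scale<<<(n + 255) / 256, 256>>>(d_x, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d_x, 2.0f, n);
    hipDeviceSynchronize();                        // CUDA: cudaDeviceSynchronize
    hipFree(d_x);                                  // CUDA: cudaFree
    printf("scaled %d elements\n", n);
    return 0;
}

The fourth and fifth hipLaunchKernelGGL arguments (dynamic shared memory and stream) default to 0 here, matching the "0, 0" that hipify inserted in Sigmoid::forward above.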
5c5fb1f0b3d444f7420aa7f7a9daf837ebd2714f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <cstdio> #include <string> #include <color_spinor_field.h> #include <color_spinor_field_order.h> #include <face_quda.h> #include <tune_quda.h> #define PRESERVE_SPINOR_NORM #ifdef PRESERVE_SPINOR_NORM // Preserve the norm regardless of basis #define kP (1.0/sqrt(2.0)) #define kU (1.0/sqrt(2.0)) #else // More numerically accurate not to preserve the norm between basis #define kP (0.5) #define kU (1.0) #endif namespace quda { void exchangeExtendedGhost(cudaColorSpinorField* spinor, int R[], int parity, hipStream_t *stream_p) { #ifdef MULTI_GPU int nFace = 0; for(int i=0; i<4; i++){ if(R[i] > nFace) nFace = R[i]; } int dagger = 0; int gatherCompleted[2] = {0,0}; int commsCompleted[2] = {0,0}; hipEvent_t gatherEnd[2]; for(int dir=0; dir<2; dir++) hipEventCreate(&gatherEnd[dir], hipEventDisableTiming); for(int dim=3; dim<=0; dim--){ if(!commDim(dim)) continue; spinor->packExtended(nFace, R, parity, dagger, dim, stream_p); // packing in the dim dimension complete hipDeviceSynchronize(); // Need this since packing is performed in stream[Nstream-1] for(int dir=1; dir<=0; dir--){ spinor->gather(nFace, dagger, 2*dim + dir); hipEventRecord(gatherEnd[dir], streams[2*dim+dir]); // gatherEnd[1], gatherEnd[0] } int completeSum = 0; int dir = 1; while(completeSum < 2){ if(!gatherCompleted[dir]){ if(hipSuccess == hipEventQuery(gatherEnd[dir])){ spinor->commsStart(nFace, 2*dim+dir, dagger); completeSum++; gatherCompleted[dir--] = 1; } } } gatherCompleted[0] = gatherCompleted[1] = 0; // Query if comms has completed dir = 1; while(completeSum < 4){ if(!commsCompleted[dir]){ if(spinor->commsQuery(nFace, 2*dim+dir, dagger)){ spinor->scatterExtended(nFace, parity, dagger, 2*dim+dir); completeSum++; commsCompleted[dir--] = 1; } } } commsCompleted[0] = commsCompleted[1] = 0; hipDeviceSynchronize(); // Wait for scatters to complete before next iteration } // loop over dim for(int dir=0; dir<2; dir++) hipEventDestroy(gatherEnd[dir]); #endif return; } /** Straight copy with no basis change */ template <typename FloatOut, typename FloatIn, int Ns, int Nc> class PreserveBasis { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; public: __device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) { for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { for (int z=0; z<2; z++) { out[(s*Nc+c)*2+z] = in[(s*Nc+c)*2+z]; } } } } }; /** Transform from relativistic into non-relavisitic basis */ template <typename FloatOut, typename FloatIn, int Ns, int Nc> struct NonRelBasis { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; __device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) { int s1[4] = {1, 2, 3, 0}; int s2[4] = {3, 0, 1, 2}; RegTypeOut K1[4] = {kP, -kP, -kP, -kP}; RegTypeOut K2[4] = {kP, -kP, kP, kP}; for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { for (int z=0; z<2; z++) { out[(s*Nc+c)*2+z] = K1[s]*in[(s1[s]*Nc+c)*2+z] + K2[s]*in[(s2[s]*Nc+c)*2+z]; } } } } }; /** Transform from non-relativistic into relavisitic basis */ template <typename FloatOut, typename FloatIn, int Ns, int Nc> struct RelBasis { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; __device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) { int s1[4] = {1, 2, 3, 0}; 
int s2[4] = {3, 0, 1, 2}; RegTypeOut K1[4] = {-kU, kU, kU, kU}; RegTypeOut K2[4] = {-kU, kU, -kU, -kU}; for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { for (int z=0; z<2; z++) { out[(s*Nc+c)*2+z] = K1[s]*in[(s1[s]*Nc+c)*2+z] + K2[s]*in[(s2[s]*Nc+c)*2+z]; } } } } }; template<typename OutOrder, typename InOrder, typename Basis> struct CopySpinorExArg{ OutOrder out; const InOrder in; Basis basis; int E[QUDA_MAX_DIM]; int X[QUDA_MAX_DIM]; int length; int parity; CopySpinorExArg(const OutOrder &out, const InOrder &in, const Basis& basis, const int *E, const int *X, const int parity) : out(out), in(in), basis(basis), parity(parity) { this->length = 1; for(int d=0; d<4; d++){ this->E[d] = E[d]; this->X[d] = X[d]; this->length *= X[d]; // smaller volume } } }; template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend> __device__ __host__ void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg, int X) { int x[4]; int R[4]; for(int d=0; d<4; d++) R[d] = (arg.E[d] - arg.X[d]) >> 1; int za = X/(arg.X[0]/2); int x0h = X - za*(arg.X[0]/2); int zb = za/arg.X[1]; x[1] = za - zb*arg.X[1]; x[3] = zb / arg.X[2]; x[2] = zb - x[3]*arg.X[2]; x[0] = 2*x0h + ((x[1] + x[2] + x[3] + arg.parity) & 1); // Y is the cb spatial index into the extended gauge field int Y = ((((x[3]+R[3])*arg.E[2] + (x[2]+R[2]))*arg.E[1] + (x[1]+R[1]))*arg.E[0]+(x[0]+R[0])) >> 1; typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; RegTypeIn in[Ns*Nc*2]; RegTypeOut out[Ns*Nc*2]; if(extend){ arg.in.load(in, X); arg.basis(out, in); arg.out.save(out, Y); }else{ arg.in.load(in, Y); arg.basis(out,in); arg.out.save(out, Y); } } template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend> __global__ void copyInteriorKernel(CopySpinorExArg<OutOrder,InOrder,Basis> arg) { int cb_idx = blockIdx.x*blockDim.x + threadIdx.x; while(cb_idx < arg.length){ copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg,cb_idx); cb_idx += gridDim.x*blockDim.x; } } /* Host function */ template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend> void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg) { for(int cb_idx=0; cb_idx<arg.length; cb_idx++){ copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg, cb_idx); } } template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend> class CopySpinorEx : Tunable { CopySpinorExArg<OutOrder,InOrder,Basis> arg; const ColorSpinorField &meta; QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool advanceSharedBytes(TuneParam &param) const { return false; } // Don't tune shared mem bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.length; } public: CopySpinorEx(CopySpinorExArg<OutOrder,InOrder,Basis> &arg, const ColorSpinorField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("out_stride=%d,in_stride=%d",arg.out.stride,arg.in.stride); } virtual ~CopySpinorEx() {} void apply(const hipStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if(location == QUDA_CPU_FIELD_LOCATION){ copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg); }else if(location == QUDA_CUDA_FIELD_LOCATION){ hipLaunchKernelGGL(( copyInteriorKernel<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")"; ps << "shared=" << param.shared_bytes; return ps.str(); } long long flops() const { return 0; } long long bytes() const { return arg.length*2*Nc*Ns*(sizeof(FloatIn) + sizeof(FloatOut)); } }; // CopySpinorEx template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis> void copySpinorEx(OutOrder outOrder, const InOrder inOrder, const Basis basis, const int *E, const int *X, const int parity, const bool extend, const ColorSpinorField &meta, QudaFieldLocation location) { CopySpinorExArg<OutOrder,InOrder,Basis> arg(outOrder, inOrder, basis, E, X, parity); if(extend){ CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, true> copier(arg, meta, location); copier.apply(0); }else{ CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, false> copier(arg, meta, location); copier.apply(0); } if(location == QUDA_CUDA_FIELD_LOCATION) checkCudaError(); } template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder> void copySpinorEx(OutOrder outOrder, InOrder inOrder, const QudaGammaBasis outBasis, const QudaGammaBasis inBasis, const int* E, const int* X, const int parity, const bool extend, const ColorSpinorField &meta, QudaFieldLocation location) { if(inBasis == outBasis){ PreserveBasis<FloatOut,FloatIn,Ns,Nc> basis; copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, PreserveBasis<FloatOut,FloatIn,Ns,Nc> > (outOrder, inOrder, basis, E, X, parity, extend, meta, location); }else if(outBasis == QUDA_UKQCD_GAMMA_BASIS && inBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){ if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns); NonRelBasis<FloatOut,FloatIn,Ns,Nc> basis; copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, NonRelBasis<FloatOut,FloatIn,Ns,Nc> > (outOrder, inOrder, basis, E, X, parity, extend, meta, location); }else if(inBasis == QUDA_UKQCD_GAMMA_BASIS && outBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){ if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns); RelBasis<FloatOut,FloatIn,Ns,Nc> basis; copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, RelBasis<FloatOut,FloatIn,Ns,Nc> > (outOrder, inOrder, basis, E, X, parity, extend, meta, location); }else{ errorQuda("Basis change not supported"); } } // Need to rewrite the following two functions... 
// Decide on the output order template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename InOrder> void extendedCopyColorSpinor(InOrder &inOrder, ColorSpinorField &out, QudaGammaBasis inBasis, const int *E, const int *X, const int parity, const bool extend, QudaFieldLocation location, FloatOut *Out, float *outNorm){ if(out.FieldOrder() == QUDA_FLOAT4_FIELD_ORDER){ FloatNOrder<FloatOut, Ns, Nc, 4> outOrder(out, Out, outNorm); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); }else if(out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER){ FloatNOrder<FloatOut, Ns, Nc, 2> outOrder(out, Out, outNorm); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); #if 0 }else if(out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER){ SpaceSpinorColorOrder<FloatOut, Ns, Nc> outOrder(out, Out); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); }else if(out.FieldOrder() == QUDA_SPACE_COLOR_SPIN_FIELD_ORDER){ SpaceColorSpinorOrder<FloatOut, Ns, Nc> outOrder(out, Out); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); } else if (out.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){ #ifdef BUILD_QDPJIT_INTERFACE QDPJITDiracOrder<FloatOut, Ns, Nc> outOrder(out, Out); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); #else errorQuda("QDPJIT interface has not been built\n"); #endif #endif }else{ errorQuda("Order not defined"); } } template<typename FloatOut, typename FloatIn, int Ns, int Nc> void extendedCopyColorSpinor(ColorSpinorField &out, const ColorSpinorField &in, const int parity, const QudaFieldLocation location, FloatOut *Out, FloatIn *In, float* outNorm, float *inNorm){ int E[4]; int X[4]; const bool extend = (out.Volume() >= in.Volume()); if(extend){ for(int d=0; d<4; d++){ E[d] = out.X()[d]; X[d] = in.X()[d]; } }else{ for(int d=0; d<4; d++){ E[d] = in.X()[d]; X[d] = out.X()[d]; } } X[0] *= 2; E[0] *= 2; // Since we consider only a single parity at a time if(in.FieldOrder() == QUDA_FLOAT4_FIELD_ORDER){ FloatNOrder<FloatIn,Ns,Nc,4> inOrder(in, In, inNorm); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm); }else if(in.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER){ FloatNOrder<FloatIn,Ns,Nc,2> inOrder(in, In, inNorm); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm); #if 0 }else if(in.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER){ SpaceSpinorColorOrder<FloatIn,Ns,Nc> inOrder(in, In); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm); }else if(in.FieldOrder() == QUDA_SPACE_COLOR_SPIN_FIELD_ORDER){ SpaceColorSpinorOrder<FloatIn,Ns,Nc> inOrder(in, In); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm); }else if (in.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){ #ifdef BUILD_QDPJIT_INTERFACE QDPJITDiracOrder<FloatIn,Ns,Nc> inOrder(in, In); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend,location, Out, outNorm); #else errorQuda("QDPJIT interface has not been built\n"); #endif #endif }else{ errorQuda("Order not defined"); } 
} template<int Ns, typename dstFloat, typename srcFloat> void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src, const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src, float *dstNorm, float *srcNorm) { if(dst.Ndim() != src.Ndim()) errorQuda("Number of dimensions %d %d don't match", dst.Ndim(), src.Ndim()); if(!(dst.SiteOrder() == src.SiteOrder() || (dst.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER && src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER) || (dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER && src.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER) ) ){ errorQuda("Subset orders %d %d don't match", dst.SiteOrder(), src.SiteOrder()); } if(dst.SiteSubset() != src.SiteSubset()) errorQuda("Subset types do not match %d %d", dst.SiteSubset(), src.SiteSubset()); if(dst.Ncolor() != 3 || src.Ncolor() != 3) errorQuda("Nc != 3 not yet supported"); const int Nc = 3; // We currently only support parity-ordered fields; even-odd or odd-even if(dst.SiteOrder() == QUDA_LEXICOGRAPHIC_SITE_ORDER){ errorQuda("Copying to full fields with lexicographical ordering is not currently supported"); } if(dst.SiteSubset() == QUDA_FULL_SITE_SUBSET){ if(src.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER || dst.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){ errorQuda("QDPJIT field ordering not supported for full site fields"); } // set for the source subset ordering srcFloat *srcEven = Src ? Src : (srcFloat*)src.V(); srcFloat* srcOdd = (srcFloat*)((char*)srcEven + src.Bytes()/2); float *srcNormEven = srcNorm ? srcNorm : (float*)src.Norm(); float *srcNormOdd = (float*)((char*)srcNormEven + src.NormBytes()/2); if(src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){ std::swap<srcFloat*>(srcEven, srcOdd); std::swap<float*>(srcNormEven, srcNormOdd); } // set for the destination subset ordering dstFloat *dstEven = Dst ? Dst : (dstFloat*)dst.V(); dstFloat *dstOdd = (dstFloat*)((char*)dstEven + dst.Bytes()/2); float *dstNormEven = dstNorm ? dstNorm : (float*)dst.Norm(); float *dstNormOdd = (float*)((char*)dstNormEven + dst.NormBytes()/2); if(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){ std::swap<dstFloat*>(dstEven, dstOdd); std::swap<float*>(dstNormEven, dstNormOdd); } // should be able to apply to select either even or odd parity at this point as well. extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc> (dst, src, 0, location, dstEven, srcEven, dstNormEven, srcNormEven); extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc> (dst, src, 1, location, dstOdd, srcOdd, dstNormOdd, srcNormOdd); }else{ extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc> (dst, src, parity, location, Dst, Src, dstNorm, srcNorm); } // N.B. 
Need to update this to account for differences in parity } template<typename dstFloat, typename srcFloat> void CopyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src, const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src, float *dstNorm=0, float *srcNorm=0) { if(dst.Nspin() != src.Nspin()) errorQuda("source and destination spins must match"); if(dst.Nspin() == 4){ #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) copyExtendedColorSpinor<4>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm); #else errorQuda("Extended copy has not been built for Nspin=%d fields",dst.Nspin()); #endif }else if(dst.Nspin() == 1){ #ifdef GPU_STAGGERED_DIRAC copyExtendedColorSpinor<1>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm); #else errorQuda("Extended copy has not been built for Nspin=%d fields", dst.Nspin()); #endif }else{ errorQuda("Nspin=%d unsupported", dst.Nspin()); } } // There's probably no need to have the additional Dst and Src arguments here! void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src, QudaFieldLocation location, const int parity, void *Dst, void *Src, void *dstNorm, void *srcNorm){ if(dst.Precision() == QUDA_DOUBLE_PRECISION){ if(src.Precision() == QUDA_DOUBLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<double*>(Src)); }else if(src.Precision() == QUDA_SINGLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<float*>(Src)); }else if(src.Precision() == QUDA_HALF_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm)); } else { errorQuda("Unsupported Precision %d", src.Precision()); } } else if (dst.Precision() == QUDA_SINGLE_PRECISION){ if(src.Precision() == QUDA_DOUBLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<double*>(Src)); }else if(src.Precision() == QUDA_SINGLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<float*>(Src)); }else if(src.Precision() == QUDA_HALF_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm)); }else{ errorQuda("Unsupported Precision %d", src.Precision()); } } else if (dst.Precision() == QUDA_HALF_PRECISION){ if(src.Precision() == QUDA_DOUBLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<double*>(Src), static_cast<float*>(dstNorm), 0); }else if(src.Precision() == QUDA_SINGLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<float*>(Src), static_cast<float*>(dstNorm), 0); }else if(src.Precision() == QUDA_HALF_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<short*>(Src), static_cast<float*>(dstNorm), static_cast<float*>(srcNorm)); }else{ errorQuda("Unsupported Precision %d", src.Precision()); } }else{ errorQuda("Unsupported Precision %d", dst.Precision()); } } } // quda
5c5fb1f0b3d444f7420aa7f7a9daf837ebd2714f.cu
#include <cstdlib> #include <cstdio> #include <string> #include <color_spinor_field.h> #include <color_spinor_field_order.h> #include <face_quda.h> #include <tune_quda.h> #define PRESERVE_SPINOR_NORM #ifdef PRESERVE_SPINOR_NORM // Preserve the norm regardless of basis #define kP (1.0/sqrt(2.0)) #define kU (1.0/sqrt(2.0)) #else // More numerically accurate not to preserve the norm between basis #define kP (0.5) #define kU (1.0) #endif namespace quda { void exchangeExtendedGhost(cudaColorSpinorField* spinor, int R[], int parity, cudaStream_t *stream_p) { #ifdef MULTI_GPU int nFace = 0; for(int i=0; i<4; i++){ if(R[i] > nFace) nFace = R[i]; } int dagger = 0; int gatherCompleted[2] = {0,0}; int commsCompleted[2] = {0,0}; cudaEvent_t gatherEnd[2]; for(int dir=0; dir<2; dir++) cudaEventCreate(&gatherEnd[dir], cudaEventDisableTiming); for(int dim=3; dim<=0; dim--){ if(!commDim(dim)) continue; spinor->packExtended(nFace, R, parity, dagger, dim, stream_p); // packing in the dim dimension complete cudaDeviceSynchronize(); // Need this since packing is performed in stream[Nstream-1] for(int dir=1; dir<=0; dir--){ spinor->gather(nFace, dagger, 2*dim + dir); cudaEventRecord(gatherEnd[dir], streams[2*dim+dir]); // gatherEnd[1], gatherEnd[0] } int completeSum = 0; int dir = 1; while(completeSum < 2){ if(!gatherCompleted[dir]){ if(cudaSuccess == cudaEventQuery(gatherEnd[dir])){ spinor->commsStart(nFace, 2*dim+dir, dagger); completeSum++; gatherCompleted[dir--] = 1; } } } gatherCompleted[0] = gatherCompleted[1] = 0; // Query if comms has completed dir = 1; while(completeSum < 4){ if(!commsCompleted[dir]){ if(spinor->commsQuery(nFace, 2*dim+dir, dagger)){ spinor->scatterExtended(nFace, parity, dagger, 2*dim+dir); completeSum++; commsCompleted[dir--] = 1; } } } commsCompleted[0] = commsCompleted[1] = 0; cudaDeviceSynchronize(); // Wait for scatters to complete before next iteration } // loop over dim for(int dir=0; dir<2; dir++) cudaEventDestroy(gatherEnd[dir]); #endif return; } /** Straight copy with no basis change */ template <typename FloatOut, typename FloatIn, int Ns, int Nc> class PreserveBasis { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; public: __device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) { for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { for (int z=0; z<2; z++) { out[(s*Nc+c)*2+z] = in[(s*Nc+c)*2+z]; } } } } }; /** Transform from relativistic into non-relavisitic basis */ template <typename FloatOut, typename FloatIn, int Ns, int Nc> struct NonRelBasis { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; __device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) { int s1[4] = {1, 2, 3, 0}; int s2[4] = {3, 0, 1, 2}; RegTypeOut K1[4] = {kP, -kP, -kP, -kP}; RegTypeOut K2[4] = {kP, -kP, kP, kP}; for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { for (int z=0; z<2; z++) { out[(s*Nc+c)*2+z] = K1[s]*in[(s1[s]*Nc+c)*2+z] + K2[s]*in[(s2[s]*Nc+c)*2+z]; } } } } }; /** Transform from non-relativistic into relavisitic basis */ template <typename FloatOut, typename FloatIn, int Ns, int Nc> struct RelBasis { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; __device__ __host__ inline void operator()(RegTypeOut out[Ns*Nc*2], const RegTypeIn in[Ns*Nc*2]) { int s1[4] = {1, 2, 3, 0}; int s2[4] = {3, 0, 1, 2}; RegTypeOut K1[4] = {-kU, kU, kU, kU}; RegTypeOut 
K2[4] = {-kU, kU, -kU, -kU}; for (int s=0; s<Ns; s++) { for (int c=0; c<Nc; c++) { for (int z=0; z<2; z++) { out[(s*Nc+c)*2+z] = K1[s]*in[(s1[s]*Nc+c)*2+z] + K2[s]*in[(s2[s]*Nc+c)*2+z]; } } } } }; template<typename OutOrder, typename InOrder, typename Basis> struct CopySpinorExArg{ OutOrder out; const InOrder in; Basis basis; int E[QUDA_MAX_DIM]; int X[QUDA_MAX_DIM]; int length; int parity; CopySpinorExArg(const OutOrder &out, const InOrder &in, const Basis& basis, const int *E, const int *X, const int parity) : out(out), in(in), basis(basis), parity(parity) { this->length = 1; for(int d=0; d<4; d++){ this->E[d] = E[d]; this->X[d] = X[d]; this->length *= X[d]; // smaller volume } } }; template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend> __device__ __host__ void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg, int X) { int x[4]; int R[4]; for(int d=0; d<4; d++) R[d] = (arg.E[d] - arg.X[d]) >> 1; int za = X/(arg.X[0]/2); int x0h = X - za*(arg.X[0]/2); int zb = za/arg.X[1]; x[1] = za - zb*arg.X[1]; x[3] = zb / arg.X[2]; x[2] = zb - x[3]*arg.X[2]; x[0] = 2*x0h + ((x[1] + x[2] + x[3] + arg.parity) & 1); // Y is the cb spatial index into the extended gauge field int Y = ((((x[3]+R[3])*arg.E[2] + (x[2]+R[2]))*arg.E[1] + (x[1]+R[1]))*arg.E[0]+(x[0]+R[0])) >> 1; typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; RegTypeIn in[Ns*Nc*2]; RegTypeOut out[Ns*Nc*2]; if(extend){ arg.in.load(in, X); arg.basis(out, in); arg.out.save(out, Y); }else{ arg.in.load(in, Y); arg.basis(out,in); arg.out.save(out, Y); } } template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend> __global__ void copyInteriorKernel(CopySpinorExArg<OutOrder,InOrder,Basis> arg) { int cb_idx = blockIdx.x*blockDim.x + threadIdx.x; while(cb_idx < arg.length){ copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg,cb_idx); cb_idx += gridDim.x*blockDim.x; } } /* Host function */ template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend> void copyInterior(CopySpinorExArg<OutOrder,InOrder,Basis>& arg) { for(int cb_idx=0; cb_idx<arg.length; cb_idx++){ copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg, cb_idx); } } template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis, bool extend> class CopySpinorEx : Tunable { CopySpinorExArg<OutOrder,InOrder,Basis> arg; const ColorSpinorField &meta; QudaFieldLocation location; private: unsigned int sharedBytesPerThread() const { return 0; } unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; } bool advanceSharedBytes(TuneParam &param) const { return false; } // Don't tune shared mem bool tuneGridDim() const { return false; } // Don't tune the grid dimensions. 
unsigned int minThreads() const { return arg.length; } public: CopySpinorEx(CopySpinorExArg<OutOrder,InOrder,Basis> &arg, const ColorSpinorField &meta, QudaFieldLocation location) : arg(arg), meta(meta), location(location) { writeAuxString("out_stride=%d,in_stride=%d",arg.out.stride,arg.in.stride); } virtual ~CopySpinorEx() {} void apply(const cudaStream_t &stream){ TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if(location == QUDA_CPU_FIELD_LOCATION){ copyInterior<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend>(arg); }else if(location == QUDA_CUDA_FIELD_LOCATION){ copyInteriorKernel<FloatOut,FloatIn,Ns,Nc,OutOrder,InOrder,Basis,extend> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")"; ps << "shared=" << param.shared_bytes; return ps.str(); } long long flops() const { return 0; } long long bytes() const { return arg.length*2*Nc*Ns*(sizeof(FloatIn) + sizeof(FloatOut)); } }; // CopySpinorEx template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder, typename Basis> void copySpinorEx(OutOrder outOrder, const InOrder inOrder, const Basis basis, const int *E, const int *X, const int parity, const bool extend, const ColorSpinorField &meta, QudaFieldLocation location) { CopySpinorExArg<OutOrder,InOrder,Basis> arg(outOrder, inOrder, basis, E, X, parity); if(extend){ CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, true> copier(arg, meta, location); copier.apply(0); }else{ CopySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, Basis, false> copier(arg, meta, location); copier.apply(0); } if(location == QUDA_CUDA_FIELD_LOCATION) checkCudaError(); } template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename OutOrder, typename InOrder> void copySpinorEx(OutOrder outOrder, InOrder inOrder, const QudaGammaBasis outBasis, const QudaGammaBasis inBasis, const int* E, const int* X, const int parity, const bool extend, const ColorSpinorField &meta, QudaFieldLocation location) { if(inBasis == outBasis){ PreserveBasis<FloatOut,FloatIn,Ns,Nc> basis; copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, PreserveBasis<FloatOut,FloatIn,Ns,Nc> > (outOrder, inOrder, basis, E, X, parity, extend, meta, location); }else if(outBasis == QUDA_UKQCD_GAMMA_BASIS && inBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){ if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns); NonRelBasis<FloatOut,FloatIn,Ns,Nc> basis; copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, NonRelBasis<FloatOut,FloatIn,Ns,Nc> > (outOrder, inOrder, basis, E, X, parity, extend, meta, location); }else if(inBasis == QUDA_UKQCD_GAMMA_BASIS && outBasis == QUDA_DEGRAND_ROSSI_GAMMA_BASIS){ if(Ns != 4) errorQuda("Can only change basis with Nspin = 4, not Nspin = %d", Ns); RelBasis<FloatOut,FloatIn,Ns,Nc> basis; copySpinorEx<FloatOut, FloatIn, Ns, Nc, OutOrder, InOrder, RelBasis<FloatOut,FloatIn,Ns,Nc> > (outOrder, inOrder, basis, E, X, parity, extend, meta, location); }else{ errorQuda("Basis change not supported"); } } // Need to rewrite the following two functions... 
// Decide on the output order template<typename FloatOut, typename FloatIn, int Ns, int Nc, typename InOrder> void extendedCopyColorSpinor(InOrder &inOrder, ColorSpinorField &out, QudaGammaBasis inBasis, const int *E, const int *X, const int parity, const bool extend, QudaFieldLocation location, FloatOut *Out, float *outNorm){ if(out.FieldOrder() == QUDA_FLOAT4_FIELD_ORDER){ FloatNOrder<FloatOut, Ns, Nc, 4> outOrder(out, Out, outNorm); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); }else if(out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER){ FloatNOrder<FloatOut, Ns, Nc, 2> outOrder(out, Out, outNorm); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); #if 0 }else if(out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER){ SpaceSpinorColorOrder<FloatOut, Ns, Nc> outOrder(out, Out); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); }else if(out.FieldOrder() == QUDA_SPACE_COLOR_SPIN_FIELD_ORDER){ SpaceColorSpinorOrder<FloatOut, Ns, Nc> outOrder(out, Out); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); } else if (out.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){ #ifdef BUILD_QDPJIT_INTERFACE QDPJITDiracOrder<FloatOut, Ns, Nc> outOrder(out, Out); copySpinorEx<FloatOut,FloatIn,Ns,Nc> (outOrder, inOrder, out.GammaBasis(), inBasis, E, X, parity, extend, out, location); #else errorQuda("QDPJIT interface has not been built\n"); #endif #endif }else{ errorQuda("Order not defined"); } } template<typename FloatOut, typename FloatIn, int Ns, int Nc> void extendedCopyColorSpinor(ColorSpinorField &out, const ColorSpinorField &in, const int parity, const QudaFieldLocation location, FloatOut *Out, FloatIn *In, float* outNorm, float *inNorm){ int E[4]; int X[4]; const bool extend = (out.Volume() >= in.Volume()); if(extend){ for(int d=0; d<4; d++){ E[d] = out.X()[d]; X[d] = in.X()[d]; } }else{ for(int d=0; d<4; d++){ E[d] = in.X()[d]; X[d] = out.X()[d]; } } X[0] *= 2; E[0] *= 2; // Since we consider only a single parity at a time if(in.FieldOrder() == QUDA_FLOAT4_FIELD_ORDER){ FloatNOrder<FloatIn,Ns,Nc,4> inOrder(in, In, inNorm); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm); }else if(in.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER){ FloatNOrder<FloatIn,Ns,Nc,2> inOrder(in, In, inNorm); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm); #if 0 }else if(in.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER){ SpaceSpinorColorOrder<FloatIn,Ns,Nc> inOrder(in, In); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm); }else if(in.FieldOrder() == QUDA_SPACE_COLOR_SPIN_FIELD_ORDER){ SpaceColorSpinorOrder<FloatIn,Ns,Nc> inOrder(in, In); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend, location, Out, outNorm); }else if (in.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){ #ifdef BUILD_QDPJIT_INTERFACE QDPJITDiracOrder<FloatIn,Ns,Nc> inOrder(in, In); extendedCopyColorSpinor<FloatOut,FloatIn,Ns,Nc>(inOrder, out, in.GammaBasis(), E, X, parity, extend,location, Out, outNorm); #else errorQuda("QDPJIT interface has not been built\n"); #endif #endif }else{ errorQuda("Order not defined"); } 
} template<int Ns, typename dstFloat, typename srcFloat> void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src, const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src, float *dstNorm, float *srcNorm) { if(dst.Ndim() != src.Ndim()) errorQuda("Number of dimensions %d %d don't match", dst.Ndim(), src.Ndim()); if(!(dst.SiteOrder() == src.SiteOrder() || (dst.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER && src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER) || (dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER && src.SiteOrder() == QUDA_EVEN_ODD_SITE_ORDER) ) ){ errorQuda("Subset orders %d %d don't match", dst.SiteOrder(), src.SiteOrder()); } if(dst.SiteSubset() != src.SiteSubset()) errorQuda("Subset types do not match %d %d", dst.SiteSubset(), src.SiteSubset()); if(dst.Ncolor() != 3 || src.Ncolor() != 3) errorQuda("Nc != 3 not yet supported"); const int Nc = 3; // We currently only support parity-ordered fields; even-odd or odd-even if(dst.SiteOrder() == QUDA_LEXICOGRAPHIC_SITE_ORDER){ errorQuda("Copying to full fields with lexicographical ordering is not currently supported"); } if(dst.SiteSubset() == QUDA_FULL_SITE_SUBSET){ if(src.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER || dst.FieldOrder() == QUDA_QDPJIT_FIELD_ORDER){ errorQuda("QDPJIT field ordering not supported for full site fields"); } // set for the source subset ordering srcFloat *srcEven = Src ? Src : (srcFloat*)src.V(); srcFloat* srcOdd = (srcFloat*)((char*)srcEven + src.Bytes()/2); float *srcNormEven = srcNorm ? srcNorm : (float*)src.Norm(); float *srcNormOdd = (float*)((char*)srcNormEven + src.NormBytes()/2); if(src.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){ std::swap<srcFloat*>(srcEven, srcOdd); std::swap<float*>(srcNormEven, srcNormOdd); } // set for the destination subset ordering dstFloat *dstEven = Dst ? Dst : (dstFloat*)dst.V(); dstFloat *dstOdd = (dstFloat*)((char*)dstEven + dst.Bytes()/2); float *dstNormEven = dstNorm ? dstNorm : (float*)dst.Norm(); float *dstNormOdd = (float*)((char*)dstNormEven + dst.NormBytes()/2); if(dst.SiteOrder() == QUDA_ODD_EVEN_SITE_ORDER){ std::swap<dstFloat*>(dstEven, dstOdd); std::swap<float*>(dstNormEven, dstNormOdd); } // should be able to apply to select either even or odd parity at this point as well. extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc> (dst, src, 0, location, dstEven, srcEven, dstNormEven, srcNormEven); extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc> (dst, src, 1, location, dstOdd, srcOdd, dstNormOdd, srcNormOdd); }else{ extendedCopyColorSpinor<dstFloat, srcFloat, Ns, Nc> (dst, src, parity, location, Dst, Src, dstNorm, srcNorm); } // N.B. 
Need to update this to account for differences in parity } template<typename dstFloat, typename srcFloat> void CopyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src, const int parity, const QudaFieldLocation location, dstFloat *Dst, srcFloat *Src, float *dstNorm=0, float *srcNorm=0) { if(dst.Nspin() != src.Nspin()) errorQuda("source and destination spins must match"); if(dst.Nspin() == 4){ #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) copyExtendedColorSpinor<4>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm); #else errorQuda("Extended copy has not been built for Nspin=%d fields",dst.Nspin()); #endif }else if(dst.Nspin() == 1){ #ifdef GPU_STAGGERED_DIRAC copyExtendedColorSpinor<1>(dst, src, parity, location, Dst, Src, dstNorm, srcNorm); #else errorQuda("Extended copy has not been built for Nspin=%d fields", dst.Nspin()); #endif }else{ errorQuda("Nspin=%d unsupported", dst.Nspin()); } } // There's probably no need to have the additional Dst and Src arguments here! void copyExtendedColorSpinor(ColorSpinorField &dst, const ColorSpinorField &src, QudaFieldLocation location, const int parity, void *Dst, void *Src, void *dstNorm, void *srcNorm){ if(dst.Precision() == QUDA_DOUBLE_PRECISION){ if(src.Precision() == QUDA_DOUBLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<double*>(Src)); }else if(src.Precision() == QUDA_SINGLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<float*>(Src)); }else if(src.Precision() == QUDA_HALF_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<double*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm)); } else { errorQuda("Unsupported Precision %d", src.Precision()); } } else if (dst.Precision() == QUDA_SINGLE_PRECISION){ if(src.Precision() == QUDA_DOUBLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<double*>(Src)); }else if(src.Precision() == QUDA_SINGLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<float*>(Src)); }else if(src.Precision() == QUDA_HALF_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<float*>(Dst), static_cast<short*>(Src), 0, static_cast<float*>(srcNorm)); }else{ errorQuda("Unsupported Precision %d", src.Precision()); } } else if (dst.Precision() == QUDA_HALF_PRECISION){ if(src.Precision() == QUDA_DOUBLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<double*>(Src), static_cast<float*>(dstNorm), 0); }else if(src.Precision() == QUDA_SINGLE_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<float*>(Src), static_cast<float*>(dstNorm), 0); }else if(src.Precision() == QUDA_HALF_PRECISION){ CopyExtendedColorSpinor(dst, src, parity, location, static_cast<short*>(Dst), static_cast<short*>(Src), static_cast<float*>(dstNorm), static_cast<float*>(srcNorm)); }else{ errorQuda("Unsupported Precision %d", src.Precision()); } }else{ errorQuda("Unsupported Precision %d", dst.Precision()); } } } // quda
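The exchangeExtendedGhost routine in the pair above drives communication by recording an event on a stream after the gather and then polling it (cudaEventQuery / hipEventQuery) so the host can start the dependent transfer as soon as the device work completes. Below is a compact, hypothetical sketch of that record-and-poll pattern on the HIP side; hipEventCreateWithFlags is used here as the flags-taking entry point, and the busy kernel and sizes are illustrative only, not QUDA code.

// Hedged sketch of the event record-and-poll pattern used in exchangeExtendedGhost above.
// "busy" and all sizes are hypothetical.
#include <hip/hip_runtime.h>

__global__ void busy(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = sinf(x[i]) * cosf(x[i]);
}

int main()
{
    const int n = 1 << 22;
    float *d_x;
    hipMalloc((void **)&d_x, n * sizeof(float));
    hipMemset(d_x, 0, n * sizeof(float));

    hipStream_t stream;
    hipStreamCreate(&stream);
    hipEvent_t done;
    hipEventCreateWithFlags(&done, hipEventDisableTiming);  // timing disabled, as in the file above

    hipLaunchKernelGGL(busy, dim3((n + 255) / 256), dim3(256), 0, stream, d_x, n);
    hipEventRecord(done, stream);       // CUDA: cudaEventRecord(done, stream)

    // Poll instead of blocking, so other host-side work could be interleaved here.
    while (hipEventQuery(done) != hipSuccess) { /* spin or do other host work */ }

    hipEventDestroy(done);
    hipStreamDestroy(stream);
    hipFree(d_x);
    return 0;
}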
a8d2d423bda09805787fb69e72ca2dcca2a651c7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab2.h"
#include <math.h>
#include <cstdio>
#include <cstdint>
#include <cstdlib>

//parameter setting
static const unsigned W = 800;
static const unsigned H = 450;
static const unsigned NFRAME = 1200;
static const int ANGLE = 3;
static const int octs = 4;
static const double freq = (double)1/(double)120;
static const double Y1 = 120;
static const double U1 = 95;
static const double V1 = 225;
static const double Y2 = 240;
static const double U2 = 120;
static const double V2 = 150;

//define const
#define PI 3.14159265

__device__ double dirs[256][2];

//perlin table
__device__ int perm[256] = { 151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180 };

//power function
__device__ double power( double x, int y)
{
    double ans = 1;
    for(int i=0; i < y; i++) {
        ans = ans * x;
    }
    return ans;
}

//absolute value function
__device__ double dblAbs(double x)
{
    if( x < 0 ) {
        return -x;
    }
    return x;
}

__device__ double Corners4(double x, double y, int perX, int perY, int c, int f)
{
    int gridX = (int)x + c%2, gridY = (int)y + c/2;
    int hashed = perm[ (perm[ (gridX%perX)%256 ] + gridY%perY)%256];
    double grad = (x-gridX) * dirs[(hashed + ANGLE*f) % 256][0] + (y-gridY) * dirs[(hashed + ANGLE*f) % 256][1];
    double distX = dblAbs((double)x-gridX), distY = dblAbs((double)y-gridY);
    double polyX = 1 - 6*power(distX, 5) + 15*power(distX, 4) - 10*power(distX, 3);
    double polyY = 1 - 6*power(distY, 5) + 15*power(distY, 4) - 10*power(distY, 3);
    return polyX * polyY * grad;
}

__device__ double perlin(double x, double y, int perX, int perY, int f)
{
    double ans = 0;//initial
    for(int i = 0; i < 4; i++) {
        ans += Corners4(x, y, perX, perY, i, f);
    }
    return ans;
}

__global__ void pixelRate(int f, double *douimgptr)
{
    int perX = (int)((double)W*freq), perY = (int)((double)H*freq);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int xint = idx%W, yint = idx/W;
    double x = xint*freq, y = yint*freq;
    double ans = 0;
    for(int i = 0;i < octs; i++) {
        ans += power(0.5, i) * perlin(x*power(2, i), y*power(2, i), perX*power(2, i), perY*power(2, i), f);
    }
    douimgptr[yint*W + xint] = (1/2.0)*ans + (1/2.0); //0-1
}

__global__ void initdirs()
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    dirs[idx][0] = cos((idx * 2.0 * PI)/256.0);
    dirs[idx][1] = sin((idx * 2.0 * PI)/256.0);
}

__global__ void linearInter(double *douimgptr, double C0, double C1, uint8_t *intimgptr, int r)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int xint = idx%W, yint = idx/W;
    if(yint%r == 0 and xint%r == 0) {
        int th = (yint/r)*(W/r) + (xint/r);
        intimgptr[th] = (uint8_t)(C0 + (C1-C0) * douimgptr[idx]);
    }
}

__global__ void NColor(double *douimgptr, double *colorListGPU, uint8_t *intimgptr, int r, int cstart, int cN)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int xint = idx%W, yint = idx/W;
    if(yint%r == 0 && xint%r == 0) {
        int th = (yint/r)*(W/r) + (xint/r);
        intimgptr[th] = (uint8_t) (colorListGPU[ cstart*cN + (int)(floor(douimgptr[idx] * cN)) ]);
    }
}

struct Lab2VideoGenerator::Impl {
    int t = 1;
    int f = 1;
};

Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
    hipLaunchKernelGGL(( initdirs), dim3(1), dim3(256), 0, 0, );
}

Lab2VideoGenerator::~Lab2VideoGenerator() { }

void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
    info.w = W;
    info.h = H;
    info.n_frame = NFRAME;
    info.fps_n = 120;
    info.fps_d = 1;
};

void Lab2VideoGenerator::Generate(uint8_t *yuv) {
    double *douimgptr;
    hipMalloc((void **) &douimgptr, H*W*sizeof(double));
    hipLaunchKernelGGL(( pixelRate), dim3(((H*W)/32)+1), dim3(32), 0, 0, (impl->f), douimgptr);
    hipDeviceSynchronize();

    uint8_t *intimgptr;
    hipMalloc((void **) &intimgptr, H*W*sizeof(uint8_t));

    hipLaunchKernelGGL(( linearInter), dim3(((H*W)/32)+1), dim3(32), 0, 0, douimgptr, Y1, Y2, intimgptr, 1);
    hipMemcpy(yuv, intimgptr, H*W, hipMemcpyDeviceToDevice);
    hipDeviceSynchronize();

    hipLaunchKernelGGL(( linearInter), dim3(((H*W)/32)+1), dim3(32), 0, 0, douimgptr, U1, U2, intimgptr, 2);
    hipMemcpy(yuv+(H*W), intimgptr, H*W/4, hipMemcpyDeviceToDevice);
    hipDeviceSynchronize();

    hipLaunchKernelGGL(( linearInter), dim3(((H*W)/32)+1), dim3(32), 0, 0, douimgptr, V1, V2, intimgptr, 2);
    hipMemcpy(yuv+(H*W)+(H*W)/4, intimgptr, H*W/4, hipMemcpyDeviceToDevice);
    hipDeviceSynchronize();

    ++(impl->t);
    ++(impl->f);
}
a8d2d423bda09805787fb69e72ca2dcca2a651c7.cu
#include "lab2.h"
#include <math.h>
#include <cstdio>
#include <cstdint>
#include <cstdlib>

//parameter setting
static const unsigned W = 800;
static const unsigned H = 450;
static const unsigned NFRAME = 1200;
static const int ANGLE = 3;
static const int octs = 4;
static const double freq = (double)1/(double)120;
static const double Y1 = 120;
static const double U1 = 95;
static const double V1 = 225;
static const double Y2 = 240;
static const double U2 = 120;
static const double V2 = 150;

//define const
#define PI 3.14159265

__device__ double dirs[256][2];

//perlin table
__device__ int perm[256] = { 151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180 };

//power function
__device__ double power( double x, int y)
{
    double ans = 1;
    for(int i=0; i < y; i++) {
        ans = ans * x;
    }
    return ans;
}

//absolute value function
__device__ double dblAbs(double x)
{
    if( x < 0 ) {
        return -x;
    }
    return x;
}

__device__ double Corners4(double x, double y, int perX, int perY, int c, int f)
{
    int gridX = (int)x + c%2, gridY = (int)y + c/2;
    int hashed = perm[ (perm[ (gridX%perX)%256 ] + gridY%perY)%256];
    double grad = (x-gridX) * dirs[(hashed + ANGLE*f) % 256][0] + (y-gridY) * dirs[(hashed + ANGLE*f) % 256][1];
    double distX = dblAbs((double)x-gridX), distY = dblAbs((double)y-gridY);
    double polyX = 1 - 6*power(distX, 5) + 15*power(distX, 4) - 10*power(distX, 3);
    double polyY = 1 - 6*power(distY, 5) + 15*power(distY, 4) - 10*power(distY, 3);
    return polyX * polyY * grad;
}

__device__ double perlin(double x, double y, int perX, int perY, int f)
{
    double ans = 0;//initial
    for(int i = 0; i < 4; i++) {
        ans += Corners4(x, y, perX, perY, i, f);
    }
    return ans;
}

__global__ void pixelRate(int f, double *douimgptr)
{
    int perX = (int)((double)W*freq), perY = (int)((double)H*freq);
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int xint = idx%W, yint = idx/W;
    double x = xint*freq, y = yint*freq;
    double ans = 0;
    for(int i = 0;i < octs; i++) {
        ans += power(0.5, i) * perlin(x*power(2, i), y*power(2, i), perX*power(2, i), perY*power(2, i), f);
    }
    douimgptr[yint*W + xint] = (1/2.0)*ans + (1/2.0); //0-1
}

__global__ void initdirs()
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    dirs[idx][0] = cos((idx * 2.0 * PI)/256.0);
    dirs[idx][1] = sin((idx * 2.0 * PI)/256.0);
}

__global__ void linearInter(double *douimgptr, double C0, double C1, uint8_t *intimgptr, int r)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int xint = idx%W, yint = idx/W;
    if(yint%r == 0 and xint%r == 0) {
        int th = (yint/r)*(W/r) + (xint/r);
        intimgptr[th] = (uint8_t)(C0 + (C1-C0) * douimgptr[idx]);
    }
}

__global__ void NColor(double *douimgptr, double *colorListGPU, uint8_t *intimgptr, int r, int cstart, int cN)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int xint = idx%W, yint = idx/W;
    if(yint%r == 0 && xint%r == 0) {
        int th = (yint/r)*(W/r) + (xint/r);
        intimgptr[th] = (uint8_t) (colorListGPU[ cstart*cN + (int)(floor(douimgptr[idx] * cN)) ]);
    }
}

struct Lab2VideoGenerator::Impl {
    int t = 1;
    int f = 1;
};

Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
    initdirs<<<1, 256>>>();
}

Lab2VideoGenerator::~Lab2VideoGenerator() { }

void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
    info.w = W;
    info.h = H;
    info.n_frame = NFRAME;
    info.fps_n = 120;
    info.fps_d = 1;
};

void Lab2VideoGenerator::Generate(uint8_t *yuv) {
    double *douimgptr;
    cudaMalloc((void **) &douimgptr, H*W*sizeof(double));
    pixelRate<<<((H*W)/32)+1, 32>>>((impl->f), douimgptr);
    cudaDeviceSynchronize();

    uint8_t *intimgptr;
    cudaMalloc((void **) &intimgptr, H*W*sizeof(uint8_t));

    linearInter<<<((H*W)/32)+1, 32>>>(douimgptr, Y1, Y2, intimgptr, 1);
    cudaMemcpy(yuv, intimgptr, H*W, cudaMemcpyDeviceToDevice);
    cudaDeviceSynchronize();

    linearInter<<<((H*W)/32)+1, 32>>>(douimgptr, U1, U2, intimgptr, 2);
    cudaMemcpy(yuv+(H*W), intimgptr, H*W/4, cudaMemcpyDeviceToDevice);
    cudaDeviceSynchronize();

    linearInter<<<((H*W)/32)+1, 32>>>(douimgptr, V1, V2, intimgptr, 2);
    cudaMemcpy(yuv+(H*W)+(H*W)/4, intimgptr, H*W/4, cudaMemcpyDeviceToDevice);
    cudaDeviceSynchronize();

    ++(impl->t);
    ++(impl->f);
}
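In the Generate() method of the pair above, both work buffers are allocated with cudaMalloc/hipMalloc on every frame and are never freed, so the allocations accumulate across the NFRAME calls. Below is a hedged sketch of one way to hold the two buffers for the lifetime of the generator instead; the FrameBuffers struct and its member names are illustrative only, not part of the lab code.

// Hedged sketch: allocate the per-frame device buffers once and reuse them,
// rather than calling hipMalloc inside Generate() on every frame.
// The struct name and the stand-in "work" are hypothetical.
#include <hip/hip_runtime.h>
#include <cstdint>

struct FrameBuffers {
    double  *noise = nullptr;  // W*H intermediate noise values
    uint8_t *plane = nullptr;  // W*H quantized plane bytes
    FrameBuffers(unsigned w, unsigned h) {
        hipMalloc((void **)&noise, (size_t)w * h * sizeof(double));
        hipMalloc((void **)&plane, (size_t)w * h * sizeof(uint8_t));
    }
    ~FrameBuffers() {
        hipFree(noise);
        hipFree(plane);
    }
};

int main()
{
    FrameBuffers buf(800, 450);                      // sized like the W/H constants above
    for (int frame = 0; frame < 3; ++frame) {
        // ...launch pixelRate / linearInter into buf.noise and buf.plane here...
        hipMemset(buf.plane, 0x00, (size_t)800 * 450);  // stand-in for real per-frame work
    }
    hipDeviceSynchronize();
    return 0;                                        // destructor releases both buffers
}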
75648c55dc58944efee5b5e67ef4722333a30b79.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

hipError_t addWithCuda(float *Picture, int m, int n);

__global__ void PictureKernel(float *d_Pin, float *d_Pout, int m, int n)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if((Row < m) && (Col < n)) {
        d_Pout[Row * n + Col] = 2 * d_Pin[Row * n + Col];
    }
}

int main()
{
    // create a picture
    int i, j;
    float picture[9][12];
    for(i = 0; i < 9; i++) {
        for(j = 0; j < 12; j++) {
            picture[i][j] = i * 12 + j;
        }
    }

    // mapping picture data into 1D array
    float *picture_1D;
    picture_1D = (float *)malloc(9*12*sizeof(float));
    for(i = 0; i < 9; i++) {
        for(j = 0; j < 12; j++) {
            picture_1D[i*12 + j] = picture[i][j];
        }
    }

    // before calling picturekernel
    for(i = 0; i < 9; i++) {
        for(j = 0; j < 12; j++) {
            printf("%1.0f ", picture_1D[i*12 + j]);
        }
        printf("\n");
    }

    // call the addwithcuda function.
    hipError_t cudaStatus = addWithCuda(picture_1D, 9, 12);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("\n*******============================================================*********\n");

    // after calling picturekernel
    for(i = 0; i < 9; i++) {
        for(j = 0; j < 12; j++) {
            printf("%1.0f ", picture_1D[i*12 + j]);
        }
        printf("\n");
    }

    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(float *Picture, int m, int n)
{
    float *d_Pin, *d_Pout;
    int Psize = m * n * sizeof(float);

    hipMalloc((void**)&d_Pin, Psize);
    hipMemcpy(d_Pin, Picture, Psize, hipMemcpyHostToDevice);
    hipMalloc((void**)&d_Pout, Psize);

    hipError_t cudaStatus;
    dim3 threadsPerBlocks(16, 16, 1);
    dim3 blocksPerGird(ceil(n/16.0), ceil(m/16.0), 1);

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    hipLaunchKernelGGL(( PictureKernel), dim3(blocksPerGird), dim3(threadsPerBlocks), 0, 0, d_Pin, d_Pout, m, n);

    // Check for any errors launching the kernel
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        goto Error;
    }

    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = hipMemcpy(Picture, d_Pout, Psize, hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

Error:
    hipFree(d_Pin);
    hipFree(d_Pout);

    return cudaStatus;
}
75648c55dc58944efee5b5e67ef4722333a30b79.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

cudaError_t addWithCuda(float *Picture, int m, int n);

__global__ void PictureKernel(float *d_Pin, float *d_Pout, int m, int n)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if((Row < m) && (Col < n)) {
        d_Pout[Row * n + Col] = 2 * d_Pin[Row * n + Col];
    }
}

int main()
{
    // create a picture
    int i, j;
    float picture[9][12];
    for(i = 0; i < 9; i++) {
        for(j = 0; j < 12; j++) {
            picture[i][j] = i * 12 + j;
        }
    }

    // mapping picture data into 1D array
    float *picture_1D;
    picture_1D = (float *)malloc(9*12*sizeof(float));
    for(i = 0; i < 9; i++) {
        for(j = 0; j < 12; j++) {
            picture_1D[i*12 + j] = picture[i][j];
        }
    }

    // before calling picturekernel
    for(i = 0; i < 9; i++) {
        for(j = 0; j < 12; j++) {
            printf("%1.0f ", picture_1D[i*12 + j]);
        }
        printf("\n");
    }

    // call the addwithcuda function.
    cudaError_t cudaStatus = addWithCuda(picture_1D, 9, 12);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    printf("\n*******============================================================*********\n");

    // after calling picturekernel
    for(i = 0; i < 9; i++) {
        for(j = 0; j < 12; j++) {
            printf("%1.0f ", picture_1D[i*12 + j]);
        }
        printf("\n");
    }

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(float *Picture, int m, int n)
{
    float *d_Pin, *d_Pout;
    int Psize = m * n * sizeof(float);

    cudaMalloc((void**)&d_Pin, Psize);
    cudaMemcpy(d_Pin, Picture, Psize, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_Pout, Psize);

    cudaError_t cudaStatus;
    dim3 threadsPerBlocks(16, 16, 1);
    dim3 blocksPerGird(ceil(n/16.0), ceil(m/16.0), 1);

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    PictureKernel<<<blocksPerGird, threadsPerBlocks>>>(d_Pin, d_Pout, m, n);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(Picture, d_Pout, Psize, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(d_Pin);
    cudaFree(d_Pout);

    return cudaStatus;
}
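The PictureKernel pair above shows the standard 2D launch recipe that hipify leaves structurally intact apart from API renames: a 16x16 block, a grid rounded up with ceil to cover an m x n image, and an in-kernel bounds guard on Row and Col. Below is a compact, self-contained sketch of the same recipe on the HIP side; the doubleIt kernel is hypothetical and only the 9x12 size mirrors the file.

// Hedged sketch of the 2D launch recipe used by PictureKernel above.
// "doubleIt" is an illustrative kernel, not code from the dataset.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cmath>
#include <vector>

__global__ void doubleIt(const float *in, float *out, int m, int n)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if ((Row < m) && (Col < n)) {
        out[Row * n + Col] = 2.0f * in[Row * n + Col];
    }
}

int main()
{
    const int m = 9, n = 12;
    std::vector<float> h(m * n);
    for (int i = 0; i < m * n; i++) h[i] = (float)i;

    float *d_in, *d_out;
    hipMalloc((void **)&d_in, m * n * sizeof(float));
    hipMalloc((void **)&d_out, m * n * sizeof(float));
    hipMemcpy(d_in, h.data(), m * n * sizeof(float), hipMemcpyHostToDevice);

    dim3 threadsPerBlock(16, 16, 1);
    dim3 blocksPerGrid((unsigned)ceil(n / 16.0), (unsigned)ceil(m / 16.0), 1);
    // CUDA: doubleIt<<<blocksPerGrid, threadsPerBlock>>>(d_in, d_out, m, n);
    hipLaunchKernelGGL(doubleIt, blocksPerGrid, threadsPerBlock, 0, 0, d_in, d_out, m, n);

    hipMemcpy(h.data(), d_out, m * n * sizeof(float), hipMemcpyDeviceToHost);
    printf("h[13] = %.0f (expected 26)\n", h[13]);

    hipFree(d_in);
    hipFree(d_out);
    return 0;
}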
9ae270a00a2592352eb0cbd25e28ae889cf722b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* TODO get tree and leaf array sizes to cut down on GPU memory*/ #include <stdio.h> #include <string.h> #include <omp.h> extern "C" { #include "tree.h" #include "voxel.h" #include "traversal.h" } #include "gpu_traversal.h" //__device__ __constant__ node const_tree[4217]; static float *values; static short *lindex; hipError_t copy_const(node *tree, hipStream_t stream); hipError_t allocate_texture(short *volume); void gpu_errchk(const char *file, int line, hipError_t cuErr) { if(cuErr != hipSuccess) printf("EECUDA| Error at %s, %d: %s\n", file, line, hipGetErrorString(cuErr)); } void copyTreeToGPU(node *tree, node **gpu_tree, leaf *leaves, leaf **gpu_leaves, int treesize, int leafsize) { leaf leafptr; //float *values; //short *index; //printf("2.Host->Device:\n %p->%p\tTree\n%p->%p\tLeaf\n", tree, gpu_tree, leaves, // gpu_leaves); gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) gpu_tree, treesize * sizeof(node))); gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) gpu_leaves, sizeof(leaf))); gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) &values, leafsize * sizeof(float))); gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) &lindex, leafsize * sizeof(short))); leafptr.value = values; leafptr.index = lindex; gpu_errchk(__FILE__, __LINE__, hipMemcpy(*gpu_leaves, &leafptr, sizeof(leaf), hipMemcpyHostToDevice)); gpu_errchk(__FILE__, __LINE__, hipMemcpy(values, leaves->value, leafsize * sizeof(float), hipMemcpyHostToDevice)); gpu_errchk(__FILE__, __LINE__, hipMemcpy(lindex, leaves->index, leafsize * sizeof(short), hipMemcpyHostToDevice)); gpu_errchk(__FILE__, __LINE__, hipMemcpy(*gpu_tree, tree, treesize * sizeof(node), hipMemcpyHostToDevice)); //printf("3.Host->Device:\n %p->%p\tTree\n%p->%p\tLeaf\n", tree, gpu_tree, leaves, // gpu_leaves); } void copyTreeToGPU2(node *tree, node **gpu_tree, leaf *leaves, leaf **gpu_leaves, int treesize, int leafsize, hipStream_t *streams) { leaf leafptr; //float *values; //short *index; //printf("2.Host->Device:\n %p->%p\tTree\n%p->%p\tLeaf\n", tree, gpu_tree, leaves, // gpu_leaves); /*gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) gpu_tree, treesize * sizeof(node))); gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) gpu_leaves, sizeof(leaf))); gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) &values, leafsize * sizeof(float))); gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) &index, leafsize * sizeof(short)));*/ leafptr.value = values; leafptr.index = lindex; gpu_errchk(__FILE__, __LINE__, hipMemcpyAsync(*gpu_leaves, &leafptr, sizeof(leaf), hipMemcpyHostToDevice, streams[2])); gpu_errchk(__FILE__, __LINE__, hipMemcpyAsync(values, leaves->value, leafsize * sizeof(float), hipMemcpyHostToDevice, streams[3])); gpu_errchk(__FILE__, __LINE__, hipMemcpyAsync(lindex, leaves->index, leafsize * sizeof(short), hipMemcpyHostToDevice, streams[2])); gpu_errchk(__FILE__, __LINE__, hipMemcpyAsync(*gpu_tree, tree, treesize * sizeof(node), hipMemcpyHostToDevice, streams[3])); //printf("3.Host->Device:\n %p->%p\tTree\n%p->%p\tLeaf\n", tree, gpu_tree, leaves, // gpu_leaves); } int main(int argc, char **argv) { short *volume = NULL, *results = NULL; char **classes= NULL; node *root = NULL; leaf *leaves = NULL; node *gpu_root = NULL; leaf *gpu_leaves = NULL; int classCount; int x, y, z; int treesize = 4217, leafsize = 16384; short *gpu_volume = NULL, *gpu_results = NULL; double starttime, endtime; volume = parseVolume(&x, &y, &z); results = allocate_volume(x, y, z); 
hipDeviceSetCacheConfig(hipFuncCachePreferL1); gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) &gpu_volume, x*y*z*sizeof(short))); gpu_errchk(__FILE__, __LINE__, hipMalloc((void**) &gpu_results, x*y*z*sizeof(short))); gpu_errchk(__FILE__, __LINE__, hipMemcpy(gpu_volume, volume, x*y*z*sizeof(short), hipMemcpyHostToDevice)); printf("DIMS: %d %d %d\n", x, y, z); root = parseXMLTree(TREEFILE, &classes, &classCount, &leaves); //hipMemcpyToSymbol(const_tree, root, 4217 * //sizeof(node))); // hipMemcpyHostToDevice)); //copyTreeToGPU(root, &gpu_root, leaves, &gpu_leaves, treesize, leafsize); //starttime = omp_get_wtime(); //printf("CPU run disabled\n"); //traverse(root, leaves, volume, results, x, y, z); //endtime = omp_get_wtime(); //printf("Single traversal time, CPU: %f.\n", endtime-starttime); /* TODO texture copy */ //gpu_errchk(__FILE__, __LINE__, allocate_texture(volume)); dim3 params; params.x = 1; params.y = 512; params.z = 525; dim3 params2; params2.x = 512; hipStream_t streams[4]; printf("%d ||||||||\n", sizeof(node)); for(int repeat = 0; repeat < 4; repeat++) hipStreamCreate(&streams[repeat]); gpu_errchk(__FILE__, __LINE__, copy_const(root, streams[0])); starttime = omp_get_wtime(); //gpu_traverse<<<1,1>>>(gpu_root, gpu_leaves, gpu_volume, gpu_results, x, y, z); hipLaunchKernelGGL(( gpu_traverse), dim3(params),dim3(params2),0,streams[0], gpu_root, gpu_leaves, gpu_volume, gpu_results, x, y, z); for(int repeat = 0; repeat < 3; repeat++) { //copyTreeToGPU2(root, &gpu_root, leaves, &gpu_leaves, treesize, leafsize, streams); hipDeviceSynchronize(); gpu_errchk(__FILE__, __LINE__, copy_const(root, streams[0])); hipLaunchKernelGGL(( gpu_traverse), dim3(params),dim3(params2),0,streams[0], gpu_root, gpu_leaves, gpu_volume, gpu_results, x, y, z); gpu_errchk(__FILE__, __LINE__, hipMemcpyAsync(results, gpu_results, x*y*z*sizeof(short), hipMemcpyDeviceToHost, streams[1])); } hipDeviceSynchronize(); endtime = omp_get_wtime(); for(int repeat = 0; repeat < 4; repeat++) hipStreamDestroy(streams[repeat]); /*Copy results back to host*/ gpu_errchk(__FILE__, __LINE__, hipMemcpy(results, gpu_results, x*y*z*sizeof(short), hipMemcpyDeviceToHost)); printf("Single traversal time, GPU: %f.\n", endtime-starttime); if(argc == 3) { if(!(strcmp(argv[1], "-csv"))) { printf("Printing results to %s.\n", argv[2]); toCSV(results, x, y, z, argv[2]); } } gpu_errchk(__FILE__, __LINE__, hipFree(gpu_volume)); gpu_errchk(__FILE__, __LINE__, hipFree(gpu_results)); freeVolume(volume, y, z); freeVolume(results, y, z); classes = freeClasses(classes, classCount); free(root); free(leaves); return 0; }
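The loop above copies results back with hipMemcpyAsync into a buffer obtained from allocate_volume(), which is presumably ordinary pageable memory; such copies are staged by the runtime and do not truly overlap with the kernels. If overlap is the goal, the host buffer would normally be pinned, roughly as in this sketch (function names and the short element type are illustrative):

#include <hip/hip_runtime.h>

// Allocate a page-locked (pinned) host buffer so hipMemcpyAsync can overlap
// with kernel execution on another stream.
static short *alloc_pinned_results(size_t count) {
    short *p = NULL;
    if (hipHostMalloc((void **)&p, count * sizeof(short), hipHostMallocDefault) != hipSuccess)
        return NULL;
    return p;
}

static void free_pinned_results(short *p) {
    hipHostFree(p);
}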
9ae270a00a2592352eb0cbd25e28ae889cf722b3.cu
/* TODO get tree and leaf array sizes to cut down on GPU memory*/ #include <stdio.h> #include <string.h> #include <omp.h> extern "C" { #include "tree.h" #include "voxel.h" #include "traversal.h" } #include "gpu_traversal.h" //__device__ __constant__ node const_tree[4217]; static float *values; static short *lindex; cudaError_t copy_const(node *tree, cudaStream_t stream); cudaError_t allocate_texture(short *volume); void gpu_errchk(const char *file, int line, cudaError cuErr) { if(cuErr != cudaSuccess) printf("EECUDA| Error at %s, %d: %s\n", file, line, cudaGetErrorString(cuErr)); } void copyTreeToGPU(node *tree, node **gpu_tree, leaf *leaves, leaf **gpu_leaves, int treesize, int leafsize) { leaf leafptr; //float *values; //short *index; //printf("2.Host->Device:\n %p->%p\tTree\n%p->%p\tLeaf\n", tree, gpu_tree, leaves, // gpu_leaves); gpu_errchk(__FILE__, __LINE__, cudaMalloc((void**) gpu_tree, treesize * sizeof(node))); gpu_errchk(__FILE__, __LINE__, cudaMalloc((void**) gpu_leaves, sizeof(leaf))); gpu_errchk(__FILE__, __LINE__, cudaMalloc((void**) &values, leafsize * sizeof(float))); gpu_errchk(__FILE__, __LINE__, cudaMalloc((void**) &lindex, leafsize * sizeof(short))); leafptr.value = values; leafptr.index = lindex; gpu_errchk(__FILE__, __LINE__, cudaMemcpy(*gpu_leaves, &leafptr, sizeof(leaf), cudaMemcpyHostToDevice)); gpu_errchk(__FILE__, __LINE__, cudaMemcpy(values, leaves->value, leafsize * sizeof(float), cudaMemcpyHostToDevice)); gpu_errchk(__FILE__, __LINE__, cudaMemcpy(lindex, leaves->index, leafsize * sizeof(short), cudaMemcpyHostToDevice)); gpu_errchk(__FILE__, __LINE__, cudaMemcpy(*gpu_tree, tree, treesize * sizeof(node), cudaMemcpyHostToDevice)); //printf("3.Host->Device:\n %p->%p\tTree\n%p->%p\tLeaf\n", tree, gpu_tree, leaves, // gpu_leaves); } void copyTreeToGPU2(node *tree, node **gpu_tree, leaf *leaves, leaf **gpu_leaves, int treesize, int leafsize, cudaStream_t *streams) { leaf leafptr; //float *values; //short *index; //printf("2.Host->Device:\n %p->%p\tTree\n%p->%p\tLeaf\n", tree, gpu_tree, leaves, // gpu_leaves); /*gpu_errchk(__FILE__, __LINE__, cudaMalloc((void**) gpu_tree, treesize * sizeof(node))); gpu_errchk(__FILE__, __LINE__, cudaMalloc((void**) gpu_leaves, sizeof(leaf))); gpu_errchk(__FILE__, __LINE__, cudaMalloc((void**) &values, leafsize * sizeof(float))); gpu_errchk(__FILE__, __LINE__, cudaMalloc((void**) &index, leafsize * sizeof(short)));*/ leafptr.value = values; leafptr.index = lindex; gpu_errchk(__FILE__, __LINE__, cudaMemcpyAsync(*gpu_leaves, &leafptr, sizeof(leaf), cudaMemcpyHostToDevice, streams[2])); gpu_errchk(__FILE__, __LINE__, cudaMemcpyAsync(values, leaves->value, leafsize * sizeof(float), cudaMemcpyHostToDevice, streams[3])); gpu_errchk(__FILE__, __LINE__, cudaMemcpyAsync(lindex, leaves->index, leafsize * sizeof(short), cudaMemcpyHostToDevice, streams[2])); gpu_errchk(__FILE__, __LINE__, cudaMemcpyAsync(*gpu_tree, tree, treesize * sizeof(node), cudaMemcpyHostToDevice, streams[3])); //printf("3.Host->Device:\n %p->%p\tTree\n%p->%p\tLeaf\n", tree, gpu_tree, leaves, // gpu_leaves); } int main(int argc, char **argv) { short *volume = NULL, *results = NULL; char **classes= NULL; node *root = NULL; leaf *leaves = NULL; node *gpu_root = NULL; leaf *gpu_leaves = NULL; int classCount; int x, y, z; int treesize = 4217, leafsize = 16384; short *gpu_volume = NULL, *gpu_results = NULL; double starttime, endtime; volume = parseVolume(&x, &y, &z); results = allocate_volume(x, y, z); cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); gpu_errchk(__FILE__, __LINE__, 
cudaMalloc((void**) &gpu_volume, x*y*z*sizeof(short))); gpu_errchk(__FILE__, __LINE__, cudaMalloc((void**) &gpu_results, x*y*z*sizeof(short))); gpu_errchk(__FILE__, __LINE__, cudaMemcpy(gpu_volume, volume, x*y*z*sizeof(short), cudaMemcpyHostToDevice)); printf("DIMS: %d %d %d\n", x, y, z); root = parseXMLTree(TREEFILE, &classes, &classCount, &leaves); //cudaMemcpyToSymbol(const_tree, root, 4217 * //sizeof(node))); // cudaMemcpyHostToDevice)); //copyTreeToGPU(root, &gpu_root, leaves, &gpu_leaves, treesize, leafsize); //starttime = omp_get_wtime(); //printf("CPU run disabled\n"); //traverse(root, leaves, volume, results, x, y, z); //endtime = omp_get_wtime(); //printf("Single traversal time, CPU: %f.\n", endtime-starttime); /* TODO texture copy */ //gpu_errchk(__FILE__, __LINE__, allocate_texture(volume)); dim3 params; params.x = 1; params.y = 512; params.z = 525; dim3 params2; params2.x = 512; cudaStream_t streams[4]; printf("%d ||||||||\n", sizeof(node)); for(int repeat = 0; repeat < 4; repeat++) cudaStreamCreate(&streams[repeat]); gpu_errchk(__FILE__, __LINE__, copy_const(root, streams[0])); starttime = omp_get_wtime(); //gpu_traverse<<<1,1>>>(gpu_root, gpu_leaves, gpu_volume, gpu_results, x, y, z); gpu_traverse<<<params,params2,0,streams[0]>>>(gpu_root, gpu_leaves, gpu_volume, gpu_results, x, y, z); for(int repeat = 0; repeat < 3; repeat++) { //copyTreeToGPU2(root, &gpu_root, leaves, &gpu_leaves, treesize, leafsize, streams); cudaDeviceSynchronize(); gpu_errchk(__FILE__, __LINE__, copy_const(root, streams[0])); gpu_traverse<<<params,params2,0,streams[0]>>>(gpu_root, gpu_leaves, gpu_volume, gpu_results, x, y, z); gpu_errchk(__FILE__, __LINE__, cudaMemcpyAsync(results, gpu_results, x*y*z*sizeof(short), cudaMemcpyDeviceToHost, streams[1])); } cudaDeviceSynchronize(); endtime = omp_get_wtime(); for(int repeat = 0; repeat < 4; repeat++) cudaStreamDestroy(streams[repeat]); /*Copy results back to host*/ gpu_errchk(__FILE__, __LINE__, cudaMemcpy(results, gpu_results, x*y*z*sizeof(short), cudaMemcpyDeviceToHost)); printf("Single traversal time, GPU: %f.\n", endtime-starttime); if(argc == 3) { if(!(strcmp(argv[1], "-csv"))) { printf("Printing results to %s.\n", argv[2]); toCSV(results, x, y, z, argv[2]); } } gpu_errchk(__FILE__, __LINE__, cudaFree(gpu_volume)); gpu_errchk(__FILE__, __LINE__, cudaFree(gpu_results)); freeVolume(volume, y, z); freeVolume(results, y, z); classes = freeClasses(classes, classCount); free(root); free(leaves); return 0; }
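gpu_errchk() above takes __FILE__ and __LINE__ explicitly at every call site. A hypothetical convenience macro in the same spirit (not part of the original sources) keeps the call sites shorter while printing the same message:

#include <cuda_runtime.h>
#include <stdio.h>

// Wraps a CUDA runtime call and reports failures with file and line,
// mirroring the format used by gpu_errchk() in the file above.
#define GPU_ERRCHK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess)                                             \
            printf("EECUDA| Error at %s, %d: %s\n", __FILE__, __LINE__,      \
                   cudaGetErrorString(err_));                                \
    } while (0)

// Usage: GPU_ERRCHK(cudaMalloc((void **)&gpu_volume, bytes));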
7122a4c38a74d9f5d2c68babc672d2781125ae59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef _TIMER_ #include "hip/hip_runtime_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #define mod(x,y) ( (x) & (y-1)) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X); float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X); float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X); float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X); float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f; float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f; int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8); // Initialize the values int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*0]; t2 = input[__iter_3__+M*1]; } // Rest of the computation for (int __iter_1__ = 1; __iter_1__ < N-1; __iter_1__++) { if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ b2 = __tilevar_2__[__iter_3__-__iter_0__]; __tilevar_2__[__iter_3__-__iter_0__] = t2; t2 = input[__iter_3__+M*(__iter_1__+1)]; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ float __temp_2__ = b2; float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = t2; float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); b3 = __tilevar_3__[__iter_3__-__iter_0__]; __tilevar_3__[__iter_3__-__iter_0__] = t3; t3 = __temp_19__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ float __temp_32__ = b3; float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = t3; float 
__temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = (__temp_48__ / 118); b4 = __tilevar_4__[__iter_3__-__iter_0__]; __tilevar_4__[__iter_3__-__iter_0__] = t4; t4 = __temp_49__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ float __temp_60__ = b4; float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]); float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = t4; float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); b5 = __tilevar_5__[__iter_3__-__iter_0__]; __tilevar_5__[__iter_3__-__iter_0__] = t5; t5 = __temp_69__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ float __temp_80__ = b5; float __temp_81__ = (__tilevar_5__[__iter_3__+(-1)-__iter_0__]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_3__-__iter_0__]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_3__+1-__iter_0__]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = t5; float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; hipMalloc(&input,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : input\n"); hipPointerAttribute_t ptrAttrib_h_input; hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice; if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess) if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice) memcpy_kind_h_input = hipMemcpyDeviceToDevice; hipGetLastError(); if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){ hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input); } float * __var_1__; hipMalloc(&__var_1__,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! 
: __var_1__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ hipEvent_t _forma_timer_start_,_forma_timer_stop_; hipEventCreate(&_forma_timer_start_); hipEventCreate(&_forma_timer_stop_); hipEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1; int __block_0___kernel___forma_kernel__0__ = 64; int __block_1___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8); int __grid_1___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); hipPointerAttribute_t ptrAttrib___var_0__; hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost; if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess) if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice) memcpy_kind___var_0__ = hipMemcpyDeviceToDevice; hipGetLastError(); hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__); #ifdef _TIMER_ hipEventRecord(_forma_timer_stop_,0); hipEventSynchronize(_forma_timer_stop_); float elapsedTime; hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); hipEventDestroy(_forma_timer_start_); hipEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ hipFree(input); hipFree(__var_1__); } /*Host Free End*/
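The generated kernel above carves four tile arrays out of a single dynamic shared-memory allocation using manual byte offsets, with the total size computed on the host by __blockSizeToSMemSize___kernel___forma_kernel__0__(). A stripped-down sketch of that pattern (kernel name and tile layout are illustrative):

#include <hip/hip_runtime.h>

// One extern char[] for all dynamic shared memory; tiles are placed by offset.
extern __shared__ char smem[];

__global__ void tiled_sketch(int tile_len) {
    float *tile_a = (float *)smem;                               // first tile
    float *tile_b = (float *)(smem + sizeof(float) * tile_len);  // second tile
    int t = threadIdx.x;
    if (t < tile_len) {
        tile_a[t] = 0.0f;
        tile_b[t] = 0.0f;
    }
}

// Host side: the total byte count is passed as the 4th launch argument, e.g.
//   hipLaunchKernelGGL(tiled_sketch, grid, block, 2 * sizeof(float) * tile_len, 0, tile_len);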
7122a4c38a74d9f5d2c68babc672d2781125ae59.cu
#include "cuda.h" #ifdef _TIMER_ #include "cuda_profiler_api.h" #endif #include "stdio.h" #define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) ) #define max(a,b) FORMA_MAX(a,b) #define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) ) #define min(a,b) FORMA_MIN(a,b) #define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 ) #define mod(x,y) ( (x) & (y-1)) #ifndef FORMA_MAX_BLOCKDIM_0 #define FORMA_MAX_BLOCKDIM_0 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_1 #define FORMA_MAX_BLOCKDIM_1 1024 #endif #ifndef FORMA_MAX_BLOCKDIM_2 #define FORMA_MAX_BLOCKDIM_2 1024 #endif void Check_CUDA_Error(const char* message); /*Texture references */ /*Shared Memory Variable */ extern __shared__ char __FORMA_SHARED_MEM__[]; /* Device code Begin */ __global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){ int __FORMA_SHARED_MEM_OFFSET__ = 0; float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X); float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X); float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X); float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__); __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X); float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f; float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f; int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8); // Initialize the values int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ; if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) { __tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*0]; t2 = input[__iter_3__+M*1]; } // Rest of the computation for (int __iter_1__ = 1; __iter_1__ < N-1; __iter_1__++) { if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){ b2 = __tilevar_2__[__iter_3__-__iter_0__]; __tilevar_2__[__iter_3__-__iter_0__] = t2; t2 = input[__iter_3__+M*(__iter_1__+1)]; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){ float __temp_2__ = b2; float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]); float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__); float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]); float __temp_10__ = (__temp_6__ + 15 * __temp_9__); float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]); float __temp_14__ = (__temp_10__ + 12 * __temp_13__); float __temp_17__ = t2; float __temp_18__ = (__temp_14__ + 5 * __temp_17__); float __temp_19__ = (__temp_18__ / 118); b3 = __tilevar_3__[__iter_3__-__iter_0__]; __tilevar_3__[__iter_3__-__iter_0__] = t3; t3 = __temp_19__; } __syncthreads(); if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){ float __temp_32__ = b3; float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]); float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__); float __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]); float __temp_40__ = (__temp_36__ + 15 * __temp_39__); float __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]); float __temp_44__ = (__temp_40__ + 12 * __temp_43__); float __temp_47__ = t3; float __temp_48__ = (__temp_44__ + 5 * __temp_47__); float __temp_49__ = 
(__temp_48__ / 118); b4 = __tilevar_4__[__iter_3__-__iter_0__]; __tilevar_4__[__iter_3__-__iter_0__] = t4; t4 = __temp_49__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){ float __temp_60__ = b4; float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]); float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__); float __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]); float __temp_64__ = (__temp_62__ + 15 * __temp_63__); float __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]); float __temp_66__ = (__temp_64__ + 12 * __temp_65__); float __temp_67__ = t4; float __temp_68__ = (__temp_66__ + 5 * __temp_67__); float __temp_69__ = (__temp_68__ / 118); b5 = __tilevar_5__[__iter_3__-__iter_0__]; __tilevar_5__[__iter_3__-__iter_0__] = t5; t5 = __temp_69__; } __syncthreads (); if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){ float __temp_80__ = b5; float __temp_81__ = (__tilevar_5__[__iter_3__+(-1)-__iter_0__]); float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__); float __temp_83__ = (__tilevar_5__[__iter_3__-__iter_0__]); float __temp_84__ = (__temp_82__ + 15 * __temp_83__); float __temp_85__ = (__tilevar_5__[__iter_3__+1-__iter_0__]); float __temp_86__ = (__temp_84__ + 12 * __temp_85__); float __temp_87__ = t5; float __temp_88__ = (__temp_86__ + 5 * __temp_87__); float __temp_89__ = (__temp_88__ / 118); __var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__; } } } int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){ int FORMA_BLOCKDIM_X = (int)(blockDim.x); int SMemSize = 0; SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_X); return SMemSize; } /*Device code End */ /* Host Code Begin */ extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){ /* Host allocation Begin */ float * input; cudaMalloc(&input,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! : input\n"); cudaPointerAttributes ptrAttrib_h_input; cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice; if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess) if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice) memcpy_kind_h_input = cudaMemcpyDeviceToDevice; cudaGetLastError(); if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){ cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input); } float * __var_1__; cudaMalloc(&__var_1__,sizeof(float)*((N)*(M))); Check_CUDA_Error("Allocation Error!! 
: __var_1__\n"); /*Host Allocation End */ /* Kernel Launch Begin */ int __FORMA_MAX_SHARED_MEM__; cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0); #ifdef _TIMER_ cudaEvent_t _forma_timer_start_,_forma_timer_stop_; cudaEventCreate(&_forma_timer_start_); cudaEventCreate(&_forma_timer_stop_); cudaEventRecord(_forma_timer_start_,0); #endif int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1; int __block_0___kernel___forma_kernel__0__ = 64; int __block_1___kernel___forma_kernel__0__ = 1; dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__); int __SMemSize___kernel___forma_kernel__0__ = 0; __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__); int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8); int __grid_1___kernel___forma_kernel__0__ = 1; dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__); __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__); Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n"); cudaPointerAttributes ptrAttrib___var_0__; cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost; if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess) if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice) memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice; cudaGetLastError(); cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__); #ifdef _TIMER_ cudaEventRecord(_forma_timer_stop_,0); cudaEventSynchronize(_forma_timer_stop_); float elapsedTime; cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_); printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime); cudaEventDestroy(_forma_timer_start_); cudaEventDestroy(_forma_timer_stop_); #endif /*Kernel Launch End */ /* Host Free Begin */ cudaFree(input); cudaFree(__var_1__); } /*Host Free End*/
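The _TIMER_ block above uses CUDA events to time the kernel. The same pattern, reduced to a small reusable helper (the helper itself is hypothetical, not part of the FORMA output):

#include <cuda_runtime.h>

// Times whatever GPU work the callback enqueues (e.g. a kernel launch) and
// returns the elapsed time in milliseconds, as measured by CUDA events.
static float time_region_ms(void (*work)(void)) {
    cudaEvent_t start, stop;
    float ms = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    work();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}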
422fb571b9ee49629330dca2aaddfe2d8d067090.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "RefineDetPlugin.hpp" #include "MathFunction.hpp" namespace Shadow{ __global__ void applyConf_gpu(int batchSize, int _numPriorboxes, int _numClasses, float _objectness_score, const float *arm_conf, const float *odm_conf, float *conf, hipStream_t stream) { int priorboxesId = threadIdx.x + blockIdx.x * blockDim.x; if (priorboxesId < batchSize * _numPriorboxes) { if (arm_conf[2 * priorboxesId + 1] < _objectness_score) { for (int c = 0; c < _numClasses; ++c) { if (c != 0) conf[priorboxesId * _numClasses + c] = 0.0; else conf[priorboxesId * _numClasses + c] = 1.0; } } else { for (int c = 0; c < _numClasses; c++) conf[priorboxesId * _numClasses + c] = odm_conf[priorboxesId * _numClasses + c]; } } } __global__ void applyLoc_gpu(int batchSize, int _numPriorboxes, const float *arm_loc, const float *priorbox_loc, float *loc) { int box_id = threadIdx.x + blockIdx.x * blockDim.x; int beginAddress = box_id / _numPriorboxes * _numPriorboxes * 8; int box_id_image = box_id % _numPriorboxes; if (box_id < batchSize * _numPriorboxes) { // float xmin = priorbox_loc[beginAddress + box_id_image * 4], // ymin = priorbox_loc[beginAddress + box_id_image * 4 + 1], // xmax = priorbox_loc[beginAddress + box_id_image * 4 + 2], // ymax = priorbox_loc[beginAddress + box_id_image * 4 + 3]; // float var1 = priorbox_loc[beginAddress + (box_id_image + _numPriorboxes) * 4], // var2 = priorbox_loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 1], // var3 = priorbox_loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 2], // var4 = priorbox_loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 3]; float xmin = priorbox_loc[box_id_image * 4], ymin = priorbox_loc[box_id_image * 4 + 1], xmax = priorbox_loc[box_id_image * 4 + 2], ymax = priorbox_loc[box_id_image * 4 + 3]; float var1 = priorbox_loc[(box_id_image + _numPriorboxes) * 4], var2 = priorbox_loc[(box_id_image + _numPriorboxes) * 4 + 1], var3 = priorbox_loc[(box_id_image + _numPriorboxes) * 4 + 2], var4 = priorbox_loc[(box_id_image + _numPriorboxes) * 4 + 3]; float bbox1 = arm_loc[box_id * 4], bbox2 = arm_loc[box_id * 4 + 1], bbox3 = arm_loc[box_id * 4 + 2], bbox4 = arm_loc[box_id * 4 + 3]; // if(xmin == priorbox_loc[1200] && ymin == priorbox_loc[1201] && xmax == priorbox_loc[1202] && ymax == priorbox_loc[1203]){ // printf("%d %d %d\n",box_id, _numPriorboxes, box_id_image); // } float prior_width = xmax - xmin, prior_height = ymax - ymin, prior_center_x = (xmax + xmin) / 2, prior_center_y = (ymax + ymin) / 2; float decode_bbox_center_x = var1 * bbox1 * prior_width + prior_center_x, decode_bbox_center_y = var2 * bbox2 * prior_height + prior_center_y, decode_bbox_width = exp(var3 * bbox3) * prior_width, decode_bbox_height = exp(var4 * bbox4) * prior_height; loc[beginAddress + box_id_image * 4] = decode_bbox_center_x - decode_bbox_width / 2; loc[beginAddress + box_id_image * 4 + 1] = decode_bbox_center_y - decode_bbox_height / 2; loc[beginAddress + box_id_image * 4 + 2] = decode_bbox_center_x + decode_bbox_width / 2; loc[beginAddress + box_id_image * 4 + 3] = decode_bbox_center_y + decode_bbox_height / 2; loc[beginAddress + (box_id_image + _numPriorboxes) * 4] = var1; loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 1] = var2; loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 2] = var3; loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 3] = var4; } } void applyConf(int batchSize, int _numPriorboxes, int _numClasses, float _objectness_score, 
const float *arm_conf, const float *odm_conf, float *conf, hipStream_t stream) { int block = GET_BLOCKS(batchSize * _numPriorboxes); int grid = (batchSize * _numPriorboxes + block - 1) / block; hipLaunchKernelGGL(( applyConf_gpu), dim3(grid), dim3(block), 0, 0, batchSize, _numPriorboxes, _numClasses, _objectness_score, arm_conf, odm_conf, conf, stream); } void applyLoc(int batchSize, int _numPriorboxes, const float *arm_loc, const float *priorbox_loc, float *loc) { int block = GET_BLOCKS(batchSize * _numPriorboxes); int grid = (batchSize * _numPriorboxes + block - 1) / block; hipLaunchKernelGGL(( applyLoc_gpu), dim3(grid), dim3(block), 0, 0, batchSize, _numPriorboxes, arm_loc, priorbox_loc, loc); } }
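applyLoc_gpu above performs a standard center-size box decode with per-coordinate variances. A host-side reference of the same arithmetic can be handy for spot-checking the kernel output; this sketch mirrors the kernel's math, but its names are illustrative and not part of the plugin API:

#include <math.h>

// prior = {xmin, ymin, xmax, ymax}, var = per-coordinate variances,
// delta = regression output (arm_loc), out = decoded {xmin, ymin, xmax, ymax}.
static void decode_box_ref(const float prior[4], const float var[4],
                           const float delta[4], float out[4]) {
    float pw  = prior[2] - prior[0], ph  = prior[3] - prior[1];
    float pcx = (prior[2] + prior[0]) / 2.0f, pcy = (prior[3] + prior[1]) / 2.0f;
    float cx = var[0] * delta[0] * pw + pcx;
    float cy = var[1] * delta[1] * ph + pcy;
    float w  = expf(var[2] * delta[2]) * pw;
    float h  = expf(var[3] * delta[3]) * ph;
    out[0] = cx - w / 2.0f;  out[1] = cy - h / 2.0f;  // xmin, ymin
    out[2] = cx + w / 2.0f;  out[3] = cy + h / 2.0f;  // xmax, ymax
}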
422fb571b9ee49629330dca2aaddfe2d8d067090.cu
#include "RefineDetPlugin.hpp" #include "MathFunction.hpp" namespace Shadow{ __global__ void applyConf_gpu(int batchSize, int _numPriorboxes, int _numClasses, float _objectness_score, const float *arm_conf, const float *odm_conf, float *conf, cudaStream_t stream) { int priorboxesId = threadIdx.x + blockIdx.x * blockDim.x; if (priorboxesId < batchSize * _numPriorboxes) { if (arm_conf[2 * priorboxesId + 1] < _objectness_score) { for (int c = 0; c < _numClasses; ++c) { if (c != 0) conf[priorboxesId * _numClasses + c] = 0.0; else conf[priorboxesId * _numClasses + c] = 1.0; } } else { for (int c = 0; c < _numClasses; c++) conf[priorboxesId * _numClasses + c] = odm_conf[priorboxesId * _numClasses + c]; } } } __global__ void applyLoc_gpu(int batchSize, int _numPriorboxes, const float *arm_loc, const float *priorbox_loc, float *loc) { int box_id = threadIdx.x + blockIdx.x * blockDim.x; int beginAddress = box_id / _numPriorboxes * _numPriorboxes * 8; int box_id_image = box_id % _numPriorboxes; if (box_id < batchSize * _numPriorboxes) { // float xmin = priorbox_loc[beginAddress + box_id_image * 4], // ymin = priorbox_loc[beginAddress + box_id_image * 4 + 1], // xmax = priorbox_loc[beginAddress + box_id_image * 4 + 2], // ymax = priorbox_loc[beginAddress + box_id_image * 4 + 3]; // float var1 = priorbox_loc[beginAddress + (box_id_image + _numPriorboxes) * 4], // var2 = priorbox_loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 1], // var3 = priorbox_loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 2], // var4 = priorbox_loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 3]; float xmin = priorbox_loc[box_id_image * 4], ymin = priorbox_loc[box_id_image * 4 + 1], xmax = priorbox_loc[box_id_image * 4 + 2], ymax = priorbox_loc[box_id_image * 4 + 3]; float var1 = priorbox_loc[(box_id_image + _numPriorboxes) * 4], var2 = priorbox_loc[(box_id_image + _numPriorboxes) * 4 + 1], var3 = priorbox_loc[(box_id_image + _numPriorboxes) * 4 + 2], var4 = priorbox_loc[(box_id_image + _numPriorboxes) * 4 + 3]; float bbox1 = arm_loc[box_id * 4], bbox2 = arm_loc[box_id * 4 + 1], bbox3 = arm_loc[box_id * 4 + 2], bbox4 = arm_loc[box_id * 4 + 3]; // if(xmin == priorbox_loc[1200] && ymin == priorbox_loc[1201] && xmax == priorbox_loc[1202] && ymax == priorbox_loc[1203]){ // printf("%d %d %d\n",box_id, _numPriorboxes, box_id_image); // } float prior_width = xmax - xmin, prior_height = ymax - ymin, prior_center_x = (xmax + xmin) / 2, prior_center_y = (ymax + ymin) / 2; float decode_bbox_center_x = var1 * bbox1 * prior_width + prior_center_x, decode_bbox_center_y = var2 * bbox2 * prior_height + prior_center_y, decode_bbox_width = exp(var3 * bbox3) * prior_width, decode_bbox_height = exp(var4 * bbox4) * prior_height; loc[beginAddress + box_id_image * 4] = decode_bbox_center_x - decode_bbox_width / 2; loc[beginAddress + box_id_image * 4 + 1] = decode_bbox_center_y - decode_bbox_height / 2; loc[beginAddress + box_id_image * 4 + 2] = decode_bbox_center_x + decode_bbox_width / 2; loc[beginAddress + box_id_image * 4 + 3] = decode_bbox_center_y + decode_bbox_height / 2; loc[beginAddress + (box_id_image + _numPriorboxes) * 4] = var1; loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 1] = var2; loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 2] = var3; loc[beginAddress + (box_id_image + _numPriorboxes) * 4 + 3] = var4; } } void applyConf(int batchSize, int _numPriorboxes, int _numClasses, float _objectness_score, const float *arm_conf, const float *odm_conf, float *conf, cudaStream_t stream) { int block 
= GET_BLOCKS(batchSize * _numPriorboxes); int grid = (batchSize * _numPriorboxes + block - 1) / block; applyConf_gpu<<<grid, block>>>(batchSize, _numPriorboxes, _numClasses, _objectness_score, arm_conf, odm_conf, conf, stream); } void applyLoc(int batchSize, int _numPriorboxes, const float *arm_loc, const float *priorbox_loc, float *loc) { int block = GET_BLOCKS(batchSize * _numPriorboxes); int grid = (batchSize * _numPriorboxes + block - 1) / block; applyLoc_gpu<<<grid, block>>>(batchSize, _numPriorboxes, arm_loc, priorbox_loc, loc); } }
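One detail worth noting in the file above: applyConf() receives a cudaStream_t but forwards it to the kernel as an ordinary argument and launches on the default stream. If an asynchronous launch on that stream is intended, the stream normally goes in the execution configuration, roughly as in this sketch (kernel and function names are placeholders):

#include <cuda_runtime.h>

__global__ void work_sketch(const float *in, float *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i];
}

static void launch_on_stream(const float *in, float *out, int n, cudaStream_t stream) {
    int block = 256;
    int grid  = (n + block - 1) / block;
    // 3rd launch argument: dynamic shared memory bytes, 4th: the stream to run on.
    work_sketch<<<grid, block, 0, stream>>>(in, out, n);
}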
a10a7e55c874a78b3608a0cd2101bebf50a9701c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <helper_cuda.h> #include <iostream> #include <set> #include "../../cudaconv3/include/cudaconv2.cuh" #include "../../util/include/matrix.h" #include "../include/layer_kernels.cuh" #include "../include/layer.cuh" #include "../include/data.cuh" #include "../include/util.cuh" #include "../include/weights.cuh" using namespace std; /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : _convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _numOutputs = pyDictGetInt(paramsDict, "outputs"); _numReplicas = pyDictGetInt(paramsDict, "numReplicas"); _numReplicasPrev = 1; _rcvdBInputMsgs = 0; _actBroadcaster = NULL; _gradReducer = NULL; _initialized = false; } Layer::~Layer() { if (_actBroadcaster != NULL) { _actBroadcaster->stop(); delete _actBroadcaster; } if (_gradReducer != NULL) { _gradReducer->stop(); delete _gradReducer; } // For now, gradReducer doesn't have a destructor // delete _gradReducer; for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } } hipStream_t Layer::getStream() { assert(getDeviceID() >= 0); return NVMatrix::getDefaultStream(getDeviceID()); } void Layer::syncStream() { NVMatrix::syncStream(getStream()); } void Layer::fpropNext(PASS_TYPE passType, int passIdx) { if (_next.size() > 0) { if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) { syncStream(); // Make sure I've finished computing before broadcasting } getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue)); } if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) { _broadcastFinishQueue.dequeue(); assert(_broadcastFinishQueue.getNumElements() == 0); } } for (int i = 0; i < _next.size(); i++) { _next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx)); } } bool Layer::fprop(PASS_TYPE passType, int passIdx) { _rcvdFInputMsgs++; // I require messages from *all* input replicas because it makes the propagation easier to think about. 
// Without this requirement, when all fprop terminal msgs arrive to ConvNet, the forward propagation // might not actually be finished yet. if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) { // printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID()); int ridx = getFwdActiveInputReplicaIdx(passIdx); assert(getDeviceID() == NVMatrix::getDeviceID()); map<int, NVMatrix*> v; if (ridx >= 0) { for (int i = 0; i < getNumLayersPrev(); i++) { v[i] = &_prev[ridx][i]->getActs(getDeviceID()); } } fprop(v, passType, passIdx); return true; } return false; } void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) { if (getFwdActiveInputReplicaIdx(passIdx) >= 0) { assert(v.size() == getNumLayersPrev()); _inputs.clear(); _inputs.insert(v.begin(), v.end()); int numCases = _inputs[0]->getLeadingDim(); for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { it->second->getMemory(numCases); } if (numCases > 0) { //printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases); _rcvdFInputMsgs = getNumExpectedFwdMsgs(); for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) { it->second->transpose(_trans); } getActs().transpose(_trans); fpropCommon(passType); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType, passIdx); } // Then add the rest of the inputs to that for (int i = 0; i < getNumLayersPrev(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx); } } } } fpropNext(passType, passIdx); } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_actsGradTarget < 0) { for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { it->second->getMemorySource().truncate(getName()); } } if (_actsTarget < 0) { for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { it->second->getMemorySource().truncate(getName()); } } } int Layer::getNumGradProducersNext() { return _numGradProducersNext; } int Layer::getNumExpectedBwdMsgs() { return _numGradProducersNext * getNumSiblingReplicas(); } int Layer::getNumExpectedFwdMsgs() { return getNumLayersPrev() * getNumInputReplicas(); } void Layer::bprop(PASS_TYPE passType, int passIdx) { if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) { // printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID()); if (_gradReducer != NULL) { _gradReducer->waitForFinish(); } // This does sync, but only if it has grad consumers below! so we must sync again before sending bprop terminal messages bprop(getActsGrad(), passType, passIdx); if (_bwdTerminal[passIdx]) { syncStream(); getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL)); } } } void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) { Layer& prev = *_prev[replicaIdx][inputIdx]; if (prev.isGradConsumer() && isGradProducer(prev.getName())) { if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0 bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType); } prev.getNumComputedActsGrads(getDeviceID())++; // Synchronize if the previous layer is going to actually do a reduction. // If the previous layer is on the same GPU as us and has no next layers // on other GPUs then it won't need to do a reduction. 
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) { syncStream(); } prev.getGradReducer().enqueueReduction(getDeviceID()); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) { v.transpose(_trans); assert(getDeviceID() == NVMatrix::getDeviceID()); int ridx = getBwdActiveInputReplicaIdx(passIdx); LayerV& prev = _prev[ridx]; map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx]; for (int i = 0; i < prev.size(); i++) { _inputs[i]->transpose(_trans); prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); // NOTE: this should be here (before the bpropActs) because if you have a layer // that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite // v which is used in bpropCommon. So bpropCommon must come first. bpropCommon(v, ridx, passType); if (isGradProducer()) { // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) { const set<Layer*>& deviceLayers = it->second; for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) { if (_actsGradTarget != (*it2)->getInputIdx(_name)) { bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name)); } } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0) { bpropActsCall(v, passType, ridx, _actsGradTarget); } } // Synchronization is necessary because the kernel calls that compute my backward acts // execute asynchronously. Therefore I don't want to tell other threads that I've // computed bprop activities for them when in fact I've only called a function which // will eventually compute them. if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) { syncStream(); } if (getConvNet().isConserveMemory()) { truncBwdActs(); } if (isGradProducer()) { /*for (int i = 0; i < prev.size(); i++) { if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) { prev[i]->getGradReducer().enqueueReduction(getDeviceID()); } }*/ // Send backward messages to *all* replicas. // Note that the messages will be dismissed unless the passIdx indicates // that the previous layer should do some work. for (int r = 0; r < getNumInputReplicas(); r++) { for (int i = 0; i < _prev[r].size(); i++) { if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) { _prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx)); } } } } } IActGradReducer& Layer::getGradReducer() { return *_gradReducer; } // This is called between minibatches void Layer::reset() { _rcvdFInputMsgs = 0; _rcvdBInputMsgs = 0; for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) { it->second = 0; } } // This is called between microbatches void Layer::resetPassIdx() { _rcvdFInputMsgs = 0; if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) { reset(); } } /* * Returns number of cases in given matrix. 
*/ int Layer::getNumCases(NVMatrix& v) { return v.getLeadingDim(); } int Layer::incRcvdBInputMsgs() { return ++_rcvdBInputMsgs; } std::string& Layer::getName() { return _name; } std::string& Layer::getType() { return _type; } int& Layer::getNumComputedActsGrads(int deviceID) { return _numComputedActsGrads[deviceID]; } void Layer::addNext(Layer& l) { _next.push_back(&l); _numReplicasNext = l.getNumReplicas(); if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) { int pos = rand() % (_nextDeviceIDs.size() + 1); _nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID()); } } void Layer::addPrev(Layer& l, int replicaIdx) { _prev[replicaIdx].push_back(&l); _numReplicasPrev = l.getNumReplicas(); l.setInputIdx(getName(), _prev[replicaIdx].size() - 1); if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) { int pos = rand() % (_prevDeviceIDs.size() + 1); _prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID()); } } void Layer::addReplica(Layer& l) { assert(_replicas.count(l.getReplicaID()) == 0); _replicas[l.getReplicaID()] = &l; } bool Layer::hasGradProducerNext(std::string& layerName) { bool b = _next.size() == 0; for (int i = 0; i < _next.size(); i++) { b |= _next[i]->hasGradProducerNext(_name); } return b && isGradProducer(layerName); } bool Layer::postInit() { // We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop(). // In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating // it from _prev->getActs() // _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs(); if (!_initialized) { _initialized = true; map<int,int> numGradProducersNext; _numGradProducersNext = 0; for (int r = 0; r < getNumInputReplicas(); ++r) { for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) { (*it)->postInit(); } } _memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName()) : &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name); // _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0] _memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName()) : &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name); for (int i = 0; i < _next.size(); ++i) { int d = _next[i]->getDeviceID(); _numComputedActsGrads[d] = 0; if (_next[i]->hasGradProducerNext(_name)) { if (numGradProducersNext.count(d) == 0) { numGradProducersNext[d] = 0; } numGradProducersNext[d]++; _numGradProducersNext++; if (_memSrcActsGrad.count(d) == 0) { _memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName()); } } if (_memSrcActs.count(d) == 0) { _memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName()); } } if (_next.size() == 0) { _numReplicasNext = getNumReplicas(); } /* * Initialize forward broadcaster. First sibling owns it. */ if (getReplicaIdx() == 0 && _convNetThread != NULL) { _actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID())); _actBroadcaster->start(); } /* * Initialize backward reducer. 
*/ if (isGradConsumer() && _numGradProducersNext > 0) { _gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext); _gradReducer->start(); } /* * Initialize specially sorted previous array */ for (int r = 0; r < _prev.size(); ++r) { for (int i = 0; i < _prev[r].size(); ++i) { // Previous devices in reverse order of processing by (sequential) GradReducer _prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID() + 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]); } } return true; } return false; } ActBroadcaster& Layer::getActBroadcaster() { return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster(); } // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { if (!_foundGradConsumers && _prev.size() > 0) { for (int i = 0; i < _prev[0].size(); i++) { _gradConsumer |= _prev[0][i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? bool Layer::isGradProducer() { return true; } bool Layer::isGradProducer(std::string& layerName) { return isGradProducer(); } map<int,vector<Layer*> >& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { return getActs(getDeviceID()); } NVMatrix& Layer::getActs(int deviceID) { assert(_memSrcActs.count(deviceID) > 0); return _memSrcActs[deviceID]->getMemory(); } NVMatrix& Layer::getActs(int deviceID, int numCases) { assert(_memSrcActs.count(deviceID) > 0); return _memSrcActs[deviceID]->getMemory(numCases); } NVMatrix& Layer::getActsGrad(int deviceID) { assert(_memSrcActsGrad.count(deviceID) > 0); return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim()); } NVMatrix& Layer::getActsGrad() { return getActsGrad(NVMatrix::getDeviceID()); } map<int, NVMatrix*> Layer::getAllActs() { map<int, NVMatrix*> m; for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { m[it->first] = &it->second->getMemory(); } return m; } map<int, NVMatrix*> Layer::getAllActsGrads() { map<int, NVMatrix*> m; for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { m[it->first] = &it->second->getMemory(); } return m; } int Layer::getDeviceID() { return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID(); } ConvNetThread& Layer::getConvNetThread() { assert(_convNetThread != NULL); return *_convNetThread; } ConvNet& Layer::getConvNet() { return getConvNetThread().getConvNet(); } void Layer::setBwdTerminal(int passIdx) { _bwdTerminal[passIdx] = true; } int Layer::getReplicaID() { return _replicaID; } int Layer::getActivePassPeriod() { return getNumReplicas() / getConvNet().getNumReplicasMin(); } int Layer::getFwdActiveInputReplicaIdx(int passIdx) { const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas(); return passIdx % getActivePassPeriod() == 0 ? edge : -1; } int Layer::getBwdActiveInputReplicaIdx(int passIdx) { const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas(); return (passIdx + 1) % getActivePassPeriod() == 0 ? 
edge : -1; } int Layer::getFwdActiveReplicaIdx(int passIdx) { assert(_next.size() > 0); return _next[0]->getFwdActiveInputReplicaIdx(passIdx); } int Layer::getNumReplicas() { return _replicas.size(); } int Layer::getNumSiblingReplicas() { return getNumReplicas() / getNumReplicasNext(); } int Layer::getNumReplicasPrev() { return _numReplicasPrev; } int Layer::getNumReplicasNext() { return _numReplicasNext; } int Layer::getNumInputReplicas() { return _numReplicasPrev / getNumReplicas(); } int Layer::getReplicaIdx() { return getReplicaID() % getNumSiblingReplicas(); } int Layer::getNumLayersPrev() { return _prev.size() > 0 ? _prev[0].size() : 0; } void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) { assert(_memSrcActs[deviceID]->isParent()); delete _memSrcActs[deviceID]; _memSrcActs[deviceID] = &mem; if (_actsTarget >= 0 && deviceID == getDeviceID()) { assert(getNumInputReplicas() == 1); _prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName())); } } void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) { assert(_memSrcActsGrad[deviceID]->isParent()); delete _memSrcActsGrad[deviceID]; _memSrcActsGrad[deviceID] = &mem; if (_actsGradTarget >= 0 && deviceID == getDeviceID()) { assert(getNumInputReplicas() == 1); _prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName())); } } MemoryView& Layer::getMemorySourceActs(int deviceID) { return *_memSrcActs[deviceID]; } MemoryView& Layer::getMemorySourceActsGrad(int deviceID) { return *_memSrcActsGrad[deviceID]; } int Layer::getNumOutputs() { return _numOutputs; } void Layer::setInputIdx(std::string& parentName, int idx) { _inputIndices[parentName] = idx; } int Layer::getInputIdx(std::string& parentName) { return _inputIndices[parentName]; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true) { PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron"); _neuronType = pyDictGetString(neuronDict, "type"); _neuron = &Neuron::makeNeuron(neuronDict); } NeuronLayer::~NeuronLayer() { delete _neuron; } void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) { _neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0); } } bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { // Special optimization for cross-entropy objective with logistic units. // Better to just compute the input gradient in one go to avoid division by small numbers. 
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1 && (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce") && _next[0]->getDeviceID() == getDeviceID() && _next[0]->getNumReplicas() == getNumReplicas(); LayerV& prev = _prev[replicaIdx]; if (doCrossEntGrad) { NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]); float gradCoeff = cost.getCoeff(); labels.transpose(_trans); if (cost.getPosWeight() == 1) { if (scaleTargets == 0) { getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad()); } else { getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)), labels, prev[0]->getActsGrad(), prev[0]->getActsGrad()); } } else { if (scaleTargets == 0) { getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad()); } else { getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())), labels, prev[0]->getActsGrad(), prev[0]->getActsGrad()); } } } return doCrossEntGrad; } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { _neuron->activate(*_inputs[0], getActs()); } std::string& NeuronLayer::getNeuronType() { return _neuronType; } /* * ======================= * WeightLayer * ======================= * * The useGrad parameter here merely expresses a preference by the subclass. It may * be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes. * So when computing gradient updates, the subclass must always first check weights.isUseGrad(). * * Note: biases always useGrad. */ WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) : Layer(convNetThread, paramsDict, replicaID, trans) { _weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod"); MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW"); PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed"); /* * When there are multiple replicas, the present implementation * requires that useGrad is true. This is because weights.update() * performs a simultaneous write to both replicas' weightsInc matrix, * which means that the read should come from somewhere else (i.e. a * grads matrix). 
*/ useGrad |= _numReplicas > 1; // Source layers for shared weights stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); _weights = new WeightList(); for (int i = 0; i < weightSourceLayers.size(); i++) { std::string& srcLayerName = weightSourceLayers[i]; int matrixIdx = weightSourceMatrixIndices[i]; PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i); ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule if (srcLayerName == _name) { // Current layer _weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this)); } else if (srcLayerName != "") { WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights->addWeights(*new Weights(*srcWeights, lrs, *this)); } else { _weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true); delete &weightSourceLayers; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &wc; delete &wball; _wStep = 0.02; _bStep = 0.005; } WeightLayer::~WeightLayer() { delete _weights; delete _biases; } bool WeightLayer::postInit() { if (Layer::postInit()) { _weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod()); assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0); return true; } return false; } void WeightLayer::fpropCommon(PASS_TYPE passType) { } void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) { if (_biases->getLearningRateSchedule().getBaseValue() > 0) { if (v.getNumElements() > 0) { bpropBiases(v, passType); } else { _biases->getGrad().resize(_biases->getW()); _biases->getGrad().scale(getBIncScale()); } _biases->incNumUpdates(); } for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { if (v.getNumElements() > 0) { bpropWeights(v, replicaIdx, i, passType); } else { _weights->at(i).getGrad().resize(_weights->at(i).getW()); // This will cause it to forget momentum when shown 0 training cases // and _useGrad = false but it's not too important. 
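                /* Hedged note: v presumably has zero elements when this replica's microbatch is
                 * empty (e.g. 7 cases split across 8 replicas, the situation DataCopyThread::run()
                 * checks for), so the gradient buffer is only resized and rescaled here to keep the
                 * shared update path seeing consistently-shaped matrices. */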
_weights->at(i).getGrad().scale(getIncScale(i, passType)); } // Increment its number of updates _weights->at(i).incNumUpdates(); } } } bool WeightLayer::updateWeights() { if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) { _weights->update(getConvNet().getTrainingProgress()); _biases->update(getConvNet().getTrainingProgress()); // constrainWeights(); return true; } return false; } bool WeightLayer::constrainWeights() { if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) { _constrainWeights(); return true; } return false; } void WeightLayer::_constrainWeights() { } void WeightLayer::copyToCPU() { _weights->copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { _weights->copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradient() { for (int i = 0; i < _weights->getSize(); i++) { getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i)); } getConvNet().checkGradient(_name + " biases", _bStep, *_biases); } void WeightLayer::addReplica(Layer& l) { Layer::addReplica(l); _weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights); _biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases); } Weights& WeightLayer::getWeights(int idx) { return _weights->at(idx); } float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) { // weight update period must be multiple of activation period // TODO: simply accumulate # of cases seen between weight updates. simpler and more accurate. double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses())); if (_weights->at(inpIdx).isUseGrad()) { return passType == PASS_GC ? 1.0f : 1.0f / numCases; } return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases; } float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) { if (_weights->at(inpIdx).isUseGrad()) { return _weights->at(inpIdx).getNumUpdates() > 0; } return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0 : (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f)); } NVMatrix& WeightLayer::getGradTarget(int inpIdx) { return _weights->at(inpIdx).getGrad(); } float WeightLayer::getBGradScale(PASS_TYPE passType) { int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses()); return passType == PASS_GC ? 
1.0f : 1.0f / numCases; } float WeightLayer::getBIncScale() { return _biases->getNumUpdates() > 0; } NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) { return _weights->at(inpIdx).getW(); } NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) { return _biases->getW(); } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) { _wStep = 0.01; _bStep = 0.01; } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType), 1, getActs()); } } void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose(); _prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { _biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType)); } void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose(); float scaleGrad = getGradScale(inpIdx, passType); float scaleInc = getIncScale(inpIdx, passType); getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad); delete &prevActs_T; } void FCLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { // NVMatrix norm2; // Unfortunate extra weight matrix... 
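            /* Rough sketch of the constraint, with assumptions flagged: sumOfSquares(0, _norm2)
             * leaves one squared norm per weight column (one per output unit, assuming the usual
             * inputs-by-outputs layout), HardWeightConstraintOperator presumably maps each entry to
             * a rescaling factor that is 1 inside the ball of radius wball and shrinks columns that
             * fall outside it, and eltwiseMultByVector then applies that per-column factor. */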
_weights->at(i).getW().sumOfSquares(0, _norm2); // norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall())); _norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall())); _weights->at(i).getW().eltwiseMultByVector(_norm2); } } } /* * ======================= * SplitFCLayer * ======================= */ SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : FCLayer(convNetThread, paramsDict, replicaID, useGrad) { _numParts = pyDictGetInt(paramsDict, "parts"); } void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true); NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts); NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts); NVMatrixV& splitTarget = getActs().splitCols(_numParts); NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType), 1, getActs()); } deleteElements(splitInput, true); deleteElements(splitWeights, true); deleteElements(splitTarget, true); } void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose(); _prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]); NVMatrixV& splitV = v.splitCols(_numParts); NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts); NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts); NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1); delete &weights_T; deleteElements(splitV, true); deleteElements(splitWeights_T, true); deleteElements(splitTarget, true); } void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose(); NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts); NVMatrixV& splitV = v.splitCols(_numParts); NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts); NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType)); delete &prevActs_T; deleteElements(splitPrevActs_T, true); deleteElements(splitV, true); deleteElements(splitGradTarget, true); } /* * ======================= * TwoDLayerInterface * ======================= */ TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _imgPixels = _imgSize * _imgSize; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) { _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = 
pyDictGetInt(paramsDict, "modules"); } LocalLayer::~LocalLayer() { delete _padding; delete _stride; delete _filterSize; delete _channels; delete _imgSize; delete _groups; delete _filterChannels; delete _filterPixels; delete _imgPixels; } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LocalLayer(convNetThread, paramsDict, replicaID, true) { _sumWidth = pyDictGetInt(paramsDict, "sumWidth"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); _weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin"); _weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax"); } ConvLayer::~ConvLayer() { delete _weightContrastNormMin; delete _weightContrastNormMax; } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(getBiasMatrix(passType)); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(getBiasMatrix(passType)); } } } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { float scaleBGrad = getBGradScale(passType); float scaleInc = getBIncScale(); if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); } else { _biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { assert(_weights->at(inpIdx).isUseGrad()); bool doPartialSum = _sumWidth < _modulesX; NVMatrix& tgt = doPartialSum ? 
_weightGradTmp : _weights->at(inpIdx).getGrad(); float scaleWGrad = getGradScale(inpIdx, passType); float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum; convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad); if (doPartialSum) { scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0; int outWidth = DIVUP(_modulesX, _sumWidth); _weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); } } void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); _weightGradTmp.truncate(); } void ConvLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { float fz = _weights->at(i).getW().getNumRows(); NVMatrix tmp; _weights->at(i).getW().sum(0, tmp); _weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad()); // Now _weights->at(i).getGrad() contains zero-mean filters _weights->at(i).getGrad().apply(NVMatrixOps::Square()); _weights->at(i).getGrad().sum(0, tmp); tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz)); // Now tmp has the stdev _weights->at(i).getW().eltwiseMultByVector(tmp); } // It's pretty silly to do both these things but whatever if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { // NVMatrix norm2; _weights->at(i).getW().sumOfSquares(0, _norm2); // norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall())); _norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall())); _weights->at(i).getW().eltwiseMultByVector(_norm2); } } } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LocalLayer(convNetThread, paramsDict, replicaID, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType)); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { _biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType)); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { float scaleWGrad = getGradScale(inpIdx, passType); float scaleInc = getIncScale(inpIdx, passType); localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), 
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } void LocalUnsharedLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall()); } } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { NVMatrix& input = *_inputs[0]; input.max(1, _max); input.addVector(_max, -1, getActs()); getActs().apply(NVMatrixOps::Exp()); getActs().sum(1, _sum); getActs().eltwiseDivideByVector(_sum); } void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); LayerV& prev = _prev[replicaIdx]; if (_doUpperGrad) { // Todo: rethink replica IDs or idxes... this here doesn't make a huge amount of sense for (int i = 0; i < _next.size(); ++i) { if (_next[i]->isGradProducer(getName())) { NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); break; } } } else { computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1); } } void SoftmaxLayer::setDoUpperGrad(bool b) { _doUpperGrad = b; } /* * ======================= * ConcatenationLayer * ======================= */ ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets"); _copyOffsets->push_back(_numOutputs); } ConcatenationLayer::~ConcatenationLayer() { delete _copyOffsets; } void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols()); _inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0); } void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view _prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1); delete &copySrc; } /* * ======================= * PassThroughLayer * ======================= */ PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // No-op } void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, 
float scaleTargets, PASS_TYPE passType) { // No-op } bool PassThroughLayer::postInit() { if (Layer::postInit()) { assert(getNumInputReplicas() == 1); for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) { MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs())); MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs())); _prev[0][i]->setMemorySourceActs(getDeviceID(), vActs); _prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad); } return true; } return false; } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } EltwiseSumLayer::~EltwiseSumLayer() { delete _coeffs; } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx)); } void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (inpIdx == 1) { // Second input: nothing is done for inpIdx == 0; here we take the elementwise max of inputs 0 and 1 _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * DropoutLayer * ======================= * * TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/ DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _enable = pyDictGetInt(paramsDict, "enable"); _keep = pyDictGetFloat(paramsDict, "keep"); } void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_enable && passType == PASS_TRAIN) { _keepMask.resize(*_inputs[inpIdx]); _keepMask.randomizeUniform(); _keepMask.apply(DropoutSmallerThanOperator(_keep)); _inputs[inpIdx]->eltwiseMult(_keepMask, getActs()); } else { _inputs[inpIdx]->copy(getActs()); } } void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { LayerV& prev = _prev[replicaIdx]; if (_enable && passType == PASS_TRAIN) { if (scaleTargets != 0) { v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()), _keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad()); } } else { prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1); } } void DropoutLayer::truncBwdActs() { Layer::truncBwdActs(); _keepMask.truncate(); } /* * ======================= * Dropout2Layer * ======================= * * TODO: optimize away the case when using dropout over relus. Don't need the keepmask. */ Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) { } void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_enable && passType == PASS_TRAIN) { _keepMask.resize(*_inputs[inpIdx]); _keepMask.randomizeUniform(); _keepMask.smallerThanScalar(_keep); _inputs[inpIdx]->eltwiseMult(_keepMask, getActs()); } else { _inputs[inpIdx]->scale(_keep, getActs()); } } void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { LayerV& prev = _prev[replicaIdx]; if (_enable && passType == PASS_TRAIN) { if (scaleTargets != 0) { v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()), _keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad()); } } else { if (scaleTargets != 0) { v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)), prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.scale(_keep, prev[inpIdx]->getActsGrad()); } } } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); _start = pyDictGetInt(paramsDict, "start"); _end = pyDictGetInt(paramsDict, "end"); _useBuffer = false; _outstandingCopyRequest = false; _convNet = convNet; } DataLayer::~DataLayer() { for (map<int,hipStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) { checkCudaErrors(hipStreamDestroy(it->second)); } for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } _copier->stop(); delete _copier; } void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) { waitForCopyFinish(); if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) { _useBuffer = !_useBuffer;
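        /* Double-buffering note (illustrative, inferred from getActs(deviceID, other, numCases)):
         * _memSrcActs and _memSrcActs2 act as front/back buffers, so flipping _useBuffer publishes
         * the buffer the DataCopyThread filled most recently while the next copyData() call can
         * overwrite the other one. */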
} for (int i = 0; i < _next.size(); i++) { _next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx)); } } void DataLayer::waitForCopyFinish() { if (_outstandingCopyRequest) { _copyFinishQueue.dequeue(); assert(_copyFinishQueue.getNumElements() == 0); _outstandingCopyRequest = false; } } hipStream_t DataLayer::getCopyStream(int deviceID) { if (_copyStreams.count(deviceID) == 0) { NVMatrix::setDeviceID(deviceID); checkCudaErrors(hipStreamCreateWithFlags(&_copyStreams[deviceID], hipStreamNonBlocking)); } return _copyStreams[deviceID]; } void DataLayer::copyData(CPUData& data, bool other, int passIdx) { assert(!_outstandingCopyRequest); assert(_copyFinishQueue.getNumElements() == 0); _copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx)); _outstandingCopyRequest = true; } int DataLayer::getNumInputReplicas() { return _convNet->getNumReplicasMax() / getNumReplicas(); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { } NVMatrix& DataLayer::getActs(int deviceID) { return getActs(deviceID, false, -1); } NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) { // printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases); assert(_memSrcActs.count(deviceID) > 0); assert(_memSrcActs2.count(deviceID) > 0); return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases)); } ConvNet& DataLayer::getConvNet() { return *_convNet; } bool DataLayer::postInit() { if (Layer::postInit()) { for (int i = 0; i < _next.size(); ++i) { int d = _next[i]->getDeviceID(); if (_memSrcActs2.count(d) == 0) { _memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName()); } } intv cpus = getDeviceCPUs(_next[0]->getDeviceID()); _copier = new DataCopyThread(*this, cpus); _copier->start(); return true; } return false; } bool DataLayer::isGradProducer() { return false; } /* * ======================= * DataCopyThread * ======================= */ DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) { } Queue<DataCopyMessage*>& DataCopyThread::getQueue() { return _queue; } void DataCopyThread::stop() { getQueue().enqueue(new DataCopyExitMessage()); join(); } void* DataCopyThread::run() { NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin()); bool exit = false; while(!exit) { DataCopyMessage& msg = *_queue.dequeue(); exit = msg.getType() == DataCopyMessage::EXIT; if (!exit) { CPUData& data = msg.getData(); int passIdx = msg.getPassIdx(); bool other = msg.isOther(); Matrix& dataMatrix = data.getData(_parent->getDataIdx()); // How many times is this layer going to process microbatches from this minibatch? assert(_parent->getNumReplicasNext() == _parent->getNumReplicas()); int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx); if (microIdx >= 0) { if (_requestTimer.isStarted()) { double requestIntervalMsec = _requestTimer.stop(); // Sleep for up to 1/20th the average request interval _sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0)); } _requestTimer.start(); if (other) { // Sleeping a bit is helpful because in typical nets, copying input data // as soon as it's available will produce contention with other communications // that are happening at the time. 
This is very much a hack, so in the future // it might be good to replace it with something smarter which schedules access // to communication links. usleep(_sleepUsec); } microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas(); // Safer to divup because this way you won't get a minibatch size of 0 int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax()); int microStart = microIdx * microbatchSize; int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize); // Check that this replica has some data. This can be false when, for example, // there are only 7 examples in the minibatch but 8 replicas. if (microStart < microEnd) { assert(dataMatrix.isView() == dataMatrix.isTrans()); int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2); if (dataMatrix.isTrans()) { Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd); // In this case, dataMatrix is a view on memory allocated by Python. //_hostMemFwd.copyFromHost(replicaDataMatrix, true); _hostMemFwd.resize(replicaDataMatrix.getNumRows(), replicaDataMatrix.getNumCols(), true); memcpy(_hostMemFwd.getDevData(), replicaDataMatrix.getData(), replicaDataMatrix.getNumDataBytes()); delete &replicaDataMatrix; // view NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd()); for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; // Copy my output to this guy's GPU NVMatrix::setDeviceID(deviceID); // Note to self: this is the path that gets executed in practice // in my models. It does a transpose & copy simultaneously. hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID)); } delete &hostMemFwdSlice; } else { // Hacky way to copy a slice to _hostMemFwd _hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart); Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans()); dataMatrix.sliceCols(microStart, microEnd, tmp); NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd()); for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; // Copy my output to this guy's GPU NVMatrix::setDeviceID(deviceID); hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID)); } delete &hostMemFwdSlice; } for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; NVMatrix::setDeviceID(deviceID); NVMatrix::syncStream(_parent->getCopyStream(deviceID)); } _parent->getConvNet().getDataCopyPD().freePipe(pipe); } else { for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; _parent->getActs(deviceID, other, 0); } } } _parent->getCopyFinishQueue().enqueue(1); } delete &msg; } return NULL; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) { _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _pool = 
pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) { std::string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false); } else if(_pool == "maxabs") { return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true); } else if(_pool == "avg") { return *new AvgPoolLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_abs) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler()); } else { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler()); } } void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); } /* * ===================== * CrossMapPoolLayer * ===================== */ CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) { _size = pyDictGetInt(paramsDict, "size"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputs = pyDictGetInt(paramsDict, "outputChannels"); _pool = pyDictGetString(paramsDict, "pool"); } CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) { std::string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown pooling layer type ") + _pool; } /* * ===================== * CrossMapMaxPoolLayer * ===================== */ CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) { } void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler()); } void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), 
_prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1); } /* * ===================== * RandomScaleLayer * ===================== */ RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _maxScale = pyDictGetFloat(paramsDict, "maxScale"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); // The smallest size the image could be after rescaling _minScaledSize = _imgSize / _maxScale; // The number of discrete scales we're considering int numScales = _imgSize - _minScaledSize + 1; // The total number of squares of size _tgtSize that we can extract // from all these scales double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6; // For each scale, record the fraction of the squares that it has. // This will be the probability of sampling this scale. _scaleProbs.push_back(1.0 / numCrops); for (int s = 1; s < numScales; ++s) { _scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops); } } void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (IS_TRAIN(passType)) { // _maxScale is in the range [1, 2) float r = randf; int rescaledSize = _tgtSize; float scaleFactor = _maxScale; // Find which scale we have sampled for (int s = 0; s < _scaleProbs.size(); ++s) { if (r <= _scaleProbs[s]) { rescaledSize += s; float scaleFactorEnd = _imgSize / float(rescaledSize); float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize)); scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart); break; } } assert(rescaledSize >= _tgtSize); int maxStart = rescaledSize - _tgtSize; int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart); if (rescaledSize == _imgSize) { convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX); } else { convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor); convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX); } _rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it. } else if (IS_MULTIVIEW_TEST(passType)) { // for now... 
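        /* Note on the sampling in the training branch above (illustrative): each candidate rescaled
         * size _tgtSize + s is drawn with probability proportional to (s + 1)^2, its number of
         * distinct _tgtSize x _tgtSize crop positions, and _scaleProbs holds the corresponding
         * cumulative distribution that the loop over scales walks. */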
_inputs[0]->copy(getActs()); } else if (IS_TEST(passType)) { // Test on center patch convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale); } } void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * CropLayer * ===================== */ CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _startX = pyDictGetInt(paramsDict, "startX"); _startY = pyDictGetInt(paramsDict, "startY"); _tgtSize = pyDictGetInt(paramsDict, "sizeX"); } void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX); } void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _hFilter = pyDictGetMatrix(paramsDict, "filter"); } GaussianBlurLayer::~GaussianBlurLayer() { delete _hFilter; } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? 
_actGradsTmp : _prev[replicaIdx][0]->getActsGrad(); convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1); convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { _filter.copyFromHost(*_hFilter, true); } /* * ===================== * HorizontalReflectionLayer * ===================== */ HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { assert(_channels >= 1 && _channels <= 3); } void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convReflectHorizontal(*_inputs[0], getActs(), _imgSize); } void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _size = pyDictGetInt(paramsDict, "size"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); _minDiv = pyDictGetFloat(paramsDict, "minDiv"); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv); } void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), 
_prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() { Layer::truncBwdActs(); _denoms.truncate(); } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) { _blocked = pyDictGetInt(paramsDict, "blocked"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { assert(inpIdx == 0); convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) { } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv); } void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); _meanDiffs.truncate(); } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); _numCases = 0; _aggregated = pyDictGetInt(paramsDict, "aggregated") != 0; } float CostLayer::getCoeff() { return _coeff; } void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) { if (_coeff != 0) { Layer::bprop(v, passType, passIdx); } } bool CostLayer::fprop(PASS_TYPE passType, int passIdx) { if (Layer::fprop(passType, passIdx)) { syncStream(); getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL)); return true; } return false; } void CostLayer::fpropCommon(PASS_TYPE passType) { _numCases = Layer::getNumCases(*_inputs[0]); } int CostLayer::getNumCases() { return _numCases; } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { return *new doublev(_costv); } // This is called between microbatches void CostLayer::resetPassIdx() { Layer::resetPassIdx(); _costv.clear(); } CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) { if (type == "cost.crossent") { return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.bce") { return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.dce") { return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID); } else if (type == 
"cost.logreg") { return *new LogregCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown cost layer type ") + type; } /* * ===================== * CrossEntCostLayer * ===================== */ CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { } void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeCrossEntCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); } } void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID(); if (doWork) { computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * BinomialCrossEntropyCostLayer * ===================== */ BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { _computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate"); _posWeight = pyDictGetFloat(paramsDict, "posWeight"); } void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs); _costv.clear(); // Cross-entropy cost _costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim()); // If aggregated, we don't produce these outputs because they're not additive. // They have no meaning if this is just a partial cost. if (!_aggregated) { // "Correct" classifications. To compute these we threshold probs // and just count the number of entries that agree with labels. 
probs.biggerThanScalar(0.5, _tmpProbs); _tmpProbs.equals(labels); _costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim())); if (_computeSoftmaxErrorRate) { // Also compute top-1 error as if this is softmax and there's only one correct class probs.max(0, _tmpVec); assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis probs.equalsVector(_tmpVec, _correctProbs); _correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present float m = _tmpVec.max(); _correctProbs.eltwiseDivideByVector(_tmpVec); _correctProbs.eltwiseMult(labels); _costv.push_back(numCases - _correctProbs.sum(_tmpbuf)); } } } } void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "neuron" || static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic" || prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas(); if (doWork) { printf("Computing cross-entropy gradient the stupid way\n"); if (scaleTargets == 0) { labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target); } else { labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target); } } } float BinomialCrossEntropyCostLayer::getPosWeight() { return _posWeight; } /* * ===================== * DetectionCrossEntropyCostLayer * ===================== */ DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) { assert(!_aggregated); } void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx); // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); /* * Add information sufficient to compute precision and recall for each class. 
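             * (Illustrative: downstream code can reassemble, per class c, precision_c =
             *  numTruePositive_c / numDeclaredPositive_c and recall_c = numTruePositive_c /
             *  numPositive_c from the three per-class sums pushed into _costv below.)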
*/ // NOTE: _tmpProbs contains ((probs > 0.5) == labels) labels.sum(1, _numPositive); // sum(labels, 1) _tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels) _tmpProbs.sum(1, _numTruePositive); probs.biggerThanScalar(0.5, _tmpProbs); _tmpProbs.sum(1, _numDeclaredPositive); _numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true); _numPositive.copyToHost(_hNumPositive, true); _numTruePositive.copyToHost(_hNumTruePositive, true); for (int i = 0; i < labels.getFollowingDim(); ++i) { _costv.push_back(_hNumDeclaredPositive(i, 0)); // 2 _costv.push_back(_hNumPositive(i, 0)); // 3 _costv.push_back(_hNumTruePositive(i, 0)); // 4 } } } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { _topk = pyDictGetInt(paramsDict, "topk"); // _numAccumed = 0; } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix* probs = _inputs[1]; _doCompute = !IS_MULTIVIEW_TEST(passType); if (!_doCompute) { if (IS_MULTIVIEW_TEST_START(passType)) { if (_probsAccum.count(passIdx) == 0) { _probsAccum[passIdx] = new NVMatrix(*probs); } probs->copy(*_probsAccum[passIdx]); _numAccumed[passIdx] = 1; } else { _probsAccum[passIdx]->add(*probs); _numAccumed[passIdx] += 1; } if (IS_MULTIVIEW_TEST_END(passType)) { probs = _probsAccum[passIdx]; probs->scale(1.0 / _numAccumed[passIdx]); _doCompute = true; } } if (_doCompute) { int numCases = labels.getNumElements(); probs->max(0,_maxProbs); if (_topk == 1) { computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs); } else { computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk); } _costv.clear(); double top1 = _correctProbs.sum(_tmpbuf); _costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf)); _costv.push_back(numCases - top1); _costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf))); } } } NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) { return *_probsAccum[replicaIdx]; } void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. 
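            /* Sketch of the fused path (illustrative): for p = softmax(x), the gradient of the
             * log-likelihood objective collapses to dJ/dx_i = coeff * (1{i == label} - p_i), so when
             * the producer is a softmax layer on the same device and replica the whole gradient is
             * computed there (via the setDoUpperGrad call below) rather than first forming
             * coeff / p_label, which can blow up when p_label is tiny. */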
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas(); if (prev[1]->getType() == "softmax") { static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork); } if (doWork) { computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } } /* * ===================== * SumOfSquaresCostLayer * ===================== */ SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { } void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { _inputs[0]->apply(NVMatrixOps::Square(), _tmp); _costv.clear(); _costv.push_back(_tmp.sum()); } void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff); }
a10a7e55c874a78b3608a0cd2101bebf50a9701c.cu
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <helper_cuda.h> #include <iostream> #include <set> #include "../../cudaconv3/include/cudaconv2.cuh" #include "../../util/include/matrix.h" #include "../include/layer_kernels.cuh" #include "../include/layer.cuh" #include "../include/data.cuh" #include "../include/util.cuh" #include "../include/weights.cuh" using namespace std; /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : _convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _numOutputs = pyDictGetInt(paramsDict, "outputs"); _numReplicas = pyDictGetInt(paramsDict, "numReplicas"); _numReplicasPrev = 1; _rcvdBInputMsgs = 0; _actBroadcaster = NULL; _gradReducer = NULL; _initialized = false; } Layer::~Layer() { if (_actBroadcaster != NULL) { _actBroadcaster->stop(); delete _actBroadcaster; } if (_gradReducer != NULL) { _gradReducer->stop(); delete _gradReducer; } // For now, gradReducer doesn't have a destructor // delete _gradReducer; for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } } cudaStream_t Layer::getStream() { assert(getDeviceID() >= 0); return NVMatrix::getDefaultStream(getDeviceID()); } void Layer::syncStream() { NVMatrix::syncStream(getStream()); } void Layer::fpropNext(PASS_TYPE passType, int passIdx) { if (_next.size() > 0) { if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) { syncStream(); // Make sure I've finished computing before broadcasting } getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue)); } if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) { _broadcastFinishQueue.dequeue(); assert(_broadcastFinishQueue.getNumElements() == 0); } } for (int i = 0; i < _next.size(); i++) { _next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx)); } } bool Layer::fprop(PASS_TYPE passType, int passIdx) { _rcvdFInputMsgs++; // I require messages from *all* input replicas because it makes the propagation easier to think about. 
// Without this requirement, when all fprop terminal msgs arrive to ConvNet, the forward propagation // might not actually be finished yet. if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) { // printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID()); int ridx = getFwdActiveInputReplicaIdx(passIdx); assert(getDeviceID() == NVMatrix::getDeviceID()); map<int, NVMatrix*> v; if (ridx >= 0) { for (int i = 0; i < getNumLayersPrev(); i++) { v[i] = &_prev[ridx][i]->getActs(getDeviceID()); } } fprop(v, passType, passIdx); return true; } return false; } void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) { if (getFwdActiveInputReplicaIdx(passIdx) >= 0) { assert(v.size() == getNumLayersPrev()); _inputs.clear(); _inputs.insert(v.begin(), v.end()); int numCases = _inputs[0]->getLeadingDim(); for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { it->second->getMemory(numCases); } if (numCases > 0) { //printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases); _rcvdFInputMsgs = getNumExpectedFwdMsgs(); for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) { it->second->transpose(_trans); } getActs().transpose(_trans); fpropCommon(passType); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType, passIdx); } // Then add the rest of the inputs to that for (int i = 0; i < getNumLayersPrev(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx); } } } } fpropNext(passType, passIdx); } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_actsGradTarget < 0) { for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { it->second->getMemorySource().truncate(getName()); } } if (_actsTarget < 0) { for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { it->second->getMemorySource().truncate(getName()); } } } int Layer::getNumGradProducersNext() { return _numGradProducersNext; } int Layer::getNumExpectedBwdMsgs() { return _numGradProducersNext * getNumSiblingReplicas(); } int Layer::getNumExpectedFwdMsgs() { return getNumLayersPrev() * getNumInputReplicas(); } void Layer::bprop(PASS_TYPE passType, int passIdx) { if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) { // printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID()); if (_gradReducer != NULL) { _gradReducer->waitForFinish(); } // This does sync, but only if it has grad consumers below! so we must sync again before sending bprop terminal messages bprop(getActsGrad(), passType, passIdx); if (_bwdTerminal[passIdx]) { syncStream(); getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL)); } } } void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) { Layer& prev = *_prev[replicaIdx][inputIdx]; if (prev.isGradConsumer() && isGradProducer(prev.getName())) { if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0 bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType); } prev.getNumComputedActsGrads(getDeviceID())++; // Synchronize if the previous layer is going to actually do a reduction. // If the previous layer is on the same GPU as us and has no next layers // on other GPUs then it won't need to do a reduction. 
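        /*
         * (When the previous layer lives only on this GPU, its consumers and the gradient
         * kernels issued above are presumably queued on the same stream, so stream ordering
         * already guarantees they run in order and the explicit syncStream() is unnecessary.)
         */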
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) { syncStream(); } prev.getGradReducer().enqueueReduction(getDeviceID()); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) { v.transpose(_trans); assert(getDeviceID() == NVMatrix::getDeviceID()); int ridx = getBwdActiveInputReplicaIdx(passIdx); LayerV& prev = _prev[ridx]; map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx]; for (int i = 0; i < prev.size(); i++) { _inputs[i]->transpose(_trans); prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); // NOTE: this should be here (before the bpropActs) because if you have a layer // that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite // v which is used in bpropCommon. So bpropCommon must come first. bpropCommon(v, ridx, passType); if (isGradProducer()) { // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) { const set<Layer*>& deviceLayers = it->second; for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) { if (_actsGradTarget != (*it2)->getInputIdx(_name)) { bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name)); } } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0) { bpropActsCall(v, passType, ridx, _actsGradTarget); } } // Synchronization is necessary because the kernel calls that compute my backward acts // execute asynchronously. Therefore I don't want to tell other threads that I've // computed bprop activities for them when in fact I've only called a function which // will eventually compute them. if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) { syncStream(); } if (getConvNet().isConserveMemory()) { truncBwdActs(); } if (isGradProducer()) { /*for (int i = 0; i < prev.size(); i++) { if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) { prev[i]->getGradReducer().enqueueReduction(getDeviceID()); } }*/ // Send backward messages to *all* replicas. // Note that the messages will be dismissed unless the passIdx indicates // that the previous layer should do some work. for (int r = 0; r < getNumInputReplicas(); r++) { for (int i = 0; i < _prev[r].size(); i++) { if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) { _prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx)); } } } } } IActGradReducer& Layer::getGradReducer() { return *_gradReducer; } // This is called between minibatches void Layer::reset() { _rcvdFInputMsgs = 0; _rcvdBInputMsgs = 0; for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) { it->second = 0; } } // This is called between microbatches void Layer::resetPassIdx() { _rcvdFInputMsgs = 0; if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) { reset(); } } /* * Returns number of cases in given matrix. 
*/ int Layer::getNumCases(NVMatrix& v) { return v.getLeadingDim(); } int Layer::incRcvdBInputMsgs() { return ++_rcvdBInputMsgs; } std::string& Layer::getName() { return _name; } std::string& Layer::getType() { return _type; } int& Layer::getNumComputedActsGrads(int deviceID) { return _numComputedActsGrads[deviceID]; } void Layer::addNext(Layer& l) { _next.push_back(&l); _numReplicasNext = l.getNumReplicas(); if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) { int pos = rand() % (_nextDeviceIDs.size() + 1); _nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID()); } } void Layer::addPrev(Layer& l, int replicaIdx) { _prev[replicaIdx].push_back(&l); _numReplicasPrev = l.getNumReplicas(); l.setInputIdx(getName(), _prev[replicaIdx].size() - 1); if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) { int pos = rand() % (_prevDeviceIDs.size() + 1); _prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID()); } } void Layer::addReplica(Layer& l) { assert(_replicas.count(l.getReplicaID()) == 0); _replicas[l.getReplicaID()] = &l; } bool Layer::hasGradProducerNext(std::string& layerName) { bool b = _next.size() == 0; for (int i = 0; i < _next.size(); i++) { b |= _next[i]->hasGradProducerNext(_name); } return b && isGradProducer(layerName); } bool Layer::postInit() { // We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop(). // In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating // it from _prev->getActs() // _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs(); if (!_initialized) { _initialized = true; map<int,int> numGradProducersNext; _numGradProducersNext = 0; for (int r = 0; r < getNumInputReplicas(); ++r) { for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) { (*it)->postInit(); } } _memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName()) : &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name); // _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0] _memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName()) : &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name); for (int i = 0; i < _next.size(); ++i) { int d = _next[i]->getDeviceID(); _numComputedActsGrads[d] = 0; if (_next[i]->hasGradProducerNext(_name)) { if (numGradProducersNext.count(d) == 0) { numGradProducersNext[d] = 0; } numGradProducersNext[d]++; _numGradProducersNext++; if (_memSrcActsGrad.count(d) == 0) { _memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName()); } } if (_memSrcActs.count(d) == 0) { _memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName()); } } if (_next.size() == 0) { _numReplicasNext = getNumReplicas(); } /* * Initialize forward broadcaster. First sibling owns it. */ if (getReplicaIdx() == 0 && _convNetThread != NULL) { _actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID())); _actBroadcaster->start(); } /* * Initialize backward reducer. 
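 *
 * The reducer presumably accumulates the activity-gradient contributions arriving from the
 * next layers' devices (numGradProducersNext entries per device), which is what bprop()
 * waits on via waitForFinish() before consuming getActsGrad().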
*/ if (isGradConsumer() && _numGradProducersNext > 0) { _gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext); _gradReducer->start(); } /* * Initialize specially sorted previous array */ for (int r = 0; r < _prev.size(); ++r) { for (int i = 0; i < _prev[r].size(); ++i) { // Previous devices in reverse order of processing by (sequential) GradReducer _prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID() + 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]); } } return true; } return false; } ActBroadcaster& Layer::getActBroadcaster() { return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster(); } // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { if (!_foundGradConsumers && _prev.size() > 0) { for (int i = 0; i < _prev[0].size(); i++) { _gradConsumer |= _prev[0][i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? bool Layer::isGradProducer() { return true; } bool Layer::isGradProducer(std::string& layerName) { return isGradProducer(); } map<int,vector<Layer*> >& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { return getActs(getDeviceID()); } NVMatrix& Layer::getActs(int deviceID) { assert(_memSrcActs.count(deviceID) > 0); return _memSrcActs[deviceID]->getMemory(); } NVMatrix& Layer::getActs(int deviceID, int numCases) { assert(_memSrcActs.count(deviceID) > 0); return _memSrcActs[deviceID]->getMemory(numCases); } NVMatrix& Layer::getActsGrad(int deviceID) { assert(_memSrcActsGrad.count(deviceID) > 0); return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim()); } NVMatrix& Layer::getActsGrad() { return getActsGrad(NVMatrix::getDeviceID()); } map<int, NVMatrix*> Layer::getAllActs() { map<int, NVMatrix*> m; for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { m[it->first] = &it->second->getMemory(); } return m; } map<int, NVMatrix*> Layer::getAllActsGrads() { map<int, NVMatrix*> m; for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { m[it->first] = &it->second->getMemory(); } return m; } int Layer::getDeviceID() { return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID(); } ConvNetThread& Layer::getConvNetThread() { assert(_convNetThread != NULL); return *_convNetThread; } ConvNet& Layer::getConvNet() { return getConvNetThread().getConvNet(); } void Layer::setBwdTerminal(int passIdx) { _bwdTerminal[passIdx] = true; } int Layer::getReplicaID() { return _replicaID; } int Layer::getActivePassPeriod() { return getNumReplicas() / getConvNet().getNumReplicasMin(); } int Layer::getFwdActiveInputReplicaIdx(int passIdx) { const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas(); return passIdx % getActivePassPeriod() == 0 ? edge : -1; } int Layer::getBwdActiveInputReplicaIdx(int passIdx) { const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas(); return (passIdx + 1) % getActivePassPeriod() == 0 ? 
edge : -1; } int Layer::getFwdActiveReplicaIdx(int passIdx) { assert(_next.size() > 0); return _next[0]->getFwdActiveInputReplicaIdx(passIdx); } int Layer::getNumReplicas() { return _replicas.size(); } int Layer::getNumSiblingReplicas() { return getNumReplicas() / getNumReplicasNext(); } int Layer::getNumReplicasPrev() { return _numReplicasPrev; } int Layer::getNumReplicasNext() { return _numReplicasNext; } int Layer::getNumInputReplicas() { return _numReplicasPrev / getNumReplicas(); } int Layer::getReplicaIdx() { return getReplicaID() % getNumSiblingReplicas(); } int Layer::getNumLayersPrev() { return _prev.size() > 0 ? _prev[0].size() : 0; } void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) { assert(_memSrcActs[deviceID]->isParent()); delete _memSrcActs[deviceID]; _memSrcActs[deviceID] = &mem; if (_actsTarget >= 0 && deviceID == getDeviceID()) { assert(getNumInputReplicas() == 1); _prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName())); } } void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) { assert(_memSrcActsGrad[deviceID]->isParent()); delete _memSrcActsGrad[deviceID]; _memSrcActsGrad[deviceID] = &mem; if (_actsGradTarget >= 0 && deviceID == getDeviceID()) { assert(getNumInputReplicas() == 1); _prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName())); } } MemoryView& Layer::getMemorySourceActs(int deviceID) { return *_memSrcActs[deviceID]; } MemoryView& Layer::getMemorySourceActsGrad(int deviceID) { return *_memSrcActsGrad[deviceID]; } int Layer::getNumOutputs() { return _numOutputs; } void Layer::setInputIdx(std::string& parentName, int idx) { _inputIndices[parentName] = idx; } int Layer::getInputIdx(std::string& parentName) { return _inputIndices[parentName]; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true) { PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron"); _neuronType = pyDictGetString(neuronDict, "type"); _neuron = &Neuron::makeNeuron(neuronDict); } NeuronLayer::~NeuronLayer() { delete _neuron; } void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) { _neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0); } } bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { // Special optimization for cross-entropy objective with logistic units. // Better to just compute the input gradient in one go to avoid division by small numbers. 
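    /*
     * With a = logistic(z) and binomial cross-entropy -(y*log(a) + (1-y)*log(1-a)), the gradient
     * with respect to z collapses to (a - y): the 1/(a*(1-a)) factor from the cost cancels
     * against the a*(1-a) factor from the logistic derivative. The fused path below therefore
     * forms gradCoeff * (labels - acts) (up to this layer's sign/coefficient convention)
     * instead of dividing by a potentially tiny a*(1-a).
     */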
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1 && (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce") && _next[0]->getDeviceID() == getDeviceID() && _next[0]->getNumReplicas() == getNumReplicas(); LayerV& prev = _prev[replicaIdx]; if (doCrossEntGrad) { NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]); float gradCoeff = cost.getCoeff(); labels.transpose(_trans); if (cost.getPosWeight() == 1) { if (scaleTargets == 0) { getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad()); } else { getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)), labels, prev[0]->getActsGrad(), prev[0]->getActsGrad()); } } else { if (scaleTargets == 0) { getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad()); } else { getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())), labels, prev[0]->getActsGrad(), prev[0]->getActsGrad()); } } } return doCrossEntGrad; } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { _neuron->activate(*_inputs[0], getActs()); } std::string& NeuronLayer::getNeuronType() { return _neuronType; } /* * ======================= * WeightLayer * ======================= * * The useGrad parameter here merely expresses a preference by the subclass. It may * be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes. * So when computing gradient updates, the subclass must always first check weights.isUseGrad(). * * Note: biases always useGrad. */ WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) : Layer(convNetThread, paramsDict, replicaID, trans) { _weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod"); MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW"); PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed"); /* * When there are multiple replicas, the present implementation * requires that useGrad is true. This is because weights.update() * performs a simultaneous write to both replicas' weightsInc matrix, * which means that the read should come from somewhere else (i.e. a * grads matrix). 
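 *
 * (Concretely, with useGrad the per-replica gradient is presumably accumulated into the
 * separate grads matrix first and only folded into weightsInc inside weights.update(), so
 * the value being read does not come from the weightsInc matrix that is being written.)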
*/ useGrad |= _numReplicas > 1; // Source layers for shared weights stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); _weights = new WeightList(); for (int i = 0; i < weightSourceLayers.size(); i++) { std::string& srcLayerName = weightSourceLayers[i]; int matrixIdx = weightSourceMatrixIndices[i]; PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i); ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule if (srcLayerName == _name) { // Current layer _weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this)); } else if (srcLayerName != "") { WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights->addWeights(*new Weights(*srcWeights, lrs, *this)); } else { _weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true); delete &weightSourceLayers; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &wc; delete &wball; _wStep = 0.02; _bStep = 0.005; } WeightLayer::~WeightLayer() { delete _weights; delete _biases; } bool WeightLayer::postInit() { if (Layer::postInit()) { _weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod()); assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0); return true; } return false; } void WeightLayer::fpropCommon(PASS_TYPE passType) { } void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) { if (_biases->getLearningRateSchedule().getBaseValue() > 0) { if (v.getNumElements() > 0) { bpropBiases(v, passType); } else { _biases->getGrad().resize(_biases->getW()); _biases->getGrad().scale(getBIncScale()); } _biases->incNumUpdates(); } for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { if (v.getNumElements() > 0) { bpropWeights(v, replicaIdx, i, passType); } else { _weights->at(i).getGrad().resize(_weights->at(i).getW()); // This will cause it to forget momentum when shown 0 training cases // and _useGrad = false but it's not too important. 
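                /*
                 * (With an empty microbatch nothing is accumulated: the gradient buffer is only
                 * resized and rescaled by the same carry-over factor that bpropWeights() would
                 * have applied, keeping the accumulation over the update period consistent --
                 * see getIncScale() below.)
                 */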
_weights->at(i).getGrad().scale(getIncScale(i, passType)); } // Increment its number of updates _weights->at(i).incNumUpdates(); } } } bool WeightLayer::updateWeights() { if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) { _weights->update(getConvNet().getTrainingProgress()); _biases->update(getConvNet().getTrainingProgress()); // constrainWeights(); return true; } return false; } bool WeightLayer::constrainWeights() { if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) { _constrainWeights(); return true; } return false; } void WeightLayer::_constrainWeights() { } void WeightLayer::copyToCPU() { _weights->copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { _weights->copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradient() { for (int i = 0; i < _weights->getSize(); i++) { getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i)); } getConvNet().checkGradient(_name + " biases", _bStep, *_biases); } void WeightLayer::addReplica(Layer& l) { Layer::addReplica(l); _weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights); _biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases); } Weights& WeightLayer::getWeights(int idx) { return _weights->at(idx); } float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) { // weight update period must be multiple of activation period // TODO: simply accumulate # of cases seen between weight updates. simpler and more accurate. double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses())); if (_weights->at(inpIdx).isUseGrad()) { return passType == PASS_GC ? 1.0f : 1.0f / numCases; } return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases; } float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) { if (_weights->at(inpIdx).isUseGrad()) { return _weights->at(inpIdx).getNumUpdates() > 0; } return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0 : (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f)); } NVMatrix& WeightLayer::getGradTarget(int inpIdx) { return _weights->at(inpIdx).getGrad(); } float WeightLayer::getBGradScale(PASS_TYPE passType) { int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses()); return passType == PASS_GC ? 
1.0f : 1.0f / numCases; } float WeightLayer::getBIncScale() { return _biases->getNumUpdates() > 0; } NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) { return _weights->at(inpIdx).getW(); } NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) { return _biases->getW(); } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) { _wStep = 0.01; _bStep = 0.01; } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType), 1, getActs()); } } void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose(); _prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { _biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType)); } void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose(); float scaleGrad = getGradScale(inpIdx, passType); float scaleInc = getIncScale(inpIdx, passType); getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad); delete &prevActs_T; } void FCLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { // NVMatrix norm2; // Unfortunate extra weight matrix... 
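            /*
             * Max-norm ("weight ball") constraint: compute the per-unit squared L2 norms of W;
             * HardWeightConstraintOperator then presumably maps each norm to a rescaling factor
             * (1 inside the ball, a shrinking factor outside it), which eltwiseMultByVector
             * applies so that no unit's weight norm exceeds getWBall().
             */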
_weights->at(i).getW().sumOfSquares(0, _norm2); // norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall())); _norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall())); _weights->at(i).getW().eltwiseMultByVector(_norm2); } } } /* * ======================= * SplitFCLayer * ======================= */ SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : FCLayer(convNetThread, paramsDict, replicaID, useGrad) { _numParts = pyDictGetInt(paramsDict, "parts"); } void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true); NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts); NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts); NVMatrixV& splitTarget = getActs().splitCols(_numParts); NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType), 1, getActs()); } deleteElements(splitInput, true); deleteElements(splitWeights, true); deleteElements(splitTarget, true); } void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose(); _prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]); NVMatrixV& splitV = v.splitCols(_numParts); NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts); NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts); NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1); delete &weights_T; deleteElements(splitV, true); deleteElements(splitWeights_T, true); deleteElements(splitTarget, true); } void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose(); NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts); NVMatrixV& splitV = v.splitCols(_numParts); NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts); NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType)); delete &prevActs_T; deleteElements(splitPrevActs_T, true); deleteElements(splitV, true); deleteElements(splitGradTarget, true); } /* * ======================= * TwoDLayerInterface * ======================= */ TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _imgPixels = _imgSize * _imgSize; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) { _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = 
pyDictGetInt(paramsDict, "modules"); } LocalLayer::~LocalLayer() { delete _padding; delete _stride; delete _filterSize; delete _channels; delete _imgSize; delete _groups; delete _filterChannels; delete _filterPixels; delete _imgPixels; } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LocalLayer(convNetThread, paramsDict, replicaID, true) { _sumWidth = pyDictGetInt(paramsDict, "sumWidth"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); _weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin"); _weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax"); } ConvLayer::~ConvLayer() { delete _weightContrastNormMin; delete _weightContrastNormMax; } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(getBiasMatrix(passType)); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(getBiasMatrix(passType)); } } } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { float scaleBGrad = getBGradScale(passType); float scaleInc = getBIncScale(); if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); } else { _biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { assert(_weights->at(inpIdx).isUseGrad()); bool doPartialSum = _sumWidth < _modulesX; NVMatrix& tgt = doPartialSum ? 
_weightGradTmp : _weights->at(inpIdx).getGrad(); float scaleWGrad = getGradScale(inpIdx, passType); float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum; convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad); if (doPartialSum) { scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0; int outWidth = DIVUP(_modulesX, _sumWidth); _weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); } } void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); _weightGradTmp.truncate(); } void ConvLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { float fz = _weights->at(i).getW().getNumRows(); NVMatrix tmp; _weights->at(i).getW().sum(0, tmp); _weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad()); // Now _weights->at(i).getGrad() contains zero-mean filters _weights->at(i).getGrad().apply(NVMatrixOps::Square()); _weights->at(i).getGrad().sum(0, tmp); tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz)); // Now tmp has the stdev _weights->at(i).getW().eltwiseMultByVector(tmp); } // It's pretty silly to do both these things but whatever if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { // NVMatrix norm2; _weights->at(i).getW().sumOfSquares(0, _norm2); // norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall())); _norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall())); _weights->at(i).getW().eltwiseMultByVector(_norm2); } } } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LocalLayer(convNetThread, paramsDict, replicaID, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType)); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { _biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType)); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { float scaleWGrad = getGradScale(inpIdx, passType); float scaleInc = getIncScale(inpIdx, passType); localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), 
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } void LocalUnsharedLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall()); } } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { NVMatrix& input = *_inputs[0]; input.max(1, _max); input.addVector(_max, -1, getActs()); getActs().apply(NVMatrixOps::Exp()); getActs().sum(1, _sum); getActs().eltwiseDivideByVector(_sum); } void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); LayerV& prev = _prev[replicaIdx]; if (_doUpperGrad) { // Todo: rethink replica IDs or idxes... this here doesn't make a huge amount of sense for (int i = 0; i < _next.size(); ++i) { if (_next[i]->isGradProducer(getName())) { NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); break; } } } else { computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1); } } void SoftmaxLayer::setDoUpperGrad(bool b) { _doUpperGrad = b; } /* * ======================= * ConcatenationLayer * ======================= */ ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets"); _copyOffsets->push_back(_numOutputs); } ConcatenationLayer::~ConcatenationLayer() { delete _copyOffsets; } void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols()); _inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0); } void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view _prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1); delete &copySrc; } /* * ======================= * PassThroughLayer * ======================= */ PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // No-op } void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, 
float scaleTargets, PASS_TYPE passType) { // No-op } bool PassThroughLayer::postInit() { if (Layer::postInit()) { assert(getNumInputReplicas() == 1); for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) { MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs())); MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs())); _prev[0][i]->setMemorySourceActs(getDeviceID(), vActs); _prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad); } return true; } return false; } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } EltwiseSumLayer::~EltwiseSumLayer() { delete _coeffs; } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx)); } void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (inpIdx == 1) { // First input, do nothing _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * DropoutLayer * ======================= * * TODO: optimize away the case when using dopout over relus. Don't need the keepmask. 
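 *
 * At training time the input is multiplied by a random keep mask (uniform noise thresholded
 * at _keep; DropoutSmallerThanOperator presumably also folds in the 1/_keep rescaling, since
 * the test-time path below is a plain copy). Compare Dropout2Layer, which uses a bare 0/1
 * mask and instead scales by _keep at test time.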
*/ DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _enable = pyDictGetInt(paramsDict, "enable"); _keep = pyDictGetFloat(paramsDict, "keep"); } void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_enable && passType == PASS_TRAIN) { _keepMask.resize(*_inputs[inpIdx]); _keepMask.randomizeUniform(); _keepMask.apply(DropoutSmallerThanOperator(_keep)); _inputs[inpIdx]->eltwiseMult(_keepMask, getActs()); } else { _inputs[inpIdx]->copy(getActs()); } } void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { LayerV& prev = _prev[replicaIdx]; if (_enable && passType == PASS_TRAIN) { if (scaleTargets != 0) { v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()), _keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad()); } } else { prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1); } } void DropoutLayer::truncBwdActs() { Layer::truncBwdActs(); _keepMask.truncate(); } /* * ======================= * Dropout2Layer * ======================= * * TODO: optimize away the case when using dopout over relus. Don't need the keepmask. */ Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) { } void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_enable && passType == PASS_TRAIN) { _keepMask.resize(*_inputs[inpIdx]); _keepMask.randomizeUniform(); _keepMask.smallerThanScalar(_keep); _inputs[inpIdx]->eltwiseMult(_keepMask, getActs()); } else { _inputs[inpIdx]->scale(_keep, getActs()); } } void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { LayerV& prev = _prev[replicaIdx]; if (_enable && passType == PASS_TRAIN) { if (scaleTargets != 0) { v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()), _keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad()); } } else { if (scaleTargets != 0) { v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)), prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.scale(_keep, prev[inpIdx]->getActsGrad()); } } } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); _start = pyDictGetInt(paramsDict, "start"); _end = pyDictGetInt(paramsDict, "end"); _useBuffer = false; _outstandingCopyRequest = false; _convNet = convNet; } DataLayer::~DataLayer() { for (map<int,cudaStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) { checkCudaErrors(cudaStreamDestroy(it->second)); } for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } _copier->stop(); delete _copier; } void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) { waitForCopyFinish(); if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) { _useBuffer = 
!_useBuffer; } for (int i = 0; i < _next.size(); i++) { _next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx)); } } void DataLayer::waitForCopyFinish() { if (_outstandingCopyRequest) { _copyFinishQueue.dequeue(); assert(_copyFinishQueue.getNumElements() == 0); _outstandingCopyRequest = false; } } cudaStream_t DataLayer::getCopyStream(int deviceID) { if (_copyStreams.count(deviceID) == 0) { NVMatrix::setDeviceID(deviceID); checkCudaErrors(cudaStreamCreateWithFlags(&_copyStreams[deviceID], cudaStreamNonBlocking)); } return _copyStreams[deviceID]; } void DataLayer::copyData(CPUData& data, bool other, int passIdx) { assert(!_outstandingCopyRequest); assert(_copyFinishQueue.getNumElements() == 0); _copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx)); _outstandingCopyRequest = true; } int DataLayer::getNumInputReplicas() { return _convNet->getNumReplicasMax() / getNumReplicas(); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { } NVMatrix& DataLayer::getActs(int deviceID) { return getActs(deviceID, false, -1); } NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) { // printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases); assert(_memSrcActs.count(deviceID) > 0); assert(_memSrcActs2.count(deviceID) > 0); return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases)); } ConvNet& DataLayer::getConvNet() { return *_convNet; } bool DataLayer::postInit() { if (Layer::postInit()) { for (int i = 0; i < _next.size(); ++i) { int d = _next[i]->getDeviceID(); if (_memSrcActs2.count(d) == 0) { _memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName()); } } intv cpus = getDeviceCPUs(_next[0]->getDeviceID()); _copier = new DataCopyThread(*this, cpus); _copier->start(); return true; } return false; } bool DataLayer::isGradProducer() { return false; } /* * ======================= * DataCopyThread * ======================= */ DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) { } Queue<DataCopyMessage*>& DataCopyThread::getQueue() { return _queue; } void DataCopyThread::stop() { getQueue().enqueue(new DataCopyExitMessage()); join(); } void* DataCopyThread::run() { NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin()); bool exit = false; while(!exit) { DataCopyMessage& msg = *_queue.dequeue(); exit = msg.getType() == DataCopyMessage::EXIT; if (!exit) { CPUData& data = msg.getData(); int passIdx = msg.getPassIdx(); bool other = msg.isOther(); Matrix& dataMatrix = data.getData(_parent->getDataIdx()); // How many times is this layer going to process microbatches from this minibatch? assert(_parent->getNumReplicasNext() == _parent->getNumReplicas()); int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx); if (microIdx >= 0) { if (_requestTimer.isStarted()) { double requestIntervalMsec = _requestTimer.stop(); // Sleep for up to 1/20th the average request interval _sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0)); } _requestTimer.start(); if (other) { // Sleeping a bit is helpful because in typical nets, copying input data // as soon as it's available will produce contention with other communications // that are happening at the time. 
This is very much a hack, so in the future // it might be good to replace it with something smarter which schedules access // to communication links. usleep(_sleepUsec); } microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas(); // Safer to divup because this way you won't get a minibatch size of 0 int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax()); int microStart = microIdx * microbatchSize; int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize); // Check that this replica has some data. This can be false when, for example, // there are only 7 examples in the minibatch but 8 replicas. if (microStart < microEnd) { assert(dataMatrix.isView() == dataMatrix.isTrans()); int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2); if (dataMatrix.isTrans()) { Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd); // In this case, dataMatrix is a view on memory allocated by Python. //_hostMemFwd.copyFromHost(replicaDataMatrix, true); _hostMemFwd.resize(replicaDataMatrix.getNumRows(), replicaDataMatrix.getNumCols(), true); memcpy(_hostMemFwd.getDevData(), replicaDataMatrix.getData(), replicaDataMatrix.getNumDataBytes()); delete &replicaDataMatrix; // view NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd()); for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; // Copy my output to this guy's GPU NVMatrix::setDeviceID(deviceID); // Note to self: this is the path that gets executed in practice // in my models. It does a transpose & copy simultaneously. hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID)); } delete &hostMemFwdSlice; } else { // Hacky way to copy a slice to _hostMemFwd _hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart); Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans()); dataMatrix.sliceCols(microStart, microEnd, tmp); NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd()); for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; // Copy my output to this guy's GPU NVMatrix::setDeviceID(deviceID); hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID)); } delete &hostMemFwdSlice; } for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; NVMatrix::setDeviceID(deviceID); NVMatrix::syncStream(_parent->getCopyStream(deviceID)); } _parent->getConvNet().getDataCopyPD().freePipe(pipe); } else { for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; _parent->getActs(deviceID, other, 0); } } } _parent->getCopyFinishQueue().enqueue(1); } delete &msg; } return NULL; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) { _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _pool = 
pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) { std::string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false); } else if(_pool == "maxabs") { return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true); } else if(_pool == "avg") { return *new AvgPoolLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_abs) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler()); } else { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler()); } } void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); } /* * ===================== * CrossMapPoolLayer * ===================== */ CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) { _size = pyDictGetInt(paramsDict, "size"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputs = pyDictGetInt(paramsDict, "outputChannels"); _pool = pyDictGetString(paramsDict, "pool"); } CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) { std::string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown pooling layer type ") + _pool; } /* * ===================== * CrossMapMaxPoolLayer * ===================== */ CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) { } void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler()); } void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), 
_prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1); } /* * ===================== * RandomScaleLayer * ===================== */ RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _maxScale = pyDictGetFloat(paramsDict, "maxScale"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); // The smallest size the image could be after rescaling _minScaledSize = _imgSize / _maxScale; // The number of discrete scales we're considering int numScales = _imgSize - _minScaledSize + 1; // The total number of squares of size _tgtSize that we can extract // from all these scales double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6; // For each scale, record the fraction of the squares that it has. // This will be the probability of sampling this scale. _scaleProbs.push_back(1.0 / numCrops); for (int s = 1; s < numScales; ++s) { _scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops); } } void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (IS_TRAIN(passType)) { // _maxScale is in the range [1, 2) float r = randf; int rescaledSize = _tgtSize; float scaleFactor = _maxScale; // Find which scale we have sampled for (int s = 0; s < _scaleProbs.size(); ++s) { if (r <= _scaleProbs[s]) { rescaledSize += s; float scaleFactorEnd = _imgSize / float(rescaledSize); float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize)); scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart); break; } } assert(rescaledSize >= _tgtSize); int maxStart = rescaledSize - _tgtSize; int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart); if (rescaledSize == _imgSize) { convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX); } else { convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor); convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX); } _rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it. } else if (IS_MULTIVIEW_TEST(passType)) { // for now... 
_inputs[0]->copy(getActs()); } else if (IS_TEST(passType)) { // Test on center patch convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale); } } void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * CropLayer * ===================== */ CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _startX = pyDictGetInt(paramsDict, "startX"); _startY = pyDictGetInt(paramsDict, "startY"); _tgtSize = pyDictGetInt(paramsDict, "sizeX"); } void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX); } void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _hFilter = pyDictGetMatrix(paramsDict, "filter"); } GaussianBlurLayer::~GaussianBlurLayer() { delete _hFilter; } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? 
_actGradsTmp : _prev[replicaIdx][0]->getActsGrad(); convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1); convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { _filter.copyFromHost(*_hFilter, true); } /* * ===================== * HorizontalReflectionLayer * ===================== */ HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { assert(_channels >= 1 && _channels <= 3); } void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convReflectHorizontal(*_inputs[0], getActs(), _imgSize); } void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _size = pyDictGetInt(paramsDict, "size"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); _minDiv = pyDictGetFloat(paramsDict, "minDiv"); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv); } void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), 
_prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() { Layer::truncBwdActs(); _denoms.truncate(); } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) { _blocked = pyDictGetInt(paramsDict, "blocked"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { assert(inpIdx == 0); convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) { } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv); } void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); _meanDiffs.truncate(); } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); _numCases = 0; _aggregated = pyDictGetInt(paramsDict, "aggregated") != 0; } float CostLayer::getCoeff() { return _coeff; } void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) { if (_coeff != 0) { Layer::bprop(v, passType, passIdx); } } bool CostLayer::fprop(PASS_TYPE passType, int passIdx) { if (Layer::fprop(passType, passIdx)) { syncStream(); getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL)); return true; } return false; } void CostLayer::fpropCommon(PASS_TYPE passType) { _numCases = Layer::getNumCases(*_inputs[0]); } int CostLayer::getNumCases() { return _numCases; } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { return *new doublev(_costv); } // This is called between microbatches void CostLayer::resetPassIdx() { Layer::resetPassIdx(); _costv.clear(); } CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) { if (type == "cost.crossent") { return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.bce") { return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.dce") { return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID); } else if (type == 
"cost.logreg") { return *new LogregCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown cost layer type ") + type; } /* * ===================== * CrossEntCostLayer * ===================== */ CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { } void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeCrossEntCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); } } void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID(); if (doWork) { computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * BinomialCrossEntropyCostLayer * ===================== */ BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { _computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate"); _posWeight = pyDictGetFloat(paramsDict, "posWeight"); } void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs); _costv.clear(); // Cross-entropy cost _costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim()); // If aggregated, we don't produce these outputs because they're not additive. // They have no meaning if this is just a partial cost. if (!_aggregated) { // "Correct" classifications. To compute these we threshold probs // and just count the number of entries that agree with labels. 
probs.biggerThanScalar(0.5, _tmpProbs); _tmpProbs.equals(labels); _costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim())); if (_computeSoftmaxErrorRate) { // Also compute top-1 error as if this is softmax and there's only one correct class probs.max(0, _tmpVec); assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis probs.equalsVector(_tmpVec, _correctProbs); _correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present float m = _tmpVec.max(); _correctProbs.eltwiseDivideByVector(_tmpVec); _correctProbs.eltwiseMult(labels); _costv.push_back(numCases - _correctProbs.sum(_tmpbuf)); } } } } void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "neuron" || static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic" || prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas(); if (doWork) { printf("Computing cross-entropy gradient the stupid way\n"); if (scaleTargets == 0) { labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target); } else { labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target); } } } float BinomialCrossEntropyCostLayer::getPosWeight() { return _posWeight; } /* * ===================== * DetectionCrossEntropyCostLayer * ===================== */ DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) { assert(!_aggregated); } void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx); // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); /* * Add information sufficient to compute precision and recall for each class. 
*/ // NOTE: _tmpProbs contains ((probs > 0.5) == labels) labels.sum(1, _numPositive); // sum(labels, 1) _tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels) _tmpProbs.sum(1, _numTruePositive); probs.biggerThanScalar(0.5, _tmpProbs); _tmpProbs.sum(1, _numDeclaredPositive); _numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true); _numPositive.copyToHost(_hNumPositive, true); _numTruePositive.copyToHost(_hNumTruePositive, true); for (int i = 0; i < labels.getFollowingDim(); ++i) { _costv.push_back(_hNumDeclaredPositive(i, 0)); // 2 _costv.push_back(_hNumPositive(i, 0)); // 3 _costv.push_back(_hNumTruePositive(i, 0)); // 4 } } } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { _topk = pyDictGetInt(paramsDict, "topk"); // _numAccumed = 0; } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix* probs = _inputs[1]; _doCompute = !IS_MULTIVIEW_TEST(passType); if (!_doCompute) { if (IS_MULTIVIEW_TEST_START(passType)) { if (_probsAccum.count(passIdx) == 0) { _probsAccum[passIdx] = new NVMatrix(*probs); } probs->copy(*_probsAccum[passIdx]); _numAccumed[passIdx] = 1; } else { _probsAccum[passIdx]->add(*probs); _numAccumed[passIdx] += 1; } if (IS_MULTIVIEW_TEST_END(passType)) { probs = _probsAccum[passIdx]; probs->scale(1.0 / _numAccumed[passIdx]); _doCompute = true; } } if (_doCompute) { int numCases = labels.getNumElements(); probs->max(0,_maxProbs); if (_topk == 1) { computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs); } else { computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk); } _costv.clear(); double top1 = _correctProbs.sum(_tmpbuf); _costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf)); _costv.push_back(numCases - top1); _costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf))); } } } NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) { return *_probsAccum[replicaIdx]; } void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. 
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas(); if (prev[1]->getType() == "softmax") { static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork); } if (doWork) { computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } } /* * ===================== * SumOfSquaresCostLayer * ===================== */ SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { } void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { _inputs[0]->apply(NVMatrixOps::Square(), _tmp); _costv.clear(); _costv.push_back(_tmp.sum()); } void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff); }
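Near the top of this file, the data-copy thread splits each minibatch across replicas with DIVUP so the per-replica microbatch size is rounded up and can never be zero; the trade-off is that trailing replicas may receive an empty slice, which is exactly the microStart < microEnd case guarded there (e.g. 7 examples over 8 replicas). The host-side sketch below just replays that index arithmetic with hypothetical numbers; divup stands in for the DIVUP macro assumed by the source.

#include <algorithm>
#include <cstdio>

// Round-up integer division, as the DIVUP macro in the source is assumed to do.
static int divup(int a, int b) { return (a + b - 1) / b; }

int main() {
    int numCases = 7, numReplicas = 8;                       // fewer examples than replicas
    int microbatchSize = divup(numCases, numReplicas);       // 1, never 0
    for (int microIdx = 0; microIdx < numReplicas; ++microIdx) {
        int microStart = microIdx * microbatchSize;
        int microEnd   = std::min(numCases, (microIdx + 1) * microbatchSize);
        if (microStart < microEnd)
            printf("replica %d gets cases [%d, %d)\n", microIdx, microStart, microEnd);
        else
            printf("replica %d gets no data this minibatch\n", microIdx);  // replica 7 here
    }
    return 0;
}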
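RandomScaleLayer above samples a rescaled size with probability proportional to how many distinct _tgtSize-by-_tgtSize crops that size admits: scale index s contributes (s+1)^2 crop positions, the total over n scales is the sum of squares n(n+1)(2n+1)/6, and the constructor stores the resulting cumulative distribution so fpropActs can pick a scale with a single uniform draw. A small host-only sketch of the same table and sampling step, with a hypothetical numScales:

#include <vector>
#include <cstdio>
#include <cstdlib>

int main() {
    int numScales = 5;                                   // hypothetical count of discrete scales
    // Total number of crops over all scales: 1^2 + 2^2 + ... + n^2 = n(n+1)(2n+1)/6.
    double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6.0;
    std::vector<double> cdf;                             // cumulative sampling probabilities
    cdf.push_back(1.0 / numCrops);                       // scale 0 contributes a single crop
    for (int s = 1; s < numScales; ++s)
        cdf.push_back(cdf[s - 1] + (s + 1) * (s + 1) / numCrops);

    double r = rand() / double(RAND_MAX);                // uniform in [0, 1]
    for (int s = 0; s < numScales; ++s) {
        if (r <= cdf[s]) { printf("sampled scale index %d\n", s); break; }
    }
    return 0;
}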
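CrossEntCostLayer, BinomialCrossEntropyCostLayer and LogregCostLayer above all skip their own gradient kernel when the producing layer is a softmax (or logistic neuron) on the same device. The reason, as the comments note, is numerical: for those activations the gradient of cross-entropy with respect to the pre-activation collapses to a difference between predicted probabilities and labels, so there is no need to first form -label/prob and then multiply it back through the activation's Jacobian. The kernel below only illustrates that fused form; its name and sign/scale convention are assumptions, not the library's actual computeCrossEntGrad/computeLogregGrad kernels.

// Illustrative only: fused activation + cross-entropy gradient w.r.t. the logits.
// For softmax with one-hot labels, or an element-wise logistic unit with binary labels,
// d(cost)/d(logit) is proportional to (prob - label); the overall sign and scaling
// follow the framework's cost-coefficient convention, which is assumed here.
__global__ void fusedCrossEntLogitGrad(const float* labels, const float* probs,
                                       float* logitGrad, float coeff, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        logitGrad[i] = coeff * (probs[i] - labels[i]);   // no -label/prob term needed
    }
}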
e218e9b6ae657c54561cd24cdb1cc52cf0c99445.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Author: Lingfeng Chen, Pengkun Li * PID: A53270085, A53270024 * This file defines the data structure for grid */ #include "basicLayer.h" #define BLOCK_SIZE 128 namespace basicLayer { __global__ void randInitGPU(int nx, int ny, hiprandState_t* rndstates) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) { hiprand_init(clock() + idx, idx, 0, &rndstates[idx]); } } __global__ void weightbiasTruncInitGPU(int hid, int emb, float* d_Wh, float* d_Wx, float* d_b, hiprandState_t* rndstates) { // truncated uniform unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idh = iy * hid + emb + ix; //for Wh if (ix < hid && iy < hid) { d_Wh[iy * hid + ix] = hiprand_uniform(&rndstates[idh]) * 2.0E-3 - 1.0E-3; }//for Wx else if (ix < hid && iy < hid + emb) { d_Wx[(iy - hid) * hid + ix] = hiprand_uniform(&rndstates[idh]) * 2.0E-3 - 1.0E-3; }//for bin else if (ix < hid && iy == hid + emb) { d_b[ix] = 0.0f; } } __global__ void weightbiasGradInitGPU(int hid, int emb, float* d_Wh, float* d_Wx, float* d_b) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idh = iy * (hid + emb) + ix; //for Wh if (ix < hid && iy < hid) { d_Wh[iy * hid + ix] = 0.0f; }//for Wx else if (ix < hid && iy < hid + emb) { d_Wx[(iy - hid) * hid + ix] = 0.0f; }//for bin else if (ix < hid && iy == hid + emb) { d_b[ix] = 0.0f; } } __global__ void concatVecGPU(const int nx, const int ny, float* d_out, float* d_A, float* d_B){ unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < nx) { d_out[idx] = d_A[idx]; } else if (idx < nx + ny) { d_out[idx] = d_B[idx - nx]; } } __global__ void denseweightbiasTruncInitGPU(int nx, int ny, float* d_W, float* d_b, hiprandState_t* rndstates, int layerId) {// truncated uniform unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (layerId == 1) {//for dense //for W if (ix < nx - 1 && iy < ny) { d_W[iy * (nx - 1) + ix] = hiprand_uniform(&rndstates[idx]) * 2.0E-3 - 1.0E-3; }//for b else if (ix == nx - 1 && iy < ny) { d_b[iy] = 0.0f; } } else {//for embed if (ix < nx && iy < ny) d_W[idx] = hiprand_uniform(&rndstates[idx]) * 2.0E-3 - 1.0E-3; } } __global__ void embedweightbiasTruncInitGPU(int nx, int ny, float* d_W, hiprandState_t* rndstates) {// truncated uniform unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) d_W[idx] = hiprand_uniform(&rndstates[idx]) * 2.0E-3 - 1.0E-3; } __global__ void denseweightbiasGradInitGPU(int nx, int ny, float* d_W, float* d_b, hiprandState_t* rndstates, int layerId) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (layerId == 1) {//for dense //for W if (ix < nx - 1 && iy < ny) { d_W[iy * (nx - 1) + ix] = 0.0f; }//for b else if (ix == nx - 1 && iy < ny) { d_b[iy] = 0.0f; } } else {//for dense if (ix < nx && iy < ny) d_W[idx] = 0.0f; } } __global__ void embedCalGradGPU(float* W, float* delta, int textCode, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx) { W[ix * ny + textCode] = delta[ix]; } } float* BasicLayer::embedCalGrad(float* W, float* delta, int textCode, int nx, int ny) { int n = nx * ny; float* out = new 
float[n]; float* d_W, * d_delta; CHECK(hipMalloc((void**)& d_W, n * sizeof(float))); CHECK(hipMalloc((void**)& d_delta, nx * sizeof(float))); CHECK(hipMemcpy(d_W, W, n * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_delta, delta, nx * sizeof(float), hipMemcpyHostToDevice)); dim3 block(BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x); embedCalGradGPU << <grid, block >> > (d_W, d_delta, textCode, nx, ny); //sync hipDeviceSynchronize(); CHECK(hipMemcpy(out, d_W, n * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_W); hipFree(d_delta); return out; } void BasicLayer::randInit(int m ,int n) { hipMalloc((void**)& rndstates, m * n * sizeof(hiprandState_t)); dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); randInitGPU << <grid,block >> > (n, m, rndstates); } void BasicLayer::weightbiasTruncInit(float* Wh, float* Wx, float* b, const int Whlen, const int Wxlen, const int blen) { int m = hiddenStates + embedSize + 1; int n = blen; randInit(m, n); float* d_Wh, * d_Wx, * d_b; //malloc device memory CHECK(hipMalloc((void**)& d_Wh, Whlen * sizeof(float))); CHECK(hipMalloc((void**)& d_Wx, Wxlen * sizeof(float))); CHECK(hipMalloc((void**)& d_b, blen * sizeof(float))); //transfer data from host to device CHECK(hipMemcpy(d_Wh, Wh, Whlen * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_Wx, Wx, Wxlen * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_b, b, blen * sizeof(float), hipMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); weightbiasTruncInitGPU << <grid, block >> > (hiddenStates, embedSize, d_Wh, d_Wx, d_b, rndstates); //transfer data from device to host CHECK(hipMemcpy(Wh, d_Wh, Whlen * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(Wx, d_Wx, Wxlen * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(b, d_b, blen * sizeof(float), hipMemcpyDeviceToHost)); //free device memory CHECK(hipFree(d_Wh)); CHECK(hipFree(d_Wx)); CHECK(hipFree(d_b)); } void BasicLayer::weightbiasGradInit(float* Wh, float* Wx, float* b, const int Whlen, const int Wxlen, const int blen) { float* d_Wh, * d_Wx, * d_b; //malloc device memory CHECK(hipMalloc((void**)& d_Wh, Whlen * sizeof(float))); CHECK(hipMalloc((void**)& d_Wx, Wxlen * sizeof(float))); CHECK(hipMalloc((void**)& d_b, blen * sizeof(float))); //Init as 0; int m = hiddenStates + embedSize + 1; int n = blen; dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); weightbiasGradInitGPU << <grid, block >> > (hiddenStates, embedSize, d_Wh, d_Wx, d_b); //transfer data from device to host CHECK(hipMemcpy(Wh, d_Wh, Whlen * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(Wx, d_Wx, Wxlen * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(b, d_b, blen * sizeof(float), hipMemcpyDeviceToHost)); //free device memory CHECK(hipFree(d_Wh)); CHECK(hipFree(d_Wx)); CHECK(hipFree(d_b)); } float* BasicLayer::concatVec(float* vecA, float* vecB, const int alen, const int blen) { const int outlen = alen + blen; float* out = new float[outlen]; float* d_A, * d_B, * d_out; //malloc device memory CHECK(hipMalloc((void**)& d_A, alen * sizeof(float))); CHECK(hipMalloc((void**)& d_B, blen * sizeof(float))); CHECK(hipMalloc((void**)& d_out, outlen * sizeof(float))); //transfer data from host to device CHECK(hipMemcpy(d_A, vecA, alen * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_B, vecB, blen * sizeof(float), hipMemcpyHostToDevice)); //Invoke kernel dim3 block(BLOCK_SIZE); dim3 grid((outlen + block.x - 1) / block.x); 
concatVecGPU << <grid, block >> > (alen, blen, d_out, d_A, d_B); //transfer data from device to host CHECK(hipMemcpy(out, d_out, outlen * sizeof(float), hipMemcpyDeviceToHost)); //free device memory CHECK(hipFree(d_A)); CHECK(hipFree(d_B)); CHECK(hipFree(d_out)); return out; } void BasicLayer::showVar() const { for (int i = 0; i < 4; i++) { std::cout << allVar[i]; } std::cout << std::endl; } void BasicLayer::showConcat(float* vec, const int len) const { for (int i = 0; i < len; i++) { std::cout << vec[i] << " "; } std::cout << std::endl; } void BasicLayer::denseweightbiasTruncInit(float* W, float* b, const int Wlen, const int blen) { int m = categories; int n = hiddenStates + 1; randInit(m, n); float* d_W, * d_b; //malloc device memory CHECK(hipMalloc((void**)& d_W, Wlen * sizeof(float))); CHECK(hipMalloc((void**)& d_b, blen * sizeof(float))); //transfer data from host to device CHECK(hipMemcpy(d_W, W, Wlen * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_b, b, blen * sizeof(float), hipMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); denseweightbiasTruncInitGPU << <grid, block >> > (n, m, d_W, d_b, rndstates, 1); //transfer data from device to host CHECK(hipMemcpy(W, d_W, Wlen * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(b, d_b, blen * sizeof(float), hipMemcpyDeviceToHost)); //free device memory CHECK(hipFree(d_W)); CHECK(hipFree(d_b)); } void BasicLayer::denseweightbiasGradInit(float* W, float* b, const int Wlen, const int blen) { int m = categories; int n = hiddenStates + 1; randInit(m, n); float* d_W, * d_b; //malloc device memory CHECK(hipMalloc((void**)& d_W, Wlen * sizeof(float))); CHECK(hipMalloc((void**)& d_b, blen * sizeof(float))); //transfer data from host to device CHECK(hipMemcpy(d_W, W, Wlen * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_b, b, blen * sizeof(float), hipMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); denseweightbiasGradInitGPU << <grid, block >> > (n, m, d_W, d_b, rndstates, 1); //transfer data from device to host CHECK(hipMemcpy(W, d_W, Wlen * sizeof(float), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(b, d_b, blen * sizeof(float), hipMemcpyDeviceToHost)); //free device memory CHECK(hipFree(d_W)); CHECK(hipFree(d_b)); } void BasicLayer::embedweightTruncInit(float* W, const int Wlen) { int m = embedSize; int n = Wlen / m; randInit(m, n); float* d_W; //malloc device memory CHECK(hipMalloc((void**)& d_W, Wlen * sizeof(float))); //transfer data from host to device CHECK(hipMemcpy(d_W, W, Wlen * sizeof(float), hipMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); embedweightbiasTruncInitGPU << <grid, block >> > (n, m, d_W, rndstates); //transfer data from device to host CHECK(hipMemcpy(W, d_W, Wlen * sizeof(float), hipMemcpyDeviceToHost)); //free device memory CHECK(hipFree(d_W)); } void BasicLayer::embedweightGradInit(float* W, const int Wlen) { int m = embedSize; int n = Wlen / m; float* d_W; //malloc device memory CHECK(hipMalloc((void**)& d_W, Wlen * sizeof(float))); //transfer data from host to device CHECK(hipMemcpy(d_W, W, Wlen * sizeof(float), hipMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); denseweightbiasGradInitGPU << <grid, block >> > (n, m, d_W, d_W, rndstates, 0); //transfer data from device to host CHECK(hipMemcpy(W, d_W, Wlen * 
sizeof(float), hipMemcpyDeviceToHost)); //free device memory CHECK(hipFree(d_W)); } }
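concatVecGPU in the file above joins two device vectors by branching on the flat thread index: indices below nx read from the first input, indices in [nx, nx+ny) read from the second, and the host wrapper follows the usual malloc / copy-in / launch / copy-out / free sequence. Below is a stripped-down, self-contained version of that pattern, written in plain CUDA spelling (the HIP variant only swaps the runtime prefixes); sizes and names are hypothetical.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void concatKernel(int nx, int ny, const float* a, const float* b, float* out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < nx)            out[i] = a[i];          // first segment
    else if (i < nx + ny)  out[i] = b[i - nx];     // second segment
}

int main() {
    const int nx = 3, ny = 2, n = nx + ny;
    float ha[nx] = {1, 2, 3}, hb[ny] = {4, 5}, hout[n];
    float *da, *db, *dout;
    cudaMalloc(&da, nx * sizeof(float));
    cudaMalloc(&db, ny * sizeof(float));
    cudaMalloc(&dout, n * sizeof(float));
    cudaMemcpy(da, ha, nx * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, ny * sizeof(float), cudaMemcpyHostToDevice);
    int block = 128, grid = (n + block - 1) / block;   // one thread per output element
    concatKernel<<<grid, block>>>(nx, ny, da, db, dout);
    cudaMemcpy(hout, dout, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%g ", hout[i]);   // expect: 1 2 3 4 5
    printf("\n");
    cudaFree(da); cudaFree(db); cudaFree(dout);
    return 0;
}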
e218e9b6ae657c54561cd24cdb1cc52cf0c99445.cu
/** * Author: Lingfeng Chen, Pengkun Li * PID: A53270085, A53270024 * This file defines the data structure for grid */ #include "basicLayer.h" #define BLOCK_SIZE 128 namespace basicLayer { __global__ void randInitGPU(int nx, int ny, curandState* rndstates) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) { curand_init(clock() + idx, idx, 0, &rndstates[idx]); } } __global__ void weightbiasTruncInitGPU(int hid, int emb, float* d_Wh, float* d_Wx, float* d_b, curandState* rndstates) { // truncated uniform unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idh = iy * hid + emb + ix; //for Wh if (ix < hid && iy < hid) { d_Wh[iy * hid + ix] = curand_uniform(&rndstates[idh]) * 2.0E-3 - 1.0E-3; }//for Wx else if (ix < hid && iy < hid + emb) { d_Wx[(iy - hid) * hid + ix] = curand_uniform(&rndstates[idh]) * 2.0E-3 - 1.0E-3; }//for bin else if (ix < hid && iy == hid + emb) { d_b[ix] = 0.0f; } } __global__ void weightbiasGradInitGPU(int hid, int emb, float* d_Wh, float* d_Wx, float* d_b) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idh = iy * (hid + emb) + ix; //for Wh if (ix < hid && iy < hid) { d_Wh[iy * hid + ix] = 0.0f; }//for Wx else if (ix < hid && iy < hid + emb) { d_Wx[(iy - hid) * hid + ix] = 0.0f; }//for bin else if (ix < hid && iy == hid + emb) { d_b[ix] = 0.0f; } } __global__ void concatVecGPU(const int nx, const int ny, float* d_out, float* d_A, float* d_B){ unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < nx) { d_out[idx] = d_A[idx]; } else if (idx < nx + ny) { d_out[idx] = d_B[idx - nx]; } } __global__ void denseweightbiasTruncInitGPU(int nx, int ny, float* d_W, float* d_b, curandState* rndstates, int layerId) {// truncated uniform unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (layerId == 1) {//for dense //for W if (ix < nx - 1 && iy < ny) { d_W[iy * (nx - 1) + ix] = curand_uniform(&rndstates[idx]) * 2.0E-3 - 1.0E-3; }//for b else if (ix == nx - 1 && iy < ny) { d_b[iy] = 0.0f; } } else {//for embed if (ix < nx && iy < ny) d_W[idx] = curand_uniform(&rndstates[idx]) * 2.0E-3 - 1.0E-3; } } __global__ void embedweightbiasTruncInitGPU(int nx, int ny, float* d_W, curandState* rndstates) {// truncated uniform unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) d_W[idx] = curand_uniform(&rndstates[idx]) * 2.0E-3 - 1.0E-3; } __global__ void denseweightbiasGradInitGPU(int nx, int ny, float* d_W, float* d_b, curandState* rndstates, int layerId) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (layerId == 1) {//for dense //for W if (ix < nx - 1 && iy < ny) { d_W[iy * (nx - 1) + ix] = 0.0f; }//for b else if (ix == nx - 1 && iy < ny) { d_b[iy] = 0.0f; } } else {//for dense if (ix < nx && iy < ny) d_W[idx] = 0.0f; } } __global__ void embedCalGradGPU(float* W, float* delta, int textCode, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx) { W[ix * ny + textCode] = delta[ix]; } } float* BasicLayer::embedCalGrad(float* W, float* delta, int textCode, int nx, int ny) { int n = nx * ny; float* out = new float[n]; float* d_W, * d_delta; CHECK(cudaMalloc((void**)& d_W, n * sizeof(float))); CHECK(cudaMalloc((void**)& 
d_delta, nx * sizeof(float))); CHECK(cudaMemcpy(d_W, W, n * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_delta, delta, nx * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x); embedCalGradGPU << <grid, block >> > (d_W, d_delta, textCode, nx, ny); //sync cudaDeviceSynchronize(); CHECK(cudaMemcpy(out, d_W, n * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_W); cudaFree(d_delta); return out; } void BasicLayer::randInit(int m ,int n) { cudaMalloc((void**)& rndstates, m * n * sizeof(curandState)); dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); randInitGPU << <grid,block >> > (n, m, rndstates); } void BasicLayer::weightbiasTruncInit(float* Wh, float* Wx, float* b, const int Whlen, const int Wxlen, const int blen) { int m = hiddenStates + embedSize + 1; int n = blen; randInit(m, n); float* d_Wh, * d_Wx, * d_b; //malloc device memory CHECK(cudaMalloc((void**)& d_Wh, Whlen * sizeof(float))); CHECK(cudaMalloc((void**)& d_Wx, Wxlen * sizeof(float))); CHECK(cudaMalloc((void**)& d_b, blen * sizeof(float))); //transfer data from host to device CHECK(cudaMemcpy(d_Wh, Wh, Whlen * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_Wx, Wx, Wxlen * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_b, b, blen * sizeof(float), cudaMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); weightbiasTruncInitGPU << <grid, block >> > (hiddenStates, embedSize, d_Wh, d_Wx, d_b, rndstates); //transfer data from device to host CHECK(cudaMemcpy(Wh, d_Wh, Whlen * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(Wx, d_Wx, Wxlen * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(b, d_b, blen * sizeof(float), cudaMemcpyDeviceToHost)); //free device memory CHECK(cudaFree(d_Wh)); CHECK(cudaFree(d_Wx)); CHECK(cudaFree(d_b)); } void BasicLayer::weightbiasGradInit(float* Wh, float* Wx, float* b, const int Whlen, const int Wxlen, const int blen) { float* d_Wh, * d_Wx, * d_b; //malloc device memory CHECK(cudaMalloc((void**)& d_Wh, Whlen * sizeof(float))); CHECK(cudaMalloc((void**)& d_Wx, Wxlen * sizeof(float))); CHECK(cudaMalloc((void**)& d_b, blen * sizeof(float))); //Init as 0; int m = hiddenStates + embedSize + 1; int n = blen; dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); weightbiasGradInitGPU << <grid, block >> > (hiddenStates, embedSize, d_Wh, d_Wx, d_b); //transfer data from device to host CHECK(cudaMemcpy(Wh, d_Wh, Whlen * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(Wx, d_Wx, Wxlen * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(b, d_b, blen * sizeof(float), cudaMemcpyDeviceToHost)); //free device memory CHECK(cudaFree(d_Wh)); CHECK(cudaFree(d_Wx)); CHECK(cudaFree(d_b)); } float* BasicLayer::concatVec(float* vecA, float* vecB, const int alen, const int blen) { const int outlen = alen + blen; float* out = new float[outlen]; float* d_A, * d_B, * d_out; //malloc device memory CHECK(cudaMalloc((void**)& d_A, alen * sizeof(float))); CHECK(cudaMalloc((void**)& d_B, blen * sizeof(float))); CHECK(cudaMalloc((void**)& d_out, outlen * sizeof(float))); //transfer data from host to device CHECK(cudaMemcpy(d_A, vecA, alen * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_B, vecB, blen * sizeof(float), cudaMemcpyHostToDevice)); //Invoke kernel dim3 block(BLOCK_SIZE); dim3 grid((outlen + block.x - 1) / block.x); concatVecGPU << <grid, block >> > (alen, blen, d_out, d_A, d_B); 
//transfer data from device to host CHECK(cudaMemcpy(out, d_out, outlen * sizeof(float), cudaMemcpyDeviceToHost)); //free device memory CHECK(cudaFree(d_A)); CHECK(cudaFree(d_B)); CHECK(cudaFree(d_out)); return out; } void BasicLayer::showVar() const { for (int i = 0; i < 4; i++) { std::cout << allVar[i]; } std::cout << std::endl; } void BasicLayer::showConcat(float* vec, const int len) const { for (int i = 0; i < len; i++) { std::cout << vec[i] << " "; } std::cout << std::endl; } void BasicLayer::denseweightbiasTruncInit(float* W, float* b, const int Wlen, const int blen) { int m = categories; int n = hiddenStates + 1; randInit(m, n); float* d_W, * d_b; //malloc device memory CHECK(cudaMalloc((void**)& d_W, Wlen * sizeof(float))); CHECK(cudaMalloc((void**)& d_b, blen * sizeof(float))); //transfer data from host to device CHECK(cudaMemcpy(d_W, W, Wlen * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_b, b, blen * sizeof(float), cudaMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); denseweightbiasTruncInitGPU << <grid, block >> > (n, m, d_W, d_b, rndstates, 1); //transfer data from device to host CHECK(cudaMemcpy(W, d_W, Wlen * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(b, d_b, blen * sizeof(float), cudaMemcpyDeviceToHost)); //free device memory CHECK(cudaFree(d_W)); CHECK(cudaFree(d_b)); } void BasicLayer::denseweightbiasGradInit(float* W, float* b, const int Wlen, const int blen) { int m = categories; int n = hiddenStates + 1; randInit(m, n); float* d_W, * d_b; //malloc device memory CHECK(cudaMalloc((void**)& d_W, Wlen * sizeof(float))); CHECK(cudaMalloc((void**)& d_b, blen * sizeof(float))); //transfer data from host to device CHECK(cudaMemcpy(d_W, W, Wlen * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_b, b, blen * sizeof(float), cudaMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); denseweightbiasGradInitGPU << <grid, block >> > (n, m, d_W, d_b, rndstates, 1); //transfer data from device to host CHECK(cudaMemcpy(W, d_W, Wlen * sizeof(float), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(b, d_b, blen * sizeof(float), cudaMemcpyDeviceToHost)); //free device memory CHECK(cudaFree(d_W)); CHECK(cudaFree(d_b)); } void BasicLayer::embedweightTruncInit(float* W, const int Wlen) { int m = embedSize; int n = Wlen / m; randInit(m, n); float* d_W; //malloc device memory CHECK(cudaMalloc((void**)& d_W, Wlen * sizeof(float))); //transfer data from host to device CHECK(cudaMemcpy(d_W, W, Wlen * sizeof(float), cudaMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); embedweightbiasTruncInitGPU << <grid, block >> > (n, m, d_W, rndstates); //transfer data from device to host CHECK(cudaMemcpy(W, d_W, Wlen * sizeof(float), cudaMemcpyDeviceToHost)); //free device memory CHECK(cudaFree(d_W)); } void BasicLayer::embedweightGradInit(float* W, const int Wlen) { int m = embedSize; int n = Wlen / m; float* d_W; //malloc device memory CHECK(cudaMalloc((void**)& d_W, Wlen * sizeof(float))); //transfer data from host to device CHECK(cudaMemcpy(d_W, W, Wlen * sizeof(float), cudaMemcpyHostToDevice)); //randomlize weights and bias dim3 block(BLOCK_SIZE, 1); dim3 grid((n + block.x - 1) / block.x, m); denseweightbiasGradInitGPU << <grid, block >> > (n, m, d_W, d_W, rndstates, 0); //transfer data from device to host CHECK(cudaMemcpy(W, d_W, Wlen * sizeof(float), 
cudaMemcpyDeviceToHost)); //free device memory CHECK(cudaFree(d_W)); } }
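The "truncated uniform" initializers in the two basicLayer variants above work in two kernel launches: randInitGPU seeds one curand/hiprand state per matrix entry, and the weight-init kernels then map curand_uniform's (0,1] output into roughly [-1e-3, 1e-3] via x * 2.0e-3 - 1.0e-3. A minimal standalone sketch of that two-kernel pattern follows (CUDA spelling; seed, sizes and names are hypothetical):

#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

__global__ void initStates(int n, unsigned long long seed, curandState* states) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) curand_init(seed, i, 0, &states[i]);   // one generator per element
}

__global__ void initWeights(int n, float* w, curandState* states) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) w[i] = curand_uniform(&states[i]) * 2.0e-3f - 1.0e-3f;  // roughly [-1e-3, 1e-3]
}

int main() {
    const int n = 8;
    float hw[n];
    float* dw; curandState* dstates;
    cudaMalloc(&dw, n * sizeof(float));
    cudaMalloc(&dstates, n * sizeof(curandState));
    int block = 128, grid = (n + block - 1) / block;
    initStates<<<grid, block>>>(n, 1234ULL, dstates);
    initWeights<<<grid, block>>>(n, dw, dstates);
    cudaMemcpy(hw, dw, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%g ", hw[i]);
    printf("\n");
    cudaFree(dw); cudaFree(dstates);
    return 0;
}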
66a07679fc3ed0cd4521ed7f43ab03f80a44c2ef.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <chrono> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc // ... 
// TODO: Part 1 - Caching first bounce intersections static ShadeableIntersection * dev_first_intersections = NULL; void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need // TODO: Part 1 - Caching first bounce intersections #if CACHING hipMalloc(&dev_first_intersections, pixelcount * sizeof(ShadeableIntersection)); #endif checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_paths); hipFree(dev_geoms); hipFree(dev_materials); hipFree(dev_intersections); // clean up any extra device memory you created // TODO: Part 1 - Cache first bounce intersections #if CACHING hipFree(dev_first_intersections); #endif checkCUDAError("pathtraceFree"); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // TODO: Part 2 - implement antialiasing by jittering the ray float xOffset = 0.0f; float yOffset = 0.0f; #if !CACHING // use random number generator to add offset to x and y thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth); thrust::uniform_real_distribution<float> u01(0, 1); xOffset = u01(rng); yOffset = u01(rng); #endif segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)(x + xOffset) - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)(y + yOffset) - (float)cam.resolution.y * 0.5f) ); segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. 
__global__ void computeIntersections( int depth, int num_paths, PathSegment * pathSegments, Geom * geoms, int geoms_size, ShadeableIntersection * intersections ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? else if (geom.type == CSG1) { t = csg1IntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == CSG2) { t = csg2IntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , int depth ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // not needed if using compact, needed if not using compact // without compact, you don't know which rays are finished #if !COMPACT if (pathSegments[idx].remainingBounces == 0) return; #endif if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! 
you should be able to start with basically a one-liner else { // float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); // pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; // pathSegments[idx].color *= u01(rng); // apply some noise because why not // TODO: Part 1 - Shading kernel with BSDF evaluation glm::vec3 intersectionPoint = getPointOnRay(pathSegments[idx].ray, intersection.t); scatterRay(pathSegments[idx], intersectionPoint, intersection.surfaceNormal, material, rng); pathSegments[idx].remainingBounces--; } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } // TODO: Part 1 - Stream Compaction // A predicate used for thrust::partition function to tell how to partition struct streamCompactPredicate { streamCompactPredicate() {}; __host__ __device__ bool operator()(const PathSegment &s) { return (s.remainingBounces > 0); } }; // TODO: Part 1 - Sorting rays, pathSegments, intersections // A predicate used for thrust::partition function to tell how to partition struct sortPredicate { sortPredicate() {}; __host__ __device__ bool operator()(const ShadeableIntersection &i1, const ShadeableIntersection &i2) { return (i1.materialId < i2.materialId); } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. 
// * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. // TODO: perform one iteration of path tracing generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; #if TIMING using time_point_t = std::chrono::high_resolution_clock::time_point; time_point_t startTime = std::chrono::high_resolution_clock::now(); #endif // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; while (!iterationComplete) { // clean shading chunks hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; #if CACHING if ((iter == 1 && depth == 0) || depth > 0) { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth, num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_intersections ); checkCUDAError("trace one bounce"); } // TODO: Part 1 - Caching first bounce intersections // iteration is the sample number of the pixel // iteration == 1 means the first ray shot out of pixel // depth is the the bounce number if (iter == 1 && depth == 0) { // store computed intersection into cache hipMemcpy(dev_first_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); } // TODO: Part 1 - Caching first bounce intersection // if ray is not the first ray shot out of pixel, and // is the first bounce, use the saved cache else if (iter > 1 && depth == 0) { hipMemcpy(dev_intersections, dev_first_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); } hipDeviceSynchronize(); depth++; // NO CACHING #else // tracing hipLaunchKernelGGL(( computeIntersections), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, depth, num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_intersections ); checkCUDAError("trace one bounce"); hipDeviceSynchronize(); depth++; #endif // TODO: Part 1 - Sorting rays, pathSegments, intersections #if SORTING #if SORTTIMING using time_point_s = std::chrono::high_resolution_clock::time_point; time_point_s startTime2 = std::chrono::high_resolution_clock::now(); #endif thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, sortPredicate()); #if SORTTIMING hipDeviceSynchronize(); time_point_s endTime2 = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> dur2 = endTime2 - startTime2; float elapsedTime2 = static_cast<decltype(elapsedTime2)>(dur2.count()); std::cout << "sorting time: " << elapsedTime2 << " milliseconds" << std::endl; #endif #endif // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. 
// TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. shadeFakeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, num_paths, dev_intersections, dev_paths, dev_materials, depth ); #if COMPACT // TODO: Part 1 - Stream Compaction PathSegment* endSegment = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, streamCompactPredicate()); num_paths = endSegment - dev_paths; iterationComplete = (depth >= traceDepth) || num_paths <= 0; // TODO: iterationComplete should be based off stream compaction results. #else iterationComplete = (depth >= traceDepth); #endif } #if TIMING hipDeviceSynchronize(); time_point_t endTime = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> dur = endTime - startTime; float elapsedTime = static_cast<decltype(elapsedTime)>(dur.count()); std::cout << "elapsed time: " << elapsedTime << " milliseconds" << std::endl; #endif // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> > (pixelcount, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
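The wrapper above compacts terminated paths with thrust::partition, and the recap comments also mention thrust::remove_if as an alternative. The standalone toy below (not part of the renderer; names are invented for illustration) shows what that step does: elements whose remainingBounces is still positive are moved to the front, and the returned iterator gives the new, smaller num_paths.

// Standalone illustration of the compaction step (not project code).
#include <thrust/device_vector.h>
#include <thrust/partition.h>
#include <cstdio>

struct IsActive {
    __host__ __device__ bool operator()(int remainingBounces) const {
        return remainingBounces > 0;
    }
};

int main() {
    int h[8] = {3, 0, 2, 0, 0, 1, 4, 0};               // toy remainingBounces values
    thrust::device_vector<int> d(h, h + 8);
    // Active entries are moved to the front; the iterator difference is the new path count.
    int numActive = static_cast<int>(thrust::partition(d.begin(), d.end(), IsActive()) - d.begin());
    printf("active paths after compaction: %d\n", numActive); // prints 4
    return 0;
}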
66a07679fc3ed0cd4521ed7f43ab03f80a44c2ef.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <chrono> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc // ... 
// TODO: Part 1 - Caching first bounce intersections static ShadeableIntersection * dev_first_intersections = NULL; void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need // TODO: Part 1 - Caching first bounce intersections #if CACHING cudaMalloc(&dev_first_intersections, pixelcount * sizeof(ShadeableIntersection)); #endif checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_paths); cudaFree(dev_geoms); cudaFree(dev_materials); cudaFree(dev_intersections); // clean up any extra device memory you created // TODO: Part 1 - Cache first bounce intersections #if CACHING cudaFree(dev_first_intersections); #endif checkCUDAError("pathtraceFree"); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. * * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // TODO: Part 2 - implement antialiasing by jittering the ray float xOffset = 0.0f; float yOffset = 0.0f; #if !CACHING // use random number generator to add offset to x and y thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth); thrust::uniform_real_distribution<float> u01(0, 1); xOffset = u01(rng); yOffset = u01(rng); #endif segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)(x + xOffset) - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)(y + yOffset) - (float)cam.resolution.y * 0.5f) ); segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. 
__global__ void computeIntersections( int depth, int num_paths, PathSegment * pathSegments, Geom * geoms, int geoms_size, ShadeableIntersection * intersections ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? else if (geom.type == CSG1) { t = csg1IntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == CSG2) { t = csg2IntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , int depth ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // not needed if using compact, needed if not using compact // without compact, you don't know which rays are finished #if !COMPACT if (pathSegments[idx].remainingBounces == 0) return; #endif if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, depth); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = 0; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! 
you should be able to start with basically a one-liner else { // float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); // pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; // pathSegments[idx].color *= u01(rng); // apply some noise because why not // TODO: Part 1 - Shading kernel with BSDF evaluation glm::vec3 intersectionPoint = getPointOnRay(pathSegments[idx].ray, intersection.t); scatterRay(pathSegments[idx], intersectionPoint, intersection.surfaceNormal, material, rng); pathSegments[idx].remainingBounces--; } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } // TODO: Part 1 - Stream Compaction // A predicate used for thrust::partition function to tell how to partition struct streamCompactPredicate { streamCompactPredicate() {}; __host__ __device__ bool operator()(const PathSegment &s) { return (s.remainingBounces > 0); } }; // TODO: Part 1 - Sorting rays, pathSegments, intersections // A predicate used for thrust::partition function to tell how to partition struct sortPredicate { sortPredicate() {}; __host__ __device__ bool operator()(const ShadeableIntersection &i1, const ShadeableIntersection &i2) { return (i1.materialId < i2.materialId); } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. 
// * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. // TODO: perform one iteration of path tracing generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> > (cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; #if TIMING using time_point_t = std::chrono::high_resolution_clock::time_point; time_point_t startTime = std::chrono::high_resolution_clock::now(); #endif // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; while (!iterationComplete) { // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; #if CACHING if ((iter == 1 && depth == 0) || depth > 0) { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth, num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_intersections ); checkCUDAError("trace one bounce"); } // TODO: Part 1 - Caching first bounce intersections // iteration is the sample number of the pixel // iteration == 1 means the first ray shot out of pixel // depth is the the bounce number if (iter == 1 && depth == 0) { // store computed intersection into cache cudaMemcpy(dev_first_intersections, dev_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); } // TODO: Part 1 - Caching first bounce intersection // if ray is not the first ray shot out of pixel, and // is the first bounce, use the saved cache else if (iter > 1 && depth == 0) { cudaMemcpy(dev_intersections, dev_first_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); } cudaDeviceSynchronize(); depth++; // NO CACHING #else // tracing computeIntersections<<<numblocksPathSegmentTracing, blockSize1d>>> ( depth, num_paths, dev_paths, dev_geoms, hst_scene->geoms.size(), dev_intersections ); checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); depth++; #endif // TODO: Part 1 - Sorting rays, pathSegments, intersections #if SORTING #if SORTTIMING using time_point_s = std::chrono::high_resolution_clock::time_point; time_point_s startTime2 = std::chrono::high_resolution_clock::now(); #endif thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths, dev_paths, sortPredicate()); #if SORTTIMING cudaDeviceSynchronize(); time_point_s endTime2 = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> dur2 = endTime2 - startTime2; float elapsedTime2 = static_cast<decltype(elapsedTime2)>(dur2.count()); std::cout << "sorting time: " << elapsedTime2 << " milliseconds" << std::endl; #endif #endif // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. 
// TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. shadeFakeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, num_paths, dev_intersections, dev_paths, dev_materials, depth ); #if COMPACT // TODO: Part 1 - Stream Compaction PathSegment* endSegment = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, streamCompactPredicate()); num_paths = endSegment - dev_paths; iterationComplete = (depth >= traceDepth) || num_paths <= 0; // TODO: iterationComplete should be based off stream compaction results. #else iterationComplete = (depth >= traceDepth); #endif } #if TIMING cudaDeviceSynchronize(); time_point_t endTime = std::chrono::high_resolution_clock::now(); std::chrono::duration<double, std::milli> dur = endTime - startTime; float elapsedTime = static_cast<decltype(elapsedTime)>(dur.count()); std::cout << "elapsed time: " << elapsedTime << " milliseconds" << std::endl; #endif // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> > (pixelcount, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO << <blocksPerGrid2d, blockSize2d >> > (pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
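The shading kernel above hands the actual BSDF work to scatterRay from interactions.h, which is not reproduced in this dump. Purely as an illustration of what a minimal, diffuse-only version of that call could look like (the function names here are hypothetical; the PathSegment and Material field names follow the usage in the kernel above), a cosine-weighted hemisphere bounce can be sketched as:

// Hypothetical sketch only -- the project's real BSDF code lives in interactions.h.
// Cosine-weighted sample of the hemisphere around a surface normal.
__host__ __device__ glm::vec3 sketchRandomDirectionInHemisphere(
        glm::vec3 normal, thrust::default_random_engine &rng) {
    thrust::uniform_real_distribution<float> u01(0, 1);
    float up = sqrt(u01(rng));           // cos(theta)
    float over = sqrt(1.0f - up * up);   // sin(theta)
    float around = u01(rng) * 6.2831853f;

    // Pick an axis that is not (nearly) parallel to the normal to build a tangent frame.
    glm::vec3 axis;
    if (fabs(normal.x) < 0.5773503f)      axis = glm::vec3(1, 0, 0);
    else if (fabs(normal.y) < 0.5773503f) axis = glm::vec3(0, 1, 0);
    else                                  axis = glm::vec3(0, 0, 1);
    glm::vec3 t1 = glm::normalize(glm::cross(normal, axis));
    glm::vec3 t2 = glm::normalize(glm::cross(normal, t1));

    return up * normal + cos(around) * over * t1 + sin(around) * over * t2;
}

// Diffuse-only scatter: offset the origin off the surface, pick a new direction,
// and attenuate the path throughput by the material albedo.
__host__ __device__ void sketchScatterRay(
        PathSegment &path, glm::vec3 intersect, glm::vec3 normal,
        const Material &m, thrust::default_random_engine &rng) {
    path.ray.origin = intersect + 0.001f * normal;
    path.ray.direction = sketchRandomDirectionInHemisphere(normal, rng);
    path.color *= m.color;
}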
2d6d9cd30cc1e9905fe77a2d97e256c00140cb1e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <iostream> #define size 4 using namespace std; __global__ void add(int *x,int *y,int *c){ const int tid = threadIdx.x+blockIdx.x*blockDim.x; if(tid<size){ c[tid] = x[tid] + y[tid]; } } __global__ void multiplyVectorAndMatrix(int *p, int *q, int *r){ const int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid<size){ for(int i=0;i<size;i++){ r[tid] += p[(tid*size)+i] * q[i]; } } } __global__ void matrixMultiplication(int *g, int *h, int *ii){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if((row<size) && (col<size)){ for(int i=0;i<size;i++){ sum += g[(row*size)+i] * h[(i*size)+col]; } __syncthreads(); ii[(row*size)+col] = sum; } } int main(){ //ADDITION OF TWO VECTORS int x[size],y[size],z[size]; for(int i=0;i<size;i++){ x[i] = rand()%100+1; y[i] = rand()%50+1; z[i] = 0; } cout<<"1st Vector: "; for(int i=0;i<size;i++){ cout<<x[i]<<" "; } cout<<endl<<"2nd Vector: "; for(int i=0;i<size;i++){ cout<<y[i]<<" "; } cout<<endl; int byte_size = size*sizeof(int); cout<<"Addition using CPU: "; for(int i=0;i<size;i++){ cout<<x[i]+y[i]<<" "; } cout<<endl; cout<<"Addition using GPU: "; int *a,*b,*c; hipMalloc(&a,byte_size); hipMemcpy(a,x,byte_size,hipMemcpyHostToDevice); hipMalloc(&b,byte_size); hipMemcpy(b,y,byte_size,hipMemcpyHostToDevice); hipMalloc(&c,byte_size); hipMemcpy(c,z,byte_size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( add), dim3(2),dim3(size), 0, 0, a,b,c); hipMemcpy(&z,c,byte_size,hipMemcpyDeviceToHost); for(int i=0;i<size;i++){ cout<<z[i]<<" "; } cout<<endl; //MULTIPLICATION: MATRIX AND VECTOR int m[size][size],n[size],o[size]; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ m[i][j] = rand()%10+1; } n[i] = rand()%10+1; o[i] = 0; } cout<<endl; cout<<"Matrix:"<<endl; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ cout<<m[i][j]<<" "; } cout<<endl; } cout<<endl<<"Vector: "; for(int i=0;i<size;i++){ cout<<n[i]<<" "; } cout<<endl<<endl; size_t matrix_size = size*size*sizeof(int); size_t vector_size = size*sizeof(int); cout<<"Multiplication using CPU: "; for(int i=0;i<size;i++){ o[i] = 0; for(int j=0;j<size;j++){ o[i]+=m[i][j]*n[j]; } } for(int i=0;i<size;i++){ cout<<o[i]<<" "; o[i] = 0; } cout<<endl; cout<<"Multiplication using GPU: "; int *p,*q,*r; hipMalloc(&p,matrix_size); hipMemcpy(p,m,matrix_size,hipMemcpyHostToDevice); hipMalloc(&q,vector_size); hipMemcpy(q,n,vector_size,hipMemcpyHostToDevice); hipMalloc(&r,vector_size); hipMemcpy(r,o,vector_size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( multiplyVectorAndMatrix), dim3(2),dim3(size/2), 0, 0, p,q,r); hipMemcpy(&o,r,vector_size,hipMemcpyDeviceToHost); for(int i=0;i<size;i++){ cout<<o[i]<<" "; } cout<<endl; //Matrix Multiplication int d[size][size],e[size][size],f[size][size]; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ d[i][j] = rand()%10+1; e[i][j] = rand()%10+1; } } cout<<endl; cout<<"Matrix:"<<endl; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ cout<<d[i][j]<<" "; } cout<<endl; } cout<<endl; cout<<"Matrix:"<<endl; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ cout<<e[i][j]<<" "; } cout<<endl; } cout<<endl; cout<<"Multiplication using CPU:"<<endl; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ f[i][j] = 0; for(int k=0;k<size;k++){ f[i][j] += d[i][k] * e[k][j]; } } } for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ cout<<f[i][j]<<" "; f[i][j] = 0; } cout<<endl; } cout<<endl; cout<<"Multiplication using GPU:"<<endl; int 
*g,*h,*ii;
hipMalloc(&g,matrix_size);
hipMemcpy(g,d,matrix_size,hipMemcpyHostToDevice);
hipMalloc(&h,matrix_size);
hipMemcpy(h,e,matrix_size,hipMemcpyHostToDevice);
hipMalloc(&ii,matrix_size);
hipMemcpy(ii,f,matrix_size,hipMemcpyHostToDevice);
dim3 threadsPerblock(size,size);
dim3 blocksPerGrid(1,1);
if(size*size>512)
{
    // A 512x512 block would exceed the per-block thread limit; cap the block at
    // 16x16 threads and cover the matrix with additional blocks instead.
    threadsPerblock.x = 16;
    threadsPerblock.y = 16;
    blocksPerGrid.x = ceil(double(size)/double(threadsPerblock.x));
    blocksPerGrid.y = ceil(double(size)/double(threadsPerblock.y));
}
hipLaunchKernelGGL(( matrixMultiplication), dim3(blocksPerGrid),dim3(threadsPerblock), 0, 0, g,h,ii);
hipMemcpy(&f,ii,matrix_size,hipMemcpyDeviceToHost);
for(int i=0;i<size;i++){
    for(int j=0;j<size;j++){
        cout<<f[i][j]<<" ";
    }
    cout<<endl;
}
cout<<endl;
}
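The matrixMultiplication kernel above re-reads both operands from global memory for every output element. Purely as an illustrative alternative (not part of the original program; the kernel name and TILE constant are invented here), a shared-memory tiled version stages 16x16 sub-blocks so each element is loaded once per tile, and it works for any n, not just multiples of the tile size. It uses no runtime API calls, so the same source builds under both HIP and CUDA:

#define TILE 16
// Illustrative tiled matrix multiply: C = A * B for n x n row-major matrices.
__global__ void matrixMultiplicationTiled(const int *A, const int *B, int *C, int n){
    __shared__ int As[TILE][TILE];
    __shared__ int Bs[TILE][TILE];
    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    int sum = 0;
    for(int t = 0; t < (n + TILE - 1) / TILE; t++){
        // Stage one tile of A and one tile of B in shared memory (zero-padded at the edges).
        As[threadIdx.y][threadIdx.x] = (row < n && t*TILE + threadIdx.x < n) ? A[row*n + t*TILE + threadIdx.x] : 0;
        Bs[threadIdx.y][threadIdx.x] = (col < n && t*TILE + threadIdx.y < n) ? B[(t*TILE + threadIdx.y)*n + col] : 0;
        __syncthreads();
        for(int k = 0; k < TILE; k++){
            sum += As[threadIdx.y][k] * Bs[k][threadIdx.x];
        }
        __syncthreads();
    }
    if(row < n && col < n){
        C[row*n + col] = sum;
    }
}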
2d6d9cd30cc1e9905fe77a2d97e256c00140cb1e.cu
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> #define size 4 using namespace std; __global__ void add(int *x,int *y,int *c){ const int tid = threadIdx.x+blockIdx.x*blockDim.x; if(tid<size){ c[tid] = x[tid] + y[tid]; } } __global__ void multiplyVectorAndMatrix(int *p, int *q, int *r){ const int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid<size){ for(int i=0;i<size;i++){ r[tid] += p[(tid*size)+i] * q[i]; } } } __global__ void matrixMultiplication(int *g, int *h, int *ii){ int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if((row<size) && (col<size)){ for(int i=0;i<size;i++){ sum += g[(row*size)+i] * h[(i*size)+col]; } __syncthreads(); ii[(row*size)+col] = sum; } } int main(){ //ADDITION OF TWO VECTORS int x[size],y[size],z[size]; for(int i=0;i<size;i++){ x[i] = rand()%100+1; y[i] = rand()%50+1; z[i] = 0; } cout<<"1st Vector: "; for(int i=0;i<size;i++){ cout<<x[i]<<" "; } cout<<endl<<"2nd Vector: "; for(int i=0;i<size;i++){ cout<<y[i]<<" "; } cout<<endl; int byte_size = size*sizeof(int); cout<<"Addition using CPU: "; for(int i=0;i<size;i++){ cout<<x[i]+y[i]<<" "; } cout<<endl; cout<<"Addition using GPU: "; int *a,*b,*c; cudaMalloc(&a,byte_size); cudaMemcpy(a,x,byte_size,cudaMemcpyHostToDevice); cudaMalloc(&b,byte_size); cudaMemcpy(b,y,byte_size,cudaMemcpyHostToDevice); cudaMalloc(&c,byte_size); cudaMemcpy(c,z,byte_size,cudaMemcpyHostToDevice); add<<<2,size>>>(a,b,c); cudaMemcpy(&z,c,byte_size,cudaMemcpyDeviceToHost); for(int i=0;i<size;i++){ cout<<z[i]<<" "; } cout<<endl; //MULTIPLICATION: MATRIX AND VECTOR int m[size][size],n[size],o[size]; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ m[i][j] = rand()%10+1; } n[i] = rand()%10+1; o[i] = 0; } cout<<endl; cout<<"Matrix:"<<endl; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ cout<<m[i][j]<<" "; } cout<<endl; } cout<<endl<<"Vector: "; for(int i=0;i<size;i++){ cout<<n[i]<<" "; } cout<<endl<<endl; size_t matrix_size = size*size*sizeof(int); size_t vector_size = size*sizeof(int); cout<<"Multiplication using CPU: "; for(int i=0;i<size;i++){ o[i] = 0; for(int j=0;j<size;j++){ o[i]+=m[i][j]*n[j]; } } for(int i=0;i<size;i++){ cout<<o[i]<<" "; o[i] = 0; } cout<<endl; cout<<"Multiplication using GPU: "; int *p,*q,*r; cudaMalloc(&p,matrix_size); cudaMemcpy(p,m,matrix_size,cudaMemcpyHostToDevice); cudaMalloc(&q,vector_size); cudaMemcpy(q,n,vector_size,cudaMemcpyHostToDevice); cudaMalloc(&r,vector_size); cudaMemcpy(r,o,vector_size,cudaMemcpyHostToDevice); multiplyVectorAndMatrix<<<2,size/2>>>(p,q,r); cudaMemcpy(&o,r,vector_size,cudaMemcpyDeviceToHost); for(int i=0;i<size;i++){ cout<<o[i]<<" "; } cout<<endl; //Matrix Multiplication int d[size][size],e[size][size],f[size][size]; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ d[i][j] = rand()%10+1; e[i][j] = rand()%10+1; } } cout<<endl; cout<<"Matrix:"<<endl; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ cout<<d[i][j]<<" "; } cout<<endl; } cout<<endl; cout<<"Matrix:"<<endl; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ cout<<e[i][j]<<" "; } cout<<endl; } cout<<endl; cout<<"Multiplication using CPU:"<<endl; for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ f[i][j] = 0; for(int k=0;k<size;k++){ f[i][j] += d[i][k] * e[k][j]; } } } for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ cout<<f[i][j]<<" "; f[i][j] = 0; } cout<<endl; } cout<<endl; cout<<"Multiplication using GPU:"<<endl; int *g,*h,*ii; cudaMalloc(&g,matrix_size); cudaMemcpy(g,d,matrix_size,cudaMemcpyHostToDevice); cudaMalloc(&h,matrix_size); 
cudaMemcpy(h,e,matrix_size,cudaMemcpyHostToDevice);
cudaMalloc(&ii,matrix_size);
cudaMemcpy(ii,f,matrix_size,cudaMemcpyHostToDevice);
dim3 threadsPerblock(size,size);
dim3 blocksPerGrid(1,1);
if(size*size>512)
{
    // A 512x512 block would exceed the per-block thread limit; cap the block at
    // 16x16 threads and cover the matrix with additional blocks instead.
    threadsPerblock.x = 16;
    threadsPerblock.y = 16;
    blocksPerGrid.x = ceil(double(size)/double(threadsPerblock.x));
    blocksPerGrid.y = ceil(double(size)/double(threadsPerblock.y));
}
matrixMultiplication<<<blocksPerGrid,threadsPerblock>>>(g,h,ii);
cudaMemcpy(&f,ii,matrix_size,cudaMemcpyDeviceToHost);
for(int i=0;i<size;i++){
    for(int j=0;j<size;j++){
        cout<<f[i][j]<<" ";
    }
    cout<<endl;
}
cout<<endl;
}
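Neither the original program nor its HIP translation above checks whether any kernel launch or copy actually succeeded. A small hypothetical helper (not part of the original source) shows the usual pattern; it could be called as checkLast("matrixMultiplication") right after a launch:

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helper: report the most recent CUDA error, if any.
static void checkLast(const char *label){
    cudaError_t err = cudaGetLastError();                 // error from the launch itself
    if(err == cudaSuccess) err = cudaDeviceSynchronize(); // errors raised while the kernel ran
    if(err != cudaSuccess){
        fprintf(stderr, "%s failed: %s\n", label, cudaGetErrorString(err));
    }
}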
f56614fddeed8dbbce0a104421c11374011b5e63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/fluid/operators/adagrad_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace { template <typename T, int block_size> __global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows, T* grad_merge, const int64_t* grad_merge_rows, size_t grad_merge_rows_size, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ size_t grad_merge_idx; if (tid == 0) { for (size_t i = 0; i < grad_merge_rows_size; i++) { if (grad_rows[ty] == grad_merge_rows[i]) { grad_merge_idx = i; } } } __syncthreads(); grad += ty * row_numel; grad_merge += grad_merge_idx * row_numel; for (int index = tid; index < row_numel; index += block_size) { paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]); } } template <typename T, int block_size> __global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows, const T* learning_rate, T* param, T* moment, int64_t row_numel, T epsilon) { const int ty = blockIdx.y; int tid = threadIdx.x; grad += ty * row_numel; param += rows[ty] * row_numel; moment += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. paddle::platform::CudaAtomicAdd(param + index, -1.0 * learning_rate[0] * grad[index] / (sqrt(moment[index]) + epsilon)); } } } // namespace template <typename T> struct SparseAdagradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& grad, const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { // 1. g_m.rows = set(g.rows) auto grad_width = grad.value().dims()[1]; math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func; auto grad_merge = merge_func(context, grad); auto* grad_merge_data = grad_merge.mutable_value()->template data<T>(); framework::Vector<int64_t> merge_rows(grad_merge.rows()); // 2. m += g_m * g_m auto grad_square = SquareSelectedRows<platform::CUDADeviceContext, T>(context, grad_merge); math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor; functor(context, grad_square, moment); // 3. 
update parameter auto* lr = learning_rate.data<T>(); auto* param_data = param->data<T>(); auto* moment_data = moment->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid2(1, merge_rows.size()); hipLaunchKernelGGL(( SparseAdagradFunctorKernel< T, 256>), dim3(grid2), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr, param_data, moment_data, grad_width, epsilon); } }; template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>; template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
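SparseAdagradFunctor above covers only the SelectedRows (sparse) gradient path; the dense path registered through AdagradOpKernel lives in adagrad_op.h and is not shown in this file. Element-wise, both paths apply the same update: moment += grad * grad, then param -= lr * grad / (sqrt(moment) + epsilon). A minimal illustrative kernel for the dense case (a sketch, not the Paddle implementation) would be:

// Sketch of the element-wise dense Adagrad update (illustrative only).
template <typename T>
__global__ void DenseAdagradSketchKernel(const T* grad, const T* learning_rate,
                                         T* moment, T* param, T epsilon, int64_t num) {
    int64_t i = blockIdx.x * static_cast<int64_t>(blockDim.x) + threadIdx.x;
    if (i < num) {
        moment[i] += grad[i] * grad[i];
        param[i] -= learning_rate[0] * grad[i] / (sqrt(moment[i]) + epsilon);
    }
}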
f56614fddeed8dbbce0a104421c11374011b5e63.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/fluid/operators/adagrad_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace { template <typename T, int block_size> __global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows, T* grad_merge, const int64_t* grad_merge_rows, size_t grad_merge_rows_size, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ size_t grad_merge_idx; if (tid == 0) { for (size_t i = 0; i < grad_merge_rows_size; i++) { if (grad_rows[ty] == grad_merge_rows[i]) { grad_merge_idx = i; } } } __syncthreads(); grad += ty * row_numel; grad_merge += grad_merge_idx * row_numel; for (int index = tid; index < row_numel; index += block_size) { paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]); } } template <typename T, int block_size> __global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows, const T* learning_rate, T* param, T* moment, int64_t row_numel, T epsilon) { const int ty = blockIdx.y; int tid = threadIdx.x; grad += ty * row_numel; param += rows[ty] * row_numel; moment += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. paddle::platform::CudaAtomicAdd(param + index, -1.0 * learning_rate[0] * grad[index] / (sqrt(moment[index]) + epsilon)); } } } // namespace template <typename T> struct SparseAdagradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& grad, const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { // 1. g_m.rows = set(g.rows) auto grad_width = grad.value().dims()[1]; math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func; auto grad_merge = merge_func(context, grad); auto* grad_merge_data = grad_merge.mutable_value()->template data<T>(); framework::Vector<int64_t> merge_rows(grad_merge.rows()); // 2. m += g_m * g_m auto grad_square = SquareSelectedRows<platform::CUDADeviceContext, T>(context, grad_merge); math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor; functor(context, grad_square, moment); // 3. 
update parameter auto* lr = learning_rate.data<T>(); auto* param_data = param->data<T>(); auto* moment_data = moment->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid2(1, merge_rows.size()); SparseAdagradFunctorKernel< T, 256><<<grid2, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>( grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr, param_data, moment_data, grad_width, epsilon); } }; template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>; template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
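Both kernels above rely on the same launch geometry: the grid is dim3(1, number_of_rows), so blockIdx.y selects a row, and the block_size threads of each block stride across the row_numel columns of that row. Stripped of the Adagrad arithmetic, the access pattern looks like this (illustrative only, not project code):

// One block in y per row; threads stride over the columns of that row.
template <typename T, int block_size>
__global__ void RowStridedCopySketch(const T* src, T* dst, int64_t row_numel) {
    const int row = blockIdx.y;   // grid launched as dim3(1, num_rows)
    const int tid = threadIdx.x;  // block launched as dim3(block_size, 1)
    src += row * row_numel;
    dst += row * row_numel;
    for (int64_t col = tid; col < row_numel; col += block_size) {
        dst[col] = src[col];
    }
}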
f8ca1636e76b97f62de41635b7be7465fdb285f7.hip
// !!! This is a file automatically generated by hipify!!! #define CUDPP_STATIC_LIB #include <algorithm> #include "graph.h" //#include <cutil_inline.h> #include "/Volumes/Macintosh HD/Developer/NVIDIA/CUDA-7.5/samples/common/inc/helper_cuda.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "utils.h" #include "cudpp.h" #include "component.h" #ifdef EULER_NDEBUG #define DEBUG_EULER_CU(x) #else #define DEBUG_EULER_CU(x) x #endif //#define DEBUG_EULER_CU(x) x #define DEBUG_CALL(x) DEBUG_EULER_CU(x) void printSuccessorGraph(Vertex * d_v , unsigned int length){ Vertex * h_v =NULL; h_v=(Vertex * ) malloc(length* sizeof(Vertex)); checkCudaErrors(hipMemcpy(h_v,d_v,length*sizeof(Vertex),hipMemcpyDeviceToHost)); printf("$graph G {\n"); for (unsigned int i =0; i< length; i++){ if(h_v[i].n1 < length) printf("$\t%u -- %u\n",h_v[i].vid, h_v[i].n1); if(h_v[i].n2 < length) printf("$\t%u -- %u\n",h_v[i].vid, h_v[i].n2); } printf("$}\n"); free(h_v); } void printCircuitGraph(CircuitEdge * d_ce , unsigned int length){ CircuitEdge * h_ce =NULL; h_ce=(CircuitEdge * ) malloc(length* sizeof(CircuitEdge)); checkCudaErrors(hipMemcpy(h_ce,d_ce,length*sizeof(CircuitEdge),hipMemcpyDeviceToHost)); printf("$graph G {\n"); for (unsigned int i =0; i< length; i++){ printf("$\t%u -- %u [ label= e1:%u-e2:%u ]\n",h_ce[i].c1,h_ce[i].c2,h_ce[i].e1,h_ce[i].e2); } printf("$}\n"); free(h_ce); } /*** Assig Successor**/ __global__ void assignSuccessor(EulerVertex * ev,unsigned int * l, unsigned int * e, unsigned vcount, EulerEdge * ee ,unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int eidx=0; if(tid<vcount){ while(eidx<ev[tid].ecount && eidx<ev[tid].lcount){ ee[e[ev[tid].ep+eidx]].s=l[ev[tid].lp+eidx] ; eidx++; } } } void validateSuccessors(EulerEdge * d_ee, unsigned int ecount) { EulerEdge * h_ee; h_ee= (EulerEdge * ) malloc( sizeof(EulerEdge) *ecount); checkCudaErrors(hipMemcpy(h_ee,d_ee,ecount * sizeof(EulerEdge),hipMemcpyDeviceToHost)); unsigned int snot=0; for(unsigned int i =0;i< ecount; i++){ if( h_ee[i].s==ecount) {snot++;} } printf("total edges with succesors not set :%u\n",snot); free(h_ee); } /** Constuct Succesor Graph**/ //Redundant __global__ void constructSuccessorGraphP1(EulerEdge* e, Vertex * v, unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<ecount){ v[tid].n1=ecount;v[tid].n2=ecount;//v[tid].n3=ecount;v[tid].n4=ecount; v[tid].vid=e[tid].eid; v[tid].n1=e[tid].s; } } __global__ void constructSuccessorGraphP2(EulerEdge* e, Vertex * v, unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<ecount){ /* if(v[v[tid].n1].n1 < ecount){ v[v[tid].n1].n2=v[tid].vid; }else{ v[v[tid].n1].n1=v[tid].vid; }*/ if(v[tid].n1 <ecount ){ v[v[tid].n1].n2=v[tid].vid; } } } /*** Calculate Circuit Graph Vertex ***/ __global__ void calculateCircuitGraphVertexData( unsigned int * D,unsigned int * C,unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if( tid <ecount) { unsigned int c=D[tid]; atomicExch(C+c,1); } } /*** construct circuit graph vertex **/ __global__ void constructCircuitGraphVertex(unsigned int * C,unsigned int * offset,unsigned int 
ecount, unsigned int * cv, unsigned int cvCount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid < ecount){ if(C[tid]!=0){ cv[offset[tid]]=tid; } } } /*** Calculate Circuit Graph Edges***/ __global__ void calculateCircuitGraphEdgeData(EulerVertex* v,unsigned int * e,unsigned vCount,unsigned int * D,unsigned int * map,unsigned int ecount, unsigned int * cedgeCount/*, unsigned int cvCount*/){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int index=0; unsigned int maxIndex=0; index=0; maxIndex=0; if(tid<vCount && v[tid].ecount>0 ){ index=v[tid].ep; maxIndex=index+v[tid].ecount-1; while (index < maxIndex ){ unsigned int c1=map[D[e[index]]]; unsigned int c2=map[D[e[index+1]]]; if( c1 !=c2){ unsigned int c=min(c1,c2); atomicInc(cedgeCount+c,ecount); } index++; } } } __global__ void assignCircuitGraphEdgeData(EulerVertex* v, unsigned int * e, unsigned vCount, unsigned int * D, unsigned int * map, unsigned int ecount, unsigned int * cedgeOffset, unsigned int * cedgeCount, unsigned int cvCount, CircuitEdge * cedge, unsigned int cecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int index=0; unsigned int maxIndex=0; if(tid<vCount && v[tid].ecount>0){ index=v[tid].ep; maxIndex=index+v[tid].ecount-1; while (index<maxIndex ){ unsigned int c1=map[D[e[index]]]; unsigned int c2=map[D[e[index+1]]]; if( c1 !=c2){ unsigned int c=min(c1,c2); unsigned int t=max(c1,c2); unsigned int i=atomicDec(cedgeCount+c,ecount); i=i-1; cedge[cedgeOffset[c]+i].c1=c; cedge[cedgeOffset[c]+i].c2=t; cedge[cedgeOffset[c]+i].e1=e[index]; cedge[cedgeOffset[c]+i].e2=e[index+1]; } index++; } } } /* __global__ void markSegments( unsigned short * d_mark, unsigned int circuitGraphEdgeCount, unsigned int * d_cg_edge_start, unsigned int * d_cedgeCount, unsigned int circuitVertexSize){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<circuitVertexSize){ d_mark[ d_cg_edge_start[tid]]=d_cedgeCount[tid]; } } */ /* __global__ void sortCircuit( unsigned int cedgeOffset, unsigned int cedgeCount, unsigned int circuitCount, CircuitEdge * cedge ){ unsigned int bid=0; unsigned int tid=0; unsigned int keyCount=0; unsigned int offset=cedgeOffset[bid]; unsigned int itemCount=circuitCount[bid]; unsigned int chunks=itemCount/256; unsigned int chunkSize=0; unsigned int chunkIdx=0; __shared__ unsigned int keys[256]; if(bid<circuitCount){ while(chunkIdx<Chunks){ if(tid< itemCount) keys[tid]=edge[offset+tid].e2; __syncthreads(); if(tid<itemCount){ for(int i=0;i<256;i++){ if(keys[tid]>keys[i]) keyCount++; } } __syncthreads(); CircuitEdge temp; if(tid<itemCount){ temp=cedge[tid]; } __syncthreads(); } } }*/ __device__ unsigned int getValue(CircuitEdge cedge, unsigned char radix){ switch(radix){ case 0: return cedge.e2; case 1: return cedge.e1; case 2: return cedge.c2; } return 0xFFFFFFFF; } /* __global__ void sortCircuitGraphEdgeData3( unsigned int * cedgeOffset, unsigned int * cedgeCount, unsigned int circuitCount, CircuitEdge * cedge, unsigned short * mark, unsigned int edgeCount, unsigned char radix){ unsigned int chunks=blockDim.x; unsigned int chunkSize=cedgeCount[bid]/chunks; //fix off by 1 unsigned int offset=cedgeOffset[bid]+chunkSize*threadIdx.x; //now 
scan while(mark[offset]==0 && offset<cedgeCount[bid]) offset++; //__syncthreads(); //everyone looking at its own chunk and we have to sort (mark[Offset] sized data) unsigned int count=mark[Offset]; for(int i=0;i<count;i++){ minIndex=offset+i; minValue=getValue(cedge[minIndex],radix);// cedge[minIndex].c2; for( j=offset+i+1;j<offset+count;j++){ unsigned int nextValue=getValue(cedge[j],radix); if( minValue > nextValue){ minIndex=j; minValue=nextValue; } } if(minIndex != offset+i){ CircuitEdge temp=cedge[offset+i]; cedge[offset+i]=cedge[minIndex]; cedge[minIndex]=temp; } } mark[Offset]=0; offset+=count; //scan onemore time to count same unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int minIndex=0; unsigned int minValue=0; unsigned int i =0; unsigned int j=0; unsigned int count; unsigned int offset=0; if(tid<circuitCount){ count=cedgeCount[tid]; offset=cedgeOffset[tid]; for (i=0;i<count;i++){ minIndex=offset+i; minValue=getValue(cedge[minIndex],radix);// cedge[minIndex].c2; for( j=offset+i+1;j<offset+count;j++){ unsigned int nextValue=getValue(cedge[j],radix); if( minValue > nextValue){ minIndex=j; minValue=nextValue; } } if(minIndex != offset+i){ CircuitEdge temp=cedge[offset+i]; cedge[offset+i]=cedge[minIndex]; cedge[minIndex]=temp; } } } } */ __global__ void sortCircuitGraphEdgeData2( unsigned int * cedgeOffset, unsigned int * cedgeCount, unsigned int circuitCount, CircuitEdge * cedge,unsigned char radix){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int minIndex=0; unsigned int minValue=0; unsigned int i =0; unsigned int j=0; unsigned int count; unsigned int offset=0; if(tid<circuitCount){ count=cedgeCount[tid]; offset=cedgeOffset[tid]; for (i=0;i<count;i++){ minIndex=offset+i; minValue=getValue(cedge[minIndex],radix);// cedge[minIndex].c2; for( j=offset+i+1;j<offset+count;j++){ unsigned int nextValue=getValue(cedge[j],radix); if( minValue > nextValue){ minIndex=j; minValue=nextValue; } } if(minIndex != offset+i){ CircuitEdge temp=cedge[offset+i]; cedge[offset+i]=cedge[minIndex]; cedge[minIndex]=temp; } } }/* } */ } __global__ void sortCircuitGraphEdgeData( unsigned int * cedgeOffset, unsigned int * cedgeCount, unsigned int circuitCount, CircuitEdge * cedge){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int minIndex=0; unsigned int minValue=0; unsigned int i =0; unsigned int j=0; unsigned int count; unsigned int offset=0; if(tid<circuitCount){ count=cedgeCount[tid]; offset=cedgeOffset[tid]; for (i=0;i<count;i++){ minIndex=offset+i; minValue=cedge[minIndex].c2; for( j=offset+i+1;j<offset+count;j++){ if(minValue > cedge[j].c2){ minIndex=j; minValue=cedge[j].c2; }else if( minValue == cedge[j].c2){ if( cedge[minIndex].e1> cedge[j].e1){ minIndex=j; minValue=cedge[j].c2; }else if(cedge[minIndex].e1 == cedge[j].e1){ if(cedge[minIndex].e2 > cedge[j].e2) { minIndex=j; minValue=cedge[j].c2; } } } } if(minIndex != offset+i){ CircuitEdge temp=cedge[offset+i]; cedge[offset+i]=cedge[minIndex]; cedge[minIndex]=temp; } } }/* if(tid<vCount && v[tid].ecount>0){ index=v[tid].ep; maxIndex=index+v[tid].ecount-1; while (index<maxIndex ){ unsigned int c1=map[D[e[index]]]; unsigned int c2=map[D[e[index+1]]]; if( c1 !=c2){ unsigned int c=min(c1,c2); unsigned int t=max(c1,c2); unsigned int 
i=atomicDec(cedgeCount+c,ecount); i=i-1; cedge[cedgeOffset[c]+i].c1=c; cedge[cedgeOffset[c]+i].c2=t; cedge[cedgeOffset[c]+i].e1=e[index]; cedge[cedgeOffset[c]+i].e2=e[index+1]; } index++; } }*/ } __global__ void identifyContigStart( EulerEdge * e ,unsigned char * contigStart,unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<ecount){ if(e[tid].s < ecount){ contigStart[e[tid].s]=0; //atomicExch(contigStart+e[tid].s,0); } } } __global__ void markSpanningEulerEdges(EulerEdge * ee, unsigned int * mark , unsigned int ecount,CircuitEdge * cg_edge,unsigned int cg_edgeCount,unsigned int * tree, unsigned int treeCount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid < treeCount) { /*if(tree[tid]==1)*/{ atomicExch(mark+min(cg_edge[tree[tid]].e1,cg_edge[tree[tid]].e2),1); // important: assumption if(mark[i]=1) means mark[i]and mark[i+1] are swipe //atomicExch(mark+cg_edge[tree[tid]].e2,1); } } } __global__ void executeSwipe(EulerVertex * ev,unsigned int * e, unsigned int vcount , EulerEdge * ee, unsigned int * mark,unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int t; unsigned int index=0; unsigned int maxIndex; unsigned int s; if( tid< vcount){ index=ev[tid].ep; maxIndex=index+ev[tid].ecount-1; while( index<maxIndex){ if(mark[ee[e[index]].eid]==1){ t=index; s=ee[e[index]].s; while(mark[ee[e[index]].eid]==1 && index < maxIndex){ ee[e[index]].s=ee[e[index+1]].s; index=index+1; } if(t!=index){ ee[e[index]].s=s; } } index++; } } } void executeSwipeHost(EulerVertex * ev,unsigned int * e, unsigned int vcount , EulerEdge * ee, unsigned int * mark,unsigned int ecount, unsigned int tid){ // unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int t; unsigned int index=0; unsigned int maxIndex; unsigned int s; if( tid< vcount){ index=ev[tid].ep; maxIndex=index+ev[tid].ecount-1; while( index<maxIndex){ if(mark[ee[e[index]].eid]==1){ t=index; s=ee[e[index]].s; while(mark[ee[e[index]].eid]==1 && index < maxIndex){ ee[e[index]].s=ee[e[index+1]].s; index=index+1; } if(t!=index){ ee[e[index]].s=s; } } index++; } } } void executeSwipeHostLaunch(EulerVertex * d_ev, unsigned int * d_e, unsigned int vcount, EulerEdge * d_ee, unsigned int * d_mark , unsigned int ecount){ EulerVertex * h_ev; unsigned int * h_e; EulerEdge * h_ee; unsigned int * h_mark; logMessage(LOG_LVL_DETAIL,"executeSwipeHostLaunch"); h_ev=(EulerVertex *)malloc(vcount*sizeof(EulerVertex)); h_e=(unsigned int *) malloc(vcount * sizeof(unsigned int )); h_ee =(EulerEdge *) malloc(ecount * sizeof(EulerEdge)); h_mark=(unsigned int *) malloc(ecount * sizeof(EulerEdge)); checkCudaErrors( hipMemcpy(h_ev,d_ev, vcount*sizeof(EulerVertex), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_e,d_e, vcount*sizeof(unsigned int), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_ee,d_ee, ecount*sizeof(EulerEdge), hipMemcpyDeviceToHost)); checkCudaErrors( hipMemcpy(h_mark,d_mark, ecount*sizeof(unsigned int), hipMemcpyDeviceToHost)); for(unsigned tid =0;tid<vcount;tid++){ executeSwipeHost(h_ev,h_e,vcount,h_ee,h_mark,ecount,tid); } free(h_ev); free(h_e); free(h_ee); free(h_mark); } extern "C" void markContigStart(EulerEdge * d_ee, unsigned 
char * d_contigStart, unsigned int ecount){ dim3 grid; dim3 block; hipMemset(d_contigStart,1,ecount); getOptimalLaunchConfiguration(ecount,&grid,&block); hipLaunchKernelGGL(( identifyContigStart), dim3(grid),dim3(block), 0, 0, d_ee,d_contigStart,ecount); hipDeviceSynchronize(); CheckCUDAError(); } extern "C" void executeSwipeDevice(EulerVertex * d_ev,unsigned int * d_e, unsigned int vcount, EulerEdge * d_ee, unsigned int ecount, CircuitEdge * d_cg_edge,unsigned int cg_edgeCount , unsigned int * d_tree,unsigned int treeCount){ dim3 grid ; dim3 block; unsigned int * d_mark; allocateMemory((void**) &d_mark, ecount* sizeof(unsigned int)); hipMemset(d_mark,1,ecount* sizeof(unsigned int)); getOptimalLaunchConfiguration(treeCount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: markSpanningEulerEdges"); hipLaunchKernelGGL(( markSpanningEulerEdges), dim3(grid),dim3(block), 0, 0, d_ee, d_mark , ecount,d_cg_edge,cg_edgeCount,d_tree, treeCount); hipDeviceSynchronize(); CheckCUDAError(); //DEBUG_CALL(executeSwipeHostLaunch(d_ev,d_e,vcount,d_ee,d_mark,ecount)); getOptimalLaunchConfiguration(vcount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: executeSwipe"); hipLaunchKernelGGL(( executeSwipe), dim3(grid),dim3(block), 0, 0, d_ev,d_e,vcount , d_ee, d_mark,ecount); hipDeviceSynchronize(); CheckCUDAError(); //printData(d_ev,vcount,d_ee,ecount); deallocateMemory(d_mark); } /**ok ! this is not something pleasent to the eyes :-\*/ inline bool edgeComp(CircuitEdge a, CircuitEdge b){ if(a.c1<b.c1) { return true;} else if(a.c1==b.c1){ if(a.c2<b.c2){ return true; } else if (a.c2==b.c2){ if(a.e1<b.e1) {return true;} else if(a.e1==b.e1){ if(a.e2<b.e2) {return true;} else return false; } else return false; } else return false; }else return false; } extern "C" void findEulerDevice(EulerVertex * d_ev,unsigned int * d_l, unsigned int * d_e, unsigned int vcount,EulerEdge * d_ee,unsigned int ecount,CircuitEdge ** d_cg_edge, unsigned int * cg_edgeCount,unsigned int * cg_vertexCount, unsigned int kmerLength){ Vertex * d_v=NULL; unsigned int * d_D; unsigned int * d_C; unsigned int * d_cg_offset; unsigned int * d_cedgeCount; unsigned int * d_cv; unsigned int * d_cg_edge_start; dim3 grid; dim3 block; allocateMemory((void**) &d_v, ecount* sizeof(Vertex)); //step 1: // assign sucessors getOptimalLaunchConfiguration(vcount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: assignSuccessor"); hipLaunchKernelGGL(( assignSuccessor), dim3(grid),dim3(block), 0, 0, d_ev,d_l,d_e,vcount,d_ee,ecount); hipDeviceSynchronize(); CheckCUDAError(); //validateSuccessors(d_ee,ecount); //printDebruijnGraph(d_ev,vcount,d_l,d_e,d_ee,ecount,kmerLength,0); //step 2 successor graph //constructSuccessorGraph P1 getOptimalLaunchConfiguration(ecount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: constructSuccessorGraph P1"); hipLaunchKernelGGL(( constructSuccessorGraphP1), dim3(grid),dim3(block), 0, 0, d_ee,d_v,ecount); hipDeviceSynchronize(); CheckCUDAError(); // printSuccessorGraph( d_v , ecount); /* synchronize */ logMessage(LOG_LVL_DETAIL,"kernel: constructSuccessorGraph P2"); hipLaunchKernelGGL(( constructSuccessorGraphP2), dim3(grid),dim3(block), 0, 0, d_ee,d_v,ecount); hipDeviceSynchronize(); CheckCUDAError(); //printSuccessorGraph( d_v , ecount); //step 3findComponent allocateMemory((void**) &d_D, ecount * sizeof(unsigned int)); findComponentDevice(d_v,&d_D,ecount); //step 4 circuit graph construction //step 4.a vertex calculation allocateMemory((void**) &d_C, ecount * sizeof(unsigned int)); 
getOptimalLaunchConfiguration(ecount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: calculateCircuitGraphVertexData"); hipLaunchKernelGGL(( calculateCircuitGraphVertexData), dim3(grid),dim3(block), 0, 0, d_D,d_C,ecount); hipDeviceSynchronize(); CheckCUDAError(); //printData(d_C,ecount); //step 4.b offset calculation .find prefix sum CUDPPConfiguration config; config.op = CUDPP_ADD; config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; CUDPPHandle scanplan = 0; CUDPPResult result = cudppPlan(&scanplan, config,ecount, 1, 0); // Run the scan allocateMemory((void**) &d_cg_offset, ecount * sizeof(unsigned int)); cudppScan(scanplan, d_cg_offset, d_C, ecount); cudppDestroyPlan(scanplan); //printData(d_cg_offset,ecount); //step 4.c create circuitGraph unsigned int buffer[2]; readData(buffer,d_cg_offset+ecount-1,1,sizeof(unsigned int)); readData(buffer+1,d_C+ecount-1,1,sizeof(unsigned int)); unsigned int circuitVertexSize=buffer[0]+buffer[1]; *cg_vertexCount=circuitVertexSize; logMessage(LOG_LVL_MSG,"#Circuit Graph Vertex : %d",circuitVertexSize); allocateMemory( (void**) &d_cv, circuitVertexSize * sizeof(unsigned int)); getOptimalLaunchConfiguration(ecount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: constructCircuitGraphVertex"); hipLaunchKernelGGL(( constructCircuitGraphVertex), dim3(grid),dim3(block), 0, 0, d_C,d_cg_offset,ecount, d_cv, circuitVertexSize); hipDeviceSynchronize(); CheckCUDAError(); // printData(d_cv,circuitVertexSize); if(circuitVertexSize>1){ //step 4.d calculate edge information allocateMemory((void**) &d_cedgeCount, circuitVertexSize * sizeof(unsigned int )); getOptimalLaunchConfiguration(vcount,&grid,&block); hipLaunchKernelGGL(( calculateCircuitGraphEdgeData), dim3(grid),dim3(block), 0, 0, d_ev,d_e,vcount , d_D,d_cg_offset, ecount, d_cedgeCount/*, circuitVertexSize*/); hipDeviceSynchronize(); CheckCUDAError(); //printData(d_cedgeCount,circuitVertexSize); //step 4.e calculate edge offsets config.op = CUDPP_ADD; config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; scanplan = 0; result = cudppPlan(&scanplan, config,ecount, 1, 0); // Run the scan allocateMemory((void**) &d_cg_edge_start, circuitVertexSize * sizeof(unsigned int)); cudppScan(scanplan, d_cg_edge_start, d_cedgeCount, circuitVertexSize); cudppDestroyPlan(scanplan); //printData(d_cg_edge_start,circuitVertexSize); //step 4.f construct edges readData(buffer,d_cg_edge_start+circuitVertexSize-1,1,sizeof(unsigned int)); readData(buffer+1,d_cedgeCount+circuitVertexSize-1,1,sizeof(unsigned int)); unsigned int circuitGraphEdgeCount=buffer[0]+buffer[1]; *cg_edgeCount=circuitGraphEdgeCount; logMessage(LOG_LVL_MSG,"#Circuit Graph Edges : %d\n",circuitGraphEdgeCount); allocateMemory((void**) d_cg_edge, circuitGraphEdgeCount * sizeof(CircuitEdge)); //unsigned int * h_cedgeCount=NULL; // h_cedgeCount = (unsigned int *) malloc(circuitVertexSize*sizeof(unsigned int)); // checkCudaErrors( hipMemcpy(h_cedgeCount, d_cedgeCount, circuitVertexSize*sizeof(unsigned int), hipMemcpyDeviceToHost)); getOptimalLaunchConfiguration(vcount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: assignCircuitGraphEdgeData"); hipLaunchKernelGGL(( assignCircuitGraphEdgeData), dim3(grid),dim3(block), 0, 0, d_ev,d_e, vcount,d_D,d_cg_offset,ecount, d_cg_edge_start,d_cedgeCount, circuitVertexSize, *d_cg_edge, circuitGraphEdgeCount); hipDeviceSynchronize(); CheckCUDAError(); // checkCudaErrors( 
hipMemcpy(d_cedgeCount,h_cedgeCount, circuitVertexSize*sizeof(unsigned int), hipMemcpyHostToDevice)); // free(h_cedgeCount); /**try1***/ /* getOptimalLaunchConfigCustomized(circuitVertexSize,&grid,&block,1); for(unsigned char radix=0;radix<3;radix++){ sortCircuitGraphEdgeData2<<<grid,block>>>(d_cg_edge_start,d_cedgeCount, circuitVertexSize, *d_cg_edge,radix); hipDeviceSynchronize(); CheckCUDAError(); } */ /**try 2***/ /* getOptimalLaunchConfigCustomized(circuitVertexSize,&grid,&block,1); unsigned short * d_mark; unsigned short * d_t2; allocateMemory((void**),d_mark,circuitGraphEdgeCount*sizeof(unsigned short)); allocateMemory((void**),d_t1,circuitGraphEdgeCount*sizeof(unsigned short)); getOptimalLaunchConfiguration(CircuitVertexSize,&grid,&block); markSegments<<<grid,block>>>(d_mark,circuitGraphEdgeCount,d_cg_edge_start,d_cedgeCount,circuitVertexSize); unsigned int thread=1; for(unsigned char radix=2;radix>=0;radix--){ getOptimalLaunchConfigCustomized(circuitVertexSize,&grid,&block,threads); sortCircuitGraphEdgeData3<<<grid,block>>>(d_cg_edge_start,d_cedgeCount, circuitVertexSize, *d_cg_edge,d_mark,circuitGraphEdgeCount,radix); hipDeviceSynchronize(); CheckCUDAError(); thread++; } deallocateMemory(d_mark); deallocateMemory(d_t2);*/ //*** try 3 cpu sorting**/ CircuitEdge * h_cg_edge=(CircuitEdge *)malloc(circuitGraphEdgeCount*sizeof(CircuitEdge)); checkCudaErrors( hipMemcpy(h_cg_edge,*d_cg_edge, circuitGraphEdgeCount*sizeof(CircuitEdge), hipMemcpyDeviceToHost)); std::sort(h_cg_edge,h_cg_edge+circuitGraphEdgeCount,edgeComp); checkCudaErrors( hipMemcpy(*d_cg_edge,h_cg_edge, circuitGraphEdgeCount*sizeof(CircuitEdge), hipMemcpyHostToDevice)); free(h_cg_edge); // printCircuitGraph(*d_cg_edge,circuitGraphEdgeCount); deallocateMemory(d_cg_edge_start); deallocateMemory(d_cedgeCount); } else{ *cg_edgeCount=0; } //printData(*d_cg_edge,circuitGraphEdgeCount); /*****/ //step 6 swipe execution deallocateMemory(d_cv); deallocateMemory(d_cg_offset); deallocateMemory(d_C); deallocateMemory(d_D); deallocateMemory(d_v); /*****/ //calcuate contig start /***/ } /* extern "C" void findEulerGold(EulerVertex * h_ev, unsigned int * h_l, unsigned int * h_e, unsigned int vcount, EulerEdge * h_ee, unsigned int ecount, unsigned int kmerLength){ //find start vertices. unsigned char vertexMap; vertexMap=(unsigned char *) malloc(sizeof(unsigned char)*vcount); for(unsigned int i=0; i<vcount; i++){ if(h_ev[i].ecount>h_ev[i].lcount){ vertexMap[i]=1; //SINK } else if(h_ev[i].lcount>h_ev[i].ecount){ vertexMap[i]=2; //SOURCE } else vertexMap[i]=0; // USED=100; } free(vertexMap); } */
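The two cudppScan calls above perform exclusive prefix sums that turn per-bucket counts (d_C for circuit vertices, d_cedgeCount for circuit edges) into starting offsets, and the totals are then recovered as last_offset + last_count via readData. The same bookkeeping can be illustrated with thrust (a standalone sketch, not a drop-in replacement for the CUDPP plans used here):

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main() {
    // Toy per-vertex flags, as d_C would hold them after calculateCircuitGraphVertexData.
    int h_count[6] = {1, 0, 1, 1, 0, 1};
    thrust::device_vector<int> count(h_count, h_count + 6);
    thrust::device_vector<int> offset(6);
    thrust::exclusive_scan(count.begin(), count.end(), offset.begin()); // 0,1,1,2,3,3
    int total = static_cast<int>(offset.back()) + static_cast<int>(count.back()); // 3 + 1 = 4 vertices
    printf("circuit vertices: %d\n", total);
    return 0;
}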
f8ca1636e76b97f62de41635b7be7465fdb285f7.cu
#define CUDPP_STATIC_LIB #include <algorithm> #include "graph.h" //#include <cutil_inline.h> #include "/Volumes/Macintosh HD/Developer/NVIDIA/CUDA-7.5/samples/common/inc/helper_cuda.h" #include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "utils.h" #include "cudpp.h" #include "component.h" #ifdef EULER_NDEBUG #define DEBUG_EULER_CU(x) #else #define DEBUG_EULER_CU(x) x #endif //#define DEBUG_EULER_CU(x) x #define DEBUG_CALL(x) DEBUG_EULER_CU(x) void printSuccessorGraph(Vertex * d_v , unsigned int length){ Vertex * h_v =NULL; h_v=(Vertex * ) malloc(length* sizeof(Vertex)); checkCudaErrors(cudaMemcpy(h_v,d_v,length*sizeof(Vertex),cudaMemcpyDeviceToHost)); printf("$graph G {\n"); for (unsigned int i =0; i< length; i++){ if(h_v[i].n1 < length) printf("$\t%u -- %u\n",h_v[i].vid, h_v[i].n1); if(h_v[i].n2 < length) printf("$\t%u -- %u\n",h_v[i].vid, h_v[i].n2); } printf("$}\n"); free(h_v); } void printCircuitGraph(CircuitEdge * d_ce , unsigned int length){ CircuitEdge * h_ce =NULL; h_ce=(CircuitEdge * ) malloc(length* sizeof(CircuitEdge)); checkCudaErrors(cudaMemcpy(h_ce,d_ce,length*sizeof(CircuitEdge),cudaMemcpyDeviceToHost)); printf("$graph G {\n"); for (unsigned int i =0; i< length; i++){ printf("$\t%u -- %u [ label= e1:%u-e2:%u ]\n",h_ce[i].c1,h_ce[i].c2,h_ce[i].e1,h_ce[i].e2); } printf("$}\n"); free(h_ce); } /*** Assig Successor**/ __global__ void assignSuccessor(EulerVertex * ev,unsigned int * l, unsigned int * e, unsigned vcount, EulerEdge * ee ,unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int eidx=0; if(tid<vcount){ while(eidx<ev[tid].ecount && eidx<ev[tid].lcount){ ee[e[ev[tid].ep+eidx]].s=l[ev[tid].lp+eidx] ; eidx++; } } } void validateSuccessors(EulerEdge * d_ee, unsigned int ecount) { EulerEdge * h_ee; h_ee= (EulerEdge * ) malloc( sizeof(EulerEdge) *ecount); checkCudaErrors(cudaMemcpy(h_ee,d_ee,ecount * sizeof(EulerEdge),cudaMemcpyDeviceToHost)); unsigned int snot=0; for(unsigned int i =0;i< ecount; i++){ if( h_ee[i].s==ecount) {snot++;} } printf("total edges with succesors not set :%u\n",snot); free(h_ee); } /** Constuct Succesor Graph**/ //Redundant __global__ void constructSuccessorGraphP1(EulerEdge* e, Vertex * v, unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<ecount){ v[tid].n1=ecount;v[tid].n2=ecount;//v[tid].n3=ecount;v[tid].n4=ecount; v[tid].vid=e[tid].eid; v[tid].n1=e[tid].s; } } __global__ void constructSuccessorGraphP2(EulerEdge* e, Vertex * v, unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<ecount){ /* if(v[v[tid].n1].n1 < ecount){ v[v[tid].n1].n2=v[tid].vid; }else{ v[v[tid].n1].n1=v[tid].vid; }*/ if(v[tid].n1 <ecount ){ v[v[tid].n1].n2=v[tid].vid; } } } /*** Calculate Circuit Graph Vertex ***/ __global__ void calculateCircuitGraphVertexData( unsigned int * D,unsigned int * C,unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if( tid <ecount) { unsigned int c=D[tid]; atomicExch(C+c,1); } } /*** construct circuit graph vertex **/ __global__ void constructCircuitGraphVertex(unsigned int * C,unsigned int * offset,unsigned int ecount, unsigned int * cv, unsigned int cvCount){ unsigned int 
tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid < ecount){ if(C[tid]!=0){ cv[offset[tid]]=tid; } } } /*** Calculate Circuit Graph Edges***/ __global__ void calculateCircuitGraphEdgeData(EulerVertex* v,unsigned int * e,unsigned vCount,unsigned int * D,unsigned int * map,unsigned int ecount, unsigned int * cedgeCount/*, unsigned int cvCount*/){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int index=0; unsigned int maxIndex=0; index=0; maxIndex=0; if(tid<vCount && v[tid].ecount>0 ){ index=v[tid].ep; maxIndex=index+v[tid].ecount-1; while (index < maxIndex ){ unsigned int c1=map[D[e[index]]]; unsigned int c2=map[D[e[index+1]]]; if( c1 !=c2){ unsigned int c=min(c1,c2); atomicInc(cedgeCount+c,ecount); } index++; } } } __global__ void assignCircuitGraphEdgeData(EulerVertex* v, unsigned int * e, unsigned vCount, unsigned int * D, unsigned int * map, unsigned int ecount, unsigned int * cedgeOffset, unsigned int * cedgeCount, unsigned int cvCount, CircuitEdge * cedge, unsigned int cecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int index=0; unsigned int maxIndex=0; if(tid<vCount && v[tid].ecount>0){ index=v[tid].ep; maxIndex=index+v[tid].ecount-1; while (index<maxIndex ){ unsigned int c1=map[D[e[index]]]; unsigned int c2=map[D[e[index+1]]]; if( c1 !=c2){ unsigned int c=min(c1,c2); unsigned int t=max(c1,c2); unsigned int i=atomicDec(cedgeCount+c,ecount); i=i-1; cedge[cedgeOffset[c]+i].c1=c; cedge[cedgeOffset[c]+i].c2=t; cedge[cedgeOffset[c]+i].e1=e[index]; cedge[cedgeOffset[c]+i].e2=e[index+1]; } index++; } } } /* __global__ void markSegments( unsigned short * d_mark, unsigned int circuitGraphEdgeCount, unsigned int * d_cg_edge_start, unsigned int * d_cedgeCount, unsigned int circuitVertexSize){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<circuitVertexSize){ d_mark[ d_cg_edge_start[tid]]=d_cedgeCount[tid]; } } */ /* __global__ void sortCircuit( unsigned int cedgeOffset, unsigned int cedgeCount, unsigned int circuitCount, CircuitEdge * cedge ){ unsigned int bid=0; unsigned int tid=0; unsigned int keyCount=0; unsigned int offset=cedgeOffset[bid]; unsigned int itemCount=circuitCount[bid]; unsigned int chunks=itemCount/256; unsigned int chunkSize=0; unsigned int chunkIdx=0; __shared__ unsigned int keys[256]; if(bid<circuitCount){ while(chunkIdx<Chunks){ if(tid< itemCount) keys[tid]=edge[offset+tid].e2; __syncthreads(); if(tid<itemCount){ for(int i=0;i<256;i++){ if(keys[tid]>keys[i]) keyCount++; } } __syncthreads(); CircuitEdge temp; if(tid<itemCount){ temp=cedge[tid]; } __syncthreads(); } } }*/ __device__ unsigned int getValue(CircuitEdge cedge, unsigned char radix){ switch(radix){ case 0: return cedge.e2; case 1: return cedge.e1; case 2: return cedge.c2; } return 0xFFFFFFFF; } /* __global__ void sortCircuitGraphEdgeData3( unsigned int * cedgeOffset, unsigned int * cedgeCount, unsigned int circuitCount, CircuitEdge * cedge, unsigned short * mark, unsigned int edgeCount, unsigned char radix){ unsigned int chunks=blockDim.x; unsigned int chunkSize=cedgeCount[bid]/chunks; //fix off by 1 unsigned int offset=cedgeOffset[bid]+chunkSize*threadIdx.x; //now scan while(mark[offset]==0 && offset<cedgeCount[bid]) offset++; 
//__syncthreads(); //everyone looking at its own chunk and we have to sort (mark[Offset] sized data) unsigned int count=mark[Offset]; for(int i=0;i<count;i++){ minIndex=offset+i; minValue=getValue(cedge[minIndex],radix);// cedge[minIndex].c2; for( j=offset+i+1;j<offset+count;j++){ unsigned int nextValue=getValue(cedge[j],radix); if( minValue > nextValue){ minIndex=j; minValue=nextValue; } } if(minIndex != offset+i){ CircuitEdge temp=cedge[offset+i]; cedge[offset+i]=cedge[minIndex]; cedge[minIndex]=temp; } } mark[Offset]=0; offset+=count; //scan onemore time to count same unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int minIndex=0; unsigned int minValue=0; unsigned int i =0; unsigned int j=0; unsigned int count; unsigned int offset=0; if(tid<circuitCount){ count=cedgeCount[tid]; offset=cedgeOffset[tid]; for (i=0;i<count;i++){ minIndex=offset+i; minValue=getValue(cedge[minIndex],radix);// cedge[minIndex].c2; for( j=offset+i+1;j<offset+count;j++){ unsigned int nextValue=getValue(cedge[j],radix); if( minValue > nextValue){ minIndex=j; minValue=nextValue; } } if(minIndex != offset+i){ CircuitEdge temp=cedge[offset+i]; cedge[offset+i]=cedge[minIndex]; cedge[minIndex]=temp; } } } } */ __global__ void sortCircuitGraphEdgeData2( unsigned int * cedgeOffset, unsigned int * cedgeCount, unsigned int circuitCount, CircuitEdge * cedge,unsigned char radix){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int minIndex=0; unsigned int minValue=0; unsigned int i =0; unsigned int j=0; unsigned int count; unsigned int offset=0; if(tid<circuitCount){ count=cedgeCount[tid]; offset=cedgeOffset[tid]; for (i=0;i<count;i++){ minIndex=offset+i; minValue=getValue(cedge[minIndex],radix);// cedge[minIndex].c2; for( j=offset+i+1;j<offset+count;j++){ unsigned int nextValue=getValue(cedge[j],radix); if( minValue > nextValue){ minIndex=j; minValue=nextValue; } } if(minIndex != offset+i){ CircuitEdge temp=cedge[offset+i]; cedge[offset+i]=cedge[minIndex]; cedge[minIndex]=temp; } } }/* } */ } __global__ void sortCircuitGraphEdgeData( unsigned int * cedgeOffset, unsigned int * cedgeCount, unsigned int circuitCount, CircuitEdge * cedge){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int minIndex=0; unsigned int minValue=0; unsigned int i =0; unsigned int j=0; unsigned int count; unsigned int offset=0; if(tid<circuitCount){ count=cedgeCount[tid]; offset=cedgeOffset[tid]; for (i=0;i<count;i++){ minIndex=offset+i; minValue=cedge[minIndex].c2; for( j=offset+i+1;j<offset+count;j++){ if(minValue > cedge[j].c2){ minIndex=j; minValue=cedge[j].c2; }else if( minValue == cedge[j].c2){ if( cedge[minIndex].e1> cedge[j].e1){ minIndex=j; minValue=cedge[j].c2; }else if(cedge[minIndex].e1 == cedge[j].e1){ if(cedge[minIndex].e2 > cedge[j].e2) { minIndex=j; minValue=cedge[j].c2; } } } } if(minIndex != offset+i){ CircuitEdge temp=cedge[offset+i]; cedge[offset+i]=cedge[minIndex]; cedge[minIndex]=temp; } } }/* if(tid<vCount && v[tid].ecount>0){ index=v[tid].ep; maxIndex=index+v[tid].ecount-1; while (index<maxIndex ){ unsigned int c1=map[D[e[index]]]; unsigned int c2=map[D[e[index+1]]]; if( c1 !=c2){ unsigned int c=min(c1,c2); unsigned int t=max(c1,c2); unsigned int i=atomicDec(cedgeCount+c,ecount); i=i-1; cedge[cedgeOffset[c]+i].c1=c; 
cedge[cedgeOffset[c]+i].c2=t; cedge[cedgeOffset[c]+i].e1=e[index]; cedge[cedgeOffset[c]+i].e2=e[index+1]; } index++; } }*/ } __global__ void identifyContigStart( EulerEdge * e ,unsigned char * contigStart,unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid<ecount){ if(e[tid].s < ecount){ contigStart[e[tid].s]=0; //atomicExch(contigStart+e[tid].s,0); } } } __global__ void markSpanningEulerEdges(EulerEdge * ee, unsigned int * mark , unsigned int ecount,CircuitEdge * cg_edge,unsigned int cg_edgeCount,unsigned int * tree, unsigned int treeCount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; if(tid < treeCount) { /*if(tree[tid]==1)*/{ atomicExch(mark+min(cg_edge[tree[tid]].e1,cg_edge[tree[tid]].e2),1); // important: assumption if(mark[i]=1) means mark[i]and mark[i+1] are swipe //atomicExch(mark+cg_edge[tree[tid]].e2,1); } } } __global__ void executeSwipe(EulerVertex * ev,unsigned int * e, unsigned int vcount , EulerEdge * ee, unsigned int * mark,unsigned int ecount){ unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int t; unsigned int index=0; unsigned int maxIndex; unsigned int s; if( tid< vcount){ index=ev[tid].ep; maxIndex=index+ev[tid].ecount-1; while( index<maxIndex){ if(mark[ee[e[index]].eid]==1){ t=index; s=ee[e[index]].s; while(mark[ee[e[index]].eid]==1 && index < maxIndex){ ee[e[index]].s=ee[e[index+1]].s; index=index+1; } if(t!=index){ ee[e[index]].s=s; } } index++; } } } void executeSwipeHost(EulerVertex * ev,unsigned int * e, unsigned int vcount , EulerEdge * ee, unsigned int * mark,unsigned int ecount, unsigned int tid){ // unsigned int tid=(blockDim.x*blockDim.y * gridDim.x*blockIdx.y) + (blockDim.x*blockDim.y*blockIdx.x)+(blockDim.x*threadIdx.y)+threadIdx.x; unsigned int t; unsigned int index=0; unsigned int maxIndex; unsigned int s; if( tid< vcount){ index=ev[tid].ep; maxIndex=index+ev[tid].ecount-1; while( index<maxIndex){ if(mark[ee[e[index]].eid]==1){ t=index; s=ee[e[index]].s; while(mark[ee[e[index]].eid]==1 && index < maxIndex){ ee[e[index]].s=ee[e[index+1]].s; index=index+1; } if(t!=index){ ee[e[index]].s=s; } } index++; } } } void executeSwipeHostLaunch(EulerVertex * d_ev, unsigned int * d_e, unsigned int vcount, EulerEdge * d_ee, unsigned int * d_mark , unsigned int ecount){ EulerVertex * h_ev; unsigned int * h_e; EulerEdge * h_ee; unsigned int * h_mark; logMessage(LOG_LVL_DETAIL,"executeSwipeHostLaunch"); h_ev=(EulerVertex *)malloc(vcount*sizeof(EulerVertex)); h_e=(unsigned int *) malloc(vcount * sizeof(unsigned int )); h_ee =(EulerEdge *) malloc(ecount * sizeof(EulerEdge)); h_mark=(unsigned int *) malloc(ecount * sizeof(EulerEdge)); checkCudaErrors( cudaMemcpy(h_ev,d_ev, vcount*sizeof(EulerVertex), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_e,d_e, vcount*sizeof(unsigned int), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_ee,d_ee, ecount*sizeof(EulerEdge), cudaMemcpyDeviceToHost)); checkCudaErrors( cudaMemcpy(h_mark,d_mark, ecount*sizeof(unsigned int), cudaMemcpyDeviceToHost)); for(unsigned tid =0;tid<vcount;tid++){ executeSwipeHost(h_ev,h_e,vcount,h_ee,h_mark,ecount,tid); } free(h_ev); free(h_e); free(h_ee); free(h_mark); } extern "C" void markContigStart(EulerEdge * d_ee, unsigned char * d_contigStart, unsigned int ecount){ dim3 grid; dim3 
block; cudaMemset(d_contigStart,1,ecount); getOptimalLaunchConfiguration(ecount,&grid,&block); identifyContigStart<<<grid,block>>>(d_ee,d_contigStart,ecount); cudaThreadSynchronize(); CheckCUDAError(); } extern "C" void executeSwipeDevice(EulerVertex * d_ev,unsigned int * d_e, unsigned int vcount, EulerEdge * d_ee, unsigned int ecount, CircuitEdge * d_cg_edge,unsigned int cg_edgeCount , unsigned int * d_tree,unsigned int treeCount){ dim3 grid ; dim3 block; unsigned int * d_mark; allocateMemory((void**) &d_mark, ecount* sizeof(unsigned int)); cudaMemset(d_mark,1,ecount* sizeof(unsigned int)); getOptimalLaunchConfiguration(treeCount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: markSpanningEulerEdges"); markSpanningEulerEdges<<<grid,block>>>(d_ee, d_mark , ecount,d_cg_edge,cg_edgeCount,d_tree, treeCount); cudaThreadSynchronize(); CheckCUDAError(); //DEBUG_CALL(executeSwipeHostLaunch(d_ev,d_e,vcount,d_ee,d_mark,ecount)); getOptimalLaunchConfiguration(vcount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: executeSwipe"); executeSwipe<<<grid,block>>>(d_ev,d_e,vcount , d_ee, d_mark,ecount); cudaThreadSynchronize(); CheckCUDAError(); //printData(d_ev,vcount,d_ee,ecount); deallocateMemory(d_mark); } /**ok ! this is not something pleasent to the eyes :-\*/ inline bool edgeComp(CircuitEdge a, CircuitEdge b){ if(a.c1<b.c1) { return true;} else if(a.c1==b.c1){ if(a.c2<b.c2){ return true; } else if (a.c2==b.c2){ if(a.e1<b.e1) {return true;} else if(a.e1==b.e1){ if(a.e2<b.e2) {return true;} else return false; } else return false; } else return false; }else return false; } extern "C" void findEulerDevice(EulerVertex * d_ev,unsigned int * d_l, unsigned int * d_e, unsigned int vcount,EulerEdge * d_ee,unsigned int ecount,CircuitEdge ** d_cg_edge, unsigned int * cg_edgeCount,unsigned int * cg_vertexCount, unsigned int kmerLength){ Vertex * d_v=NULL; unsigned int * d_D; unsigned int * d_C; unsigned int * d_cg_offset; unsigned int * d_cedgeCount; unsigned int * d_cv; unsigned int * d_cg_edge_start; dim3 grid; dim3 block; allocateMemory((void**) &d_v, ecount* sizeof(Vertex)); //step 1: // assign sucessors getOptimalLaunchConfiguration(vcount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: assignSuccessor"); assignSuccessor<<<grid,block>>>(d_ev,d_l,d_e,vcount,d_ee,ecount); cudaThreadSynchronize(); CheckCUDAError(); //validateSuccessors(d_ee,ecount); //printDebruijnGraph(d_ev,vcount,d_l,d_e,d_ee,ecount,kmerLength,0); //step 2 successor graph //constructSuccessorGraph P1 getOptimalLaunchConfiguration(ecount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: constructSuccessorGraph P1"); constructSuccessorGraphP1<<<grid,block>>>(d_ee,d_v,ecount); cudaThreadSynchronize(); CheckCUDAError(); // printSuccessorGraph( d_v , ecount); /* synchronize */ logMessage(LOG_LVL_DETAIL,"kernel: constructSuccessorGraph P2"); constructSuccessorGraphP2<<<grid,block>>>(d_ee,d_v,ecount); cudaThreadSynchronize(); CheckCUDAError(); //printSuccessorGraph( d_v , ecount); //step 3findComponent allocateMemory((void**) &d_D, ecount * sizeof(unsigned int)); findComponentDevice(d_v,&d_D,ecount); //step 4 circuit graph construction //step 4.a vertex calculation allocateMemory((void**) &d_C, ecount * sizeof(unsigned int)); getOptimalLaunchConfiguration(ecount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: calculateCircuitGraphVertexData"); calculateCircuitGraphVertexData<<<grid,block>>>( d_D,d_C,ecount); cudaThreadSynchronize(); CheckCUDAError(); //printData(d_C,ecount); //step 4.b offset calculation .find prefix sum CUDPPConfiguration 
config; config.op = CUDPP_ADD; config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; CUDPPHandle scanplan = 0; CUDPPResult result = cudppPlan(&scanplan, config,ecount, 1, 0); // Run the scan allocateMemory((void**) &d_cg_offset, ecount * sizeof(unsigned int)); cudppScan(scanplan, d_cg_offset, d_C, ecount); cudppDestroyPlan(scanplan); //printData(d_cg_offset,ecount); //step 4.c create circuitGraph unsigned int buffer[2]; readData(buffer,d_cg_offset+ecount-1,1,sizeof(unsigned int)); readData(buffer+1,d_C+ecount-1,1,sizeof(unsigned int)); unsigned int circuitVertexSize=buffer[0]+buffer[1]; *cg_vertexCount=circuitVertexSize; logMessage(LOG_LVL_MSG,"#Circuit Graph Vertex : %d",circuitVertexSize); allocateMemory( (void**) &d_cv, circuitVertexSize * sizeof(unsigned int)); getOptimalLaunchConfiguration(ecount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: constructCircuitGraphVertex"); constructCircuitGraphVertex<<<grid,block>>>(d_C,d_cg_offset,ecount, d_cv, circuitVertexSize); cudaThreadSynchronize(); CheckCUDAError(); // printData(d_cv,circuitVertexSize); if(circuitVertexSize>1){ //step 4.d calculate edge information allocateMemory((void**) &d_cedgeCount, circuitVertexSize * sizeof(unsigned int )); getOptimalLaunchConfiguration(vcount,&grid,&block); calculateCircuitGraphEdgeData<<<grid,block>>>(d_ev,d_e,vcount , d_D,d_cg_offset, ecount, d_cedgeCount/*, circuitVertexSize*/); cudaThreadSynchronize(); CheckCUDAError(); //printData(d_cedgeCount,circuitVertexSize); //step 4.e calculate edge offsets config.op = CUDPP_ADD; config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_EXCLUSIVE; scanplan = 0; result = cudppPlan(&scanplan, config,ecount, 1, 0); // Run the scan allocateMemory((void**) &d_cg_edge_start, circuitVertexSize * sizeof(unsigned int)); cudppScan(scanplan, d_cg_edge_start, d_cedgeCount, circuitVertexSize); cudppDestroyPlan(scanplan); //printData(d_cg_edge_start,circuitVertexSize); //step 4.f construct edges readData(buffer,d_cg_edge_start+circuitVertexSize-1,1,sizeof(unsigned int)); readData(buffer+1,d_cedgeCount+circuitVertexSize-1,1,sizeof(unsigned int)); unsigned int circuitGraphEdgeCount=buffer[0]+buffer[1]; *cg_edgeCount=circuitGraphEdgeCount; logMessage(LOG_LVL_MSG,"#Circuit Graph Edges : %d\n",circuitGraphEdgeCount); allocateMemory((void**) d_cg_edge, circuitGraphEdgeCount * sizeof(CircuitEdge)); //unsigned int * h_cedgeCount=NULL; // h_cedgeCount = (unsigned int *) malloc(circuitVertexSize*sizeof(unsigned int)); // checkCudaErrors( cudaMemcpy(h_cedgeCount, d_cedgeCount, circuitVertexSize*sizeof(unsigned int), cudaMemcpyDeviceToHost)); getOptimalLaunchConfiguration(vcount,&grid,&block); logMessage(LOG_LVL_DETAIL,"kernel: assignCircuitGraphEdgeData"); assignCircuitGraphEdgeData<<<grid,block>>>(d_ev,d_e, vcount,d_D,d_cg_offset,ecount, d_cg_edge_start,d_cedgeCount, circuitVertexSize, *d_cg_edge, circuitGraphEdgeCount); cudaThreadSynchronize(); CheckCUDAError(); // checkCudaErrors( cudaMemcpy(d_cedgeCount,h_cedgeCount, circuitVertexSize*sizeof(unsigned int), cudaMemcpyHostToDevice)); // free(h_cedgeCount); /**try1***/ /* getOptimalLaunchConfigCustomized(circuitVertexSize,&grid,&block,1); for(unsigned char radix=0;radix<3;radix++){ sortCircuitGraphEdgeData2<<<grid,block>>>(d_cg_edge_start,d_cedgeCount, circuitVertexSize, *d_cg_edge,radix); cudaThreadSynchronize(); CheckCUDAError(); } */ /**try 2***/ /* 
getOptimalLaunchConfigCustomized(circuitVertexSize,&grid,&block,1); unsigned short * d_mark; unsigned short * d_t2; allocateMemory((void**),d_mark,circuitGraphEdgeCount*sizeof(unsigned short)); allocateMemory((void**),d_t1,circuitGraphEdgeCount*sizeof(unsigned short)); getOptimalLaunchConfiguration(CircuitVertexSize,&grid,&block); markSegments<<<grid,block>>>(d_mark,circuitGraphEdgeCount,d_cg_edge_start,d_cedgeCount,circuitVertexSize); unsigned int thread=1; for(unsigned char radix=2;radix>=0;radix--){ getOptimalLaunchConfigCustomized(circuitVertexSize,&grid,&block,threads); sortCircuitGraphEdgeData3<<<grid,block>>>(d_cg_edge_start,d_cedgeCount, circuitVertexSize, *d_cg_edge,d_mark,circuitGraphEdgeCount,radix); cudaThreadSynchronize(); CheckCUDAError(); thread++; } deallocateMemory(d_mark); deallocateMemory(d_t2);*/ //*** try 3 cpu sorting**/ CircuitEdge * h_cg_edge=(CircuitEdge *)malloc(circuitGraphEdgeCount*sizeof(CircuitEdge)); checkCudaErrors( cudaMemcpy(h_cg_edge,*d_cg_edge, circuitGraphEdgeCount*sizeof(CircuitEdge), cudaMemcpyDeviceToHost)); std::sort(h_cg_edge,h_cg_edge+circuitGraphEdgeCount,edgeComp); checkCudaErrors( cudaMemcpy(*d_cg_edge,h_cg_edge, circuitGraphEdgeCount*sizeof(CircuitEdge), cudaMemcpyHostToDevice)); free(h_cg_edge); // printCircuitGraph(*d_cg_edge,circuitGraphEdgeCount); deallocateMemory(d_cg_edge_start); deallocateMemory(d_cedgeCount); } else{ *cg_edgeCount=0; } //printData(*d_cg_edge,circuitGraphEdgeCount); /*****/ //step 6 swipe execution deallocateMemory(d_cv); deallocateMemory(d_cg_offset); deallocateMemory(d_C); deallocateMemory(d_D); deallocateMemory(d_v); /*****/ //calcuate contig start /***/ } /* extern "C" void findEulerGold(EulerVertex * h_ev, unsigned int * h_l, unsigned int * h_e, unsigned int vcount, EulerEdge * h_ee, unsigned int ecount, unsigned int kmerLength){ //find start vertices. unsigned char vertexMap; vertexMap=(unsigned char *) malloc(sizeof(unsigned char)*vcount); for(unsigned int i=0; i<vcount; i++){ if(h_ev[i].ecount>h_ev[i].lcount){ vertexMap[i]=1; //SINK } else if(h_ev[i].lcount>h_ev[i].ecount){ vertexMap[i]=2; //SOURCE } else vertexMap[i]=0; // USED=100; } free(vertexMap); } */
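One remark on the successor "swipe" that executeSwipe in the file above performs: every Euler edge stores the index of the edge that follows it in its current circuit, and the spanning tree over the circuit graph tells each vertex which neighbouring edge pair to exchange so that two circuits are spliced into one. The host-only toy below illustrates only that splice; the six-edge successor array is invented and nothing in it comes from the original sources.

#include <cstdio>

// Follow an edge's successor chain until it loops back to the start.
static void print_circuit(const int *s, int start) {
    int e = start;
    do { printf("%d -> ", e); e = s[e]; } while (e != start);
    printf("(back to %d)\n", start);
}

int main() {
    // Successor array defining two edge-disjoint circuits: {0,1,2} and {3,4,5}.
    int s[6] = {1, 2, 0, 4, 5, 3};
    printf("before swipe:\n");
    print_circuit(s, 0);
    print_circuit(s, 3);

    // The "swipe" at the edge pair (0,3): exchange their successors. This is
    // the kind of exchange the spanning-tree edges direct the kernel to make.
    int t = s[0]; s[0] = s[3]; s[3] = t;

    printf("after swipe:\n");
    print_circuit(s, 0);   // 0 -> 4 -> 5 -> 3 -> 1 -> 2 -> (back to 0)
    return 0;
}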
91b0233ae120e5113aad0d3fa4170d2481f7e72a.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file * test_mst.cu * * @brief Simple test driver for computing Minimum Spanning Tree. */ #include <stdio.h> #include <string> #include <vector> #include <utility> #include <iostream> #include <cstdlib> #include <algorithm> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph construction utilities #include <gunrock/graphio/market.cuh> // MST includes #include <gunrock/app/cc/cc_app.cu> #include <gunrock/app/mst/mst_enactor.cuh> #include <gunrock/app/mst/mst_problem.cuh> #include <gunrock/app/mst/mst_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <moderngpu.cuh> // CPU Kruskal MST reference #include <boost/config.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/kruskal_min_spanning_tree.hpp> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::mst; /////////////////////////////////////////////////////////////////////////////// // Housekeeping and utility routines /////////////////////////////////////////////////////////////////////////////// void Usage() { printf( " ------------------------------------------------------------------\n" " test_mst <graph type> <graph type args> [--device=<device_index>]\n" " [--instrumented] [--quick] [--v]\n\n" "Graph types and args:\n" " market [<file>]\n" " Reads a Matrix-Market coordinate-format graph of directed/undirected\n" " edges from STDIN (or from the optionally-specified file)\n" " --device=<device_index> Set GPU device for running the graph primitive\n" " --instrumented If set then kernels keep track of queue-search_depth\n" " and barrier duty (a relative indicator of load imbalance)\n" " --quick If set will skip the CPU validation code\n" " --v If set will enable debug mode\n\n" " ------------------------------------------------------------------\n"); } /** * @brief Test_Parameter structure. */ struct MST_Test_Parameter : gunrock::app::TestParameter_Base { public: MST_Test_Parameter() { } ~MST_Test_Parameter() { } void Init(CommandLineArgs &args) { TestParameter_Base::Init(args); } }; /** * @brief Displays the MST result. * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph. * @param[in] mst_output Pointer to the MST edge mask. 
*/ template<typename VertexId, typename Value, typename SizeT> void DisplaySolution( const Csr<VertexId, Value, SizeT> &graph, int *mst_output) { fflush(stdout); int count = 0; int print_limit = graph.nodes; if (print_limit > 10) { print_limit = 10; } // find source vertex ids for display results VertexId *source = new VertexId[graph.edges]; for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j) { source[j] = i; } } // print source-destination pairs of minimum spanning tree edges printf("GPU Minimum Spanning Tree [First %d edges]\n", print_limit); printf("src dst\n"); for (int i = 0; i < graph.edges; ++i) { if (mst_output[i] == 1 && count <= print_limit) { printf("%d %d\n", source[i], graph.column_indices[i]); ++count; } } // clean up if necessary if (source) { delete [] source; } } /////////////////////////////////////////////////////////////////////////////// // CPU validation routines /////////////////////////////////////////////////////////////////////////////// /** * @brief A simple CPU-based reference MST implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] edge_values Weight value associated with each edge. * @param[in] graph Reference to the CSR graph we process on. * * \return long long int which indicates the total weight of the graph. */ template<typename VertexId, typename Value, typename SizeT> Value SimpleReferenceMST( const Value *edge_values, const Csr<VertexId, Value, SizeT> &graph) { printf("\nMST CPU REFERENCE TEST\n"); // Kruskal's minimum spanning tree preparations using namespace boost; typedef adjacency_list< vecS, vecS, undirectedS, no_property, property<edge_weight_t, int> > Graph; typedef graph_traits < Graph >::edge_descriptor Edge; typedef graph_traits < Graph >::vertex_descriptor Vertex; typedef std::pair<VertexId, VertexId> E; E *edge_pairs = new E[graph.edges]; int idx = 0; for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j) { edge_pairs[idx++] = std::make_pair(i, graph.column_indices[j]); } } Graph g(edge_pairs, edge_pairs + graph.edges, edge_values, graph.nodes); property_map < Graph, edge_weight_t >::type weight = get(edge_weight, g); std::vector < Edge > spanning_tree; CpuTimer cpu_timer; // record the kernel running time cpu_timer.Start(); // compute reference using kruskal_min_spanning_tree algorithm kruskal_minimum_spanning_tree(g, std::back_inserter(spanning_tree)); cpu_timer.Stop(); float elapsed_cpu = cpu_timer.ElapsedMillis(); // analyze reference results SizeT num_selected_cpu = 0; Value total_weight_cpu = 0; if (graph.nodes <= 50) { printf("CPU Minimum Spanning Tree\n"); } for (std::vector < Edge >::iterator ei = spanning_tree.begin(); ei != spanning_tree.end(); ++ei) { if (graph.nodes <= 50) { // print the edge pairs in the minimum spanning tree printf("%ld %ld\n", source(*ei, g), target(*ei, g)); // printf(" with weight of %f\n", weight[*ei]); } ++num_selected_cpu; total_weight_cpu += weight[*ei]; } // clean up if necessary if (edge_pairs) { delete [] edge_pairs; } printf("CPU - Computation Complete in %lf msec.\n", elapsed_cpu); // printf("CPU - Number of Edges in MST: %d\n", num_selected_cpu); return total_weight_cpu; } /////////////////////////////////////////////////////////////////////////////// // GPU MST test routines /////////////////////////////////////////////////////////////////////////////// /** * @brief Sample test entry * * @tparam VertexId * @tparam SizeT * @tparam Value * @tparam DEBUG * 
@tparam SIZE_CHECK * * @param[in] parameter Test parameter settings. */ template < typename VertexId, typename SizeT, typename Value, bool DEBUG, bool SIZE_CHECK > void RunTest(MST_Test_Parameter *parameter) { printf("\nMINIMUM SPANNING TREE TEST\n"); // define the problem data structure for graph primitive typedef MSTProblem< VertexId, SizeT, Value, true, // MARK_PREDECESSORS false, // ENABLE_IDEMPOTENCE true > // USE_DOUBLE_BUFFER Problem; Csr<VertexId, Value, SizeT>* graph = (Csr<VertexId, Value, SizeT>*)parameter->graph; ContextPtr* context = (ContextPtr*)parameter -> context; std::string partition_method = parameter -> partition_method; int max_grid_size = parameter -> max_grid_size; int num_gpus = parameter -> num_gpus; int* gpu_idx = parameter -> gpu_idx; int iterations = parameter -> iterations; bool g_quick = parameter -> g_quick; bool g_stream_from_host = parameter -> g_stream_from_host; double max_queue_sizing = parameter -> max_queue_sizing; // allocate MST enactor map MSTEnactor< Problem, false, // INSTRUMENT DEBUG, // DEBUG SIZE_CHECK > // SIZE_CHECK mst_enactor(gpu_idx); // allocate problem on GPU create a pointer of the MSTProblem type Problem * mst_problem = new Problem; // host results spaces VertexId * h_mst_output = new VertexId[graph->edges]; // copy data from CPU to GPU initialize data members in DataSlice util::GRError(mst_problem->Init(g_stream_from_host, *graph, num_gpus), "Problem MST Initialization Failed", __FILE__, __LINE__); // // perform calculations // GpuTimer gpu_timer; // record the kernel running time float elapsed_gpu = 0.0f; // device elapsed running time for (int iter = 0; iter < iterations; ++iter) { // reset values in DataSlice util::GRError(mst_problem->Reset( mst_enactor.GetFrontierType(), max_queue_sizing), "MST Problem Data Reset Failed", __FILE__, __LINE__); gpu_timer.Start(); // launch MST enactor util::GRError(mst_enactor.template Enact<Problem>( *context, mst_problem, max_grid_size), "MST Problem Enact Failed", __FILE__, __LINE__); gpu_timer.Stop(); elapsed_gpu += gpu_timer.ElapsedMillis(); } elapsed_gpu /= iterations; printf("GPU - Computation Complete in %lf msec.\n", elapsed_gpu); // copy results back to CPU from GPU using Extract util::GRError(mst_problem->Extract(h_mst_output), "MST Problem Data Extraction Failed", __FILE__, __LINE__); if (!g_quick) // run CPU reference test { // calculate GPU final number of selected edges int num_selected_gpu = 0; for (int iter = 0; iter < graph->edges; ++iter) { num_selected_gpu += h_mst_output[iter]; } // printf("\nGPU - Number of Edges in MST: %d\n", num_selected_gpu); // calculate GPU total selected MST weights for validation Value total_weight_gpu = 0; for (int iter = 0; iter < graph->edges; ++iter) { total_weight_gpu += h_mst_output[iter] * graph->edge_values[iter]; } // correctness validation Value total_weight_cpu = SimpleReferenceMST(graph->edge_values, *graph); if (total_weight_cpu == total_weight_gpu) { // print the edge pairs in the minimum spanning tree DisplaySolution(*graph, h_mst_output); printf("\nCORRECT.\n"); std::cout << "CPU Total Weight = " << total_weight_cpu << std::endl; std::cout << "GPU Total Weight = " << total_weight_gpu << std::endl; } else { printf("INCORRECT.\n"); std::cout << "CPU Total Weight = " << total_weight_cpu << std::endl; std::cout << "GPU Total Weight = " << total_weight_gpu << std::endl; } } // clean up if necessary if (mst_problem) delete mst_problem; if (h_mst_output) delete [] h_mst_output; } /** * @brief RunTests entry * * @tparam VertexId * @tparam 
Value * @tparam SizeT * @tparam DEBUG * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool DEBUG > void RunTests_size_check(MST_Test_Parameter *parameter) { if (parameter->size_check) { RunTest <VertexId, Value, SizeT, DEBUG, true>(parameter); } else { RunTest <VertexId, Value, SizeT, DEBUG, false>(parameter); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT > void RunTests_debug(MST_Test_Parameter *parameter) { if (parameter->debug) { RunTests_size_check <VertexId, Value, SizeT, true>(parameter); } else { RunTests_size_check <VertexId, Value, SizeT, false>(parameter); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Pointer to the CSR graph we process on. * @param[in] args Reference to the command line arguments. * @param[in] num_gpus Number of GPUs. * @param[in] context CudaContext pointer for ModernGPU APIs. * @param[in] gpu_idx GPU index to run algorithm. * @param[in] streams CUDA streams. */ template <typename VertexId, typename Value, typename SizeT> void RunTest( Csr<VertexId, Value, SizeT>* graph, CommandLineArgs& args, int num_gpus, ContextPtr* context, int* gpu_idx, hipStream_t* streams = NULL) { /* // test graph connectivity because MST // only supports fully-connected graph Test_Parameter *cc_parameters = new Test_Parameter; cc_parameters -> Init(args); cc_parameters -> g_quick = true; cc_parameters -> graph = graph; cc_parameters -> num_gpus = num_gpus; cc_parameters -> context = context; cc_parameters -> gpu_idx = gpu_idx; cc_parameters -> streams = streams; cc_parameters->PrintParameters(); // temporary storage for connected component algorithm GRGraph *temp_storage = (GRGraph*)malloc(sizeof(GRGraph)); // perform connected component runCC < VertexId, Value, SizeT, false, // INSTRUMENT false, // DEBUG true > // SIZE_CHECK (temp_storage, cc_parameters); // run test only if the graph is fully-connected int* numcomponent = (int*)temp_storage->aggregation; if (*numcomponent != 1) { fprintf(stderr, "Unsupported non-fully connected graph input.\n"); exit(1); } if (temp_storage) free(temp_storage); */ // perform minimum spanning tree test MST_Test_Parameter *parameter = new MST_Test_Parameter; parameter -> Init(args); parameter -> graph = graph; parameter -> num_gpus = num_gpus; parameter -> context = context; parameter -> gpu_idx = gpu_idx; parameter -> streams = streams; RunTests_debug<VertexId, Value, SizeT>(parameter); } /////////////////////////////////////////////////////////////////////////////// // Main function /////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { CommandLineArgs args(argc, argv); if ((argc < 3) || (args.CheckCmdLineFlag("help"))) { Usage(); return 1; } int device = 0; args.GetCmdLineArgument("device", device); mgpu::ContextPtr context = mgpu::CreateCudaDevice(device); bool g_undirected = true; // graph-construction parameters std::string graph_type = argv[1]; int flags = args.ParsedArgc(); int graph_args = argc - flags - 1; if (graph_args < 1) { Usage(); return 1; } // // construct graph and perform algorithm // if (graph_type == "market") { // matrix-market coordinate-formatted graph file // currently support Value type: int, float, double typedef int VertexId; // use as the vertex identifier typedef int 
Value; // use as the value type typedef int SizeT; // use as the graph size // default value for stream_from_host is false if (graph_args < 1) { Usage(); return 1; } char * market_filename = (graph_args == 2) ? argv[2] : NULL; // buildMarketGraph() reads a .mtx file into CSR data structure // template argument = true because the graph has edge values Csr<VertexId, Value, SizeT> csr(false); if (graphio::BuildMarketGraph<true>( market_filename, csr, g_undirected, false) != 0) { return 1; } // display input graph with weights // csr.DisplayGraph(true); // // Minimum Spanning Tree only supports undirected, connected graph // RunTest<VertexId, Value, SizeT>(&csr, args, 1, &context, &device); } else { fprintf(stderr, "Unspecified graph type.\n"); return 1; } return 0; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End
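An aside on the validation step in RunTest above: the driver copies the 0/1 edge mask back to the host and accumulates h_mst_output[i] * edge_values[i] in a plain loop. The same number can be obtained with a single device-side reduction; the sketch below uses thrust::inner_product on made-up arrays and is not part of the Gunrock driver.

#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <cstdio>

int main() {
    // 0/1 edge mask as the MST primitive would produce it, plus per-edge weights.
    int h_mask[]    = {1, 0, 1, 1, 0, 0};
    int h_weights[] = {4, 9, 2, 7, 5, 3};
    const int edges = 6;

    thrust::device_vector<int> d_mask(h_mask, h_mask + edges);
    thrust::device_vector<int> d_weights(h_weights, h_weights + edges);

    // sum_i mask[i] * weights[i] == total weight of the selected edges.
    int total = thrust::inner_product(d_mask.begin(), d_mask.end(),
                                      d_weights.begin(), 0);
    printf("selected-edge weight = %d\n", total);   // 4 + 2 + 7 = 13
    return 0;
}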
91b0233ae120e5113aad0d3fa4170d2481f7e72a.cu
// ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file * test_mst.cu * * @brief Simple test driver for computing Minimum Spanning Tree. */ #include <stdio.h> #include <string> #include <vector> #include <utility> #include <iostream> #include <cstdlib> #include <algorithm> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph construction utilities #include <gunrock/graphio/market.cuh> // MST includes #include <gunrock/app/cc/cc_app.cu> #include <gunrock/app/mst/mst_enactor.cuh> #include <gunrock/app/mst/mst_problem.cuh> #include <gunrock/app/mst/mst_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <moderngpu.cuh> // CPU Kruskal MST reference #include <boost/config.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/kruskal_min_spanning_tree.hpp> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::mst; /////////////////////////////////////////////////////////////////////////////// // Housekeeping and utility routines /////////////////////////////////////////////////////////////////////////////// void Usage() { printf( " ------------------------------------------------------------------\n" " test_mst <graph type> <graph type args> [--device=<device_index>]\n" " [--instrumented] [--quick] [--v]\n\n" "Graph types and args:\n" " market [<file>]\n" " Reads a Matrix-Market coordinate-format graph of directed/undirected\n" " edges from STDIN (or from the optionally-specified file)\n" " --device=<device_index> Set GPU device for running the graph primitive\n" " --instrumented If set then kernels keep track of queue-search_depth\n" " and barrier duty (a relative indicator of load imbalance)\n" " --quick If set will skip the CPU validation code\n" " --v If set will enable debug mode\n\n" " ------------------------------------------------------------------\n"); } /** * @brief Test_Parameter structure. */ struct MST_Test_Parameter : gunrock::app::TestParameter_Base { public: MST_Test_Parameter() { } ~MST_Test_Parameter() { } void Init(CommandLineArgs &args) { TestParameter_Base::Init(args); } }; /** * @brief Displays the MST result. * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph. * @param[in] mst_output Pointer to the MST edge mask. 
*/ template<typename VertexId, typename Value, typename SizeT> void DisplaySolution( const Csr<VertexId, Value, SizeT> &graph, int *mst_output) { fflush(stdout); int count = 0; int print_limit = graph.nodes; if (print_limit > 10) { print_limit = 10; } // find source vertex ids for display results VertexId *source = new VertexId[graph.edges]; for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j) { source[j] = i; } } // print source-destination pairs of minimum spanning tree edges printf("GPU Minimum Spanning Tree [First %d edges]\n", print_limit); printf("src dst\n"); for (int i = 0; i < graph.edges; ++i) { if (mst_output[i] == 1 && count <= print_limit) { printf("%d %d\n", source[i], graph.column_indices[i]); ++count; } } // clean up if necessary if (source) { delete [] source; } } /////////////////////////////////////////////////////////////////////////////// // CPU validation routines /////////////////////////////////////////////////////////////////////////////// /** * @brief A simple CPU-based reference MST implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] edge_values Weight value associated with each edge. * @param[in] graph Reference to the CSR graph we process on. * * \return long long int which indicates the total weight of the graph. */ template<typename VertexId, typename Value, typename SizeT> Value SimpleReferenceMST( const Value *edge_values, const Csr<VertexId, Value, SizeT> &graph) { printf("\nMST CPU REFERENCE TEST\n"); // Kruskal's minimum spanning tree preparations using namespace boost; typedef adjacency_list< vecS, vecS, undirectedS, no_property, property<edge_weight_t, int> > Graph; typedef graph_traits < Graph >::edge_descriptor Edge; typedef graph_traits < Graph >::vertex_descriptor Vertex; typedef std::pair<VertexId, VertexId> E; E *edge_pairs = new E[graph.edges]; int idx = 0; for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j) { edge_pairs[idx++] = std::make_pair(i, graph.column_indices[j]); } } Graph g(edge_pairs, edge_pairs + graph.edges, edge_values, graph.nodes); property_map < Graph, edge_weight_t >::type weight = get(edge_weight, g); std::vector < Edge > spanning_tree; CpuTimer cpu_timer; // record the kernel running time cpu_timer.Start(); // compute reference using kruskal_min_spanning_tree algorithm kruskal_minimum_spanning_tree(g, std::back_inserter(spanning_tree)); cpu_timer.Stop(); float elapsed_cpu = cpu_timer.ElapsedMillis(); // analyze reference results SizeT num_selected_cpu = 0; Value total_weight_cpu = 0; if (graph.nodes <= 50) { printf("CPU Minimum Spanning Tree\n"); } for (std::vector < Edge >::iterator ei = spanning_tree.begin(); ei != spanning_tree.end(); ++ei) { if (graph.nodes <= 50) { // print the edge pairs in the minimum spanning tree printf("%ld %ld\n", source(*ei, g), target(*ei, g)); // printf(" with weight of %f\n", weight[*ei]); } ++num_selected_cpu; total_weight_cpu += weight[*ei]; } // clean up if necessary if (edge_pairs) { delete [] edge_pairs; } printf("CPU - Computation Complete in %lf msec.\n", elapsed_cpu); // printf("CPU - Number of Edges in MST: %d\n", num_selected_cpu); return total_weight_cpu; } /////////////////////////////////////////////////////////////////////////////// // GPU MST test routines /////////////////////////////////////////////////////////////////////////////// /** * @brief Sample test entry * * @tparam VertexId * @tparam SizeT * @tparam Value * @tparam DEBUG * 
@tparam SIZE_CHECK * * @param[in] parameter Test parameter settings. */ template < typename VertexId, typename SizeT, typename Value, bool DEBUG, bool SIZE_CHECK > void RunTest(MST_Test_Parameter *parameter) { printf("\nMINIMUM SPANNING TREE TEST\n"); // define the problem data structure for graph primitive typedef MSTProblem< VertexId, SizeT, Value, true, // MARK_PREDECESSORS false, // ENABLE_IDEMPOTENCE true > // USE_DOUBLE_BUFFER Problem; Csr<VertexId, Value, SizeT>* graph = (Csr<VertexId, Value, SizeT>*)parameter->graph; ContextPtr* context = (ContextPtr*)parameter -> context; std::string partition_method = parameter -> partition_method; int max_grid_size = parameter -> max_grid_size; int num_gpus = parameter -> num_gpus; int* gpu_idx = parameter -> gpu_idx; int iterations = parameter -> iterations; bool g_quick = parameter -> g_quick; bool g_stream_from_host = parameter -> g_stream_from_host; double max_queue_sizing = parameter -> max_queue_sizing; // allocate MST enactor map MSTEnactor< Problem, false, // INSTRUMENT DEBUG, // DEBUG SIZE_CHECK > // SIZE_CHECK mst_enactor(gpu_idx); // allocate problem on GPU create a pointer of the MSTProblem type Problem * mst_problem = new Problem; // host results spaces VertexId * h_mst_output = new VertexId[graph->edges]; // copy data from CPU to GPU initialize data members in DataSlice util::GRError(mst_problem->Init(g_stream_from_host, *graph, num_gpus), "Problem MST Initialization Failed", __FILE__, __LINE__); // // perform calculations // GpuTimer gpu_timer; // record the kernel running time float elapsed_gpu = 0.0f; // device elapsed running time for (int iter = 0; iter < iterations; ++iter) { // reset values in DataSlice util::GRError(mst_problem->Reset( mst_enactor.GetFrontierType(), max_queue_sizing), "MST Problem Data Reset Failed", __FILE__, __LINE__); gpu_timer.Start(); // launch MST enactor util::GRError(mst_enactor.template Enact<Problem>( *context, mst_problem, max_grid_size), "MST Problem Enact Failed", __FILE__, __LINE__); gpu_timer.Stop(); elapsed_gpu += gpu_timer.ElapsedMillis(); } elapsed_gpu /= iterations; printf("GPU - Computation Complete in %lf msec.\n", elapsed_gpu); // copy results back to CPU from GPU using Extract util::GRError(mst_problem->Extract(h_mst_output), "MST Problem Data Extraction Failed", __FILE__, __LINE__); if (!g_quick) // run CPU reference test { // calculate GPU final number of selected edges int num_selected_gpu = 0; for (int iter = 0; iter < graph->edges; ++iter) { num_selected_gpu += h_mst_output[iter]; } // printf("\nGPU - Number of Edges in MST: %d\n", num_selected_gpu); // calculate GPU total selected MST weights for validation Value total_weight_gpu = 0; for (int iter = 0; iter < graph->edges; ++iter) { total_weight_gpu += h_mst_output[iter] * graph->edge_values[iter]; } // correctness validation Value total_weight_cpu = SimpleReferenceMST(graph->edge_values, *graph); if (total_weight_cpu == total_weight_gpu) { // print the edge pairs in the minimum spanning tree DisplaySolution(*graph, h_mst_output); printf("\nCORRECT.\n"); std::cout << "CPU Total Weight = " << total_weight_cpu << std::endl; std::cout << "GPU Total Weight = " << total_weight_gpu << std::endl; } else { printf("INCORRECT.\n"); std::cout << "CPU Total Weight = " << total_weight_cpu << std::endl; std::cout << "GPU Total Weight = " << total_weight_gpu << std::endl; } } // clean up if necessary if (mst_problem) delete mst_problem; if (h_mst_output) delete [] h_mst_output; } /** * @brief RunTests entry * * @tparam VertexId * @tparam 
Value * @tparam SizeT * @tparam DEBUG * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT, bool DEBUG > void RunTests_size_check(MST_Test_Parameter *parameter) { if (parameter->size_check) { RunTest <VertexId, Value, SizeT, DEBUG, true>(parameter); } else { RunTest <VertexId, Value, SizeT, DEBUG, false>(parameter); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] parameter Pointer to test parameter settings */ template < typename VertexId, typename Value, typename SizeT > void RunTests_debug(MST_Test_Parameter *parameter) { if (parameter->debug) { RunTests_size_check <VertexId, Value, SizeT, true>(parameter); } else { RunTests_size_check <VertexId, Value, SizeT, false>(parameter); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Pointer to the CSR graph we process on. * @param[in] args Reference to the command line arguments. * @param[in] num_gpus Number of GPUs. * @param[in] context CudaContext pointer for ModernGPU APIs. * @param[in] gpu_idx GPU index to run algorithm. * @param[in] streams CUDA streams. */ template <typename VertexId, typename Value, typename SizeT> void RunTest( Csr<VertexId, Value, SizeT>* graph, CommandLineArgs& args, int num_gpus, ContextPtr* context, int* gpu_idx, cudaStream_t* streams = NULL) { /* // test graph connectivity because MST // only supports fully-connected graph Test_Parameter *cc_parameters = new Test_Parameter; cc_parameters -> Init(args); cc_parameters -> g_quick = true; cc_parameters -> graph = graph; cc_parameters -> num_gpus = num_gpus; cc_parameters -> context = context; cc_parameters -> gpu_idx = gpu_idx; cc_parameters -> streams = streams; cc_parameters->PrintParameters(); // temporary storage for connected component algorithm GRGraph *temp_storage = (GRGraph*)malloc(sizeof(GRGraph)); // perform connected component runCC < VertexId, Value, SizeT, false, // INSTRUMENT false, // DEBUG true > // SIZE_CHECK (temp_storage, cc_parameters); // run test only if the graph is fully-connected int* numcomponent = (int*)temp_storage->aggregation; if (*numcomponent != 1) { fprintf(stderr, "Unsupported non-fully connected graph input.\n"); exit(1); } if (temp_storage) free(temp_storage); */ // perform minimum spanning tree test MST_Test_Parameter *parameter = new MST_Test_Parameter; parameter -> Init(args); parameter -> graph = graph; parameter -> num_gpus = num_gpus; parameter -> context = context; parameter -> gpu_idx = gpu_idx; parameter -> streams = streams; RunTests_debug<VertexId, Value, SizeT>(parameter); } /////////////////////////////////////////////////////////////////////////////// // Main function /////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { CommandLineArgs args(argc, argv); if ((argc < 3) || (args.CheckCmdLineFlag("help"))) { Usage(); return 1; } int device = 0; args.GetCmdLineArgument("device", device); mgpu::ContextPtr context = mgpu::CreateCudaDevice(device); bool g_undirected = true; // graph-construction parameters std::string graph_type = argv[1]; int flags = args.ParsedArgc(); int graph_args = argc - flags - 1; if (graph_args < 1) { Usage(); return 1; } // // construct graph and perform algorithm // if (graph_type == "market") { // matrix-market coordinate-formatted graph file // currently support Value type: int, float, double typedef int VertexId; // use as the vertex identifier typedef int 
Value; // use as the value type typedef int SizeT; // use as the graph size // default value for stream_from_host is false if (graph_args < 1) { Usage(); return 1; } char * market_filename = (graph_args == 2) ? argv[2] : NULL; // buildMarketGraph() reads a .mtx file into CSR data structure // template argument = true because the graph has edge values Csr<VertexId, Value, SizeT> csr(false); if (graphio::BuildMarketGraph<true>( market_filename, csr, g_undirected, false) != 0) { return 1; } // display input graph with weights // csr.DisplayGraph(true); // // Minimum Spanning Tree only supports undirected, connected graph // RunTest<VertexId, Value, SizeT>(&csr, args, 1, &context, &device); } else { fprintf(stderr, "Unspecified graph type.\n"); return 1; } return 0; } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End
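The .cu and .hip copies of this driver differ only by the mechanical renames that hipify applies: here just cudaStream_t becoming hipStream_t plus the generated banner, while the Euler pair earlier in the dump also shows cudaMemcpy and cudaThreadSynchronize becoming hipMemcpy and hipDeviceSynchronize, and kernel<<<...>>> launches becoming hipLaunchKernelGGL. A minimal sketch of that mapping follows; the kernel and buffer names are invented for illustration, and the CUDA spellings are given only in the comments.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 256;
    float *d_x = NULL;
    hipMalloc((void**)&d_x, n * sizeof(float));   // CUDA: cudaMalloc
    hipMemset(d_x, 0, n * sizeof(float));         // CUDA: cudaMemset
    // CUDA form of the launch:  scale<<<dim3(1), dim3(256)>>>(d_x, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3(1), dim3(256), 0, 0, d_x, 2.0f, n);
    hipDeviceSynchronize();                       // CUDA: cudaDeviceSynchronize
    hipFree(d_x);                                 // CUDA: cudaFree
    printf("kernel launched and synchronized\n");
    return 0;
}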
8709ddbd3d32728b3d4fd7e80f4338813ec61489.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013-2015 Guoguo Chen // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cudamatrix/cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. 
} // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.y * blockDim.y + threadIdx.y; // row index int j = blockIdx.x * blockDim.x + threadIdx.x; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * vec[i] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (i * (i+1) / 2) + j; int32_cuda index_A = i * dmat.stride + j; if (j <= i) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } // for this kernel, following the newer pattern, the x-dim is the row-index, the // y-dim is the col-index. 
template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index. int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = j + i * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } // for this kernel, the x-dim is the row-index at the output, the y-dim is the // col-index at the output template<typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index out int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index out int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].row * d_out.stride + smat_in[smat_index].column; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_trans(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].column * d_out.stride + smat_in[smat_index].row; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_as_vec(Real* vec_out, const MatrixElement<OtherReal>* smat_in, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; vec_out[smat_index] = smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat_trans(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].row * mat_d_in.stride + smat_in[smat_index].column; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].column * mat_d_in.stride + smat_in[smat_index].row; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _transpose_matrix(Real* mat, MatrixDim d) { // Transposes a square matrix in-place. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j >= i || i >= d.rows) { return; } // Only half the threads act. 
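  // Each surviving thread (j < i) swaps the (i, j) and (j, i) elements, so every
  // off-diagonal pair is exchanged exactly once and the diagonal stays in place.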
int32_cuda index_a = j + i * d.stride, index_b = i + j * d.stride; Real a = mat[index_a], b = mat[index_b]; mat[index_a] = b; mat[index_b] = a; } template<typename Real> __global__ static void _copy_col_from_vec(Real* mat, const Real* v, int col, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < d.rows ) { int32_cuda index = col + i * d.stride; mat[index] = v[i]; } } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if ( i < d.cols && j < d.rows ) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i*d.stride; if ( i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ 
static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] / A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if ( i < dst_d.cols && j < dst_d.rows ) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = (a > b ? a : b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } /// y is the derivative we will output; vec is the input we're computing /// the group p-norm on, "norm" is the previously computed group p-norm. template<typename Real> __global__ static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. norm_element = norm[src_index]; // this is the pnorm Real vec_element_sign = (vec_element > 0 ? 1 : -1); Real ans; if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin. else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) * pow(norm_element, 1 - power); deriv[dst_index] = ans; } } /// deriv is the derivative we will output; vec is the input we're computing /// the group max on, "maxv" is the previously computed group max. template<typename Real> __global__ static void _calc_group_max_deriv(Real *deriv, const Real *vec, const Real *maxv, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. 
max_element = maxv[src_index]; // this is the max value Real ans = (max_element == vec_element ? 1.0 : 0.0); deriv[dst_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (j >= d.rows ) return; //invert divider in shared memory __shared__ Real inv[16]; if(threadIdx.x==0) { inv[threadIdx.y] = 1.0/vec_div[j]; } __syncthreads(); //multiply elements if (i < d.cols && j < d.rows) mat[index] *= inv[threadIdx.y]; } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; int32_cuda index_src = i + j*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_blocks(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.rows + q * d.cols] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_blocks_trans(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.cols + q * d.rows] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, a_index = i + j*stride_a, b_index = i + j*stride_b, c_index = i + j*stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, // this function does S = beta S + alpha * L^T L. 
This is used in PSD matrix inversion. // The i index is the row of the destination S and the j the column (although of // course the output is symmetric so it doesn't matter in a sense). The main point // of this is to make use of various symmetries and zero-ness. template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*col[j] + beta*dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*dmat.stride; int32_cuda index2 = i + j*dmask.stride; if ( i < dmat.cols && j < dmat.rows ) if(mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * mat2[index2] * vec[j] + beta * mat[index]; } } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index] ; } } /* * CuVector */ // very limited application! 
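// _set_bias_params divides or multiplies each v[i] by a correction factor (capped at
// param_2) depending on whether the ratio a[i] / param_3 lies below or above param_1;
// a ratio outside [0, 1.01) just sets *flag and leaves that element untouched.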
template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { Real ratio = a[i] / param_3; if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) { *flag = 1; return; } if ( ratio < param_1 ) { Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio); v[i] = v[i] / factor; } else if ( ratio > param_1 ) { Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1); v[i] = v[i] * factor; } } } template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". the dimension of v_in should be equal to the #columns of m_out. In // this kernel, following the new pattern, x corresponds to row-index and y to // column-index. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index. int j = blockIdx.y * blockDim.y + threadIdx.y; // column index. if (i < d.rows && j < d.cols) { int index = i * d.stride + j; m_out[index] = v_in[j]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if ( i < dim) { v_out[i] = (float) v_in[i]; } } template<typename Real> __global__ static void _vec_min(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= CU1DBLOCK) return; __shared__ Real row_data[CU1DBLOCK]; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real min = 1.0 / 0.0; // infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j < min) min = v_j; } row_data[i] = min; __syncthreads(); //get the sum *value = _min_reduce(row_data); } template<typename Real> __global__ static void _vec_max(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.y > 0) return; __shared__ Real row_data[CU1DBLOCK]; if(i >= CU1DBLOCK) return; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real max = -1.0 / 0.0; // -infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j > max) max = v_j; } row_data[i] = max; __syncthreads(); //get the sum *value = _max_reduce(row_data); } // _trace_mat_mat expects to be called with 1 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 0]. 
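// In other words, each block accumulates part of tr(A * B) = sum_{r,c} A(r, c) * B(c, r);
// the caller is expected to add up the per-block partial sums left in value[].
//
// A minimal host-side sketch of driving this kernel (hypothetical variable names,
// using the hipLaunchKernelGGL convention of the wrappers later in this file):
//
//   // float *value_dev;                           // one partial sum per block (here: 1)
//   // hipMalloc(&value_dev, 1 * sizeof(float));
//   // hipLaunchKernelGGL((_trace_mat_mat<float, 1>), dim3(1), dim3(CU1DBLOCK),
//   //                    0, 0, A, B, dA, B_stride, value_dev);
//   // ...copy value_dev back with hipMemcpy and, if more than one block was used,
//   // sum the partial results on the host.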
template<typename Real, int num_blocks> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; int block_size = (num_elements + num_threads - 1) / num_threads; int loop_start = i * block_size, loop_end = (i + 1) * block_size; if (loop_end > num_elements) loop_end = num_elements; Real sum = 0.0; for (int j = loop_start; j < loop_end; j++) { // for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = row + col * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // _trace_mat_mat_trans expects to be called with 4 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 3]. template<typename Real, int num_blocks> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; // int block_size = (num_elements + num_threads - 1) / num_threads; // int loop_start = i * block_size, loop_end = (i + 1) * block_size; // if (loop_end > num_elements) // loop_end = num_elements; Real sum = 0.0; // for (int j = loop_start; j < loop_end; j++) { for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = col + row * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. 
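  // Each group of threads_per_element consecutive threads cooperatively computes one
  // diagonal entry of M * N, i.e. sum_j M(v_idx, j) * N(j, v_idx); the per-thread
  // partial sums are combined by the tree reduction below.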
if (v_idx >= v_dim) return; Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". __syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat(Real* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j< loop_end; j++) { int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * 
sizeof(int) + sizeof(Real) ) + 2 * sizeof(int)); tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here.... } tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t+1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int s) { int i = threadIdx.x; if (i >= s) return; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j < loop_end; j++) { *(data + x[j].row * dim.stride + x[j].column) += alpha * x[j].weight; } } template<typename Real> __global__ static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha, const Int32Pair* indices, const Real* x, int s, Real* data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= s) return; int data_i = indices[i].first * dim.stride + indices[i].second; data[data_i] += alpha * x[i]; } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //row int32_cuda index_mat1 = i + j*mat1_dim.stride; int32_cuda index_mat2 = i + j*mat2_stride; int32_cuda index_mask = i + j*mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 
1.0 : 0.0); } template<typename Real> __global__ static void _vec_sum(Real *v, Real *sum, int dim, int inc) { int i = threadIdx.x; __shared__ Real row_data[CU1DBLOCK]; if (i >= CU1DBLOCK) return; Real tmp_sum = 0; int size = dim / CU1DBLOCK; //the least size in a loop (later part) int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i+1) * size; } for(int j = loop_start; j< loop_end; j++) { tmp_sum += v[j * inc]; } row_data[threadIdx.x] = tmp_sum; __syncthreads(); *sum = _sum_reduce(row_data); } template<typename Real> __global__ static void _pvec_sum(Real* v, Real* g, int dim, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int start = size * i; if (start >= dim) return; int end = start + size; if (end > dim) end = dim; __shared__ Real row_data[CU1DBLOCK]; Real sum = 0; for (int j = start; j < end; j++) sum += v[j]; row_data[threadIdx.x] = sum; __syncthreads(); g[blockIdx.x] = _sum_reduce(row_data); } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] > ceiling_val) { v[i] = ceiling_val; count[i] = 1; } else { count[i] = 0; } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. [dan] template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. 
[dan] template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0); } } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[j], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = i * src_stride + reorder[j]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _add_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[j], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = i * src_stride + reorder[j]; Real val = src[src_index]; dst[dst_index] += val; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. 
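  // A negative reorder[i] means "no source row": the corresponding destination row
  // is filled with zeros instead of being copied.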
int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[i], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = reorder[i] * src_stride + j; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int dst_index = i * dst_dim.stride + j; if (src[i] != NULL) { dst[dst_index] = src[i][j]; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_to_rows(Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < src_dim.rows && j < src_dim.cols) { if (dst[i] != NULL) { dst[i][j] = src[i * src_dim.stride + j]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int dst_index = i * dst_dim.stride + j; if (reorder[i] >= 0) { int src_index = reorder[i] * src_stride + j; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int dst_index = i * dst_dim.stride + j; if (src[i] != NULL) { dst[dst_index] += alpha * src[i][j]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < src_dim.rows && j < src_dim.cols) { if (dst[i] != NULL) { dst[i][j] += alpha * src[i * src_dim.stride + j]; } } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows ) { if (mat[index] > ceiling_val) mat[index] = ceiling_val; } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j*d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0/data[index]; } // matrix-wise, do data = alpha * data + beta * A * B^T, // where B is a block matrix. template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. 
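  // Treating the j'th block as B^T amounts to swapping its row/column offsets and
  // dimensions (taken from matrix_dim); each dot product below is then between a
  // row of A and a column of B^T.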
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. 
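  // The update performed here is
  //   B_block(i, j) = alpha * dot(row B_row of C, column B_col of D) + beta * B_block(i, j),
  // where (B_row, B_col) is this element's position in the full block matrix B.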
Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } template<typename Real> __global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } // Since this is a newer kernel, x is the row-index and y is the // column-index. 
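// Each output element sums a half-open range of source columns:
//   data(row, col) = sum_{c = indices[col].first}^{indices[col].second - 1} src(row, c).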
template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indexes) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col; int src_index_start = indexes[row].first, src_index_end = indexes[row].second; for (int row_index = src_index_start; row_index < src_index_end; row_index++) data[dst_index] += src_data[row_index * src_dim.stride + col]; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; // compute the function y[index] = log(1 + exp(x[index])) if(i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index ++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; Real max_abs_value = (max_value > -min_value ? 
max_value : -min_value); // let max_value be the // largest abs(value) if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } template<typename Real> __global__ static void _group_max(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; int src_begin_index = i * group_size + j * src_stride; Real max_value = -1e20; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { if (!isnan(x[src_index]) && x[src_index] > max_value) max_value = x[src_index]; } y[dst_index] = max_value; } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. 
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _log_softmax_reduce(Real *y, const Real *x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; // Maximum step 1: loads input data to <aux>. If <d.cols> is larger than // <blockDim.x>, then we do a first pass filtering and only // keep a <blockDim.x> size array. aux[threadIdx.x] = x[threadIdx.x + j * d.stride]; for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x + i * THREADS + j * d.stride]) aux[threadIdx.x] = x[threadIdx.x + i * THREADS + j * d.stride]; } // Maximum step 2: the standard max reduce. int nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x + halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real max = aux[0]; __syncthreads(); // Log sum step 1: substracts max, and takes exponentials. y[threadIdx.x + j * d.stride] = x[threadIdx.x + j * d.stride] - max; aux[threadIdx.x] = exp(y[threadIdx.x + j * d.stride]); for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] = x[threadIdx.x + i * THREADS + j * d.stride] - max; aux[threadIdx.x] += exp(y[threadIdx.x + i * THREADS + j * d.stride]); } } // Log sum step 2: comptes summation and then takes logarithm. nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real log_sum = log(aux[0]); __syncthreads(); // Computes log softmax. 
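  // i.e. y(i) = x(i) - max - log(sum_j exp(x(j) - max)), the numerically stable
  // form of log(softmax(x))(i).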
for (int i = 0; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] -= log_sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dim.rows && j < dim.cols) { int dst_index = i * dim.stride + j, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. 
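      // The packed symmetric matrix stores only the lower triangle, so elements
      // above the diagonal are read from the mirrored (j, i) position.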
src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, 
MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_copy_col_from_vec(int Gr, int Bl, float* mat, const float* v, int col, MatrixDim d) { hipLaunchKernelGGL(( _copy_col_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat,v,col,d); } void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst, const float* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim); } void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const* dst, const float* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim); } void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( 
_add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim); } void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const* dst, const float* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( 
_calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaF_vec_min(const float* v, float* value, int dim) { hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaF_vec_max(const float* v, float* value, int dim) { hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans<float,4>) , dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat<float,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc); } void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) { hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v, pvec_sum, dim, size); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int s) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, s); } void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) { hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, 
MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v, ceiling_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _group_max), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const 
float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_log_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaF_one(int Gr, int Bl, float* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaF_copy_col_from_mat(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( 
_sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); } void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) { hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA); } void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaD_copy_col_from_vec(int Gr, int Bl, double* mat, const double* v, int col, MatrixDim d) { hipLaunchKernelGGL(( _copy_col_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat,v,col,d); } void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _transpose_matrix), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { 
hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim); } void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const* dst, const double* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim); } void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const* src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim); } void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const* dst, const double* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_max(dim3 
Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _calc_pnorm_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size, power); } void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, d, src_stride, group_size); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _add_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const 
double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_df), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { hipLaunchKernelGGL(( _copy_from_vec_fd), dim3(Gr),dim3(Bl), 0, 0, v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_min), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { hipLaunchKernelGGL(( _vec_max), dim3(1),dim3(CU1DBLOCK), 0, 0, v, value, dim); } void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans<double,4>) , dim3(4),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat<double,2>) , dim3(2),dim3(CU1DBLOCK), 0, 0, A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { hipLaunchKernelGGL(( _add_diag_mat_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_sum), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc); 
} void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { hipLaunchKernelGGL(( _pvec_sum), dim3(Gr),dim3(Bl), 0, 0, v,pvec_sum,dim,size); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int s) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, s); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, double alpha, const Int32Pair* indices, const double* x, int s, double* data) { hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _group_max), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, 
y, x, d, src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_log_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, voff, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, 
src_dim, indices); } void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. */ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); } void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),dim3(Bl), 0, 0, 
mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_as_vec_ff(dim3 Gr, dim3 Bl, float* vec_out, const MatrixElement<float>* smat_in, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_as_vec), dim3(Gr),dim3(Bl), 0, 0, vec_out, smat_in, d_in); } void cuda_copy_from_smat_as_vec_fd(dim3 Gr, dim3 Bl, float* vec_out, const MatrixElement<double>* smat_in, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_as_vec), dim3(Gr),dim3(Bl), 0, 0, vec_out, smat_in, d_in); } void cuda_copy_from_smat_as_vec_df(dim3 Gr, dim3 Bl, double* vec_out, const MatrixElement<float>* smat_in, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_as_vec), dim3(Gr),dim3(Bl), 0, 0, vec_out, smat_in, d_in); } void cuda_copy_from_smat_as_vec_dd(dim3 Gr, dim3 Bl, double* vec_out, const MatrixElement<double>* smat_in, MatrixIndexT_cuda d_in) { hipLaunchKernelGGL(( _copy_from_smat_as_vec), dim3(Gr),dim3(Bl), 0, 0, vec_out, smat_in, d_in); } void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr),dim3(Bl), 0, 0, mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); }
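// A minimal host-side sketch of how the float wrappers above can be driven.
// It assumes the declarations pulled in by "cudamatrix/cu-kernels-ansi.h"
// (a MatrixDim with rows/cols/stride fields and the cudaF_* prototypes) are
// visible to the caller; the 16x16 block shape, the unpadded stride, and the
// single-element readback below are illustrative choices, not values taken
// from the real callers.
#include <cstdio>
#include <hip/hip_runtime.h>
#include "cudamatrix/cu-kernels-ansi.h"  // assumed to declare MatrixDim and cudaF_set_const

int main() {
  const int rows = 100, cols = 40;

  MatrixDim d;        // row-major layout, as the kernels above index it
  d.rows = rows;
  d.cols = cols;
  d.stride = cols;    // no row padding in this sketch

  float *dev = NULL;
  hipMalloc((void**)&dev, sizeof(float) * rows * d.stride);

  // _set_const maps blockIdx.x/threadIdx.x to the column index and
  // blockIdx.y/threadIdx.y to the row index, so the grid covers columns
  // along x and rows along y.
  dim3 block(16, 16);  // illustrative block shape
  dim3 grid((cols + block.x - 1) / block.x,
            (rows + block.y - 1) / block.y);
  cudaF_set_const(grid, block, dev, 1.0f, d);

  hipError_t err = hipGetLastError();
  if (err != hipSuccess)
    fprintf(stderr, "launch failed: %s\n", hipGetErrorString(err));
  hipDeviceSynchronize();

  // Read one element back just to confirm the fill.
  float probe = 0.0f;
  hipMemcpy(&probe, dev, sizeof(float), hipMemcpyDeviceToHost);
  printf("mat[0][0] = %f\n", probe);

  hipFree(dev);
  return 0;
}
// Note: the vector wrappers (e.g. cudaF_vec_sum) take plain int grid/block
// counts instead of dim3, matching the 1D kernels they launch.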
8709ddbd3d32728b3d4fd7e80f4338813ec61489.cu
// cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013-2015 Guoguo Chen // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include "cudamatrix/cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if(threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static Real _min_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (min) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp < buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two } // the result return buffer[0]; } template<typename Real> __device__ static Real _max_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (max) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) { Real temp = buffer[threadIdx.x + halfPoint]; if (temp > buffer[threadIdx.x]) buffer[threadIdx.x] = temp; } } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } template<typename Real> __device__ static int32_cuda _max_id_reduce(Real val[], int32_cuda idx[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (get index of maximum) while(nTotalThreads > 1) { int32_cuda halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. 
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread Real temp = -1e20; if(threadIdx.x+halfPoint < nTotalThreads) { temp = val[idx[threadIdx.x + halfPoint]]; } if (temp > val[idx[threadIdx.x]]) idx[threadIdx.x]=idx[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } // the result return idx[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.y * blockDim.y + threadIdx.y; // row index int j = blockIdx.x * blockDim.x + threadIdx.x; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * vec[i] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (i * (i+1) / 2) + j; int32_cuda index_A = i * dmat.stride + j; if (j <= i) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j+1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } // for this kernel, following the newer pattern, the x-dim is the row-index, the // y-dim is the col-index. template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index. 
int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = j + i * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } // for this kernel, the x-dim is the row-index at the output, the y-dim is the // col-index at the output template<typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index out int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index out int32_cuda index_out = j + i * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.rows && j < d_out.cols) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].row * d_out.stride + smat_in[smat_index].column; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_trans(Real* mat_out, const MatrixElement<OtherReal>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; int data_index = smat_in[smat_index].column * d_out.stride + smat_in[smat_index].row; mat_out[data_index] = smat_in[smat_index].weight; } template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_as_vec(Real* vec_out, const MatrixElement<OtherReal>* smat_in, MatrixIndexT_cuda d_in) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= d_in) return; vec_out[smat_index] = smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat_trans(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].row * mat_d_in.stride + smat_in[smat_index].column; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _trace_mat_smat(const Real* mat_in, const MatrixElement<Real>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, Real* trace_vec_out) { int smat_index = blockIdx.x * blockDim.x + threadIdx.x; if (smat_index >= smat_d_in) return; int mat_index = smat_in[smat_index].column * mat_d_in.stride + smat_in[smat_index].row; trace_vec_out[smat_index] = mat_in[mat_index] * smat_in[smat_index].weight; } template<typename Real> __global__ static void _transpose_matrix(Real* mat, MatrixDim d) { // Transposes a square matrix in-place. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j >= i || i >= d.rows) { return; } // Only half the threads act. 
int32_cuda index_a = j + i * d.stride, index_b = i + j * d.stride; Real a = mat[index_a], b = mat[index_b]; mat[index_a] = b; mat[index_b] = a; } template<typename Real> __global__ static void _copy_col_from_vec(Real* mat, const Real* v, int col, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < d.rows ) { int32_cuda index = col + i * d.stride; mat[index] = v[i]; } } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if ( i < d.cols && j < d.rows ) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i*d.stride; if ( i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1)*(i+2)/2) - 1; if ( i < dim ) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ 
static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] / A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j*dst_d.stride, src_index = i + j*src_stride; if ( i < dst_d.cols && j < dst_d.rows ) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = (a > b ? a : b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } /// y is the derivative we will output; vec is the input we're computing /// the group p-norm on, "norm" is the previously computed group p-norm. template<typename Real> __global__ static void _calc_pnorm_deriv(Real *deriv, const Real *vec, const Real *norm, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. norm_element = norm[src_index]; // this is the pnorm Real vec_element_sign = (vec_element > 0 ? 1 : -1); Real ans; if (norm_element <= 0.0) ans = 0.0; // The derivative is either zero or undefined at the origin. else ans = vec_element_sign * pow(std::abs(vec_element), power - 1) * pow(norm_element, 1 - power); deriv[dst_index] = ans; } } /// deriv is the derivative we will output; vec is the input we're computing /// the group max on, "maxv" is the previously computed group max. template<typename Real> __global__ static void _calc_group_max_deriv(Real *deriv, const Real *vec, const Real *maxv, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols ) { int dst_index = i + j * d.stride, src_index = i / group_size + j * src_stride; Real vec_element = vec[dst_index], // this is the element of the original vector. 
max_element = maxv[src_index]; // this is the max value Real ans = (max_element == vec_element ? 1.0 : 0.0); deriv[dst_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (j >= d.rows ) return; //invert divider in shared memory __shared__ Real inv[16]; if(threadIdx.x==0) { inv[threadIdx.y] = 1.0/vec_div[j]; } __syncthreads(); //multiply elements if (i < d.cols && j < d.rows) mat[index] *= inv[threadIdx.y]; } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; int32_cuda index_src = i + j*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j *d.stride; int32_cuda index_src = j + i*src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha*src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_blocks(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.rows + q * d.cols] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_blocks_trans(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.cols + q * d.rows] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, a_index = i + j*stride_a, b_index = i + j*stride_b, c_index = i + j*stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, // this function does S = beta S + alpha * L^T L. 
This is used in PSD matrix inversion. // The i index is the row of the destination S and the j the column (although of // course the output is symmetric so it doesn't matter in a sense). The main point // of this is to make use of various symmetries and zero-ness. template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*col[j] + beta*dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha*row[i] + beta*dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*dmat.stride; int32_cuda index2 = i + j*dmask.stride; if ( i < dmat.cols && j < dmat.rows ) if(mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { // Note from Dan: in this kernel, we make the x dimension correspond to the // row index and y to the column index. That was not always the case for // earlier kernels written by others. int i = blockIdx.x * blockDim.x + threadIdx.x; // row index int j = blockIdx.y * blockDim.y + threadIdx.y; // column index int index = i * mat_dim.stride + j, index2 = i * mat2_row_stride + j * mat2_col_stride; if (i < mat_dim.rows && j < mat_dim.cols) { mat[index] = alpha * mat2[index2] * vec[j] + beta * mat[index]; } } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j*dim.stride; int32_cuda srcA_index = i + j*srcA_stride; int32_cuda srcB_index = i + j*srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index] ; } } /* * CuVector */ // very limited application! 
template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { Real ratio = a[i] / param_3; if ( ( ratio < 0.0 ) || ( ratio >= 1.01 )) { *flag = 1; return; } if ( ratio < param_1 ) { Real factor = ((param_1/ratio) > param_2) ? param_2 : (param_1/ratio); v[i] = v[i] / factor; } else if ( ratio > param_1 ) { Real factor = ((ratio/param_1) > param_2) ? param_2 : (ratio/param_1); v[i] = v[i] * factor; } } } template<typename Real> __global__ static void _copy_from_vec_df(double* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v_out[i] = (double) v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". the dimension of v_in should be equal to the #columns of m_out. In // this kernel, following the new pattern, x corresponds to row-index and y to // column-index. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row index. int j = blockIdx.y * blockDim.y + threadIdx.y; // column index. if (i < d.rows && j < d.cols) { int index = i * d.stride + j; m_out[index] = v_in[j]; } } template<typename Real> __global__ static void _copy_from_vec_fd(float* v_out, const Real* v_in, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if ( i < dim) { v_out[i] = (float) v_in[i]; } } template<typename Real> __global__ static void _vec_min(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= CU1DBLOCK) return; __shared__ Real row_data[CU1DBLOCK]; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real min = 1.0 / 0.0; // infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j < min) min = v_j; } row_data[i] = min; __syncthreads(); //get the sum *value = _min_reduce(row_data); } template<typename Real> __global__ static void _vec_max(const Real* v, Real* value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.y > 0) return; __shared__ Real row_data[CU1DBLOCK]; if(i >= CU1DBLOCK) return; int block_size = (dim + CU1DBLOCK - 1) / CU1DBLOCK; Real max = -1.0 / 0.0; // -infinity. for (int j = i * block_size; j < (i+1) * block_size && j < dim; j++) { Real v_j = v[j]; if (v_j > max) max = v_j; } row_data[i] = max; __syncthreads(); //get the sum *value = _max_reduce(row_data); } // _trace_mat_mat expects to be called with 1 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 0]. 
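// Usage sketch (illustrative, based on the float wrapper further down in this
// file): the kernel is instantiated with a compile-time block count and
// launched with that many blocks, e.g.
//   _trace_mat_mat<float, 2><<<2, CU1DBLOCK>>>(A, B, dA, B_stride, value);
// `value` must then hold one partial sum per block, and the caller is expected
// to add those partial sums to obtain tr(A B).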
template<typename Real, int num_blocks> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; int block_size = (num_elements + num_threads - 1) / num_threads; int loop_start = i * block_size, loop_end = (i + 1) * block_size; if (loop_end > num_elements) loop_end = num_elements; Real sum = 0.0; for (int j = loop_start; j < loop_end; j++) { // for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = row + col * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // _trace_mat_mat_trans expects to be called with 4 blocks, each of dimension // CU1DBLOCK. Each block outputs a partial sum to value[blockIdx.x], // i.e. value[0 through 3]. template<typename Real, int num_blocks> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if(blockIdx.x > num_blocks || threadIdx.x > CU1DBLOCK) return; int num_elements = dA.rows * dA.cols, num_threads = CU1DBLOCK * num_blocks; // int block_size = (num_elements + num_threads - 1) / num_threads; // int loop_start = i * block_size, loop_end = (i + 1) * block_size; // if (loop_end > num_elements) // loop_end = num_elements; Real sum = 0.0; // for (int j = loop_start; j < loop_end; j++) { for (int j = i; j < num_elements; j += num_threads) { int row = j / dA.cols, col = j % dA.cols; // "row" is row-index in A, "col" is // col-index in A; in B, it's reversed. int index_A = col + row * dA.stride, index_B = col + row * B_stride; sum += A[index_A] * B[index_B]; } __shared__ Real row_data[CU1DBLOCK]; row_data[threadIdx.x] = sum; __syncthreads(); Real ans = _sum_reduce(row_data); if (threadIdx.x == 0) value[blockIdx.x] = ans; } // Adds diag(M N) to v, where M and N are matrices. We supply row_stride and // col_stride arguments for M and N, and swapping them allows us to transpose // those matrices. Note: we imagine row-major indexing here, just like Kaldi // and CBLAS (but unlike CUBLAS). // This kernel expects the blockDim to be (CU1DBLOCK, 1) and the // gridDim times CU1DBLOCK to be at least num-rows-of-v * threads_per_element. // threads_per_element should be a power of 2. template<typename Real> __global__ static void _add_diag_mat_mat( Real alpha, Real* v, int v_dim, const Real* M, int M_cols, int M_row_stride, int M_col_stride, const Real *N, int N_row_stride, int N_col_stride, int threads_per_element, Real beta) { // we actually assume blockDim.x == CU1DBLOCK here. // Each diagonal element of v is processed by "threads_per_element" threads. __shared__ Real temp_data[CU1DBLOCK]; int i = blockIdx.x * blockDim.x + threadIdx.x; int v_idx = i / threads_per_element, // v_idx is the index into v that we are supposed to sub_idx = i % threads_per_element; // add to; 0 <= sub_idx < threads_per_element tells // us which block of elements we sum up. 
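  // Illustration of the decomposition: with threads_per_element == 4, threads
  // i = 8..11 all work on v[2]; sub_idx 0 sums columns 0,4,8,..., sub_idx 1
  // sums columns 1,5,9,..., and the partial sums are combined by the tree
  // reduction further below.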
if (v_idx >= v_dim) return; Real sum = 0.0; for (int j = sub_idx; j < M_cols; j += threads_per_element) { int M_index = v_idx * M_row_stride + j * M_col_stride, N_index = j * N_row_stride + v_idx * N_col_stride; sum += M[M_index] * N[N_index]; } temp_data[threadIdx.x] = sum; // start_idx = threadIdx.x - sub_idx; // start of the position in temp_data // that we want to sum up. // The following is a tree-based reduction of the elements of temp_data from // start_idx to start_idx + threads_per_element - 1; our own index is "sub_idx". __syncthreads(); int num_total_threads = threads_per_element; while (num_total_threads > 1) { int half_point = ((1 + num_total_threads) >> 1); if (sub_idx < half_point) { Real temp = 0.0; if (sub_idx + half_point < num_total_threads) { temp = temp_data[threadIdx.x + half_point]; } temp_data[threadIdx.x] += temp; } __syncthreads(); num_total_threads = half_point; } if (sub_idx == 0) { v[v_idx] = beta * v[v_idx] + alpha * temp_data[threadIdx.x]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat(Real* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j< loop_end; j++) { int m = (x + j)->row; //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int label = (x + j)->column; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); Real weight = (x + j)->weight; //*(Real*) ((size_t)x + j * (2 * 
sizeof(int) + sizeof(Real) ) + 2 * sizeof(int)); tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); *(z2 + m * d2.stride + label ) += weight / this_prob;// there might be problems here.... } tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t+1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int s) { int i = threadIdx.x; if (i >= s) return; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i*size; loop_end = threshold + (i+1)*size; } for(int j = loop_start; j < loop_end; j++) { *(data + x[j].row * dim.stride + x[j].column) += alpha * x[j].weight; } } template<typename Real> __global__ static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha, const Int32Pair* indices, const Real* x, int s, Real* data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= s) return; int data_i = indices[i].first * dim.stride + indices[i].second; data[data_i] += alpha * x[i]; } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; //col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; //row int32_cuda index_mat1 = i + j*mat1_dim.stride; int32_cuda index_mat2 = i + j*mat2_stride; int32_cuda index_mask = i + j*mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 
1.0 : 0.0); } template<typename Real> __global__ static void _vec_sum(Real *v, Real *sum, int dim, int inc) { int i = threadIdx.x; __shared__ Real row_data[CU1DBLOCK]; if (i >= CU1DBLOCK) return; Real tmp_sum = 0; int size = dim / CU1DBLOCK; //the least size in a loop (later part) int threshold = dim - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if(i < threshold) { loop_start = i * (size + 1); loop_end = (i+1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i+1) * size; } for(int j = loop_start; j< loop_end; j++) { tmp_sum += v[j * inc]; } row_data[threadIdx.x] = tmp_sum; __syncthreads(); *sum = _sum_reduce(row_data); } template<typename Real> __global__ static void _pvec_sum(Real* v, Real* g, int dim, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int start = size * i; if (start >= dim) return; int end = start + size; if (end > dim) end = dim; __shared__ Real row_data[CU1DBLOCK]; Real sum = 0; for (int j = start; j < end; j++) sum += v[j]; row_data[threadIdx.x] = sum; __syncthreads(); g[blockIdx.x] = _sum_reduce(row_data); } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim) { if ( v[i] > ceiling_val) { v[i] = ceiling_val; count[i] = 1; } else { count[i] = 0; } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. [dan] template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } // Caution, here i/block{idx,dim}.x is the row index and j/block{idx,dim}.y is the col index. // this is for no reason, really, I just happened to prefer this // at the time. 
[dan] template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i * d.stride + j; if (i < d.rows && j < d.cols) { mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0); } } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (mat[index] < floor_val) mat[index] = floor_val; } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[j], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = i * src_stride + reorder[j]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _add_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[j], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = i * src_stride + reorder[j]; Real val = src[src_index]; dst[dst_index] += val; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { // Note: in this kernel, the x dimension corresponds to rows and the y to columns, // as it will be going forward. 
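  // reorder[i] is the source row to copy into destination row i; a negative
  // entry means "no source row", and that destination row is zeroed instead.
  // E.g. (illustrative) reorder = {2, 0, -1} copies src row 2 into dst row 0,
  // src row 0 into dst row 1, and fills dst row 2 with zeros.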
int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int index = reorder[i], dst_index = i * dst_dim.stride + j; if (index >= 0) { int src_index = reorder[i] * src_stride + j; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int dst_index = i * dst_dim.stride + j; if (src[i] != NULL) { dst[dst_index] = src[i][j]; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_to_rows(Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < src_dim.rows && j < src_dim.cols) { if (dst[i] != NULL) { dst[i][j] = src[i * src_dim.stride + j]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int dst_index = i * dst_dim.stride + j; if (reorder[i] >= 0) { int src_index = reorder[i] * src_stride + j; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dst_dim.rows && j < dst_dim.cols) { int dst_index = i * dst_dim.stride + j; if (src[i] != NULL) { dst[dst_index] += alpha * src[i][j]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* const* dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < src_dim.rows && j < src_dim.cols) { if (dst[i] != NULL) { dst[i][j] += alpha * src[i * src_dim.stride + j]; } } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows ) { if (mat[index] > ceiling_val) mat[index] = ceiling_val; } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j*d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0/data[index]; } // matrix-wise, do data = alpha * data + beta * A * B^T, // where B is a block matrix. template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. 
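  // Since we multiply by B^T, the stored block's row/column offsets and
  // dimensions are swapped below (its columns become BT's rows and vice
  // versa), while the stride remains that of the stored, untransposed block.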
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. 
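  // Each thread produces one element of its block as the dot product of row
  // B_row of C with column B_col of D (C_num_cols terms), then applies the
  // usual update *B_elem = alpha * sum + beta * (previous value).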
Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } template<typename Real> __global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // Cast from void; // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } // Since this is a newer kernel, x is the row-index and y is the // column-index. 
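// Each output element (row, col) is the sum of src row `row` over the
// half-open column range [indices[col].first, indices[col].second).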
template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indexes) { int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col; int src_index_start = indexes[row].first, src_index_end = indexes[row].second; for (int row_index = src_index_start; row_index < src_index_end; row_index++) data[dst_index] += src_data[row_index * src_dim.stride + col]; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; // compute the function y[index] = log(1 + exp(x[index])) if(i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index ++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; Real max_abs_value = (max_value > -min_value ? 
max_value : -min_value); // let max_value be the // largest abs(value) if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } template<typename Real> __global__ static void _group_max(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; int src_begin_index = i * group_size + j * src_stride; Real max_value = -1e20; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index ++) { if (!isnan(x[src_index]) && x[src_index] > max_value) max_value = x[src_index]; } y[dst_index] = max_value; } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j*src_stride; if(i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = y[y_index]*(1.0-y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride, src_index = i + j * src_stride; if(i < d.cols && j < d.rows) { Real exp_2x = exp(2.0*x[src_index]); Real res; if(isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j*d.stride; int e_index = i + j*e_stride; int y_index = i + j*y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (1.0 - y[y_index]*y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; //copy input to aux aux[threadIdx.x] = x[threadIdx.x+j*d.stride]; for(int i=1; i<steps; ++i) { if(threadIdx.x+i*THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x+i*THREADS+j*d.stride]) aux[threadIdx.x] = x[threadIdx.x+i*THREADS+j*d.stride]; } //get the maximum value int nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. 
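    // Tree reduction: each active thread folds in the value held by its
    // partner halfPoint slots away, so after ceil(log2(THREADS)) iterations
    // aux[0] holds the row maximum.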
if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x+halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real max = aux[0]; __syncthreads(); // subtract max, apply exp, sum up... y[threadIdx.x+j*d.stride] = exp(x[threadIdx.x+j*d.stride] - max); aux[threadIdx.x] = y[threadIdx.x+j*d.stride]; for(int i=1; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = exp(x[threadIdx.x+i*THREADS+j*d.stride] - max); aux[threadIdx.x] += y[threadIdx.x+i*THREADS+j*d.stride]; } } nTotalThreads = THREADS; __syncthreads(); while(nTotalThreads > 1) { int halfPoint = ((1+nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x < halfPoint) { // Get the shared value stored by another thread if(threadIdx.x+halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1+nTotalThreads) >> 1); // divide by two. } Real sum = aux[0]; __syncthreads(); //normalize by sum... for(int i=0; i<steps; i++) { if(threadIdx.x+i*THREADS < d.cols) { y[threadIdx.x+i*THREADS+j*d.stride] = y[threadIdx.x+i*THREADS+j*d.stride] / sum; } } } template<typename Real> __global__ static void _log_softmax_reduce(Real *y, const Real *x, MatrixDim d, int src_stride) { int j = blockIdx.x; int THREADS = blockDim.x; if (j >= d.rows) return; __shared__ Real aux[CU1DBLOCK]; int steps = (d.cols - 1) / THREADS + 1; // Maximum step 1: loads input data to <aux>. If <d.cols> is larger than // <blockDim.x>, then we do a first pass filtering and only // keep a <blockDim.x> size array. aux[threadIdx.x] = x[threadIdx.x + j * d.stride]; for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols && aux[threadIdx.x] < x[threadIdx.x + i * THREADS + j * d.stride]) aux[threadIdx.x] = x[threadIdx.x + i * THREADS + j * d.stride]; } // Maximum step 2: the standard max reduce. int nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads && aux[threadIdx.x] < aux[threadIdx.x + halfPoint]) aux[threadIdx.x] = aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real max = aux[0]; __syncthreads(); // Log sum step 1: substracts max, and takes exponentials. y[threadIdx.x + j * d.stride] = x[threadIdx.x + j * d.stride] - max; aux[threadIdx.x] = exp(y[threadIdx.x + j * d.stride]); for (int i = 1; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] = x[threadIdx.x + i * THREADS + j * d.stride] - max; aux[threadIdx.x] += exp(y[threadIdx.x + i * THREADS + j * d.stride]); } } // Log sum step 2: comptes summation and then takes logarithm. nTotalThreads = THREADS; __syncthreads(); while (nTotalThreads > 1) { int halfPoint = ((1 + nTotalThreads) >> 1); if (threadIdx.x < halfPoint) { if (threadIdx.x + halfPoint < nTotalThreads) aux[threadIdx.x] += aux[threadIdx.x + halfPoint]; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); } Real log_sum = log(aux[0]); __syncthreads(); // Computes log softmax. 
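  // y already holds x - max for this row, so subtracting log_sum below yields
  //   y = (x - max) - log(sum_k exp(x_k - max)) = x - log(sum_k exp(x_k)),
  // i.e. the log-softmax of the row.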
for (int i = 0; i < steps; ++i) { if (threadIdx.x + i * THREADS < d.cols) { y[threadIdx.x + i * THREADS + j * d.stride] -= log_sum; } } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if(src_row < 0) src_row = 0; if(src_row >= d_in.rows) src_row = d_in.rows-1; y[index] = x[src_col + src_row*d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i+1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j+1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i+1) * (i+2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < dim.rows && j < dim.cols) { int dst_index = i * dim.stride + j, src_index; if (j <= i) { // no transpose src_index = (i * (i+1) / 2) + j; } else { // transpose. 
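      // j > i: read the mirrored element (j, i) of the packed triangular
      // storage, whose linear offset is j*(j+1)/2 + i.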
src_index = (j * (j+1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_col = copy_from[i]; if(src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j*d_in.stride]; } else { y[index] = 1.0/0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < dim ) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d_out.stride; if (i < d_out.cols && j < d_out.rows ) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row*d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j*d.stride, grad_index = i + j*stride_grad; if (i < d.cols && j < d.rows) { if(wei[index]==0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if(wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; Real after = wei[index] -lr*grad[grad_index] -l1_signed;//simulate update if((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(blockIdx.x > 0) return; if(blockDim.y != 1) return; __shared__ Real value[CU1DBLOCK]; __shared__ int32_cuda index[CU1DBLOCK]; //copy to shared memory value[threadIdx.x] = mat[i+j*d.stride]; index[threadIdx.x] = threadIdx.x; __syncthreads(); //get the id of the max value int32_cuda out_max = _max_id_reduce(value, index); __syncthreads(); //see if it's bigger value if(threadIdx.x == 0) { if(vec_val[j] <= mat[out_max+j*d.stride]) { vec_val[j] = mat[out_max+j*d.stride]; vec_id[j] = voff+out_max; } } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if(i>0) return; if(j<d.rows) { int32_cuda index = vec_tgt[j] + j*d.stride; Real value = mat_net_out[index]; if(value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cudaI32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); } void 
cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaF_copy_col_from_vec(int Gr, int Bl, float* mat, const float* v, int col, MatrixDim d) { _copy_col_from_vec<<<Gr,Bl>>>(mat,v,col,d); } void cudaF_transpose_matrix(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _transpose_matrix<<<Gr,Bl>>>(mat, d); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst, const float* const* src, MatrixDim dst_dim) { _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim); } void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const* dst, const float* src, MatrixDim src_dim) { _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim); } void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* const* src, MatrixDim dst_dim) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim); } void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const* dst, const float* src, MatrixDim src_dim) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void 
cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_calc_pnorm_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size, float power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim d, int src_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void 
cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaF_copy_from_vec_df(int Gr, int Bl, double* v_out, const float* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_copy_from_vec_fd(int Gr, int Bl, float* v_out, const float* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaF_vec_min(const float* v, float* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_vec_max(const float* v, float* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaF_trace_mat_mat_trans(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat_trans<float,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat<float,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat(int Gr, int Bl, float alpha, float* v, int v_dim, const float* M, int M_cols, int M_row_stride, int M_col_stride, const float *N, int N_row_stride, int N_col_stride, int threads_per_element, float beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v, value, dim, inc); } void cudaF_pvec_sum(int Gr, int Bl, float* v, float* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v, pvec_sum, dim, size); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int s) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, s); } void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, 
MatrixDim d, float* z2, MatrixDim d2, float* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr,dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v, ceiling_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _group_max<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_sigmoid (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_tanh (dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_tanh (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_log_softmax_reduce (size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void 
cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaF_one(int Gr, int Bl, float* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { _copy_from_sp<<<Gr,Bl>>>(x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaF_copy_col_from_mat(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA); } void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA); } void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int 
mat2_row_stride, int mat2_col_stride, double beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaD_copy_col_from_vec(int Gr, int Bl, double* mat, const double* v, int col, MatrixDim d) { _copy_col_from_vec<<<Gr,Bl>>>(mat,v,col,d); } void cudaD_transpose_matrix(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _transpose_matrix<<<Gr,Bl>>>(mat, d); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const* src, MatrixDim dst_dim) { _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim); } void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const* dst, const double* src, MatrixDim src_dim) { _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim); } void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const* src, MatrixDim dst_dim) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim); } void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const* dst, const double* src, MatrixDim src_dim) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { 
_add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_calc_pnorm_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size, double power) { _calc_pnorm_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size, power); } void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim d, int src_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, d, src_stride, group_size); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _add_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double 
alpha, const double* row, double beta, double* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaD_copy_from_vec_df(int Gr, int Bl, double* v_out, const double* v_in, int dim) { _copy_from_vec_df<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_copy_from_vec_fd(int Gr, int Bl, float* v_out, const double* v_in, int dim) { _copy_from_vec_fd<<<Gr,Bl>>>(v_out,v_in,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaD_vec_min(const double* v, double* value, int dim) { _vec_min<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_vec_max(const double* v, double* value, int dim) { _vec_max<<<1,CU1DBLOCK>>>(v, value, dim); } void cudaD_trace_mat_mat_trans(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat_trans<double,4> <<<4,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat<double,2> <<<2,CU1DBLOCK>>>(A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat(int Gr, int Bl, double alpha, double* v, int v_dim, const double* M, int M_cols, int M_row_stride, int M_col_stride, const double *N, int N_row_stride, int N_col_stride, int threads_per_element, double beta) { _add_diag_mat_mat<<<Gr,Bl>>>(alpha, v, v_dim, M, M_cols, M_row_stride, M_col_stride, N, N_row_stride, N_col_stride, threads_per_element, beta); } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { _vec_sum<<<Gr,Bl>>>(v,value,dim,inc); } void cudaD_pvec_sum(int Gr, int Bl, double* v, double* pvec_sum, int dim, int size) { _pvec_sum<<<Gr,Bl>>>(v,pvec_sum,dim,size); } void 
cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int s) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, s); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, double alpha, const Int32Pair* indices, const double* x, int s, double* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _group_max<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_sigmoid (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_sigmoid (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_tanh (dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_tanh (dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void 
cudaD_log_softmax_reduce (size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { _copy_from_sp<<<Gr,Bl>>>(x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d,int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, voff, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* Some conversion kernels for which it's more convenient to not name them F or D. 
*/ void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<float>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat_out, const MatrixElement<double>* smat_in, MatrixDim d_out, MatrixIndexT_cuda d_in) { _copy_from_smat_trans<<<Gr,Bl>>>(mat_out, smat_in, d_out, d_in); } void cuda_copy_from_smat_as_vec_ff(dim3 Gr, dim3 Bl, float* vec_out, const MatrixElement<float>* smat_in, MatrixIndexT_cuda d_in) { _copy_from_smat_as_vec<<<Gr,Bl>>>(vec_out, smat_in, d_in); } void cuda_copy_from_smat_as_vec_fd(dim3 Gr, dim3 Bl, float* vec_out, const MatrixElement<double>* smat_in, MatrixIndexT_cuda d_in) { _copy_from_smat_as_vec<<<Gr,Bl>>>(vec_out, smat_in, d_in); } void cuda_copy_from_smat_as_vec_df(dim3 Gr, 
dim3 Bl, double* vec_out, const MatrixElement<float>* smat_in, MatrixIndexT_cuda d_in) { _copy_from_smat_as_vec<<<Gr,Bl>>>(vec_out, smat_in, d_in); } void cuda_copy_from_smat_as_vec_dd(dim3 Gr, dim3 Bl, double* vec_out, const MatrixElement<double>* smat_in, MatrixIndexT_cuda d_in) { _copy_from_smat_as_vec<<<Gr,Bl>>>(vec_out, smat_in, d_in); } void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { _trace_mat_smat<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat_in, const MatrixElement<float>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, float* trace_vec_out) { _trace_mat_smat_trans<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { _trace_mat_smat<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat_in, const MatrixElement<double>* smat_in, MatrixDim mat_d_in, MatrixIndexT_cuda smat_d_in, double* trace_vec_out) { _trace_mat_smat_trans<<<Gr,Bl>>>(mat_in, smat_in, mat_d_in, smat_d_in, trace_vec_out); }
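/*
 * Illustrative sketch (not part of the file above): how one of these ANSI-C wrappers is
 * typically driven from host code. The MatrixDim field layout and the 16x16 block shape are
 * assumptions for the example; the wrapper signature and the kernels' bounds checks against
 * d.cols/d.rows are the only things taken from the code above.
 */
// struct MatrixDim { int rows; int cols; int stride; };  // assumed layout, normally from a shared header
static void example_set_const(float *d_mat, MatrixDim d, float value) {
  dim3 Bl(16, 16);                             // x covers columns, y covers rows
  dim3 Gr((d.cols + Bl.x - 1) / Bl.x,          // ceiling division so the grid
          (d.rows + Bl.y - 1) / Bl.y);         //   covers the whole matrix
  cudaF_set_const(Gr, Bl, d_mat, value, d);    // out-of-range threads exit inside the kernel
  cudaDeviceSynchronize();                     // wait; check the return value to catch launch errors
}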
762bdf9f4ba4ddde0f6f67f9a755310312852aec.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// One thread per source element. Element (row, col) of an ld_src-wide row is written to
// row*ld_src + (col % 8)*ld_dest + col/8, i.e. each row is effectively reinterpreted as an
// (ld_src/8) x 8 block and written out transposed with leading dimension ld_dest.
__global__ void transformation(size_t num_values, float_t* src, float_t* dest, size_t ld_src, size_t ld_dest)
{
    size_t index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < num_values) {
        size_t dest_index = (index/ld_src)*ld_src + ((index%ld_src)%8)*ld_dest + (index%ld_src)/8;
        dest[dest_index] = src[index];
    }
}
762bdf9f4ba4ddde0f6f67f9a755310312852aec.cu
#include "includes.h"

// One thread per source element. Element (row, col) of an ld_src-wide row is written to
// row*ld_src + (col % 8)*ld_dest + col/8, i.e. each row is effectively reinterpreted as an
// (ld_src/8) x 8 block and written out transposed with leading dimension ld_dest.
__global__ void transformation(size_t num_values, float_t* src, float_t* dest, size_t ld_src, size_t ld_dest)
{
    size_t index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < num_values) {
        size_t dest_index = (index/ld_src)*ld_src + ((index%ld_src)%8)*ld_dest + (index%ld_src)/8;
        dest[dest_index] = src[index];
    }
}
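/*
 * Minimal host driver for the transformation kernel above (an illustrative sketch, not part of
 * the original file). Assumptions: float_t is float (normally supplied by includes.h), the data
 * is rows x ld_src with ld_dest = ld_src / 8, and the sketch is linked against the kernel above.
 */
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

typedef float float_t;  // assumption: matches the typedef used by includes.h
__global__ void transformation(size_t num_values, float_t* src, float_t* dest,
                               size_t ld_src, size_t ld_dest);  // defined in the file above

int main() {
  const size_t rows = 4, ld_src = 64, ld_dest = ld_src / 8;  // assumed shape
  const size_t num_values = rows * ld_src;

  std::vector<float_t> h_src(num_values);
  for (size_t i = 0; i < num_values; ++i) h_src[i] = (float_t)i;

  float_t *d_src = NULL, *d_dest = NULL;
  cudaMalloc((void **)&d_src, num_values * sizeof(float_t));
  cudaMalloc((void **)&d_dest, num_values * sizeof(float_t));
  cudaMemcpy(d_src, h_src.data(), num_values * sizeof(float_t), cudaMemcpyHostToDevice);

  const unsigned int block = 256;
  const unsigned int grid = (unsigned int)((num_values + block - 1) / block);  // ceiling division
  transformation<<<grid, block>>>(num_values, d_src, d_dest, ld_src, ld_dest);

  std::vector<float_t> h_dest(num_values);
  cudaMemcpy(h_dest.data(), d_dest, num_values * sizeof(float_t), cudaMemcpyDeviceToHost);
  printf("dest[0..3] = %.0f %.0f %.0f %.0f\n", h_dest[0], h_dest[1], h_dest[2], h_dest[3]);

  cudaFree(d_src);
  cudaFree(d_dest);
  return 0;
}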
c8501370afe680545c3b838cd085546f9625b228.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "common.hpp" #include "util.hpp" #include "lib/local-string.cuh" #include "lib/sha512crypt.cuh" static bool init(Options *options, Dictionary *dict, FILE **shadow_file, FILE **result_file, int argc, char **argv); static bool init_cuda(); static void cleanup(FILE *shadow_file, FILE *result_file, Dictionary *dict); static void crack(ExtendedCrackResult *result, Options *options, Dictionary *dict, ShadowEntry *entry); static bool prepare_job(CrackJob *job, ShadowEntry *entry, ProbeConfig *config, Options *options, Dictionary *dict); static bool get_next_probe(ProbeConfig *config, Options *options, Dictionary *dict); static void handle_result(Options *options, ExtendedCrackResult *result, OverviewCrackResult *overview_result, FILE *result_file); static void handle_overview_result(Options *options, OverviewCrackResult *overview_result); __global__ static void crack_job(CrackResult results[], CrackJob jobs[]); #define GRID_SIZE 40 // 40 streaming multiprocessors on my GPU, so clean number. May adjust later. // The number of threads per block should be a round multiple of the warp size #define BLOCK_SIZE 32 * 2 // My GPU has 64 CUDA cores, so clean number. May adjust later. #define TOTAL_THREAD_COUNT GRID_SIZE *BLOCK_SIZE #define USED_DEVICE 0 /* * Main entrypoint. */ int main(int argc, char **argv) { Options options = {}; Dictionary dict = {}; FILE *shadow_file = NULL; FILE *result_file = NULL; if (!init(&options, &dict, &shadow_file, &result_file, argc, argv)) { cleanup(shadow_file, result_file, &dict); return EXIT_FAILURE; } // Iterate and crack shadow entries if (!options.quiet) { printf("\nEntries:\n"); } OverviewCrackResult overview_result = {}; ShadowEntry shadow_entry; while (get_next_shadow_entry(&shadow_entry, shadow_file)) { ExtendedCrackResult result; crack(&result, &options, &dict, &shadow_entry); if (result.status == STATUS_ERROR) { fprintf(stderr, "Stopping due to an error.\n"); break; } handle_result(&options, &result, &overview_result, result_file); } // Handle overall result handle_overview_result(&options, &overview_result); cleanup(shadow_file, result_file, &dict); return EXIT_SUCCESS; } /* * Initialize general stuff. */ static bool init(Options *options, Dictionary *dict, FILE **shadow_file, FILE **result_file, int argc, char **argv) { // Parse CLI args if (!parse_cli_args(options, argc, argv)) { return false; } // Print some useful info if (!options->quiet) { printf("Chosen CUDA grid size: %d\n", GRID_SIZE); printf("Chosen CUDA block size: %d\n", BLOCK_SIZE); printf("Total thread count: %d\n", TOTAL_THREAD_COUNT); printf("Max symbols: %ld\n", options->max_length); printf("Symbol separator: \"%s\"\n", options->separator); } // Open shadow file if (!options->quiet) { printf("Shadow file: %s\n", options->shadow_file); } if (!open_file(shadow_file, options->shadow_file, "r")) { return false; } // Open output file if provided if (options->result_file[0] != 0) { if (!options->quiet) { printf("Output file: %s\n", options->result_file); } if (!open_file(result_file, options->result_file, "w")) { return false; } } // Read full directory if (!options->quiet) { printf("Dictionary file: %s\n", options->dict_file); } if (!read_dictionary(dict, options, options->dict_file)) { return false; } // Init CUDA if (!init_cuda()) { return false; } return true; } /* * Initialize CUDA stuff. 
*/ static bool init_cuda() { // Make sure at least one CUDA-capable device exists int device_count; hipGetDeviceCount(&device_count); printf("CUDA device count: %d\n", device_count); if (device_count < 1) { fprintf(stderr, "No CUDA devices present.\n"); return false; } // Print some useful info hipDeviceProp_t prop; hipGetDeviceProperties(&prop, USED_DEVICE); printf("CUDA device #0:\n"); printf("\tName: %s\n", prop.name); printf("\tCompute capability: %d.%d\n", prop.major, prop.minor); printf("\tMultiprocessors: %d\n", prop.multiProcessorCount); printf("\tWarp size: %d\n", prop.warpSize); printf("\tGlobal memory: %.1fGiB bytes\n", prop.totalGlobalMem / 1024.0 / 1024.0 / 1024.0); printf("\tPer-block shared memory: %.1fkiB\n", prop.sharedMemPerBlock / 1024.0); printf("\tPer-block registers: %d\n", prop.regsPerBlock); // Check for any previous errors hipError_t error = hipPeekAtLastError(); if (error) { fprintf(stderr, "A CUDA error has occurred while initializing: %s\n", hipGetErrorString(error)); return false; } return true; } /* * Cleanup stuff. */ static void cleanup(FILE *shadow_file, FILE *result_file, Dictionary *dict) { if (shadow_file) { fclose(shadow_file); } if (result_file) { fclose(result_file); } if (dict->elements) { free(dict->elements); } } /* * Crack a shadow password entry. */ static void crack(ExtendedCrackResult *total_result, Options *options, Dictionary *dict, ShadowEntry *entry) { // Initialize main result early in case of early return memset(total_result, 0, sizeof(ExtendedCrackResult)); strncpy(total_result->user, entry->user, MAX_USER_LENGTH); strncpy(total_result->passfield, entry->passfield, MAX_PASSFIELD_LENGTH); total_result->alg = entry->alg; // Skip if not SHA512 if (entry->alg != ALG_SHA512) { total_result->status = STATUS_SKIP; return; } ProbeConfig config = {}; config.dict_positions = (size_t *)malloc(options->max_length * sizeof(size_t)); config.symbols = (char(*)[MAX_DICT_ELEMENT_LENGTH + 1]) malloc(options->max_length * (MAX_DICT_ELEMENT_LENGTH + 1) * sizeof(char)); int number_of_jobs = TOTAL_THREAD_COUNT; // TODO // Host (CPU) arrays CrackJob *h_crackJobs = (CrackJob *)malloc(number_of_jobs * sizeof(CrackJob)); CrackResult *h_crackResults = (CrackResult *)malloc(number_of_jobs * sizeof(CrackResult)); // Device (GPU) arrays CrackJob *d_crackJobs; CrackResult *d_crackResults; hipMalloc((void **)&d_crackJobs, number_of_jobs * sizeof(CrackJob)); hipMalloc((void **)&d_crackResults, number_of_jobs * sizeof(CrackResult)); // Start time measurement struct timespec start_time, end_time; clock_gettime(CLOCK_MONOTONIC, &start_time); // Try probes until the status changes (when a match is found or the search space is exhausted) while (total_result->status == STATUS_PENDING) { printf("New while loop started\n"); // Prepare new jobs bool more_probes = true; for (size_t i = 0; i < number_of_jobs; i++) { if (!prepare_job(&h_crackJobs[i], entry, &config, options, dict)) { more_probes = false; break; } } // Copy jobs to device hipMemcpy(d_crackJobs, h_crackJobs, number_of_jobs * sizeof(CrackJob), hipMemcpyHostToDevice); // Start kernel hipLaunchKernelGGL(( crack_job), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, d_crackResults, d_crackJobs); // crack_job(d_crackResults, d_crackJobs); // Copy results from device hipMemcpy(h_crackResults, d_crackResults, number_of_jobs * sizeof(CrackResult), hipMemcpyDeviceToHost); // Check for error hipError_t error = hipPeekAtLastError(); if (error) { fprintf(stderr, "A CUDA error has occurred while cracking: %s\n", 
hipGetErrorString(error)); total_result->status = STATUS_ERROR; break; } // Handle results for (size_t i = 0; i < number_of_jobs; i++) { CrackResult *result = &h_crackResults[i]; // Skip if skip if (total_result->status == STATUS_SKIP) { continue; } // Keep track of probes tested total_result->attempts++; // Accept if success (currently the only one it makes sense to stop on) if (result->status == STATUS_SUCCESS) { total_result->status = result->status; strncpy(total_result->password, result->password, MAX_PASSWORD_LENGTH); // Ignore all job results after this one break; } } // Check if search space is exhausted and not match has been found if (!more_probes && total_result->status == STATUS_PENDING) { total_result->status = STATUS_FAIL; } } // End time measurement and record duration clock_gettime(CLOCK_MONOTONIC, &end_time); total_result->duration = ((double)(end_time.tv_sec - start_time.tv_sec)) + ((double)(end_time.tv_nsec - start_time.tv_nsec)) * 1e-9; // Cleanup free(config.dict_positions); free(config.symbols); free(h_crackJobs); free(h_crackResults); hipFree(d_crackJobs); hipFree(d_crackResults); } static bool prepare_job(CrackJob *job, ShadowEntry *entry, ProbeConfig *config, Options *options, Dictionary *dict) { // Zeroize memset(job, 0, sizeof(CrackJob)); bool more_probes = get_next_probe(config, options, dict); if (more_probes) { job->action = ACTION_WORK; strncpy(job->passfield, entry->passfield, MAX_PASSWORD_LENGTH); job->alg = entry->alg; job->salt_end = entry->salt_end; strncpy(job->probe, config->probe, MAX_PASSWORD_LENGTH); } else { job->action = ACTION_WAIT; } if (options->verbose) { printf("%s\n", job->probe); } return more_probes; } /* * Build the next probe. Returns false with an empty probe when the search space is exhausted. */ static bool get_next_probe(ProbeConfig *config, Options *options, Dictionary *dict) { // Check if dict is empty if (dict->length == 0) { return false; } // Find last symbol which can be replaced with the next one, if any exists ssize_t last_replaceable_pos = -1; for (size_t i = 0; i < config->size; i++) { if (config->dict_positions[i] < dict->length - 1) { last_replaceable_pos = i; } } // A symbol can be replaced, replace last one and reset all behind it if (last_replaceable_pos >= 0) { size_t new_dict_pos = config->dict_positions[last_replaceable_pos] + 1; config->dict_positions[last_replaceable_pos] = new_dict_pos; strncpy(config->symbols[last_replaceable_pos], dict->elements[new_dict_pos], MAX_DICT_ELEMENT_LENGTH); for (size_t i = last_replaceable_pos + 1; i < config->size; i++) { config->dict_positions[i] = 0; strncpy(config->symbols[i], dict->elements[0], MAX_DICT_ELEMENT_LENGTH); } } // No symbols can be replaced and no more symbols are allowed, return error else if (config->size == options->max_length) { config->probe[0] = 0; return false; } // New symbol can be added, reset all previous positions and add it else { config->size++; for (size_t i = 0; i < config->size; i++) { config->dict_positions[i] = 0; strncpy(config->symbols[i], dict->elements[0], MAX_DICT_ELEMENT_LENGTH); } } // Build probe config->probe[0] = 0; for (size_t i = 0; i < config->size; i++) { if (i > 0) { strncat(config->probe, options->separator, MAX_PASSWORD_LENGTH); } strncat(config->probe, config->symbols[i], MAX_PASSWORD_LENGTH); } return true; } /* * Handle result from trying to crack a single password. 
*/ static void handle_result(Options *options, ExtendedCrackResult *result, OverviewCrackResult *overview_result, FILE *result_file) { // Make representations char const *alg_str = cryptalg_to_string(result->alg); char const *status_str = crack_result_status_to_string(result->status); double attempts_per_second = result->attempts / result->duration; // Format and print size_t const static max_output_length = 1023; char *output = (char *)malloc(max_output_length + 1); snprintf(output, max_output_length + 1, "user=\"%s\", alg=\"%s\" status=\"%s\" duration=\"%fs\" attempts=\"%ld\" attempts_per_second=\"%f\" password=\"%s\"", result->user, alg_str, status_str, result->duration, result->attempts, attempts_per_second, result->password); if (!options->quiet) { printf("%s\n", output); } if (result_file) { fprintf(result_file, "%s\n", output); fflush(result_file); } free(output); // Update overview overview_result->statuses[result->status]++; overview_result->duration += result->duration; overview_result->attempts += result->attempts; } /* * Handle result from trying to crack all passwords. */ static void handle_overview_result(Options *options, OverviewCrackResult *result) { if (!options->quiet) { printf("\nOverview:\n"); printf("Total duration: %.3fs\n", result->duration); printf("Total attempts: %ld\n", result->attempts); printf("Total attempts per second: %.3f\n", result->attempts / result->duration); printf("Skipped: %ld\n", result->statuses[STATUS_SKIP]); printf("Successful: %ld\n", result->statuses[STATUS_SUCCESS]); printf("Failed: %ld\n", result->statuses[STATUS_FAIL]); } } /* * Hash probe and compare. */ __global__ static void crack_job(CrackResult results[], CrackJob jobs[]) { // TODO set using unique index into arrays int index = blockIdx.x * BLOCK_SIZE + threadIdx.x; CrackResult *result = &results[index]; CrackJob *job = &jobs[index]; // Zeroize result result->status = STATUS_PENDING; result->password[0] = 0; // Nothing to do here if (job->action == ACTION_WAIT) { result->status = STATUS_SKIP; return; } // Only accept SHA512 (redundant check) if (job->alg != ALG_SHA512) { result->status = STATUS_SKIP; return; } // Copy probe into shared memory char *probe = job->probe; // Copy salt part of passfield into shared memory (same value is used by all threads) char salt[MAX_SALT_LENGTH]; local_strncpy(salt, job->passfield, MIN(job->salt_end, MAX_SALT_LENGTH)); salt[job->salt_end] = 0; // Call sha512_crypt_r directly using register buffer char new_passfield[MAX_PASSFIELD_LENGTH + 1]; sha512_crypt_r(probe, salt, new_passfield, MAX_PASSFIELD_LENGTH + 1); if (new_passfield != NULL && local_strneq(job->passfield, new_passfield, MAX_PASSFIELD_LENGTH)) { // Match found, abort search result->status = STATUS_SUCCESS; local_strncpy(result->password, probe, MAX_PASSWORD_LENGTH); } }
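/*
 * For reference (not part of either file): the CUDA original below and this HIP port differ
 * essentially in the hipify banner, the "hip/hip_runtime.h" include, the hip-prefixed vs
 * cuda-prefixed runtime calls (hipMalloc/cudaMalloc, hipDeviceProp_t/cudaDeviceProp, ...),
 * and the kernel launch syntax:
 *
 *   // CUDA original:
 *   crack_job<<<GRID_SIZE, BLOCK_SIZE>>>(d_crackResults, d_crackJobs);
 *
 *   // HIP translation: an explicit macro taking the grid, the block, the dynamic shared
 *   // memory size in bytes (0) and the stream (0 = default) before the kernel arguments:
 *   hipLaunchKernelGGL(crack_job, dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0,
 *                      d_crackResults, d_crackJobs);
 */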
c8501370afe680545c3b838cd085546f9625b228.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "common.hpp" #include "util.hpp" #include "lib/local-string.cuh" #include "lib/sha512crypt.cuh" static bool init(Options *options, Dictionary *dict, FILE **shadow_file, FILE **result_file, int argc, char **argv); static bool init_cuda(); static void cleanup(FILE *shadow_file, FILE *result_file, Dictionary *dict); static void crack(ExtendedCrackResult *result, Options *options, Dictionary *dict, ShadowEntry *entry); static bool prepare_job(CrackJob *job, ShadowEntry *entry, ProbeConfig *config, Options *options, Dictionary *dict); static bool get_next_probe(ProbeConfig *config, Options *options, Dictionary *dict); static void handle_result(Options *options, ExtendedCrackResult *result, OverviewCrackResult *overview_result, FILE *result_file); static void handle_overview_result(Options *options, OverviewCrackResult *overview_result); __global__ static void crack_job(CrackResult results[], CrackJob jobs[]); #define GRID_SIZE 40 // 40 streaming multiprocessors on my GPU, so clean number. May adjust later. // The number of threads per block should be a round multiple of the warp size #define BLOCK_SIZE 32 * 2 // My GPU has 64 CUDA cores, so clean number. May adjust later. #define TOTAL_THREAD_COUNT GRID_SIZE *BLOCK_SIZE #define USED_DEVICE 0 /* * Main entrypoint. */ int main(int argc, char **argv) { Options options = {}; Dictionary dict = {}; FILE *shadow_file = NULL; FILE *result_file = NULL; if (!init(&options, &dict, &shadow_file, &result_file, argc, argv)) { cleanup(shadow_file, result_file, &dict); return EXIT_FAILURE; } // Iterate and crack shadow entries if (!options.quiet) { printf("\nEntries:\n"); } OverviewCrackResult overview_result = {}; ShadowEntry shadow_entry; while (get_next_shadow_entry(&shadow_entry, shadow_file)) { ExtendedCrackResult result; crack(&result, &options, &dict, &shadow_entry); if (result.status == STATUS_ERROR) { fprintf(stderr, "Stopping due to an error.\n"); break; } handle_result(&options, &result, &overview_result, result_file); } // Handle overall result handle_overview_result(&options, &overview_result); cleanup(shadow_file, result_file, &dict); return EXIT_SUCCESS; } /* * Initialize general stuff. */ static bool init(Options *options, Dictionary *dict, FILE **shadow_file, FILE **result_file, int argc, char **argv) { // Parse CLI args if (!parse_cli_args(options, argc, argv)) { return false; } // Print some useful info if (!options->quiet) { printf("Chosen CUDA grid size: %d\n", GRID_SIZE); printf("Chosen CUDA block size: %d\n", BLOCK_SIZE); printf("Total thread count: %d\n", TOTAL_THREAD_COUNT); printf("Max symbols: %ld\n", options->max_length); printf("Symbol separator: \"%s\"\n", options->separator); } // Open shadow file if (!options->quiet) { printf("Shadow file: %s\n", options->shadow_file); } if (!open_file(shadow_file, options->shadow_file, "r")) { return false; } // Open output file if provided if (options->result_file[0] != 0) { if (!options->quiet) { printf("Output file: %s\n", options->result_file); } if (!open_file(result_file, options->result_file, "w")) { return false; } } // Read full directory if (!options->quiet) { printf("Dictionary file: %s\n", options->dict_file); } if (!read_dictionary(dict, options, options->dict_file)) { return false; } // Init CUDA if (!init_cuda()) { return false; } return true; } /* * Initialize CUDA stuff. 
*/ static bool init_cuda() { // Make sure at least one CUDA-capable device exists int device_count; cudaGetDeviceCount(&device_count); printf("CUDA device count: %d\n", device_count); if (device_count < 1) { fprintf(stderr, "No CUDA devices present.\n"); return false; } // Print some useful info cudaDeviceProp prop; cudaGetDeviceProperties(&prop, USED_DEVICE); printf("CUDA device #0:\n"); printf("\tName: %s\n", prop.name); printf("\tCompute capability: %d.%d\n", prop.major, prop.minor); printf("\tMultiprocessors: %d\n", prop.multiProcessorCount); printf("\tWarp size: %d\n", prop.warpSize); printf("\tGlobal memory: %.1fGiB bytes\n", prop.totalGlobalMem / 1024.0 / 1024.0 / 1024.0); printf("\tPer-block shared memory: %.1fkiB\n", prop.sharedMemPerBlock / 1024.0); printf("\tPer-block registers: %d\n", prop.regsPerBlock); // Check for any previous errors cudaError_t error = cudaPeekAtLastError(); if (error) { fprintf(stderr, "A CUDA error has occurred while initializing: %s\n", cudaGetErrorString(error)); return false; } return true; } /* * Cleanup stuff. */ static void cleanup(FILE *shadow_file, FILE *result_file, Dictionary *dict) { if (shadow_file) { fclose(shadow_file); } if (result_file) { fclose(result_file); } if (dict->elements) { free(dict->elements); } } /* * Crack a shadow password entry. */ static void crack(ExtendedCrackResult *total_result, Options *options, Dictionary *dict, ShadowEntry *entry) { // Initialize main result early in case of early return memset(total_result, 0, sizeof(ExtendedCrackResult)); strncpy(total_result->user, entry->user, MAX_USER_LENGTH); strncpy(total_result->passfield, entry->passfield, MAX_PASSFIELD_LENGTH); total_result->alg = entry->alg; // Skip if not SHA512 if (entry->alg != ALG_SHA512) { total_result->status = STATUS_SKIP; return; } ProbeConfig config = {}; config.dict_positions = (size_t *)malloc(options->max_length * sizeof(size_t)); config.symbols = (char(*)[MAX_DICT_ELEMENT_LENGTH + 1]) malloc(options->max_length * (MAX_DICT_ELEMENT_LENGTH + 1) * sizeof(char)); int number_of_jobs = TOTAL_THREAD_COUNT; // TODO // Host (CPU) arrays CrackJob *h_crackJobs = (CrackJob *)malloc(number_of_jobs * sizeof(CrackJob)); CrackResult *h_crackResults = (CrackResult *)malloc(number_of_jobs * sizeof(CrackResult)); // Device (GPU) arrays CrackJob *d_crackJobs; CrackResult *d_crackResults; cudaMalloc((void **)&d_crackJobs, number_of_jobs * sizeof(CrackJob)); cudaMalloc((void **)&d_crackResults, number_of_jobs * sizeof(CrackResult)); // Start time measurement struct timespec start_time, end_time; clock_gettime(CLOCK_MONOTONIC, &start_time); // Try probes until the status changes (when a match is found or the search space is exhausted) while (total_result->status == STATUS_PENDING) { printf("New while loop started\n"); // Prepare new jobs bool more_probes = true; for (size_t i = 0; i < number_of_jobs; i++) { if (!prepare_job(&h_crackJobs[i], entry, &config, options, dict)) { more_probes = false; break; } } // Copy jobs to device cudaMemcpy(d_crackJobs, h_crackJobs, number_of_jobs * sizeof(CrackJob), cudaMemcpyHostToDevice); // Start kernel crack_job<<<GRID_SIZE, BLOCK_SIZE>>>(d_crackResults, d_crackJobs); // crack_job(d_crackResults, d_crackJobs); // Copy results from device cudaMemcpy(h_crackResults, d_crackResults, number_of_jobs * sizeof(CrackResult), cudaMemcpyDeviceToHost); // Check for error cudaError_t error = cudaPeekAtLastError(); if (error) { fprintf(stderr, "A CUDA error has occurred while cracking: %s\n", cudaGetErrorString(error)); total_result->status = 
STATUS_ERROR; break; } // Handle results for (size_t i = 0; i < number_of_jobs; i++) { CrackResult *result = &h_crackResults[i]; // Skip if skip if (total_result->status == STATUS_SKIP) { continue; } // Keep track of probes tested total_result->attempts++; // Accept if success (currently the only one it makes sense to stop on) if (result->status == STATUS_SUCCESS) { total_result->status = result->status; strncpy(total_result->password, result->password, MAX_PASSWORD_LENGTH); // Ignore all job results after this one break; } } // Check if search space is exhausted and not match has been found if (!more_probes && total_result->status == STATUS_PENDING) { total_result->status = STATUS_FAIL; } } // End time measurement and record duration clock_gettime(CLOCK_MONOTONIC, &end_time); total_result->duration = ((double)(end_time.tv_sec - start_time.tv_sec)) + ((double)(end_time.tv_nsec - start_time.tv_nsec)) * 1e-9; // Cleanup free(config.dict_positions); free(config.symbols); free(h_crackJobs); free(h_crackResults); cudaFree(d_crackJobs); cudaFree(d_crackResults); } static bool prepare_job(CrackJob *job, ShadowEntry *entry, ProbeConfig *config, Options *options, Dictionary *dict) { // Zeroize memset(job, 0, sizeof(CrackJob)); bool more_probes = get_next_probe(config, options, dict); if (more_probes) { job->action = ACTION_WORK; strncpy(job->passfield, entry->passfield, MAX_PASSWORD_LENGTH); job->alg = entry->alg; job->salt_end = entry->salt_end; strncpy(job->probe, config->probe, MAX_PASSWORD_LENGTH); } else { job->action = ACTION_WAIT; } if (options->verbose) { printf("%s\n", job->probe); } return more_probes; } /* * Build the next probe. Returns false with an empty probe when the search space is exhausted. */ static bool get_next_probe(ProbeConfig *config, Options *options, Dictionary *dict) { // Check if dict is empty if (dict->length == 0) { return false; } // Find last symbol which can be replaced with the next one, if any exists ssize_t last_replaceable_pos = -1; for (size_t i = 0; i < config->size; i++) { if (config->dict_positions[i] < dict->length - 1) { last_replaceable_pos = i; } } // A symbol can be replaced, replace last one and reset all behind it if (last_replaceable_pos >= 0) { size_t new_dict_pos = config->dict_positions[last_replaceable_pos] + 1; config->dict_positions[last_replaceable_pos] = new_dict_pos; strncpy(config->symbols[last_replaceable_pos], dict->elements[new_dict_pos], MAX_DICT_ELEMENT_LENGTH); for (size_t i = last_replaceable_pos + 1; i < config->size; i++) { config->dict_positions[i] = 0; strncpy(config->symbols[i], dict->elements[0], MAX_DICT_ELEMENT_LENGTH); } } // No symbols can be replaced and no more symbols are allowed, return error else if (config->size == options->max_length) { config->probe[0] = 0; return false; } // New symbol can be added, reset all previous positions and add it else { config->size++; for (size_t i = 0; i < config->size; i++) { config->dict_positions[i] = 0; strncpy(config->symbols[i], dict->elements[0], MAX_DICT_ELEMENT_LENGTH); } } // Build probe config->probe[0] = 0; for (size_t i = 0; i < config->size; i++) { if (i > 0) { strncat(config->probe, options->separator, MAX_PASSWORD_LENGTH); } strncat(config->probe, config->symbols[i], MAX_PASSWORD_LENGTH); } return true; } /* * Handle result from trying to crack a single password. 
*/ static void handle_result(Options *options, ExtendedCrackResult *result, OverviewCrackResult *overview_result, FILE *result_file) { // Make representations char const *alg_str = cryptalg_to_string(result->alg); char const *status_str = crack_result_status_to_string(result->status); double attempts_per_second = result->attempts / result->duration; // Format and print size_t const static max_output_length = 1023; char *output = (char *)malloc(max_output_length + 1); snprintf(output, max_output_length + 1, "user=\"%s\", alg=\"%s\" status=\"%s\" duration=\"%fs\" attempts=\"%ld\" attempts_per_second=\"%f\" password=\"%s\"", result->user, alg_str, status_str, result->duration, result->attempts, attempts_per_second, result->password); if (!options->quiet) { printf("%s\n", output); } if (result_file) { fprintf(result_file, "%s\n", output); fflush(result_file); } free(output); // Update overview overview_result->statuses[result->status]++; overview_result->duration += result->duration; overview_result->attempts += result->attempts; } /* * Handle result from trying to crack all passwords. */ static void handle_overview_result(Options *options, OverviewCrackResult *result) { if (!options->quiet) { printf("\nOverview:\n"); printf("Total duration: %.3fs\n", result->duration); printf("Total attempts: %ld\n", result->attempts); printf("Total attempts per second: %.3f\n", result->attempts / result->duration); printf("Skipped: %ld\n", result->statuses[STATUS_SKIP]); printf("Successful: %ld\n", result->statuses[STATUS_SUCCESS]); printf("Failed: %ld\n", result->statuses[STATUS_FAIL]); } } /* * Hash probe and compare. */ __global__ static void crack_job(CrackResult results[], CrackJob jobs[]) { // TODO set using unique index into arrays int index = blockIdx.x * BLOCK_SIZE + threadIdx.x; CrackResult *result = &results[index]; CrackJob *job = &jobs[index]; // Zeroize result result->status = STATUS_PENDING; result->password[0] = 0; // Nothing to do here if (job->action == ACTION_WAIT) { result->status = STATUS_SKIP; return; } // Only accept SHA512 (redundant check) if (job->alg != ALG_SHA512) { result->status = STATUS_SKIP; return; } // Copy probe into shared memory char *probe = job->probe; // Copy salt part of passfield into shared memory (same value is used by all threads) char salt[MAX_SALT_LENGTH]; local_strncpy(salt, job->passfield, MIN(job->salt_end, MAX_SALT_LENGTH)); salt[job->salt_end] = 0; // Call sha512_crypt_r directly using register buffer char new_passfield[MAX_PASSFIELD_LENGTH + 1]; sha512_crypt_r(probe, salt, new_passfield, MAX_PASSFIELD_LENGTH + 1); if (new_passfield != NULL && local_strneq(job->passfield, new_passfield, MAX_PASSFIELD_LENGTH)) { // Match found, abort search result->status = STATUS_SUCCESS; local_strncpy(result->password, probe, MAX_PASSWORD_LENGTH); } }
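get_next_probe advances the probe like an odometer over the dictionary: the rightmost position that can still move is incremented, every position after it resets to the first dictionary entry, and a new position is appended only when all current positions are exhausted. A minimal host-side sketch of that enumeration, using hypothetical names (next_combination, positions) rather than the program's ProbeConfig:

#include <stdbool.h>
#include <stddef.h>

// Hypothetical odometer-style enumerator over dictionary indices.
// positions[0..*size-1] holds the current dictionary index per slot;
// returns false once all combinations up to max_len slots are exhausted.
static bool next_combination(size_t *positions, size_t *size,
                             size_t dict_len, size_t max_len)
{
    // Find the rightmost slot that can still be advanced.
    for (size_t i = *size; i-- > 0; ) {
        if (positions[i] + 1 < dict_len) {
            positions[i]++;
            for (size_t j = i + 1; j < *size; j++) positions[j] = 0; // reset the tail
            return true;
        }
    }
    // Every slot is maxed out: grow the probe by one symbol if allowed.
    if (*size == max_len) return false;
    (*size)++;
    for (size_t i = 0; i < *size; i++) positions[i] = 0;
    return true;
}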
6d84923c5c1b0d05e5b612ec7b81bd3446770d5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Standard Lib #include <iostream> #include <time.h> #include <float.h> //CUDA kernel #include <hiprand/hiprand_kernel.h> //Include files #include "vec3.h" #include "ray.h" #include "sphere.h" #include "hitable_list.h" #include "camera.h" #include "material.h" #include "device_launch_parameters.h" #include <SDL.h> using namespace std; // limited version of checkCudaErrors from helper_cuda.h in CUDA examples #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(hipError_t result, char const* const func, const char* const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting hipDeviceReset(); exit(99); } } // Matching the C++ code would recurse enough into color() calls that // it was blowing up the stack, so we have to turn this into a // limited-depth loop instead. Later code in the book limits to a max // depth of 50, so we adapt this a few chapters early on the GPU. __device__ vec3 color(const ray& r, hitable** world, hiprandState_t* local_rand_state) { ray cur_ray = r; vec3 cur_attenuation = vec3(1.0, 1.0, 1.0); for (int i = 0; i < 50; i++) { hit_record rec; if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) { ray scattered; vec3 attenuation; if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) { cur_attenuation *= attenuation; cur_ray = scattered; } else { return vec3(0.0, 0.0, 0.0); } } else { vec3 unit_direction = unit_vector(cur_ray.direction()); float t = 0.5f * (unit_direction.y() + 1.0f); vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0); return cur_attenuation * c; } } return vec3(0.0, 0.0, 0.0); // exceeded recursion } __global__ void rand_init(hiprandState_t* rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { hiprand_init(1984, 0, 0, rand_state); } } __global__ void render_init(int max_x, int max_y, hiprandState_t* rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j * max_x + i; //Each thread gets same seed, a different sequence number, no offset hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]); } __global__ void render(vec3* fb, int max_x, int max_y, int ns, camera** cam, hitable** world, hiprandState_t* rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j * max_x + i; hiprandState_t local_rand_state = rand_state[pixel_index]; vec3 col(0, 0, 0); for (int s = 0; s < ns; s++) { float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x); float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y); ray r = (*cam)->get_ray(u, v, &local_rand_state); col += color(r, world, &local_rand_state); } rand_state[pixel_index] = local_rand_state; col /= float(ns); col[0] = sqrt(col[0]); col[1] = sqrt(col[1]); col[2] = sqrt(col[2]); fb[pixel_index] = col; } #define RND (hiprand_uniform(&local_rand_state)) __global__ void create_world(hitable** d_list, hitable** d_world, camera** d_camera, int nx, int ny, hiprandState_t* rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { hiprandState_t local_rand_state = *rand_state; d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000, new 
lambertian(vec3(0.5, 0.5, 0.5))); int i = 1; for (int a = -11; a < 11; a++) { for (int b = -11; b < 11; b++) { float choose_mat = RND; vec3 center(a + RND, 0.2, b + RND); if (choose_mat < 0.8f) { d_list[i++] = new sphere(center, 0.2, new lambertian(vec3(RND * RND, RND * RND, RND * RND))); } else if (choose_mat < 0.95f) { d_list[i++] = new sphere(center, 0.2, new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f * RND)); } else { d_list[i++] = new sphere(center, 0.2, new dielectric(1.5)); } } } d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5)); d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1))); d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0)); *rand_state = local_rand_state; *d_world = new hitable_list(d_list, 22 * 22 + 1 + 3); vec3 lookfrom(13, 2, 3); vec3 lookat(0, 0, 0); float dist_to_focus = 10.0; (lookfrom - lookat).length(); float aperture = 0.1; *d_camera = new camera(lookfrom, lookat, vec3(0, 1, 0), 30.0, float(nx) / float(ny), aperture, dist_to_focus); } } __global__ void free_world(hitable** d_list, hitable** d_world, camera** d_camera) { for (int i = 0; i < 22 * 22 + 1 + 3; i++) { delete ((sphere*)d_list[i])->mat_ptr; delete d_list[i]; } delete* d_world; delete* d_camera; } int main() { int nx = 1280; int ny = 720; int ns = 10; int tx = 32; int ty = 32; int width = 1280, height = 720; bool running = true; SDL_Init(SDL_INIT_VIDEO); SDL_Window* window = SDL_CreateWindow("SDL2", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, width, height, SDL_WINDOW_OPENGL); if (window == NULL) { std::cout << "Could not create window: " << SDL_GetError(); return 1; } while (running) { SDL_Event event; while (SDL_PollEvent(&event)) { switch (event.type) { case SDL_QUIT: { running = false; } break; case SDL_WINDOWEVENT: { if (event.window.event == SDL_WINDOWEVENT_RESIZED) { width = (uint32_t)event.window.data1; height = (uint32_t)event.window.data2; } } break; } } //SDL_RenderClear(renderer); //SDL_RenderCopy(renderer, texture, NULL, NULL); //SDL_RenderPresent(renderer); } std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel "; std::cerr << "in " << tx << "x" << ty << " blocks.\n"; int num_pixels = nx * ny; size_t fb_size = num_pixels * sizeof(vec3); // allocate FB vec3* fb; checkCudaErrors(hipMallocManaged((void**)&fb, fb_size)); // allocate random state hiprandState_t* d_rand_state; checkCudaErrors(hipMalloc((void**)&d_rand_state, num_pixels * sizeof(hiprandState_t))); hiprandState_t* d_rand_state2; checkCudaErrors(hipMalloc((void**)&d_rand_state2, 1 * sizeof(hiprandState_t))); // we need that 2nd random state to be initialized for the world creation rand_init << <1, 1 >> > (d_rand_state2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // make our world of hitables & the camera hitable** d_list; int num_hitables = 22 * 22 + 1 + 3; checkCudaErrors(hipMalloc((void**)&d_list, num_hitables * sizeof(hitable*))); hitable** d_world; checkCudaErrors(hipMalloc((void**)&d_world, sizeof(hitable*))); camera** d_camera; checkCudaErrors(hipMalloc((void**)&d_camera, sizeof(camera*))); create_world << <1, 1 >> > (d_list, d_world, d_camera, nx, ny, d_rand_state2); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); clock_t start, stop; start = clock(); // Render our buffer dim3 blocks(nx / tx + 1, ny / ty + 1); dim3 threads(tx, ty); render_init << <blocks, threads >> > (nx, ny, d_rand_state); 
checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); render << <blocks, threads >> > (fb, nx, ny, ns, d_camera, d_world, d_rand_state); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds.\n"; // Output FB as Image /* std::cout<< "P3\n" << nx << " " << ny << "\n255\n"; for (int j = ny - 1; j >= 0; j--) { for (int i = 0; i < nx; i++) { size_t pixel_index = j * nx + i; int ir = int(255.99 * fb[pixel_index].r()); int ig = int(255.99 * fb[pixel_index].g()); int ib = int(255.99 * fb[pixel_index].b()); std::cout << ir << " " << ig << " " << ib << "\n"; } } */ // clean up checkCudaErrors(hipDeviceSynchronize()); free_world << <1, 1 >> > (d_list, d_world, d_camera); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipFree(d_camera)); checkCudaErrors(hipFree(d_world)); checkCudaErrors(hipFree(d_list)); checkCudaErrors(hipFree(d_rand_state)); checkCudaErrors(hipFree(fb)); hipDeviceReset(); SDL_DestroyWindow(window); SDL_Quit(); return 0; }
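The kernels above guard with if ((i >= max_x) || (j >= max_y)) return; because the grid is sized as nx / tx + 1 by ny / ty + 1 blocks, which rounds up but also launches one spare block per axis whenever the image size is already a multiple of the block size. A common equivalent is ceiling division; the helper below is a sketch (make_grid is not part of the renderer) that keeps the same bounds check but never adds the extra block:

// Hypothetical launch-configuration helper: ceiling division per axis.
static inline dim3 make_grid(int nx, int ny, dim3 block)
{
    return dim3((nx + block.x - 1) / block.x,   // ceil(nx / block.x)
                (ny + block.y - 1) / block.y);  // ceil(ny / block.y)
}

// Usage sketch (a 32x32 block is 1024 threads, the per-block maximum on
// current NVIDIA GPUs):
//   dim3 threads(32, 32);
//   dim3 blocks = make_grid(nx, ny, threads);
//   render<<<blocks, threads>>>(fb, nx, ny, ns, d_camera, d_world, d_rand_state);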
6d84923c5c1b0d05e5b612ec7b81bd3446770d5f.cu
//Standard Lib #include <iostream> #include <time.h> #include <float.h> //CUDA kernel #include <curand_kernel.h> //Include files #include "vec3.h" #include "ray.h" #include "sphere.h" #include "hitable_list.h" #include "camera.h" #include "material.h" #include "device_launch_parameters.h" #include <SDL.h> using namespace std; // limited version of checkCudaErrors from helper_cuda.h in CUDA examples #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(cudaError_t result, char const* const func, const char* const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting cudaDeviceReset(); exit(99); } } // Matching the C++ code would recurse enough into color() calls that // it was blowing up the stack, so we have to turn this into a // limited-depth loop instead. Later code in the book limits to a max // depth of 50, so we adapt this a few chapters early on the GPU. __device__ vec3 color(const ray& r, hitable** world, curandState* local_rand_state) { ray cur_ray = r; vec3 cur_attenuation = vec3(1.0, 1.0, 1.0); for (int i = 0; i < 50; i++) { hit_record rec; if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) { ray scattered; vec3 attenuation; if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) { cur_attenuation *= attenuation; cur_ray = scattered; } else { return vec3(0.0, 0.0, 0.0); } } else { vec3 unit_direction = unit_vector(cur_ray.direction()); float t = 0.5f * (unit_direction.y() + 1.0f); vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0); return cur_attenuation * c; } } return vec3(0.0, 0.0, 0.0); // exceeded recursion } __global__ void rand_init(curandState* rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { curand_init(1984, 0, 0, rand_state); } } __global__ void render_init(int max_x, int max_y, curandState* rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j * max_x + i; //Each thread gets same seed, a different sequence number, no offset curand_init(1984, pixel_index, 0, &rand_state[pixel_index]); } __global__ void render(vec3* fb, int max_x, int max_y, int ns, camera** cam, hitable** world, curandState* rand_state) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if ((i >= max_x) || (j >= max_y)) return; int pixel_index = j * max_x + i; curandState local_rand_state = rand_state[pixel_index]; vec3 col(0, 0, 0); for (int s = 0; s < ns; s++) { float u = float(i + curand_uniform(&local_rand_state)) / float(max_x); float v = float(j + curand_uniform(&local_rand_state)) / float(max_y); ray r = (*cam)->get_ray(u, v, &local_rand_state); col += color(r, world, &local_rand_state); } rand_state[pixel_index] = local_rand_state; col /= float(ns); col[0] = sqrt(col[0]); col[1] = sqrt(col[1]); col[2] = sqrt(col[2]); fb[pixel_index] = col; } #define RND (curand_uniform(&local_rand_state)) __global__ void create_world(hitable** d_list, hitable** d_world, camera** d_camera, int nx, int ny, curandState* rand_state) { if (threadIdx.x == 0 && blockIdx.x == 0) { curandState local_rand_state = *rand_state; d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000, new lambertian(vec3(0.5, 0.5, 0.5))); int i = 1; for (int a = -11; a < 11; a++) { for (int b = -11; b < 11; b++) { float choose_mat = RND; 
vec3 center(a + RND, 0.2, b + RND); if (choose_mat < 0.8f) { d_list[i++] = new sphere(center, 0.2, new lambertian(vec3(RND * RND, RND * RND, RND * RND))); } else if (choose_mat < 0.95f) { d_list[i++] = new sphere(center, 0.2, new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f * RND)); } else { d_list[i++] = new sphere(center, 0.2, new dielectric(1.5)); } } } d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5)); d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1))); d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0)); *rand_state = local_rand_state; *d_world = new hitable_list(d_list, 22 * 22 + 1 + 3); vec3 lookfrom(13, 2, 3); vec3 lookat(0, 0, 0); float dist_to_focus = 10.0; (lookfrom - lookat).length(); float aperture = 0.1; *d_camera = new camera(lookfrom, lookat, vec3(0, 1, 0), 30.0, float(nx) / float(ny), aperture, dist_to_focus); } } __global__ void free_world(hitable** d_list, hitable** d_world, camera** d_camera) { for (int i = 0; i < 22 * 22 + 1 + 3; i++) { delete ((sphere*)d_list[i])->mat_ptr; delete d_list[i]; } delete* d_world; delete* d_camera; } int main() { int nx = 1280; int ny = 720; int ns = 10; int tx = 32; int ty = 32; int width = 1280, height = 720; bool running = true; SDL_Init(SDL_INIT_VIDEO); SDL_Window* window = SDL_CreateWindow("SDL2", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, width, height, SDL_WINDOW_OPENGL); if (window == NULL) { std::cout << "Could not create window: " << SDL_GetError(); return 1; } while (running) { SDL_Event event; while (SDL_PollEvent(&event)) { switch (event.type) { case SDL_QUIT: { running = false; } break; case SDL_WINDOWEVENT: { if (event.window.event == SDL_WINDOWEVENT_RESIZED) { width = (uint32_t)event.window.data1; height = (uint32_t)event.window.data2; } } break; } } //SDL_RenderClear(renderer); //SDL_RenderCopy(renderer, texture, NULL, NULL); //SDL_RenderPresent(renderer); } std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel "; std::cerr << "in " << tx << "x" << ty << " blocks.\n"; int num_pixels = nx * ny; size_t fb_size = num_pixels * sizeof(vec3); // allocate FB vec3* fb; checkCudaErrors(cudaMallocManaged((void**)&fb, fb_size)); // allocate random state curandState* d_rand_state; checkCudaErrors(cudaMalloc((void**)&d_rand_state, num_pixels * sizeof(curandState))); curandState* d_rand_state2; checkCudaErrors(cudaMalloc((void**)&d_rand_state2, 1 * sizeof(curandState))); // we need that 2nd random state to be initialized for the world creation rand_init << <1, 1 >> > (d_rand_state2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // make our world of hitables & the camera hitable** d_list; int num_hitables = 22 * 22 + 1 + 3; checkCudaErrors(cudaMalloc((void**)&d_list, num_hitables * sizeof(hitable*))); hitable** d_world; checkCudaErrors(cudaMalloc((void**)&d_world, sizeof(hitable*))); camera** d_camera; checkCudaErrors(cudaMalloc((void**)&d_camera, sizeof(camera*))); create_world << <1, 1 >> > (d_list, d_world, d_camera, nx, ny, d_rand_state2); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); clock_t start, stop; start = clock(); // Render our buffer dim3 blocks(nx / tx + 1, ny / ty + 1); dim3 threads(tx, ty); render_init << <blocks, threads >> > (nx, ny, d_rand_state); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); render << <blocks, threads >> > (fb, nx, ny, ns, d_camera, d_world, 
d_rand_state); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); stop = clock(); double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; std::cerr << "took " << timer_seconds << " seconds.\n"; // Output FB as Image /* std::cout<< "P3\n" << nx << " " << ny << "\n255\n"; for (int j = ny - 1; j >= 0; j--) { for (int i = 0; i < nx; i++) { size_t pixel_index = j * nx + i; int ir = int(255.99 * fb[pixel_index].r()); int ig = int(255.99 * fb[pixel_index].g()); int ib = int(255.99 * fb[pixel_index].b()); std::cout << ir << " " << ig << " " << ib << "\n"; } } */ // clean up checkCudaErrors(cudaDeviceSynchronize()); free_world << <1, 1 >> > (d_list, d_world, d_camera); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(d_camera)); checkCudaErrors(cudaFree(d_world)); checkCudaErrors(cudaFree(d_list)); checkCudaErrors(cudaFree(d_rand_state)); checkCudaErrors(cudaFree(fb)); cudaDeviceReset(); SDL_DestroyWindow(window); SDL_Quit(); return 0; }
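The comment above color() explains the key transformation in this renderer: the book's recursive shading call is rewritten as a depth-limited loop that carries a running attenuation product, because deep recursion overflows the device stack. The toy program below (shade_recursive, shade_iterative and the attenuation array are illustrative, not part of the renderer) shows the two forms computing the same value:

#include <cassert>
#include <cmath>

// Toy stand-in for color(): each "bounce" scales the carried energy by a
// per-bounce attenuation; a negative entry means the ray escaped to the sky.
static float shade_recursive(const float *att, int depth, int max_depth)
{
    if (depth == max_depth) return 0.0f;                 // exceeded recursion
    if (att[depth] < 0.0f)  return 1.0f;                 // sky: unit background
    return att[depth] * shade_recursive(att, depth + 1, max_depth);
}

static float shade_iterative(const float *att, int max_depth)
{
    float cur_attenuation = 1.0f;                        // product of albedos so far
    for (int d = 0; d < max_depth; d++) {
        if (att[d] < 0.0f) return cur_attenuation;       // sky: return carried light
        cur_attenuation *= att[d];                       // fold this bounce in
    }
    return 0.0f;                                         // exceeded recursion
}

int main()
{
    const float bounces[] = { 0.5f, 0.7f, -1.0f };       // two bounces, then sky
    assert(std::fabs(shade_recursive(bounces, 0, 3) - shade_iterative(bounces, 3)) < 1e-6f);
    return 0;
}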
96ed90b31971169811180aaa1c8df39772292de5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019, NVIDIA Corporation. All rights reserved. // // This work is made available under the Nvidia Source Code License-NC. // To view a copy of this license, visit // https://nvlabs.github.io/stylegan2/license.html #include <cstdio> void checkCudaError(hipError_t err) { if (err != hipSuccess) { printf("%s: %s\n", hipGetErrorName(err), hipGetErrorString(err)); exit(1); } } __global__ void cudaKernel(void) { printf("GPU says hello.\n"); } int main(void) { printf("CPU says hello.\n"); checkCudaError(hipLaunchKernel((void*)cudaKernel, 1, 1, NULL, 0, NULL)); checkCudaError(hipDeviceSynchronize()); return 0; }
96ed90b31971169811180aaa1c8df39772292de5.cu
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved. // // This work is made available under the Nvidia Source Code License-NC. // To view a copy of this license, visit // https://nvlabs.github.io/stylegan2/license.html #include <cstdio> void checkCudaError(cudaError_t err) { if (err != cudaSuccess) { printf("%s: %s\n", cudaGetErrorName(err), cudaGetErrorString(err)); exit(1); } } __global__ void cudaKernel(void) { printf("GPU says hello.\n"); } int main(void) { printf("CPU says hello.\n"); checkCudaError(cudaLaunchKernel((void*)cudaKernel, 1, 1, NULL, 0, NULL)); checkCudaError(cudaDeviceSynchronize()); return 0; }
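cudaLaunchKernel receives its kernel arguments as an array of pointers, one per parameter in declaration order; the file above can pass NULL only because cudaKernel takes no parameters. A sketch for a hypothetical kernel that does take arguments (scale_kernel and launch_scale are illustrative names, not part of this file):

#include <cuda_runtime.h>

__global__ void scale_kernel(float *data, int n, float factor)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

// Explicit runtime launch: one pointer per kernel argument, in order.
static cudaError_t launch_scale(float *d_data, int n, float factor)
{
    void *args[] = { &d_data, &n, &factor };
    dim3 grid((n + 255) / 256), block(256);
    return cudaLaunchKernel((void *)scale_kernel, grid, block, args, 0, NULL);
}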
0cb5f29c60a7f3b4f5687228756399f78e2d7510.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "dyadicAdd.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *counter = NULL; hipMalloc(&counter, XSIZE*YSIZE); const int length = 1; const int shift = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( dyadicAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, counter,length,shift); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( dyadicAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, counter,length,shift); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( dyadicAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, counter,length,shift); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0cb5f29c60a7f3b4f5687228756399f78e2d7510.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "dyadicAdd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *counter = NULL; cudaMalloc(&counter, XSIZE*YSIZE); const int length = 1; const int shift = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); dyadicAdd<<<gridBlock,threadBlock>>>(counter,length,shift); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { dyadicAdd<<<gridBlock,threadBlock>>>(counter,length,shift); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { dyadicAdd<<<gridBlock,threadBlock>>>(counter,length,shift); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
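Kernel launches are asynchronous, so reading steady_clock::now() right after the 1000-launch loop, with no synchronization, mostly measures enqueue time plus whatever execution happens to back up the launch queue. Below is a sketch of the same measurement bracketed with CUDA events, which record on the GPU timeline (dyadicAdd's parameter types are assumed from how the harness calls it):

// Assumes dyadicAdd(int *counter, int length, int shift), as launched above.
float time_kernel_ms(dim3 grid, dim3 block, int *counter, int length, int shift,
                     int iterations)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < iterations; i++)
        dyadicAdd<<<grid, block>>>(counter, length, shift);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);               // wait until the last kernel finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // elapsed GPU time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / iterations;                   // average per launch
}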
6e138dc2759ab5ec95b4231ca933c1dea390b990.hip
// !!! This is a file automatically generated by hipify!!! #include <cfloat> #include <chrono> #include <vector> #include <string> #include <hip/hip_runtime_api.h> #include <iostream> #include "cuda_error_handling.h" using namespace std; void __global__ gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) { const long long src_id = blockIdx.x * blockDim.x + threadIdx.x; if (src_id < vertices_count) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for(register int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; // , : /* BFS int src_level = data[src_id]; int dst_level = data[dst_id]; if((src_level == current_level) && (dst_level == UNVISITED_VERTEX)) { data[dst_id] = current_level + 1; } */ /* SSSP float weight = outgoing_weights[first_edge_ptr + cur_edge]; float src_weight = data[src_id]; float dst_weight = data[dst_id]; if(dst_weight > src_weight + weight) { data[dst_id] = src_weight + weight; } */ } } } void __global__ gather_opt(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) { const long long id = (blockIdx.x*blockDim.x+threadIdx.x); const int i = id / 32; const int first_edge_ptr = ptrs[i]; const int connections_count = connections[i]; int rep = (connections_count/32)+1; for (int k = 0; k < rep; ++k) { int cur_edge = threadIdx.x%32 + (k*32); if (cur_edge < connections_count) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } } void cpu_gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, vector<int>& result) { for (int i = 0; i < vertices_count; ++i) { const int first_edge_ptr = ptrs[i]; const int connections_count = connections[i]; for (int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } } bool checkAnswers(const vector<int>& res1, const vector<int>& res2) { for (int i = 0; i < res1.size(); ++i) { if (res1[i] != res2[i]) { printf("%d %d %d are different\n", i, res1[i], res2[i]); return false; } } return true; } /////////////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char* argv[]) { if (argc < 2) { cout << "Incorrect number of arguments\n"; return 0; } int vertices_count = 1024*1024; int *ptrs = new int[vertices_count]; int *data = new int[vertices_count]; int *connections = new int[vertices_count]; int pos = 0; double percent = stod(argv[1]); srand(time(NULL)); for(int i = 0; i < vertices_count; i++) // TODO (bonus) "" { ptrs[i] = pos; if ((double)rand()/RAND_MAX < percent) { connections[i] = 1000 + rand()%9000; pos += connections[i]; } else { connections[i] = 16 + rand()%32; pos += connections[i]; } data[i] = rand(); } int edges_count = pos; srand(time(NULL)); printf("Edges count %d\n", edges_count); int *outgoing_ids = new int[edges_count]; for(int i = 0; i < edges_count; i++) { outgoing_ids[i] = rand()%vertices_count; } int *dev_ptrs; int *dev_connections; int *dev_outgoing_ids; int *dev_data; int *dev_result; hipMalloc((void**)&dev_ptrs, vertices_count*sizeof(int)); hipMalloc((void**)&dev_connections, vertices_count*sizeof(int)); hipMalloc((void**)&dev_data, vertices_count*sizeof(int)); 
hipMalloc((void**)&dev_outgoing_ids, edges_count*sizeof(int)); hipMalloc((void**)&dev_result, edges_count*sizeof(int)); SAFE_CALL(hipMemcpy(dev_ptrs, ptrs, vertices_count * sizeof(int), hipMemcpyHostToDevice)); SAFE_CALL(hipMemcpy(dev_connections, connections, vertices_count * sizeof(int), hipMemcpyHostToDevice)); SAFE_CALL(hipMemcpy(dev_data, data, vertices_count * sizeof(int), hipMemcpyHostToDevice)); SAFE_CALL(hipMemcpy(dev_outgoing_ids, outgoing_ids, edges_count * sizeof(int), hipMemcpyHostToDevice)); dim3 compute_threads(1024); dim3 compute_blocks(32*((vertices_count - 1) / compute_threads.x + 1)); for(int i = 0; i < 5; i++) { auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( SAFE_KERNEL_CALL((gather_opt), dim3(compute_blocks), dim3(compute_threads) , 0, 0, dev_ptrs, dev_connections, dev_outgoing_ids, vertices_count, dev_data, dev_result))); auto end = std::chrono::steady_clock::now(); std::chrono::duration<double> elapsed_seconds = end-start; cout << "time: " << (elapsed_seconds.count())*1000.0 << " ms" << endl; cout << "bandwidth: " << 3.0*sizeof(int)*edges_count/((elapsed_seconds.count())*1e9) << " GB/s" << endl << endl; } for(int i = 0; i < 5; i++) { auto start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( SAFE_KERNEL_CALL((gather), dim3(compute_blocks), dim3(compute_threads) , 0, 0, dev_ptrs, dev_connections, dev_outgoing_ids, vertices_count, dev_data, dev_result))); auto end = std::chrono::steady_clock::now(); std::chrono::duration<double> elapsed_seconds = end-start; cout << "time: " << (elapsed_seconds.count())*1000.0 << " ms" << endl; cout << "bandwidth: " << 3.0*sizeof(int)*edges_count/((elapsed_seconds.count())*1e9) << " GB/s" << endl << endl; } vector<int> gpu_result(edges_count); SAFE_CALL(hipMemcpy(&gpu_result[0], dev_result, edges_count * sizeof(int), hipMemcpyDeviceToHost)); vector<int> cpu_result(edges_count); cpu_gather(ptrs, connections, outgoing_ids, vertices_count, data, cpu_result); if (checkAnswers(gpu_result, cpu_result)) { printf("Test is passed\n"); } else { printf("Test is failed\n"); } // TODO 3 ? // TODO , // TODO // TODO (bonus) BFS ( ) hipFree(dev_data); hipFree(dev_ptrs); hipFree(dev_connections); hipFree(dev_result); hipFree(dev_outgoing_ids); delete[]data; delete[]ptrs; delete[]outgoing_ids; delete[]connections; return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////
6e138dc2759ab5ec95b4231ca933c1dea390b990.cu
#include <cfloat> #include <chrono> #include <vector> #include <string> #include <cuda_profiler_api.h> #include <iostream> #include "cuda_error_handling.h" using namespace std; void __global__ gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) { const long long src_id = blockIdx.x * blockDim.x + threadIdx.x; if (src_id < vertices_count) { const int first_edge_ptr = ptrs[src_id]; const int connections_count = connections[src_id]; for(register int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; // данную программу можно легко переделать во многие графовые алгоритмы, например: /* BFS int src_level = data[src_id]; int dst_level = data[dst_id]; if((src_level == current_level) && (dst_level == UNVISITED_VERTEX)) { data[dst_id] = current_level + 1; } */ /* SSSP float weight = outgoing_weights[first_edge_ptr + cur_edge]; float src_weight = data[src_id]; float dst_weight = data[dst_id]; if(dst_weight > src_weight + weight) { data[dst_id] = src_weight + weight; } */ } } } void __global__ gather_opt(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, int *result) { const long long id = (blockIdx.x*blockDim.x+threadIdx.x); const int i = id / 32; const int first_edge_ptr = ptrs[i]; const int connections_count = connections[i]; int rep = (connections_count/32)+1; for (int k = 0; k < rep; ++k) { int cur_edge = threadIdx.x%32 + (k*32); if (cur_edge < connections_count) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } } void cpu_gather(int *ptrs, int *connections, int *outgoing_ids, int vertices_count, int *data, vector<int>& result) { for (int i = 0; i < vertices_count; ++i) { const int first_edge_ptr = ptrs[i]; const int connections_count = connections[i]; for (int cur_edge = 0; cur_edge < connections_count; cur_edge++) { int dst_id = outgoing_ids[first_edge_ptr + cur_edge]; int val = data[dst_id]; result[first_edge_ptr + cur_edge] = val; } } } bool checkAnswers(const vector<int>& res1, const vector<int>& res2) { for (int i = 0; i < res1.size(); ++i) { if (res1[i] != res2[i]) { printf("%d %d %d are different\n", i, res1[i], res2[i]); return false; } } return true; } /////////////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char* argv[]) { if (argc < 2) { cout << "Incorrect number of arguments\n"; return 0; } int vertices_count = 1024*1024; int *ptrs = new int[vertices_count]; int *data = new int[vertices_count]; int *connections = new int[vertices_count]; int pos = 0; double percent = stod(argv[1]); srand(time(NULL)); for(int i = 0; i < vertices_count; i++) // TODO (bonus) граф с несколькими "большими" вершинами { ptrs[i] = pos; if ((double)rand()/RAND_MAX < percent) { connections[i] = 1000 + rand()%9000; pos += connections[i]; } else { connections[i] = 16 + rand()%32; pos += connections[i]; } data[i] = rand(); } int edges_count = pos; srand(time(NULL)); printf("Edges count %d\n", edges_count); int *outgoing_ids = new int[edges_count]; for(int i = 0; i < edges_count; i++) { outgoing_ids[i] = rand()%vertices_count; } int *dev_ptrs; int *dev_connections; int *dev_outgoing_ids; int *dev_data; int *dev_result; cudaMalloc((void**)&dev_ptrs, vertices_count*sizeof(int)); cudaMalloc((void**)&dev_connections, vertices_count*sizeof(int)); cudaMalloc((void**)&dev_data, 
vertices_count*sizeof(int)); cudaMalloc((void**)&dev_outgoing_ids, edges_count*sizeof(int)); cudaMalloc((void**)&dev_result, edges_count*sizeof(int)); SAFE_CALL(cudaMemcpy(dev_ptrs, ptrs, vertices_count * sizeof(int), cudaMemcpyHostToDevice)); SAFE_CALL(cudaMemcpy(dev_connections, connections, vertices_count * sizeof(int), cudaMemcpyHostToDevice)); SAFE_CALL(cudaMemcpy(dev_data, data, vertices_count * sizeof(int), cudaMemcpyHostToDevice)); SAFE_CALL(cudaMemcpy(dev_outgoing_ids, outgoing_ids, edges_count * sizeof(int), cudaMemcpyHostToDevice)); dim3 compute_threads(1024); dim3 compute_blocks(32*((vertices_count - 1) / compute_threads.x + 1)); for(int i = 0; i < 5; i++) { auto start = std::chrono::steady_clock::now(); SAFE_KERNEL_CALL((gather_opt<<< compute_blocks, compute_threads >>> (dev_ptrs, dev_connections, dev_outgoing_ids, vertices_count, dev_data, dev_result))); auto end = std::chrono::steady_clock::now(); std::chrono::duration<double> elapsed_seconds = end-start; cout << "time: " << (elapsed_seconds.count())*1000.0 << " ms" << endl; cout << "bandwidth: " << 3.0*sizeof(int)*edges_count/((elapsed_seconds.count())*1e9) << " GB/s" << endl << endl; } for(int i = 0; i < 5; i++) { auto start = std::chrono::steady_clock::now(); SAFE_KERNEL_CALL((gather<<< compute_blocks, compute_threads >>> (dev_ptrs, dev_connections, dev_outgoing_ids, vertices_count, dev_data, dev_result))); auto end = std::chrono::steady_clock::now(); std::chrono::duration<double> elapsed_seconds = end-start; cout << "time: " << (elapsed_seconds.count())*1000.0 << " ms" << endl; cout << "bandwidth: " << 3.0*sizeof(int)*edges_count/((elapsed_seconds.count())*1e9) << " GB/s" << endl << endl; } vector<int> gpu_result(edges_count); SAFE_CALL(cudaMemcpy(&gpu_result[0], dev_result, edges_count * sizeof(int), cudaMemcpyDeviceToHost)); vector<int> cpu_result(edges_count); cpu_gather(ptrs, connections, outgoing_ids, vertices_count, data, cpu_result); if (checkAnswers(gpu_result, cpu_result)) { printf("Test is passed\n"); } else { printf("Test is failed\n"); } // TODO какие 3 недостатка у текущей версии ядра? // TODO отпрофилировать текущую версию, сделать выводы о её производитлеьности // TODO сделать оптимизированную версию ядра // TODO (bonus) реализовать базовую версию BFS алгоритма (выделить структуры данных и реализовать сам алгоритм) cudaFree(dev_data); cudaFree(dev_ptrs); cudaFree(dev_connections); cudaFree(dev_result); cudaFree(dev_outgoing_ids); delete[]data; delete[]ptrs; delete[]outgoing_ids; delete[]connections; return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////
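The comment inside gather() notes that the same CSR traversal becomes BFS or SSSP with a different per-edge update. Below is a sketch of the level-synchronous BFS step it hints at, over the same ptrs/connections/outgoing_ids arrays (UNVISITED_VERTEX, the levels array and the d_changed flag are assumptions of this sketch; the host would relaunch it per level until d_changed stays zero):

#define UNVISITED_VERTEX (-1)

__global__ void bfs_level_step(const int *ptrs, const int *connections,
                               const int *outgoing_ids, int vertices_count,
                               int *levels, int current_level, int *d_changed)
{
    const int src_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (src_id >= vertices_count || levels[src_id] != current_level)
        return;

    const int first_edge_ptr = ptrs[src_id];
    const int connections_count = connections[src_id];
    for (int cur_edge = 0; cur_edge < connections_count; cur_edge++) {
        int dst_id = outgoing_ids[first_edge_ptr + cur_edge];
        if (levels[dst_id] == UNVISITED_VERTEX) {
            levels[dst_id] = current_level + 1;   // benign race: any writer wins
            *d_changed = 1;                       // tell the host to run another level
        }
    }
}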
bd5c7517bf6ea029566bae22d06cbdb7e03411b6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> using namespace std; int main () { int device_count; hipGetDeviceCount(&device_count); hipDeviceProp_t dp; cout << "CUDA device count: " << device_count << endl; for(int i = 0; i < device_count; i++) { hipGetDeviceProperties(&dp, i); cout << i << ": " << dp.name << " with CUDA compute compatibility " << dp.major << "." << dp.minor << endl; cout << i << ": Core clock rate = " << dp.clockRate << endl; cout << "Memory" << endl; cout << i << ": Total global memory = " << dp.totalGlobalMem / 1024 / 1024 << endl; cout << i << ": Constant memory = " << dp.totalConstMem << endl; cout << i << ": Maximum pitch = " << dp.memPitch << endl; cout << "Multiprocessors" << endl; cout << i << ": Number of streaming multiprocessors = " << dp.multiProcessorCount << endl; cout << i << ": Shared memory per block = " << dp.sharedMemPerBlock << endl; cout << i << ": Registers per block = " << dp.regsPerBlock << endl; cout << i << ": Warp size (threads per warp) = " << dp.warpSize << endl; cout << i << ": Maximum threads per block = " << dp.maxThreadsPerBlock << endl; cout << i << ": Maximum thread block dimensions = " << dp.maxThreadsDim[0] << " " << dp.maxThreadsDim[1] << " " << dp.maxThreadsDim[2] << endl; cout << i << ": Maximum grid dimensions = " << dp.maxGridSize[0] << " " << dp.maxGridSize[1] << " " << dp.maxGridSize[2] << endl; } return 0; }
bd5c7517bf6ea029566bae22d06cbdb7e03411b6.cu
#include <iostream> using namespace std; int main () { int device_count; cudaGetDeviceCount(&device_count); cudaDeviceProp dp; cout << "CUDA device count: " << device_count << endl; for(int i = 0; i < device_count; i++) { cudaGetDeviceProperties(&dp, i); cout << i << ": " << dp.name << " with CUDA compute compatibility " << dp.major << "." << dp.minor << endl; cout << i << ": Core clock rate = " << dp.clockRate << endl; cout << "Memory" << endl; cout << i << ": Total global memory = " << dp.totalGlobalMem / 1024 / 1024 << endl; cout << i << ": Constant memory = " << dp.totalConstMem << endl; cout << i << ": Maximum pitch = " << dp.memPitch << endl; cout << "Multiprocessors" << endl; cout << i << ": Number of streaming multiprocessors = " << dp.multiProcessorCount << endl; cout << i << ": Shared memory per block = " << dp.sharedMemPerBlock << endl; cout << i << ": Registers per block = " << dp.regsPerBlock << endl; cout << i << ": Warp size (threads per warp) = " << dp.warpSize << endl; cout << i << ": Maximum threads per block = " << dp.maxThreadsPerBlock << endl; cout << i << ": Maximum thread block dimensions = " << dp.maxThreadsDim[0] << " " << dp.maxThreadsDim[1] << " " << dp.maxThreadsDim[2] << endl; cout << i << ": Maximum grid dimensions = " << dp.maxGridSize[0] << " " << dp.maxGridSize[1] << " " << dp.maxGridSize[2] << endl; } return 0; }
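The loop prints the properties of every device but never selects one. When several GPUs are present, a common follow-up is to pick a device by some property before the first allocation; the sketch below uses the multiprocessor count purely as an example criterion:

#include <cuda_runtime.h>

// Pick the device with the most streaming multiprocessors and make it current.
int select_best_device()
{
    int device_count = 0, best = 0, best_sms = -1;
    cudaGetDeviceCount(&device_count);
    for (int i = 0; i < device_count; i++) {
        cudaDeviceProp dp;
        cudaGetDeviceProperties(&dp, i);
        if (dp.multiProcessorCount > best_sms) {
            best_sms = dp.multiProcessorCount;
            best = i;
        }
    }
    cudaSetDevice(best);
    return best;
}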
76af27cc94bc74d5aaa95c015f53bf0ac741ca71.hip
// !!! This is a file automatically generated by hipify!!! extern "C" { #include <hip/hip_runtime.h> #include "const.h" __global__ void pluser_process (GPU_DATA_TYPE *data) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; for (;threadId < GPU_SIZE; threadId += blockDim.x * gridDim.x) data[threadId] += 1; } }
76af27cc94bc74d5aaa95c015f53bf0ac741ca71.cu
extern "C" { #include <cuda.h> #include "const.h" __global__ void pluser_process (GPU_DATA_TYPE *data) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; for (;threadId < GPU_SIZE; threadId += blockDim.x * gridDim.x) data[threadId] += 1; } }
3d7b75bbfaf17eff464b82877df0520a92262a4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include "hj.h" #include "hj_kernels.cu" // default number of buckets chosen by build_hash_table #define NR_BUCKETS_DEFAULT 256 // prefix sum. the sum of all elements in d_src is returned // if successful; otherwise, -1 is returned. int scan(int *d_dest, int *d_src, int len, hipStream_t* Stream, int StreamID) { hipError_t res; int *d_sumbuf; // the buffer used to store sum updates across subarrays int *h_sumbuf; int sum_tot, sum_delta; int nr_blocks = 512, nr_chunks; int nr_threads_per_block = 128; int nr_elems_per_block = 2 * nr_threads_per_block; int bytes_smem = sizeof(int) * nr_elems_per_block; // step 1: partition the array into many subarrays, // each of which is scanned separately nr_chunks = len / nr_elems_per_block; nr_chunks += (len % nr_elems_per_block) ? 1 : 0; res = hipMalloc((void **)&d_sumbuf, sizeof(int) * nr_chunks); if(res != hipSuccess) { fprintf(stderr, "cudaMemAlloc(&d_sumbuf) failed\n"); return -1; } // h_sumbuf = (int *)malloc(sizeof(int) * nr_chunks); // if(!h_sumbuf) { // fprintf(stderr, "malloc() failed for h_sumbuf\n"); // hipFree(d_sumbuf); // return -1; // } // printf("scan: begin hipHostMalloc\n"); res = hipHostMalloc((void**)&h_sumbuf, sizeof(int) * nr_chunks); if(res != hipSuccess) { fprintf(stderr, "hipHostMalloc(&h_sumbuf) failed\n"); return -1; } // printf("scan: finish hipHostMalloc\n"); // printf("scan: begin prefix_sum\n"); hipLaunchKernelGGL(( prefix_sum), dim3(nr_blocks), dim3(nr_threads_per_block), bytes_smem, Stream[StreamID], d_dest, d_sumbuf, d_src, nr_chunks, len); // printf("scan: finish prefix_sum\n"); // printf("scan: begin hipDeviceSynchronize\n"); if(hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "kernel failed at prefix_sum\n"); free(h_sumbuf); hipFree(d_sumbuf); return -1; } // printf("scan: finish hipDeviceSynchronize\n"); // free(h_sumbuf); // hipFree(d_sumbuf); // step 2: update all scanned subarrays to derive the final result // res = hipMemcpy(h_sumbuf, d_sumbuf, sizeof(int) * nr_chunks, // hipMemcpyDeviceToHost); // printf("scan: begin hipMemcpyAsync\n"); res = hipMemcpyAsync(h_sumbuf, d_sumbuf, sizeof(int) * nr_chunks, hipMemcpyDeviceToHost, Stream[StreamID]); // printf("scan: finish hipMemcpyAsync\n"); if(res != hipSuccess) { fprintf(stderr, "hipMemcpy DtoH failed\n"); free(h_sumbuf); hipFree(d_sumbuf); return -1; } sum_tot = 0; sum_delta = h_sumbuf[0]; for(int i = 1; i < nr_chunks; i++) { sum_tot += sum_delta; sum_delta = h_sumbuf[i]; h_sumbuf[i] = sum_tot; } h_sumbuf[0] = 0; sum_tot += sum_delta; // res = hipMemcpy(d_sumbuf, h_sumbuf, sizeof(int) * nr_chunks, // hipMemcpyHostToDevice); // printf("scan: begin hipMemcpyAsync\n"); res = hipMemcpyAsync(d_sumbuf, h_sumbuf, sizeof(int) * nr_chunks, hipMemcpyHostToDevice, Stream[StreamID]); // printf("scan: finish hipMemcpyAsync\n"); if(res != hipSuccess) { fprintf(stderr, "hipMemcpy HtoD failed\n"); free(h_sumbuf); hipFree(d_sumbuf); return -1; } // printf("scan: begin prefix_sum_update\n"); hipLaunchKernelGGL(( prefix_sum_update), dim3(nr_blocks), dim3(nr_threads_per_block), 0, Stream[StreamID], d_dest, d_sumbuf, nr_chunks, len); // printf("scan: finish prefix_sum_update\n"); // printf("scan: begin hipDeviceSynchronize\n"); if(hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "kernel failed at prefix_sum_update\n"); free(h_sumbuf); hipFree(d_sumbuf); return -1; } // printf("scan: finish hipDeviceSynchronize\n"); // 
printf("scan: free\n"); hipFree(h_sumbuf); // printf("scan: cudafree\n"); hipFree(d_sumbuf); return sum_tot; } int build_hash_table(hash_table_t *ht, record_t *h_r, int rlen, hipStream_t* Stream, int StreamID) { int nr_blocks = 64, nr_threads_per_block = 128; int *d_hist = NULL, *d_loc = NULL; record_t *d_r = NULL; hipError_t res; int ret = 0; ht->d_rec = NULL; ht->d_idx = NULL; ht->n_records = rlen; if(!ht->n_buckets) { ht->n_buckets = NR_BUCKETS_DEFAULT; } // copy records to GPU device memory res = hipMalloc((void **)&d_r, rlen * sizeof(record_t)); if(res != hipSuccess) { fprintf(stderr, "hipMalloc(&d_r) failed\n"); ret = -1; goto failed; } // res = hipMemcpy(d_r, h_r, rlen * sizeof(record_t), hipMemcpyHostToDevice); // printf("build_hash_table: begin hipMemcpyAsync(r)\n"); res = hipMemcpyAsync(d_r, h_r, rlen * sizeof(record_t), hipMemcpyHostToDevice, Stream[StreamID]); if(res != hipSuccess) { fprintf(stderr, "hipMemcpy(r) failed\n"); ret = -1; goto failed; } // printf("build_hash_table: finish hipMemcpyAsync(r)\n"); // build histogram matrix to collect how many // records each thread generates in each bucket res = hipMalloc((void **)&d_hist, sizeof(int) * nr_blocks * nr_threads_per_block * ht->n_buckets); if(res != hipSuccess) { fprintf(stderr, "hipMalloc(&d_hist) failed\n"); ret = -1; goto failed; } hipLaunchKernelGGL(( hash_build_hist), dim3(nr_blocks), dim3(nr_threads_per_block), 0, Stream[StreamID], d_hist, d_r, rlen, ht->n_buckets); // printf("build_hash_table: finish hash_build_hist\n"); if(hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "kernel failed at hash_build_hist\n"); ret = -1; goto failed; } // prefix sum to get the offsets res = hipMalloc((void **)&d_loc, sizeof(int) * nr_blocks * nr_threads_per_block * ht->n_buckets); if(res != hipSuccess) { fprintf(stderr, "hipMalloc(&d_loc) failed\n"); ret = -1; goto failed; } // printf("build_hash_table: begin scan\n"); if(scan(d_loc, d_hist, nr_blocks * nr_threads_per_block * ht->n_buckets, Stream, StreamID) < 0) { fprintf(stderr, "scan failed\n"); ret = -1; goto failed; } // printf("build_hash_table: finish scan\n"); CUDA_FREE(d_hist); d_hist = NULL; // build the hash table res = hipMalloc((void **)&ht->d_rec, rlen * sizeof(record_t)); if(res != hipSuccess) { fprintf(stderr, "hipMalloc(&ht->d_rec) failed\n"); ret = -1; goto failed; } res = hipMalloc((void **)&ht->d_idx, (ht->n_buckets + 1) * sizeof(int)); if(res != hipSuccess) { fprintf(stderr, "hipMalloc(&ht->d_idx) failed\n"); ret = -1; goto failed; } hipLaunchKernelGGL(( hash_build), dim3(nr_blocks), dim3(nr_threads_per_block), 0, Stream[StreamID], ht->d_rec, ht->d_idx, d_r, rlen, d_loc, ht->n_buckets); if(hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "kernel failed at hash_build\n"); ret = -1; goto failed; } goto finish; failed: free_hash_table(ht); finish: CUDA_FREE(d_r); CUDA_FREE(d_hist); CUDA_FREE(d_loc); return ret; } void free_hash_table(hash_table_t *ht) { CUDA_FREE(ht->d_rec); CUDA_FREE(ht->d_idx); ht->d_rec = NULL; ht->d_idx = NULL; ht->n_records = 0; ht->n_buckets = 0; } int hash_join(record_t **h_res, int *reslen, hash_table_t *ht_r, record_t *h_s, int slen, hipStream_t* Stream, int StreamID) { hipError_t res; int ret = 0, n_results; record_t *restmp = NULL; int nr_blocks = 256, nr_threads_per_block = 128; int *d_hist = NULL, *d_loc = NULL; record_t *d_s = NULL, *d_res = NULL; // copy S to GPU device memory res = hipMalloc((void **)&d_s, slen * sizeof(record_t)); if(res != hipSuccess) { fprintf(stderr, "hipMalloc(&d_s) failed\n"); ret = -1; goto 
failed; } // printf("hash_join: begin hipMemcpyAsync\n"); // res = hipMemcpy(d_s, h_s, slen * sizeof(record_t), hipMemcpyHostToDevice); res = hipMemcpyAsync(d_s, h_s, slen * sizeof(record_t), hipMemcpyHostToDevice, Stream[StreamID]); // printf("hash_join: finish hipMemcpyAsync\n"); if(res != hipSuccess) { fprintf(stderr, "cuMemcpyHtoD(s) failed\n"); ret = -1; goto failed; } // count the number of records joined by each thread res = hipMalloc((void **)&d_hist, sizeof(int) * nr_blocks * nr_threads_per_block); if(res != hipSuccess) { fprintf(stderr, "hipMalloc(&d_hist) failed\n"); ret = -1; goto failed; } // printf("hash_join: begin hash_join_hist\n"); hipLaunchKernelGGL(( hash_join_hist), dim3(nr_blocks), dim3(nr_threads_per_block), 0, Stream[StreamID], d_hist, ht_r->d_rec, ht_r->d_idx, ht_r->n_buckets, d_s, slen); // printf("hash_join: finish hash_join_hist\n"); if(hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "kernel failed at hash_join_hist\n"); ret = -1; goto failed; } // prefix sum to get the locations res = hipMalloc((void **)&d_loc, sizeof(int) * nr_blocks * nr_threads_per_block); if(res != hipSuccess) { fprintf(stderr, "hipMalloc(&d_loc) failed\n"); ret = -1; goto failed; } // printf("hash_join: begin scan\n"); n_results = scan(d_loc, d_hist, nr_blocks * nr_threads_per_block, Stream, StreamID); if(n_results < 0) { fprintf(stderr, "scan failed\n"); ret = -1; goto failed; } // printf("hash_join: finish scan\n"); CUDA_FREE(d_hist); d_hist = NULL; if(n_results <= 0) { if(h_res) { *h_res = NULL; } if(reslen) { *reslen = 0; } goto finish; } // do hash join res = hipMalloc((void **)&d_res, n_results * sizeof(record_t)); if(res != hipSuccess) { fprintf(stderr, "hipMalloc(&d_res) failed\n"); ret = -1; goto failed; } // printf("hash_join: begin hash_join\n"); hipLaunchKernelGGL(( hash_join), dim3(nr_blocks), dim3(nr_threads_per_block), 0, Stream[StreamID], d_res, d_loc, ht_r->d_rec, ht_r->d_idx, ht_r->n_buckets, d_s, slen); // printf("hash_join: finish hash_join\n"); if(hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "kernel failed at hash_join\n"); ret = -1; goto failed; } // setting return values, if required if(h_res) { // restmp = (record_t *)malloc(n_results * sizeof(record_t)); // if(!restmp) { // fprintf(stderr, "malloc failed for h_res\n"); // ret = -1; // goto failed; // } res = hipHostMalloc((void**)&restmp, n_results * sizeof(record_t)); if(res != hipSuccess) { fprintf(stderr, "hipHostMalloc(&h_res) failed\n"); ret = -1; goto failed; } // res = hipMemcpy((void *)restmp, d_res, n_results * sizeof(record_t), // hipMemcpyDeviceToHost); // printf("hash_join: begin hipMemcpyAsync\n"); res = hipMemcpyAsync((void *)restmp, d_res, n_results * sizeof(record_t), hipMemcpyDeviceToHost, Stream[StreamID]); // printf("hash_join: finish hipMemcpyAsync\n"); if (res != hipSuccess) { printf("cudaMemcpyDtoH failed when getting join results\n"); ret = -1; goto failed; } *h_res = restmp; } if(reslen) { *reslen = n_results; } goto finish; failed: hipFree(restmp); finish: CUDA_FREE(d_s); CUDA_FREE(d_hist); CUDA_FREE(d_loc); CUDA_FREE(d_res); return ret; }
3d7b75bbfaf17eff464b82877df0520a92262a4f.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include "hj.h" #include "hj_kernels.cu" // default number of buckets chosen by build_hash_table #define NR_BUCKETS_DEFAULT 256 // prefix sum. the sum of all elements in d_src is returned // if successful; otherwise, -1 is returned. int scan(int *d_dest, int *d_src, int len, cudaStream_t* Stream, int StreamID) { cudaError_t res; int *d_sumbuf; // the buffer used to store sum updates across subarrays int *h_sumbuf; int sum_tot, sum_delta; int nr_blocks = 512, nr_chunks; int nr_threads_per_block = 128; int nr_elems_per_block = 2 * nr_threads_per_block; int bytes_smem = sizeof(int) * nr_elems_per_block; // step 1: partition the array into many subarrays, // each of which is scanned separately nr_chunks = len / nr_elems_per_block; nr_chunks += (len % nr_elems_per_block) ? 1 : 0; res = cudaMalloc((void **)&d_sumbuf, sizeof(int) * nr_chunks); if(res != cudaSuccess) { fprintf(stderr, "cudaMemAlloc(&d_sumbuf) failed\n"); return -1; } // h_sumbuf = (int *)malloc(sizeof(int) * nr_chunks); // if(!h_sumbuf) { // fprintf(stderr, "malloc() failed for h_sumbuf\n"); // cudaFree(d_sumbuf); // return -1; // } // printf("scan: begin cudaMallocHost\n"); res = cudaMallocHost((void**)&h_sumbuf, sizeof(int) * nr_chunks); if(res != cudaSuccess) { fprintf(stderr, "cudaMallocHost(&h_sumbuf) failed\n"); return -1; } // printf("scan: finish cudaMallocHost\n"); // printf("scan: begin prefix_sum\n"); prefix_sum<<<nr_blocks, nr_threads_per_block, bytes_smem, Stream[StreamID]>>>( d_dest, d_sumbuf, d_src, nr_chunks, len); // printf("scan: finish prefix_sum\n"); // printf("scan: begin cudaThreadSynchronize\n"); if(cudaThreadSynchronize() != cudaSuccess) { fprintf(stderr, "kernel failed at prefix_sum\n"); free(h_sumbuf); cudaFree(d_sumbuf); return -1; } // printf("scan: finish cudaThreadSynchronize\n"); // free(h_sumbuf); // cudaFree(d_sumbuf); // step 2: update all scanned subarrays to derive the final result // res = cudaMemcpy(h_sumbuf, d_sumbuf, sizeof(int) * nr_chunks, // cudaMemcpyDeviceToHost); // printf("scan: begin cudaMemcpyAsync\n"); res = cudaMemcpyAsync(h_sumbuf, d_sumbuf, sizeof(int) * nr_chunks, cudaMemcpyDeviceToHost, Stream[StreamID]); // printf("scan: finish cudaMemcpyAsync\n"); if(res != cudaSuccess) { fprintf(stderr, "cudaMemcpy DtoH failed\n"); free(h_sumbuf); cudaFree(d_sumbuf); return -1; } sum_tot = 0; sum_delta = h_sumbuf[0]; for(int i = 1; i < nr_chunks; i++) { sum_tot += sum_delta; sum_delta = h_sumbuf[i]; h_sumbuf[i] = sum_tot; } h_sumbuf[0] = 0; sum_tot += sum_delta; // res = cudaMemcpy(d_sumbuf, h_sumbuf, sizeof(int) * nr_chunks, // cudaMemcpyHostToDevice); // printf("scan: begin cudaMemcpyAsync\n"); res = cudaMemcpyAsync(d_sumbuf, h_sumbuf, sizeof(int) * nr_chunks, cudaMemcpyHostToDevice, Stream[StreamID]); // printf("scan: finish cudaMemcpyAsync\n"); if(res != cudaSuccess) { fprintf(stderr, "cudaMemcpy HtoD failed\n"); free(h_sumbuf); cudaFree(d_sumbuf); return -1; } // printf("scan: begin prefix_sum_update\n"); prefix_sum_update<<<nr_blocks, nr_threads_per_block, 0, Stream[StreamID]>>>(d_dest, d_sumbuf, nr_chunks, len); // printf("scan: finish prefix_sum_update\n"); // printf("scan: begin cudaThreadSynchronize\n"); if(cudaThreadSynchronize() != cudaSuccess) { fprintf(stderr, "kernel failed at prefix_sum_update\n"); free(h_sumbuf); cudaFree(d_sumbuf); return -1; } // printf("scan: finish cudaThreadSynchronize\n"); // printf("scan: free\n"); cudaFree(h_sumbuf); // printf("scan: cudafree\n"); cudaFree(d_sumbuf); return sum_tot; } int 
build_hash_table(hash_table_t *ht, record_t *h_r, int rlen, cudaStream_t* Stream, int StreamID) { int nr_blocks = 64, nr_threads_per_block = 128; int *d_hist = NULL, *d_loc = NULL; record_t *d_r = NULL; cudaError_t res; int ret = 0; ht->d_rec = NULL; ht->d_idx = NULL; ht->n_records = rlen; if(!ht->n_buckets) { ht->n_buckets = NR_BUCKETS_DEFAULT; } // copy records to GPU device memory res = cudaMalloc((void **)&d_r, rlen * sizeof(record_t)); if(res != cudaSuccess) { fprintf(stderr, "cudaMalloc(&d_r) failed\n"); ret = -1; goto failed; } // res = cudaMemcpy(d_r, h_r, rlen * sizeof(record_t), cudaMemcpyHostToDevice); // printf("build_hash_table: begin cudaMemcpyAsync(r)\n"); res = cudaMemcpyAsync(d_r, h_r, rlen * sizeof(record_t), cudaMemcpyHostToDevice, Stream[StreamID]); if(res != cudaSuccess) { fprintf(stderr, "cudaMemcpy(r) failed\n"); ret = -1; goto failed; } // printf("build_hash_table: finish cudaMemcpyAsync(r)\n"); // build histogram matrix to collect how many // records each thread generates in each bucket res = cudaMalloc((void **)&d_hist, sizeof(int) * nr_blocks * nr_threads_per_block * ht->n_buckets); if(res != cudaSuccess) { fprintf(stderr, "cudaMalloc(&d_hist) failed\n"); ret = -1; goto failed; } hash_build_hist<<<nr_blocks, nr_threads_per_block, 0, Stream[StreamID]>>>(d_hist, d_r, rlen, ht->n_buckets); // printf("build_hash_table: finish hash_build_hist\n"); if(cudaThreadSynchronize() != cudaSuccess) { fprintf(stderr, "kernel failed at hash_build_hist\n"); ret = -1; goto failed; } // prefix sum to get the offsets res = cudaMalloc((void **)&d_loc, sizeof(int) * nr_blocks * nr_threads_per_block * ht->n_buckets); if(res != cudaSuccess) { fprintf(stderr, "cudaMalloc(&d_loc) failed\n"); ret = -1; goto failed; } // printf("build_hash_table: begin scan\n"); if(scan(d_loc, d_hist, nr_blocks * nr_threads_per_block * ht->n_buckets, Stream, StreamID) < 0) { fprintf(stderr, "scan failed\n"); ret = -1; goto failed; } // printf("build_hash_table: finish scan\n"); CUDA_FREE(d_hist); d_hist = NULL; // build the hash table res = cudaMalloc((void **)&ht->d_rec, rlen * sizeof(record_t)); if(res != cudaSuccess) { fprintf(stderr, "cudaMalloc(&ht->d_rec) failed\n"); ret = -1; goto failed; } res = cudaMalloc((void **)&ht->d_idx, (ht->n_buckets + 1) * sizeof(int)); if(res != cudaSuccess) { fprintf(stderr, "cudaMalloc(&ht->d_idx) failed\n"); ret = -1; goto failed; } hash_build<<<nr_blocks, nr_threads_per_block, 0, Stream[StreamID]>>>(ht->d_rec, ht->d_idx, d_r, rlen, d_loc, ht->n_buckets); if(cudaThreadSynchronize() != cudaSuccess) { fprintf(stderr, "kernel failed at hash_build\n"); ret = -1; goto failed; } goto finish; failed: free_hash_table(ht); finish: CUDA_FREE(d_r); CUDA_FREE(d_hist); CUDA_FREE(d_loc); return ret; } void free_hash_table(hash_table_t *ht) { CUDA_FREE(ht->d_rec); CUDA_FREE(ht->d_idx); ht->d_rec = NULL; ht->d_idx = NULL; ht->n_records = 0; ht->n_buckets = 0; } int hash_join(record_t **h_res, int *reslen, hash_table_t *ht_r, record_t *h_s, int slen, cudaStream_t* Stream, int StreamID) { cudaError_t res; int ret = 0, n_results; record_t *restmp = NULL; int nr_blocks = 256, nr_threads_per_block = 128; int *d_hist = NULL, *d_loc = NULL; record_t *d_s = NULL, *d_res = NULL; // copy S to GPU device memory res = cudaMalloc((void **)&d_s, slen * sizeof(record_t)); if(res != cudaSuccess) { fprintf(stderr, "cudaMalloc(&d_s) failed\n"); ret = -1; goto failed; } // printf("hash_join: begin cudaMemcpyAsync\n"); // res = cudaMemcpy(d_s, h_s, slen * sizeof(record_t), cudaMemcpyHostToDevice); res = 
cudaMemcpyAsync(d_s, h_s, slen * sizeof(record_t), cudaMemcpyHostToDevice, Stream[StreamID]); // printf("hash_join: finish cudaMemcpyAsync\n"); if(res != cudaSuccess) { fprintf(stderr, "cuMemcpyHtoD(s) failed\n"); ret = -1; goto failed; } // count the number of records joined by each thread res = cudaMalloc((void **)&d_hist, sizeof(int) * nr_blocks * nr_threads_per_block); if(res != cudaSuccess) { fprintf(stderr, "cudaMalloc(&d_hist) failed\n"); ret = -1; goto failed; } // printf("hash_join: begin hash_join_hist\n"); hash_join_hist<<<nr_blocks, nr_threads_per_block, 0, Stream[StreamID]>>>(d_hist, ht_r->d_rec, ht_r->d_idx, ht_r->n_buckets, d_s, slen); // printf("hash_join: finish hash_join_hist\n"); if(cudaThreadSynchronize() != cudaSuccess) { fprintf(stderr, "kernel failed at hash_join_hist\n"); ret = -1; goto failed; } // prefix sum to get the locations res = cudaMalloc((void **)&d_loc, sizeof(int) * nr_blocks * nr_threads_per_block); if(res != cudaSuccess) { fprintf(stderr, "cudaMalloc(&d_loc) failed\n"); ret = -1; goto failed; } // printf("hash_join: begin scan\n"); n_results = scan(d_loc, d_hist, nr_blocks * nr_threads_per_block, Stream, StreamID); if(n_results < 0) { fprintf(stderr, "scan failed\n"); ret = -1; goto failed; } // printf("hash_join: finish scan\n"); CUDA_FREE(d_hist); d_hist = NULL; if(n_results <= 0) { if(h_res) { *h_res = NULL; } if(reslen) { *reslen = 0; } goto finish; } // do hash join res = cudaMalloc((void **)&d_res, n_results * sizeof(record_t)); if(res != cudaSuccess) { fprintf(stderr, "cudaMalloc(&d_res) failed\n"); ret = -1; goto failed; } // printf("hash_join: begin hash_join\n"); hash_join<<<nr_blocks, nr_threads_per_block, 0, Stream[StreamID]>>>(d_res, d_loc, ht_r->d_rec, ht_r->d_idx, ht_r->n_buckets, d_s, slen); // printf("hash_join: finish hash_join\n"); if(cudaThreadSynchronize() != cudaSuccess) { fprintf(stderr, "kernel failed at hash_join\n"); ret = -1; goto failed; } // setting return values, if required if(h_res) { // restmp = (record_t *)malloc(n_results * sizeof(record_t)); // if(!restmp) { // fprintf(stderr, "malloc failed for h_res\n"); // ret = -1; // goto failed; // } res = cudaMallocHost((void**)&restmp, n_results * sizeof(record_t)); if(res != cudaSuccess) { fprintf(stderr, "cudaMallocHost(&h_res) failed\n"); ret = -1; goto failed; } // res = cudaMemcpy((void *)restmp, d_res, n_results * sizeof(record_t), // cudaMemcpyDeviceToHost); // printf("hash_join: begin cudaMemcpyAsync\n"); res = cudaMemcpyAsync((void *)restmp, d_res, n_results * sizeof(record_t), cudaMemcpyDeviceToHost, Stream[StreamID]); // printf("hash_join: finish cudaMemcpyAsync\n"); if (res != cudaSuccess) { printf("cudaMemcpyDtoH failed when getting join results\n"); ret = -1; goto failed; } *h_res = restmp; } if(reslen) { *reslen = n_results; } goto finish; failed: cudaFree(restmp); finish: CUDA_FREE(d_s); CUDA_FREE(d_hist); CUDA_FREE(d_loc); CUDA_FREE(d_res); return ret; }
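// --- Appended sketch (not part of the original hj.cu) ----------------------
// scan() above enqueues cudaMemcpyAsync copies of the per-chunk partial sums
// into pinned host memory and then reads h_sumbuf on the CPU right away; the
// copy runs asynchronously on Stream[StreamID], so the host-side fix-up loop
// needs a cudaStreamSynchronize (or a blocking cudaMemcpy) in between, as
// illustrated below. The names d_partial/h_partial/n are illustrative only.
#include <cuda_runtime.h>
#include <stdio.h>

static int fetch_partial_sums(int *h_partial, const int *d_partial, int n,
                              cudaStream_t stream)
{
	cudaError_t err = cudaMemcpyAsync(h_partial, d_partial, n * sizeof(int),
	                                  cudaMemcpyDeviceToHost, stream);
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMemcpyAsync failed: %s\n", cudaGetErrorString(err));
		return -1;
	}
	// The async copy only completes in stream order; block the host here
	// before dereferencing h_partial.
	err = cudaStreamSynchronize(stream);
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaStreamSynchronize failed: %s\n", cudaGetErrorString(err));
		return -1;
	}
	return 0;
}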
0a44555ea759952824ae35d91c069fa26cebc888.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include <memory.h> #include<time.h> #pragma comment( lib, "cufft.lib" ) #define N1 128 #define N2 128 #define N3 16 #define CN3 ((int)N3/2+1) // half N3 void functf(double A[2][N1][N2][N3]); void FFT3d_GPU(double A[2][N1][N2][N3]); void main(void) { static double A[2][N1][N2][N3]; functf(A); FFT3d_GPU(A); printf("OK! \n"); } void FFT3d_GPU(double A[2][N1][N2][N3]) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); const int FFT_SIZE_R = N1 * N2 * N3 * sizeof(hipfftDoubleReal); const int FFT_SIZE_C = N1 * N2 * CN3 * sizeof(hipfftDoubleComplex); hipfftDoubleReal *h_oRealData = (hipfftDoubleReal*)malloc(FFT_SIZE_R); memset(h_oRealData, 0x00, FFT_SIZE_R); hipfftDoubleComplex *h_otestComplexData = (hipfftDoubleComplex*)malloc(FFT_SIZE_C); memset(h_otestComplexData, 0x00, FFT_SIZE_C); hipfftDoubleReal *d_iRealData; hipMalloc((void**)&d_iRealData, FFT_SIZE_R); hipMemcpy(d_iRealData, A[0], FFT_SIZE_R, hipMemcpyHostToDevice); hipfftDoubleComplex *d_oComplexData; hipMalloc((void**)&d_oComplexData, FFT_SIZE_C); hipfftDoubleReal *d_oRealData; hipMalloc((void**)&d_oRealData, FFT_SIZE_R); // /* Create a 3D FFT plan for D2Z */ printf("cufft FFT,direct transform(x-->A), A:\n"); hipEventRecord(start, 0); hipfftHandle planD2Z3D; hipfftPlan3d(&planD2Z3D, N1, N2, N3, HIPFFT_D2Z); hipfftExecD2Z(planD2Z3D, (hipfftDoubleReal*)d_iRealData, d_oComplexData); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("Kernel time: %.2f ms\n", elapsedTime); hipMemcpy(h_otestComplexData, d_oComplexData, FFT_SIZE_C, hipMemcpyDeviceToHost); hipEventRecord(start, 0); //Create a 3D FFT plan for Z2D. hipfftHandle planZ2D3D; hipfftPlan3d(&planZ2D3D, N1, N2, N3, HIPFFT_Z2D); //D2Z out of place printf("\n\nAfter 3D C2R out of place : \n"); printf("cuFFT IFFT,inverse transform(x-->A), A:\n"); // Use the CUFFT plan to transform the signal out of place. 
hipfftExecZ2D(planZ2D3D, d_oComplexData, (hipfftDoubleReal*)d_oRealData); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime1; hipEventElapsedTime(&elapsedTime1, start, stop); printf("iCufft Kernel time: %.2f ms\n", elapsedTime1); printf("total Kernel time: %.2f ms\n", elapsedTime1 + elapsedTime); hipMemcpy(h_oRealData, d_oRealData, FFT_SIZE_R, hipMemcpyDeviceToHost); // printf("Total running time: %f seconds\n", (double)(totaltime1) / CLOCKS_PER_SEC); unsigned int i, j, k; FILE *fp; fp = fopen("fft3_cuda.d", "w"); //system("cls"); // origianl data printf("file copy\n"); fprintf(fp, "the original data, AK:\n"); for (k = 0; k<N3; k++) for (j = 0; j<N2; j++) for (i = 0; i<N1; i++) fprintf(fp, "%15.9f\n", A[0][i][j][k]); fprintf(fp, "\n"); // direct transform fprintf(fp, "cufft FFT,direct transform(x-->A), A:\n"); for (k = 0; k<CN3; k++) for (j = 0; j<N2; j++) for (i = 0; i<N1; i++) fprintf(fp, "%4u,%4u,%4u:%15.9f,%16.8e\n", i, j, k, h_otestComplexData[i*N2*CN3 + j*CN3 + k].x, h_otestComplexData[i*N2*CN3 + j*CN3 + k].y); fprintf(fp, "\n"); //inverse transform fprintf(fp, "cu FFT IFFT,inverse transform(x-->A), A:\n"); for (k = 0; k<N3; k++) for (j = 0; j<N2; j++) for (i = 0; i<N1; i++) fprintf(fp, "%4u,%4u,%4u:%15.9f\n", i, j, k, h_oRealData[i*N2*N3 + j*N3 + k] / (N1*N2*N3)); fprintf(fp, "\n"); } /*****************************************************/ void functf(double A[2][N1][N2][N3]) { unsigned int i, j, k; double tti, ttj, ttk, dti, dtj, dtk, A0i, A0j, A0k, A1i, A1j, A1k; dti = 0.1; dtj = 2.0*dti; dtk = dtj; A0i = exp(-N1*dti); A0j = exp(-N2*dtj); A0k = exp(-N3*dtk); for (i = 0; i<N1; i++) { tti = (double)i*dti; A1i = exp(-tti); for (j = 0; j<N2; j++) { ttj = (double)j*dtj; A1j = exp(-ttj); for (k = 0; k<N3; k++) { ttk = (double)k*dtk; A1k = exp(-ttk); A[0][i][j][k] = (A1i + A0i / A1i)*dti*(A1j + A0j / A1j)*dtj*(A1k + A0k / A1k)*dtk; A[1][i][j][k] = 0.0; } } } }
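// --- Appended sketch (not part of the original file) -----------------------
// The file above times the forward and inverse transforms with events. The
// pattern, written with the CUDA runtime names used by the paired .cu version
// of this file, is sketched below; the elapsed time is only meaningful after
// the stop event has completed, hence the event synchronize before reading
// it. The helper name and the work() callback are illustrative only.
#include <cuda_runtime.h>

static float time_gpu_section(void (*work)(void))
{
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	cudaEventRecord(start, 0);   // marker enqueued on the default stream
	work();                      // enqueue the GPU work being measured
	cudaEventRecord(stop, 0);    // marker after the work
	cudaEventSynchronize(stop);  // wait until everything before 'stop' is done

	float ms = 0.0f;
	cudaEventElapsedTime(&ms, start, stop);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return ms;
}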
0a44555ea759952824ae35d91c069fa26cebc888.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda.h> #include <cufft.h> #include <memory.h> #include<time.h> #pragma comment( lib, "cufft.lib" ) #define N1 128 #define N2 128 #define N3 16 #define CN3 ((int)N3/2+1) // half N3 void functf(double A[2][N1][N2][N3]); void FFT3d_GPU(double A[2][N1][N2][N3]); void main(void) { static double A[2][N1][N2][N3]; functf(A); FFT3d_GPU(A); printf("OK! \n"); } void FFT3d_GPU(double A[2][N1][N2][N3]) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); const int FFT_SIZE_R = N1 * N2 * N3 * sizeof(cufftDoubleReal); const int FFT_SIZE_C = N1 * N2 * CN3 * sizeof(cufftDoubleComplex); cufftDoubleReal *h_oRealData = (cufftDoubleReal*)malloc(FFT_SIZE_R); memset(h_oRealData, 0x00, FFT_SIZE_R); cufftDoubleComplex *h_otestComplexData = (cufftDoubleComplex*)malloc(FFT_SIZE_C); memset(h_otestComplexData, 0x00, FFT_SIZE_C); cufftDoubleReal *d_iRealData; cudaMalloc((void**)&d_iRealData, FFT_SIZE_R); cudaMemcpy(d_iRealData, A[0], FFT_SIZE_R, cudaMemcpyHostToDevice); cufftDoubleComplex *d_oComplexData; cudaMalloc((void**)&d_oComplexData, FFT_SIZE_C); cufftDoubleReal *d_oRealData; cudaMalloc((void**)&d_oRealData, FFT_SIZE_R); // /* Create a 3D FFT plan for D2Z */ printf("cufft FFT,direct transform(x-->A), A:\n"); cudaEventRecord(start, 0); cufftHandle planD2Z3D; cufftPlan3d(&planD2Z3D, N1, N2, N3, CUFFT_D2Z); cufftExecD2Z(planD2Z3D, (cufftDoubleReal*)d_iRealData, d_oComplexData); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("Kernel time: %.2f ms\n", elapsedTime); cudaMemcpy(h_otestComplexData, d_oComplexData, FFT_SIZE_C, cudaMemcpyDeviceToHost); cudaEventRecord(start, 0); //Create a 3D FFT plan for Z2D. cufftHandle planZ2D3D; cufftPlan3d(&planZ2D3D, N1, N2, N3, CUFFT_Z2D); //D2Z out of place printf("\n\nAfter 3D C2R out of place : \n"); printf("cuFFT IFFT,inverse transform(x-->A), A:\n"); // Use the CUFFT plan to transform the signal out of place. 
cufftExecZ2D(planZ2D3D, d_oComplexData, (cufftDoubleReal*)d_oRealData); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime1; cudaEventElapsedTime(&elapsedTime1, start, stop); printf("iCufft Kernel time: %.2f ms\n", elapsedTime1); printf("total Kernel time: %.2f ms\n", elapsedTime1 + elapsedTime); cudaMemcpy(h_oRealData, d_oRealData, FFT_SIZE_R, cudaMemcpyDeviceToHost); // printf("Total running time: %f seconds\n", (double)(totaltime1) / CLOCKS_PER_SEC); unsigned int i, j, k; FILE *fp; fp = fopen("fft3_cuda.d", "w"); //system("cls"); // origianl data printf("file copy\n"); fprintf(fp, "the original data, AK:\n"); for (k = 0; k<N3; k++) for (j = 0; j<N2; j++) for (i = 0; i<N1; i++) fprintf(fp, "%15.9f\n", A[0][i][j][k]); fprintf(fp, "\n"); // direct transform fprintf(fp, "cufft FFT,direct transform(x-->A), A:\n"); for (k = 0; k<CN3; k++) for (j = 0; j<N2; j++) for (i = 0; i<N1; i++) fprintf(fp, "%4u,%4u,%4u:%15.9f,%16.8e\n", i, j, k, h_otestComplexData[i*N2*CN3 + j*CN3 + k].x, h_otestComplexData[i*N2*CN3 + j*CN3 + k].y); fprintf(fp, "\n"); //inverse transform fprintf(fp, "cu FFT IFFT,inverse transform(x-->A), A:\n"); for (k = 0; k<N3; k++) for (j = 0; j<N2; j++) for (i = 0; i<N1; i++) fprintf(fp, "%4u,%4u,%4u:%15.9f\n", i, j, k, h_oRealData[i*N2*N3 + j*N3 + k] / (N1*N2*N3)); fprintf(fp, "\n"); } /*****************************************************/ void functf(double A[2][N1][N2][N3]) { unsigned int i, j, k; double tti, ttj, ttk, dti, dtj, dtk, A0i, A0j, A0k, A1i, A1j, A1k; dti = 0.1; dtj = 2.0*dti; dtk = dtj; A0i = exp(-N1*dti); A0j = exp(-N2*dtj); A0k = exp(-N3*dtk); for (i = 0; i<N1; i++) { tti = (double)i*dti; A1i = exp(-tti); for (j = 0; j<N2; j++) { ttj = (double)j*dtj; A1j = exp(-ttj); for (k = 0; k<N3; k++) { ttk = (double)k*dtk; A1k = exp(-ttk); A[0][i][j][k] = (A1i + A0i / A1i)*dti*(A1j + A0j / A1j)*dtj*(A1k + A0k / A1k)*dtk; A[1][i][j][k] = 0.0; } } } }
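// --- Appended sketch (not part of the original file) -----------------------
// Minimal shape of the D2Z/Z2D round trip above, with the plan cleanup the
// original omits (it never calls cufftDestroy or frees the device buffers).
// cuFFT transforms are unnormalized, so dividing the Z2D output by N1*N2*N3
// recovers the input, which the original does when it prints h_oRealData.
// The function and parameter names here are illustrative only.
#include <cufft.h>

static int fft_roundtrip(cufftDoubleReal *d_real, cufftDoubleComplex *d_cplx,
                         int n1, int n2, int n3)
{
	cufftHandle fwd, inv;
	if (cufftPlan3d(&fwd, n1, n2, n3, CUFFT_D2Z) != CUFFT_SUCCESS)
		return -1;
	if (cufftPlan3d(&inv, n1, n2, n3, CUFFT_Z2D) != CUFFT_SUCCESS) {
		cufftDestroy(fwd);
		return -1;
	}

	// Forward real-to-complex, then inverse complex-to-real (out of place
	// with respect to d_cplx; the result lands back in d_real).
	cufftResult r1 = cufftExecD2Z(fwd, d_real, d_cplx);
	cufftResult r2 = cufftExecZ2D(inv, d_cplx, d_real);

	cufftDestroy(fwd);
	cufftDestroy(inv);
	return (r1 == CUFFT_SUCCESS && r2 == CUFFT_SUCCESS) ? 0 : -1;
}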
5791c0a01f6a84b7e7ae95e4f21e485def057839.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // default.cu // // Default implementation of curl. Can also be tweaked with launch bounds. // // omega = curl(u) // omega and u are vector fields. #ifdef CURL_LAUNCH_BOUNDS __launch_bounds__(NX_TILE*NY_TILE,4) __global__ void curl_kernel_lb(const real * __restrict__ u, real * __restrict__ omega, const real xfactor, const real yfactor, const real zfactor) #else __global__ void curl_kernel_default(const real * __restrict__ u, real * __restrict__ omega, const real xfactor, const real yfactor, const real zfactor) #endif { __shared__ real us[3][NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST]; // Local indices const int xli = threadIdx.x + NGHOST; const int yli = threadIdx.y + NGHOST; // Global indices const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST; const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST; // Z-wise iteration values real xzbehind3, xzbehind2 = u[vfidx(xi, yi, 0, 0)], xzbehind1 = u[vfidx(xi, yi, 1, 0)], xzcurrent = u[vfidx(xi, yi, 2, 0)], xzforward1 = u[vfidx(xi, yi, 3, 0)], xzforward2 = u[vfidx(xi, yi, 4, 0)], xzforward3 = u[vfidx(xi, yi, 5, 0)]; real yzbehind3, yzbehind2 = u[vfidx(xi, yi, 0, 1)], yzbehind1 = u[vfidx(xi, yi, 1, 1)], yzcurrent = u[vfidx(xi, yi, 2, 1)], yzforward1 = u[vfidx(xi, yi, 3, 1)], yzforward2 = u[vfidx(xi, yi, 4, 1)], yzforward3 = u[vfidx(xi, yi, 5, 1)]; for (int zi = NGHOST; zi < NZ + NGHOST; zi++) { // Iterate through z dimension in registers xzbehind3 = xzbehind2; xzbehind2 = xzbehind1; xzbehind1 = xzcurrent; xzcurrent = xzforward1; xzforward1 = xzforward2; xzforward2 = xzforward3; xzforward3 = u[vfidx(xi, yi, zi + 3, 0)]; yzbehind3 = yzbehind2; yzbehind2 = yzbehind1; yzbehind1 = yzcurrent; yzcurrent = yzforward1; yzforward1 = yzforward2; yzforward2 = yzforward3; yzforward3 = u[vfidx(xi, yi, zi + 3, 1)]; // Load x-y tiles to shared memory __syncthreads(); us[0][yli][xli] = xzcurrent; us[1][yli][xli] = yzcurrent; us[2][yli][xli] = u[vfidx(xi, yi, zi, 2)]; if (threadIdx.x < NGHOST) { us[1][yli][xli - NGHOST] = u[vfidx(xi - NGHOST, yi, zi, 1)]; us[1][yli][xli + NX_TILE] = u[vfidx(xi + NX_TILE, yi, zi, 1)]; us[2][yli][xli - NGHOST] = u[vfidx(xi - NGHOST, yi, zi, 2)]; us[2][yli][xli + NX_TILE] = u[vfidx(xi + NX_TILE, yi, zi, 2)]; } if (threadIdx.y < NGHOST) { us[0][yli - NGHOST][xli] = u[vfidx(xi, yi - NGHOST, zi, 0)]; us[0][yli + NY_TILE][xli] = u[vfidx(xi, yi + NY_TILE, zi, 0)]; us[2][yli - NGHOST][xli] = u[vfidx(xi, yi - NGHOST, zi, 2)]; us[2][yli + NY_TILE][xli] = u[vfidx(xi, yi + NY_TILE, zi, 2)]; } __syncthreads(); // Compute the curl real d1, d2; // zdy - ydz d2 = zfactor * fd1D(yzbehind3, yzbehind2, yzbehind1, yzforward1, yzforward2, yzforward3); d1 = yfactor * fd1D(us[2][yli - 3][xli], us[2][yli - 2][xli], us[2][yli - 1][xli], us[2][yli + 1][xli], us[2][yli + 2][xli], us[2][yli + 3][xli]); omega[vfidx(xi, yi, zi, 0)] = d1 - d2; // xdz - zdx d1 = zfactor * fd1D(xzbehind3, xzbehind2, xzbehind1, xzforward1, xzforward2, xzforward3); d2 = xfactor * fd1D(us[2][yli][xli - 3], us[2][yli][xli - 2], us[2][yli][xli - 1], us[2][yli][xli + 1], us[2][yli][xli + 2], us[2][yli][xli + 3]); omega[vfidx(xi, yi, zi, 1)] = d1 - d2; // ydx - xdy d1 = xfactor * fd1D(us[1][yli][xli - 3], us[1][yli][xli - 2], us[1][yli][xli - 1], us[1][yli][xli + 1], us[1][yli][xli + 2], us[1][yli][xli + 3]); d2 = yfactor * fd1D(us[0][yli - 3][xli], us[0][yli - 2][xli], us[0][yli - 1][xli], us[0][yli + 1][xli], us[0][yli + 2][xli], us[0][yli + 3][xli]); omega[vfidx(xi, yi, zi, 2)] = d1 - 
d2;
	}
}

#ifdef CURL_LAUNCH_BOUNDS
void curl_lb(vf3dgpu &u, vf3dgpu &omega)
{
	hipLaunchKernelGGL((curl_kernel_lb), dim3(xy_tile.nblocks), dim3(xy_tile.nthreads), 0, 0,
	    u.mem(), omega.mem(), 1.0/dx, 1.0/dy, 1.0/dz);
}
#else
void curl_default(vf3dgpu &u, vf3dgpu &omega)
{
	hipLaunchKernelGGL((curl_kernel_default), dim3(xy_tile.nblocks), dim3(xy_tile.nthreads), 0, 0,
	    u.mem(), omega.mem(), 1.0/dx, 1.0/dy, 1.0/dz);
}
#endif
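// --- Appended sketch (not part of the original file) -----------------------
// The kernel above stages an x-y tile of u in shared memory with NGHOST ghost
// cells on each side: every thread stores its own cell, and only the threads
// within NGHOST of the tile edge fetch the halo rows/columns. A stripped-down
// version of that pattern is sketched below; TILE_X, TILE_Y, GHOST, the plain
// row-major indexing, and the assumption that the input already carries GHOST
// cells of padding (so no bounds checks are needed) are all illustrative.
#define TILE_X 32
#define TILE_Y 8
#define GHOST  3

// Launch with blockDim = (TILE_X, TILE_Y) and a grid exactly covering the
// interior of an nx-wide padded array.
__global__ void halo_tile_demo(const float *in, float *out, int nx)
{
	__shared__ float tile[TILE_Y + 2 * GHOST][TILE_X + 2 * GHOST];

	int gx = blockIdx.x * TILE_X + threadIdx.x + GHOST;  // global interior x
	int gy = blockIdx.y * TILE_Y + threadIdx.y + GHOST;  // global interior y
	int lx = threadIdx.x + GHOST;                        // local tile x
	int ly = threadIdx.y + GHOST;                        // local tile y

	tile[ly][lx] = in[gy * nx + gx];
	if (threadIdx.x < GHOST) {                           // left/right halos
		tile[ly][lx - GHOST]  = in[gy * nx + gx - GHOST];
		tile[ly][lx + TILE_X] = in[gy * nx + gx + TILE_X];
	}
	if (threadIdx.y < GHOST) {                           // top/bottom halos
		tile[ly - GHOST][lx]  = in[(gy - GHOST) * nx + gx];
		tile[ly + TILE_Y][lx] = in[(gy + TILE_Y) * nx + gx];
	}
	__syncthreads();

	// Trivial x-derivative as a stand-in for the fd1D stencils above.
	out[gy * nx + gx] = 0.5f * (tile[ly][lx + 1] - tile[ly][lx - 1]);
}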
5791c0a01f6a84b7e7ae95e4f21e485def057839.cu
// default.cu // // Default implementation of curl. Can also be tweaked with launch bounds. // // omega = curl(u) // omega and u are vector fields. #ifdef CURL_LAUNCH_BOUNDS __launch_bounds__(NX_TILE*NY_TILE,4) __global__ void curl_kernel_lb(const real * __restrict__ u, real * __restrict__ omega, const real xfactor, const real yfactor, const real zfactor) #else __global__ void curl_kernel_default(const real * __restrict__ u, real * __restrict__ omega, const real xfactor, const real yfactor, const real zfactor) #endif { __shared__ real us[3][NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST]; // Local indices const int xli = threadIdx.x + NGHOST; const int yli = threadIdx.y + NGHOST; // Global indices const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST; const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST; // Z-wise iteration values real xzbehind3, xzbehind2 = u[vfidx(xi, yi, 0, 0)], xzbehind1 = u[vfidx(xi, yi, 1, 0)], xzcurrent = u[vfidx(xi, yi, 2, 0)], xzforward1 = u[vfidx(xi, yi, 3, 0)], xzforward2 = u[vfidx(xi, yi, 4, 0)], xzforward3 = u[vfidx(xi, yi, 5, 0)]; real yzbehind3, yzbehind2 = u[vfidx(xi, yi, 0, 1)], yzbehind1 = u[vfidx(xi, yi, 1, 1)], yzcurrent = u[vfidx(xi, yi, 2, 1)], yzforward1 = u[vfidx(xi, yi, 3, 1)], yzforward2 = u[vfidx(xi, yi, 4, 1)], yzforward3 = u[vfidx(xi, yi, 5, 1)]; for (int zi = NGHOST; zi < NZ + NGHOST; zi++) { // Iterate through z dimension in registers xzbehind3 = xzbehind2; xzbehind2 = xzbehind1; xzbehind1 = xzcurrent; xzcurrent = xzforward1; xzforward1 = xzforward2; xzforward2 = xzforward3; xzforward3 = u[vfidx(xi, yi, zi + 3, 0)]; yzbehind3 = yzbehind2; yzbehind2 = yzbehind1; yzbehind1 = yzcurrent; yzcurrent = yzforward1; yzforward1 = yzforward2; yzforward2 = yzforward3; yzforward3 = u[vfidx(xi, yi, zi + 3, 1)]; // Load x-y tiles to shared memory __syncthreads(); us[0][yli][xli] = xzcurrent; us[1][yli][xli] = yzcurrent; us[2][yli][xli] = u[vfidx(xi, yi, zi, 2)]; if (threadIdx.x < NGHOST) { us[1][yli][xli - NGHOST] = u[vfidx(xi - NGHOST, yi, zi, 1)]; us[1][yli][xli + NX_TILE] = u[vfidx(xi + NX_TILE, yi, zi, 1)]; us[2][yli][xli - NGHOST] = u[vfidx(xi - NGHOST, yi, zi, 2)]; us[2][yli][xli + NX_TILE] = u[vfidx(xi + NX_TILE, yi, zi, 2)]; } if (threadIdx.y < NGHOST) { us[0][yli - NGHOST][xli] = u[vfidx(xi, yi - NGHOST, zi, 0)]; us[0][yli + NY_TILE][xli] = u[vfidx(xi, yi + NY_TILE, zi, 0)]; us[2][yli - NGHOST][xli] = u[vfidx(xi, yi - NGHOST, zi, 2)]; us[2][yli + NY_TILE][xli] = u[vfidx(xi, yi + NY_TILE, zi, 2)]; } __syncthreads(); // Compute the curl real d1, d2; // zdy - ydz d2 = zfactor * fd1D(yzbehind3, yzbehind2, yzbehind1, yzforward1, yzforward2, yzforward3); d1 = yfactor * fd1D(us[2][yli - 3][xli], us[2][yli - 2][xli], us[2][yli - 1][xli], us[2][yli + 1][xli], us[2][yli + 2][xli], us[2][yli + 3][xli]); omega[vfidx(xi, yi, zi, 0)] = d1 - d2; // xdz - zdx d1 = zfactor * fd1D(xzbehind3, xzbehind2, xzbehind1, xzforward1, xzforward2, xzforward3); d2 = xfactor * fd1D(us[2][yli][xli - 3], us[2][yli][xli - 2], us[2][yli][xli - 1], us[2][yli][xli + 1], us[2][yli][xli + 2], us[2][yli][xli + 3]); omega[vfidx(xi, yi, zi, 1)] = d1 - d2; // ydx - xdy d1 = xfactor * fd1D(us[1][yli][xli - 3], us[1][yli][xli - 2], us[1][yli][xli - 1], us[1][yli][xli + 1], us[1][yli][xli + 2], us[1][yli][xli + 3]); d2 = yfactor * fd1D(us[0][yli - 3][xli], us[0][yli - 2][xli], us[0][yli - 1][xli], us[0][yli + 1][xli], us[0][yli + 2][xli], us[0][yli + 3][xli]); omega[vfidx(xi, yi, zi, 2)] = d1 - d2; } } #ifdef CURL_LAUNCH_BOUNDS void curl_lb(vf3dgpu &u, vf3dgpu &omega) { 
	curl_kernel_lb<<<xy_tile.nblocks, xy_tile.nthreads>>>(u.mem(), omega.mem(),
	    1.0/dx, 1.0/dy, 1.0/dz);
}
#else
void curl_default(vf3dgpu &u, vf3dgpu &omega)
{
	curl_kernel_default<<<xy_tile.nblocks, xy_tile.nthreads>>>(u.mem(), omega.mem(),
	    1.0/dx, 1.0/dy, 1.0/dz);
}
#endif
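// --- Appended sketch (not part of the original file) -----------------------
// The other half of the curl kernel's data movement: the z direction is
// walked in registers, shifting a window of planes each iteration so every
// element of u is read from global memory only once. A minimal illustration
// with a 3-point window and a plane-major layout (both illustrative, not the
// repo's vfidx() layout or its 7-point window) is given below.
#define DEMO_NX 64
#define DEMO_NY 64
#define DEMO_NZ 64

__global__ void zmarch_demo(const float *u, float *out)
{
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	if (x >= DEMO_NX || y >= DEMO_NY)
		return;

	// Window of three z-planes held in registers: behind, current, forward.
	float behind  = 0.0f;
	float current = u[(0 * DEMO_NY + y) * DEMO_NX + x];
	float forward = u[(1 * DEMO_NY + y) * DEMO_NX + x];

	for (int z = 1; z < DEMO_NZ - 1; ++z) {
		// Rotate the window instead of reloading three values per step.
		behind  = current;
		current = forward;
		forward = u[((z + 1) * DEMO_NY + y) * DEMO_NX + x];

		// Central difference in z as a stand-in for the zfactor*fd1D terms.
		out[(z * DEMO_NY + y) * DEMO_NX + x] = 0.5f * (forward - behind);
	}
}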
4fd208d89f63ccefdfeec66ede267c7c90fa89fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* standard matrix mult */ #include <iostream> #include <math.h> __global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) { int ROW = blockIdx.y*blockDim.y+threadIdx.y; int COL = blockIdx.x*blockDim.x+threadIdx.x; int stride = blockDim.x * gridDim.x; float tmpSum = 0; if (ROW < N && COL < N) { // each thread computes one element of the block sub-matrix for (int i = 0; i < N; i++) { tmpSum += A[ROW * N + i] * B[i * N + COL]; } } C[ROW * N + COL] = tmpSum; } void matrixMultiplication(float *A, float *B, float *C, int N){ // declare the number of blocks per grid and the number of threads // per block // use 1 to 512 threads per block dim3 threadsPerBlock(N, N); dim3 blocksPerGrid(1, 1); if (N*N > 1024){ threadsPerBlock.x = 1024; threadsPerBlock.y = 1; blocksPerGrid.x = ceil(double(N)/double(threadsPerBlock.x)); blocksPerGrid.y = ceil(double(N)/double(threadsPerBlock.y)); } hipLaunchKernelGGL(( matrixMultiplicationKernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, A, B, C, N); } int main(int argc, char* argv[]) { if(argc < 2) std::cout << "Needs a dimension parameter.\n"; int N = atoi(argv[1]); bool output = atoi(argv[2]); float* A; hipError_t result = hipMallocManaged(&A, N*N*sizeof(float)); if( result != hipSuccess) { throw std::runtime_error("Failed allocation."); } float* B; result = hipMallocManaged(&B, N*N*sizeof(float)); if( result != hipSuccess) { throw std::runtime_error("Failed allocation."); } float* C; result = hipMallocManaged(&C, N*N*sizeof(float)); if( result != hipSuccess) { throw std::runtime_error("Failed allocation."); } for(int i=0; i < N*N; ++i) { A[i] = 1.2345; B[i] = 1.2345; C[i] = 0; } // if output set to 1, display A and B if(output) { for(int i = 0; i < N*N; ++i) { if (i%N == 0) std::cout << "\n"; std::cout << A[i] << " "; } for(int i = 0; i < N*N; ++i) { if (i%N == 0) std::cout << "\n"; std::cout << B[i] << " "; } } matrixMultiplication(A, B, C, N); //matrixMultiplication(d_A.getData(), d_B.getData(), d_C.getData(), N); // simpleMatMulKernell<<<1,256>>>(A,B,C,w); hipDeviceSynchronize(); // if output set to 1, show C after mult if(output) { for(int i =0; i < N*N; ++i) { if (i%N == 0) std::cout << "\n"; std::cout << C[i] << " "; } } std::cout << "\nC[0] : " << C[0] << "\n"; hipFree(A); hipFree(B); hipFree(C); return 0; }
4fd208d89f63ccefdfeec66ede267c7c90fa89fa.cu
/* standard matrix mult */ #include <iostream> #include <math.h> __global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) { int ROW = blockIdx.y*blockDim.y+threadIdx.y; int COL = blockIdx.x*blockDim.x+threadIdx.x; int stride = blockDim.x * gridDim.x; float tmpSum = 0; if (ROW < N && COL < N) { // each thread computes one element of the block sub-matrix for (int i = 0; i < N; i++) { tmpSum += A[ROW * N + i] * B[i * N + COL]; } } C[ROW * N + COL] = tmpSum; } void matrixMultiplication(float *A, float *B, float *C, int N){ // declare the number of blocks per grid and the number of threads // per block // use 1 to 512 threads per block dim3 threadsPerBlock(N, N); dim3 blocksPerGrid(1, 1); if (N*N > 1024){ threadsPerBlock.x = 1024; threadsPerBlock.y = 1; blocksPerGrid.x = ceil(double(N)/double(threadsPerBlock.x)); blocksPerGrid.y = ceil(double(N)/double(threadsPerBlock.y)); } matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(A, B, C, N); } int main(int argc, char* argv[]) { if(argc < 2) std::cout << "Needs a dimension parameter.\n"; int N = atoi(argv[1]); bool output = atoi(argv[2]); float* A; cudaError_t result = cudaMallocManaged(&A, N*N*sizeof(float)); if( result != cudaSuccess) { throw std::runtime_error("Failed allocation."); } float* B; result = cudaMallocManaged(&B, N*N*sizeof(float)); if( result != cudaSuccess) { throw std::runtime_error("Failed allocation."); } float* C; result = cudaMallocManaged(&C, N*N*sizeof(float)); if( result != cudaSuccess) { throw std::runtime_error("Failed allocation."); } for(int i=0; i < N*N; ++i) { A[i] = 1.2345; B[i] = 1.2345; C[i] = 0; } // if output set to 1, display A and B if(output) { for(int i = 0; i < N*N; ++i) { if (i%N == 0) std::cout << "\n"; std::cout << A[i] << " "; } for(int i = 0; i < N*N; ++i) { if (i%N == 0) std::cout << "\n"; std::cout << B[i] << " "; } } matrixMultiplication(A, B, C, N); //matrixMultiplication(d_A.getData(), d_B.getData(), d_C.getData(), N); // simpleMatMulKernell<<<1,256>>>(A,B,C,w); cudaDeviceSynchronize(); // if output set to 1, show C after mult if(output) { for(int i =0; i < N*N; ++i) { if (i%N == 0) std::cout << "\n"; std::cout << C[i] << " "; } } std::cout << "\nC[0] : " << C[0] << "\n"; cudaFree(A); cudaFree(B); cudaFree(C); return 0; }
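// --- Appended sketch (not part of the original file) -----------------------
// The kernel above guards the dot-product loop with (ROW < N && COL < N) but
// performs the store to C outside that guard, so when the 1024x1 block grid
// overshoots N the out-of-range threads write past the end of C (and the
// stride variable is never used). A minimal corrected form of the same
// kernel, offered as an illustration rather than a drop-in replacement:
__global__ void matrixMultiplicationKernelGuarded(const float *A, const float *B,
                                                  float *C, int N)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;

	if (row < N && col < N) {
		float sum = 0.0f;
		for (int k = 0; k < N; ++k)
			sum += A[row * N + k] * B[k * N + col];
		C[row * N + col] = sum;  // store only from in-range threads
	}
}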
f8faf1adf23723c0dffa29c20aac8167270eb950.hip
// !!! This is a file automatically generated by hipify!!! // Includes, system #include <errno.h> #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <string.h> #include <math.h> #include <assert.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/types.h> #include <unistd.h> #include <errno.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <hip/hip_vector_types.h> // includes, kernels #include <common.cu> #include <mummergpu.h> #include <mummergpu_kernel.cu> int USE_PRINT_KERNEL = 1; #define BREATHING_ROOM (16 * 1024 * 1024) #define BASES_PER_TREE_PAGE 8388608 //#define BASES_PER_TREE_PAGE 7000000 #define BLOCKSIZE 256 unsigned int cuda_calls = 0; void trap_dbg() { fprintf(stderr, "Trapped\n"); } #define CUDA_SAFE_CALL( call) do { \ cuda_calls++; \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \ __FILE__, __LINE__, err, hipGetErrorString( err) ); \ trap_dbg(); \ exit(EXIT_FAILURE); \ } } while (0) # define CU_SAFE_CALL_NO_SYNC( call ) do { \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \ err, __FILE__, __LINE__ ); \ exit(EXIT_FAILURE); \ } } while (0) # define CUT_DEVICE_INIT_DRV(cuDevice) do { \ cuDevice = 0; \ int deviceCount = 0; \ hipError_t err = hipInit(0); \ if (hipSuccess == err) \ CU_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); \ if (deviceCount == 0) { \ fprintf(stderr, "There is no device.\n"); \ exit(EXIT_FAILURE); \ } \ int dev; \ for (dev = 0; dev < deviceCount; ++dev) { \ int major, minor; \ CU_SAFE_CALL_NO_SYNC(hipDeviceComputeCapability(&major, &minor, dev));\ if (major >= 1) \ break; \ } \ if (dev == deviceCount) { \ fprintf(stderr, "There is no device supporting CUDA.\n"); \ exit(EXIT_FAILURE); \ } \ else \ CU_SAFE_CALL_NO_SYNC(hipDeviceGet(&cuDevice, dev)); \ } while (0) unsigned int num_bind_tex_calls = 0; #define BIND_TEX(offset, tex, arr, desc, len) do { \ CUDA_SAFE_CALL(hipBindTexture(offset, tex, arr, desc, len)); \ ++num_bind_tex_calls; \ } while(0) #define BIND_TEX_ARRAY(tex, arr, desc) do { \ CUDA_SAFE_CALL(hipBindTextureToArray(tex, arr, desc)); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC(ptr, size) do { \ hipMalloc(ptr, size); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) do { \ hipMallocPitch(ptr, out_pitch, rowsize, numrows); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) do { \ hipMallocArray(ptr, desc, pitch, rows); \ ++num_bind_tex_calls; \ } while(0) #include "../../timing.h" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold(MatchResults* results, char* refstr, char* queries, int* queryAddrs, int* queryLengths, PixelOfNode* nodeTexture, PixelOfChildren* childrenTexture, int numQueries, int mismatch_length, int rc); extern "C" void getReferenceString(const char * filename, char** refstr, size_t* reflen); extern "C" void createTreeTexture(const char * filename, PixelOfNode** nodeTexture, PixelOfChildren** childrenTexture, unsigned int* width, unsigned int* node_height, unsigned int* children_height, AuxiliaryNodeData** aux_data, int* num_match_coords, int min_match_len, Statistics* statistics, const char * dotfilename, const char * texfilename); extern "C" void getQueriesTexture(int qfile, char** queryTexture, size_t* queryLength, int** queryAddrs, 
char*** queryNames, int** queryLengths, unsigned int* numQueries, unsigned int* num_match_coords, unsigned int device_memory_avail, int min_match_length, bool rc); extern "C" int lookupNumLeaves(ReferencePage * page, TextureAddress addr); void printAlignments(ReferencePage* page, Alignment* alignments, char* query, int qrylen, TextureAddress nodeid, int qrypos, int edge_depth, int min_match, bool rc, bool forwardcoordinates); int countLeafNodes(int nodeid); extern "C" void mapQueriesEndToEnd(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* h_alignments, unsigned int numAligments); extern "C" int createReference(const char* fromFile, Reference* ref) { if (!fromFile || !ref) return -1; TIMESTAMP(t0); getReferenceString(fromFile, &(ref->str), &(ref->len)); TIMESTAMP(t1); ref->t_load_from_disk += ELAPSED(t0, t1); return 0; } extern "C" int destroyReference(Reference* ref) { free(ref->h_node_tex_array); free(ref->h_children_tex_array); free(ref->str); #if REORDER_REF free(ref->h_ref_array); #endif free(ref->aux_data); #if TREE_ACCESS_HISTOGRAM free(ref->h_node_hist); free(ref->h_child_hist); #endif ref->str = NULL; ref->len = 0; return 0; } extern "C" int createQuerySet(const char* fromFile, QuerySet* queries) { fprintf(stderr, "Opening %s...\n", fromFile); int qfile = open(fromFile, O_RDONLY); if (qfile == -1) { fprintf(stderr, "Can't open %s: %d\n", fromFile, errno); exit (1); } queries->qfile = qfile; return 0; } extern "C" int destroyQuerySet(QuerySet* queries) { if (queries->qfile) close(queries->qfile); return 0; } extern "C" void printStringForError(int err) { } extern "C" int createMatchContext(Reference* ref, QuerySet* queries, MatchResults* matches, bool on_cpu, int min_match_length, char* stats_file, bool reverse, bool forwardreverse, bool forwardcoordinates, bool showQueryLength, char* dotfilename, char* texfilename, MatchContext* ctx) { ctx->queries = queries; ctx->ref = ref; ctx->full_ref = ref->str; ctx->full_ref_len = ref->len; ctx->on_cpu = on_cpu; ctx->min_match_length = min_match_length; ctx->stats_file = stats_file; ctx->reverse = reverse; ctx->forwardreverse = forwardreverse; ctx->forwardcoordinates = forwardcoordinates; ctx->show_query_length = showQueryLength; ctx->dotfilename = dotfilename; ctx->texfilename = texfilename; return 0; } extern "C" int destroyMatchContext(MatchContext* ctx) { free(ctx->full_ref); //destroyReference(ctx->ref); destroyQuerySet(ctx->queries); return 0; } void buildReferenceTexture(Reference* ref, char* full_ref, size_t begin, size_t end, int min_match_len, char* dotfilename, char* texfilename, Statistics* statistics, bool unified) { fprintf(stderr, "Building reference texture...\n"); PixelOfNode* nodeTexture = NULL; PixelOfChildren * childrenTexture = NULL; unsigned int width = 0; unsigned int node_height = 0; unsigned int children_height = 0; AuxiliaryNodeData* aux_data = NULL; int num_nodes; TIMESTAMP(t0); ref->len = end - begin + 3; if (unified) { CUDA_SAFE_CALL(hipMallocManaged(&ref->str, ref->len)); } else { ref->str = (char*)malloc(ref->len); } ref->str[0] = 's'; strncpy(ref->str + 1, full_ref + begin, ref->len - 3); strcpy(ref->str + ref->len - 2, "$"); TIMESTAMP(t1); statistics->t_ref_from_disk += ELAPSED(t0, t1); createTreeTexture(ref->str, &nodeTexture, &childrenTexture, &width, &node_height, &children_height, &aux_data, &num_nodes, min_match_len, statistics, dotfilename, texfilename); ref->h_node_tex_array = nodeTexture; ref->h_children_tex_array = childrenTexture; ref->tex_width = 
width; ref->tex_node_height = node_height; ref->tex_children_height = children_height; #if TREE_ACCESS_HISTOGRAM ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int)); ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int)); #endif ref->aux_data = aux_data; ref->num_nodes = num_nodes; ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) + (width * children_height * sizeof(PixelOfChildren)); fprintf(stderr, "This tree will need %lu bytes on the board\n", ref->bytes_on_board); #if REORDER_REF TIMESTAMP(t0); unsigned int refpitch = ref->pitch = 65536; int numrows = ceil(ref->len / ((float)refpitch)); int blocksize = 4; numrows += blocksize; int refstrsize = numrows * refpitch; ref->h_ref_array = (char *) malloc(refstrsize); ref->bytes_on_board += refstrsize; fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize); int z_max = numrows * refpitch; for (int z = 0; z < z_max; z++) { ref->h_ref_array[z] = 'Z'; } int x, y; int maxx = 0, maxy = 0; size_t reflen = ref->len; char* refstr = ref->str; int block_dim = refpitch * blocksize; for (int i = 0; i < reflen; i++) { int bigx = i % (block_dim); // ref string reorder int bigy = i / (block_dim); y = bigy * blocksize + bigx % blocksize; x = bigx / blocksize; // printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]); assert(x < refpitch); assert(y < numrows); ref->h_ref_array[y*refpitch+x] = refstr[i]; if (x > maxx) { maxx = x; } if (y > maxy) { maxy = y; } } if ((maxx >= refpitch) || (maxy >= numrows)) { fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n", maxx, refpitch, maxy, numrows); exit(1); } TIMETAMP(t1); if (statistics) statistics->t_reorder_ref_str += ELAPSED(t0, t1); #else fprintf(stderr, "The refstr requires %lu bytes\n", ref->len); ref->bytes_on_board += ref->len; #endif } void boardMemory(size_t * free_mem, size_t * total_mem) { // The emulator doesn't allow calls to cuMemGetInfo #ifdef __DEVICE_EMULATION__ *free_mem = 512*1024*1024; *total_mem = 768*1024*1024; #else CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem)); #endif } void loadReferenceTexture(MatchContext* ctx, bool unified) { Reference* ref = ctx->ref; int numrows = ceil(ref->len / ((float)ref->pitch)); int blocksize = 4; numrows += blocksize; hipChannelFormatDesc refTextureDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindSigned); if (!ctx->on_cpu) { TIMESTAMP(t0); #if REFTEX #if REORDER_REF CUDA_MALLOC_ARRAY((hipArray**)(&ref->d_ref_array), &refTextureDesc, ref->pitch, numrows); CUDA_SAFE_CALL(hipMemcpyToArray( (hipArray*)(ref->d_ref_array), 0, 0, ref->h_ref_array, numrows*ref->pitch, hipMemcpyHostToDevice)); reftex.addressMode[0] = hipAddressModeClamp; reftex.addressMode[1] = hipAddressModeClamp; reftex.filterMode = hipFilterModePoint; reftex.normalized = false; BIND_TEX_ARRAY(reftex, (hipArray*)ref->d_ref_array, refTextureDesc); ctx->ref->bytes_on_board += numrows * ref->pitch; #else CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len); CUDA_SAFE_CALL(hipMemcpy( (void*)(ref->d_ref_array), ref->str, ref->len, hipMemcpyHostToDevice) ); reftex.addressMode[0] = hipAddressModeClamp; reftex.filterMode = hipFilterModePoint; reftex.normalized = false; // access with normalized texture coordinates hipChannelFormatDesc refDesc = hipCreateChannelDesc(8,0,0,0, hipChannelFormatKindUnsigned); BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len); ctx->ref->bytes_on_board += ref->len; #endif #else #if REORDER_REF size_t refpitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_ref_array), &refpitch, 
ref->pitch * sizeof(char), numrows); CUDA_SAFE_CALL( hipMemcpy2D((ref->d_ref_array), refpitch, ref->h_ref_array, ref->pitch , ref->pitch * sizeof(char), numrows, hipMemcpyHostToDevice)); ctx->ref->bytes_on_board += numrows * ref->pitch; #else if (unified) { ref->d_ref_array = ref->str; } else { CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len); CUDA_SAFE_CALL( hipMemcpy( (void*)(ref->d_ref_array), ref->str, ref->len, hipMemcpyHostToDevice) ); } ctx->ref->bytes_on_board += ref->len; #endif #endif TIMESTAMP(t1); ctx->statistics.t_ref_str_to_board += ELAPSED(t0, t1); } else { ref->d_ref_array = NULL; } } void unloadReferenceString(Reference* ref) { #if REFTEX CUDA_SAFE_CALL(hipUnbindTexture( reftex ) ); #endif #if REORDER_REF && REFTEX CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_ref_array))); #else CUDA_SAFE_CALL(hipFree((ref->d_ref_array))); #endif ref->d_ref_array = NULL; } void unloadReferenceTree(MatchContext* ctx) { Reference* ref = ctx->ref; #if REORDER_TREE // Unload nodetex #if NODETEX CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) ); CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_node_tex_array))); #else CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array)); #endif ref->d_node_tex_array = NULL; // Unload childrentex if (ref->d_children_tex_array) { #if CHILDTEX CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) ); CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_children_tex_array))); #else CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array)); #endif } ref->d_children_tex_array = NULL; #else #if NODETEX CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) ); #endif CUDA_SAFE_CALL(hipFree(ref->d_node_tex_array)); ref->d_node_tex_array = NULL; // Unload childrentex if (ref->d_children_tex_array) { #if CHILDTEX CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) ); #endif CUDA_SAFE_CALL(hipFree(ref->d_children_tex_array)); ref->d_children_tex_array = NULL; } #endif #if TREE_ACCESS_HISTOGRAM CUDA_SAFE_CALL(hipFree(ref->d_node_hist)); ref->d_node_hist = NULL; CUDA_SAFE_CALL(hipFree(ref->d_child_hist)); ref->d_child_hist = NULL; #endif } //loads a tree and text for [begin, end) in the reference void loadReference(MatchContext* ctx, bool unified) { Reference* ref = ctx->ref; ref->bytes_on_board = 0; loadReferenceTexture(ctx, unified); if (!ctx->on_cpu) { TIMESTAMP(t0); // node texels ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode)); // children texels ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren); #if REORDER_TREE #if NODETEX hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned); CUDA_MALLOC_ARRAY( (hipArray**)(&ref->d_node_tex_array), &nodeTextureDesc, ref->tex_width, ref->tex_node_height ); CUDA_SAFE_CALL( hipMemcpyToArray( (hipArray*)(ref->d_node_tex_array), 0, 0, ref->h_node_tex_array, ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode), hipMemcpyHostToDevice)); nodetex.addressMode[0] = hipAddressModeClamp; nodetex.addressMode[1] = hipAddressModeClamp; nodetex.filterMode = hipFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates BIND_TEX_ARRAY(nodetex, (hipArray*)ref->d_node_tex_array, nodeTextureDesc); #else size_t nodepitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_node_tex_array), &nodepitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_node_height ); CUDA_SAFE_CALL( hipMemcpy2D((ref->d_node_tex_array), nodepitch, ref->h_node_tex_array, nodepitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_node_height, hipMemcpyHostToDevice)); #endif if 
(ref->tex_children_height) { #if CHILDTEX hipChannelFormatDesc childrenTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned); CUDA_MALLOC_ARRAY( (hipArray**)(&ref->d_children_tex_array), &childrenTextureDesc, ref->tex_width, ref->tex_children_height ); CUDA_SAFE_CALL( hipMemcpyToArray((hipArray*)(ref->d_children_tex_array), 0, 0, ref->h_children_tex_array, ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren), hipMemcpyHostToDevice)); childrentex.addressMode[0] = hipAddressModeClamp; childrentex.addressMode[1] = hipAddressModeClamp; childrentex.filterMode = hipFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates BIND_TEX_ARRAY(childrentex, (hipArray*)(ref->d_children_tex_array), childrenTextureDesc); #else size_t childpitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_children_tex_array), &childpitch, ref->tex_width * sizeof(PixelOfChildren), ref->tex_children_height ); CUDA_SAFE_CALL( hipMemcpy2D((ref->d_children_tex_array), childpitch, ref->h_children_tex_array, childpitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_children_height, hipMemcpyHostToDevice)); #endif } #if TREE_ACCESS_HISTOGRAM // node hist ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_node_hist), ref->tex_width * ref->tex_node_height *sizeof(int)); CUDA_SAFE_CALL( hipMemset((ref->d_node_hist),0, ref->tex_width * ref->tex_node_height * sizeof(int))); if (ref->tex_children_height) { // children hist ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int); fprintf(stderr, "after child_hist ref->bytes_on_board:%lu\n", ref->bytes_on_board); CUDA_MALLOC( (void**)(&ref->d_child_hist), ref->tex_width * ref->tex_children_height *sizeof(int)); CUDA_SAFE_CALL( hipMemset((ref->d_child_hist),0, ref->tex_width * ref->tex_children_height * sizeof(int))); } #endif #else // NO TREE REORDERING // Node tex, 1-dimensional CUDA_MALLOC( (void**)(&ref->d_node_tex_array), ref->tex_node_height * sizeof(PixelOfNode)); CUDA_SAFE_CALL( hipMemcpy( (ref->d_node_tex_array), ref->h_node_tex_array, ref->tex_node_height * sizeof(PixelOfNode), hipMemcpyHostToDevice)); #if NODETEX hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned); nodetex.addressMode[0] = hipAddressModeClamp; nodetex.filterMode = hipFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc, ref->tex_node_height* sizeof(PixelOfNode)); #endif if (ref->tex_children_height) { // Child tex, 1-dimensional CUDA_MALLOC( (void**)(&ref->d_children_tex_array), ref->tex_children_height * sizeof(PixelOfChildren)); CUDA_SAFE_CALL( hipMemcpy( (ref->d_children_tex_array), ref->h_children_tex_array, ref->tex_children_height * sizeof(PixelOfChildren), hipMemcpyHostToDevice)); #if CHILDTEX hipChannelFormatDesc childTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned); childrentex.addressMode[0] = hipAddressModeClamp; childrentex.filterMode = hipFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array), childTextureDesc, ref->tex_children_height* sizeof(PixelOfChildren)); #endif } #if TREE_ACCESS_HISTOGRAM ref->bytes_on_board += ref->tex_node_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_node_hist), ref->tex_node_height *sizeof(int)); CUDA_SAFE_CALL( 
hipMemset((ref->d_node_hist),0, ref->tex_node_height * sizeof(int))); if (ref->tex_children_height) { ref->bytes_on_board += ref->tex_children_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_child_hist), ref->tex_children_height *sizeof(int)); CUDA_SAFE_CALL( hipMemset((ref->d_child_hist),0, ref->tex_children_height * sizeof(int))); } #endif #endif #if TWO_LEVEL_NODE_TREE PixelOfNode node_buf[NODE_THRESH]; memset(node_buf, 0, sizeof(node_buf)); for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i) { TextureAddress myaddress(id2addr(i)); #if MERGETEX && REORDER_TREE myaddress.x &= 0x7FF; myaddress.x *= 2; int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc]; #elif REORDER_TREE int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc]; #elif MERGETEX node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x*2]; #else node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x]; #endif } CUDA_SAFE_CALL( hipMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf))); #endif #if TWO_LEVEL_CHILD_TREE PixelOfChildren child_buf[CHILD_THRESH]; memset(child_buf, 0, sizeof(child_buf)); for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i) { TextureAddress myaddress(id2addr(i)); #if MERGETEX && REORDER_TREE myaddress.x &= 0x7FF; myaddress.x *= 2; int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[loc+1]; #elif REORDER_TREE int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; child_buf[i]= ((PixelOfChildren*)(ref->h_children))[loc]; #elif MERGETEX child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x*2+1]; #else child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x]; #endif } CUDA_SAFE_CALL( hipMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf))); #endif TIMESTAMP(t1); ctx->statistics.t_tree_to_board += ELAPSED(t0, t1); fprintf(stderr, "done\n"); } else { ref->d_node_tex_array = NULL; ref->d_children_tex_array = NULL; } } void dumpQueryBlockInfo(QuerySet* queries) { fprintf(stderr, "\tProcessing queries %s to %s\n", queries->h_names[0], queries->h_names[queries->count-1]); } void loadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; queries->bytes_on_board = 0; unsigned int numQueries = queries->count; if (!ctx->on_cpu) { fprintf(stderr, "Allocating device memory for queries... 
"); TIMESTAMP(t0); dumpQueryBlockInfo(queries); CUDA_MALLOC((void**) &queries->d_tex_array, queries->texlen); \ queries->bytes_on_board += queries->texlen; CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_tex_array, queries->h_tex_array + queries->h_addrs_tex_array[0], queries->texlen, hipMemcpyHostToDevice)); #if QRYTEX qrytex.addressMode[0] = hipAddressModeClamp; qrytex.filterMode = hipFilterModePoint; qrytex.normalized = false; // access with normalized texture coordinates hipChannelFormatDesc qryDesc = hipCreateChannelDesc(8,0,0,0, hipChannelFormatKindUnsigned); BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc, queries->texlen); #endif CUDA_MALLOC((void**) &queries->d_addrs_tex_array, numQueries * sizeof(int)); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_addrs_tex_array, queries->h_addrs_tex_array, numQueries * sizeof(int), hipMemcpyHostToDevice)); CUDA_MALLOC((void**) &queries->d_lengths_array, numQueries * sizeof(int)); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( hipMemcpy((void*) queries->d_lengths_array, queries->h_lengths_array, numQueries * sizeof(int), hipMemcpyHostToDevice)); TIMESTAMP(t1); ctx->statistics.t_queries_to_board += ELAPSED(t0, t1); fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board); } else { queries->d_addrs_tex_array = NULL; queries->d_tex_array = NULL; queries->d_lengths_array = NULL; fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries*sizeof(int) + queries->texlen); } } void unloadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; CUDA_SAFE_CALL(hipFree(queries->d_tex_array)); queries->d_tex_array = NULL; CUDA_SAFE_CALL(hipFree(queries->d_addrs_tex_array)); queries->d_addrs_tex_array = NULL; CUDA_SAFE_CALL(hipFree(queries->d_lengths_array)); queries->d_lengths_array = NULL; queries->bytes_on_board = 0; } // Computes the location of the first MatchCoord for a given query. NOTE: // Do NOT use this function if COALESCED_QUERIES == 1 inline int match_coord_addrs(int qryid, int qry_addrs, int match_length) { return qry_addrs - qryid * (match_length + 1); } // Construct the offset table for a set of queries. This table will be used // by the printing functions, and if COALESCED_QUERIES == 1, by the matching // kernel. 
void buildCoordOffsetArray(MatchContext* ctx, int** h_coord_offset_array, unsigned int* num_coords) { int numCoords = 0; int match_length = ctx->min_match_length; int numQueries = ctx->queries->count; int* lengths = ctx->queries->h_lengths_array; int* coord_offsets = (int*)calloc(numQueries, sizeof(int)); #if COALESCED_QUERIES for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) { // Every query in this warp will need at least this many coords int max_num_coords = 0; for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) { int num_coords = lengths[i + j] - match_length + 1; if ( max_num_coords < num_coords) max_num_coords = num_coords; } unsigned int block_size = max_num_coords * WARP_SIZE; for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) { ctx->results.h_coord_tex_array[i + j] = numCoords + j; } numCoords += block_size; } #else for (unsigned int i = 0; i < numQueries; ++i) { int qryoffset = ctx->queries->h_addrs_tex_array[i]; coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length); } if (numQueries > 0) { unsigned int last_qry = numQueries - 1; unsigned int last_qry_len = lengths[last_qry] - match_length + 1; numCoords = coord_offsets[last_qry] + last_qry_len; fprintf(stderr, "Need %d match coords for this result array\n", numCoords); } #endif *num_coords = numCoords; *h_coord_offset_array = coord_offsets; } void loadResultBuffer(MatchContext* ctx) { unsigned int numQueries = ctx->queries->count; assert (numQueries); TIMESTAMP(t0); buildCoordOffsetArray(ctx, &(ctx->results.h_coord_tex_array), &(ctx->results.numCoords)); TIMESTAMP(t1); ctx->statistics.t_build_coord_offsets += ELAPSED(t0, t1); unsigned int numCoords = ctx->results.numCoords; fprintf(stderr, "Allocating result array for %d queries (%lu bytes) ...", numQueries, numCoords*sizeof(MatchCoord) ); size_t boardFreeMemory = 0; size_t total_mem = 0; boardMemory(&boardFreeMemory, &total_mem); fprintf(stderr,"board free memory: %lu total memory: %lu\n", boardFreeMemory, total_mem); ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord)); if (ctx->results.h_match_coords == NULL) { trap_dbg(); exit(EXIT_FAILURE); } if (!ctx->on_cpu) { TIMESTAMP(t2); ctx->results.bytes_on_board = 0; CUDA_MALLOC( (void**) &ctx->results.d_match_coords, numCoords * sizeof(MatchCoord)); ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord); CUDA_SAFE_CALL( hipMemset( (void*)ctx->results.d_match_coords, 0, numCoords * sizeof(MatchCoord))); #if COALESCED_QUERIES CUDA_MALLOC((void**) &ctx->results.d_coord_tex_array, numQueries * sizeof(int)); ctx->results.bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( hipMemcpy((void*) ctx->results.d_coord_tex_array, ctx->results.h_coord_tex_array, numQueries * sizeof(int), hipMemcpyHostToDevice)); #endif TIMESTAMP(t3); ctx->statistics.t_match_coords_to_board += ELAPSED(t2, t3); } else { ctx->results.d_match_coords = NULL; } fprintf(stderr, "done\n"); } void unloadResultBuffer(MatchContext* ctx) { CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords)); ctx->results.d_match_coords = NULL; ctx->results.bytes_on_board = 0; #if COALESCED_QUERIES CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords)); #endif } void transferResultsFromDevice(MatchContext* ctx) { if (!ctx->on_cpu) { TIMESTAMP(t0); CUDA_SAFE_CALL(hipMemcpy(ctx->results.h_match_coords, ctx->results.d_match_coords, ctx->results.numCoords * sizeof(MatchCoord), hipMemcpyDeviceToHost) ); #if TREE_ACCESS_HISTOGRAM CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_node_hist, ctx->ref->d_node_hist, 
ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL(hipMemcpy(ctx->ref->h_child_hist, ctx->ref->d_child_hist, ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int), hipMemcpyDeviceToHost) ); if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height) { int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int)); if (ctx->statistics.node_hist_size) memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int)); ctx->statistics.node_hist = temp; ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height; } if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height) { temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int)); if (ctx->statistics.hist_size) memcpy(temp, ctx->statistics.child_hist, ctx->statistics.hist_size * sizeof(int)); ctx->statistics.child_hist = temp; ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height; } for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) { ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i]; } for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) { ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i]; } #endif TIMESTAMP(t1); ctx->statistics.t_match_coords_from_board += ELAPSED(t0, t1); } } int flushOutput(); int addToBuffer(char* string); char numbuffer[32]; MatchCoord* coordForQueryChar(MatchContext* ctx, unsigned int qryid, unsigned int qrychar) { MatchResults* results = &(ctx->results); MatchCoord* coords = results->h_match_coords; #if COALESCED_QUERIES return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE; #else return coords + results->h_coord_tex_array[qryid] + qrychar; #endif } void coordsToPrintBuffers(MatchContext* ctx, ReferencePage* page, MatchInfo** matches, Alignment** alignments, unsigned int mem_avail, unsigned int* coord_idx, unsigned int* match_idx, unsigned int* align_idx, unsigned int* nextqry, unsigned int* nextqrychar, bool unified) { unsigned int numQueries = ctx->queries->count; int match_length = ctx->min_match_length; unsigned int cidx = *coord_idx; unsigned int midx = 0; unsigned int numCoords = ctx->results.numCoords; unsigned int numMatches = 0; unsigned int numAlignments = 0; int DEBUG = 0; if (DEBUG && cidx == 0) { for (int j = 0; j < numCoords; ++j) { MatchCoord * coord = ctx->results.h_match_coords+j; if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { //fprintf(stdout, "node: %d\n", // coord->node); fprintf(stdout, "node: %d leaves:%d\n", coord->node.data, lookupNumLeaves(page, coord->node)); } } exit(0); } // How much can we fit into mem_avail? 
for (int j = cidx; j < numCoords; ++j) { MatchCoord* coord = ctx->results.h_match_coords + j; int queryAlignments = 0; int queryMatches = 0; if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { int numLeaves = lookupNumLeaves(page, coord->node); queryAlignments += numLeaves; queryMatches++; } int allMatches = numMatches + queryMatches; int allAlignments = numAlignments + queryAlignments; int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment); if (neededSize > mem_avail || (allMatches/BLOCKSIZE) >= MAX_GRID_DIMENSION) { // adding this match won't fit on the board break; } ++cidx; numMatches = allMatches; numAlignments = allAlignments; } MatchInfo* M; if (unified) { hipMallocManaged(&M, numMatches * sizeof(MatchInfo)); memset(M, 0, numMatches * sizeof(MatchInfo)); } else { M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo)); } unsigned int alignmentOffset = 0; int qry = *nextqry; int qrychar = *nextqrychar; bool set_full = false; while (qry < numQueries) { // h_lengths_array doesn't count the 'q' at the beginning of each query int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length; while (qrychar < qlen) { if (midx >= numMatches) { set_full = true; break; } MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar); if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { MatchInfo m; m.resultsoffset = alignmentOffset; m.qrystartpos = qrychar; m.matchnode = coord->node; m.edgematch = coord->edge_match_length; m.numLeaves = lookupNumLeaves(page, m.matchnode); m.queryid = qry; alignmentOffset += m.numLeaves; M[midx++] = m; } ++qrychar; } if (set_full) break; ++qry; qrychar = 0; } *coord_idx = cidx; *match_idx = midx; *align_idx = alignmentOffset; *matches = M; *nextqry = qry; *nextqrychar = qrychar; fprintf(stderr, "Allocing %lu bytes of host memory for %u alignments\n", alignmentOffset * sizeof(Alignment), numAlignments); if (unified) { hipMallocManaged(alignments, alignmentOffset * sizeof(Alignment)); memset(alignments, 0, alignmentOffset * sizeof(Alignment)); } else { *alignments = (struct Alignment *) calloc(alignmentOffset, sizeof(Alignment)); } //hipHostMalloc((void**)alignments, numAlignments * sizeof(Alignment)); } void runPrintKernel(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* alignments, unsigned int numAlignments, bool unified) { MatchInfo* d_matches; size_t matchesSize = numMatches * sizeof(MatchInfo); if (!unified) { CUDA_MALLOC(&d_matches, matchesSize); } struct Alignment* d_alignments; size_t alignmentSize = numAlignments * sizeof(Alignment); if (!unified) { CUDA_MALLOC(&d_alignments, alignmentSize); CUDA_SAFE_CALL(hipMemset((void*) d_alignments, 0, alignmentSize)); } TIMESTAMP(t0); // Copy matches to card fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments); fprintf(stderr, "Copying %lu bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments); int DEBUG = 0; if (DEBUG) { for (int i = 0; i < numMatches; i++) { printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n", i, h_matches[i].resultsoffset, h_matches[i].queryid, h_matches[i].matchnode.data, h_matches[i].numLeaves, h_matches[i].edgematch, h_matches[i].qrystartpos); } exit(0); } if (unified) { d_matches = h_matches; d_alignments = alignments; } else { CUDA_SAFE_CALL(hipMemcpy(d_matches, h_matches, matchesSize, hipMemcpyHostToDevice)); } TIMESTAMP(t1); long long mtime = ELAPSED(t0, t1); // Launch the kernel int blocksize = (numMatches > BLOCKSIZE) ? 
BLOCKSIZE : numMatches; dim3 dimBlock(blocksize, 1, 1); dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1); fprintf(stderr, " Calling print kernel... "); hipLaunchKernelGGL(( printKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, d_matches, numMatches, d_alignments, #if COALESCED_QUERIES ctx->results.d_coord_tex_array, #endif #if !QRYTEX #if COALESCED_QUERIES (int*) #endif ctx->queries->d_tex_array, #endif #if !NODETEX (_PixelOfNode*)ctx->ref->d_node_tex_array, #endif #if !CHILDTEX (_PixelOfChildren*)ctx->ref->d_children_tex_array, #endif ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, page->begin, page->end, page->shadow_left, page->shadow_right, ctx->min_match_length #if TREE_ACCESS_HISTOGRAM , ctx->ref->d_node_hist, ctx->ref->d_child_hist #endif ); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if ( hipSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } TIMESTAMP(t2); // Copy the results back to the host if (!unified) { CUDA_SAFE_CALL(hipMemcpy((void*)alignments, (void*)d_alignments, alignmentSize, hipMemcpyDeviceToHost)); } hipDeviceSynchronize(); TIMESTAMP(t3); long long atime = ELAPSED(t2, t3); fprintf(stderr, "memcpy time= %lld\n", atime + mtime); // Cleanup if (!unified) { CUDA_SAFE_CALL(hipFree(d_alignments)); CUDA_SAFE_CALL(hipFree(d_matches)); } } // TODO: need reverse-complement printing support void runPrintOnCPU(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* alignments, unsigned int numAlignments) { unsigned int min_match_length = ctx->min_match_length; int* addrs = ctx->queries->h_addrs_tex_array; int* lengths = ctx->queries->h_lengths_array; char* qrychars = ctx->queries->h_tex_array; if (!numMatches) return; int qry = -1; unsigned int qrylen; for (int i = 0; i < numMatches; ++i) { MatchInfo& match = h_matches[i]; if (match.queryid != qry) { qry = match.queryid; qrylen = lengths[qry]; } if (!(match.edgematch & FRMASK)) { printAlignments(page, alignments + match.resultsoffset, #if COALESCED_QUERIES qrychars + sizeof(int) * addrs[qry], #else qrychars + addrs[qry], #endif qrylen, match.matchnode, match.qrystartpos, match.edgematch, min_match_length, 0, ctx->forwardcoordinates); } } } int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen); void getExactAlignments(MatchContext * ctx, ReferencePage * page, bool on_cpu, bool unified) { assert(!ctx->reverse && !ctx->forwardreverse); size_t boardFreeMemory; size_t total_mem; if (!on_cpu) { boardMemory(&boardFreeMemory, &total_mem); fprintf(stderr, "board free memory: %lu total memory: %lu\n", boardFreeMemory, total_mem); } else { boardFreeMemory = 256 * 1024 * 1024; total_mem = boardFreeMemory; } #ifdef __DEVICE_EMULATION__ boardFreeMemory = 512 * 1024 * 1024; #endif boardFreeMemory -= BREATHING_ROOM; fprintf(stderr, "board free memory: %lu\n", boardFreeMemory); int rTotalMatches = 0; int rTotalAlignments = 0; int totalRounds = 0; unsigned int last_coord = ctx->results.numCoords; unsigned int next_coord = 0; unsigned int nextqry = 0; unsigned int nextqrychar = 0; int lastqry = -1; while (next_coord < last_coord) { // see how many queries will fit on the board totalRounds++; unsigned int numMatches = 0; unsigned int numAlignments = 0; MatchInfo* h_matches = NULL; Alignment* h_alignments = NULL; int coord_left = next_coord; TIMESTAMP(t0); coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory, &next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar, 
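/* Round structure (descriptive note): each pass of the surrounding while
 * loop packs as many of the remaining match coords as fit in
 * boardFreeMemory (coordsToPrintBuffers advances next_coord), expands that
 * batch into alignments with the print kernel or the CPU fallback, and then
 * releases the batch buffers; the loop finishes once next_coord reaches
 * last_coord, i.e. every coordinate produced by the match kernel has been
 * processed. */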
unified); TIMESTAMP(t1); long long btime = ELAPSED(t0, t1); ctx->statistics.t_coords_to_buffers += btime; fprintf(stderr, "buffer prep time= %lld\n", btime); fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n", totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments); if (numMatches == 0) continue; char buf[256]; //assert(qryend > qrystart); rTotalAlignments += numAlignments; rTotalMatches += numMatches; if (num_bind_tex_calls > 100) { hipDeviceReset(); num_bind_tex_calls = 0; loadReference(ctx, unified); loadQueries(ctx); } TIMESTAMP(t2); if (on_cpu) { runPrintOnCPU(ctx, page, h_matches, numMatches, h_alignments, numAlignments); } else { runPrintKernel(ctx, page, h_matches, numMatches, h_alignments, numAlignments, unified); } TIMESTAMP(t3); long long ktime = ELAPSED(t2, t3); ctx->statistics.t_print_kernel += ktime; fprintf(stderr, "print kernel time= %lld\n", ktime); //Process the alignments for (int m = 0; m < numMatches; m++) { int base = h_matches[m].resultsoffset; for (int i = 0; i < h_matches[m].numLeaves; i++) { // See if there are any more left maximal alignments for this match if (h_alignments[base+i].left_in_ref == 0) { break; } if (h_matches[m].queryid != lastqry) { lastqry = h_matches[m].queryid; //addToBuffer("> "); //addToBuffer(*(ctx->queries->h_names + lastqry)); //addToBuffer("\n"); } //sprintf(buf, "%d\t%d\t%d\n", // h_alignments[base+i].left_in_ref, // h_matches[m].qrystartpos + 1, // h_alignments[base+i].matchlen); //addToBuffer(buf); // addMatchToBuffer(h_alignments[base+i].left_in_ref, // h_matches[m].qrystartpos + 1, // h_alignments[base+i].matchlen); } } //flushOutput(); TIMESTAMP(t4); ctx->statistics.t_results_to_disk += ELAPSED(t3, t4); free(h_matches); free(h_alignments); } free(ctx->results.h_coord_tex_array); free(ctx->results.h_match_coords); ctx->results.h_coord_tex_array = NULL; ctx->results.h_match_coords = NULL; fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n", rTotalMatches, rTotalAlignments, totalRounds); } int getQueryBlock(MatchContext* ctx, size_t device_mem_avail) { QuerySet* queries = ctx->queries; char * queryTex = NULL; int* queryAddrs = NULL; int* queryLengths = NULL; unsigned int numQueries; unsigned int num_match_coords; size_t queryLen; char** names; fprintf(stderr, "Loading query block... 
"); TIMESTAMP(t0); getQueriesTexture(queries->qfile, &queryTex, &queryLen, &queryAddrs, &names, &queryLengths, &numQueries, &num_match_coords, device_mem_avail, ctx->min_match_length, ctx->reverse || ctx->forwardreverse); TIMESTAMP(t1); ctx->statistics.t_queries_from_disk += ELAPSED(t0, t1); queries->h_tex_array = queryTex; queries->count = numQueries; queries->h_addrs_tex_array = queryAddrs; queries->texlen = queryLen; queries->h_names = names; queries->h_lengths_array = queryLengths; ctx->results.numCoords = num_match_coords; fprintf(stderr, "done.\n"); return numQueries; } void destroyQueryBlock(QuerySet* queries) { free(queries->h_tex_array); queries->h_tex_array = NULL; for (int i = 0; i < queries->count; ++i) free(queries->h_names[i]); free(queries->h_names); queries->count = 0; queries->texlen = 0; free(queries->h_addrs_tex_array); queries->h_addrs_tex_array = NULL; free(queries->h_lengths_array); queries->h_lengths_array = NULL; } void resetStats(Statistics* stats) { stats->t_end_to_end = 0.0; stats->t_match_kernel = 0.0; stats->t_print_kernel = 0.0; stats->t_queries_to_board = 0.0; stats->t_match_coords_to_board = 0.0; stats->t_match_coords_from_board = 0.0; stats->t_tree_to_board = 0.0; stats->t_ref_str_to_board = 0.0; stats->t_queries_from_disk = 0.0; stats->t_ref_from_disk = 0.0; stats->t_results_to_disk = 0.0; stats->t_tree_construction = 0.0; stats->t_tree_reorder = 0.0; stats->t_tree_flatten = 0.0; stats->t_reorder_ref_str = 0.0; stats->t_build_coord_offsets = 0.0; stats->t_coords_to_buffers = 0.0; stats->bp_avg_query_length = 0.0; #if TREE_ACCESS_HISTOGRAM if (stats->node_hist_size) { free(stats->node_hist); stats->node_hist = NULL; stats->node_hist_size = 0; } if (stats->child_hist_size) { free(stats->child_hist); stats->child_hist = NULL; stats->child_hist_size = 0; } #endif } void writeStatisticsFile(Statistics* stats, char* stats_filename, char* node_hist_filename = NULL, char* child_hist_filename = NULL) { if (stats_filename) { FILE* f = fopen(stats_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename); } else { fprintf(f, "Q"); fprintf(f, ",R"); fprintf(f, ",T"); fprintf(f, ",m"); fprintf(f, ",r"); fprintf(f, ",t"); fprintf(f, ",n"); fprintf(f, ",Total"); fprintf(f, ",Match kernel"); fprintf(f, ",Print Kernel"); fprintf(f, ",Queries to board"); fprintf(f, ",Match coords to board"); fprintf(f, ",Match coords from board"); fprintf(f, ",Tree to board"); fprintf(f, ",Ref str to board"); fprintf(f, ",Queries from disk"); fprintf(f, ",Ref from disk"); fprintf(f, ",Output to disk"); fprintf(f, ",Tree construction"); fprintf(f, ",Tree reorder"); fprintf(f, ",Tree flatten"); fprintf(f, ",Ref reorder"); fprintf(f, ",Build coord table"); fprintf(f, ",Coords to buffers"); fprintf(f, ",Avg qry length"); fprintf(f, "\n"); fprintf(f, "%d", QRYTEX); fprintf(f, ",%d", REFTEX); fprintf(f, ",%d", TREETEX); fprintf(f, ",%d", MERGETEX); fprintf(f, ",%d", REORDER_REF); fprintf(f, ",%d", REORDER_TREE); fprintf(f, ",%d", RENUMBER_TREE); fprintf(f, ",%f", stats->t_end_to_end * 1e-6); fprintf(f, ",%f", stats->t_match_kernel * 1e-6); fprintf(f, ",%f", stats->t_print_kernel * 1e-6); fprintf(f, ",%f", stats->t_queries_to_board * 1e-6); fprintf(f, ",%f", stats->t_match_coords_to_board * 1e-6); fprintf(f, ",%f", stats->t_match_coords_from_board * 1e-6); fprintf(f, ",%f", stats->t_tree_to_board * 1e-6); fprintf(f, ",%f", stats->t_ref_str_to_board * 1e-6); fprintf(f, ",%f", stats->t_queries_from_disk * 1e-6); fprintf(f, ",%f", stats->t_ref_from_disk * 
1e-6); fprintf(f, ",%f", stats->t_results_to_disk * 1e-6); fprintf(f, ",%f", stats->t_tree_construction * 1e-6); fprintf(f, ",%f", stats->t_tree_reorder * 1e-6); fprintf(f, ",%f", stats->t_tree_flatten * 1e-6); fprintf(f, ",%f", stats->t_reorder_ref_str * 1e-6); fprintf(f, ",%f", stats->t_build_coord_offsets * 1e-6); fprintf(f, ",%f", stats->t_coords_to_buffers * 1e-6); fprintf(f, ",%f", stats->bp_avg_query_length * 1e-6); fprintf(f,"\n"); fclose(f); } } #if TREE_ACCESS_HISTOGRAM if (node_hist_filename) { FILE* f = fopen(node_hist_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename); } else { for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) fprintf(f, "%d\t%d\n", i, ctx->statistics.node_hist[i]); } } if (child_hist_filename) { FILE* f = fopen(child_hist_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename); } else { for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) fprintf(f, "%d\t%d\n", i, ctx->statistics.child_hist[i]); } } float total_node_hits = 0; float tree_top_node_hits = 0; float total_child_hits = 0; float tree_top_child_hits = 0; for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) { total_node_hits +=ctx->statistics.node_hist[i]; if (i < 256) { tree_top_node_hits += ctx->statistics.node_hist[i]; } } for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) { total_child_hits +=ctx->statistics.child_hist[i]; if (i < 256) { tree_top_child_hits += ctx->statistics.child_hist[i]; } } fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n",(int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits /total_node_hits); fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n",(int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits /total_child_hits); #endif } void matchOnCPU(MatchContext* ctx, bool doRC) { //TODO: CPU is matching is disabled. if (doRC) { // Match the reverse complement of the queries to the ref computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, REVERSE); } else { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, FORWARD); } } void matchOnGPU(MatchContext* ctx, bool doRC) { int numQueries = ctx->queries->count; int blocksize = (numQueries > BLOCKSIZE) ? 
BLOCKSIZE : numQueries; dim3 dimBlock(blocksize, 1, 1); dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1); // Match the reverse complement of the queries to the ref if (doRC) { //TODO: GPU RC is disabled hipLaunchKernelGGL(( mummergpuRCKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); } else { hipLaunchKernelGGL(( mummergpuKernel) , dim3(dimGrid), dim3(dimBlock), 0 , 0, ctx->results.d_match_coords, #if COALESCED_QUERIES ctx->results.d_coord_tex_array, #endif #if !QRYTEX #if COALESCED_QUERIES (int*) #endif ctx->queries->d_tex_array, #endif #if !NODETEX (_PixelOfNode*)(ctx->ref->d_node_tex_array), #endif #if !CHILDTEX (_PixelOfChildren*)(ctx->ref->d_children_tex_array), #endif #if !REFTEX (char*)ctx->ref->d_ref_array, #endif ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length #if TREE_ACCESS_HISTOGRAM , ctx->ref->d_node_hist, ctx->ref->d_child_hist #endif ); } // check if kernel execution generated an error hipError_t err = hipGetLastError(); if ( hipSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } void getMatchResults(MatchContext* ctx, unsigned int page_num) { transferResultsFromDevice(ctx); } void matchQueryBlockToReferencePage(MatchContext* ctx, ReferencePage* page, bool reverse_complement) { fprintf(stderr, "Memory footprint is:\n\tqueries: %lu\n\tref: %lu\n\tresults: %lu\n", ctx->queries->bytes_on_board, ctx->ref->bytes_on_board, ctx->results.bytes_on_board); TIMESTAMP(t0); if (ctx->on_cpu) { matchOnCPU(ctx, reverse_complement); } else { matchOnGPU(ctx, reverse_complement); hipDeviceSynchronize(); } TIMESTAMP(t1); long long ktime = ELAPSED(t0, t1); ctx->statistics.t_match_kernel += ktime; fprintf(stderr, "match kernel time= %lld\n", ktime); getMatchResults(ctx, page->id); unloadResultBuffer(ctx); } int matchSubset(MatchContext* ctx, ReferencePage* page, bool unified) { loadQueries(ctx); fprintf(stderr, "Matching queries %s - %s against ref coords %d - %d\n", ctx->queries->h_names[0], ctx->queries->h_names[ctx->queries->count - 1], page->begin, page->end); loadResultBuffer(ctx); // TODO: renable RC support by calling this twice /w reverse/fwdreverse // idiom. matchQueryBlockToReferencePage(ctx, page, false); if (USE_PRINT_KERNEL && !ctx->on_cpu) { getExactAlignments(ctx, page, false, unified); } else { getExactAlignments(ctx, page, true, unified); } //flushOutput(); unloadQueries(ctx); return 0; } int getFreeDeviceMemory(bool on_cpu) { size_t free_mem = 0; size_t total_mem = 0; // We have to 'prime' CUDA by making an allocation here. cuMemGetInfo // will return zeroes until we do a malloc. 
int * p = NULL; CUDA_SAFE_CALL(hipMalloc((void**)&p, sizeof(int))); CUDA_SAFE_CALL(hipFree(p)); if (!on_cpu) { boardMemory(&free_mem, &total_mem); fprintf(stderr, "board free memory: %lu total memory: %lu\n", free_mem, total_mem); } else { total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX } return free_mem; } int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page, bool unified) { fprintf(stderr, "Beginning reference page %p\n", page); int free_mem = getFreeDeviceMemory(ctx->on_cpu); int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM; ctx->ref = &(page->ref); loadReference(ctx, unified); while (getQueryBlock(ctx, available_mem)) { matchSubset(ctx, page, unified); ctx->statistics.bp_avg_query_length = ctx->queries->texlen / (float)(ctx->queries->count) - 2; destroyQueryBlock(ctx->queries); if (num_bind_tex_calls > 100) { hipDeviceReset(); num_bind_tex_calls = 0; loadReference(ctx, unified); } } unloadReferenceString(ctx->ref); unloadReferenceTree(ctx); lseek(ctx->queries->qfile, 0, SEEK_SET); return 0; } void initReferencePages( MatchContext* ctx , int* num_pages, ReferencePage** pages_out) { unsigned int bases_in_ref = ctx->full_ref_len - 3; unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref; unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size); fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n", num_reference_pages, bases_in_ref, page_size); unsigned int page_overlap = MAX_QUERY_LEN + 1; ReferencePage* pages = (ReferencePage*) calloc(num_reference_pages, sizeof(ReferencePage)); pages[0].begin = 1; pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning pages[0].shadow_left = -1; pages[0].id = 0; for (int i = 1; i < num_reference_pages - 1; ++i) { pages[i].begin = pages[i - 1].end - page_overlap; pages[i].end = pages[i].begin + page_size + page_overlap; pages[i - 1].shadow_right = pages[i].begin; pages[i].shadow_left = pages[i-1].end; pages[i].id = i; } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; pages[last_page].begin = pages[last_page - 1].end - page_overlap; pages[last_page].end = ctx->full_ref_len - 1; pages[last_page - 1].shadow_right = pages[last_page].begin; pages[last_page].shadow_right = -1; pages[last_page].shadow_left = pages[last_page - 1].end; pages[last_page].id = last_page; } *pages_out = pages; *num_pages = num_reference_pages; } int streamReferenceAgainstQueries(MatchContext* ctx, bool unified) { int num_reference_pages = 0; ReferencePage* pages = NULL; initReferencePages(ctx, &num_reference_pages, &pages); buildReferenceTexture(&(pages[0].ref), ctx->full_ref, pages[0].begin, pages[0].end, ctx->min_match_length, ctx->dotfilename, ctx->texfilename, &(ctx->statistics), unified); matchQueriesToReferencePage(ctx, &pages[0], unified); destroyReference(&(pages[0].ref)); for (int i = 1; i < num_reference_pages - 1; ++i) { buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin, pages[i].end, ctx->min_match_length, NULL, NULL, &(ctx->statistics), unified); matchQueriesToReferencePage(ctx, &pages[i], unified); destroyReference(&(pages[i].ref)); } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; buildReferenceTexture(&(pages[last_page].ref), ctx->full_ref, pages[last_page].begin, pages[last_page].end, ctx->min_match_length, NULL, NULL, &(ctx->statistics), unified); matchQueriesToReferencePage(ctx, &pages[last_page], 
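/* Page layout sketch from initReferencePages() above (numbers are
 * hypothetical): for a 100,000,000 character full_ref_len,
 * bases_in_ref = 99,999,997, page_size = BASES_PER_TREE_PAGE = 8,388,608 and
 * ceil(99,999,997 / 8,388,608) = 12 pages are created. Consecutive pages
 * overlap by page_overlap = MAX_QUERY_LEN + 1 bases
 * (pages[i].begin = pages[i-1].end - page_overlap), presumably so a query
 * that straddles a page boundary is still matched entirely within at least
 * one page, and the shadow_left / shadow_right fields record where those
 * overlap regions begin and end. */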
unified); destroyReference(&(pages[last_page].ref)); } free(pages); return 0; } extern "C" int matchQueries(MatchContext* ctx, bool unified) { assert(sizeof(struct PixelOfNode) == sizeof(uint4)); assert(sizeof(struct PixelOfChildren) == sizeof(uint4)); #if TREE_ACCESS_HISTOGRAM ctx->statistics.node_hist_size = 0; ctx->statistics.child_hist_size = 0; #endif resetStats(&(ctx->statistics)); TIMESTAMP(t0); int ret; fprintf(stderr, "Streaming reference pages against all queries\n"); ret = streamReferenceAgainstQueries(ctx, unified); TIMESTAMP(t1); ctx->statistics.t_end_to_end += ELAPSED(t0, t1); writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out"); return ret; }
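/* Illustrative sketch (not called anywhere): the grow-then-accumulate
 * pattern used for the TREE_ACCESS_HISTOGRAM counters in
 * transferResultsFromDevice(), written out as a standalone helper. It only
 * relies on <stdlib.h>/<string.h>, which this file is assumed to include
 * like its CUDA counterpart; the parameter names are illustrative stand-ins
 * for ctx->statistics.node_hist / node_hist_size and the per-round
 * histogram copied back from the board. */
static void accumulateHistogramSketch(int** hist, unsigned int* hist_size,
                                      const int* device_counts,
                                      unsigned int device_size)
{
    if (*hist_size < device_size)
    {
        /* Grow the running histogram, preserving counts gathered so far. */
        int* grown = (int*)calloc(device_size, sizeof(int));
        if (*hist_size)
            memcpy(grown, *hist, *hist_size * sizeof(int));
        free(*hist);
        *hist = grown;
        *hist_size = device_size;
    }
    /* Fold this round's device counts into the running totals. */
    for (unsigned int i = 0; i < device_size; ++i)
        (*hist)[i] += device_counts[i];
}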
f8faf1adf23723c0dffa29c20aac8167270eb950.cu
// Includes, system #include <errno.h> #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <string.h> #include <math.h> #include <assert.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/types.h> #include <unistd.h> #include <errno.h> #include <sys/time.h> #include <cuda.h> #include <vector_types.h> // includes, kernels #include <common.cu> #include <mummergpu.h> #include <mummergpu_kernel.cu> int USE_PRINT_KERNEL = 1; #define BREATHING_ROOM (16 * 1024 * 1024) #define BASES_PER_TREE_PAGE 8388608 //#define BASES_PER_TREE_PAGE 7000000 #define BLOCKSIZE 256 unsigned int cuda_calls = 0; void trap_dbg() { fprintf(stderr, "Trapped\n"); } #define CUDA_SAFE_CALL( call) do { \ cuda_calls++; \ cudaError err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %d (%s).\n", \ __FILE__, __LINE__, err, cudaGetErrorString( err) ); \ trap_dbg(); \ exit(EXIT_FAILURE); \ } } while (0) # define CU_SAFE_CALL_NO_SYNC( call ) do { \ CUresult err = call; \ if( CUDA_SUCCESS != err) { \ fprintf(stderr, "Cuda driver error %x in file '%s' in line %i.\n", \ err, __FILE__, __LINE__ ); \ exit(EXIT_FAILURE); \ } } while (0) # define CUT_DEVICE_INIT_DRV(cuDevice) do { \ cuDevice = 0; \ int deviceCount = 0; \ CUresult err = cuInit(0); \ if (CUDA_SUCCESS == err) \ CU_SAFE_CALL_NO_SYNC(cuDeviceGetCount(&deviceCount)); \ if (deviceCount == 0) { \ fprintf(stderr, "There is no device.\n"); \ exit(EXIT_FAILURE); \ } \ int dev; \ for (dev = 0; dev < deviceCount; ++dev) { \ int major, minor; \ CU_SAFE_CALL_NO_SYNC(cuDeviceComputeCapability(&major, &minor, dev));\ if (major >= 1) \ break; \ } \ if (dev == deviceCount) { \ fprintf(stderr, "There is no device supporting CUDA.\n"); \ exit(EXIT_FAILURE); \ } \ else \ CU_SAFE_CALL_NO_SYNC(cuDeviceGet(&cuDevice, dev)); \ } while (0) unsigned int num_bind_tex_calls = 0; #define BIND_TEX(offset, tex, arr, desc, len) do { \ CUDA_SAFE_CALL(cudaBindTexture(offset, tex, arr, desc, len)); \ ++num_bind_tex_calls; \ } while(0) #define BIND_TEX_ARRAY(tex, arr, desc) do { \ CUDA_SAFE_CALL(cudaBindTextureToArray(tex, arr, desc)); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC(ptr, size) do { \ cudaMalloc(ptr, size); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC_PITCH(ptr, out_pitch, rowsize, numrows) do { \ cudaMallocPitch(ptr, out_pitch, rowsize, numrows); \ ++num_bind_tex_calls; \ } while(0) #define CUDA_MALLOC_ARRAY(ptr, desc, pitch, rows) do { \ cudaMallocArray(ptr, desc, pitch, rows); \ ++num_bind_tex_calls; \ } while(0) #include "../../timing.h" //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold(MatchResults* results, char* refstr, char* queries, int* queryAddrs, int* queryLengths, PixelOfNode* nodeTexture, PixelOfChildren* childrenTexture, int numQueries, int mismatch_length, int rc); extern "C" void getReferenceString(const char * filename, char** refstr, size_t* reflen); extern "C" void createTreeTexture(const char * filename, PixelOfNode** nodeTexture, PixelOfChildren** childrenTexture, unsigned int* width, unsigned int* node_height, unsigned int* children_height, AuxiliaryNodeData** aux_data, int* num_match_coords, int min_match_len, Statistics* statistics, const char * dotfilename, const char * texfilename); extern "C" void getQueriesTexture(int qfile, char** queryTexture, size_t* queryLength, int** queryAddrs, char*** queryNames, int** queryLengths, unsigned int* numQueries, unsigned 
int* num_match_coords, unsigned int device_memory_avail, int min_match_length, bool rc); extern "C" int lookupNumLeaves(ReferencePage * page, TextureAddress addr); void printAlignments(ReferencePage* page, Alignment* alignments, char* query, int qrylen, TextureAddress nodeid, int qrypos, int edge_depth, int min_match, bool rc, bool forwardcoordinates); int countLeafNodes(int nodeid); extern "C" void mapQueriesEndToEnd(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* h_alignments, unsigned int numAligments); extern "C" int createReference(const char* fromFile, Reference* ref) { if (!fromFile || !ref) return -1; TIMESTAMP(t0); getReferenceString(fromFile, &(ref->str), &(ref->len)); TIMESTAMP(t1); ref->t_load_from_disk += ELAPSED(t0, t1); return 0; } extern "C" int destroyReference(Reference* ref) { free(ref->h_node_tex_array); free(ref->h_children_tex_array); free(ref->str); #if REORDER_REF free(ref->h_ref_array); #endif free(ref->aux_data); #if TREE_ACCESS_HISTOGRAM free(ref->h_node_hist); free(ref->h_child_hist); #endif ref->str = NULL; ref->len = 0; return 0; } extern "C" int createQuerySet(const char* fromFile, QuerySet* queries) { fprintf(stderr, "Opening %s...\n", fromFile); int qfile = open(fromFile, O_RDONLY); if (qfile == -1) { fprintf(stderr, "Can't open %s: %d\n", fromFile, errno); exit (1); } queries->qfile = qfile; return 0; } extern "C" int destroyQuerySet(QuerySet* queries) { if (queries->qfile) close(queries->qfile); return 0; } extern "C" void printStringForError(int err) { } extern "C" int createMatchContext(Reference* ref, QuerySet* queries, MatchResults* matches, bool on_cpu, int min_match_length, char* stats_file, bool reverse, bool forwardreverse, bool forwardcoordinates, bool showQueryLength, char* dotfilename, char* texfilename, MatchContext* ctx) { ctx->queries = queries; ctx->ref = ref; ctx->full_ref = ref->str; ctx->full_ref_len = ref->len; ctx->on_cpu = on_cpu; ctx->min_match_length = min_match_length; ctx->stats_file = stats_file; ctx->reverse = reverse; ctx->forwardreverse = forwardreverse; ctx->forwardcoordinates = forwardcoordinates; ctx->show_query_length = showQueryLength; ctx->dotfilename = dotfilename; ctx->texfilename = texfilename; return 0; } extern "C" int destroyMatchContext(MatchContext* ctx) { free(ctx->full_ref); //destroyReference(ctx->ref); destroyQuerySet(ctx->queries); return 0; } void buildReferenceTexture(Reference* ref, char* full_ref, size_t begin, size_t end, int min_match_len, char* dotfilename, char* texfilename, Statistics* statistics, bool unified) { fprintf(stderr, "Building reference texture...\n"); PixelOfNode* nodeTexture = NULL; PixelOfChildren * childrenTexture = NULL; unsigned int width = 0; unsigned int node_height = 0; unsigned int children_height = 0; AuxiliaryNodeData* aux_data = NULL; int num_nodes; TIMESTAMP(t0); ref->len = end - begin + 3; if (unified) { CUDA_SAFE_CALL(cudaMallocManaged(&ref->str, ref->len)); } else { ref->str = (char*)malloc(ref->len); } ref->str[0] = 's'; strncpy(ref->str + 1, full_ref + begin, ref->len - 3); strcpy(ref->str + ref->len - 2, "$"); TIMESTAMP(t1); statistics->t_ref_from_disk += ELAPSED(t0, t1); createTreeTexture(ref->str, &nodeTexture, &childrenTexture, &width, &node_height, &children_height, &aux_data, &num_nodes, min_match_len, statistics, dotfilename, texfilename); ref->h_node_tex_array = nodeTexture; ref->h_children_tex_array = childrenTexture; ref->tex_width = width; ref->tex_node_height = node_height; ref->tex_children_height = 
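/* Index mapping used by the REORDER_REF block below (worked example): with
 * refpitch = 65536 and blocksize = 4, reference position i maps to
 *   bigx = i % (65536 * 4);  bigy = i / (65536 * 4);
 *   y = bigy * 4 + bigx % 4;  x = bigx / 4;
 * so i = 10 lands at (x, y) = (2, 2), i.e. h_ref_array[2 * 65536 + 2], and
 * positions 8..11 share column x = 2 across rows y = 0..3. Four consecutive
 * reference characters therefore sit in the same column of four adjacent
 * rows, presumably so the match kernel's reference fetches hit neighbouring
 * texels. */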
children_height; #if TREE_ACCESS_HISTOGRAM ref->h_node_hist = (int*)calloc(width * node_height, sizeof(int)); ref->h_child_hist = (int*)calloc(width * children_height, sizeof(int)); #endif ref->aux_data = aux_data; ref->num_nodes = num_nodes; ref->bytes_on_board = (width * node_height * sizeof(PixelOfNode)) + (width * children_height * sizeof(PixelOfChildren)); fprintf(stderr, "This tree will need %lu bytes on the board\n", ref->bytes_on_board); #if REORDER_REF TIMESTAMP(t0); unsigned int refpitch = ref->pitch = 65536; int numrows = ceil(ref->len / ((float)refpitch)); int blocksize = 4; numrows += blocksize; int refstrsize = numrows * refpitch; ref->h_ref_array = (char *) malloc(refstrsize); ref->bytes_on_board += refstrsize; fprintf(stderr, "The refstr (reordered) requires %d bytes\n", refstrsize); int z_max = numrows * refpitch; for (int z = 0; z < z_max; z++) { ref->h_ref_array[z] = 'Z'; } int x, y; int maxx = 0, maxy = 0; size_t reflen = ref->len; char* refstr = ref->str; int block_dim = refpitch * blocksize; for (int i = 0; i < reflen; i++) { int bigx = i % (block_dim); // ref string reorder int bigy = i / (block_dim); y = bigy * blocksize + bigx % blocksize; x = bigx / blocksize; // printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]); assert(x < refpitch); assert(y < numrows); ref->h_ref_array[y*refpitch+x] = refstr[i]; if (x > maxx) { maxx = x; } if (y > maxy) { maxy = y; } } if ((maxx >= refpitch) || (maxy >= numrows)) { fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n", maxx, refpitch, maxy, numrows); exit(1); } TIMESTAMP(t1); if (statistics) statistics->t_reorder_ref_str += ELAPSED(t0, t1); #else fprintf(stderr, "The refstr requires %lu bytes\n", ref->len); ref->bytes_on_board += ref->len; #endif } void boardMemory(size_t * free_mem, size_t * total_mem) { // The emulator doesn't allow calls to cuMemGetInfo #ifdef __DEVICE_EMULATION__ *free_mem = 512*1024*1024; *total_mem = 768*1024*1024; #else CU_SAFE_CALL_NO_SYNC(cuMemGetInfo(free_mem, total_mem)); #endif } void loadReferenceTexture(MatchContext* ctx, bool unified) { Reference* ref = ctx->ref; int numrows = ceil(ref->len / ((float)ref->pitch)); int blocksize = 4; numrows += blocksize; cudaChannelFormatDesc refTextureDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSigned); if (!ctx->on_cpu) { TIMESTAMP(t0); #if REFTEX #if REORDER_REF CUDA_MALLOC_ARRAY((cudaArray**)(&ref->d_ref_array), &refTextureDesc, ref->pitch, numrows); CUDA_SAFE_CALL(cudaMemcpyToArray( (cudaArray*)(ref->d_ref_array), 0, 0, ref->h_ref_array, numrows*ref->pitch, cudaMemcpyHostToDevice)); reftex.addressMode[0] = cudaAddressModeClamp; reftex.addressMode[1] = cudaAddressModeClamp; reftex.filterMode = cudaFilterModePoint; reftex.normalized = false; BIND_TEX_ARRAY(reftex, (cudaArray*)ref->d_ref_array, refTextureDesc); ctx->ref->bytes_on_board += numrows * ref->pitch; #else CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len); CUDA_SAFE_CALL(cudaMemcpy( (void*)(ref->d_ref_array), ref->str, ref->len, cudaMemcpyHostToDevice) ); reftex.addressMode[0] = cudaAddressModeClamp; reftex.filterMode = cudaFilterModePoint; reftex.normalized = false; // access with normalized texture coordinates cudaChannelFormatDesc refDesc = cudaCreateChannelDesc(8,0,0,0, cudaChannelFormatKindUnsigned); BIND_TEX(0, reftex, (void*)(ref->d_ref_array), refDesc, ref->len); ctx->ref->bytes_on_board += ref->len; #endif #else #if REORDER_REF size_t refpitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_ref_array), &refpitch, ref->pitch * sizeof(char), numrows); CUDA_SAFE_CALL(
cudaMemcpy2D((ref->d_ref_array), refpitch, ref->h_ref_array, ref->pitch , ref->pitch * sizeof(char), numrows, cudaMemcpyHostToDevice)); ctx->ref->bytes_on_board += numrows * ref->pitch; #else if (unified) { ref->d_ref_array = ref->str; } else { CUDA_MALLOC( (void**)(&ref->d_ref_array), ref->len); CUDA_SAFE_CALL( cudaMemcpy( (void*)(ref->d_ref_array), ref->str, ref->len, cudaMemcpyHostToDevice) ); } ctx->ref->bytes_on_board += ref->len; #endif #endif TIMESTAMP(t1); ctx->statistics.t_ref_str_to_board += ELAPSED(t0, t1); } else { ref->d_ref_array = NULL; } } void unloadReferenceString(Reference* ref) { #if REFTEX CUDA_SAFE_CALL(cudaUnbindTexture( reftex ) ); #endif #if REORDER_REF && REFTEX CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_ref_array))); #else CUDA_SAFE_CALL(cudaFree((ref->d_ref_array))); #endif ref->d_ref_array = NULL; } void unloadReferenceTree(MatchContext* ctx) { Reference* ref = ctx->ref; #if REORDER_TREE // Unload nodetex #if NODETEX CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) ); CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_node_tex_array))); #else CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array)); #endif ref->d_node_tex_array = NULL; // Unload childrentex if (ref->d_children_tex_array) { #if CHILDTEX CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) ); CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_children_tex_array))); #else CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array)); #endif } ref->d_children_tex_array = NULL; #else #if NODETEX CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) ); #endif CUDA_SAFE_CALL(cudaFree(ref->d_node_tex_array)); ref->d_node_tex_array = NULL; // Unload childrentex if (ref->d_children_tex_array) { #if CHILDTEX CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) ); #endif CUDA_SAFE_CALL(cudaFree(ref->d_children_tex_array)); ref->d_children_tex_array = NULL; } #endif #if TREE_ACCESS_HISTOGRAM CUDA_SAFE_CALL(cudaFree(ref->d_node_hist)); ref->d_node_hist = NULL; CUDA_SAFE_CALL(cudaFree(ref->d_child_hist)); ref->d_child_hist = NULL; #endif } //loads a tree and text for [begin, end) in the reference void loadReference(MatchContext* ctx, bool unified) { Reference* ref = ctx->ref; ref->bytes_on_board = 0; loadReferenceTexture(ctx, unified); if (!ctx->on_cpu) { TIMESTAMP(t0); // node texels ref->bytes_on_board += ref->tex_width * ref->tex_node_height * (sizeof(PixelOfNode)); // children texels ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren); #if REORDER_TREE #if NODETEX cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); CUDA_MALLOC_ARRAY( (cudaArray**)(&ref->d_node_tex_array), &nodeTextureDesc, ref->tex_width, ref->tex_node_height ); CUDA_SAFE_CALL( cudaMemcpyToArray( (cudaArray*)(ref->d_node_tex_array), 0, 0, ref->h_node_tex_array, ref->tex_width * ref->tex_node_height * sizeof(PixelOfNode), cudaMemcpyHostToDevice)); nodetex.addressMode[0] = cudaAddressModeClamp; nodetex.addressMode[1] = cudaAddressModeClamp; nodetex.filterMode = cudaFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates BIND_TEX_ARRAY(nodetex, (cudaArray*)ref->d_node_tex_array, nodeTextureDesc); #else size_t nodepitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_node_tex_array), &nodepitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_node_height ); CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_node_tex_array), nodepitch, ref->h_node_tex_array, nodepitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_node_height, cudaMemcpyHostToDevice)); #endif if 
(ref->tex_children_height) { #if CHILDTEX cudaChannelFormatDesc childrenTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); CUDA_MALLOC_ARRAY( (cudaArray**)(&ref->d_children_tex_array), &childrenTextureDesc, ref->tex_width, ref->tex_children_height ); CUDA_SAFE_CALL( cudaMemcpyToArray((cudaArray*)(ref->d_children_tex_array), 0, 0, ref->h_children_tex_array, ref->tex_width * ref->tex_children_height * sizeof(PixelOfChildren), cudaMemcpyHostToDevice)); childrentex.addressMode[0] = cudaAddressModeClamp; childrentex.addressMode[1] = cudaAddressModeClamp; childrentex.filterMode = cudaFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates BIND_TEX_ARRAY(childrentex, (cudaArray*)(ref->d_children_tex_array), childrenTextureDesc); #else size_t childpitch; CUDA_MALLOC_PITCH( (void**)(&ref->d_children_tex_array), &childpitch, ref->tex_width * sizeof(PixelOfChildren), ref->tex_children_height ); CUDA_SAFE_CALL( cudaMemcpy2D((ref->d_children_tex_array), childpitch, ref->h_children_tex_array, childpitch, ref->tex_width * sizeof(PixelOfNode), ref->tex_children_height, cudaMemcpyHostToDevice)); #endif } #if TREE_ACCESS_HISTOGRAM // node hist ref->bytes_on_board += ref->tex_width * ref->tex_node_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_node_hist), ref->tex_width * ref->tex_node_height *sizeof(int)); CUDA_SAFE_CALL( cudaMemset((ref->d_node_hist),0, ref->tex_width * ref->tex_node_height * sizeof(int))); if (ref->tex_children_height) { // children hist ref->bytes_on_board += ref->tex_width * ref->tex_children_height * sizeof(int); fprintf(stderr, "after child_hist ref->bytes_on_board:%lu\n", ref->bytes_on_board); CUDA_MALLOC( (void**)(&ref->d_child_hist), ref->tex_width * ref->tex_children_height *sizeof(int)); CUDA_SAFE_CALL( cudaMemset((ref->d_child_hist),0, ref->tex_width * ref->tex_children_height * sizeof(int))); } #endif #else // NO TREE REORDERING // Node tex, 1-dimensional CUDA_MALLOC( (void**)(&ref->d_node_tex_array), ref->tex_node_height * sizeof(PixelOfNode)); CUDA_SAFE_CALL( cudaMemcpy( (ref->d_node_tex_array), ref->h_node_tex_array, ref->tex_node_height * sizeof(PixelOfNode), cudaMemcpyHostToDevice)); #if NODETEX cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); nodetex.addressMode[0] = cudaAddressModeClamp; nodetex.filterMode = cudaFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates BIND_TEX(0, nodetex, (void*)(ref->d_node_tex_array), nodeTextureDesc, ref->tex_node_height* sizeof(PixelOfNode)); #endif if (ref->tex_children_height) { // Child tex, 1-dimensional CUDA_MALLOC( (void**)(&ref->d_children_tex_array), ref->tex_children_height * sizeof(PixelOfChildren)); CUDA_SAFE_CALL( cudaMemcpy( (ref->d_children_tex_array), ref->h_children_tex_array, ref->tex_children_height * sizeof(PixelOfChildren), cudaMemcpyHostToDevice)); #if CHILDTEX cudaChannelFormatDesc childTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); childrentex.addressMode[0] = cudaAddressModeClamp; childrentex.filterMode = cudaFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates BIND_TEX(0, childrentex, (void*)(ref->d_children_tex_array), childTextureDesc, ref->tex_children_height* sizeof(PixelOfChildren)); #endif } #if TREE_ACCESS_HISTOGRAM ref->bytes_on_board += ref->tex_node_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_node_hist), ref->tex_node_height *sizeof(int)); 
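/* The TWO_LEVEL_NODE_TREE / TWO_LEVEL_CHILD_TREE blocks further below copy
 * the first NODE_THRESH / CHILD_THRESH tree texels into node_tree_top /
 * child_tree_top with cudaMemcpyToSymbol, presumably __constant__ arrays
 * declared in mummergpu_kernel.cu, so lookups near the tree root can be
 * served from the constant cache instead of the node/children textures.
 * The id2addr()-based index arithmetic mirrors whichever layout the
 * MERGETEX / REORDER_TREE macros selected. */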
CUDA_SAFE_CALL( cudaMemset((ref->d_node_hist),0, ref->tex_node_height * sizeof(int))); if (ref->tex_children_height) { ref->bytes_on_board += ref->tex_children_height * sizeof(int); CUDA_MALLOC( (void**)(&ref->d_child_hist), ref->tex_children_height *sizeof(int)); CUDA_SAFE_CALL( cudaMemset((ref->d_child_hist),0, ref->tex_children_height * sizeof(int))); } #endif #endif #if TWO_LEVEL_NODE_TREE PixelOfNode node_buf[NODE_THRESH]; memset(node_buf, 0, sizeof(node_buf)); for (unsigned int i = 0; (i < NODE_THRESH) && (i < ref->num_nodes); ++i) { TextureAddress myaddress(id2addr(i)); #if MERGETEX && REORDER_TREE myaddress.x &= 0x7FF; myaddress.x *= 2; int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc]; #elif REORDER_TREE int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[loc]; #elif MERGETEX node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x*2]; #else node_buf[i]= ((PixelOfNode*)(ref->h_node_tex_array))[myaddress.x]; #endif } CUDA_SAFE_CALL( cudaMemcpyToSymbol(node_tree_top, node_buf, sizeof(node_buf))); #endif #if TWO_LEVEL_CHILD_TREE PixelOfChildren child_buf[CHILD_THRESH]; memset(child_buf, 0, sizeof(child_buf)); for (unsigned int i = 0; (i < CHILD_THRESH) && (i < ref->num_nodes); ++i) { TextureAddress myaddress(id2addr(i)); #if MERGETEX && REORDER_TREE myaddress.x &= 0x7FF; myaddress.x *= 2; int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[loc+1]; #elif REORDER_TREE int loc = myaddress.x + myaddress.y*MAX_TEXTURE_DIMENSION; child_buf[i]= ((PixelOfChildren*)(ref->h_children))[loc]; #elif MERGETEX child_buf[i]= ((PixelOfChildren*)(ref->h_node_tex_array))[myaddress.x*2+1]; #else child_buf[i]= ((PixelOfChildren*)(ref->h_children_tex_array))[myaddress.x]; #endif } CUDA_SAFE_CALL( cudaMemcpyToSymbol(child_tree_top, child_buf, sizeof(child_buf))); #endif TIMESTAMP(t1); ctx->statistics.t_tree_to_board += ELAPSED(t0, t1); fprintf(stderr, "done\n"); } else { ref->d_node_tex_array = NULL; ref->d_children_tex_array = NULL; } } void dumpQueryBlockInfo(QuerySet* queries) { fprintf(stderr, "\tProcessing queries %s to %s\n", queries->h_names[0], queries->h_names[queries->count-1]); } void loadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; queries->bytes_on_board = 0; unsigned int numQueries = queries->count; if (!ctx->on_cpu) { fprintf(stderr, "Allocating device memory for queries... 
"); TIMESTAMP(t0); dumpQueryBlockInfo(queries); CUDA_MALLOC((void**) &queries->d_tex_array, queries->texlen); \ queries->bytes_on_board += queries->texlen; CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_tex_array, queries->h_tex_array + queries->h_addrs_tex_array[0], queries->texlen, cudaMemcpyHostToDevice)); #if QRYTEX qrytex.addressMode[0] = cudaAddressModeClamp; qrytex.filterMode = cudaFilterModePoint; qrytex.normalized = false; // access with normalized texture coordinates cudaChannelFormatDesc qryDesc = cudaCreateChannelDesc(8,0,0,0, cudaChannelFormatKindUnsigned); BIND_TEX(0, qrytex, (void*)(queries->d_tex_array), qryDesc, queries->texlen); #endif CUDA_MALLOC((void**) &queries->d_addrs_tex_array, numQueries * sizeof(int)); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_addrs_tex_array, queries->h_addrs_tex_array, numQueries * sizeof(int), cudaMemcpyHostToDevice)); CUDA_MALLOC((void**) &queries->d_lengths_array, numQueries * sizeof(int)); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( cudaMemcpy((void*) queries->d_lengths_array, queries->h_lengths_array, numQueries * sizeof(int), cudaMemcpyHostToDevice)); TIMESTAMP(t1); ctx->statistics.t_queries_to_board += ELAPSED(t0, t1); fprintf(stderr, "\tallocated %ld bytes\n", queries->bytes_on_board); } else { queries->d_addrs_tex_array = NULL; queries->d_tex_array = NULL; queries->d_lengths_array = NULL; fprintf(stderr, " allocated %ld bytes\n", 2 * numQueries*sizeof(int) + queries->texlen); } } void unloadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; CUDA_SAFE_CALL(cudaFree(queries->d_tex_array)); queries->d_tex_array = NULL; CUDA_SAFE_CALL(cudaFree(queries->d_addrs_tex_array)); queries->d_addrs_tex_array = NULL; CUDA_SAFE_CALL(cudaFree(queries->d_lengths_array)); queries->d_lengths_array = NULL; queries->bytes_on_board = 0; } // Computes the location of the first MatchCoord for a given query. NOTE: // Do NOT use this function if COALESCED_QUERIES == 1 inline int match_coord_addrs(int qryid, int qry_addrs, int match_length) { return qry_addrs - qryid * (match_length + 1); } // Construct the offset table for a set of queries. This table will be used // by the printing functions, and if COALESCED_QUERIES == 1, by the matching // kernel. 
void buildCoordOffsetArray(MatchContext* ctx, int** h_coord_offset_array, unsigned int* num_coords) { int numCoords = 0; int match_length = ctx->min_match_length; int numQueries = ctx->queries->count; int* lengths = ctx->queries->h_lengths_array; int* coord_offsets = (int*)calloc(numQueries, sizeof(int)); #if COALESCED_QUERIES for (unsigned int i = 0; i < numQueries; i += WARP_SIZE) { // Every query in this warp will need at least this many coords int max_num_coords = 0; for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) { int num_coords = lengths[i + j] - match_length + 1; if ( max_num_coords < num_coords) max_num_coords = num_coords; } unsigned int block_size = max_num_coords * WARP_SIZE; for (unsigned int j = 0; j < WARP_SIZE && (i + j) < numQueries; ++j) { ctx->results.h_coord_tex_array[i + j] = numCoords + j; } numCoords += block_size; } #else for (unsigned int i = 0; i < numQueries; ++i) { int qryoffset = ctx->queries->h_addrs_tex_array[i]; coord_offsets[i] = match_coord_addrs(i, qryoffset, match_length); } if (numQueries > 0) { unsigned int last_qry = numQueries - 1; unsigned int last_qry_len = lengths[last_qry] - match_length + 1; numCoords = coord_offsets[last_qry] + last_qry_len; fprintf(stderr, "Need %d match coords for this result array\n", numCoords); } #endif *num_coords = numCoords; *h_coord_offset_array = coord_offsets; } void loadResultBuffer(MatchContext* ctx) { unsigned int numQueries = ctx->queries->count; assert (numQueries); TIMESTAMP(t0); buildCoordOffsetArray(ctx, &(ctx->results.h_coord_tex_array), &(ctx->results.numCoords)); TIMESTAMP(t1); ctx->statistics.t_build_coord_offsets += ELAPSED(t0, t1); unsigned int numCoords = ctx->results.numCoords; fprintf(stderr, "Allocating result array for %d queries (%lu bytes) ...", numQueries, numCoords*sizeof(MatchCoord) ); size_t boardFreeMemory = 0; size_t total_mem = 0; boardMemory(&boardFreeMemory, &total_mem); fprintf(stderr,"board free memory: %lu total memory: %lu\n", boardFreeMemory, total_mem); ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord)); if (ctx->results.h_match_coords == NULL) { trap_dbg(); exit(EXIT_FAILURE); } if (!ctx->on_cpu) { TIMESTAMP(t2); ctx->results.bytes_on_board = 0; CUDA_MALLOC( (void**) &ctx->results.d_match_coords, numCoords * sizeof(MatchCoord)); ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord); CUDA_SAFE_CALL( cudaMemset( (void*)ctx->results.d_match_coords, 0, numCoords * sizeof(MatchCoord))); #if COALESCED_QUERIES CUDA_MALLOC((void**) &ctx->results.d_coord_tex_array, numQueries * sizeof(int)); ctx->results.bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( cudaMemcpy((void*) ctx->results.d_coord_tex_array, ctx->results.h_coord_tex_array, numQueries * sizeof(int), cudaMemcpyHostToDevice)); #endif TIMESTAMP(t3); ctx->statistics.t_match_coords_to_board += ELAPSED(t2, t3); } else { ctx->results.d_match_coords = NULL; } fprintf(stderr, "done\n"); } void unloadResultBuffer(MatchContext* ctx) { CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords)); ctx->results.d_match_coords = NULL; ctx->results.bytes_on_board = 0; #if COALESCED_QUERIES CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords)); #endif } void transferResultsFromDevice(MatchContext* ctx) { if (!ctx->on_cpu) { TIMESTAMP(t0); CUDA_SAFE_CALL(cudaMemcpy(ctx->results.h_match_coords, ctx->results.d_match_coords, ctx->results.numCoords * sizeof(MatchCoord), cudaMemcpyDeviceToHost) ); #if TREE_ACCESS_HISTOGRAM CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_node_hist, 
ctx->ref->d_node_hist, ctx->ref->tex_node_height * ctx->ref->tex_width * sizeof(int), cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL(cudaMemcpy(ctx->ref->h_child_hist, ctx->ref->d_child_hist, ctx->ref->tex_children_height * ctx->ref->tex_width * sizeof(int), cudaMemcpyDeviceToHost) ); if (ctx->statistics.node_hist_size < ctx->ref->tex_width * ctx->ref->tex_node_height) { int* temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_node_height, sizeof(int)); if (ctx->statistics.node_hist_size) memcpy(temp, ctx->statistics.node_hist, ctx->statistics.node_hist_size * sizeof(int)); ctx->statistics.node_hist = temp; ctx->statistics.node_hist_size = ctx->ref->tex_width * ctx->ref->tex_node_height; } if (ctx->statistics.child_hist_size < ctx->ref->tex_width * ctx->ref->tex_children_height) { temp = (int*)calloc(ctx->ref->tex_width * ctx->ref->tex_children_height, sizeof(int)); if (ctx->statistics.hist_size) memcpy(temp, ctx->statistics.child_hist, ctx->statistics.hist_size * sizeof(int)); ctx->statistics.child_hist = temp; ctx->statistics.child_hist_size = ctx->ref->tex_width * ctx->ref->tex_children_height; } for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) { ctx->statistics.node_hist[i] += ctx->ref->h_node_hist[i]; } for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) { ctx->statistics.child_hist[i] += ctx->ref->h_child_hist[i]; } #endif TIMESTAMP(t1); ctx->statistics.t_match_coords_from_board += ELAPSED(t0, t1); } } int flushOutput(); int addToBuffer(char* string); char numbuffer[32]; MatchCoord* coordForQueryChar(MatchContext* ctx, unsigned int qryid, unsigned int qrychar) { MatchResults* results = &(ctx->results); MatchCoord* coords = results->h_match_coords; #if COALESCED_QUERIES return coords + results->h_coord_tex_array[qryid] + qrychar * WARP_SIZE; #else return coords + results->h_coord_tex_array[qryid] + qrychar; #endif } void coordsToPrintBuffers(MatchContext* ctx, ReferencePage* page, MatchInfo** matches, Alignment** alignments, unsigned int mem_avail, unsigned int* coord_idx, unsigned int* match_idx, unsigned int* align_idx, unsigned int* nextqry, unsigned int* nextqrychar, bool unified) { unsigned int numQueries = ctx->queries->count; int match_length = ctx->min_match_length; unsigned int cidx = *coord_idx; unsigned int midx = 0; unsigned int numCoords = ctx->results.numCoords; unsigned int numMatches = 0; unsigned int numAlignments = 0; int DEBUG = 0; if (DEBUG && cidx == 0) { for (int j = 0; j < numCoords; ++j) { MatchCoord * coord = ctx->results.h_match_coords+j; if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { //fprintf(stdout, "node: %d\n", // coord->node); fprintf(stdout, "node: %d leaves:%d\n", coord->node.data, lookupNumLeaves(page, coord->node)); } } exit(0); } // How much can we fit into mem_avail? 
for (int j = cidx; j < numCoords; ++j) { MatchCoord* coord = ctx->results.h_match_coords + j; int queryAlignments = 0; int queryMatches = 0; if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { int numLeaves = lookupNumLeaves(page, coord->node); queryAlignments += numLeaves; queryMatches++; } int allMatches = numMatches + queryMatches; int allAlignments = numAlignments + queryAlignments; int neededSize = allMatches * sizeof(MatchInfo) + allAlignments * sizeof(Alignment); if (neededSize > mem_avail || (allMatches/BLOCKSIZE) >= MAX_GRID_DIMENSION) { // adding this match won't fit on the board break; } ++cidx; numMatches = allMatches; numAlignments = allAlignments; } MatchInfo* M; if (unified) { cudaMallocManaged(&M, numMatches * sizeof(MatchInfo)); memset(M, 0, numMatches * sizeof(MatchInfo)); } else { M = (MatchInfo*)calloc(numMatches, sizeof(MatchInfo)); } unsigned int alignmentOffset = 0; int qry = *nextqry; int qrychar = *nextqrychar; bool set_full = false; while (qry < numQueries) { // h_lengths_array doesn't count the 'q' at the beginning of each query int qlen = ctx->queries->h_lengths_array[qry] + 1 - match_length; while (qrychar < qlen) { if (midx >= numMatches) { set_full = true; break; } MatchCoord* coord = coordForQueryChar(ctx, qry, qrychar); if (coord->node.data > 0 && !(coord->edge_match_length & FRMASK)) { MatchInfo m; m.resultsoffset = alignmentOffset; m.qrystartpos = qrychar; m.matchnode = coord->node; m.edgematch = coord->edge_match_length; m.numLeaves = lookupNumLeaves(page, m.matchnode); m.queryid = qry; alignmentOffset += m.numLeaves; M[midx++] = m; } ++qrychar; } if (set_full) break; ++qry; qrychar = 0; } *coord_idx = cidx; *match_idx = midx; *align_idx = alignmentOffset; *matches = M; *nextqry = qry; *nextqrychar = qrychar; fprintf(stderr, "Allocing %lu bytes of host memory for %u alignments\n", alignmentOffset * sizeof(Alignment), numAlignments); if (unified) { cudaMallocManaged(alignments, alignmentOffset * sizeof(Alignment)); memset(*alignments, 0, alignmentOffset * sizeof(Alignment)); } else { *alignments = (struct Alignment *) calloc(alignmentOffset, sizeof(Alignment)); } //cudaMallocHost((void**)alignments, numAlignments * sizeof(Alignment)); } void runPrintKernel(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* alignments, unsigned int numAlignments, bool unified) { MatchInfo* d_matches; size_t matchesSize = numMatches * sizeof(MatchInfo); if (!unified) { CUDA_MALLOC(&d_matches, matchesSize); } struct Alignment* d_alignments; size_t alignmentSize = numAlignments * sizeof(Alignment); if (!unified) { CUDA_MALLOC(&d_alignments, alignmentSize); CUDA_SAFE_CALL(cudaMemset((void*) d_alignments, 0, alignmentSize)); } TIMESTAMP(t0); // Copy matches to card fprintf(stderr, "prepared %d matches %d alignments\n", numMatches, numAlignments); fprintf(stderr, "Copying %lu bytes to host memory for %d alignments\n", numAlignments * sizeof(Alignment), numAlignments); int DEBUG = 0; if (DEBUG) { for (int i = 0; i < numMatches; i++) { printf("m[%d]:\t%d\t%d\t%d\t%d\t%d\t%d\n", i, h_matches[i].resultsoffset, h_matches[i].queryid, h_matches[i].matchnode.data, h_matches[i].numLeaves, h_matches[i].edgematch, h_matches[i].qrystartpos); } exit(0); } if (unified) { d_matches = h_matches; d_alignments = alignments; } else { CUDA_SAFE_CALL(cudaMemcpy(d_matches, h_matches, matchesSize, cudaMemcpyHostToDevice)); } TIMESTAMP(t1); long long mtime = ELAPSED(t0, t1); // Launch the kernel int blocksize = (numMatches > BLOCKSIZE) ?
BLOCKSIZE : numMatches; dim3 dimBlock(blocksize, 1, 1); dim3 dimGrid(ceil(numMatches / (float)BLOCKSIZE), 1, 1); fprintf(stderr, " Calling print kernel... "); printKernel <<< dimGrid, dimBlock, 0 >>> (d_matches, numMatches, d_alignments, #if COALESCED_QUERIES ctx->results.d_coord_tex_array, #endif #if !QRYTEX #if COALESCED_QUERIES (int*) #endif ctx->queries->d_tex_array, #endif #if !NODETEX (_PixelOfNode*)ctx->ref->d_node_tex_array, #endif #if !CHILDTEX (_PixelOfChildren*)ctx->ref->d_children_tex_array, #endif ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, page->begin, page->end, page->shadow_left, page->shadow_right, ctx->min_match_length #if TREE_ACCESS_HISTOGRAM , ctx->ref->d_node_hist, ctx->ref->d_child_hist #endif ); cudaThreadSynchronize(); cudaError_t err = cudaGetLastError(); if ( cudaSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } TIMESTAMP(t2); // Copy the results back to the host if (!unified) { CUDA_SAFE_CALL(cudaMemcpy((void*)alignments, (void*)d_alignments, alignmentSize, cudaMemcpyDeviceToHost)); } cudaThreadSynchronize(); TIMESTAMP(t3); long long atime = ELAPSED(t2, t3); fprintf(stderr, "memcpy time= %lld\n", atime + mtime); // Cleanup if (!unified) { CUDA_SAFE_CALL(cudaFree(d_alignments)); CUDA_SAFE_CALL(cudaFree(d_matches)); } } // TODO: need reverse-complement printing support void runPrintOnCPU(MatchContext* ctx, ReferencePage* page, MatchInfo* h_matches, unsigned int numMatches, Alignment* alignments, unsigned int numAlignments) { unsigned int min_match_length = ctx->min_match_length; int* addrs = ctx->queries->h_addrs_tex_array; int* lengths = ctx->queries->h_lengths_array; char* qrychars = ctx->queries->h_tex_array; if (!numMatches) return; int qry = -1; unsigned int qrylen; for (int i = 0; i < numMatches; ++i) { MatchInfo& match = h_matches[i]; if (match.queryid != qry) { qry = match.queryid; qrylen = lengths[qry]; } if (!(match.edgematch & FRMASK)) { printAlignments(page, alignments + match.resultsoffset, #if COALESCED_QUERIES qrychars + sizeof(int) * addrs[qry], #else qrychars + addrs[qry], #endif qrylen, match.matchnode, match.qrystartpos, match.edgematch, min_match_length, 0, ctx->forwardcoordinates); } } } int addMatchToBuffer(int left_in_ref, int qrypos, int matchlen); void getExactAlignments(MatchContext * ctx, ReferencePage * page, bool on_cpu, bool unified) { assert(!ctx->reverse && !ctx->forwardreverse); size_t boardFreeMemory; size_t total_mem; if (!on_cpu) { boardMemory(&boardFreeMemory, &total_mem); fprintf(stderr, "board free memory: %lu total memory: %lu\n", boardFreeMemory, total_mem); } else { boardFreeMemory = 256 * 1024 * 1024; total_mem = boardFreeMemory; } #ifdef __DEVICE_EMULATION__ boardFreeMemory = 512 * 1024 * 1024; #endif boardFreeMemory -= BREATHING_ROOM; fprintf(stderr, "board free memory: %lu\n", boardFreeMemory); int rTotalMatches = 0; int rTotalAlignments = 0; int totalRounds = 0; unsigned int last_coord = ctx->results.numCoords; unsigned int next_coord = 0; unsigned int nextqry = 0; unsigned int nextqrychar = 0; int lastqry = -1; while (next_coord < last_coord) { // see how many queries will fit on the board totalRounds++; unsigned int numMatches = 0; unsigned int numAlignments = 0; MatchInfo* h_matches = NULL; Alignment* h_alignments = NULL; int coord_left = next_coord; TIMESTAMP(t0); coordsToPrintBuffers(ctx, page, &h_matches, &h_alignments, boardFreeMemory, &next_coord, &numMatches, &numAlignments, &nextqry, &nextqrychar, unified); TIMESTAMP(t1); 
long long btime = ELAPSED(t0, t1); ctx->statistics.t_coords_to_buffers += btime; fprintf(stderr, "buffer prep time= %lld\n", btime); fprintf(stderr, "Round %d: Printing results for match coords [%d-%d) of %d using %d matches and %d alignments\n", totalRounds, coord_left, next_coord, last_coord, numMatches, numAlignments); if (numMatches == 0) continue; char buf[256]; //assert(qryend > qrystart); rTotalAlignments += numAlignments; rTotalMatches += numMatches; if (num_bind_tex_calls > 100) { cudaThreadExit(); num_bind_tex_calls = 0; loadReference(ctx, unified); loadQueries(ctx); } TIMESTAMP(t2); if (on_cpu) { runPrintOnCPU(ctx, page, h_matches, numMatches, h_alignments, numAlignments); } else { runPrintKernel(ctx, page, h_matches, numMatches, h_alignments, numAlignments, unified); } TIMESTAMP(t3); long long ktime = ELAPSED(t2, t3); ctx->statistics.t_print_kernel += ktime; fprintf(stderr, "print kernel time= %lld\n", ktime); //Process the alignments for (int m = 0; m < numMatches; m++) { int base = h_matches[m].resultsoffset; for (int i = 0; i < h_matches[m].numLeaves; i++) { // See if there are any more left maximal alignments for this match if (h_alignments[base+i].left_in_ref == 0) { break; } if (h_matches[m].queryid != lastqry) { lastqry = h_matches[m].queryid; //addToBuffer("> "); //addToBuffer(*(ctx->queries->h_names + lastqry)); //addToBuffer("\n"); } //sprintf(buf, "%d\t%d\t%d\n", // h_alignments[base+i].left_in_ref, // h_matches[m].qrystartpos + 1, // h_alignments[base+i].matchlen); //addToBuffer(buf); // addMatchToBuffer(h_alignments[base+i].left_in_ref, // h_matches[m].qrystartpos + 1, // h_alignments[base+i].matchlen); } } //flushOutput(); TIMESTAMP(t4); ctx->statistics.t_results_to_disk += ELAPSED(t3, t4); free(h_matches); free(h_alignments); } free(ctx->results.h_coord_tex_array); free(ctx->results.h_match_coords); ctx->results.h_coord_tex_array = NULL; ctx->results.h_match_coords = NULL; fprintf(stderr, "Finished processing %d matches and %d potential alignments in %d rounds\n", rTotalMatches, rTotalAlignments, totalRounds); } int getQueryBlock(MatchContext* ctx, size_t device_mem_avail) { QuerySet* queries = ctx->queries; char * queryTex = NULL; int* queryAddrs = NULL; int* queryLengths = NULL; unsigned int numQueries; unsigned int num_match_coords; size_t queryLen; char** names; fprintf(stderr, "Loading query block... 
"); TIMESTAMP(t0); getQueriesTexture(queries->qfile, &queryTex, &queryLen, &queryAddrs, &names, &queryLengths, &numQueries, &num_match_coords, device_mem_avail, ctx->min_match_length, ctx->reverse || ctx->forwardreverse); TIMESTAMP(t1); ctx->statistics.t_queries_from_disk += ELAPSED(t0, t1); queries->h_tex_array = queryTex; queries->count = numQueries; queries->h_addrs_tex_array = queryAddrs; queries->texlen = queryLen; queries->h_names = names; queries->h_lengths_array = queryLengths; ctx->results.numCoords = num_match_coords; fprintf(stderr, "done.\n"); return numQueries; } void destroyQueryBlock(QuerySet* queries) { free(queries->h_tex_array); queries->h_tex_array = NULL; for (int i = 0; i < queries->count; ++i) free(queries->h_names[i]); free(queries->h_names); queries->count = 0; queries->texlen = 0; free(queries->h_addrs_tex_array); queries->h_addrs_tex_array = NULL; free(queries->h_lengths_array); queries->h_lengths_array = NULL; } void resetStats(Statistics* stats) { stats->t_end_to_end = 0.0; stats->t_match_kernel = 0.0; stats->t_print_kernel = 0.0; stats->t_queries_to_board = 0.0; stats->t_match_coords_to_board = 0.0; stats->t_match_coords_from_board = 0.0; stats->t_tree_to_board = 0.0; stats->t_ref_str_to_board = 0.0; stats->t_queries_from_disk = 0.0; stats->t_ref_from_disk = 0.0; stats->t_results_to_disk = 0.0; stats->t_tree_construction = 0.0; stats->t_tree_reorder = 0.0; stats->t_tree_flatten = 0.0; stats->t_reorder_ref_str = 0.0; stats->t_build_coord_offsets = 0.0; stats->t_coords_to_buffers = 0.0; stats->bp_avg_query_length = 0.0; #if TREE_ACCESS_HISTOGRAM if (stats->node_hist_size) { free(stats->node_hist); stats->node_hist = NULL; stats->node_hist_size = 0; } if (stats->child_hist_size) { free(stats->child_hist); stats->child_hist = NULL; stats->child_hist_size = 0; } #endif } void writeStatisticsFile(Statistics* stats, char* stats_filename, char* node_hist_filename = NULL, char* child_hist_filename = NULL) { if (stats_filename) { FILE* f = fopen(stats_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename); } else { fprintf(f, "Q"); fprintf(f, ",R"); fprintf(f, ",T"); fprintf(f, ",m"); fprintf(f, ",r"); fprintf(f, ",t"); fprintf(f, ",n"); fprintf(f, ",Total"); fprintf(f, ",Match kernel"); fprintf(f, ",Print Kernel"); fprintf(f, ",Queries to board"); fprintf(f, ",Match coords to board"); fprintf(f, ",Match coords from board"); fprintf(f, ",Tree to board"); fprintf(f, ",Ref str to board"); fprintf(f, ",Queries from disk"); fprintf(f, ",Ref from disk"); fprintf(f, ",Output to disk"); fprintf(f, ",Tree construction"); fprintf(f, ",Tree reorder"); fprintf(f, ",Tree flatten"); fprintf(f, ",Ref reorder"); fprintf(f, ",Build coord table"); fprintf(f, ",Coords to buffers"); fprintf(f, ",Avg qry length"); fprintf(f, "\n"); fprintf(f, "%d", QRYTEX); fprintf(f, ",%d", REFTEX); fprintf(f, ",%d", TREETEX); fprintf(f, ",%d", MERGETEX); fprintf(f, ",%d", REORDER_REF); fprintf(f, ",%d", REORDER_TREE); fprintf(f, ",%d", RENUMBER_TREE); fprintf(f, ",%f", stats->t_end_to_end * 1e-6); fprintf(f, ",%f", stats->t_match_kernel * 1e-6); fprintf(f, ",%f", stats->t_print_kernel * 1e-6); fprintf(f, ",%f", stats->t_queries_to_board * 1e-6); fprintf(f, ",%f", stats->t_match_coords_to_board * 1e-6); fprintf(f, ",%f", stats->t_match_coords_from_board * 1e-6); fprintf(f, ",%f", stats->t_tree_to_board * 1e-6); fprintf(f, ",%f", stats->t_ref_str_to_board * 1e-6); fprintf(f, ",%f", stats->t_queries_from_disk * 1e-6); fprintf(f, ",%f", stats->t_ref_from_disk * 
1e-6); fprintf(f, ",%f", stats->t_results_to_disk * 1e-6); fprintf(f, ",%f", stats->t_tree_construction * 1e-6); fprintf(f, ",%f", stats->t_tree_reorder * 1e-6); fprintf(f, ",%f", stats->t_tree_flatten * 1e-6); fprintf(f, ",%f", stats->t_reorder_ref_str * 1e-6); fprintf(f, ",%f", stats->t_build_coord_offsets * 1e-6); fprintf(f, ",%f", stats->t_coords_to_buffers * 1e-6); fprintf(f, ",%f", stats->bp_avg_query_length * 1e-6); fprintf(f,"\n"); fclose(f); } } #if TREE_ACCESS_HISTOGRAM if (node_hist_filename) { FILE* f = fopen(node_hist_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", node_hist_filename); } else { for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) fprintf(f, "%d\t%d\n", i, ctx->statistics.node_hist[i]); } } if (child_hist_filename) { FILE* f = fopen(child_hist_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", child_hist_filename); } else { for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) fprintf(f, "%d\t%d\n", i, ctx->statistics.child_hist[i]); } } float total_node_hits = 0; float tree_top_node_hits = 0; float total_child_hits = 0; float tree_top_child_hits = 0; for (unsigned int i = 0; i < ctx->statistics.node_hist_size; ++i) { total_node_hits +=ctx->statistics.node_hist[i]; if (i < 256) { tree_top_node_hits += ctx->statistics.node_hist[i]; } } for (unsigned int i = 0; i < ctx->statistics.child_hist_size; ++i) { total_child_hits +=ctx->statistics.child_hist[i]; if (i < 256) { tree_top_child_hits += ctx->statistics.child_hist[i]; } } fprintf(stderr, "Tree top node hits (%d/%d) = %f percent\n",(int)tree_top_node_hits, (int)total_node_hits, tree_top_node_hits /total_node_hits); fprintf(stderr, "Tree top child hits (%d/%d) = %f percent\n",(int)tree_top_child_hits, (int)total_child_hits, tree_top_child_hits /total_child_hits); #endif } void matchOnCPU(MatchContext* ctx, bool doRC) { //TODO: CPU is matching is disabled. if (doRC) { // Match the reverse complement of the queries to the ref computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, REVERSE); } else { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, FORWARD); } } void matchOnGPU(MatchContext* ctx, bool doRC) { int numQueries = ctx->queries->count; int blocksize = (numQueries > BLOCKSIZE) ? 
BLOCKSIZE : numQueries; dim3 dimBlock(blocksize, 1, 1); dim3 dimGrid(ceil(numQueries / (float)BLOCKSIZE), 1, 1); // Match the reverse complement of the queries to the ref if (doRC) { //TODO: GPU RC is disabled mummergpuRCKernel <<< dimGrid, dimBlock, 0 >>> (ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); } else { mummergpuKernel <<< dimGrid, dimBlock, 0 >>> (ctx->results.d_match_coords, #if COALESCED_QUERIES ctx->results.d_coord_tex_array, #endif #if !QRYTEX #if COALESCED_QUERIES (int*) #endif ctx->queries->d_tex_array, #endif #if !NODETEX (_PixelOfNode*)(ctx->ref->d_node_tex_array), #endif #if !CHILDTEX (_PixelOfChildren*)(ctx->ref->d_children_tex_array), #endif #if !REFTEX (char*)ctx->ref->d_ref_array, #endif ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length #if TREE_ACCESS_HISTOGRAM , ctx->ref->d_node_hist, ctx->ref->d_child_hist #endif ); } // check if kernel execution generated an error cudaError_t err = cudaGetLastError(); if ( cudaSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void getMatchResults(MatchContext* ctx, unsigned int page_num) { transferResultsFromDevice(ctx); } void matchQueryBlockToReferencePage(MatchContext* ctx, ReferencePage* page, bool reverse_complement) { fprintf(stderr, "Memory footprint is:\n\tqueries: %lu\n\tref: %lu\n\tresults: %lu\n", ctx->queries->bytes_on_board, ctx->ref->bytes_on_board, ctx->results.bytes_on_board); TIMESTAMP(t0); if (ctx->on_cpu) { matchOnCPU(ctx, reverse_complement); } else { matchOnGPU(ctx, reverse_complement); cudaThreadSynchronize(); } TIMESTAMP(t1); long long ktime = ELAPSED(t0, t1); ctx->statistics.t_match_kernel += ktime; fprintf(stderr, "match kernel time= %lld\n", ktime); getMatchResults(ctx, page->id); unloadResultBuffer(ctx); } int matchSubset(MatchContext* ctx, ReferencePage* page, bool unified) { loadQueries(ctx); fprintf(stderr, "Matching queries %s - %s against ref coords %d - %d\n", ctx->queries->h_names[0], ctx->queries->h_names[ctx->queries->count - 1], page->begin, page->end); loadResultBuffer(ctx); // TODO: renable RC support by calling this twice /w reverse/fwdreverse // idiom. matchQueryBlockToReferencePage(ctx, page, false); if (USE_PRINT_KERNEL && !ctx->on_cpu) { getExactAlignments(ctx, page, false, unified); } else { getExactAlignments(ctx, page, true, unified); } //flushOutput(); unloadQueries(ctx); return 0; } int getFreeDeviceMemory(bool on_cpu) { size_t free_mem = 0; size_t total_mem = 0; // We have to 'prime' CUDA by making an allocation here. cuMemGetInfo // will return zeroes until we do a malloc. 
int * p = NULL; CUDA_SAFE_CALL(cudaMalloc((void**)&p, sizeof(int))); CUDA_SAFE_CALL(cudaFree(p)); if (!on_cpu) { boardMemory(&free_mem, &total_mem); fprintf(stderr, "board free memory: %lu total memory: %lu\n", free_mem, total_mem); } else { total_mem = free_mem = 804585472; // pretend we are on a 8800 GTX } return free_mem; } int matchQueriesToReferencePage(MatchContext* ctx, ReferencePage* page, bool unified) { fprintf(stderr, "Beginning reference page %p\n", page); int free_mem = getFreeDeviceMemory(ctx->on_cpu); int available_mem = free_mem - page->ref.bytes_on_board - BREATHING_ROOM; ctx->ref = &(page->ref); loadReference(ctx, unified); while (getQueryBlock(ctx, available_mem)) { matchSubset(ctx, page, unified); ctx->statistics.bp_avg_query_length = ctx->queries->texlen / (float)(ctx->queries->count) - 2; destroyQueryBlock(ctx->queries); if (num_bind_tex_calls > 100) { cudaThreadExit(); num_bind_tex_calls = 0; loadReference(ctx, unified); } } unloadReferenceString(ctx->ref); unloadReferenceTree(ctx); lseek(ctx->queries->qfile, 0, SEEK_SET); return 0; } void initReferencePages( MatchContext* ctx , int* num_pages, ReferencePage** pages_out) { unsigned int bases_in_ref = ctx->full_ref_len - 3; unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? BASES_PER_TREE_PAGE : bases_in_ref; unsigned int num_reference_pages = ceil((bases_in_ref + 0.0) / page_size); fprintf(stderr, "Stream will use %d pages for %d bases, page size = %d\n", num_reference_pages, bases_in_ref, page_size); unsigned int page_overlap = MAX_QUERY_LEN + 1; ReferencePage* pages = (ReferencePage*) calloc(num_reference_pages, sizeof(ReferencePage)); pages[0].begin = 1; pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning pages[0].shadow_left = -1; pages[0].id = 0; for (int i = 1; i < num_reference_pages - 1; ++i) { pages[i].begin = pages[i - 1].end - page_overlap; pages[i].end = pages[i].begin + page_size + page_overlap; pages[i - 1].shadow_right = pages[i].begin; pages[i].shadow_left = pages[i-1].end; pages[i].id = i; } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; pages[last_page].begin = pages[last_page - 1].end - page_overlap; pages[last_page].end = ctx->full_ref_len - 1; pages[last_page - 1].shadow_right = pages[last_page].begin; pages[last_page].shadow_right = -1; pages[last_page].shadow_left = pages[last_page - 1].end; pages[last_page].id = last_page; } *pages_out = pages; *num_pages = num_reference_pages; } int streamReferenceAgainstQueries(MatchContext* ctx, bool unified) { int num_reference_pages = 0; ReferencePage* pages = NULL; initReferencePages(ctx, &num_reference_pages, &pages); buildReferenceTexture(&(pages[0].ref), ctx->full_ref, pages[0].begin, pages[0].end, ctx->min_match_length, ctx->dotfilename, ctx->texfilename, &(ctx->statistics), unified); matchQueriesToReferencePage(ctx, &pages[0], unified); destroyReference(&(pages[0].ref)); for (int i = 1; i < num_reference_pages - 1; ++i) { buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin, pages[i].end, ctx->min_match_length, NULL, NULL, &(ctx->statistics), unified); matchQueriesToReferencePage(ctx, &pages[i], unified); destroyReference(&(pages[i].ref)); } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; buildReferenceTexture(&(pages[last_page].ref), ctx->full_ref, pages[last_page].begin, pages[last_page].end, ctx->min_match_length, NULL, NULL, &(ctx->statistics), unified); matchQueriesToReferencePage(ctx, &pages[last_page], 
unified); destroyReference(&(pages[last_page].ref)); } free(pages); return 0; } extern "C" int matchQueries(MatchContext* ctx, bool unified) { assert(sizeof(struct PixelOfNode) == sizeof(uint4)); assert(sizeof(struct PixelOfChildren) == sizeof(uint4)); #if TREE_ACCESS_HISTOGRAM ctx->statistics.node_hist_size = 0; ctx->statistics.child_hist_size = 0; #endif resetStats(&(ctx->statistics)); TIMESTAMP(t0); int ret; fprintf(stderr, "Streaming reference pages against all queries\n"); ret = streamReferenceAgainstQueries(ctx, unified); TIMESTAMP(t1); ctx->statistics.t_end_to_end += ELAPSED(t0, t1); writeStatisticsFile(&(ctx->statistics), ctx->stats_file, "node_hist.out", "child_hist.out"); return ret; }
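// ---------------------------------------------------------------------------
// Editor's note: the sketch below is NOT part of the original mummergpu
// source above. It is a minimal, self-contained illustration of the launch
// configuration idiom that runPrintKernel() and matchOnGPU() use (clamp the
// block size to the element count, take the ceiling of count/BLOCKSIZE for
// the grid), written with an integer ceiling division instead of the float
// ceil(). The kernel name touch_kernel and SKETCH_BLOCKSIZE are illustrative
// assumptions, not names from this code base.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

#define SKETCH_BLOCKSIZE 256   // assumed stand-in for this file's BLOCKSIZE constant

__global__ void touch_kernel(int* data, unsigned int n)
{
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        data[tid] += 1;        // trivial per-element work, guarded against overrun
}

static void launch_touch(int* d_data, unsigned int n)
{
    if (n == 0) return;        // a zero-sized launch would be an error
    unsigned int block = n < SKETCH_BLOCKSIZE ? n : SKETCH_BLOCKSIZE;
    unsigned int grid  = (n + SKETCH_BLOCKSIZE - 1) / SKETCH_BLOCKSIZE;   // integer ceiling division
    touch_kernel<<<grid, block>>>(d_data, n);
    cudaDeviceSynchronize();   // current API; the cudaThreadSynchronize() used above is deprecated
}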
85989753aa4554d8906a7299ca0ac725473cd3eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //---------------------------------*-CUDA-*----------------------------------// // Copyright 2020 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file KNDemoKernel.cu //---------------------------------------------------------------------------// #include "KNDemoKernel.hh" #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include "base/ArrayUtils.hh" #include "base/Assert.hh" #include "physics/base/ParticleTrackView.hh" #include "physics/base/SecondaryAllocatorView.hh" #include "physics/em/detail/KleinNishinaInteractor.hh" #include "random/cuda/RngEngine.hh" #include "random/distributions/ExponentialDistribution.hh" #include "physics/grid/PhysicsGridCalculator.hh" #include "DetectorView.hh" using namespace celeritas; using celeritas::detail::KleinNishinaInteractor; namespace demo_interactor { namespace { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// /*! * Kernel to initialize particle data. * * For testing purposes (this might not be the case for the final app) we use a * grid-stride loop rather than requiring that each thread correspond exactly * to a particle track. In other words, this method allows a single warp to * operate on two 32-thread chunks of data. * https://developer.nvidia.com/blog/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/ */ __global__ void initialize_kernel(ParamPointers const params, StatePointers const states, InitialPointers const init) { // Grid-stride loop, see for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < static_cast<int>(states.size()); tid += blockDim.x * gridDim.x) { ParticleTrackView particle( params.particle, states.particle, ThreadId(tid)); particle = init.particle; // Particles begin alive and in the +z direction states.direction[tid] = {0, 0, 1}; states.position[tid] = {0, 0, 0}; states.time[tid] = 0; states.alive[tid] = true; } } //---------------------------------------------------------------------------// /*! * Perform a single interaction per particle track. 
* * The interaction: * - Clears the energy deposition * - Samples the KN interaction * - Allocates and emits a secondary * - Kills the secondary, depositing its local energy * - Applies the interaction (updating track direction and energy) */ __global__ void iterate_kernel(ParamPointers const params, StatePointers const states, SecondaryAllocatorPointers const secondaries, DetectorPointers const detector) { SecondaryAllocatorView allocate_secondaries(secondaries); DetectorView detector_hit(detector); PhysicsGridCalculator calc_xs(params.xs); for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < static_cast<int>(states.size()); tid += blockDim.x * gridDim.x) { // Skip loop if already dead if (!states.alive[tid]) { continue; } // Construct particle accessor from immutable and thread-local data ParticleTrackView particle( params.particle, states.particle, ThreadId(tid)); RngEngine rng(states.rng, ThreadId(tid)); // Move to collision { // Calculate cross section at the particle's energy real_type sigma = calc_xs(particle.energy()); ExponentialDistribution<real_type> sample_distance(sigma); // Sample distance-to-collision real_type distance = sample_distance(rng); // Move particle axpy(distance, states.direction[tid], &states.position[tid]); // Update time states.time[tid] += distance * unit_cast(particle.speed()); } Hit h; h.pos = states.position[tid]; h.thread = ThreadId(tid); h.time = states.time[tid]; if (particle.energy() < units::MevEnergy{0.01}) { // Particle is below interaction energy h.dir = states.direction[tid]; h.energy_deposited = particle.energy(); // Deposit energy and kill detector_hit(h); states.alive[tid] = false; continue; } // Construct RNG and interaction interfaces KleinNishinaInteractor interact(params.kn_interactor, particle, states.direction[tid], allocate_secondaries); // Perform interaction: should emit a single particle (an electron) Interaction interaction = interact(rng); CELER_ASSERT(interaction); CELER_ASSERT(interaction.secondaries.size() == 1); // Deposit energy from the secondary (effectively, an infinite energy // cutoff) { const auto& secondary = interaction.secondaries.front(); h.dir = secondary.direction; h.energy_deposited = secondary.energy; detector_hit(h); } // Update post-interaction state (apply interaction) states.direction[tid] = interaction.direction; particle.energy(interaction.energy); } } } // namespace //---------------------------------------------------------------------------// // KERNEL INTERFACES //---------------------------------------------------------------------------// /*! * Initialize particle states. */ void initialize(const CudaGridParams& grid, const ParamPointers& params, const StatePointers& states, const InitialPointers& initial) { CELER_EXPECT(states.alive.size() == states.size()); CELER_EXPECT(states.rng.size() == states.size()); hipLaunchKernelGGL(( initialize_kernel), dim3(grid.grid_size), dim3(grid.block_size), 0, 0, params, states, initial); } //---------------------------------------------------------------------------// /*! * Run an iteration. */ void iterate(const CudaGridParams& grid, const ParamPointers& params, const StatePointers& state, const SecondaryAllocatorPointers& secondaries, const celeritas::DetectorPointers& detector) { hipLaunchKernelGGL(( iterate_kernel), dim3(grid.grid_size), dim3(grid.block_size), 0, 0, params, state, secondaries, detector); // Note: the device synchronize is useful for debugging and necessary for // timing diagnostics. 
CELER_CUDA_CALL(hipDeviceSynchronize()); } //---------------------------------------------------------------------------// /*! * Sum the total number of living particles. */ size_type reduce_alive(Span<bool> alive) { size_type result = thrust::reduce( thrust::device_pointer_cast(alive.data()), thrust::device_pointer_cast(alive.data() + alive.size()), size_type(0), thrust::plus<size_type>()); CELER_CUDA_CALL(hipDeviceSynchronize()); return result; } //---------------------------------------------------------------------------// } // namespace demo_interactor
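// ---------------------------------------------------------------------------
// Editor's note: the snippet below is NOT part of KNDemoKernel. It only
// spells out, on a tiny self-contained example, the launch-syntax rewrite
// that hipify applied to the file above: a CUDA triple-chevron launch becomes
// a hipLaunchKernelGGL() call whose two extra leading arguments are the
// dynamic shared-memory size and the stream. fill_kernel and launch_fill are
// illustrative names, not Celeritas code.
// ---------------------------------------------------------------------------
#include "hip/hip_runtime.h"

__global__ void fill_kernel(float* data, int n, float value)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] = value;
}

static void launch_fill(float* d_data, int n, float value)
{
    dim3 block(256);
    dim3 grid((n + 255) / 256);
    // Equivalent CUDA source: fill_kernel<<<grid, block, 0, 0>>>(d_data, n, value);
    hipLaunchKernelGGL(fill_kernel, grid, block, 0, 0, d_data, n, value);
    hipDeviceSynchronize();
}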
85989753aa4554d8906a7299ca0ac725473cd3eb.cu
//---------------------------------*-CUDA-*----------------------------------// // Copyright 2020 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file KNDemoKernel.cu //---------------------------------------------------------------------------// #include "KNDemoKernel.hh" #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include "base/ArrayUtils.hh" #include "base/Assert.hh" #include "physics/base/ParticleTrackView.hh" #include "physics/base/SecondaryAllocatorView.hh" #include "physics/em/detail/KleinNishinaInteractor.hh" #include "random/cuda/RngEngine.hh" #include "random/distributions/ExponentialDistribution.hh" #include "physics/grid/PhysicsGridCalculator.hh" #include "DetectorView.hh" using namespace celeritas; using celeritas::detail::KleinNishinaInteractor; namespace demo_interactor { namespace { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// /*! * Kernel to initialize particle data. * * For testing purposes (this might not be the case for the final app) we use a * grid-stride loop rather than requiring that each thread correspond exactly * to a particle track. In other words, this method allows a single warp to * operate on two 32-thread chunks of data. * https://developer.nvidia.com/blog/cuda-pro-tip-write-flexible-kernels-grid-stride-loops/ */ __global__ void initialize_kernel(ParamPointers const params, StatePointers const states, InitialPointers const init) { // Grid-stride loop, see for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < static_cast<int>(states.size()); tid += blockDim.x * gridDim.x) { ParticleTrackView particle( params.particle, states.particle, ThreadId(tid)); particle = init.particle; // Particles begin alive and in the +z direction states.direction[tid] = {0, 0, 1}; states.position[tid] = {0, 0, 0}; states.time[tid] = 0; states.alive[tid] = true; } } //---------------------------------------------------------------------------// /*! * Perform a single interaction per particle track. 
* * The interaction: * - Clears the energy deposition * - Samples the KN interaction * - Allocates and emits a secondary * - Kills the secondary, depositing its local energy * - Applies the interaction (updating track direction and energy) */ __global__ void iterate_kernel(ParamPointers const params, StatePointers const states, SecondaryAllocatorPointers const secondaries, DetectorPointers const detector) { SecondaryAllocatorView allocate_secondaries(secondaries); DetectorView detector_hit(detector); PhysicsGridCalculator calc_xs(params.xs); for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < static_cast<int>(states.size()); tid += blockDim.x * gridDim.x) { // Skip loop if already dead if (!states.alive[tid]) { continue; } // Construct particle accessor from immutable and thread-local data ParticleTrackView particle( params.particle, states.particle, ThreadId(tid)); RngEngine rng(states.rng, ThreadId(tid)); // Move to collision { // Calculate cross section at the particle's energy real_type sigma = calc_xs(particle.energy()); ExponentialDistribution<real_type> sample_distance(sigma); // Sample distance-to-collision real_type distance = sample_distance(rng); // Move particle axpy(distance, states.direction[tid], &states.position[tid]); // Update time states.time[tid] += distance * unit_cast(particle.speed()); } Hit h; h.pos = states.position[tid]; h.thread = ThreadId(tid); h.time = states.time[tid]; if (particle.energy() < units::MevEnergy{0.01}) { // Particle is below interaction energy h.dir = states.direction[tid]; h.energy_deposited = particle.energy(); // Deposit energy and kill detector_hit(h); states.alive[tid] = false; continue; } // Construct RNG and interaction interfaces KleinNishinaInteractor interact(params.kn_interactor, particle, states.direction[tid], allocate_secondaries); // Perform interaction: should emit a single particle (an electron) Interaction interaction = interact(rng); CELER_ASSERT(interaction); CELER_ASSERT(interaction.secondaries.size() == 1); // Deposit energy from the secondary (effectively, an infinite energy // cutoff) { const auto& secondary = interaction.secondaries.front(); h.dir = secondary.direction; h.energy_deposited = secondary.energy; detector_hit(h); } // Update post-interaction state (apply interaction) states.direction[tid] = interaction.direction; particle.energy(interaction.energy); } } } // namespace //---------------------------------------------------------------------------// // KERNEL INTERFACES //---------------------------------------------------------------------------// /*! * Initialize particle states. */ void initialize(const CudaGridParams& grid, const ParamPointers& params, const StatePointers& states, const InitialPointers& initial) { CELER_EXPECT(states.alive.size() == states.size()); CELER_EXPECT(states.rng.size() == states.size()); initialize_kernel<<<grid.grid_size, grid.block_size>>>( params, states, initial); } //---------------------------------------------------------------------------// /*! * Run an iteration. */ void iterate(const CudaGridParams& grid, const ParamPointers& params, const StatePointers& state, const SecondaryAllocatorPointers& secondaries, const celeritas::DetectorPointers& detector) { iterate_kernel<<<grid.grid_size, grid.block_size>>>( params, state, secondaries, detector); // Note: the device synchronize is useful for debugging and necessary for // timing diagnostics. CELER_CUDA_CALL(cudaDeviceSynchronize()); } //---------------------------------------------------------------------------// /*! 
* Sum the total number of living particles. */ size_type reduce_alive(Span<bool> alive) { size_type result = thrust::reduce( thrust::device_pointer_cast(alive.data()), thrust::device_pointer_cast(alive.data() + alive.size()), size_type(0), thrust::plus<size_type>()); CELER_CUDA_CALL(cudaDeviceSynchronize()); return result; } //---------------------------------------------------------------------------// } // namespace demo_interactor
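// ---------------------------------------------------------------------------
// Editor's note: not part of KNDemoKernel.cu. Both initialize_kernel() and
// iterate_kernel() above use the grid-stride-loop pattern described in their
// doc comments, so one launch of any grid size covers all tracks. The
// standalone sketch below shows the same pattern in isolation; scale_kernel
// and run_scale are illustrative names only.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, int n, float factor)
{
    // Each thread starts at its global index and advances by the total
    // number of threads in the grid until the whole range is covered.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
    {
        data[i] *= factor;
    }
}

static void run_scale(float* d_data, int n, float factor)
{
    // The grid does not need to match n; the stride loop absorbs the difference.
    scale_kernel<<<32, 256>>>(d_data, n, factor);
    cudaDeviceSynchronize();
}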
fe429cb8687108879c7057bdf9c7cea5eabbd3ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <iomanip> #include <fstream> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "time.h" #include "Pass.h" #include "constants.h" #include "lattice_PAR.h" using namespace std; void emittance(COORD *y, REAL *PEx, REAL *PEy, REAL *PEdelta) { REAL avg_x=0,avg_xp=0,avg_y=0,avg_yp=0,avg_delta=0,sig_xx=0,sig_xpxp=0,sig_xxp=0,sig_yy=0,sig_ypyp=0,sig_yyp=0,sig_delta=0; for(int i=0;i<_Npart;i++) { avg_x+=y[i].x[x_]/_Npart; avg_xp+=y[i].x[px_]/(1+y[i].x[delta_])/_Npart; avg_y+=y[i].x[y_]/_Npart; avg_yp+=y[i].x[py_]/(1+y[i].x[delta_])/_Npart; avg_delta+=y[i].x[delta_]/_Npart; } for(int i=0;i<_Npart;i++) { sig_xx+=(y[i].x[x_]-avg_x)*(y[i].x[x_]-avg_x)/_Npart; sig_xpxp+=(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)*(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)/_Npart; sig_xxp+=(y[i].x[x_]-avg_x)*(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)/_Npart; sig_yy+=(y[i].x[y_]-avg_y)*(y[i].x[y_]-avg_y)/_Npart; sig_ypyp+=(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)*(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)/_Npart; sig_yyp+=(y[i].x[y_]-avg_y)*(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)/_Npart; sig_delta+=(y[i].x[delta_]-avg_delta)*(y[i].x[delta_]-avg_delta)/_Npart; } *PEx=sqrt(sig_xx*sig_xpxp-sig_xxp*sig_xxp); *PEy=sqrt(sig_yy*sig_ypyp-sig_yyp*sig_yyp); *PEdelta=sqrt(sig_delta); } int main(int argc, char** argv) { clock_t start, finish; start = clock(); //get a bunch of random numbers REAL *queue, *dqueue; queue=(REAL*)malloc(_pool*sizeof(REAL)); hipMalloc(&dqueue,_pool*sizeof(REAL)); ifstream infile1("queue"); for(int i1=0;i1<_pool;i1++) infile1>>queue[i1]; infile1.close(); hipMemcpy(dqueue,queue,_pool*sizeof(REAL),hipMemcpyHostToDevice); free(queue); //initialization const gsl_rng_type * T; gsl_rng * r; gsl_rng_env_setup(); T = gsl_rng_default; r = gsl_rng_alloc (T); COORD *part, *dpart; int size = _Npart * sizeof(COORD); hipHostMalloc( (void**)&part,size,hipHostMallocDefault ); // part=(COORD*)malloc(size); hipMalloc(&dpart,size); REAL phi_x,phi_y,Jx,Jy,Ex,Ey,Sdelta; int i,n; for(i=0;i<_Npart;i++) { do {Jx=gsl_ran_exponential(r, 2*E_x);} while(Jx>E_x*180); do {Jy=gsl_ran_exponential(r, 2*E_y);} while(Jy>E_y*180); phi_x=gsl_ran_flat(r,0,2*M_PI); phi_y=gsl_ran_flat(r,0,2*M_PI); part[i].x[x_]=sqrt(Jx*Beta_x)*cos(phi_x); part[i].x[px_]=sqrt(Jx/Beta_x)*sin(phi_x); part[i].x[y_]=sqrt(Jy*Beta_y)*cos(phi_y); part[i].x[py_]=sqrt(Jy/Beta_y)*sin(phi_y); part[i].x[z_]=0; part[i].x[delta_]=0.00; } // part[0].x[0]=0.000;part[0].x[1]=0.000;part[0].x[2]=0.000;part[0].x[3]=0.000;part[0].x[5]=0.01; hipMemcpy(dpart,part,size,hipMemcpyHostToDevice); //Initiate lattice Initiate_lattice(); ofstream outfile("abc.txt"); outfile.close(); for(n=0;n<_Nturn1;n++) { hipLaunchKernelGGL(( Track), dim3(_BlockNum),dim3(_ThreadNum), 0, 0, dpart,dqueue,n); hipMemcpy(part,dpart,size,hipMemcpyDeviceToHost); emittance(part,&Ex,&Ey,&Sdelta); ofstream outfile("abc.txt",ios::app); outfile<<setw(4)<<n*_Nturn2<<scientific<<setw(15)<<Ex<<scientific<<setw(15)<<Ey<<setw(15)<<scientific<<Sdelta<<endl; // outfile<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl; // for(int k=0;k<_Npart;k++) { if(abs(part[k].x[0])>10||abs(part[k].x[1])>10||part[k].x[2]>10||part[k].x[3]>10||part[k].x[5]>2) {cout<<n<<" "<<part[k].x[0]<<" "<<part[k].x[1]<<" "<<part[k].x[2]<<" "<<part[k].x[3]<<" "<<part[k].x[4]<<" "<<part[k].x[5]<<endl;} } 
outfile.close(); /* Jx=part[0].x[0]*part[0].x[0]/Beta_x + part[0].x[1]*part[0].x[1]*Beta_x; Jy=part[0].x[2]*part[0].x[2]/Beta_y + part[0].x[3]*part[0].x[3]*Beta_y; REAL J2=Jx+2*Jy; phi_x=acos( part[i].x[0]/sqrt(Jx*Beta_x)); unsigned num=(int) phi_x;if(num>10) phi_x=0; phi_y=acos( part[i].x[2]/sqrt(Jy*Beta_y)); num=(int) phi_y;if(num>10) phi_y=0; REAL Hamilt=sqrt(Jy)*Jx * abs( cos(2*phi_x-phi_y) ) / (sqrt(J2)*J2); //scaled Hamiltonian cout<<J2<<endl; */ } //particles' distribution const int Nbin = 100; unsigned int binx[Nbin+1]={0},biny[Nbin+1]={0},num; REAL phix,phiy,Hamilt,J2; // ofstream outfile2("particles_action"); for(i=0;i<_Npart;++i) { Jx=part[i].x[0]*part[i].x[0]/Beta_x + part[i].x[1]*part[i].x[1]*Beta_x; Jy=part[i].x[2]*part[i].x[2]/Beta_y + part[i].x[3]*part[i].x[3]*Beta_y; J2=Jx+2*Jy; phix=atan2( part[i].x[1]*sqrt(Beta_x),part[i].x[0]/sqrt(Beta_x)); //num=(int) phix;if(num>10) phix=0; phiy=atan2( part[i].x[3]*sqrt(Beta_y),part[i].x[2]/sqrt(Beta_y)); //num=(int) phiy;if(num>10) phiy=0; Hamilt=sqrt(Jy)*Jx * abs( cos(2*phix-phiy) ) / (sqrt(J2)*J2); //scaled Hamiltonian // outfile2<<setw(15)<<scientific<<Jx<<setw(15)<<scientific<<Jy<<endl; if(Jx>=10*Ex) { ++binx[Nbin]; } else { num=(int) (Jx/ (10*Ex/Nbin)); ++binx[num]; } if(Jy>=10*Ey) { ++biny[Nbin]; } else { num=(int) (Jy/ (10*Ey/Nbin)); ++biny[num]; } /* if(Hamilt>=0.3) { ++binx[Nbin]; } else { num=(int) (Hamilt/ (0.3/Nbin)); ++binx[num]; } if( (Jx+2*Jy)/(Ex+2*Ey)>=10.0) { ++biny[Nbin]; } else { num=(int) ((Jx+2*Jy)/(Ex+2*Ey)/ (10.0/Nbin)); ++biny[num]; } */ } // outfile2.close(); //histogram ofstream outfile3("histo"); for(i=0;i<Nbin;++i) { outfile3<<setw(15)<<10.0/Nbin*(i+0.5)<<setw(6)<<binx[i]<<setw(15)<<10.0/Nbin*(i+0.5)<<setw(6)<<biny[i]<<endl; // outfile3<<setw(15)<<0.3/Nbin*(i+0.5)<<setw(6)<<binx[i]<<setw(15)<<10.0/Nbin*(i+0.5)<<setw(6)<<biny[i]<<endl; } outfile3.close(); cout<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl; hipHostFree( part ); // free(part); hipFree(dpart); hipFree(dqueue); gsl_rng_free (r); finish = clock(); cout<<(finish-start)/CLOCKS_PER_SEC<<" sec"<<endl; }
fe429cb8687108879c7057bdf9c7cea5eabbd3ae.cu
#include <iostream> #include <iomanip> #include <fstream> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <curand.h> #include <curand_kernel.h> #include "time.h" #include "Pass.h" #include "constants.h" #include "lattice_PAR.h" using namespace std; void emittance(COORD *y, REAL *PEx, REAL *PEy, REAL *PEdelta) { REAL avg_x=0,avg_xp=0,avg_y=0,avg_yp=0,avg_delta=0,sig_xx=0,sig_xpxp=0,sig_xxp=0,sig_yy=0,sig_ypyp=0,sig_yyp=0,sig_delta=0; for(int i=0;i<_Npart;i++) { avg_x+=y[i].x[x_]/_Npart; avg_xp+=y[i].x[px_]/(1+y[i].x[delta_])/_Npart; avg_y+=y[i].x[y_]/_Npart; avg_yp+=y[i].x[py_]/(1+y[i].x[delta_])/_Npart; avg_delta+=y[i].x[delta_]/_Npart; } for(int i=0;i<_Npart;i++) { sig_xx+=(y[i].x[x_]-avg_x)*(y[i].x[x_]-avg_x)/_Npart; sig_xpxp+=(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)*(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)/_Npart; sig_xxp+=(y[i].x[x_]-avg_x)*(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)/_Npart; sig_yy+=(y[i].x[y_]-avg_y)*(y[i].x[y_]-avg_y)/_Npart; sig_ypyp+=(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)*(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)/_Npart; sig_yyp+=(y[i].x[y_]-avg_y)*(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)/_Npart; sig_delta+=(y[i].x[delta_]-avg_delta)*(y[i].x[delta_]-avg_delta)/_Npart; } *PEx=sqrt(sig_xx*sig_xpxp-sig_xxp*sig_xxp); *PEy=sqrt(sig_yy*sig_ypyp-sig_yyp*sig_yyp); *PEdelta=sqrt(sig_delta); } int main(int argc, char** argv) { clock_t start, finish; start = clock(); //get a bunch of random numbers REAL *queue, *dqueue; queue=(REAL*)malloc(_pool*sizeof(REAL)); cudaMalloc(&dqueue,_pool*sizeof(REAL)); ifstream infile1("queue"); for(int i1=0;i1<_pool;i1++) infile1>>queue[i1]; infile1.close(); cudaMemcpy(dqueue,queue,_pool*sizeof(REAL),cudaMemcpyHostToDevice); free(queue); //initialization const gsl_rng_type * T; gsl_rng * r; gsl_rng_env_setup(); T = gsl_rng_default; r = gsl_rng_alloc (T); COORD *part, *dpart; int size = _Npart * sizeof(COORD); cudaHostAlloc( (void**)&part,size,cudaHostAllocDefault ); // part=(COORD*)malloc(size); cudaMalloc(&dpart,size); REAL phi_x,phi_y,Jx,Jy,Ex,Ey,Sdelta; int i,n; for(i=0;i<_Npart;i++) { do {Jx=gsl_ran_exponential(r, 2*E_x);} while(Jx>E_x*180); do {Jy=gsl_ran_exponential(r, 2*E_y);} while(Jy>E_y*180); phi_x=gsl_ran_flat(r,0,2*M_PI); phi_y=gsl_ran_flat(r,0,2*M_PI); part[i].x[x_]=sqrt(Jx*Beta_x)*cos(phi_x); part[i].x[px_]=sqrt(Jx/Beta_x)*sin(phi_x); part[i].x[y_]=sqrt(Jy*Beta_y)*cos(phi_y); part[i].x[py_]=sqrt(Jy/Beta_y)*sin(phi_y); part[i].x[z_]=0; part[i].x[delta_]=0.00; } // part[0].x[0]=0.000;part[0].x[1]=0.000;part[0].x[2]=0.000;part[0].x[3]=0.000;part[0].x[5]=0.01; cudaMemcpy(dpart,part,size,cudaMemcpyHostToDevice); //Initiate lattice Initiate_lattice(); ofstream outfile("abc.txt"); outfile.close(); for(n=0;n<_Nturn1;n++) { Track<<<_BlockNum,_ThreadNum>>>(dpart,dqueue,n); cudaMemcpy(part,dpart,size,cudaMemcpyDeviceToHost); emittance(part,&Ex,&Ey,&Sdelta); ofstream outfile("abc.txt",ios::app); outfile<<setw(4)<<n*_Nturn2<<scientific<<setw(15)<<Ex<<scientific<<setw(15)<<Ey<<setw(15)<<scientific<<Sdelta<<endl; // outfile<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl; // for(int k=0;k<_Npart;k++) { if(abs(part[k].x[0])>10||abs(part[k].x[1])>10||part[k].x[2]>10||part[k].x[3]>10||part[k].x[5]>2) {cout<<n<<" "<<part[k].x[0]<<" "<<part[k].x[1]<<" "<<part[k].x[2]<<" "<<part[k].x[3]<<" "<<part[k].x[4]<<" "<<part[k].x[5]<<endl;} } outfile.close(); /* Jx=part[0].x[0]*part[0].x[0]/Beta_x + part[0].x[1]*part[0].x[1]*Beta_x; Jy=part[0].x[2]*part[0].x[2]/Beta_y + 
part[0].x[3]*part[0].x[3]*Beta_y; REAL J2=Jx+2*Jy; phi_x=acos( part[i].x[0]/sqrt(Jx*Beta_x)); unsigned num=(int) phi_x;if(num>10) phi_x=0; phi_y=acos( part[i].x[2]/sqrt(Jy*Beta_y)); num=(int) phi_y;if(num>10) phi_y=0; REAL Hamilt=sqrt(Jy)*Jx * abs( cos(2*phi_x-phi_y) ) / (sqrt(J2)*J2); //scaled Hamiltonian cout<<J2<<endl; */ } //particles' distribution const int Nbin = 100; unsigned int binx[Nbin+1]={0},biny[Nbin+1]={0},num; REAL phix,phiy,Hamilt,J2; // ofstream outfile2("particles_action"); for(i=0;i<_Npart;++i) { Jx=part[i].x[0]*part[i].x[0]/Beta_x + part[i].x[1]*part[i].x[1]*Beta_x; Jy=part[i].x[2]*part[i].x[2]/Beta_y + part[i].x[3]*part[i].x[3]*Beta_y; J2=Jx+2*Jy; phix=atan2( part[i].x[1]*sqrt(Beta_x),part[i].x[0]/sqrt(Beta_x)); //num=(int) phix;if(num>10) phix=0; phiy=atan2( part[i].x[3]*sqrt(Beta_y),part[i].x[2]/sqrt(Beta_y)); //num=(int) phiy;if(num>10) phiy=0; Hamilt=sqrt(Jy)*Jx * abs( cos(2*phix-phiy) ) / (sqrt(J2)*J2); //scaled Hamiltonian // outfile2<<setw(15)<<scientific<<Jx<<setw(15)<<scientific<<Jy<<endl; if(Jx>=10*Ex) { ++binx[Nbin]; } else { num=(int) (Jx/ (10*Ex/Nbin)); ++binx[num]; } if(Jy>=10*Ey) { ++biny[Nbin]; } else { num=(int) (Jy/ (10*Ey/Nbin)); ++biny[num]; } /* if(Hamilt>=0.3) { ++binx[Nbin]; } else { num=(int) (Hamilt/ (0.3/Nbin)); ++binx[num]; } if( (Jx+2*Jy)/(Ex+2*Ey)>=10.0) { ++biny[Nbin]; } else { num=(int) ((Jx+2*Jy)/(Ex+2*Ey)/ (10.0/Nbin)); ++biny[num]; } */ } // outfile2.close(); //histogram ofstream outfile3("histo"); for(i=0;i<Nbin;++i) { outfile3<<setw(15)<<10.0/Nbin*(i+0.5)<<setw(6)<<binx[i]<<setw(15)<<10.0/Nbin*(i+0.5)<<setw(6)<<biny[i]<<endl; // outfile3<<setw(15)<<0.3/Nbin*(i+0.5)<<setw(6)<<binx[i]<<setw(15)<<10.0/Nbin*(i+0.5)<<setw(6)<<biny[i]<<endl; } outfile3.close(); cout<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl; cudaFreeHost( part ); // free(part); cudaFree(dpart); cudaFree(dqueue); gsl_rng_free (r); finish = clock(); cout<<(finish-start)/CLOCKS_PER_SEC<<" sec"<<endl; }
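// ---------------------------------------------------------------------------
// Editor's note: not part of the tracking program above. That program
// allocates its particle array with cudaHostAlloc()/hipHostMalloc(); the
// self-contained sketch below shows why: page-locked (pinned) host memory
// can be DMA-ed directly, so the per-turn cudaMemcpy calls run faster than
// they would from pageable memory obtained with malloc(). The buffer size
// and function name are illustrative.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

static int pinned_copy_demo(size_t n)
{
    float *h_buf = NULL, *d_buf = NULL;

    // Pinned allocation instead of malloc(); the driver can DMA from this buffer.
    if (cudaHostAlloc((void**)&h_buf, n * sizeof(float), cudaHostAllocDefault) != cudaSuccess)
        return -1;
    if (cudaMalloc((void**)&d_buf, n * sizeof(float)) != cudaSuccess) {
        cudaFreeHost(h_buf);
        return -1;
    }

    for (size_t i = 0; i < n; ++i) h_buf[i] = 0.0f;

    cudaMemcpy(d_buf, h_buf, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(h_buf, d_buf, n * sizeof(float), cudaMemcpyDeviceToHost);

    cudaFree(d_buf);
    cudaFreeHost(h_buf);   // pinned memory is released with cudaFreeHost, not free()
    return 0;
}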
485a365a6c64e2ea0a301b47506c46dc16680718.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Simulation.h" #include <stdint.h> #include "colormap.cu" #define DERIV_X(v) (1/(2*dx)*(v[East] - v[West])) #define DERIV_Z(v) (1/(2*dz)*(v[North] - v[South])) #define LAPLACE(v) (inv_dx_quad*(v[East] -2*v[Point] + v[West] )\ +inv_dz_quad*(v[North] -2*v[Point] + v[South])) #define ADVECT(v)((axp*(v[Point] - v[West])/dx + axm*(v[East] - v[Point])/dx \ + azp*(v[Point] - v[South])/dz + azm*(v[North] - v[Point])/dz)) __global__ void solver ( float* in_T, float* out_T, float* in_vx, float* out_vx, float* in_vz, float* out_vz, float* in_rho, float* out_rho, int nx, int ny, float dt) { const unsigned int IDx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int IDy = blockIdx.y * blockDim.y + threadIdx.y; int Point = IDy*nx + IDx; int North = Point + nx; int South = Point - nx; int East = Point + 1; int West = Point - 1; float dx = 2./nx, dz = 1./ny; float c2 = 10000000; float Pr = 0.7; float Ra = 100000000; float inv_dx_quad = 1./(dx*dx); float inv_dz_quad = 1./(dz*dz); if (IDx > 1 && IDx < nx - 1 && IDy > 1 && IDy < ny - 1){ float axp = max(in_vx[Point], 0.0f); float axm = min(in_vx[Point], 0.0f); float azp = max(in_vz[Point], 0.0f); float azm = min(in_vz[Point], 0.0f); out_rho[Point] = in_rho[Point] + dt*(-DERIV_X(in_vx) - DERIV_Z(in_vz)); out_vx[Point] = in_vx[Point] + dt*(-ADVECT(in_vx) - c2*DERIV_X(in_rho) + Pr*LAPLACE(in_vx)); out_vz[Point] = in_vz[Point] + dt*(-ADVECT(in_vz) - c2*DERIV_Z(in_rho) + Pr*LAPLACE(in_vz) + Pr*Ra*in_T[Point]); out_T[Point] = in_T[Point] + dt*(-ADVECT(in_T) + LAPLACE(in_T) + in_vz[Point]); } } __global__ void kernel(hipSurfaceObject_t surface, float* input, int nx, int ny) { const unsigned int IDx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int IDy = blockIdx.y * blockDim.y + threadIdx.y; float y = IDy/(float)ny; float v = input[IDx + IDy*nx] + (1- y); uint8_t r, g, b; r = (int) (colormap_red(v)*255); g = (int) (colormap_green(v)*255); b = (int) (colormap_blue(v)*255); uchar4 data = make_uchar4(r, g, b, 0xff); surf2Dwrite(data, surface, IDx*sizeof(uchar4), IDy); } Simulation::Simulation(){} Simulation::Simulation(GLuint &texId_handle, int width_, int height_) : width(width_), height(height_) { hipGraphicsGLRegisterImage(&texRes, texId_handle, GL_TEXTURE_2D, hipGraphicsRegisterFlagsSurfaceLoadStore); state = State(width, height); } void Simulation::Step() { hipGraphicsMapResources(1, &texRes); hipGraphicsSubResourceGetMappedArray(&data, texRes, 0, 0); hipResourceDesc resoureDescription; hipSurfaceObject_t surface = 0; memset(&resoureDescription, 0, sizeof(resoureDescription)); resoureDescription.resType = hipResourceTypeArray; resoureDescription.res.array.array = data; hipCreateSurfaceObject(&surface, &resoureDescription); int nthread = 32; dim3 threads(nthread, nthread); dim3 grids(width/nthread, height/nthread); float dt = 0.0000001; hipLaunchKernelGGL(( solver), dim3(grids), dim3(threads), 0, 0, state.T.device, state.T.buffer, state.vx.device, state.vx.buffer, state.vz.device, state.vz.buffer, state.rho.device, state.rho.buffer, width, height, dt ); hipLaunchKernelGGL(( solver), dim3(grids), dim3(threads), 0, 0, state.T.buffer, state.T.device, state.vx.buffer, state.vx.device, state.vz.buffer, state.vz.device, state.rho.buffer, state.rho.device, width, height, dt ); hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads), 0, 0, surface, state.T.device, width, height); hipGraphicsUnmapResources(1, &texRes); hipDestroySurfaceObject(surface); } 
// Release the graphics resource registered from the OpenGL texture in the constructor.
Simulation::~Simulation() { hipGraphicsUnregisterResource(texRes); }
485a365a6c64e2ea0a301b47506c46dc16680718.cu
#include "Simulation.h" #include <stdint.h> #include "colormap.cu" #define DERIV_X(v) (1/(2*dx)*(v[East] - v[West])) #define DERIV_Z(v) (1/(2*dz)*(v[North] - v[South])) #define LAPLACE(v) (inv_dx_quad*(v[East] -2*v[Point] + v[West] )\ +inv_dz_quad*(v[North] -2*v[Point] + v[South])) #define ADVECT(v)((axp*(v[Point] - v[West])/dx + axm*(v[East] - v[Point])/dx \ + azp*(v[Point] - v[South])/dz + azm*(v[North] - v[Point])/dz)) __global__ void solver ( float* in_T, float* out_T, float* in_vx, float* out_vx, float* in_vz, float* out_vz, float* in_rho, float* out_rho, int nx, int ny, float dt) { const unsigned int IDx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int IDy = blockIdx.y * blockDim.y + threadIdx.y; int Point = IDy*nx + IDx; int North = Point + nx; int South = Point - nx; int East = Point + 1; int West = Point - 1; float dx = 2./nx, dz = 1./ny; float c2 = 10000000; float Pr = 0.7; float Ra = 100000000; float inv_dx_quad = 1./(dx*dx); float inv_dz_quad = 1./(dz*dz); if (IDx > 1 && IDx < nx - 1 && IDy > 1 && IDy < ny - 1){ float axp = max(in_vx[Point], 0.0f); float axm = min(in_vx[Point], 0.0f); float azp = max(in_vz[Point], 0.0f); float azm = min(in_vz[Point], 0.0f); out_rho[Point] = in_rho[Point] + dt*(-DERIV_X(in_vx) - DERIV_Z(in_vz)); out_vx[Point] = in_vx[Point] + dt*(-ADVECT(in_vx) - c2*DERIV_X(in_rho) + Pr*LAPLACE(in_vx)); out_vz[Point] = in_vz[Point] + dt*(-ADVECT(in_vz) - c2*DERIV_Z(in_rho) + Pr*LAPLACE(in_vz) + Pr*Ra*in_T[Point]); out_T[Point] = in_T[Point] + dt*(-ADVECT(in_T) + LAPLACE(in_T) + in_vz[Point]); } } __global__ void kernel(cudaSurfaceObject_t surface, float* input, int nx, int ny) { const unsigned int IDx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int IDy = blockIdx.y * blockDim.y + threadIdx.y; float y = IDy/(float)ny; float v = input[IDx + IDy*nx] + (1- y); uint8_t r, g, b; r = (int) (colormap_red(v)*255); g = (int) (colormap_green(v)*255); b = (int) (colormap_blue(v)*255); uchar4 data = make_uchar4(r, g, b, 0xff); surf2Dwrite(data, surface, IDx*sizeof(uchar4), IDy); } Simulation::Simulation(){} Simulation::Simulation(GLuint &texId_handle, int width_, int height_) : width(width_), height(height_) { cudaGraphicsGLRegisterImage(&texRes, texId_handle, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsSurfaceLoadStore); state = State(width, height); } void Simulation::Step() { cudaGraphicsMapResources(1, &texRes); cudaGraphicsSubResourceGetMappedArray(&data, texRes, 0, 0); cudaResourceDesc resoureDescription; cudaSurfaceObject_t surface = 0; memset(&resoureDescription, 0, sizeof(resoureDescription)); resoureDescription.resType = cudaResourceTypeArray; resoureDescription.res.array.array = data; cudaCreateSurfaceObject(&surface, &resoureDescription); int nthread = 32; dim3 threads(nthread, nthread); dim3 grids(width/nthread, height/nthread); float dt = 0.0000001; solver<<<grids, threads>>> ( state.T.device, state.T.buffer, state.vx.device, state.vx.buffer, state.vz.device, state.vz.buffer, state.rho.device, state.rho.buffer, width, height, dt ); solver<<<grids, threads>>> ( state.T.buffer, state.T.device, state.vx.buffer, state.vx.device, state.vz.buffer, state.vz.device, state.rho.buffer, state.rho.device, width, height, dt ); kernel<<<grids, threads>>>(surface, state.T.device, width, height); cudaGraphicsUnmapResources(1, &texRes); cudaDestroySurfaceObject(surface); } Simulation::~Simulation() { cudaGraphicsUnregisterResource(texRes); }
7a9189a134ae819ff15ec985212b750170e1fa18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "TH.h" #include "THHUNN.h" #include "common.h" #include "THHNumerics.cuh" #include "THHDeviceTensor.cuh" #include "THHDeviceTensorUtils.cuh" #include "THHDeviceUtils.cuh" const int WARP_SIZE = 32; // The maximum number of threads in a block const int MAX_BLOCK_SIZE = 512; // Number of threads in a block given an input size up to MAX_BLOCK_SIZE static int getNumThreads(int nElem) { int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; for (int i = 0; i != 5; ++i) { if (nElem <= threadSizes[i]) { return threadSizes[i]; } } return MAX_BLOCK_SIZE; } // Returns the index of the most significant 1 bit in `val`. __device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } template <typename Dtype, typename Acctype> struct Float2 { Acctype v1, v2; __device__ Float2() {} __device__ Float2(Dtype v1, Dtype v2) : v1(ScalarConvert<Dtype, Acctype>::to(v1)), v2(ScalarConvert<Dtype, Acctype>::to(v2)) {} __device__ Float2(Dtype v) : v1(ScalarConvert<Dtype, Acctype>::to(v)), v2(ScalarConvert<Dtype, Acctype>::to(v)) {} __device__ Float2(int v) : v1(ScalarConvert<int, Acctype>::to(v)), v2(ScalarConvert<int, Acctype>::to(v)) {} __device__ Float2& operator+=(const Float2& a) { v1 += a.v1; v2 += a.v2; return *this; } }; template <typename Dtype, typename Acctype, typename DeviceTensor3> struct SumOp { __device__ SumOp(const DeviceTensor3 t) : tensor(t) {} __device__ __forceinline__ Acctype operator()(int batch, int plane, int n) { return ScalarConvert<Dtype, Acctype>::to(tensor[batch][plane][n]); } const DeviceTensor3 tensor; }; template <typename Dtype, typename Acctype, typename DeviceTensor3> struct VarOp { __device__ VarOp(Acctype m, const DeviceTensor3 t) : mean(m), tensor(t) {} __device__ __forceinline__ Acctype operator()(int batch, int plane, int n) { Dtype val = tensor[batch][plane][n]; return (val - mean) * (val - mean); } const Acctype mean; const DeviceTensor3 tensor; }; template <typename Dtype, typename Acctype, typename DeviceTensor3> struct GradOp { __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {} __device__ __forceinline__ Float2<Dtype, Acctype> operator()(int batch, int plane, int n) { Dtype g = gradOutput[batch][plane][n]; Dtype c = ScalarConvert<Acctype, Dtype>::to(input[batch][plane][n] - mean); return Float2<Dtype, Acctype>(g, g * c); } const Acctype mean; const DeviceTensor3 input; const DeviceTensor3 gradOutput; }; // Sum across all threads within a warp template <typename T> static __device__ __forceinline__ T warpSum(T val) { #if __CUDA_ARCH__ >= 300 for (int i = 0; i < getMSB(WARP_SIZE); ++i) { val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); } #else __shared__ T values[MAX_BLOCK_SIZE]; values[threadIdx.x] = val; __threadfence_block(); const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; for (int i = 1; i < WARP_SIZE; i++) { val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; } #endif return val; } template <typename Dtype, typename Acctype> static __device__ __forceinline__ Float2<Dtype, Acctype> warpSum(Float2<Dtype, Acctype> value) { value.v1 = warpSum(value.v1); value.v2 = warpSum(value.v2); return value; } template<typename T, typename Op, typename DeviceTensor3, typename IndexTensor> __device__ T reduce_vl(Op op, DeviceTensor3 tensor, int plane, IndexTensor input_lengths) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < input_lengths[batch]; x += 
blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor> __global__ void batchnorm_inference_kernel( const DeviceTensor3 input, const IndexTensor input_lengths, DeviceTensor3 output, DeviceTensor1 runningMean, DeviceTensor1 runningVar, const DeviceTensor1 weight, const DeviceTensor1 bias, Acctype epsilon) { int plane = blockIdx.x; Acctype invstd = Acctype(1) / sqrt(runningVar[plane].ldg() + epsilon); Acctype mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane].ldg()); Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane].ldg()) : Acctype(1); Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane].ldg()) : Acctype(0); // Write normalized and update the output for (int batch = 0; batch < input.getSize(0); batch++) { for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) { Dtype inp = input[batch][plane][x].ldg(); output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invstd + beta); } } } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor> __global__ void batchnorm_mean_kernel( const DeviceTensor3 input, const IndexTensor input_lengths, const int length_sum, DeviceTensor1 out_mean) { int plane = blockIdx.x; Acctype norm = Acctype(1) / length_sum; Acctype mean = reduce_vl<Acctype>(SumOp<Dtype, Acctype, DeviceTensor3>(input), input, plane, input_lengths) * norm; if (threadIdx.x == 0) { out_mean[plane] = ScalarConvert<Acctype, Dtype>::to(mean); } } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor> __global__ void batchnorm_val_kernel( const DeviceTensor3 input, const IndexTensor input_lengths, const int length_sum, const DeviceTensor1 in_mean, DeviceTensor1 out_var) { int plane = blockIdx.x; Acctype norm = Acctype(1) / length_sum; Acctype mean = ScalarConvert<Dtype, Acctype>::to(in_mean[plane]); Acctype var = reduce_vl<Acctype>(VarOp<Dtype, Acctype, DeviceTensor3>(mean, input), input, plane, input_lengths) * norm; if (threadIdx.x == 0) { out_var[plane] = ScalarConvert<Acctype, Dtype>::to(var); } } template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor> __global__ void batchnorm_output_kernel( const DeviceTensor3 input, const IndexTensor input_lengths, const int length_sum, DeviceTensor3 output, const DeviceTensor1 weight, const DeviceTensor1 bias, const Acctype epsilon, const Acctype momentum, DeviceTensor1 runningMean, DeviceTensor1 runningVar, DeviceTensor1 saveMean, DeviceTensor1 saveVar) { int plane = blockIdx.x; int N = length_sum; Acctype mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]); Acctype var = ScalarConvert<Dtype, Acctype>::to(saveVar[plane]); Acctype invStd = 1 / sqrt(var + epsilon); 
  // Save the mean, variance, and moving averages
  if (threadIdx.x == 0) {
    // Momentum based writeback
    Acctype unbiasedVar = var * N / (N - 1);
    runningMean[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningMean[plane] + momentum * mean);
    runningVar[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningVar[plane] + momentum * unbiasedVar);
  }

  // Write normalized and update the output
  Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : ScalarConvert<int, Acctype>::to(1);
  Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane]) : ScalarConvert<int, Acctype>::to(0);
  for (int batch = 0; batch < input.getSize(0); ++batch) {
    for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
    //for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
      // int t = input_lengths[batch];
      // printf("block: %d, batch: %d, input_length: %d, x:%d\n", blockIdx.x, batch, t, x);
      Dtype inp = input[batch][plane][x].ldg();
      output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invStd + beta);
    }
  }
}

template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_grad_stats_kernel(
    const DeviceTensor3 input,
    const IndexTensor input_lengths,
    const int length_sum,
    const DeviceTensor3 gradOutput,
    const DeviceTensor1 runningMean,
    const DeviceTensor1 saveMean,
    DeviceTensor1 gradOutputMean_all,
    DeviceTensor1 dotP_all,
    bool train) {
  int plane = blockIdx.x;
  int N = length_sum;

  Acctype mean;
  if (train) {
    mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
  } else {
    mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
  }

  Acctype norm = Acctype(1) / N;
  GradOp<Dtype, Acctype, DeviceTensor3> g(mean, input, gradOutput);
  Float2<Dtype, Acctype> res =
      reduce_vl<Float2<Dtype, Acctype>, GradOp<Dtype, Acctype, DeviceTensor3>, DeviceTensor3>(g, gradOutput, plane, input_lengths);
  Acctype gradOutputMean = res.v1 * norm;
  Acctype dotP = res.v2 * norm;

  if (threadIdx.x == 0) {
    gradOutputMean_all[plane] = ScalarConvert<Acctype, Dtype>::to(gradOutputMean);
    dotP_all[plane] = ScalarConvert<Acctype, Dtype>::to(dotP);
  }
}

template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_backward_kernel(
    const DeviceTensor3 input,
    const IndexTensor input_lengths,
    const int length_sum,
    const DeviceTensor3 gradOutput,
    const DeviceTensor1 gradOutputMean,
    const DeviceTensor1 dotP_all,
    DeviceTensor3 gradInput,
    DeviceTensor1 gradWeight,
    DeviceTensor1 gradBias,
    const DeviceTensor1 weight,
    const DeviceTensor1 runningMean,
    const DeviceTensor1 runningVar,
    const DeviceTensor1 saveMean,
    const DeviceTensor1 saveVar,
    bool train,
    Acctype scale,
    double eps) {
  int plane = blockIdx.x;
  int N = length_sum;

  Acctype mean, stdVal;
  if (train) {
    mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
    stdVal = 1 / sqrt(ScalarConvert<Dtype, Acctype>::to(saveVar[plane]) + eps);
  } else {
    mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
    stdVal = 1 / sqrt(runningVar[plane] + eps);
  }

  Acctype weightVal = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : Acctype(1);

  // Compute two values across (batch, x/y/z) in one pass:
  // 1. Sum(gradOutput)
  // 2. DotProduct(input - mean, gradOutput)
  // Acctype gradOutputSum = res.v1;
  Acctype gradOutputSum = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]) * N;
  Acctype dotP = ScalarConvert<Dtype, Acctype>::to(dotP_all[plane]);

  // Acctype gradMean = gradOutputSum * norm;
  Acctype gradMean = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]);
  // Acctype projScale = dotP * norm * stdVal * stdVal;
  Acctype projScale = dotP * stdVal * stdVal;
  Acctype gradScale = stdVal * weightVal;

  if (gradInput.numElements() > 0) {
    for (int batch = 0; batch < gradOutput.getSize(0); ++batch) {
      for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
        Dtype gradOut = gradOutput[batch][plane][x];
        if (train) {
          Dtype inp = input[batch][plane][x];
          Acctype proj = (inp - mean) * projScale;
          gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to((gradOut - proj - gradMean) * gradScale);
        } else {
          gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gradOut * gradScale);
        }
      }
    }
  }

  if (gradWeight.numElements() > 0) {
    if (threadIdx.x == 0) {
      gradWeight[plane] += ScalarConvert<Acctype, Dtype>::to(scale * dotP * stdVal);
    }
  }

  if (gradBias.numElements() > 0) {
    if (threadIdx.x == 0) {
      gradBias[plane] += ScalarConvert<Acctype, Dtype>::to(scale * gradOutputSum);
    }
  }
}

#define FloatTensor3 THCDeviceTensor<float, 3>
#define FloatTensor1 THCDeviceTensor<float, 1>
#define IntTensor1 THCDeviceTensor<int, 1>

template <typename Dtype, int Dim>
static THCDeviceTensor<Dtype, Dim> devicetensor(THCState *state, THCTensor *t) {
  if (!t) {
    return THCDeviceTensor<Dtype, Dim>();
  }
  int inDim = t->dim();
  THAssert(inDim == Dim);
  return toDeviceTensor<Dtype, Dim>(state, t);
}

extern "C" void THNN_CudaBatchnormUpdateOutput(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *output_,
    THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_, THCTensor *runningVar_,
    THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double momentum, double eps);

extern "C" void THNN_CudaBatchnormMean(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_, int length_sum);

extern "C" void THNN_CudaBatchnormVar(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_,
    THCTensor *saveVar_, int length_sum);

void THNN_CudaBatchnormMean(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_, int length_sum) {
  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  hipStream_t s = THCState_getCurrentStream(state);
  hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state);

  dim3 blocks(input.getSize(1));
  dim3 threads(getNumThreads(input.getSize(2)));
  hipLaunchKernelGGL((batchnorm_mean_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>),
      dim3(blocks), dim3(threads), 0, s,
      input, input_lengths, length_sum, saveMean);
  THCudaCheck(hipGetLastError());
}

void THNN_CudaBatchnormVar(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_,
    THCTensor *saveVar_, int length_sum) {
  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  FloatTensor1 saveVar = devicetensor<float, 1>(state, saveVar_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  hipStream_t s = THCState_getCurrentStream(state);
  hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state);

  dim3 blocks(input.getSize(1));
  dim3 threads(getNumThreads(input.getSize(2)));
  hipLaunchKernelGGL((batchnorm_val_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>),
      dim3(blocks), dim3(threads), 0, s,
      input, input_lengths, length_sum, saveMean, saveVar);
  THCudaCheck(hipGetLastError());
}

void THNN_CudaBatchnormUpdateOutput(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *output_,
    THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_, THCTensor *runningVar_,
    THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double momentum, double eps) {
  THCTensor_resizeAs(state, output_, input_);

  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor3 output = devicetensor<float, 3>(state, output_);
  FloatTensor1 weight = devicetensor<float, 1>(state, weight_);
  FloatTensor1 bias = devicetensor<float, 1>(state, bias_);
  FloatTensor1 runningMean = devicetensor<float, 1>(state, runningMean_);
  FloatTensor1 runningVar = devicetensor<float, 1>(state, runningVar_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  FloatTensor1 saveStd = devicetensor<float, 1>(state, saveStd_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  hipStream_t s = THCState_getCurrentStream(state);
  hipDeviceProp_t *prop = THCState_getCurrentDeviceProperties(state);

  if (!train) {
    dim3 blocks(input.getSize(1));
    dim3 threads(getNumThreads(input.getSize(2)));
    hipLaunchKernelGGL((batchnorm_inference_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>),
        dim3(blocks), dim3(threads), 0, s,
        input, input_lengths, output, runningMean, runningVar, weight, bias, eps);
  } else {
    dim3 blocks(input.getSize(1));
    dim3 threads(getNumThreads(input.getSize(2)));
    hipLaunchKernelGGL((batchnorm_output_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>),
        dim3(blocks), dim3(threads), 0, s,
        input, input_lengths, length_sum, output, weight, bias, eps, momentum,
        runningMean, runningVar, saveMean, saveStd);
  }
  THCudaCheck(hipGetLastError());
}

extern "C" void THNN_CudaBatchnormBackward(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
    THCTensor *gradOutputMean_, THCTensor *dotP, THCTensor *gradInput_, THCTensor *gradWeight_,
    THCTensor *gradBias_, THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
    THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double scale, double eps);

extern "C" void THNN_CudaBatchnormGradStats(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
    THCTensor *runningMean_, THCTensor *saveMean_, THCTensor *gradOutputMean_, THCTensor *dotP_,
    int length_sum, int train);

void THNN_CudaBatchnormGradStats(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
    THCTensor *runningMean_, THCTensor *saveMean_, THCTensor *gradOutputMean_, THCTensor *dotP_,
    int length_sum, int train) {
  // THCUNN_check_shape(state, input_, gradOutput_);
  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor3 gradOutput = devicetensor<float, 3>(state, gradOutput_);
  FloatTensor1 gradOutputMean = devicetensor<float, 1>(state, gradOutputMean_);
  FloatTensor1 dotP = devicetensor<float, 1>(state, dotP_);
  FloatTensor1 runningMean = devicetensor<float, 1>(state, runningMean_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  hipStream_t s = THCState_getCurrentStream(state);

  dim3 blocks(gradOutput.getSize(1));
  dim3 threads(getNumThreads(gradOutput.getSize(2)));
  hipLaunchKernelGGL((batchnorm_grad_stats_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>),
      dim3(blocks), dim3(threads), 0, s,
      input, input_lengths, length_sum, gradOutput, runningMean, saveMean, gradOutputMean, dotP, train);
  THCudaCheck(hipGetLastError());
}

void THNN_CudaBatchnormBackward(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
    THCTensor *gradOutputMean_, THCTensor *dotP_, THCTensor *gradInput_, THCTensor *gradWeight_,
    THCTensor *gradBias_, THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
    THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double scale, double eps) {
  // THCUNN_check_shape(state, input_, gradOutput_);
  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor3 gradOutput = devicetensor<float, 3>(state, gradOutput_);
  FloatTensor1 gradOutputMean = devicetensor<float, 1>(state, gradOutputMean_);
  FloatTensor1 dotP = devicetensor<float, 1>(state, dotP_);
  FloatTensor3 gradInput = devicetensor<float, 3>(state, gradInput_);
  FloatTensor1 gradWeight = devicetensor<float, 1>(state, gradWeight_);
  FloatTensor1 gradBias = devicetensor<float, 1>(state, gradBias_);
  FloatTensor1 weight = devicetensor<float, 1>(state, weight_);
  FloatTensor1 runningMean = devicetensor<float, 1>(state, runningMean_);
  FloatTensor1 runningVar = devicetensor<float, 1>(state, runningVar_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  FloatTensor1 saveStd = devicetensor<float, 1>(state, saveStd_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  hipStream_t s = THCState_getCurrentStream(state);

  dim3 blocks(gradOutput.getSize(1));
  dim3 threads(getNumThreads(gradOutput.getSize(2)));
  hipLaunchKernelGGL((batchnorm_backward_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>),
      dim3(blocks), dim3(threads), 0, s,
      input, input_lengths, length_sum, gradOutput, gradOutputMean, dotP,
      gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
      saveMean, saveStd, train, scale, eps);
  THCudaCheck(hipGetLastError());
}
7a9189a134ae819ff15ec985212b750170e1fa18.cu
#include "TH.h"
#include "THCUNN.h"
#include "common.h"
#include "THCNumerics.cuh"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"

const int WARP_SIZE = 32;

// The maximum number of threads in a block
const int MAX_BLOCK_SIZE = 512;

// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
static int getNumThreads(int nElem) {
  int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE };
  for (int i = 0; i != 5; ++i) {
    if (nElem <= threadSizes[i]) {
      return threadSizes[i];
    }
  }
  return MAX_BLOCK_SIZE;
}

// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
  return 31 - __clz(val);
}

template <typename Dtype, typename Acctype>
struct Float2 {
  Acctype v1, v2;
  __device__ Float2() {}
  __device__ Float2(Dtype v1, Dtype v2) : v1(ScalarConvert<Dtype, Acctype>::to(v1)), v2(ScalarConvert<Dtype, Acctype>::to(v2)) {}
  __device__ Float2(Dtype v) : v1(ScalarConvert<Dtype, Acctype>::to(v)), v2(ScalarConvert<Dtype, Acctype>::to(v)) {}
  __device__ Float2(int v) : v1(ScalarConvert<int, Acctype>::to(v)), v2(ScalarConvert<int, Acctype>::to(v)) {}
  __device__ Float2& operator+=(const Float2& a) {
    v1 += a.v1;
    v2 += a.v2;
    return *this;
  }
};

template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct SumOp {
  __device__ SumOp(const DeviceTensor3 t) : tensor(t) {}
  __device__ __forceinline__ Acctype operator()(int batch, int plane, int n) {
    return ScalarConvert<Dtype, Acctype>::to(tensor[batch][plane][n]);
  }
  const DeviceTensor3 tensor;
};

template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct VarOp {
  __device__ VarOp(Acctype m, const DeviceTensor3 t) : mean(m), tensor(t) {}
  __device__ __forceinline__ Acctype operator()(int batch, int plane, int n) {
    Dtype val = tensor[batch][plane][n];
    return (val - mean) * (val - mean);
  }
  const Acctype mean;
  const DeviceTensor3 tensor;
};

template <typename Dtype, typename Acctype, typename DeviceTensor3>
struct GradOp {
  __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {}
  __device__ __forceinline__ Float2<Dtype, Acctype> operator()(int batch, int plane, int n) {
    Dtype g = gradOutput[batch][plane][n];
    Dtype c = ScalarConvert<Acctype, Dtype>::to(input[batch][plane][n] - mean);
    return Float2<Dtype, Acctype>(g, g * c);
  }
  const Acctype mean;
  const DeviceTensor3 input;
  const DeviceTensor3 gradOutput;
};

// Sum across all threads within a warp
template <typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
  for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
    val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE);
  }
#else
  __shared__ T values[MAX_BLOCK_SIZE];
  values[threadIdx.x] = val;
  __threadfence_block();
  const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
  for (int i = 1; i < WARP_SIZE; i++) {
    val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
  }
#endif
  return val;
}

template <typename Dtype, typename Acctype>
static __device__ __forceinline__ Float2<Dtype, Acctype> warpSum(Float2<Dtype, Acctype> value) {
  value.v1 = warpSum(value.v1);
  value.v2 = warpSum(value.v2);
  return value;
}

template<typename T, typename Op, typename DeviceTensor3, typename IndexTensor>
__device__ T reduce_vl(Op op, DeviceTensor3 tensor, int plane, IndexTensor input_lengths) {
  T sum = (T)0;
  for (int batch = 0; batch < tensor.getSize(0); ++batch) {
    for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
      sum += op(batch, plane, x);
    }
  }

  // sum over NumThreads within a warp
  sum = warpSum(sum);

  // 'transpose', and reduce within warp again
  __shared__ T shared[32];
  __syncthreads();
  if (threadIdx.x % WARP_SIZE == 0) {
    shared[threadIdx.x / WARP_SIZE] = sum;
  }
  if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
    // zero out the other entries in shared
    shared[threadIdx.x] = (T)0;
  }
  __syncthreads();
  if (threadIdx.x / WARP_SIZE == 0) {
    sum = warpSum(shared[threadIdx.x]);
    if (threadIdx.x == 0) {
      shared[0] = sum;
    }
  }
  __syncthreads();

  // Everyone picks it up, should be broadcast into the whole gradInput
  return shared[0];
}

template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_inference_kernel(
    const DeviceTensor3 input,
    const IndexTensor input_lengths,
    DeviceTensor3 output,
    DeviceTensor1 runningMean,
    DeviceTensor1 runningVar,
    const DeviceTensor1 weight,
    const DeviceTensor1 bias,
    Acctype epsilon) {
  int plane = blockIdx.x;

  Acctype invstd = Acctype(1) / sqrt(runningVar[plane].ldg() + epsilon);
  Acctype mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane].ldg());
  Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane].ldg()) : Acctype(1);
  Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane].ldg()) : Acctype(0);

  // Write normalized and update the output
  for (int batch = 0; batch < input.getSize(0); batch++) {
    for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
      Dtype inp = input[batch][plane][x].ldg();
      output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invstd + beta);
    }
  }
}

template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_mean_kernel(
    const DeviceTensor3 input,
    const IndexTensor input_lengths,
    const int length_sum,
    DeviceTensor1 out_mean) {
  int plane = blockIdx.x;
  Acctype norm = Acctype(1) / length_sum;
  Acctype mean = reduce_vl<Acctype>(SumOp<Dtype, Acctype, DeviceTensor3>(input), input, plane, input_lengths) * norm;
  if (threadIdx.x == 0) {
    out_mean[plane] = ScalarConvert<Acctype, Dtype>::to(mean);
  }
}

template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_val_kernel(
    const DeviceTensor3 input,
    const IndexTensor input_lengths,
    const int length_sum,
    const DeviceTensor1 in_mean,
    DeviceTensor1 out_var) {
  int plane = blockIdx.x;
  Acctype norm = Acctype(1) / length_sum;
  Acctype mean = ScalarConvert<Dtype, Acctype>::to(in_mean[plane]);
  Acctype var = reduce_vl<Acctype>(VarOp<Dtype, Acctype, DeviceTensor3>(mean, input), input, plane, input_lengths) * norm;
  if (threadIdx.x == 0) {
    out_var[plane] = ScalarConvert<Acctype, Dtype>::to(var);
  }
}

template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_output_kernel(
    const DeviceTensor3 input,
    const IndexTensor input_lengths,
    const int length_sum,
    DeviceTensor3 output,
    const DeviceTensor1 weight,
    const DeviceTensor1 bias,
    const Acctype epsilon,
    const Acctype momentum,
    DeviceTensor1 runningMean,
    DeviceTensor1 runningVar,
    DeviceTensor1 saveMean,
    DeviceTensor1 saveVar) {
  int plane = blockIdx.x;
  int N = length_sum;

  Acctype mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
  Acctype var = ScalarConvert<Dtype, Acctype>::to(saveVar[plane]);
  Acctype invStd = 1 / sqrt(var + epsilon);

  // Save the mean, variance, and moving averages
  if (threadIdx.x == 0) {
    // Momentum based writeback
    Acctype unbiasedVar = var * N / (N - 1);
    runningMean[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningMean[plane] + momentum * mean);
    runningVar[plane] = ScalarConvert<Acctype, Dtype>::to((1 - momentum) * runningVar[plane] + momentum * unbiasedVar);
  }

  // Write normalized and update the output
  Acctype gamma = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : ScalarConvert<int, Acctype>::to(1);
  Acctype beta = bias.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(bias[plane]) : ScalarConvert<int, Acctype>::to(0);
  for (int batch = 0; batch < input.getSize(0); ++batch) {
    for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
    //for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) {
      // int t = input_lengths[batch];
      // printf("block: %d, batch: %d, input_length: %d, x:%d\n", blockIdx.x, batch, t, x);
      Dtype inp = input[batch][plane][x].ldg();
      output[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gamma * (inp - mean) * invStd + beta);
    }
  }
}

template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_grad_stats_kernel(
    const DeviceTensor3 input,
    const IndexTensor input_lengths,
    const int length_sum,
    const DeviceTensor3 gradOutput,
    const DeviceTensor1 runningMean,
    const DeviceTensor1 saveMean,
    DeviceTensor1 gradOutputMean_all,
    DeviceTensor1 dotP_all,
    bool train) {
  int plane = blockIdx.x;
  int N = length_sum;

  Acctype mean;
  if (train) {
    mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
  } else {
    mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
  }

  Acctype norm = Acctype(1) / N;
  GradOp<Dtype, Acctype, DeviceTensor3> g(mean, input, gradOutput);
  Float2<Dtype, Acctype> res =
      reduce_vl<Float2<Dtype, Acctype>, GradOp<Dtype, Acctype, DeviceTensor3>, DeviceTensor3>(g, gradOutput, plane, input_lengths);
  Acctype gradOutputMean = res.v1 * norm;
  Acctype dotP = res.v2 * norm;

  if (threadIdx.x == 0) {
    gradOutputMean_all[plane] = ScalarConvert<Acctype, Dtype>::to(gradOutputMean);
    dotP_all[plane] = ScalarConvert<Acctype, Dtype>::to(dotP);
  }
}

template <typename Dtype, typename Acctype, typename DeviceTensor1, typename DeviceTensor3, typename IndexTensor>
__global__ void batchnorm_backward_kernel(
    const DeviceTensor3 input,
    const IndexTensor input_lengths,
    const int length_sum,
    const DeviceTensor3 gradOutput,
    const DeviceTensor1 gradOutputMean,
    const DeviceTensor1 dotP_all,
    DeviceTensor3 gradInput,
    DeviceTensor1 gradWeight,
    DeviceTensor1 gradBias,
    const DeviceTensor1 weight,
    const DeviceTensor1 runningMean,
    const DeviceTensor1 runningVar,
    const DeviceTensor1 saveMean,
    const DeviceTensor1 saveVar,
    bool train,
    Acctype scale,
    double eps) {
  int plane = blockIdx.x;
  int N = length_sum;

  Acctype mean, stdVal;
  if (train) {
    mean = ScalarConvert<Dtype, Acctype>::to(saveMean[plane]);
    stdVal = 1 / sqrt(ScalarConvert<Dtype, Acctype>::to(saveVar[plane]) + eps);
  } else {
    mean = ScalarConvert<Dtype, Acctype>::to(runningMean[plane]);
    stdVal = 1 / sqrt(runningVar[plane] + eps);
  }

  Acctype weightVal = weight.numElements() > 0 ? ScalarConvert<Dtype, Acctype>::to(weight[plane]) : Acctype(1);

  // Compute two values across (batch, x/y/z) in one pass:
  // 1. Sum(gradOutput)
  // 2. DotProduct(input - mean, gradOutput)
  // Acctype gradOutputSum = res.v1;
  Acctype gradOutputSum = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]) * N;
  Acctype dotP = ScalarConvert<Dtype, Acctype>::to(dotP_all[plane]);

  // Acctype gradMean = gradOutputSum * norm;
  Acctype gradMean = ScalarConvert<Dtype, Acctype>::to(gradOutputMean[plane]);
  // Acctype projScale = dotP * norm * stdVal * stdVal;
  Acctype projScale = dotP * stdVal * stdVal;
  Acctype gradScale = stdVal * weightVal;

  if (gradInput.numElements() > 0) {
    for (int batch = 0; batch < gradOutput.getSize(0); ++batch) {
      for (int x = threadIdx.x; x < input_lengths[batch]; x += blockDim.x) {
        Dtype gradOut = gradOutput[batch][plane][x];
        if (train) {
          Dtype inp = input[batch][plane][x];
          Acctype proj = (inp - mean) * projScale;
          gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to((gradOut - proj - gradMean) * gradScale);
        } else {
          gradInput[batch][plane][x] = ScalarConvert<Acctype, Dtype>::to(gradOut * gradScale);
        }
      }
    }
  }

  if (gradWeight.numElements() > 0) {
    if (threadIdx.x == 0) {
      gradWeight[plane] += ScalarConvert<Acctype, Dtype>::to(scale * dotP * stdVal);
    }
  }

  if (gradBias.numElements() > 0) {
    if (threadIdx.x == 0) {
      gradBias[plane] += ScalarConvert<Acctype, Dtype>::to(scale * gradOutputSum);
    }
  }
}

#define FloatTensor3 THCDeviceTensor<float, 3>
#define FloatTensor1 THCDeviceTensor<float, 1>
#define IntTensor1 THCDeviceTensor<int, 1>

template <typename Dtype, int Dim>
static THCDeviceTensor<Dtype, Dim> devicetensor(THCState *state, THCTensor *t) {
  if (!t) {
    return THCDeviceTensor<Dtype, Dim>();
  }
  int inDim = t->dim();
  THAssert(inDim == Dim);
  return toDeviceTensor<Dtype, Dim>(state, t);
}

extern "C" void THNN_CudaBatchnormUpdateOutput(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *output_,
    THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_, THCTensor *runningVar_,
    THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double momentum, double eps);

extern "C" void THNN_CudaBatchnormMean(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_, int length_sum);

extern "C" void THNN_CudaBatchnormVar(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_,
    THCTensor *saveVar_, int length_sum);

void THNN_CudaBatchnormMean(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_, int length_sum) {
  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  cudaStream_t s = THCState_getCurrentStream(state);
  cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);

  dim3 blocks(input.getSize(1));
  dim3 threads(getNumThreads(input.getSize(2)));
  batchnorm_mean_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>
      <<<blocks, threads, 0, s>>>(input, input_lengths, length_sum, saveMean);
  THCudaCheck(cudaGetLastError());
}

void THNN_CudaBatchnormVar(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *saveMean_,
    THCTensor *saveVar_, int length_sum) {
  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  FloatTensor1 saveVar = devicetensor<float, 1>(state, saveVar_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  cudaStream_t s = THCState_getCurrentStream(state);
  cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);

  dim3 blocks(input.getSize(1));
  dim3 threads(getNumThreads(input.getSize(2)));
  batchnorm_val_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>
      <<<blocks, threads, 0, s>>>(input, input_lengths, length_sum, saveMean, saveVar);
  THCudaCheck(cudaGetLastError());
}

void THNN_CudaBatchnormUpdateOutput(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *output_,
    THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_, THCTensor *runningVar_,
    THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double momentum, double eps) {
  THCTensor_resizeAs(state, output_, input_);

  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor3 output = devicetensor<float, 3>(state, output_);
  FloatTensor1 weight = devicetensor<float, 1>(state, weight_);
  FloatTensor1 bias = devicetensor<float, 1>(state, bias_);
  FloatTensor1 runningMean = devicetensor<float, 1>(state, runningMean_);
  FloatTensor1 runningVar = devicetensor<float, 1>(state, runningVar_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  FloatTensor1 saveStd = devicetensor<float, 1>(state, saveStd_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  cudaStream_t s = THCState_getCurrentStream(state);
  cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);

  if (!train) {
    dim3 blocks(input.getSize(1));
    dim3 threads(getNumThreads(input.getSize(2)));
    batchnorm_inference_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>
        <<<blocks, threads, 0, s>>>(input, input_lengths, output, runningMean, runningVar, weight, bias, eps);
  } else {
    dim3 blocks(input.getSize(1));
    dim3 threads(getNumThreads(input.getSize(2)));
    batchnorm_output_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>
        <<<blocks, threads, 0, s>>>(input, input_lengths, length_sum, output, weight, bias, eps, momentum,
                                    runningMean, runningVar, saveMean, saveStd);
  }
  THCudaCheck(cudaGetLastError());
}

extern "C" void THNN_CudaBatchnormBackward(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
    THCTensor *gradOutputMean_, THCTensor *dotP, THCTensor *gradInput_, THCTensor *gradWeight_,
    THCTensor *gradBias_, THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
    THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double scale, double eps);

extern "C" void THNN_CudaBatchnormGradStats(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
    THCTensor *runningMean_, THCTensor *saveMean_, THCTensor *gradOutputMean_, THCTensor *dotP_,
    int length_sum, int train);

void THNN_CudaBatchnormGradStats(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
    THCTensor *runningMean_, THCTensor *saveMean_, THCTensor *gradOutputMean_, THCTensor *dotP_,
    int length_sum, int train) {
  // THCUNN_check_shape(state, input_, gradOutput_);
  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor3 gradOutput = devicetensor<float, 3>(state, gradOutput_);
  FloatTensor1 gradOutputMean = devicetensor<float, 1>(state, gradOutputMean_);
  FloatTensor1 dotP = devicetensor<float, 1>(state, dotP_);
  FloatTensor1 runningMean = devicetensor<float, 1>(state, runningMean_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  cudaStream_t s = THCState_getCurrentStream(state);

  dim3 blocks(gradOutput.getSize(1));
  dim3 threads(getNumThreads(gradOutput.getSize(2)));
  batchnorm_grad_stats_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>
      <<<blocks, threads, 0, s>>>(input, input_lengths, length_sum, gradOutput, runningMean, saveMean,
                                  gradOutputMean, dotP, train);
  THCudaCheck(cudaGetLastError());
}

void THNN_CudaBatchnormBackward(
    THCState *state, THCTensor *input_, THCTensor *input_lengths_, THCTensor *gradOutput_,
    THCTensor *gradOutputMean_, THCTensor *dotP_, THCTensor *gradInput_, THCTensor *gradWeight_,
    THCTensor *gradBias_, THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
    THCTensor *saveMean_, THCTensor *saveStd_, int length_sum, int train, double scale, double eps) {
  // THCUNN_check_shape(state, input_, gradOutput_);
  FloatTensor3 input = devicetensor<float, 3>(state, input_);
  FloatTensor3 gradOutput = devicetensor<float, 3>(state, gradOutput_);
  FloatTensor1 gradOutputMean = devicetensor<float, 1>(state, gradOutputMean_);
  FloatTensor1 dotP = devicetensor<float, 1>(state, dotP_);
  FloatTensor3 gradInput = devicetensor<float, 3>(state, gradInput_);
  FloatTensor1 gradWeight = devicetensor<float, 1>(state, gradWeight_);
  FloatTensor1 gradBias = devicetensor<float, 1>(state, gradBias_);
  FloatTensor1 weight = devicetensor<float, 1>(state, weight_);
  FloatTensor1 runningMean = devicetensor<float, 1>(state, runningMean_);
  FloatTensor1 runningVar = devicetensor<float, 1>(state, runningVar_);
  FloatTensor1 saveMean = devicetensor<float, 1>(state, saveMean_);
  FloatTensor1 saveStd = devicetensor<float, 1>(state, saveStd_);
  IntTensor1 input_lengths = devicetensor<int, 1>(state, input_lengths_);

  cudaStream_t s = THCState_getCurrentStream(state);

  dim3 blocks(gradOutput.getSize(1));
  dim3 threads(getNumThreads(gradOutput.getSize(2)));
  batchnorm_backward_kernel<float, float, FloatTensor1, FloatTensor3, IntTensor1>
      <<<blocks, threads, 0, s>>>(input, input_lengths, length_sum, gradOutput, gradOutputMean, dotP,
                                  gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
                                  saveMean, saveStd, train, scale, eps);
  THCudaCheck(cudaGetLastError());
}
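The core pattern both files in this pair share is the per-plane, variable-length block reduction in reduce_vl: one block per feature channel, each thread strides over only the valid timesteps of every batch element, and the partial sums are combined with a warp-level reduction before a single thread writes the result. The standalone CUDA sketch below mirrors that pattern for the mean computation only; it is an illustration and not part of either dataset record above. The kernel name varlen_mean_kernel, the flat N x C x T memory layout, and the parameter names are assumptions made for this example.

// Illustrative sketch (not part of the files above): per-channel mean over
// variable-length sequences, mirroring the reduce_vl reduction structure.
// Assumes a contiguous N x C x T layout and blockDim.x a multiple of 32.
#include <cuda_runtime.h>

__inline__ __device__ float warpReduceSum(float val) {
  // Tree reduction within one warp using down-shuffles.
  for (int offset = 16; offset > 0; offset >>= 1)
    val += __shfl_down_sync(0xffffffff, val, offset);
  return val;
}

__global__ void varlen_mean_kernel(const float* __restrict__ input,
                                   const int* __restrict__ lengths,
                                   int N, int C, int T, int length_sum,
                                   float* __restrict__ mean_out) {
  int plane = blockIdx.x;  // one block per feature channel, as in the kernels above
  float sum = 0.f;

  // Each thread strides over the valid timesteps of every batch element.
  for (int b = 0; b < N; ++b)
    for (int t = threadIdx.x; t < lengths[b]; t += blockDim.x)
      sum += input[(b * C + plane) * T + t];

  // Warp-level partial sums, then combine one value per warp via shared memory.
  __shared__ float shared[32];
  sum = warpReduceSum(sum);
  if ((threadIdx.x & 31) == 0) shared[threadIdx.x >> 5] = sum;
  __syncthreads();

  int nWarps = (blockDim.x + 31) >> 5;
  if (threadIdx.x < 32) {
    sum = (threadIdx.x < nWarps) ? shared[threadIdx.x] : 0.f;
    sum = warpReduceSum(sum);
    if (threadIdx.x == 0) mean_out[plane] = sum / (float)length_sum;
  }
}

A launch such as varlen_mean_kernel<<<C, 256>>>(d_input, d_lengths, N, C, T, length_sum, d_mean) would compute the same per-channel means that batchnorm_mean_kernel obtains through reduce_vl, provided length_sum is the total number of valid timesteps across the batch.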