Dataset columns (string lengths, min-max):
  hip_filename    string, 5-84
  hip_content     string, 79-9.69M
  cuda_filename   string, 4-83
  cuda_content    string, 19-9.69M
6ea2eb2af03f71463401efd122de15fc9ff3030f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/manual_awkward_NumpyArray_getitem_boolean_nonzero.cu", line) #include "awkward/kernels.h" #include "standard_parallel_algorithms.h" __global__ void awkward_NumpyArray_getitem_boolean_nonzero_filter_mask( const int8_t* fromptr, int64_t* filtered_index, int64_t stride, int64_t length) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < length && thread_id % stride == 0) { if (fromptr[thread_id] != 0) { filtered_index[thread_id] = 1; } } } template <typename T> __global__ void awkward_NumpyArray_getitem_boolean_nonzero_kernel( T* toptr, int64_t* prefixedsum_mask, const int8_t* fromptr, int64_t length, int64_t stride) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < length && thread_id % stride == 0) { if(fromptr[thread_id] != 0) { toptr[prefixedsum_mask[thread_id] - 1] = thread_id; } } } template <typename T> ERROR awkward_NumpyArray_getitem_boolean_nonzero( T* toptr, const int8_t* fromptr, int64_t length, int64_t stride) { int64_t* res_temp; int64_t* filtered_index; dim3 blocks_per_grid = blocks(length); dim3 threads_per_block = threads(length); HANDLE_ERROR(hipMalloc((void**)&res_temp, sizeof(int64_t) * length)); HANDLE_ERROR(hipMalloc((void**)&filtered_index, sizeof(int64_t) * length)); HANDLE_ERROR(hipMemset(filtered_index, 0, sizeof(int64_t) * length)); hipLaunchKernelGGL(( awkward_NumpyArray_getitem_boolean_nonzero_filter_mask), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, fromptr, filtered_index, stride, length); exclusive_scan<int64_t, int64_t>(res_temp, filtered_index, length); hipLaunchKernelGGL(( awkward_NumpyArray_getitem_boolean_nonzero_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, toptr, res_temp, fromptr, length, stride); hipDeviceSynchronize(); return success(); } ERROR awkward_NumpyArray_getitem_boolean_nonzero_64( int64_t* toptr, const int8_t* fromptr, int64_t length, int64_t stride) { return awkward_NumpyArray_getitem_boolean_nonzero<int64_t>( toptr, fromptr, length, stride); }
6ea2eb2af03f71463401efd122de15fc9ff3030f.cu
#define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/manual_awkward_NumpyArray_getitem_boolean_nonzero.cu", line) #include "awkward/kernels.h" #include "standard_parallel_algorithms.h" __global__ void awkward_NumpyArray_getitem_boolean_nonzero_filter_mask( const int8_t* fromptr, int64_t* filtered_index, int64_t stride, int64_t length) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < length && thread_id % stride == 0) { if (fromptr[thread_id] != 0) { filtered_index[thread_id] = 1; } } } template <typename T> __global__ void awkward_NumpyArray_getitem_boolean_nonzero_kernel( T* toptr, int64_t* prefixedsum_mask, const int8_t* fromptr, int64_t length, int64_t stride) { int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < length && thread_id % stride == 0) { if(fromptr[thread_id] != 0) { toptr[prefixedsum_mask[thread_id] - 1] = thread_id; } } } template <typename T> ERROR awkward_NumpyArray_getitem_boolean_nonzero( T* toptr, const int8_t* fromptr, int64_t length, int64_t stride) { int64_t* res_temp; int64_t* filtered_index; dim3 blocks_per_grid = blocks(length); dim3 threads_per_block = threads(length); HANDLE_ERROR(cudaMalloc((void**)&res_temp, sizeof(int64_t) * length)); HANDLE_ERROR(cudaMalloc((void**)&filtered_index, sizeof(int64_t) * length)); HANDLE_ERROR(cudaMemset(filtered_index, 0, sizeof(int64_t) * length)); awkward_NumpyArray_getitem_boolean_nonzero_filter_mask<<< blocks_per_grid, threads_per_block>>>(fromptr, filtered_index, stride, length); exclusive_scan<int64_t, int64_t>(res_temp, filtered_index, length); awkward_NumpyArray_getitem_boolean_nonzero_kernel<<<blocks_per_grid, threads_per_block>>>( toptr, res_temp, fromptr, length, stride); cudaDeviceSynchronize(); return success(); } ERROR awkward_NumpyArray_getitem_boolean_nonzero_64( int64_t* toptr, const int8_t* fromptr, int64_t length, int64_t stride) { return awkward_NumpyArray_getitem_boolean_nonzero<int64_t>( toptr, fromptr, length, stride); }
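Each row above pairs a CUDA source (cuda_content) with the HIP translation that hipify emitted for it (hip_content); in the awkward_NumpyArray_getitem_boolean_nonzero pair the only substantive change is the kernel-launch syntax. Below is a minimal sketch of that rewrite using a made-up toy kernel, not code taken from the dataset:

// Toy kernel, assumed for illustration only (not part of the dataset).
__global__ void scale(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

// CUDA launch form, as it appears in the cuda_content column:
//   scale<<<dim3(blocks), dim3(threads)>>>(d_data, 2.0f, n);
//
// Equivalent HIP launch emitted by hipify, as it appears in hip_content
// (the two extra 0 arguments are the shared-memory size and the stream):
//   hipLaunchKernelGGL(scale, dim3(blocks), dim3(threads), 0, 0, d_data, 2.0f, n);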
e2d953a1132aacc67cba1a1f792b92874b16e1d3.hip
// !!! This is a file automatically generated by hipify!!! /********************************************************************* run-cg.cu Main program. Computes the all the versions of CG and tests each module. **********************************************************************/ #define MAIN_PROGRAM #include "common.h" #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include "global.h" #include "geometry.h" #include "linalg.h" #include "transform.h" #include "cg.h" #include "cg_gpu.h" #include "cg_gpu_sp.h" #include "cg_gpu_mp.h" #include "mp_refinement.h" #include "mp_refinement_cpu.h" #include "cg_pm.h" #include "cg_gpu_pm.h" #include "pow_method.h" #include "pow_method_gpu.h" #include "dot_prod_test.h" #include "cg_test.h" #include "pm_test.h" int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int nBytes, nBytes_fl, status, N; double *w,*w_cpu,*w_gpu,*v,*v_cpu,*v_gpu,*x,*w_fl_d; double *w_pm,*lamb_max,*lamb_min,*v_pm, *w_pm_gpu, *v_pm_gpu,*lamb_max_gpu,*lamb_min_gpu; float *w_fl, *v_fl, *w_fl_mp, *v_fl_mp; double iStart, iElaps; N=256; int dimx = 256; int dimy = 1; if (argc>1) { N=atoi(argv[1]); } if (argc>3) { dimx=atoi(argv[2]); dimy=atoi(argv[3]); } // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // Global variables // Inner points in x- and y-directions Nx=N; Ny=N; // Number of gridpoints npts=(Nx+2)*(Ny+2); // Active points array active_pts(); // Memoryspace per vector in Byte nBytes=npts*sizeof(double); nBytes_fl=npts*sizeof(float); // Host allocation w=(double*)malloc(nBytes); v=(double*)malloc(nBytes); w_cpu=(double*)malloc(nBytes); v_cpu=(double*)malloc(nBytes); w_gpu=(double*)malloc(nBytes); v_gpu=(double*)malloc(nBytes); w_fl=(float*)malloc(nBytes_fl); v_fl=(float*)malloc(nBytes_fl); v_fl_mp=(float*)malloc(nBytes_fl); w_fl_mp=(float*)malloc(nBytes_fl); w_fl_d=(double*)malloc(nBytes); w_pm=(double*)malloc(nBytes); lamb_max=(double*)malloc(nBytes); lamb_min=(double*)malloc(nBytes); v_pm=(double*)malloc(nBytes); w_pm_gpu=(double*)malloc(nBytes); v_pm_gpu=(double*)malloc(nBytes); lamb_max_gpu=(double*)malloc(nBytes); lamb_min_gpu=(double*)malloc(nBytes); // Setting to zero memset(w, 0, nBytes); memset(v, 0, nBytes); memset(w_cpu, 0, nBytes); memset(v_cpu, 0, nBytes); memset(w_gpu, 0, nBytes); memset(v_gpu, 0, nBytes); memset(w_fl, 0, nBytes_fl); memset(v_fl, 0, nBytes_fl); memset(w_fl_mp, 0, nBytes_fl); memset(v_fl_mp, 0, nBytes_fl); memset(w_fl_d, 0, nBytes); memset(w_pm, 0, nBytes); memset(lamb_max, 0, nBytes); memset(lamb_min, 0, nBytes); memset(v_pm, 0, nBytes); memset(w_pm_gpu, 0, nBytes); memset(v_pm_gpu, 0, nBytes); memset(lamb_max_gpu, 0, nBytes); memset(lamb_min_gpu, 0, nBytes); // Active points if ((Nx<=16)&&(Ny<=16)) print_active(); random_vector(w); random_vector(v); assign_v2v(w_cpu,w); assign_v2v(w_gpu,w); assign_v2v(w_pm,w); assign_v2v(w_pm_gpu,w); // Device allocation double *d_v, *d_w, *d_x; CHECK(hipMalloc((void **)&d_v, nBytes)); CHECK(hipMalloc((void **)&d_w, nBytes)); // transfer data from host to device CHECK(hipMemcpy(d_v, v, nBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_w, w, nBytes, hipMemcpyHostToDevice)); // invoke kernel at host side // Threads per block block.x=dimx; block.y=dimy; block.z=1; // Blocks per grid grid.x=(Nx + block.x - 1) / block.x; grid.y=(Ny + block.y - 1) / block.y; grid.z=1; // Test 
reduction int Nunroll=8; if (npts>256 && Nunroll>1) { double cpu_sum=0.0; iStart = seconds(); for (int i = 0; i < npts; i++) cpu_sum += v[i]; iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %f\n", iElaps, cpu_sum); dim3 block2 (256,1); int nblk = (npts + (block2.x*Nunroll) - 1)/(block2.x*Nunroll); dim3 grid2 (nblk,1); CHECK(hipMalloc((void **)&d_x, nblk*sizeof(double))); CHECK(hipMemset(d_x,0,nblk*sizeof(double))); x=(double*)malloc(nblk*sizeof(double)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling), dim3(grid2), dim3(block2), 0, 0, d_v, d_x, npts); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(x, d_x, nblk * sizeof(double),hipMemcpyDeviceToHost)); double gpu_sum = 0.0; for (int i = 0; i < grid2.x; i++) gpu_sum += x[i]; printf("gpu Unrolling elapsed %f sec gpu_sum: %f <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid2.x, block2.x); assert(abs((gpu_sum-cpu_sum)/cpu_sum)<sqrt(npts)*DBL_EPSILON); } double nrm,nrm_mp; float nrm_fl; // Einheitsvektor memset(v, 0, nBytes); v[coord2index(Nx/2,Nx/2)]=1.0; // v=0, ausser am Gitterpunkt (Nx/2+1,Ny/2+1) //print_vector("v",v,1); assign_v2v(v_cpu,v); assign_v2v(v_gpu,v); assign_v2v(v_pm,v); assign_v2v(v_pm_gpu,v); d2fl(w_fl,w); d2fl(v_fl,v); d2fl(w_fl_mp,w); d2fl(v_fl_mp,v); printf("\n"); printf("N = %d\n", N); printf("\n"); // CPU Version printf("CPU\n"); iStart = seconds(); cg(w_cpu,v_cpu,100000,1e-10,&status); iElaps = seconds() - iStart; printf("CG on CPU: %f sec\n", iElaps); nrm=norm_sqr(w_cpu); printf("||x|| = %.8f\n",sqrt(nrm)); printf("\n"); // GPU Version printf("GPU\n"); iStart = seconds(); cg_gpu(w_gpu,v_gpu,100000,1e-10,&status, Nx, Ny, nBytes); iElaps = seconds() - iStart; printf("CG on GPU: %f sec\n", iElaps); nrm=norm_sqr(w_gpu); printf("||x|| = %.8f\n",sqrt(nrm)); printf("\n"); // GPU Version in SP printf("GPU in single precission\n"); iStart = seconds(); cg_gpu_sp(w_fl,v_fl,100000,1e-08,&status, Nx, Ny, nBytes_fl); iElaps = seconds() - iStart; printf("SP-CG on GPU: %f sec\n", iElaps); nrm_fl=norm_sqr_fl(w_fl); printf("||x|| = %.8f\n",sqrt(nrm_fl)); printf("Rel. deviation of x = %e\n",abs((sqrt(nrm_fl)-sqrt(nrm))/sqrt(nrm))); printf("\n"); double *w_mp_gpu, *v_mp_gpu; w_mp_gpu=(double*)malloc(nBytes); v_mp_gpu=(double*)malloc(nBytes); memset(w_mp_gpu, 0, nBytes); memset(v_mp_gpu, 0, nBytes); v_mp_gpu[coord2index(Nx/2,Nx/2)]=1.0; int iterMP = 1000; // Mixed precission printf("Initiating MP refinement (GPU)\n"); iStart = seconds(); mp_refinement(w_mp_gpu,v_mp_gpu,100000,iterMP,1e-10,&status,Nx,Ny); iElaps = seconds() - iStart; printf("MP refinement: %f sec\n", iElaps); nrm_mp=norm_sqr(w_mp_gpu); printf("||x|| = %.8f\n",sqrt(nrm_mp)); printf("Rel. 
deviation of x = %e\n",abs((sqrt(nrm_mp)-sqrt(nrm))/sqrt(nrm))); printf("\n"); // Power iteration CPU printf("Power iteration CPU\n"); iStart = seconds(); pow_method(w_pm,v_pm,lamb_max,lamb_min,10000,&status); iElaps = seconds() - iStart; printf("PM on CPU: %f sec\n", iElaps); printf("Maximum eigenvalue l_max= %.8f\n",*lamb_max); printf("Minimum eigenvalue l_min= %.8f\n",*lamb_min); printf("Condition number = %.8f\n",(*lamb_max)/(*lamb_min)); printf("\n"); // Power iteration GPU printf("Power iteration GPU\n"); iStart = seconds(); pow_method_gpu(w_pm_gpu,v_pm_gpu,lamb_max_gpu,lamb_min_gpu,100000,&status,Nx,Ny); iElaps = seconds() - iStart; printf("PM on GPU: %f sec\n", iElaps); printf("Maximum eigenvalue l_max= %.8f\n",*lamb_max_gpu); printf("Minimum eigenvalue l_min= %.8f\n",*lamb_min_gpu); printf("Condition number = %.8f\n",(*lamb_max_gpu)/(*lamb_min_gpu)); //-------------------Tests--------------------------------- //--Scalar product------// double *a,*b; a=(double*)malloc(nBytes); b=(double*)malloc(nBytes); random_vector(a); random_vector(b); printf("\n"); printf("\n N = %d \n", N); dot_prod_test(a, b, nBytes); printf("\n"); //--CG------------------// double res=0.0; double *v_test, *w_test; v_test=(double*)malloc(nBytes); memset(v_test, 0, nBytes); w_test=(double*)malloc(nBytes); memset(w_test, 0, nBytes); fl2d(w_test,w_fl); printf("\n"); printf("\n N = %d \n", N); printf("\n"); v_test[coord2index(Nx/2,Nx/2)]=1.0; res=cg_test(w_cpu,v_test); printf("\n Norm. residuum (CPU) ||r|| = ||b -Ax||: %e", res); v_test[coord2index(Nx/2,Nx/2)]=1.0; res=cg_test(w_gpu,v_test); printf("\n Norm. residuum (GPU) ||r|| = ||b -Ax||: %e", res); v_test[coord2index(Nx/2,Nx/2)]=1.0; res=cg_test(w_test,v_test); printf("\n Norm. residuum (SP-GPU) ||r|| = ||b -Ax||: %e", res); v_test[coord2index(Nx/2,Nx/2)]=1.0; res=cg_test(w_mp_gpu,v_test); printf("\n Norm. residuum (MP-GPU) ||r|| = ||b -Ax||: %e", res); printf("\n"); printf("\n N = %d \n", N); //----Power Method-----------// res=pm_test(w_pm,lamb_max); printf("\n Norm. residuum for lambda ||A x - lambda x||: %e", res); free(active); free(w); free(v); free(w_gpu); free(v_gpu); free(w_cpu); free(v_cpu); free(w_fl); free(v_fl); free(v_fl_mp); free(w_test); free(v_test); free(a); free(b); //free(res); hipFree(d_w); hipFree(d_v); hipFree(d_x); return (0); }
e2d953a1132aacc67cba1a1f792b92874b16e1d3.cu
/********************************************************************* run-cg.cu Main program. Computes the all the versions of CG and tests each module. **********************************************************************/ #define MAIN_PROGRAM #include "common.h" #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include "global.h" #include "geometry.h" #include "linalg.h" #include "transform.h" #include "cg.h" #include "cg_gpu.h" #include "cg_gpu_sp.h" #include "cg_gpu_mp.h" #include "mp_refinement.h" #include "mp_refinement_cpu.h" #include "cg_pm.h" #include "cg_gpu_pm.h" #include "pow_method.h" #include "pow_method_gpu.h" #include "dot_prod_test.h" #include "cg_test.h" #include "pm_test.h" int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int nBytes, nBytes_fl, status, N; double *w,*w_cpu,*w_gpu,*v,*v_cpu,*v_gpu,*x,*w_fl_d; double *w_pm,*lamb_max,*lamb_min,*v_pm, *w_pm_gpu, *v_pm_gpu,*lamb_max_gpu,*lamb_min_gpu; float *w_fl, *v_fl, *w_fl_mp, *v_fl_mp; double iStart, iElaps; N=256; int dimx = 256; int dimy = 1; if (argc>1) { N=atoi(argv[1]); } if (argc>3) { dimx=atoi(argv[2]); dimy=atoi(argv[3]); } // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); // Global variables // Inner points in x- and y-directions Nx=N; Ny=N; // Number of gridpoints npts=(Nx+2)*(Ny+2); // Active points array active_pts(); // Memoryspace per vector in Byte nBytes=npts*sizeof(double); nBytes_fl=npts*sizeof(float); // Host allocation w=(double*)malloc(nBytes); v=(double*)malloc(nBytes); w_cpu=(double*)malloc(nBytes); v_cpu=(double*)malloc(nBytes); w_gpu=(double*)malloc(nBytes); v_gpu=(double*)malloc(nBytes); w_fl=(float*)malloc(nBytes_fl); v_fl=(float*)malloc(nBytes_fl); v_fl_mp=(float*)malloc(nBytes_fl); w_fl_mp=(float*)malloc(nBytes_fl); w_fl_d=(double*)malloc(nBytes); w_pm=(double*)malloc(nBytes); lamb_max=(double*)malloc(nBytes); lamb_min=(double*)malloc(nBytes); v_pm=(double*)malloc(nBytes); w_pm_gpu=(double*)malloc(nBytes); v_pm_gpu=(double*)malloc(nBytes); lamb_max_gpu=(double*)malloc(nBytes); lamb_min_gpu=(double*)malloc(nBytes); // Setting to zero memset(w, 0, nBytes); memset(v, 0, nBytes); memset(w_cpu, 0, nBytes); memset(v_cpu, 0, nBytes); memset(w_gpu, 0, nBytes); memset(v_gpu, 0, nBytes); memset(w_fl, 0, nBytes_fl); memset(v_fl, 0, nBytes_fl); memset(w_fl_mp, 0, nBytes_fl); memset(v_fl_mp, 0, nBytes_fl); memset(w_fl_d, 0, nBytes); memset(w_pm, 0, nBytes); memset(lamb_max, 0, nBytes); memset(lamb_min, 0, nBytes); memset(v_pm, 0, nBytes); memset(w_pm_gpu, 0, nBytes); memset(v_pm_gpu, 0, nBytes); memset(lamb_max_gpu, 0, nBytes); memset(lamb_min_gpu, 0, nBytes); // Active points if ((Nx<=16)&&(Ny<=16)) print_active(); random_vector(w); random_vector(v); assign_v2v(w_cpu,w); assign_v2v(w_gpu,w); assign_v2v(w_pm,w); assign_v2v(w_pm_gpu,w); // Device allocation double *d_v, *d_w, *d_x; CHECK(cudaMalloc((void **)&d_v, nBytes)); CHECK(cudaMalloc((void **)&d_w, nBytes)); // transfer data from host to device CHECK(cudaMemcpy(d_v, v, nBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_w, w, nBytes, cudaMemcpyHostToDevice)); // invoke kernel at host side // Threads per block block.x=dimx; block.y=dimy; block.z=1; // Blocks per grid grid.x=(Nx + block.x - 1) / block.x; grid.y=(Ny + block.y - 1) / block.y; grid.z=1; // Test reduction int Nunroll=8; if (npts>256 && Nunroll>1) { double 
cpu_sum=0.0; iStart = seconds(); for (int i = 0; i < npts; i++) cpu_sum += v[i]; iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %f\n", iElaps, cpu_sum); dim3 block2 (256,1); int nblk = (npts + (block2.x*Nunroll) - 1)/(block2.x*Nunroll); dim3 grid2 (nblk,1); CHECK(cudaMalloc((void **)&d_x, nblk*sizeof(double))); CHECK(cudaMemset(d_x,0,nblk*sizeof(double))); x=(double*)malloc(nblk*sizeof(double)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling<<<grid2, block2>>>(d_v, d_x, npts); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(x, d_x, nblk * sizeof(double),cudaMemcpyDeviceToHost)); double gpu_sum = 0.0; for (int i = 0; i < grid2.x; i++) gpu_sum += x[i]; printf("gpu Unrolling elapsed %f sec gpu_sum: %f <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid2.x, block2.x); assert(abs((gpu_sum-cpu_sum)/cpu_sum)<sqrt(npts)*DBL_EPSILON); } double nrm,nrm_mp; float nrm_fl; // Einheitsvektor memset(v, 0, nBytes); v[coord2index(Nx/2,Nx/2)]=1.0; // v=0, ausser am Gitterpunkt (Nx/2+1,Ny/2+1) //print_vector("v",v,1); assign_v2v(v_cpu,v); assign_v2v(v_gpu,v); assign_v2v(v_pm,v); assign_v2v(v_pm_gpu,v); d2fl(w_fl,w); d2fl(v_fl,v); d2fl(w_fl_mp,w); d2fl(v_fl_mp,v); printf("\n"); printf("N = %d\n", N); printf("\n"); // CPU Version printf("CPU\n"); iStart = seconds(); cg(w_cpu,v_cpu,100000,1e-10,&status); iElaps = seconds() - iStart; printf("CG on CPU: %f sec\n", iElaps); nrm=norm_sqr(w_cpu); printf("||x|| = %.8f\n",sqrt(nrm)); printf("\n"); // GPU Version printf("GPU\n"); iStart = seconds(); cg_gpu(w_gpu,v_gpu,100000,1e-10,&status, Nx, Ny, nBytes); iElaps = seconds() - iStart; printf("CG on GPU: %f sec\n", iElaps); nrm=norm_sqr(w_gpu); printf("||x|| = %.8f\n",sqrt(nrm)); printf("\n"); // GPU Version in SP printf("GPU in single precission\n"); iStart = seconds(); cg_gpu_sp(w_fl,v_fl,100000,1e-08,&status, Nx, Ny, nBytes_fl); iElaps = seconds() - iStart; printf("SP-CG on GPU: %f sec\n", iElaps); nrm_fl=norm_sqr_fl(w_fl); printf("||x|| = %.8f\n",sqrt(nrm_fl)); printf("Rel. deviation of x = %e\n",abs((sqrt(nrm_fl)-sqrt(nrm))/sqrt(nrm))); printf("\n"); double *w_mp_gpu, *v_mp_gpu; w_mp_gpu=(double*)malloc(nBytes); v_mp_gpu=(double*)malloc(nBytes); memset(w_mp_gpu, 0, nBytes); memset(v_mp_gpu, 0, nBytes); v_mp_gpu[coord2index(Nx/2,Nx/2)]=1.0; int iterMP = 1000; // Mixed precission printf("Initiating MP refinement (GPU)\n"); iStart = seconds(); mp_refinement(w_mp_gpu,v_mp_gpu,100000,iterMP,1e-10,&status,Nx,Ny); iElaps = seconds() - iStart; printf("MP refinement: %f sec\n", iElaps); nrm_mp=norm_sqr(w_mp_gpu); printf("||x|| = %.8f\n",sqrt(nrm_mp)); printf("Rel. 
deviation of x = %e\n",abs((sqrt(nrm_mp)-sqrt(nrm))/sqrt(nrm))); printf("\n"); // Power iteration CPU printf("Power iteration CPU\n"); iStart = seconds(); pow_method(w_pm,v_pm,lamb_max,lamb_min,10000,&status); iElaps = seconds() - iStart; printf("PM on CPU: %f sec\n", iElaps); printf("Maximum eigenvalue l_max= %.8f\n",*lamb_max); printf("Minimum eigenvalue l_min= %.8f\n",*lamb_min); printf("Condition number = %.8f\n",(*lamb_max)/(*lamb_min)); printf("\n"); // Power iteration GPU printf("Power iteration GPU\n"); iStart = seconds(); pow_method_gpu(w_pm_gpu,v_pm_gpu,lamb_max_gpu,lamb_min_gpu,100000,&status,Nx,Ny); iElaps = seconds() - iStart; printf("PM on GPU: %f sec\n", iElaps); printf("Maximum eigenvalue l_max= %.8f\n",*lamb_max_gpu); printf("Minimum eigenvalue l_min= %.8f\n",*lamb_min_gpu); printf("Condition number = %.8f\n",(*lamb_max_gpu)/(*lamb_min_gpu)); //-------------------Tests--------------------------------- //--Scalar product------// double *a,*b; a=(double*)malloc(nBytes); b=(double*)malloc(nBytes); random_vector(a); random_vector(b); printf("\n"); printf("\n N = %d \n", N); dot_prod_test(a, b, nBytes); printf("\n"); //--CG------------------// double res=0.0; double *v_test, *w_test; v_test=(double*)malloc(nBytes); memset(v_test, 0, nBytes); w_test=(double*)malloc(nBytes); memset(w_test, 0, nBytes); fl2d(w_test,w_fl); printf("\n"); printf("\n N = %d \n", N); printf("\n"); v_test[coord2index(Nx/2,Nx/2)]=1.0; res=cg_test(w_cpu,v_test); printf("\n Norm. residuum (CPU) ||r|| = ||b -Ax||: %e", res); v_test[coord2index(Nx/2,Nx/2)]=1.0; res=cg_test(w_gpu,v_test); printf("\n Norm. residuum (GPU) ||r|| = ||b -Ax||: %e", res); v_test[coord2index(Nx/2,Nx/2)]=1.0; res=cg_test(w_test,v_test); printf("\n Norm. residuum (SP-GPU) ||r|| = ||b -Ax||: %e", res); v_test[coord2index(Nx/2,Nx/2)]=1.0; res=cg_test(w_mp_gpu,v_test); printf("\n Norm. residuum (MP-GPU) ||r|| = ||b -Ax||: %e", res); printf("\n"); printf("\n N = %d \n", N); //----Power Method-----------// res=pm_test(w_pm,lamb_max); printf("\n Norm. residuum for lambda ||A x - lambda x||: %e", res); free(active); free(w); free(v); free(w_gpu); free(v_gpu); free(w_cpu); free(v_cpu); free(w_fl); free(v_fl); free(v_fl_mp); free(w_test); free(v_test); free(a); free(b); //free(res); cudaFree(d_w); cudaFree(d_v); cudaFree(d_x); return (0); }
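Beyond kernel launches, the run-cg.cu -> run-cg.hip pair above shows the other mechanical part of the translation: the CUDA runtime header and API calls are renamed one-for-one (cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy, and so on). The following is a small self-contained sketch of those renames, with a made-up buffer that is not taken from the dataset:

#include <hip/hip_runtime.h>   // replaces <cuda_runtime.h>
#include <vector>

int main() {
  std::vector<double> host(1024, 1.0);
  size_t nBytes = host.size() * sizeof(double);
  double* d_buf = nullptr;
  hipMalloc((void**)&d_buf, nBytes);                             // was cudaMalloc
  hipMemcpy(d_buf, host.data(), nBytes, hipMemcpyHostToDevice);  // was cudaMemcpy / cudaMemcpyHostToDevice
  hipMemset(d_buf, 0, nBytes);                                   // was cudaMemset
  hipDeviceSynchronize();                                        // was cudaDeviceSynchronize
  hipFree(d_buf);                                                // was cudaFree
  return 0;
}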
1a440a8451c10ca8af2cb3c29fc1a94024524f20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <device_launch_parameters.h> #include <cstdio> extern "C" { __global__ void simpleBfs(int N, int level, int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_distance, int *d_parent, int *changed) { int thid = blockIdx.x * blockDim.x + threadIdx.x; int valueChange = 0; if (thid < N && d_distance[thid] == level) { int u = thid; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (level + 1 < d_distance[v]) { d_distance[v] = level + 1; d_parent[v] = i; valueChange = 1; } } } if (valueChange) { *changed = valueChange; } } __global__ void queueBfs(int level, int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_distance, int *d_parent, int queueSize, int *nextQueueSize, int *d_currentQueue, int *d_nextQueue) { int thid = blockIdx.x * blockDim.x + threadIdx.x; if (thid < queueSize) { int u = d_currentQueue[thid]; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (d_distance[v] == INT_MAX && atomicMin(&d_distance[v], level + 1) == INT_MAX) { d_parent[v] = i; int position = atomicAdd(nextQueueSize, 1); d_nextQueue[position] = v; } } } } //Scan bfs __global__ void nextLayer(int level, int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_distance, int *d_parent, int queueSize, int *d_currentQueue) { int thid = blockIdx.x * blockDim.x + threadIdx.x; if (thid < queueSize) { int u = d_currentQueue[thid]; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (level + 1 < d_distance[v]) { d_distance[v] = level + 1; d_parent[v] = i; } } } } __global__ void countDegrees(int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_parent, int queueSize, int *d_currentQueue, int *d_degrees) { int thid = blockIdx.x * blockDim.x + threadIdx.x; if (thid < queueSize) { int u = d_currentQueue[thid]; int degree = 0; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (d_parent[v] == i && v != u) { ++degree; } } d_degrees[thid] = degree; } } __global__ void scanDegrees(int size, int *d_degrees, int *incrDegrees) { int thid = blockIdx.x * blockDim.x + threadIdx.x; if (thid < size) { //write initial values to shared memory __shared__ int prefixSum[1024]; int modulo = threadIdx.x; prefixSum[modulo] = d_degrees[thid]; __syncthreads(); //calculate scan on this block //go up for (int nodeSize = 2; nodeSize <= 1024; nodeSize <<= 1) { if ((modulo & (nodeSize - 1)) == 0) { if (thid + (nodeSize >> 1) < size) { int nextPosition = modulo + (nodeSize >> 1); prefixSum[modulo] += prefixSum[nextPosition]; } } __syncthreads(); } //write information for increment prefix sums if (modulo == 0) { int block = thid >> 10; incrDegrees[block + 1] = prefixSum[modulo]; } //go down for (int nodeSize = 1024; nodeSize > 1; nodeSize >>= 1) { if ((modulo & (nodeSize - 1)) == 0) { if (thid + (nodeSize >> 1) < size) { int next_position = modulo + (nodeSize >> 1); int tmp = prefixSum[modulo]; prefixSum[modulo] -= prefixSum[next_position]; prefixSum[next_position] = tmp; } } __syncthreads(); } d_degrees[thid] = prefixSum[modulo]; } } __global__ void assignVerticesNextQueue(int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_parent, int queueSize, int *d_currentQueue, int *d_nextQueue, int *d_degrees, int *incrDegrees, int nextQueueSize) { int thid = blockIdx.x * blockDim.x + 
threadIdx.x; if (thid < queueSize) { __shared__ int sharedIncrement; if (!threadIdx.x) { sharedIncrement = incrDegrees[thid >> 10]; } __syncthreads(); int sum = 0; if (threadIdx.x) { sum = d_degrees[thid - 1]; } int u = d_currentQueue[thid]; int counter = 0; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (d_parent[v] == i && v != u) { int nextQueuePlace = sharedIncrement + sum + counter; d_nextQueue[nextQueuePlace] = v; counter++; } } } } }
1a440a8451c10ca8af2cb3c29fc1a94024524f20.cu
#include <device_launch_parameters.h> #include <cstdio> extern "C" { __global__ void simpleBfs(int N, int level, int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_distance, int *d_parent, int *changed) { int thid = blockIdx.x * blockDim.x + threadIdx.x; int valueChange = 0; if (thid < N && d_distance[thid] == level) { int u = thid; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (level + 1 < d_distance[v]) { d_distance[v] = level + 1; d_parent[v] = i; valueChange = 1; } } } if (valueChange) { *changed = valueChange; } } __global__ void queueBfs(int level, int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_distance, int *d_parent, int queueSize, int *nextQueueSize, int *d_currentQueue, int *d_nextQueue) { int thid = blockIdx.x * blockDim.x + threadIdx.x; if (thid < queueSize) { int u = d_currentQueue[thid]; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (d_distance[v] == INT_MAX && atomicMin(&d_distance[v], level + 1) == INT_MAX) { d_parent[v] = i; int position = atomicAdd(nextQueueSize, 1); d_nextQueue[position] = v; } } } } //Scan bfs __global__ void nextLayer(int level, int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_distance, int *d_parent, int queueSize, int *d_currentQueue) { int thid = blockIdx.x * blockDim.x + threadIdx.x; if (thid < queueSize) { int u = d_currentQueue[thid]; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (level + 1 < d_distance[v]) { d_distance[v] = level + 1; d_parent[v] = i; } } } } __global__ void countDegrees(int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_parent, int queueSize, int *d_currentQueue, int *d_degrees) { int thid = blockIdx.x * blockDim.x + threadIdx.x; if (thid < queueSize) { int u = d_currentQueue[thid]; int degree = 0; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (d_parent[v] == i && v != u) { ++degree; } } d_degrees[thid] = degree; } } __global__ void scanDegrees(int size, int *d_degrees, int *incrDegrees) { int thid = blockIdx.x * blockDim.x + threadIdx.x; if (thid < size) { //write initial values to shared memory __shared__ int prefixSum[1024]; int modulo = threadIdx.x; prefixSum[modulo] = d_degrees[thid]; __syncthreads(); //calculate scan on this block //go up for (int nodeSize = 2; nodeSize <= 1024; nodeSize <<= 1) { if ((modulo & (nodeSize - 1)) == 0) { if (thid + (nodeSize >> 1) < size) { int nextPosition = modulo + (nodeSize >> 1); prefixSum[modulo] += prefixSum[nextPosition]; } } __syncthreads(); } //write information for increment prefix sums if (modulo == 0) { int block = thid >> 10; incrDegrees[block + 1] = prefixSum[modulo]; } //go down for (int nodeSize = 1024; nodeSize > 1; nodeSize >>= 1) { if ((modulo & (nodeSize - 1)) == 0) { if (thid + (nodeSize >> 1) < size) { int next_position = modulo + (nodeSize >> 1); int tmp = prefixSum[modulo]; prefixSum[modulo] -= prefixSum[next_position]; prefixSum[next_position] = tmp; } } __syncthreads(); } d_degrees[thid] = prefixSum[modulo]; } } __global__ void assignVerticesNextQueue(int *d_adjacencyList, int *d_edgesOffset, int *d_edgesSize, int *d_parent, int queueSize, int *d_currentQueue, int *d_nextQueue, int *d_degrees, int *incrDegrees, int nextQueueSize) { int thid = blockIdx.x * blockDim.x + threadIdx.x; if (thid < queueSize) { __shared__ int sharedIncrement; if (!threadIdx.x) 
{ sharedIncrement = incrDegrees[thid >> 10]; } __syncthreads(); int sum = 0; if (threadIdx.x) { sum = d_degrees[thid - 1]; } int u = d_currentQueue[thid]; int counter = 0; for (int i = d_edgesOffset[u]; i < d_edgesOffset[u] + d_edgesSize[u]; i++) { int v = d_adjacencyList[i]; if (d_parent[v] == i && v != u) { int nextQueuePlace = sharedIncrement + sum + counter; d_nextQueue[nextQueuePlace] = v; counter++; } } } } }
bd965ea6839aca97b0a256e35e2390a1fa519dc8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/permute_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void PermuteKernel(const int nthreads, Dtype *const bottom_data, const bool forward, const int *permute_order, const int *old_steps, const int *new_steps, const int num_axes, Dtype *const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int temp_idx = index; int old_idx = 0; for (int i = 0; i < num_axes; ++i) { int order = permute_order[i]; old_idx += (temp_idx / new_steps[i]) * old_steps[order]; temp_idx %= new_steps[i]; } if (forward) { top_data[index] = bottom_data[old_idx]; } else { bottom_data[old_idx] = top_data[index]; } } } template <typename Dtype> void PermuteLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { if (need_permute_) { Dtype *bottom_data = bottom[0]->mutable_gpu_data(); Dtype *top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); const int *permute_order = permute_order_.gpu_data(); const int *new_steps = new_steps_.gpu_data(); const int *old_steps = old_steps_.gpu_data(); bool foward = true; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PermuteKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, foward, permute_order, old_steps, new_steps, num_axes_, top_data); CUDA_POST_KERNEL_CHECK; } else { // If there is no need to permute, we share data to save memory. top[0]->ShareData(*bottom[0]); } } template <typename Dtype> void PermuteLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { if (need_permute_) { Dtype *top_diff = top[0]->mutable_gpu_diff(); Dtype *bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); const int *permute_order = permute_order_.gpu_data(); const int *new_steps = new_steps_.gpu_data(); const int *old_steps = old_steps_.gpu_data(); bool foward = false; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( PermuteKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_diff, foward, permute_order, old_steps, new_steps, num_axes_, top_diff); CUDA_POST_KERNEL_CHECK; } else { // If there is no need to permute, we share diff to save memory. bottom[0]->ShareDiff(*top[0]); } } INSTANTIATE_LAYER_GPU_FUNCS(PermuteLayer); } // namespace caffe
bd965ea6839aca97b0a256e35e2390a1fa519dc8.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/permute_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void PermuteKernel(const int nthreads, Dtype *const bottom_data, const bool forward, const int *permute_order, const int *old_steps, const int *new_steps, const int num_axes, Dtype *const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { int temp_idx = index; int old_idx = 0; for (int i = 0; i < num_axes; ++i) { int order = permute_order[i]; old_idx += (temp_idx / new_steps[i]) * old_steps[order]; temp_idx %= new_steps[i]; } if (forward) { top_data[index] = bottom_data[old_idx]; } else { bottom_data[old_idx] = top_data[index]; } } } template <typename Dtype> void PermuteLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { if (need_permute_) { Dtype *bottom_data = bottom[0]->mutable_gpu_data(); Dtype *top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); const int *permute_order = permute_order_.gpu_data(); const int *new_steps = new_steps_.gpu_data(); const int *old_steps = old_steps_.gpu_data(); bool foward = true; // NOLINT_NEXT_LINE(whitespace/operators) PermuteKernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, foward, permute_order, old_steps, new_steps, num_axes_, top_data); CUDA_POST_KERNEL_CHECK; } else { // If there is no need to permute, we share data to save memory. top[0]->ShareData(*bottom[0]); } } template <typename Dtype> void PermuteLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { if (need_permute_) { Dtype *top_diff = top[0]->mutable_gpu_diff(); Dtype *bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); const int *permute_order = permute_order_.gpu_data(); const int *new_steps = new_steps_.gpu_data(); const int *old_steps = old_steps_.gpu_data(); bool foward = false; // NOLINT_NEXT_LINE(whitespace/operators) PermuteKernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_diff, foward, permute_order, old_steps, new_steps, num_axes_, top_diff); CUDA_POST_KERNEL_CHECK; } else { // If there is no need to permute, we share diff to save memory. bottom[0]->ShareDiff(*top[0]); } } INSTANTIATE_LAYER_GPU_FUNCS(PermuteLayer); } // namespace caffe
a5f14760585d0ef35dc847c57a27eb486d706b35.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This code contains NVIDIA Confidential Information and is disclosed to you // under a form of NVIDIA software license agreement provided separately to you. // // Notice // NVIDIA Corporation and its licensors retain all intellectual property and // proprietary rights in and to this software and related documentation and // any modifications thereto. Any use, reproduction, disclosure, or // distribution of this software and related documentation without an express // license agreement from NVIDIA Corporation is strictly prohibited. // // ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES // NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO // THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, // MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. // // Information and code furnished is believed to be accurate and reliable. // However, NVIDIA Corporation assumes no responsibility for the consequences of use of such // information or for any infringement of patents or other rights of third parties that may // result from its use. No license is granted by implication or otherwise under any patent // or patent rights of NVIDIA Corporation. Details are subject to change without notice. // This code supersedes and replaces all information previously supplied. // NVIDIA Corporation products are not authorized for use as critical // components in life support devices or systems without express written approval of // NVIDIA Corporation. // // Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "CuSolverKernel.h" #include "CuClothData.h" #include "CuPhaseConfig.h" #include <new> // placement new /* For detailed comments about the algorithm check SwSolverKernel.cpp (or the documentation) The CPU implementation is generally easier to read, and comments are not duplicated in other implementations. Only CUDA implementation specific comments are left in this implementation. */ #ifndef FLT_EPSILON #define FLT_EPSILON 1.192092896e-07F #endif #ifndef FLT_MAX #define FLT_MAX 3.402823466e+38F #endif // Converting pointers to shared/global addresses is faster than doing generic loads on SM50 #define CONVERT_ADDRESSES (__CUDA_ARCH__ >= 500) #if !defined(_WIN64) && !defined(__x86_64__) #define POINTER_CONSTRAINT "r" #define POINTER_TYPE "u32" #else #define POINTER_CONSTRAINT "l" #define POINTER_TYPE "u64" #endif #ifndef __CUDA_ARCH__ #define assert(x) #endif extern "C" { extern _CRTIMP __host__ __device__ int __cdecl printf(const char*, ...); } using namespace nv; // shared memory copy (instead of relying on constant cache) __shared__ cloth::CuClothData gClothData; __shared__ cloth::CuFrameData gFrameData; __shared__ cloth::CuIterationData gIterData; // Our way to create stream local variables __shared__ void* gProfileBuffer; __shared__ uint32_t gProfileBaseId; static const uint32_t gCuClothDataSize = sizeof(cloth::CuClothData) / sizeof(float); static const uint32_t gCuFrameDataSize = sizeof(cloth::CuFrameData) / sizeof(float); static const uint32_t gCuIterationDataSize = sizeof(cloth::CuIterationData) / sizeof(float); static const uint32_t gCuPhaseConfigSize = sizeof(cloth::CuPhaseConfig) / sizeof(float); /* Memory block for all temporary data in shared memory (in 'allocation' order). 
The numbers indicate the allocation slot if used a stack allocator. 0) simulate*()::configs (numPhases*sizeof(CuPhaseConfig)) 1) simulate*()::particles ({0,1,2}*4*numParticles floats) 2) CuCollision::mCapsuleIndices, mCapsuleMasks, mConvexMasks (numCapsules*4+numConvexes ints) 3) CuCollision::mPrevData (4*numSpheres+10*numCones floats) 4) CuCollision::collideConvexes() (4*numPlanes floats) 4) CuCollision::collideTriangles() (19*numTriangles floats) 4) CuCollision::mCurData::Spheres (4*numSpheres floats) 5) computeParticleBounds()::dst (192 floats written, 208 float read) 5) computeSphereBounds()::dst (192 floats written, 208 floats read) 5) CuCollision::mCurData::Cones (10*numCones floats) 6) CuCollision::mShapeGrid (2*6*sGridSize=96 floats) 4) CuSelfCollision::buildAcceleration()::buffer (34*16=544 ints) */ extern __shared__ float gSharedMemory[]; extern __shared__ int32_t gSharedSigned[]; extern __shared__ uint32_t gSharedUnsigned[]; /***************** Pointer Wrappers **********************/ enum AddressSpace { Shared, Global }; template <AddressSpace, typename T> __device__ T load(const T* ptr); template <AddressSpace, typename T> __device__ void store(T* ptr, const T& value); #if !CONVERT_ADDRESSES template <AddressSpace, typename T> __device__ T load(const T* ptr) { return *ptr; } template <AddressSpace, typename T> __device__ void store(T* ptr, const T& value) { *ptr = value; } #else template <> __device__ float load<Shared>(const float* ptr) { float value; asm("ld.shared.f32 %0, [%1];" : "=f"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ int32_t load<Shared>(const int32_t* ptr) { int32_t value; asm("ld.shared.s32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ uint32_t load<Shared>(const uint32_t* ptr) { uint32_t value; asm("ld.shared.u32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ void store<Shared>(int32_t* ptr, const int32_t& value) { asm("st.shared.s32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory"); } template <> __device__ void store<Shared>(float* ptr, const float& value) { asm("st.shared.f32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "f"(value) : "memory"); } template <> __device__ void store<Shared>(uint32_t* ptr, const uint32_t& value) { asm("st.shared.u32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory"); } template <> __device__ float load<Global>(const float* ptr) { float value; asm("ld.global.f32 %0, [%1];" : "=f"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ int32_t load<Global>(const int32_t* ptr) { int32_t value; asm("ld.global.s32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ uint32_t load<Global>(const uint32_t* ptr) { uint32_t value; asm("ld.global.u32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ void store<Global>(int32_t* ptr, const int32_t& value) { asm("st.global.s32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory"); } template <> __device__ void store<Global>(float* ptr, const float& value) { asm("st.global.f32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "f"(value) : "memory"); } template <> __device__ void store<Global>(uint32_t* ptr, const uint32_t& value) { asm("st.global.u32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory"); } #endif template <AddressSpace, typename> struct Pointer; template <AddressSpace S, typename T> struct Reference { template <AddressSpace, 
typename> friend struct Reference; friend struct Pointer<S, T>; __device__ Reference() { } __device__ Reference(const Reference& other) : mPtr(other.mPtr) { } template <typename U> __device__ Reference(const Reference<S, U>& other) : mPtr(other.mPtr) { } __device__ Reference& operator = (const Reference& other) { return *this = static_cast<T>(other); } template <typename U> __device__ Reference& operator = (const Reference<S, U>& other) { return *this = static_cast<U>(other); } __device__ Reference& operator += (const T& value) { return *this = *this + value; } __device__ Reference& operator |= (const T& value) { return *this = *this | value; } __device__ Reference& operator &= (const T& value) { return *this = *this & value; } __device__ Reference& operator *= (const T& value) { return *this = *this * value; } __device__ operator T() const { return load<S>(mPtr); } __device__ Reference& operator = (const T& value) { store<S>(mPtr, value); return *this; } //private: T* mPtr; __device__ explicit Reference(T& ref) : mPtr(&ref) { } template <typename U> friend __device__ void atomicAdd(Reference& ref, U value) { ::atomicAdd(ref.mPtr, value); } }; template <AddressSpace S, typename T> struct Convert { static __device__ T* from(T* ptr) { return ptr; } static __device__ T* to(T* ptr) { return ptr; } }; #if CONVERT_ADDRESSES template <typename T> struct Convert<Shared, T> { static __device__ T* from(T* ptr) { asm("cvta.shared." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr)); return ptr; } static __device__ T* to(T* ptr) { asm("cvta.to.shared." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr)); return ptr; } }; template <typename T> struct Convert<Global, T> { static __device__ T* from(T* ptr) { asm("cvta.global." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr)); return ptr; } static __device__ T* to(T* ptr) { asm("cvta.to.global." 
POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr)); return ptr; } }; #endif template <AddressSpace S, typename T> __device__ T* generic(const Pointer<S, T>&); // pointer forced to point to shared memory (only works for sizeof(T) <= 4) template <AddressSpace S, typename T> struct Pointer { template <AddressSpace, typename> friend struct Pointer; friend __device__ T* generic<S, T>(const Pointer<S, T>&); friend struct GlobalParticleData; __device__ Pointer() { } __device__ Pointer(const Pointer& other) : mPtr(other.mPtr) { } template <typename U> __device__ Pointer(const Pointer<S, U>& other) : mPtr(other.mPtr) { } // construct from generic pointer __device__ explicit Pointer(T* ptr) : mPtr(Convert<S, T>::to(ptr)) { } __device__ bool operator!=(const Pointer& other) const { return mPtr != other.mPtr; } __device__ bool operator<(const Pointer& other) const { return mPtr < other.mPtr; } __device__ Pointer operator + (ptrdiff_t i) const { return Pointer(*this) += i; } __device__ Pointer& operator += (ptrdiff_t i) { mPtr += i * stride(); return *this; } __device__ Pointer operator - (ptrdiff_t i) const { return Pointer(*this) -= i; } __device__ Pointer& operator -= (ptrdiff_t i) { mPtr -= i * stride(); return *this; } __device__ Pointer& operator ++ () { mPtr += stride(); return *this; } __device__ Pointer& operator -- () { mPtr -= stride(); return *this; } __device__ Reference<S, T> operator*() const { return Reference<S, T>(*mPtr); } __device__ Reference<S, T> operator[](int32_t i) const { return Reference<S, T>(mPtr[i * stride()]); } private: // convert back to generic pointer, private for safety, use generic() instead __device__ operator T*() const { return Convert<S, T>::from(mPtr); } __device__ static size_t stride() { return 1; } template <typename U> __device__ Pointer(const Pointer<S, U>& other, ptrdiff_t stridedOffset) : mPtr(other.mPtr + stridedOffset) { } T* mPtr; }; // pointers to global memory are all referring to particle data // stored as array of structs, so they have a stride of 4. 
template<> __device__ size_t Pointer<Global, float>::stride() { return 4; } template<> __device__ size_t Pointer<Global, const float>::stride() { return 4; } template <AddressSpace S, typename T> __device__ T* generic(const Pointer<S, T>& ptr) { return ptr; } #if !CONVERT_ADDRESSES template <typename T> __device__ T* generic(T* ptr) { return ptr; } #endif /***************** Particle Data **********************/ template <typename T> struct SharedParticleReference { __device__ operator float3() const { float3 result; result.x = mReferences[0]; result.y = mReferences[1]; result.z = mReferences[2]; return result; } __device__ SharedParticleReference& operator = (const float3& vec) { mReferences[0] = vec.x; mReferences[1] = vec.y; mReferences[2] = vec.z; return *this; } __device__ operator float4() const { float4 result; result.x = mReferences[0]; result.y = mReferences[1]; result.z = mReferences[2]; result.w = mReferences[3]; return result; } __device__ SharedParticleReference& operator = (const float4& vec) { mReferences[0] = vec.x; mReferences[1] = vec.y; mReferences[2] = vec.z; mReferences[3] = vec.w; return *this; } Reference<Shared, T> mReferences[4]; }; struct SharedParticleData { typedef float3 VectorType; typedef Pointer<Shared, float> PointerType; typedef Pointer<Shared, const float> ConstPointerType; typedef Reference<Shared, float> ReferenceType; typedef Reference<Shared, const float> ConstReferenceType; typedef SharedParticleReference<float> ParticleReferenceType; typedef SharedParticleReference<const float> ParticleConstReferenceType; __device__ ReferenceType operator()(int32_t index, int32_t element) { return mPointers[element][index]; } __device__ ConstReferenceType operator()(int32_t index, int32_t element) const { return mPointers[element][index]; } __device__ ParticleReferenceType operator()(int32_t index) { ParticleReferenceType result = { mPointers[0][index], mPointers[1][index], mPointers[2][index], mPointers[3][index] }; return result; } __device__ ParticleConstReferenceType operator()(int32_t index) const { ParticleConstReferenceType result = { mPointers[0][index], mPointers[1][index], mPointers[2][index], mPointers[3][index] }; return result; } __device__ const PointerType& operator[](int32_t element) { return mPointers[element]; } __device__ ConstPointerType operator[](int32_t element) const { return mPointers[element]; } PointerType mPointers[4]; }; template <typename T> struct GlobalParticleReference { __device__ GlobalParticleReference(Pointer<Global, T> ref) : mPtr(reinterpret_cast<T* const&>(ref)) { } #if CONVERT_ADDRESSES __device__ operator float4() const { float4 vec; asm("ld.global.v4.f32 {%0, %1, %2, %3}, [%4];" : "=f"(vec.x), "=f"(vec.y), "=f"(vec.z), "=f"(vec.w) : POINTER_CONSTRAINT(mPtr)); return vec; } __device__ GlobalParticleReference& operator = (const float4& vec) { asm("st.global.v4.f32 [%0], {%1, %2, %3, %4};" ::POINTER_CONSTRAINT(mPtr), "f"(vec.x), "f"(vec.y), "f"(vec.z), "f"(vec.w) : "memory"); return *this; } __device__ operator float3() const { float4 vec = *this; return make_float3(vec.x, vec.y, vec.z); } #else __device__ operator float4() const { return *reinterpret_cast<const float4*>(mPtr); } __device__ GlobalParticleReference& operator = (const float4& vec) { *reinterpret_cast<float4*>(mPtr) = vec; return *this; } __device__ operator float3() const { return *reinterpret_cast<const float3*>(mPtr); } __device__ GlobalParticleReference& operator = (const float3& vec) { *reinterpret_cast<float3*>(mPtr) = vec; return *this; } #endif T* mPtr; // 
pointer to global address }; struct GlobalParticleData { #if CONVERT_ADDRESSES // ld.global.v4 saturates memory bandwidth better than 3x ld.global typedef float4 VectorType; #else // the same isn't true for ld without state space typedef float3 VectorType; #endif typedef Pointer<Global, float> PointerType; typedef Pointer<Global, const float> ConstPointerType; typedef Reference<Global, float> ReferenceType; typedef Reference<Global, const float> ConstReferenceType; typedef GlobalParticleReference<float> ParticleReferenceType; typedef GlobalParticleReference<const float> ParticleConstReferenceType; __device__ ReferenceType operator()(int32_t index, int32_t element) { return *PointerType(mPtr, index * 4 + element); } __device__ ConstReferenceType operator()(int32_t index, int32_t element) const { return *ConstPointerType(mPtr, index * 4 + element); } __device__ ParticleReferenceType operator()(int32_t index) { return PointerType(mPtr, index * 4); } __device__ ParticleConstReferenceType operator()(int32_t index) const { return ConstPointerType(mPtr, index * 4); } __device__ PointerType operator[](int32_t element) { return PointerType(mPtr, element); } __device__ ConstPointerType operator[](int32_t element) const { return ConstPointerType(mPtr, element); } PointerType mPtr; }; /***************** Profiling **********************/ struct ProfileDisabledZone { __device__ ProfileDisabledZone(cloth::CuProfileZoneIds::Enum) { } }; #if defined(__CUDA_ARCH__) && defined(PX_PROFILE) // profile zones enabled for profile build // code below is copied from GPUProfile.h and needs to be kept in sync. #define NUM_WARPS_PER_PROFILE_BUFFER (4 * 1024 * 1024) struct __align__(16) WarpProfileEvent { __device__ WarpProfileEvent(uint16_t id) : block(blockIdx.x + gridDim.x * blockIdx.y), warp(threadIdx.x >> 5), userData(0), eventId(id) { uint32_t smid32, warpid32; asm volatile("mov.u32 %0, %smid;" : "=r"(smid32)); asm volatile("mov.u32 %0, %warpid;" : "=r"(warpid32)); asm volatile("mov.u32 %0, %clock;" : "=r"(startTime)); smid = smid32; warpid = warpid32; endTime = startTime; } uint16_t block; uint8_t warp; uint8_t smid; uint8_t warpid; uint8_t userData; uint16_t eventId; uint32_t startTime; uint32_t endTime; }; struct ProfileZone { __device__ ProfileZone(cloth::CuProfileZoneIds::Enum id) : mEvent(0) { if (!gProfileBuffer || threadIdx.x & 0x1f) return; // +1: first entry reserved for counter uint32_t index = atomicAdd(reinterpret_cast<uint32_t*>(gProfileBuffer), 1) + 1; if (index >= NUM_WARPS_PER_PROFILE_BUFFER) return; mEvent = reinterpret_cast<WarpProfileEvent*>(gProfileBuffer) + index; new (mEvent) WarpProfileEvent(gProfileBaseId + id); } __device__ ~ProfileZone() { if (mEvent) mEvent->endTime = clock(); } WarpProfileEvent* mEvent; }; #else typedef ProfileDisabledZone ProfileZone; #endif #if 1 // set to 1 to enable detailed profile zones typedef ProfileZone ProfileDetailZone; #else typedef ProfileDisabledZone ProfileDetailZone; #endif namespace { // cut down version of thrust::uninitialized // avoids warning about non-empty c'tor template <typename T> struct uninitialized { __device__ inline T& get() { return *reinterpret_cast<T*>(data); } // maximum alignment required by device code is 16 __align__(16) unsigned char data[sizeof(T)]; }; } #if __CUDA_ARCH__ < 320 namespace { template <typename T> __device__ T __ldg(const T* __restrict ptr) { return *ptr; } } #endif #define CU_SOLVER_KERNEL_CU #include "CuCollision.h" #include "CuSelfCollision.h" namespace { __device__ void loadIterData(const 
cloth::CuIterationData* __restrict iterData) { if (threadIdx.x < gCuIterationDataSize) { gIterData.mIntegrationTrafo[threadIdx.x] = __ldg(iterData->mIntegrationTrafo + threadIdx.x); } } // integrate particle positions and store transposed template <bool IsTurning, typename CurrentT, typename PreviousT> __device__ void integrateParticles(CurrentT& current, PreviousT& previous) { ProfileDetailZone zone(cloth::CuProfileZoneIds::INTEGRATE); const float* __restrict trafo = gIterData.mIntegrationTrafo; for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x) { float4 prev = previous(i); float4 next = current(i); float4 cur = { next.x, next.y, next.z, prev.w }; if (next.w == 0.0f) next.w = prev.w; if (next.w > 0.0f) { if (IsTurning) { next.x = next.x + trafo[3] + cur.x * trafo[15] + prev.x * trafo[6] + cur.y * trafo[16] + prev.y * trafo[7] + cur.z * trafo[17] + prev.z * trafo[8]; next.y = next.y + trafo[4] + cur.x * trafo[18] + prev.x * trafo[9] + cur.y * trafo[19] + prev.y * trafo[10] + cur.z * trafo[20] + prev.z * trafo[11]; next.z = next.z + trafo[5] + cur.x * trafo[21] + prev.x * trafo[12] + cur.y * trafo[22] + prev.y * trafo[13] + cur.z * trafo[23] + prev.z * trafo[14]; } else { next.x += (cur.x - prev.x) * trafo[6] + trafo[3]; next.y += (cur.y - prev.y) * trafo[9] + trafo[4]; next.z += (cur.z - prev.z) * trafo[12] + trafo[5]; } cur.x += trafo[0]; cur.y += trafo[1]; cur.z += trafo[2]; } current(i) = next; previous(i) = cur; } } template <typename CurrentT, typename PreviousT> __device__ void integrateParticles(CurrentT& current, PreviousT& previous) { if (gIterData.mIsTurning) integrateParticles<true>(current, previous); else integrateParticles<false>(current, previous); } template <typename CurrentT> __device__ void accelerateParticles(CurrentT& current) { // might be better to move this into integrate particles const float* __restrict accelerations = gFrameData.mParticleAccelerations; if (!accelerations) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::ACCELERATE); __syncthreads(); // looping with 4 instead of 1 thread per particle float sqrIterDt = ~threadIdx.x & 0x3 ? gFrameData.mIterDt * gFrameData.mIterDt : 0.0f; typename CurrentT::PointerType sharedCurPos = current[threadIdx.x % 4]; for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x) { // turning this into __ldg slows kernel down even without particle accelerations (!) 
if (current(i / 4, 3) > 0.0f) sharedCurPos[i / 4] += accelerations[i] * sqrIterDt; } __syncthreads(); } __device__ float3 operator + (const float3& u, const float3& v) { return make_float3(u.x + v.x, u.y + v.y, u.z + v.z); } __device__ float3 operator - (const float3& u, const float3& v) { return make_float3(u.x - v.x, u.y - v.y, u.z - v.z); } __device__ float3 operator*(float s, const float3& v) { return make_float3(v.x * s, v.y * s, v.z * s); } __device__ float dot3(const float3& u, const float3& v) { return u.x * v.x + u.y * v.y + u.z * v.z; } __device__ float3 cross3(const float3& u, const float3& v) { return make_float3(u.y * v.z - u.z * v.y, u.z * v.x - u.x * v.z, u.x * v.y - u.y * v.x); } __device__ void applyImpulse(SharedParticleData::ParticleReferenceType pos, const float3& impulse) { float scale = -pos.mReferences[3]; #if CONVERT_ADDRESSES // Use this instead of atomicAdd function to work around compiler issue treating the pointer as global memory instead of shared memory asm("red.shared.add.f32 [%0], %1;" ::POINTER_CONSTRAINT(pos.mReferences[0].mPtr), "f"(impulse.x * scale)); asm("red.shared.add.f32 [%0], %1;" ::POINTER_CONSTRAINT(pos.mReferences[1].mPtr), "f"(impulse.y * scale)); asm("red.shared.add.f32 [%0], %1;" ::POINTER_CONSTRAINT(pos.mReferences[2].mPtr), "f"(impulse.z * scale)); #else atomicAdd(pos.mReferences[0].mPtr, impulse.x * scale); atomicAdd(pos.mReferences[1].mPtr, impulse.y * scale); atomicAdd(pos.mReferences[2].mPtr, impulse.z * scale); #endif } __device__ void applyImpulse(GlobalParticleData::ParticleReferenceType pos, const float3& impulse) { float scale = -pos.mPtr[3]; atomicAdd(pos.mPtr + 0, impulse.x * scale); atomicAdd(pos.mPtr + 1, impulse.y * scale); atomicAdd(pos.mPtr + 2, impulse.z * scale); } template <bool IsTurning, typename CurrentT, typename PreviousT> __device__ void applyWind(CurrentT& current, PreviousT& previous) { const float dragCoefficient = gFrameData.mDragCoefficient; const float liftCoefficient = gFrameData.mLiftCoefficient; const float fluidDensity = gFrameData.mFluidDensity; const float itrDt = gFrameData.mIterDt; if (dragCoefficient == 0.0f && liftCoefficient == 0.0f) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::WIND); const float oneThird = 1 / 3.0f; float3 wind = make_float3(gIterData.mWind[0], gIterData.mWind[1], gIterData.mWind[2]); const uint16_t* tIt = gClothData.mTriangles; for (int32_t i = threadIdx.x; i < gClothData.mNumTriangles; i += blockDim.x) { uint16_t i0 = tIt[i * 3 + 0]; uint16_t i1 = tIt[i * 3 + 1]; uint16_t i2 = tIt[i * 3 + 2]; float3 c0 = current(i0); float3 c1 = current(i1); float3 c2 = current(i2); // float w1 = current(i0, 3); // float w2 = current(i1, 3); // float w2 = current(i2, 3); // // float wMult = w1 * w2 * w3; // float invMass = wMult < FLT_EPSILON ? 
0.f : w1 * w2 * w3 / (w1 * w2 + w1 * w3 + w2 * w3); float3 p0 = previous(i0); float3 p1 = previous(i1); float3 p2 = previous(i2); float3 cur = oneThird * (c0 + c1 + c2); float3 prev = oneThird * (p0 + p1 + p2); float3 delta = cur - prev + wind; if (IsTurning) { const float3* rot = reinterpret_cast<const float3*>(gFrameData.mRotation); float3 d = wind - prev; delta = cur + d.x * rot[0] + d.y * rot[1] + d.z * rot[2]; } float3 normal = cross3(c2 - c0, c1 - c0); const float doubleArea = sqrtf(dot3(normal, normal)); normal = (1.0f / doubleArea) * normal; float invSqrScale = dot3(delta, delta); float scale = rsqrtf(invSqrScale); float deltaLength = sqrtf(invSqrScale); float cosTheta = dot3(normal, delta) * scale; float sinTheta = sqrtf(max(0.0f, 1.0f - cosTheta * cosTheta)); float3 liftDir = cross3(cross3(delta, normal), scale * delta); float3 lift = liftCoefficient * cosTheta * sinTheta * ((deltaLength / itrDt) * liftDir); float3 drag = dragCoefficient * abs(cosTheta) * ((deltaLength / itrDt) * delta); float3 impulse = invSqrScale < FLT_EPSILON ? make_float3(0.0f, 0.0f, 0.0f) : fluidDensity * doubleArea * (lift + drag); applyImpulse(current(i0), impulse); applyImpulse(current(i1), impulse); applyImpulse(current(i2), impulse); } __syncthreads(); } template <typename CurrentT, typename PreviousT> __device__ void applyWind(CurrentT& current, PreviousT& previous) { if (gIterData.mIsTurning) applyWind<true>(current, previous); else applyWind<false>(current, previous); } template <typename CurrentT> __device__ void constrainTether(CurrentT& current) { if (0.0f == gFrameData.mTetherConstraintStiffness || !gClothData.mNumTethers) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::TETHER); int32_t numParticles = gClothData.mNumParticles; int32_t numTethers = gClothData.mNumTethers; assert(0 == numTethers % numParticles); float stiffness = numParticles * __fdividef(gFrameData.mTetherConstraintStiffness, numTethers); float scale = gClothData.mTetherConstraintScale; const uint32_t* __restrict tIt = reinterpret_cast<const uint32_t*>(gClothData.mTethers); for (int32_t i = threadIdx.x; i < numParticles; i += blockDim.x) { float posX = current(i, 0); float posY = current(i, 1); float posZ = current(i, 2); float offsetX = 0.0f; float offsetY = 0.0f; float offsetZ = 0.0f; for (int32_t j = i; j < numTethers; j += gClothData.mNumParticles) { uint32_t tether = __ldg(tIt + j); int32_t anchor = tether & 0xffff; float deltaX = current(anchor, 0) - posX; float deltaY = current(anchor, 1) - posY; float deltaZ = current(anchor, 2) - posZ; float sqrLength = FLT_EPSILON + deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ; float radius = (tether >> 16) * scale; float slack = 1.0f - radius * rsqrtf(sqrLength); if (slack > 0.0f) { offsetX += deltaX * slack; offsetY += deltaY * slack; offsetZ += deltaZ * slack; } } current(i, 0) = posX + offsetX * stiffness; current(i, 1) = posY + offsetY * stiffness; current(i, 2) = posZ + offsetZ * stiffness; } } template <typename CurrentT> __device__ void solveFabric(CurrentT& current) { ProfileDetailZone zone(cloth::CuProfileZoneIds::FABRIC); const cloth::CuPhaseConfig* __restrict cIt = (cloth::CuPhaseConfig*)gSharedMemory; const cloth::CuPhaseConfig* cEnd = cIt + gClothData.mNumPhases; for (; cIt != cEnd; ++cIt) { __syncthreads(); ProfileDetailZone zone(cloth::CuProfileZoneIds::CONSTRAINT_SET); int32_t numConstraints = cIt->mNumConstraints; if (threadIdx.x >= numConstraints) continue; const uint32_t* __restrict iIt = reinterpret_cast<const uint32_t*>(cIt->mIndices) + threadIdx.x; 
const float* restvalues = cIt->mRestvalues; const float* rIt = restvalues + threadIdx.x; const float* rEnd = restvalues + numConstraints; const float* stIt = cIt->mStiffnessValues + threadIdx.x; bool useStiffnessPerConstraint = cIt->mStiffnessValues!=nullptr; uint32_t vpijPrefetch = __ldg(iIt); float rijPrefetch = __ldg(rIt); float stijPrefetch; if (useStiffnessPerConstraint) stijPrefetch = __ldg(stIt); float stiffness = cIt->mStiffness; float stiffnessMultiplier = cIt->mStiffnessMultiplier; float compressionLimit = cIt->mCompressionLimit; float stretchLimit = cIt->mStretchLimit; do { rIt += blockDim.x; iIt += blockDim.x; stIt += blockDim.x; int32_t vpi = USHRT_MAX & vpijPrefetch; int32_t vpj = USHRT_MAX & vpijPrefetch >> 16; float rij = rijPrefetch; float stij = useStiffnessPerConstraint?1.0f - exp2f(stijPrefetch * gFrameData.mStiffnessExponent):stiffness; if (rIt < rEnd) { vpijPrefetch = __ldg(iIt); rijPrefetch = __ldg(rIt); if (useStiffnessPerConstraint) stijPrefetch = __ldg(stIt); } float vxi = current(vpi, 0); float vyi = current(vpi, 1); float vzi = current(vpi, 2); float vwi = current(vpi, 3); float vxj = current(vpj, 0); float vyj = current(vpj, 1); float vzj = current(vpj, 2); float vwj = current(vpj, 3); float hxij = vxj - vxi; float hyij = vyj - vyi; float hzij = vzj - vzi; float e2ij = FLT_EPSILON + hxij * hxij + hyij * hyij + hzij * hzij; float negErij = rij > FLT_EPSILON ? -1.0f + rij * rsqrtf(e2ij) : 0.0f; negErij = negErij + stiffnessMultiplier * max(compressionLimit, min(-negErij, stretchLimit)); float negExij = __fdividef(negErij * stij, FLT_EPSILON + vwi + vwj); float vmi = -vwi * negExij; current(vpi, 0) = vxi + vmi * hxij; current(vpi, 1) = vyi + vmi * hyij; current(vpi, 2) = vzi + vmi * hzij; float vmj = +vwj * negExij; current(vpj, 0) = vxj + vmj * hxij; current(vpj, 1) = vyj + vmj * hyij; current(vpj, 2) = vzj + vmj * hzij; } while (rIt < rEnd); } __syncthreads(); } template <typename CurrentT> __device__ void constrainMotion(CurrentT& current, float alpha) { if (!gFrameData.mStartMotionConstraints) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::MOTION); // negative because of fused multiply-add optimization float negativeScale = -gClothData.mMotionConstraintScale; float negativeBias = -gClothData.mMotionConstraintBias; const float4* startIt = reinterpret_cast<const float4*>(gFrameData.mStartMotionConstraints); const float4* targetIt = reinterpret_cast<const float4*>(gFrameData.mTargetMotionConstraints); for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x) { float4 startPos = __ldg(startIt + i); float4 targetPos = __ldg(targetIt + i); float sphereX = startPos.x + (targetPos.x - startPos.x) * alpha; float sphereY = startPos.y + (targetPos.y - startPos.y) * alpha; float sphereZ = startPos.z + (targetPos.z - startPos.z) * alpha; float sphereW = startPos.w + (targetPos.w - startPos.w) * alpha; float dx = sphereX - current(i, 0); float dy = sphereY - current(i, 1); float dz = sphereZ - current(i, 2); float sqrLength = FLT_EPSILON + dx * dx + dy * dy + dz * dz; float negativeRadius = min(0.0f, sphereW * negativeScale + negativeBias); float slack = max(negativeRadius * rsqrtf(sqrLength) + 1.0f, 0.0f) * gFrameData.mMotionConstraintStiffness; current(i, 0) += slack * dx; current(i, 1) += slack * dy; current(i, 2) += slack * dz; // set invMass to zero if radius is zero if (negativeRadius >= 0.0f) current(i, 3) = 0.0f; } } template <typename T> __device__ void constrainSeparation(T& current, float alpha) { if 
(!gFrameData.mStartSeparationConstraints) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::SEPARATION); const float4* startIt = reinterpret_cast<const float4*>(gFrameData.mStartSeparationConstraints); const float4* targetIt = reinterpret_cast<const float4*>(gFrameData.mTargetSeparationConstraints); for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x) { float4 startPos = __ldg(startIt + i); float4 targetPos = __ldg(targetIt + i); float sphereX = startPos.x + (targetPos.x - startPos.x) * alpha; float sphereY = startPos.y + (targetPos.y - startPos.y) * alpha; float sphereZ = startPos.z + (targetPos.z - startPos.z) * alpha; float sphereW = startPos.w + (targetPos.w - startPos.w) * alpha; float dx = sphereX - current(i, 0); float dy = sphereY - current(i, 1); float dz = sphereZ - current(i, 2); float sqrLength = FLT_EPSILON + dx * dx + dy * dy + dz * dz; float slack = min(0.0f, 1.0f - sphereW * rsqrtf(sqrLength)); current(i, 0) += slack * dx; current(i, 1) += slack * dy; current(i, 2) += slack * dz; } } template <typename CurrentT, typename PreviousT> __device__ void updateSleepState(const CurrentT& current, const PreviousT& previous) { ProfileDetailZone zone(cloth::CuProfileZoneIds::SLEEP); if (!threadIdx.x) gFrameData.mSleepTestCounter += max(1, uint32_t(gFrameData.mIterDt * 1000)); __syncthreads(); if (gFrameData.mSleepTestCounter < gClothData.mSleepTestInterval) return; float maxDelta = 0.0f; for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x) { float4 prev = previous(i); maxDelta = max(fabsf(current(i, 0) - prev.x), maxDelta); maxDelta = max(fabsf(current(i, 1) - prev.y), maxDelta); maxDelta = max(fabsf(current(i, 2) - prev.z), maxDelta); } if (!threadIdx.x) { ++gFrameData.mSleepPassCounter; gFrameData.mSleepTestCounter -= gClothData.mSleepTestInterval; } __syncthreads(); if (maxDelta > gClothData.mSleepThreshold * gFrameData.mIterDt) gFrameData.mSleepPassCounter = 0; } template <typename CurrentT, typename PreviousT> __device__ void simulateCloth(CurrentT& current, PreviousT& previous) { // apply exponent to phase configs assert(blockDim.x >= gClothData.mNumPhases); if (threadIdx.x < gClothData.mNumPhases) { float exponent = gFrameData.mStiffnessExponent; float* ptr = gSharedMemory + threadIdx.x * gCuPhaseConfigSize; ptr[0] = 1.0f - exp2f(ptr[0] * exponent); ptr[1] = 1.0f - exp2f(ptr[1] * exponent); } uint32_t numIterations = gFrameData.mNumIterations; float invNumIterations = __fdividef(1.0f, numIterations); const cloth::CuIterationData* iterData = gFrameData.mIterationData; const cloth::CuIterationData* iterEnd = iterData + numIterations; loadIterData(iterData); __syncthreads(); for (float alpha = invNumIterations; iterData++ != iterEnd; alpha += invNumIterations) { integrateParticles(current, previous); accelerateParticles(current); applyWind(current, previous); constrainMotion(current, alpha); constrainTether(current); solveFabric(current); loadIterData(iterData); constrainSeparation(current, alpha); gCollideParticles.get()(current, previous, alpha); gSelfCollideParticles.get()(current); updateSleepState(current, previous); } __syncthreads(); } __device__ void simulateShared() { ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE_SHARED); __shared__ uninitialized<SharedParticleData> current; __shared__ uninitialized<SharedParticleData> previous; int32_t configDataSize = gClothData.mNumPhases * gCuPhaseConfigSize; int32_t particlesDataSize = 4 * gClothData.mNumParticles; Pointer<Shared, float> sharedCurPos = Pointer<Shared, 
float>(gSharedMemory + configDataSize + threadIdx.x % 4 * gClothData.mNumParticles); Pointer<Shared, float> sharedPrevPos = sharedCurPos + particlesDataSize; if (threadIdx.x < 4) { current.get().mPointers[threadIdx.x] = sharedCurPos; previous.get().mPointers[threadIdx.x] = sharedPrevPos; } float* globalCurPos = gClothData.mParticles; float* globalPrevPos = gClothData.mParticles + particlesDataSize; // copy particles from device memory to shared memory and transpose for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x) { sharedCurPos[i / 4] = globalCurPos[i]; sharedPrevPos[i / 4] = globalPrevPos[i]; } simulateCloth(current.get(), previous.get()); // copy particles from shared memory to device memory and transpose for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x) { globalCurPos[i] = sharedCurPos[i / 4]; globalPrevPos[i] = sharedPrevPos[i / 4]; } __syncthreads(); } __device__ void simulateStreamed() { ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE_STREAMED); __shared__ uninitialized<SharedParticleData> current; __shared__ uninitialized<GlobalParticleData> previous; int32_t configDataSize = gClothData.mNumPhases * gCuPhaseConfigSize; int32_t particlesDataSize = 4 * gClothData.mNumParticles; float* globalCurPos = gClothData.mParticles; Pointer<Shared, float> sharedCurPos = Pointer<Shared, float>(gSharedMemory + configDataSize + threadIdx.x % 4 * gClothData.mNumParticles); if (threadIdx.x < 4) current.get().mPointers[threadIdx.x] = sharedCurPos; if (!threadIdx.x) previous.get().mPtr = GlobalParticleData::PointerType(globalCurPos + particlesDataSize); // copy particles from device memory to shared memory and transpose for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x) sharedCurPos[i / 4] = globalCurPos[i]; simulateCloth(current.get(), previous.get()); // copy particles from shared memory to device memory and transpose for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x) globalCurPos[i] = sharedCurPos[i / 4]; __syncthreads(); } __device__ void simulateGlobal() { ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE_GLOBAL); __shared__ uninitialized<GlobalParticleData> current; __shared__ uninitialized<GlobalParticleData> previous; if (!threadIdx.x) { GlobalParticleData::PointerType globalCurPos(gClothData.mParticles); current.get().mPtr = globalCurPos; previous.get().mPtr = globalCurPos + gClothData.mNumParticles; } simulateCloth(current.get(), previous.get()); } } // anonymous namespace extern "C" __global__ void #if __CUDA_ARCH__ >= 300 __launch_bounds__(1024, 1) #else __launch_bounds__(512, 1) #endif simulateCloths(cloth::CuKernelData kernelData) { gProfileBuffer = kernelData.mProfileBuffer; gProfileBaseId = kernelData.mProfileBaseId; ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE); // check that http://nvbugs/1038473 is fixed assert(gSharedMemory > (float*)&gFrameData); assert(gSharedMemory > (float*)&gClothData); // fetch cloth index from queue __shared__ uint32_t clothIdx; if (!threadIdx.x) clothIdx = atomicInc(kernelData.mClothIndex, gridDim.x - 1); __syncthreads(); assert(clothIdx < gridDim.x); // copy cloth data to shared memory const uint32_t* clothData = reinterpret_cast<const uint32_t*>(kernelData.mClothData + clothIdx); if (threadIdx.x < gCuClothDataSize) reinterpret_cast<uint32_t*>(&gClothData)[threadIdx.x] = clothData[threadIdx.x]; // copy frame data to shared memory uint32_t* frameData = reinterpret_cast<uint32_t*>(kernelData.mFrameData + clothIdx); if (threadIdx.x < gCuFrameDataSize) 
reinterpret_cast<uint32_t*>(&gFrameData)[threadIdx.x] = frameData[threadIdx.x]; __syncthreads(); if (gFrameData.mSleepPassCounter >= gClothData.mSleepAfterCount) return; // cloth is sleeping, exit // copy phase configs to shared memory int32_t configDataSize = gClothData.mNumPhases * gCuPhaseConfigSize; for (int32_t i = threadIdx.x; i < configDataSize; i += blockDim.x) gSharedUnsigned[i] = reinterpret_cast<const uint32_t*>(gClothData.mPhaseConfigs)[i]; Pointer<Shared, uint32_t> scratchPtr = Pointer<Shared, uint32_t>( gSharedUnsigned + configDataSize + 4 * gFrameData.mNumSharedPositions * gClothData.mNumParticles); // initialize with placement new new (gCollideParticles.data) CuCollision(scratchPtr); new (gSelfCollideParticles.data) CuSelfCollision(); // copy particles and constraints to device if (gFrameData.mDeviceParticlesDirty) { for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 8; i += blockDim.x) gClothData.mParticles[i] = gClothData.mParticlesHostCopy[i]; } if (gFrameData.mHostMotionConstraints) { for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x) gFrameData.mTargetMotionConstraints[i] = gFrameData.mHostMotionConstraints[i]; } if (gFrameData.mHostSeparationConstraints) { for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x) gFrameData.mTargetSeparationConstraints[i] = gFrameData.mHostSeparationConstraints[i]; } if (gFrameData.mHostParticleAccelerations) { for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x) gFrameData.mParticleAccelerations[i] = gFrameData.mHostParticleAccelerations[i]; } // necessary to ensure phase configs are fully loaded before setup in simulateCloth() __syncthreads(); switch(gFrameData.mNumSharedPositions) { case 0: simulateGlobal(); break; case 1: simulateStreamed(); break; case 2: simulateShared(); break; } // write back frame data if (threadIdx.x < gCuFrameDataSize) frameData[threadIdx.x] = reinterpret_cast<const uint32_t*>(&gFrameData)[threadIdx.x]; // copy particles to host for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 8; i += blockDim.x) gClothData.mParticlesHostCopy[i] = gClothData.mParticles[i]; } const char* cloth::getKernelFunctionName() { return "simulateCloths"; }
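The distance solver above (solveFabric) works by repeatedly projecting every edge back toward its rest length and splitting the correction between the two endpoints by inverse mass. Below is a minimal host-side sketch of that per-edge projection under the same conventions (w stored as inverse mass, zero for pinned particles); the names Particle and projectEdge are illustrative, and the stiffness-multiplier/compression/stretch clamp of the kernel is left out for clarity.

#include <cmath>

struct Particle { float x, y, z, w; };   // w = inverse mass, 0 for pinned particles

// Move both endpoints of one edge toward its rest length rij by a fraction
// 'stiffness' of the error, distributed according to the inverse masses.
inline void projectEdge(Particle& pi, Particle& pj, float rij, float stiffness)
{
    const float eps = 1.192092896e-07f;  // FLT_EPSILON, as in the kernel
    float hx = pj.x - pi.x, hy = pj.y - pi.y, hz = pj.z - pi.z;
    float e2 = eps + hx*hx + hy*hy + hz*hz;

    // negative relative error: rij/|h| - 1 (edges with zero rest length are skipped)
    float negEr = rij > eps ? rij / std::sqrt(e2) - 1.0f : 0.0f;

    // scale by stiffness, weight by inverse masses
    float negEx = negEr * stiffness / (eps + pi.w + pj.w);
    float mi = -pi.w * negEx;
    float mj = +pj.w * negEx;
    pi.x += mi*hx; pi.y += mi*hy; pi.z += mi*hz;
    pj.x += mj*hx; pj.y += mj*hy; pj.z += mj*hz;
}

In the kernel the same update is issued for one constraint per thread across a whole phase, with a __syncthreads() barrier between phases, and the per-constraint stiffness is first remapped through 1 - exp2f(value * mStiffnessExponent).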
a5f14760585d0ef35dc847c57a27eb486d706b35.cu
// This code contains NVIDIA Confidential Information and is disclosed to you // under a form of NVIDIA software license agreement provided separately to you. // // Notice // NVIDIA Corporation and its licensors retain all intellectual property and // proprietary rights in and to this software and related documentation and // any modifications thereto. Any use, reproduction, disclosure, or // distribution of this software and related documentation without an express // license agreement from NVIDIA Corporation is strictly prohibited. // // ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES // NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO // THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, // MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. // // Information and code furnished is believed to be accurate and reliable. // However, NVIDIA Corporation assumes no responsibility for the consequences of use of such // information or for any infringement of patents or other rights of third parties that may // result from its use. No license is granted by implication or otherwise under any patent // or patent rights of NVIDIA Corporation. Details are subject to change without notice. // This code supersedes and replaces all information previously supplied. // NVIDIA Corporation products are not authorized for use as critical // components in life support devices or systems without express written approval of // NVIDIA Corporation. // // Copyright (c) 2008-2017 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2004 NovodeX AG. All rights reserved. #include "CuSolverKernel.h" #include "CuClothData.h" #include "CuPhaseConfig.h" #include <new> // placement new /* For detailed comments about the algorithm check SwSolverKernel.cpp (or the documentation) The CPU implementation is generally easier to read, and comments are not duplicated in other implementations. Only CUDA implementation specific comments are left in this implementation. */ #ifndef FLT_EPSILON #define FLT_EPSILON 1.192092896e-07F #endif #ifndef FLT_MAX #define FLT_MAX 3.402823466e+38F #endif // Converting pointers to shared/global addresses is faster than doing generic loads on SM50 #define CONVERT_ADDRESSES (__CUDA_ARCH__ >= 500) #if !defined(_WIN64) && !defined(__x86_64__) #define POINTER_CONSTRAINT "r" #define POINTER_TYPE "u32" #else #define POINTER_CONSTRAINT "l" #define POINTER_TYPE "u64" #endif #ifndef __CUDA_ARCH__ #define assert(x) #endif extern "C" { extern _CRTIMP __host__ __device__ int __cdecl printf(const char*, ...); } using namespace nv; // shared memory copy (instead of relying on constant cache) __shared__ cloth::CuClothData gClothData; __shared__ cloth::CuFrameData gFrameData; __shared__ cloth::CuIterationData gIterData; // Our way to create stream local variables __shared__ void* gProfileBuffer; __shared__ uint32_t gProfileBaseId; static const uint32_t gCuClothDataSize = sizeof(cloth::CuClothData) / sizeof(float); static const uint32_t gCuFrameDataSize = sizeof(cloth::CuFrameData) / sizeof(float); static const uint32_t gCuIterationDataSize = sizeof(cloth::CuIterationData) / sizeof(float); static const uint32_t gCuPhaseConfigSize = sizeof(cloth::CuPhaseConfig) / sizeof(float); /* Memory block for all temporary data in shared memory (in 'allocation' order). The numbers indicate the allocation slot if used a stack allocator. 
0) simulate*()::configs (numPhases*sizeof(CuPhaseConfig)) 1) simulate*()::particles ({0,1,2}*4*numParticles floats) 2) CuCollision::mCapsuleIndices, mCapsuleMasks, mConvexMasks (numCapsules*4+numConvexes ints) 3) CuCollision::mPrevData (4*numSpheres+10*numCones floats) 4) CuCollision::collideConvexes() (4*numPlanes floats) 4) CuCollision::collideTriangles() (19*numTriangles floats) 4) CuCollision::mCurData::Spheres (4*numSpheres floats) 5) computeParticleBounds()::dst (192 floats written, 208 float read) 5) computeSphereBounds()::dst (192 floats written, 208 floats read) 5) CuCollision::mCurData::Cones (10*numCones floats) 6) CuCollision::mShapeGrid (2*6*sGridSize=96 floats) 4) CuSelfCollision::buildAcceleration()::buffer (34*16=544 ints) */ extern __shared__ float gSharedMemory[]; extern __shared__ int32_t gSharedSigned[]; extern __shared__ uint32_t gSharedUnsigned[]; /***************** Pointer Wrappers **********************/ enum AddressSpace { Shared, Global }; template <AddressSpace, typename T> __device__ T load(const T* ptr); template <AddressSpace, typename T> __device__ void store(T* ptr, const T& value); #if !CONVERT_ADDRESSES template <AddressSpace, typename T> __device__ T load(const T* ptr) { return *ptr; } template <AddressSpace, typename T> __device__ void store(T* ptr, const T& value) { *ptr = value; } #else template <> __device__ float load<Shared>(const float* ptr) { float value; asm("ld.shared.f32 %0, [%1];" : "=f"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ int32_t load<Shared>(const int32_t* ptr) { int32_t value; asm("ld.shared.s32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ uint32_t load<Shared>(const uint32_t* ptr) { uint32_t value; asm("ld.shared.u32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ void store<Shared>(int32_t* ptr, const int32_t& value) { asm("st.shared.s32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory"); } template <> __device__ void store<Shared>(float* ptr, const float& value) { asm("st.shared.f32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "f"(value) : "memory"); } template <> __device__ void store<Shared>(uint32_t* ptr, const uint32_t& value) { asm("st.shared.u32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory"); } template <> __device__ float load<Global>(const float* ptr) { float value; asm("ld.global.f32 %0, [%1];" : "=f"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ int32_t load<Global>(const int32_t* ptr) { int32_t value; asm("ld.global.s32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ uint32_t load<Global>(const uint32_t* ptr) { uint32_t value; asm("ld.global.u32 %0, [%1];" : "=r"(value) : POINTER_CONSTRAINT(ptr)); return value; } template <> __device__ void store<Global>(int32_t* ptr, const int32_t& value) { asm("st.global.s32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory"); } template <> __device__ void store<Global>(float* ptr, const float& value) { asm("st.global.f32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "f"(value) : "memory"); } template <> __device__ void store<Global>(uint32_t* ptr, const uint32_t& value) { asm("st.global.u32 [%0], %1;" : : POINTER_CONSTRAINT(ptr), "r"(value) : "memory"); } #endif template <AddressSpace, typename> struct Pointer; template <AddressSpace S, typename T> struct Reference { template <AddressSpace, typename> friend struct Reference; friend struct Pointer<S, T>; __device__ 
Reference() { } __device__ Reference(const Reference& other) : mPtr(other.mPtr) { } template <typename U> __device__ Reference(const Reference<S, U>& other) : mPtr(other.mPtr) { } __device__ Reference& operator = (const Reference& other) { return *this = static_cast<T>(other); } template <typename U> __device__ Reference& operator = (const Reference<S, U>& other) { return *this = static_cast<U>(other); } __device__ Reference& operator += (const T& value) { return *this = *this + value; } __device__ Reference& operator |= (const T& value) { return *this = *this | value; } __device__ Reference& operator &= (const T& value) { return *this = *this & value; } __device__ Reference& operator *= (const T& value) { return *this = *this * value; } __device__ operator T() const { return load<S>(mPtr); } __device__ Reference& operator = (const T& value) { store<S>(mPtr, value); return *this; } //private: T* mPtr; __device__ explicit Reference(T& ref) : mPtr(&ref) { } template <typename U> friend __device__ void atomicAdd(Reference& ref, U value) { ::atomicAdd(ref.mPtr, value); } }; template <AddressSpace S, typename T> struct Convert { static __device__ T* from(T* ptr) { return ptr; } static __device__ T* to(T* ptr) { return ptr; } }; #if CONVERT_ADDRESSES template <typename T> struct Convert<Shared, T> { static __device__ T* from(T* ptr) { asm("cvta.shared." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr)); return ptr; } static __device__ T* to(T* ptr) { asm("cvta.to.shared." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr)); return ptr; } }; template <typename T> struct Convert<Global, T> { static __device__ T* from(T* ptr) { asm("cvta.global." POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr)); return ptr; } static __device__ T* to(T* ptr) { asm("cvta.to.global." 
POINTER_TYPE " %0, %0;" : "+" POINTER_CONSTRAINT(ptr)); return ptr; } }; #endif template <AddressSpace S, typename T> __device__ T* generic(const Pointer<S, T>&); // pointer forced to point to shared memory (only works for sizeof(T) <= 4) template <AddressSpace S, typename T> struct Pointer { template <AddressSpace, typename> friend struct Pointer; friend __device__ T* generic<S, T>(const Pointer<S, T>&); friend struct GlobalParticleData; __device__ Pointer() { } __device__ Pointer(const Pointer& other) : mPtr(other.mPtr) { } template <typename U> __device__ Pointer(const Pointer<S, U>& other) : mPtr(other.mPtr) { } // construct from generic pointer __device__ explicit Pointer(T* ptr) : mPtr(Convert<S, T>::to(ptr)) { } __device__ bool operator!=(const Pointer& other) const { return mPtr != other.mPtr; } __device__ bool operator<(const Pointer& other) const { return mPtr < other.mPtr; } __device__ Pointer operator + (ptrdiff_t i) const { return Pointer(*this) += i; } __device__ Pointer& operator += (ptrdiff_t i) { mPtr += i * stride(); return *this; } __device__ Pointer operator - (ptrdiff_t i) const { return Pointer(*this) -= i; } __device__ Pointer& operator -= (ptrdiff_t i) { mPtr -= i * stride(); return *this; } __device__ Pointer& operator ++ () { mPtr += stride(); return *this; } __device__ Pointer& operator -- () { mPtr -= stride(); return *this; } __device__ Reference<S, T> operator*() const { return Reference<S, T>(*mPtr); } __device__ Reference<S, T> operator[](int32_t i) const { return Reference<S, T>(mPtr[i * stride()]); } private: // convert back to generic pointer, private for safety, use generic() instead __device__ operator T*() const { return Convert<S, T>::from(mPtr); } __device__ static size_t stride() { return 1; } template <typename U> __device__ Pointer(const Pointer<S, U>& other, ptrdiff_t stridedOffset) : mPtr(other.mPtr + stridedOffset) { } T* mPtr; }; // pointers to global memory are all referring to particle data // stored as array of structs, so they have a stride of 4. 
template<> __device__ size_t Pointer<Global, float>::stride() { return 4; } template<> __device__ size_t Pointer<Global, const float>::stride() { return 4; } template <AddressSpace S, typename T> __device__ T* generic(const Pointer<S, T>& ptr) { return ptr; } #if !CONVERT_ADDRESSES template <typename T> __device__ T* generic(T* ptr) { return ptr; } #endif /***************** Particle Data **********************/ template <typename T> struct SharedParticleReference { __device__ operator float3() const { float3 result; result.x = mReferences[0]; result.y = mReferences[1]; result.z = mReferences[2]; return result; } __device__ SharedParticleReference& operator = (const float3& vec) { mReferences[0] = vec.x; mReferences[1] = vec.y; mReferences[2] = vec.z; return *this; } __device__ operator float4() const { float4 result; result.x = mReferences[0]; result.y = mReferences[1]; result.z = mReferences[2]; result.w = mReferences[3]; return result; } __device__ SharedParticleReference& operator = (const float4& vec) { mReferences[0] = vec.x; mReferences[1] = vec.y; mReferences[2] = vec.z; mReferences[3] = vec.w; return *this; } Reference<Shared, T> mReferences[4]; }; struct SharedParticleData { typedef float3 VectorType; typedef Pointer<Shared, float> PointerType; typedef Pointer<Shared, const float> ConstPointerType; typedef Reference<Shared, float> ReferenceType; typedef Reference<Shared, const float> ConstReferenceType; typedef SharedParticleReference<float> ParticleReferenceType; typedef SharedParticleReference<const float> ParticleConstReferenceType; __device__ ReferenceType operator()(int32_t index, int32_t element) { return mPointers[element][index]; } __device__ ConstReferenceType operator()(int32_t index, int32_t element) const { return mPointers[element][index]; } __device__ ParticleReferenceType operator()(int32_t index) { ParticleReferenceType result = { mPointers[0][index], mPointers[1][index], mPointers[2][index], mPointers[3][index] }; return result; } __device__ ParticleConstReferenceType operator()(int32_t index) const { ParticleConstReferenceType result = { mPointers[0][index], mPointers[1][index], mPointers[2][index], mPointers[3][index] }; return result; } __device__ const PointerType& operator[](int32_t element) { return mPointers[element]; } __device__ ConstPointerType operator[](int32_t element) const { return mPointers[element]; } PointerType mPointers[4]; }; template <typename T> struct GlobalParticleReference { __device__ GlobalParticleReference(Pointer<Global, T> ref) : mPtr(reinterpret_cast<T* const&>(ref)) { } #if CONVERT_ADDRESSES __device__ operator float4() const { float4 vec; asm("ld.global.v4.f32 {%0, %1, %2, %3}, [%4];" : "=f"(vec.x), "=f"(vec.y), "=f"(vec.z), "=f"(vec.w) : POINTER_CONSTRAINT(mPtr)); return vec; } __device__ GlobalParticleReference& operator = (const float4& vec) { asm("st.global.v4.f32 [%0], {%1, %2, %3, %4};" ::POINTER_CONSTRAINT(mPtr), "f"(vec.x), "f"(vec.y), "f"(vec.z), "f"(vec.w) : "memory"); return *this; } __device__ operator float3() const { float4 vec = *this; return make_float3(vec.x, vec.y, vec.z); } #else __device__ operator float4() const { return *reinterpret_cast<const float4*>(mPtr); } __device__ GlobalParticleReference& operator = (const float4& vec) { *reinterpret_cast<float4*>(mPtr) = vec; return *this; } __device__ operator float3() const { return *reinterpret_cast<const float3*>(mPtr); } __device__ GlobalParticleReference& operator = (const float3& vec) { *reinterpret_cast<float3*>(mPtr) = vec; return *this; } #endif T* mPtr; // 
pointer to global address }; struct GlobalParticleData { #if CONVERT_ADDRESSES // ld.global.v4 saturates memory bandwidth better than 3x ld.global typedef float4 VectorType; #else // the same isn't true for ld without state space typedef float3 VectorType; #endif typedef Pointer<Global, float> PointerType; typedef Pointer<Global, const float> ConstPointerType; typedef Reference<Global, float> ReferenceType; typedef Reference<Global, const float> ConstReferenceType; typedef GlobalParticleReference<float> ParticleReferenceType; typedef GlobalParticleReference<const float> ParticleConstReferenceType; __device__ ReferenceType operator()(int32_t index, int32_t element) { return *PointerType(mPtr, index * 4 + element); } __device__ ConstReferenceType operator()(int32_t index, int32_t element) const { return *ConstPointerType(mPtr, index * 4 + element); } __device__ ParticleReferenceType operator()(int32_t index) { return PointerType(mPtr, index * 4); } __device__ ParticleConstReferenceType operator()(int32_t index) const { return ConstPointerType(mPtr, index * 4); } __device__ PointerType operator[](int32_t element) { return PointerType(mPtr, element); } __device__ ConstPointerType operator[](int32_t element) const { return ConstPointerType(mPtr, element); } PointerType mPtr; }; /***************** Profiling **********************/ struct ProfileDisabledZone { __device__ ProfileDisabledZone(cloth::CuProfileZoneIds::Enum) { } }; #if defined(__CUDA_ARCH__) && defined(PX_PROFILE) // profile zones enabled for profile build // code below is copied from GPUProfile.h and needs to be kept in sync. #define NUM_WARPS_PER_PROFILE_BUFFER (4 * 1024 * 1024) struct __align__(16) WarpProfileEvent { __device__ WarpProfileEvent(uint16_t id) : block(blockIdx.x + gridDim.x * blockIdx.y), warp(threadIdx.x >> 5), userData(0), eventId(id) { uint32_t smid32, warpid32; asm volatile("mov.u32 %0, %smid;" : "=r"(smid32)); asm volatile("mov.u32 %0, %warpid;" : "=r"(warpid32)); asm volatile("mov.u32 %0, %clock;" : "=r"(startTime)); smid = smid32; warpid = warpid32; endTime = startTime; } uint16_t block; uint8_t warp; uint8_t smid; uint8_t warpid; uint8_t userData; uint16_t eventId; uint32_t startTime; uint32_t endTime; }; struct ProfileZone { __device__ ProfileZone(cloth::CuProfileZoneIds::Enum id) : mEvent(0) { if (!gProfileBuffer || threadIdx.x & 0x1f) return; // +1: first entry reserved for counter uint32_t index = atomicAdd(reinterpret_cast<uint32_t*>(gProfileBuffer), 1) + 1; if (index >= NUM_WARPS_PER_PROFILE_BUFFER) return; mEvent = reinterpret_cast<WarpProfileEvent*>(gProfileBuffer) + index; new (mEvent) WarpProfileEvent(gProfileBaseId + id); } __device__ ~ProfileZone() { if (mEvent) mEvent->endTime = clock(); } WarpProfileEvent* mEvent; }; #else typedef ProfileDisabledZone ProfileZone; #endif #if 1 // set to 1 to enable detailed profile zones typedef ProfileZone ProfileDetailZone; #else typedef ProfileDisabledZone ProfileDetailZone; #endif namespace { // cut down version of thrust::uninitialized // avoids warning about non-empty c'tor template <typename T> struct uninitialized { __device__ inline T& get() { return *reinterpret_cast<T*>(data); } // maximum alignment required by device code is 16 __align__(16) unsigned char data[sizeof(T)]; }; } #if __CUDA_ARCH__ < 320 namespace { template <typename T> __device__ T __ldg(const T* __restrict ptr) { return *ptr; } } #endif #define CU_SOLVER_KERNEL_CU #include "CuCollision.h" #include "CuSelfCollision.h" namespace { __device__ void loadIterData(const 
cloth::CuIterationData* __restrict iterData) { if (threadIdx.x < gCuIterationDataSize) { gIterData.mIntegrationTrafo[threadIdx.x] = __ldg(iterData->mIntegrationTrafo + threadIdx.x); } } // integrate particle positions and store transposed template <bool IsTurning, typename CurrentT, typename PreviousT> __device__ void integrateParticles(CurrentT& current, PreviousT& previous) { ProfileDetailZone zone(cloth::CuProfileZoneIds::INTEGRATE); const float* __restrict trafo = gIterData.mIntegrationTrafo; for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x) { float4 prev = previous(i); float4 next = current(i); float4 cur = { next.x, next.y, next.z, prev.w }; if (next.w == 0.0f) next.w = prev.w; if (next.w > 0.0f) { if (IsTurning) { next.x = next.x + trafo[3] + cur.x * trafo[15] + prev.x * trafo[6] + cur.y * trafo[16] + prev.y * trafo[7] + cur.z * trafo[17] + prev.z * trafo[8]; next.y = next.y + trafo[4] + cur.x * trafo[18] + prev.x * trafo[9] + cur.y * trafo[19] + prev.y * trafo[10] + cur.z * trafo[20] + prev.z * trafo[11]; next.z = next.z + trafo[5] + cur.x * trafo[21] + prev.x * trafo[12] + cur.y * trafo[22] + prev.y * trafo[13] + cur.z * trafo[23] + prev.z * trafo[14]; } else { next.x += (cur.x - prev.x) * trafo[6] + trafo[3]; next.y += (cur.y - prev.y) * trafo[9] + trafo[4]; next.z += (cur.z - prev.z) * trafo[12] + trafo[5]; } cur.x += trafo[0]; cur.y += trafo[1]; cur.z += trafo[2]; } current(i) = next; previous(i) = cur; } } template <typename CurrentT, typename PreviousT> __device__ void integrateParticles(CurrentT& current, PreviousT& previous) { if (gIterData.mIsTurning) integrateParticles<true>(current, previous); else integrateParticles<false>(current, previous); } template <typename CurrentT> __device__ void accelerateParticles(CurrentT& current) { // might be better to move this into integrate particles const float* __restrict accelerations = gFrameData.mParticleAccelerations; if (!accelerations) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::ACCELERATE); __syncthreads(); // looping with 4 instead of 1 thread per particle float sqrIterDt = ~threadIdx.x & 0x3 ? gFrameData.mIterDt * gFrameData.mIterDt : 0.0f; typename CurrentT::PointerType sharedCurPos = current[threadIdx.x % 4]; for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x) { // turning this into __ldg slows kernel down even without particle accelerations (!) 
if (current(i / 4, 3) > 0.0f) sharedCurPos[i / 4] += accelerations[i] * sqrIterDt; } __syncthreads(); } __device__ float3 operator + (const float3& u, const float3& v) { return make_float3(u.x + v.x, u.y + v.y, u.z + v.z); } __device__ float3 operator - (const float3& u, const float3& v) { return make_float3(u.x - v.x, u.y - v.y, u.z - v.z); } __device__ float3 operator*(float s, const float3& v) { return make_float3(v.x * s, v.y * s, v.z * s); } __device__ float dot3(const float3& u, const float3& v) { return u.x * v.x + u.y * v.y + u.z * v.z; } __device__ float3 cross3(const float3& u, const float3& v) { return make_float3(u.y * v.z - u.z * v.y, u.z * v.x - u.x * v.z, u.x * v.y - u.y * v.x); } __device__ void applyImpulse(SharedParticleData::ParticleReferenceType pos, const float3& impulse) { float scale = -pos.mReferences[3]; #if CONVERT_ADDRESSES // Use this instead of atomicAdd function to work around compiler issue treating the pointer as global memory instead of shared memory asm("red.shared.add.f32 [%0], %1;" ::POINTER_CONSTRAINT(pos.mReferences[0].mPtr), "f"(impulse.x * scale)); asm("red.shared.add.f32 [%0], %1;" ::POINTER_CONSTRAINT(pos.mReferences[1].mPtr), "f"(impulse.y * scale)); asm("red.shared.add.f32 [%0], %1;" ::POINTER_CONSTRAINT(pos.mReferences[2].mPtr), "f"(impulse.z * scale)); #else atomicAdd(pos.mReferences[0].mPtr, impulse.x * scale); atomicAdd(pos.mReferences[1].mPtr, impulse.y * scale); atomicAdd(pos.mReferences[2].mPtr, impulse.z * scale); #endif } __device__ void applyImpulse(GlobalParticleData::ParticleReferenceType pos, const float3& impulse) { float scale = -pos.mPtr[3]; atomicAdd(pos.mPtr + 0, impulse.x * scale); atomicAdd(pos.mPtr + 1, impulse.y * scale); atomicAdd(pos.mPtr + 2, impulse.z * scale); } template <bool IsTurning, typename CurrentT, typename PreviousT> __device__ void applyWind(CurrentT& current, PreviousT& previous) { const float dragCoefficient = gFrameData.mDragCoefficient; const float liftCoefficient = gFrameData.mLiftCoefficient; const float fluidDensity = gFrameData.mFluidDensity; const float itrDt = gFrameData.mIterDt; if (dragCoefficient == 0.0f && liftCoefficient == 0.0f) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::WIND); const float oneThird = 1 / 3.0f; float3 wind = make_float3(gIterData.mWind[0], gIterData.mWind[1], gIterData.mWind[2]); const uint16_t* tIt = gClothData.mTriangles; for (int32_t i = threadIdx.x; i < gClothData.mNumTriangles; i += blockDim.x) { uint16_t i0 = tIt[i * 3 + 0]; uint16_t i1 = tIt[i * 3 + 1]; uint16_t i2 = tIt[i * 3 + 2]; float3 c0 = current(i0); float3 c1 = current(i1); float3 c2 = current(i2); // float w1 = current(i0, 3); // float w2 = current(i1, 3); // float w2 = current(i2, 3); // // float wMult = w1 * w2 * w3; // float invMass = wMult < FLT_EPSILON ? 
0.f : w1 * w2 * w3 / (w1 * w2 + w1 * w3 + w2 * w3); float3 p0 = previous(i0); float3 p1 = previous(i1); float3 p2 = previous(i2); float3 cur = oneThird * (c0 + c1 + c2); float3 prev = oneThird * (p0 + p1 + p2); float3 delta = cur - prev + wind; if (IsTurning) { const float3* rot = reinterpret_cast<const float3*>(gFrameData.mRotation); float3 d = wind - prev; delta = cur + d.x * rot[0] + d.y * rot[1] + d.z * rot[2]; } float3 normal = cross3(c2 - c0, c1 - c0); const float doubleArea = sqrtf(dot3(normal, normal)); normal = (1.0f / doubleArea) * normal; float invSqrScale = dot3(delta, delta); float scale = rsqrtf(invSqrScale); float deltaLength = sqrtf(invSqrScale); float cosTheta = dot3(normal, delta) * scale; float sinTheta = sqrtf(max(0.0f, 1.0f - cosTheta * cosTheta)); float3 liftDir = cross3(cross3(delta, normal), scale * delta); float3 lift = liftCoefficient * cosTheta * sinTheta * ((deltaLength / itrDt) * liftDir); float3 drag = dragCoefficient * abs(cosTheta) * ((deltaLength / itrDt) * delta); float3 impulse = invSqrScale < FLT_EPSILON ? make_float3(0.0f, 0.0f, 0.0f) : fluidDensity * doubleArea * (lift + drag); applyImpulse(current(i0), impulse); applyImpulse(current(i1), impulse); applyImpulse(current(i2), impulse); } __syncthreads(); } template <typename CurrentT, typename PreviousT> __device__ void applyWind(CurrentT& current, PreviousT& previous) { if (gIterData.mIsTurning) applyWind<true>(current, previous); else applyWind<false>(current, previous); } template <typename CurrentT> __device__ void constrainTether(CurrentT& current) { if (0.0f == gFrameData.mTetherConstraintStiffness || !gClothData.mNumTethers) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::TETHER); int32_t numParticles = gClothData.mNumParticles; int32_t numTethers = gClothData.mNumTethers; assert(0 == numTethers % numParticles); float stiffness = numParticles * __fdividef(gFrameData.mTetherConstraintStiffness, numTethers); float scale = gClothData.mTetherConstraintScale; const uint32_t* __restrict tIt = reinterpret_cast<const uint32_t*>(gClothData.mTethers); for (int32_t i = threadIdx.x; i < numParticles; i += blockDim.x) { float posX = current(i, 0); float posY = current(i, 1); float posZ = current(i, 2); float offsetX = 0.0f; float offsetY = 0.0f; float offsetZ = 0.0f; for (int32_t j = i; j < numTethers; j += gClothData.mNumParticles) { uint32_t tether = __ldg(tIt + j); int32_t anchor = tether & 0xffff; float deltaX = current(anchor, 0) - posX; float deltaY = current(anchor, 1) - posY; float deltaZ = current(anchor, 2) - posZ; float sqrLength = FLT_EPSILON + deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ; float radius = (tether >> 16) * scale; float slack = 1.0f - radius * rsqrtf(sqrLength); if (slack > 0.0f) { offsetX += deltaX * slack; offsetY += deltaY * slack; offsetZ += deltaZ * slack; } } current(i, 0) = posX + offsetX * stiffness; current(i, 1) = posY + offsetY * stiffness; current(i, 2) = posZ + offsetZ * stiffness; } } template <typename CurrentT> __device__ void solveFabric(CurrentT& current) { ProfileDetailZone zone(cloth::CuProfileZoneIds::FABRIC); const cloth::CuPhaseConfig* __restrict cIt = (cloth::CuPhaseConfig*)gSharedMemory; const cloth::CuPhaseConfig* cEnd = cIt + gClothData.mNumPhases; for (; cIt != cEnd; ++cIt) { __syncthreads(); ProfileDetailZone zone(cloth::CuProfileZoneIds::CONSTRAINT_SET); int32_t numConstraints = cIt->mNumConstraints; if (threadIdx.x >= numConstraints) continue; const uint32_t* __restrict iIt = reinterpret_cast<const uint32_t*>(cIt->mIndices) + threadIdx.x; 
const float* restvalues = cIt->mRestvalues; const float* rIt = restvalues + threadIdx.x; const float* rEnd = restvalues + numConstraints; const float* stIt = cIt->mStiffnessValues + threadIdx.x; bool useStiffnessPerConstraint = cIt->mStiffnessValues!=nullptr; uint32_t vpijPrefetch = __ldg(iIt); float rijPrefetch = __ldg(rIt); float stijPrefetch; if (useStiffnessPerConstraint) stijPrefetch = __ldg(stIt); float stiffness = cIt->mStiffness; float stiffnessMultiplier = cIt->mStiffnessMultiplier; float compressionLimit = cIt->mCompressionLimit; float stretchLimit = cIt->mStretchLimit; do { rIt += blockDim.x; iIt += blockDim.x; stIt += blockDim.x; int32_t vpi = USHRT_MAX & vpijPrefetch; int32_t vpj = USHRT_MAX & vpijPrefetch >> 16; float rij = rijPrefetch; float stij = useStiffnessPerConstraint?1.0f - exp2f(stijPrefetch * gFrameData.mStiffnessExponent):stiffness; if (rIt < rEnd) { vpijPrefetch = __ldg(iIt); rijPrefetch = __ldg(rIt); if (useStiffnessPerConstraint) stijPrefetch = __ldg(stIt); } float vxi = current(vpi, 0); float vyi = current(vpi, 1); float vzi = current(vpi, 2); float vwi = current(vpi, 3); float vxj = current(vpj, 0); float vyj = current(vpj, 1); float vzj = current(vpj, 2); float vwj = current(vpj, 3); float hxij = vxj - vxi; float hyij = vyj - vyi; float hzij = vzj - vzi; float e2ij = FLT_EPSILON + hxij * hxij + hyij * hyij + hzij * hzij; float negErij = rij > FLT_EPSILON ? -1.0f + rij * rsqrtf(e2ij) : 0.0f; negErij = negErij + stiffnessMultiplier * max(compressionLimit, min(-negErij, stretchLimit)); float negExij = __fdividef(negErij * stij, FLT_EPSILON + vwi + vwj); float vmi = -vwi * negExij; current(vpi, 0) = vxi + vmi * hxij; current(vpi, 1) = vyi + vmi * hyij; current(vpi, 2) = vzi + vmi * hzij; float vmj = +vwj * negExij; current(vpj, 0) = vxj + vmj * hxij; current(vpj, 1) = vyj + vmj * hyij; current(vpj, 2) = vzj + vmj * hzij; } while (rIt < rEnd); } __syncthreads(); } template <typename CurrentT> __device__ void constrainMotion(CurrentT& current, float alpha) { if (!gFrameData.mStartMotionConstraints) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::MOTION); // negative because of fused multiply-add optimization float negativeScale = -gClothData.mMotionConstraintScale; float negativeBias = -gClothData.mMotionConstraintBias; const float4* startIt = reinterpret_cast<const float4*>(gFrameData.mStartMotionConstraints); const float4* targetIt = reinterpret_cast<const float4*>(gFrameData.mTargetMotionConstraints); for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x) { float4 startPos = __ldg(startIt + i); float4 targetPos = __ldg(targetIt + i); float sphereX = startPos.x + (targetPos.x - startPos.x) * alpha; float sphereY = startPos.y + (targetPos.y - startPos.y) * alpha; float sphereZ = startPos.z + (targetPos.z - startPos.z) * alpha; float sphereW = startPos.w + (targetPos.w - startPos.w) * alpha; float dx = sphereX - current(i, 0); float dy = sphereY - current(i, 1); float dz = sphereZ - current(i, 2); float sqrLength = FLT_EPSILON + dx * dx + dy * dy + dz * dz; float negativeRadius = min(0.0f, sphereW * negativeScale + negativeBias); float slack = max(negativeRadius * rsqrtf(sqrLength) + 1.0f, 0.0f) * gFrameData.mMotionConstraintStiffness; current(i, 0) += slack * dx; current(i, 1) += slack * dy; current(i, 2) += slack * dz; // set invMass to zero if radius is zero if (negativeRadius >= 0.0f) current(i, 3) = 0.0f; } } template <typename T> __device__ void constrainSeparation(T& current, float alpha) { if 
(!gFrameData.mStartSeparationConstraints) return; ProfileDetailZone zone(cloth::CuProfileZoneIds::SEPARATION); const float4* startIt = reinterpret_cast<const float4*>(gFrameData.mStartSeparationConstraints); const float4* targetIt = reinterpret_cast<const float4*>(gFrameData.mTargetSeparationConstraints); for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x) { float4 startPos = __ldg(startIt + i); float4 targetPos = __ldg(targetIt + i); float sphereX = startPos.x + (targetPos.x - startPos.x) * alpha; float sphereY = startPos.y + (targetPos.y - startPos.y) * alpha; float sphereZ = startPos.z + (targetPos.z - startPos.z) * alpha; float sphereW = startPos.w + (targetPos.w - startPos.w) * alpha; float dx = sphereX - current(i, 0); float dy = sphereY - current(i, 1); float dz = sphereZ - current(i, 2); float sqrLength = FLT_EPSILON + dx * dx + dy * dy + dz * dz; float slack = min(0.0f, 1.0f - sphereW * rsqrtf(sqrLength)); current(i, 0) += slack * dx; current(i, 1) += slack * dy; current(i, 2) += slack * dz; } } template <typename CurrentT, typename PreviousT> __device__ void updateSleepState(const CurrentT& current, const PreviousT& previous) { ProfileDetailZone zone(cloth::CuProfileZoneIds::SLEEP); if (!threadIdx.x) gFrameData.mSleepTestCounter += max(1, uint32_t(gFrameData.mIterDt * 1000)); __syncthreads(); if (gFrameData.mSleepTestCounter < gClothData.mSleepTestInterval) return; float maxDelta = 0.0f; for (int32_t i = threadIdx.x; i < gClothData.mNumParticles; i += blockDim.x) { float4 prev = previous(i); maxDelta = max(fabsf(current(i, 0) - prev.x), maxDelta); maxDelta = max(fabsf(current(i, 1) - prev.y), maxDelta); maxDelta = max(fabsf(current(i, 2) - prev.z), maxDelta); } if (!threadIdx.x) { ++gFrameData.mSleepPassCounter; gFrameData.mSleepTestCounter -= gClothData.mSleepTestInterval; } __syncthreads(); if (maxDelta > gClothData.mSleepThreshold * gFrameData.mIterDt) gFrameData.mSleepPassCounter = 0; } template <typename CurrentT, typename PreviousT> __device__ void simulateCloth(CurrentT& current, PreviousT& previous) { // apply exponent to phase configs assert(blockDim.x >= gClothData.mNumPhases); if (threadIdx.x < gClothData.mNumPhases) { float exponent = gFrameData.mStiffnessExponent; float* ptr = gSharedMemory + threadIdx.x * gCuPhaseConfigSize; ptr[0] = 1.0f - exp2f(ptr[0] * exponent); ptr[1] = 1.0f - exp2f(ptr[1] * exponent); } uint32_t numIterations = gFrameData.mNumIterations; float invNumIterations = __fdividef(1.0f, numIterations); const cloth::CuIterationData* iterData = gFrameData.mIterationData; const cloth::CuIterationData* iterEnd = iterData + numIterations; loadIterData(iterData); __syncthreads(); for (float alpha = invNumIterations; iterData++ != iterEnd; alpha += invNumIterations) { integrateParticles(current, previous); accelerateParticles(current); applyWind(current, previous); constrainMotion(current, alpha); constrainTether(current); solveFabric(current); loadIterData(iterData); constrainSeparation(current, alpha); gCollideParticles.get()(current, previous, alpha); gSelfCollideParticles.get()(current); updateSleepState(current, previous); } __syncthreads(); } __device__ void simulateShared() { ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE_SHARED); __shared__ uninitialized<SharedParticleData> current; __shared__ uninitialized<SharedParticleData> previous; int32_t configDataSize = gClothData.mNumPhases * gCuPhaseConfigSize; int32_t particlesDataSize = 4 * gClothData.mNumParticles; Pointer<Shared, float> sharedCurPos = Pointer<Shared, 
float>(gSharedMemory + configDataSize + threadIdx.x % 4 * gClothData.mNumParticles); Pointer<Shared, float> sharedPrevPos = sharedCurPos + particlesDataSize; if (threadIdx.x < 4) { current.get().mPointers[threadIdx.x] = sharedCurPos; previous.get().mPointers[threadIdx.x] = sharedPrevPos; } float* globalCurPos = gClothData.mParticles; float* globalPrevPos = gClothData.mParticles + particlesDataSize; // copy particles from device memory to shared memory and transpose for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x) { sharedCurPos[i / 4] = globalCurPos[i]; sharedPrevPos[i / 4] = globalPrevPos[i]; } simulateCloth(current.get(), previous.get()); // copy particles from shared memory to device memory and transpose for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x) { globalCurPos[i] = sharedCurPos[i / 4]; globalPrevPos[i] = sharedPrevPos[i / 4]; } __syncthreads(); } __device__ void simulateStreamed() { ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE_STREAMED); __shared__ uninitialized<SharedParticleData> current; __shared__ uninitialized<GlobalParticleData> previous; int32_t configDataSize = gClothData.mNumPhases * gCuPhaseConfigSize; int32_t particlesDataSize = 4 * gClothData.mNumParticles; float* globalCurPos = gClothData.mParticles; Pointer<Shared, float> sharedCurPos = Pointer<Shared, float>(gSharedMemory + configDataSize + threadIdx.x % 4 * gClothData.mNumParticles); if (threadIdx.x < 4) current.get().mPointers[threadIdx.x] = sharedCurPos; if (!threadIdx.x) previous.get().mPtr = GlobalParticleData::PointerType(globalCurPos + particlesDataSize); // copy particles from device memory to shared memory and transpose for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x) sharedCurPos[i / 4] = globalCurPos[i]; simulateCloth(current.get(), previous.get()); // copy particles from shared memory to device memory and transpose for (int32_t i = threadIdx.x; i < particlesDataSize; i += blockDim.x) globalCurPos[i] = sharedCurPos[i / 4]; __syncthreads(); } __device__ void simulateGlobal() { ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE_GLOBAL); __shared__ uninitialized<GlobalParticleData> current; __shared__ uninitialized<GlobalParticleData> previous; if (!threadIdx.x) { GlobalParticleData::PointerType globalCurPos(gClothData.mParticles); current.get().mPtr = globalCurPos; previous.get().mPtr = globalCurPos + gClothData.mNumParticles; } simulateCloth(current.get(), previous.get()); } } // anonymous namespace extern "C" __global__ void #if __CUDA_ARCH__ >= 300 __launch_bounds__(1024, 1) #else __launch_bounds__(512, 1) #endif simulateCloths(cloth::CuKernelData kernelData) { gProfileBuffer = kernelData.mProfileBuffer; gProfileBaseId = kernelData.mProfileBaseId; ProfileZone zone(cloth::CuProfileZoneIds::SIMULATE); // check that http://nvbugs/1038473 is fixed assert(gSharedMemory > (float*)&gFrameData); assert(gSharedMemory > (float*)&gClothData); // fetch cloth index from queue __shared__ uint32_t clothIdx; if (!threadIdx.x) clothIdx = atomicInc(kernelData.mClothIndex, gridDim.x - 1); __syncthreads(); assert(clothIdx < gridDim.x); // copy cloth data to shared memory const uint32_t* clothData = reinterpret_cast<const uint32_t*>(kernelData.mClothData + clothIdx); if (threadIdx.x < gCuClothDataSize) reinterpret_cast<uint32_t*>(&gClothData)[threadIdx.x] = clothData[threadIdx.x]; // copy frame data to shared memory uint32_t* frameData = reinterpret_cast<uint32_t*>(kernelData.mFrameData + clothIdx); if (threadIdx.x < gCuFrameDataSize) 
reinterpret_cast<uint32_t*>(&gFrameData)[threadIdx.x] = frameData[threadIdx.x]; __syncthreads(); if (gFrameData.mSleepPassCounter >= gClothData.mSleepAfterCount) return; // cloth is sleeping, exit // copy phase configs to shared memory int32_t configDataSize = gClothData.mNumPhases * gCuPhaseConfigSize; for (int32_t i = threadIdx.x; i < configDataSize; i += blockDim.x) gSharedUnsigned[i] = reinterpret_cast<const uint32_t*>(gClothData.mPhaseConfigs)[i]; Pointer<Shared, uint32_t> scratchPtr = Pointer<Shared, uint32_t>( gSharedUnsigned + configDataSize + 4 * gFrameData.mNumSharedPositions * gClothData.mNumParticles); // initialize with placement new new (gCollideParticles.data) CuCollision(scratchPtr); new (gSelfCollideParticles.data) CuSelfCollision(); // copy particles and constraints to device if (gFrameData.mDeviceParticlesDirty) { for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 8; i += blockDim.x) gClothData.mParticles[i] = gClothData.mParticlesHostCopy[i]; } if (gFrameData.mHostMotionConstraints) { for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x) gFrameData.mTargetMotionConstraints[i] = gFrameData.mHostMotionConstraints[i]; } if (gFrameData.mHostSeparationConstraints) { for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x) gFrameData.mTargetSeparationConstraints[i] = gFrameData.mHostSeparationConstraints[i]; } if (gFrameData.mHostParticleAccelerations) { for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 4; i += blockDim.x) gFrameData.mParticleAccelerations[i] = gFrameData.mHostParticleAccelerations[i]; } // necessary to ensure phase configs are fully loaded before setup in simulateCloth() __syncthreads(); switch(gFrameData.mNumSharedPositions) { case 0: simulateGlobal(); break; case 1: simulateStreamed(); break; case 2: simulateShared(); break; } // write back frame data if (threadIdx.x < gCuFrameDataSize) frameData[threadIdx.x] = reinterpret_cast<const uint32_t*>(&gFrameData)[threadIdx.x]; // copy particles to host for (int32_t i = threadIdx.x; i < gClothData.mNumParticles * 8; i += blockDim.x) gClothData.mParticlesHostCopy[i] = gClothData.mParticles[i]; } const char* cloth::getKernelFunctionName() { return "simulateCloths"; }
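simulateShared() and simulateStreamed() above stage particles in shared memory in transposed form: global memory keeps interleaved x,y,z,w quadruplets, while shared memory keeps four planar arrays, one per component, which is what the four mPointers of SharedParticleData index into. The standalone sketch below illustrates the indexing trick in isolation; the kernel name and parameters are hypothetical, the Pointer<Shared, float> wrapper is omitted, and it assumes blockDim.x is a multiple of 4 so each thread always handles the same component.

__global__ void stageParticlesTransposed(const float* __restrict__ particlesIn,  // interleaved x,y,z,w
                                         float* particlesOut,                    // written back interleaved
                                         int numParticles)
{
    extern __shared__ float planes[];  // 4 * numParticles floats, one plane per component

    // Thread t only ever touches component t % 4, so it can keep a fixed
    // base pointer into that component's plane.
    float* myPlane = planes + (threadIdx.x % 4) * numParticles;

    // Gather: element i of the interleaved array is particle i/4, component i%4,
    // and i % 4 == threadIdx.x % 4 whenever blockDim.x is a multiple of 4.
    for (int i = threadIdx.x; i < 4 * numParticles; i += blockDim.x)
        myPlane[i / 4] = particlesIn[i];

    __syncthreads();

    // ... a solver would operate on the planar data here ...

    // Scatter back to the interleaved layout.
    for (int i = threadIdx.x; i < 4 * numParticles; i += blockDim.x)
        particlesOut[i] = myPlane[i / 4];
}

A launch such as stageParticlesTransposed<<<1, 256, 4 * numParticles * sizeof(float)>>>(in, out, numParticles) round-trips the data; the planar layout is what lets the solver read one component of many particles with stride-1 shared-memory accesses.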
c5f1feb4bd469e5082be0378015270d6f76f94d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <arbor/fvm_types.hpp> #include <arbor/gpu/gpu_api.hpp> #include <arbor/gpu/gpu_common.hpp> #include "matrix_common.hpp" #include "diffusion.hpp" namespace arb { namespace gpu { namespace kernels { /// GPU implementation of Hines matrix assembly. /// Fine layout. /// For a given time step size dt: /// - use the precomputed alpha and alpha_d values to construct the diagonal /// and off diagonal of the symmetric Hines matrix. /// - compute the RHS of the linear system to solve. template <typename T, typename I> __global__ void assemble_diffusion( T* __restrict__ const d, T* __restrict__ const rhs, const T* __restrict__ const invariant_d, const T* __restrict__ const concentration, const T* __restrict__ const voltage, const T* __restrict__ const current, const T q, const T* __restrict__ const conductivity, const T* __restrict__ const area, const T dt, const I* __restrict__ const perm, unsigned n) { const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < n) { const auto pid = perm[tid]; auto u = voltage[tid]; // mV auto g = conductivity[tid]; // S auto J = current[tid]; // A/m^2 auto A = 1e-3*area[tid]; // 1e-9m auto X = concentration[tid]; // mM // conversion from current density to concentration change // using Faraday's constant auto F = A/(q*96.485332); d[pid] = 1e-3/dt + F*g + invariant_d[tid]; rhs[pid] = 1e-3/dt*X + F*(u*g - J); } } /// GPU implementation of Hines Matrix solver. /// Fine-grained tree based solver. /// Each block solves a set of matricesb iterating over the levels of matrix /// and perfoming a backward and forward substitution. On each level one thread /// gets assigned to one branch on this level of a matrix and solves and /// performs the substitution. Afterwards all threads continue on the next /// level. /// To avoid idle threads, one should try that on each level, there is a similar /// number of branches. template <typename T> __global__ void solve_diffusion( T* __restrict__ const rhs, T* __restrict__ const d, const T* __restrict__ const u, const level_metadata* __restrict__ const level_meta, const arb_index_type* __restrict__ const level_lengths, const arb_index_type* __restrict__ const level_parents, const arb_index_type* __restrict__ const block_index, const arb_index_type* __restrict__ const num_matrix) // number of packed matrices = number of cells { const auto tid = threadIdx.x; const auto bid = blockIdx.x; const auto first_level = block_index[bid]; const auto num_levels = block_index[bid + 1] - first_level; const auto block_level_meta = &level_meta[first_level]; // backward substitution for (unsigned l=0; l<num_levels-1; ++l) { // Metadata for this level and the next level const auto& lvl_meta = block_level_meta[l]; const auto& next_lvl_meta = block_level_meta[l+1]; // Addresses of the first elements of level_lengths and level_parents // that belong to this level const auto lvl_lengths = level_lengths + lvl_meta.level_data_index; const auto lvl_parents = level_parents + lvl_meta.level_data_index; const unsigned width = lvl_meta.num_branches; // Perform backward substitution for each branch on this level. // One thread per branch. 
if (tid < width) { const unsigned len = lvl_lengths[tid]; unsigned pos = lvl_meta.matrix_data_index + tid; // each branch perform substitution for (unsigned i=0; i<len-1; ++i) { const unsigned next_pos = pos + width; const auto d_next = d[next_pos]; const auto rhs_next = rhs[next_pos]; const T factor = -u[pos]/d[pos]; d[next_pos] = fma(factor, u[pos], d_next); rhs[next_pos] = fma(factor, rhs[pos], rhs_next); pos = next_pos; } // Update d and rhs at the parent node of this branch. // A parent may have more than one contributing to it, so we use // atomic updates to avoid races conditions. const unsigned parent_index = next_lvl_meta.matrix_data_index; const unsigned p = parent_index + lvl_parents[tid]; const T factor = -u[pos] / d[pos]; gpu_atomic_add(d + p, factor*u[pos]); gpu_atomic_add(rhs + p, factor*rhs[pos]); } __syncthreads(); } // Solve the root { // The levels are sorted such that the root is the last level const auto& last_lvl_meta = block_level_meta[num_levels-1]; const auto lvl_lengths = level_lengths + last_lvl_meta.level_data_index; const unsigned width = num_matrix[bid]; if (tid < width) { const unsigned len = lvl_lengths[tid]; unsigned pos = last_lvl_meta.matrix_data_index + tid; // backward for (unsigned i=0; i<len-1; ++i) { const unsigned next_pos = pos + width; const T factor = -u[pos] / d[pos]; const auto rhs_next = rhs[next_pos]; const auto d_next = d[next_pos]; d[next_pos] = fma(factor, u[pos], d_next); rhs[next_pos] = fma(factor, rhs[pos], rhs_next); pos = next_pos; } auto rhsp = rhs[pos] / d[pos]; rhs[pos] = rhsp; pos -= width; // forward for (unsigned i=0; i<len-1; ++i) { rhsp = rhs[pos] - u[pos]*rhsp; rhsp /= d[pos]; rhs[pos] = rhsp; pos -= width; } } } // forward substitution // take great care with loop limits decrementing unsigned counter l for (unsigned l=num_levels-1; l>0; --l) { const auto& lvl_meta = block_level_meta[l-1]; // Addresses of the first elements of level_lengths and level_parents // that belong to this level const auto lvl_lengths = level_lengths + lvl_meta.level_data_index; const auto lvl_parents = level_parents + lvl_meta.level_data_index; const unsigned width = lvl_meta.num_branches; const unsigned parent_index = block_level_meta[l].matrix_data_index; __syncthreads(); // Perform forward-substitution for each branch on this level. // One thread per branch. if (tid < width) { // Find the index of the first node in this branch. const unsigned len = lvl_lengths[tid]; unsigned pos = lvl_meta.matrix_data_index + (len-1)*width + tid; // Load the rhs value for the parent node of this branch. const unsigned p = parent_index + lvl_parents[tid]; T rhsp = rhs[p]; // each branch perform substitution for (unsigned i=0; i<len; ++i) { rhsp = rhs[pos] - u[pos]*rhsp; rhsp /= d[pos]; rhs[pos] = rhsp; pos -= width; } } } } } // namespace kernels ARB_ARBOR_API void assemble_diffusion( arb_value_type* d, arb_value_type* rhs, const arb_value_type* invariant_d, const arb_value_type* concentration, const arb_value_type* voltage, const arb_value_type* current, arb_value_type q, const arb_value_type* conductivity, const arb_value_type* area, const arb_value_type dt, const arb_index_type* perm, unsigned n) { launch_1d(n, 128, kernels::assemble_diffusion<arb_value_type, arb_index_type>, d, rhs, invariant_d, concentration, voltage, current, q, conductivity, area, dt, perm, n); } // Example: // // block 0 block 1 block 2 // .~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~ ~ ~ // // L0 \ / L5 \ / // \/ \/ // L1 \ / \ / L3 \ / \ | / \ / L6 \ / . . . 
// \ / \ / \ / \|/ \ / \ / // L2 | | L4 | | | L7 | // | | | | | | // // levels = [L0, L1, L2, L3, L4, L5, L6, L7, ... ] // block_index = [0, 3, 5, 8, ...] // num_levels = [3, 2, 3, ...] // num_cells = [2, 3, ...] // num_blocks = level_start.size() - 1 = num_levels.size() = num_cells.size() ARB_ARBOR_API void solve_diffusion( arb_value_type* rhs, arb_value_type* d, // diagonal values const arb_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD) const level_metadata* level_meta, // information pertaining to each level const arb_index_type* level_lengths, // lengths of branches of every level concatenated const arb_index_type* level_parents, // parents of branches of every level concatenated const arb_index_type* block_index, // start index into levels for each gpu block arb_index_type* num_cells, // the number of cells packed into this single matrix arb_index_type* padded_size, // length of rhs, d, u, including padding unsigned num_blocks, // number of blocks unsigned blocksize) // size of each block { launch(num_blocks, blocksize, kernels::solve_diffusion<arb_value_type>, rhs, d, u, level_meta, level_lengths, level_parents, block_index, num_cells); } } // namespace gpu } // namespace arb
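For reference, the per-control-volume arithmetic in assemble_diffusion above can be checked on the host with a few lines of plain C++. This sketch is not part of the original file; the helper name and the numeric inputs are illustrative, and the unit conventions simply follow the comments in the kernel.

#include <cstdio>

// d and rhs for one control volume, mirroring the body of assemble_diffusion
static void assemble_one_cv(double u, double g, double J, double area,
                            double X, double q, double dt,
                            double invariant_d, double* d, double* rhs)
{
    double A = 1e-3 * area;            // scaled area, as in the kernel
    double F = A / (q * 96.485332);    // current -> concentration conversion
    *d   = 1e-3 / dt + F * g + invariant_d;
    *rhs = 1e-3 / dt * X + F * (u * g - J);
}

int main()
{
    double d, rhs;
    // all numbers below are made up, purely to exercise the formula
    assemble_one_cv(/*u=*/-65.0, /*g=*/1e-5, /*J=*/0.1, /*area=*/10.0,
                    /*X=*/1.2, /*q=*/2.0, /*dt=*/0.025, /*invariant_d=*/0.0,
                    &d, &rhs);
    std::printf("d = %g, rhs = %g\n", d, rhs);
    return 0;
}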
c5f1feb4bd469e5082be0378015270d6f76f94d4.cu
#include <arbor/fvm_types.hpp> #include <arbor/gpu/gpu_api.hpp> #include <arbor/gpu/gpu_common.hpp> #include "matrix_common.hpp" #include "diffusion.hpp" namespace arb { namespace gpu { namespace kernels { /// GPU implementation of Hines matrix assembly. /// Fine layout. /// For a given time step size dt: /// - use the precomputed alpha and alpha_d values to construct the diagonal /// and off diagonal of the symmetric Hines matrix. /// - compute the RHS of the linear system to solve. template <typename T, typename I> __global__ void assemble_diffusion( T* __restrict__ const d, T* __restrict__ const rhs, const T* __restrict__ const invariant_d, const T* __restrict__ const concentration, const T* __restrict__ const voltage, const T* __restrict__ const current, const T q, const T* __restrict__ const conductivity, const T* __restrict__ const area, const T dt, const I* __restrict__ const perm, unsigned n) { const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < n) { const auto pid = perm[tid]; auto u = voltage[tid]; // mV auto g = conductivity[tid]; // µS auto J = current[tid]; // A/m^2 auto A = 1e-3*area[tid]; // 1e-9·m² auto X = concentration[tid]; // mM // conversion from current density to concentration change // using Faraday's constant auto F = A/(q*96.485332); d[pid] = 1e-3/dt + F*g + invariant_d[tid]; rhs[pid] = 1e-3/dt*X + F*(u*g - J); } } /// GPU implementation of Hines Matrix solver. /// Fine-grained tree based solver. /// Each block solves a set of matricesb iterating over the levels of matrix /// and perfoming a backward and forward substitution. On each level one thread /// gets assigned to one branch on this level of a matrix and solves and /// performs the substitution. Afterwards all threads continue on the next /// level. /// To avoid idle threads, one should try that on each level, there is a similar /// number of branches. template <typename T> __global__ void solve_diffusion( T* __restrict__ const rhs, T* __restrict__ const d, const T* __restrict__ const u, const level_metadata* __restrict__ const level_meta, const arb_index_type* __restrict__ const level_lengths, const arb_index_type* __restrict__ const level_parents, const arb_index_type* __restrict__ const block_index, const arb_index_type* __restrict__ const num_matrix) // number of packed matrices = number of cells { const auto tid = threadIdx.x; const auto bid = blockIdx.x; const auto first_level = block_index[bid]; const auto num_levels = block_index[bid + 1] - first_level; const auto block_level_meta = &level_meta[first_level]; // backward substitution for (unsigned l=0; l<num_levels-1; ++l) { // Metadata for this level and the next level const auto& lvl_meta = block_level_meta[l]; const auto& next_lvl_meta = block_level_meta[l+1]; // Addresses of the first elements of level_lengths and level_parents // that belong to this level const auto lvl_lengths = level_lengths + lvl_meta.level_data_index; const auto lvl_parents = level_parents + lvl_meta.level_data_index; const unsigned width = lvl_meta.num_branches; // Perform backward substitution for each branch on this level. // One thread per branch. 
if (tid < width) { const unsigned len = lvl_lengths[tid]; unsigned pos = lvl_meta.matrix_data_index + tid; // each branch perform substitution for (unsigned i=0; i<len-1; ++i) { const unsigned next_pos = pos + width; const auto d_next = d[next_pos]; const auto rhs_next = rhs[next_pos]; const T factor = -u[pos]/d[pos]; d[next_pos] = fma(factor, u[pos], d_next); rhs[next_pos] = fma(factor, rhs[pos], rhs_next); pos = next_pos; } // Update d and rhs at the parent node of this branch. // A parent may have more than one contributing to it, so we use // atomic updates to avoid races conditions. const unsigned parent_index = next_lvl_meta.matrix_data_index; const unsigned p = parent_index + lvl_parents[tid]; const T factor = -u[pos] / d[pos]; gpu_atomic_add(d + p, factor*u[pos]); gpu_atomic_add(rhs + p, factor*rhs[pos]); } __syncthreads(); } // Solve the root { // The levels are sorted such that the root is the last level const auto& last_lvl_meta = block_level_meta[num_levels-1]; const auto lvl_lengths = level_lengths + last_lvl_meta.level_data_index; const unsigned width = num_matrix[bid]; if (tid < width) { const unsigned len = lvl_lengths[tid]; unsigned pos = last_lvl_meta.matrix_data_index + tid; // backward for (unsigned i=0; i<len-1; ++i) { const unsigned next_pos = pos + width; const T factor = -u[pos] / d[pos]; const auto rhs_next = rhs[next_pos]; const auto d_next = d[next_pos]; d[next_pos] = fma(factor, u[pos], d_next); rhs[next_pos] = fma(factor, rhs[pos], rhs_next); pos = next_pos; } auto rhsp = rhs[pos] / d[pos]; rhs[pos] = rhsp; pos -= width; // forward for (unsigned i=0; i<len-1; ++i) { rhsp = rhs[pos] - u[pos]*rhsp; rhsp /= d[pos]; rhs[pos] = rhsp; pos -= width; } } } // forward substitution // take great care with loop limits decrementing unsigned counter l for (unsigned l=num_levels-1; l>0; --l) { const auto& lvl_meta = block_level_meta[l-1]; // Addresses of the first elements of level_lengths and level_parents // that belong to this level const auto lvl_lengths = level_lengths + lvl_meta.level_data_index; const auto lvl_parents = level_parents + lvl_meta.level_data_index; const unsigned width = lvl_meta.num_branches; const unsigned parent_index = block_level_meta[l].matrix_data_index; __syncthreads(); // Perform forward-substitution for each branch on this level. // One thread per branch. if (tid < width) { // Find the index of the first node in this branch. const unsigned len = lvl_lengths[tid]; unsigned pos = lvl_meta.matrix_data_index + (len-1)*width + tid; // Load the rhs value for the parent node of this branch. const unsigned p = parent_index + lvl_parents[tid]; T rhsp = rhs[p]; // each branch perform substitution for (unsigned i=0; i<len; ++i) { rhsp = rhs[pos] - u[pos]*rhsp; rhsp /= d[pos]; rhs[pos] = rhsp; pos -= width; } } } } } // namespace kernels ARB_ARBOR_API void assemble_diffusion( arb_value_type* d, arb_value_type* rhs, const arb_value_type* invariant_d, const arb_value_type* concentration, const arb_value_type* voltage, const arb_value_type* current, arb_value_type q, const arb_value_type* conductivity, const arb_value_type* area, const arb_value_type dt, const arb_index_type* perm, unsigned n) { launch_1d(n, 128, kernels::assemble_diffusion<arb_value_type, arb_index_type>, d, rhs, invariant_d, concentration, voltage, current, q, conductivity, area, dt, perm, n); } // Example: // // block 0 block 1 block 2 // .~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~ ~ ~ // // L0 \ / L5 \ / // \/ \/ // L1 \ / \ / L3 \ / \ | / \ / L6 \ / . . . 
// \ / \ / \ / \|/ \ / \ / // L2 | | L4 | | | L7 | // | | | | | | // // levels = [L0, L1, L2, L3, L4, L5, L6, L7, ... ] // block_index = [0, 3, 5, 8, ...] // num_levels = [3, 2, 3, ...] // num_cells = [2, 3, ...] // num_blocks = level_start.size() - 1 = num_levels.size() = num_cells.size() ARB_ARBOR_API void solve_diffusion( arb_value_type* rhs, arb_value_type* d, // diagonal values const arb_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD) const level_metadata* level_meta, // information pertaining to each level const arb_index_type* level_lengths, // lengths of branches of every level concatenated const arb_index_type* level_parents, // parents of branches of every level concatenated const arb_index_type* block_index, // start index into levels for each gpu block arb_index_type* num_cells, // the number of cells packed into this single matrix arb_index_type* padded_size, // length of rhs, d, u, including padding unsigned num_blocks, // number of blocks unsigned blocksize) // size of each block { launch(num_blocks, blocksize, kernels::solve_diffusion<arb_value_type>, rhs, d, u, level_meta, level_lengths, level_parents, block_index, num_cells); } } // namespace gpu } // namespace arb
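On a single unbranched cable (one branch per level, no atomic parent updates needed) the Hines solve in solve_diffusion reduces to the Thomas algorithm for a symmetric tridiagonal system. The host sketch below mirrors the same elimination and back-substitution updates; it is not part of the original file, and the arrays and values are illustrative.

#include <cstdio>
#include <vector>

// Symmetric tridiagonal solve: diagonal d, off-diagonal u (u[i] couples
// nodes i and i+1). On return rhs holds the solution.
static void thomas_symmetric(std::vector<double>& d, const std::vector<double>& u,
                             std::vector<double>& rhs)
{
    const std::size_t n = d.size();
    // forward elimination, same update as the "backward substitution" loop above
    for (std::size_t i = 0; i + 1 < n; ++i) {
        double factor = -u[i] / d[i];
        d[i + 1]   += factor * u[i];
        rhs[i + 1] += factor * rhs[i];
    }
    // back substitution, same recurrence as the "forward substitution" loop above
    rhs[n - 1] /= d[n - 1];
    for (std::size_t i = n - 1; i-- > 0; )
        rhs[i] = (rhs[i] - u[i] * rhs[i + 1]) / d[i];
}

int main()
{
    std::vector<double> d   = {4.0, 4.0, 4.0, 4.0};
    std::vector<double> u   = {-1.0, -1.0, -1.0};
    std::vector<double> rhs = {1.0, 2.0, 2.0, 1.0};
    thomas_symmetric(d, u, rhs);
    for (double x : rhs) std::printf("%g ", x);   // rhs now holds the solution
    std::printf("\n");
    return 0;
}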
91b388e99a8b9f5477c54f576e79068b3a040126.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layers/softmax_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } template <typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = bottom[0]->count(); int channels = top[0]->shape(softmax_axis_); if (temperature_scaling_) { Dtype * cooled_bottom_data; cooled_bottom_data = cooled_bottom_.mutable_gpu_data(); caffe_gpu_scale(count, (Dtype)1.0/temperature_, bottom_data, cooled_bottom_data); bottom_data = cooled_bottom_data; } caffe_copy(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. 
// compute max // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_, scale_data, top_data); } template <typename Dtype> void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = top[0]->count(); int channels = top[0]->shape(softmax_axis_); caffe_copy(count, top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_diff, top_data, scale_data); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_, scale_data, bottom_diff); // elementwise multiplication caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
91b388e99a8b9f5477c54f576e79068b3a040126.cu
#include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layers/softmax_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } template <typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = bottom[0]->count(); int channels = top[0]->shape(softmax_axis_); if (temperature_scaling_) { Dtype * cooled_bottom_data; cooled_bottom_data = cooled_bottom_.mutable_gpu_data(); caffe_gpu_scale(count, (Dtype)1.0/temperature_, bottom_data, cooled_bottom_data); bottom_data = cooled_bottom_data; } caffe_copy(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. 
// compute max // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_data, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_, scale_data, top_data); } template <typename Dtype> void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = top[0]->count(); int channels = top[0]->shape(softmax_axis_); caffe_copy(count, top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff. // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_), CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_diff, top_data, scale_data); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_, scale_data, bottom_diff); // elementwise multiplication caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
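For a single spatial position, the kernel sequence above (channel max, subtract, exp, sum, divide, with optional temperature scaling) is just a numerically stable softmax. The host sketch below is not part of the original file; the function name and inputs are illustrative.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static std::vector<float> softmax_1d(std::vector<float> v, float temperature = 1.f)
{
    for (float& x : v) x /= temperature;                   // optional cooling step
    float m = *std::max_element(v.begin(), v.end());       // channel max
    float sum = 0.f;
    for (float& x : v) { x = std::exp(x - m); sum += x; }  // subtract + exp + sum
    for (float& x : v) x /= sum;                           // divide
    return v;
}

int main()
{
    for (float p : softmax_1d({1.f, 2.f, 3.f}, 2.f)) std::printf("%.4f ", p);
    std::printf("\n");
    return 0;
}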
6b85c2cf87f7e22e1a90af0278f32609b5b48631.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_runtime.h> #include "dali/kernels/imgproc/resample/resampling_batch.h" #include "dali/kernels/imgproc/resample/bilinear_impl.cuh" #include "dali/kernels/imgproc/resample/nearest_impl.cuh" #include "dali/kernels/imgproc/resample/resampling_impl.cuh" namespace dali { namespace kernels { namespace resampling { template <int spatial_ndim, typename Output, typename Input> __global__ void BatchedSeparableResampleKernel( int which_pass, const SampleDesc<spatial_ndim> *__restrict__ samples, const BlockDesc<spatial_ndim> *__restrict__ block2sample) { // find which part of which sample this block will process BlockDesc<spatial_ndim> bdesc = block2sample[blockIdx.x]; const auto &sample = samples[bdesc.sample_idx]; Output *__restrict__ sample_out; const Input *__restrict__ sample_in; ivec<spatial_ndim> in_shape; auto in_strides = sample.strides[which_pass]; auto out_strides = sample.strides[which_pass+1]; sample_in = reinterpret_cast<const Input*>(sample.pointers[which_pass]); sample_out = reinterpret_cast<Output*>(sample.pointers[which_pass+1]); in_shape = sample.shapes[which_pass]; int axis = sample.order[which_pass]; // vec-order: 0 = X, 1 = Y, 2 = Z ResamplingFilterType ftype = sample.filter_type[axis]; ResamplingFilter filter = sample.filter[axis]; int support = filter.support(); float origin = sample.origin[axis]; float scale = sample.scale[axis]; ivec<spatial_ndim> lo = bdesc.start, hi = bdesc.end; switch (ftype) { case ResamplingFilterType::Nearest: { vec<spatial_ndim> origin_v(0.0f), scale_v(1.0f); origin_v[axis] = origin; scale_v[axis] = scale; NNResample(lo, hi, origin_v, scale_v, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels); } break; case ResamplingFilterType::Linear: if (axis == 0) { LinearHorz(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels); } else if (axis == 1) { LinearVert(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels); } else { LinearDepth(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels); } break; default: if (axis == 0) { ResampleHorz(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels, filter, support); } else if (axis == 1) { ResampleVert(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels, filter, support); } else if (axis == 2) { ResampleDepth(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels, filter, support); } break; } } template <int spatial_ndim, typename Output, typename Input> void BatchedSeparableResample( int which_pass, const SampleDesc<spatial_ndim> *samples, const BlockDesc<spatial_ndim> *block2sample, int num_blocks, ivec3 block_size, hipStream_t stream) { if (num_blocks <= 
0) return; dim3 block(block_size.x, block_size.y, block_size.z); hipLaunchKernelGGL(( BatchedSeparableResampleKernel<spatial_ndim, Output, Input>) , dim3(num_blocks), dim3(block), ResampleSharedMemSize, stream, which_pass, samples, block2sample); CUDA_CALL(hipGetLastError()); } #define INSTANTIATE_BATCHED_RESAMPLE(spatial_ndim, Output, Input) \ template DLL_PUBLIC void BatchedSeparableResample<spatial_ndim, Output, Input>( \ int which_pass, \ const SampleDesc<spatial_ndim> *samples, \ const BlockDesc<spatial_ndim> *block2sample, int num_blocks, \ ivec3 block_size, hipStream_t stream) // Instantiate the resampling functions. // The resampling always goes through intermediate image of float type. // Currently limited to only uint8 <-> float and float <-> float // because the operator doesn't support anything else. // To be extended when we support more image types. INSTANTIATE_BATCHED_RESAMPLE(2, float, float); INSTANTIATE_BATCHED_RESAMPLE(2, float, uint8_t); INSTANTIATE_BATCHED_RESAMPLE(2, uint8_t, float); INSTANTIATE_BATCHED_RESAMPLE(2, float, int16_t); INSTANTIATE_BATCHED_RESAMPLE(2, int16_t, float); INSTANTIATE_BATCHED_RESAMPLE(2, uint16_t, float); INSTANTIATE_BATCHED_RESAMPLE(2, float, uint16_t); INSTANTIATE_BATCHED_RESAMPLE(2, int32_t, float); INSTANTIATE_BATCHED_RESAMPLE(2, float, int32_t); INSTANTIATE_BATCHED_RESAMPLE(3, float, float); INSTANTIATE_BATCHED_RESAMPLE(3, float, uint8_t); INSTANTIATE_BATCHED_RESAMPLE(3, uint8_t, float); INSTANTIATE_BATCHED_RESAMPLE(3, float, int16_t); INSTANTIATE_BATCHED_RESAMPLE(3, int16_t, float); INSTANTIATE_BATCHED_RESAMPLE(3, uint16_t, float); INSTANTIATE_BATCHED_RESAMPLE(3, float, uint16_t); INSTANTIATE_BATCHED_RESAMPLE(3, int32_t, float); INSTANTIATE_BATCHED_RESAMPLE(3, float, int32_t); } // namespace resampling } // namespace kernels } // namespace dali
6b85c2cf87f7e22e1a90af0278f32609b5b48631.cu
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda_runtime.h> #include "dali/kernels/imgproc/resample/resampling_batch.h" #include "dali/kernels/imgproc/resample/bilinear_impl.cuh" #include "dali/kernels/imgproc/resample/nearest_impl.cuh" #include "dali/kernels/imgproc/resample/resampling_impl.cuh" namespace dali { namespace kernels { namespace resampling { template <int spatial_ndim, typename Output, typename Input> __global__ void BatchedSeparableResampleKernel( int which_pass, const SampleDesc<spatial_ndim> *__restrict__ samples, const BlockDesc<spatial_ndim> *__restrict__ block2sample) { // find which part of which sample this block will process BlockDesc<spatial_ndim> bdesc = block2sample[blockIdx.x]; const auto &sample = samples[bdesc.sample_idx]; Output *__restrict__ sample_out; const Input *__restrict__ sample_in; ivec<spatial_ndim> in_shape; auto in_strides = sample.strides[which_pass]; auto out_strides = sample.strides[which_pass+1]; sample_in = reinterpret_cast<const Input*>(sample.pointers[which_pass]); sample_out = reinterpret_cast<Output*>(sample.pointers[which_pass+1]); in_shape = sample.shapes[which_pass]; int axis = sample.order[which_pass]; // vec-order: 0 = X, 1 = Y, 2 = Z ResamplingFilterType ftype = sample.filter_type[axis]; ResamplingFilter filter = sample.filter[axis]; int support = filter.support(); float origin = sample.origin[axis]; float scale = sample.scale[axis]; ivec<spatial_ndim> lo = bdesc.start, hi = bdesc.end; switch (ftype) { case ResamplingFilterType::Nearest: { vec<spatial_ndim> origin_v(0.0f), scale_v(1.0f); origin_v[axis] = origin; scale_v[axis] = scale; NNResample(lo, hi, origin_v, scale_v, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels); } break; case ResamplingFilterType::Linear: if (axis == 0) { LinearHorz(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels); } else if (axis == 1) { LinearVert(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels); } else { LinearDepth(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels); } break; default: if (axis == 0) { ResampleHorz(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels, filter, support); } else if (axis == 1) { ResampleVert(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels, filter, support); } else if (axis == 2) { ResampleDepth(lo, hi, origin, scale, sample_out, out_strides, sample_in, in_strides, in_shape, sample.channels, filter, support); } break; } } template <int spatial_ndim, typename Output, typename Input> void BatchedSeparableResample( int which_pass, const SampleDesc<spatial_ndim> *samples, const BlockDesc<spatial_ndim> *block2sample, int num_blocks, ivec3 block_size, cudaStream_t stream) { if (num_blocks <= 0) return; dim3 block(block_size.x, block_size.y, 
block_size.z); BatchedSeparableResampleKernel<spatial_ndim, Output, Input> <<<num_blocks, block, ResampleSharedMemSize, stream>>>(which_pass, samples, block2sample); CUDA_CALL(cudaGetLastError()); } #define INSTANTIATE_BATCHED_RESAMPLE(spatial_ndim, Output, Input) \ template DLL_PUBLIC void BatchedSeparableResample<spatial_ndim, Output, Input>( \ int which_pass, \ const SampleDesc<spatial_ndim> *samples, \ const BlockDesc<spatial_ndim> *block2sample, int num_blocks, \ ivec3 block_size, cudaStream_t stream) // Instantiate the resampling functions. // The resampling always goes through intermediate image of float type. // Currently limited to only uint8 <-> float and float <-> float // because the operator doesn't support anything else. // To be extended when we support more image types. INSTANTIATE_BATCHED_RESAMPLE(2, float, float); INSTANTIATE_BATCHED_RESAMPLE(2, float, uint8_t); INSTANTIATE_BATCHED_RESAMPLE(2, uint8_t, float); INSTANTIATE_BATCHED_RESAMPLE(2, float, int16_t); INSTANTIATE_BATCHED_RESAMPLE(2, int16_t, float); INSTANTIATE_BATCHED_RESAMPLE(2, uint16_t, float); INSTANTIATE_BATCHED_RESAMPLE(2, float, uint16_t); INSTANTIATE_BATCHED_RESAMPLE(2, int32_t, float); INSTANTIATE_BATCHED_RESAMPLE(2, float, int32_t); INSTANTIATE_BATCHED_RESAMPLE(3, float, float); INSTANTIATE_BATCHED_RESAMPLE(3, float, uint8_t); INSTANTIATE_BATCHED_RESAMPLE(3, uint8_t, float); INSTANTIATE_BATCHED_RESAMPLE(3, float, int16_t); INSTANTIATE_BATCHED_RESAMPLE(3, int16_t, float); INSTANTIATE_BATCHED_RESAMPLE(3, uint16_t, float); INSTANTIATE_BATCHED_RESAMPLE(3, float, uint16_t); INSTANTIATE_BATCHED_RESAMPLE(3, int32_t, float); INSTANTIATE_BATCHED_RESAMPLE(3, float, int32_t); } // namespace resampling } // namespace kernels } // namespace dali
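The kernel above handles one axis per pass; the separable structure itself can be illustrated by a small host-side 2D resize done as two 1D passes through an intermediate buffer. This sketch is not part of the original file, uses nearest-neighbour indexing purely for brevity, and does not reproduce DALI's exact origin/scale coordinate mapping; all names and sizes are illustrative.

#include <cstdio>
#include <vector>

// horizontal pass: resample each row from w_in to w_out columns
static std::vector<float> resample_rows_nn(const std::vector<float>& in,
                                           int w_in, int h, int w_out)
{
    std::vector<float> out(static_cast<size_t>(w_out) * h);
    for (int y = 0; y < h; ++y)
        for (int x = 0; x < w_out; ++x) {
            int sx = x * w_in / w_out;              // nearest source column
            out[y * w_out + x] = in[y * w_in + sx];
        }
    return out;
}

// vertical pass: resample each column from h_in to h_out rows
static std::vector<float> resample_cols_nn(const std::vector<float>& in,
                                           int w, int h_in, int h_out)
{
    std::vector<float> out(static_cast<size_t>(w) * h_out);
    for (int y = 0; y < h_out; ++y) {
        int sy = y * h_in / h_out;                  // nearest source row
        for (int x = 0; x < w; ++x)
            out[y * w + x] = in[sy * w + x];
    }
    return out;
}

int main()
{
    std::vector<float> img(4 * 4);
    for (int i = 0; i < 16; ++i) img[i] = static_cast<float>(i);
    auto pass1 = resample_rows_nn(img, 4, 4, 8);    // first pass: X axis
    auto pass2 = resample_cols_nn(pass1, 8, 4, 8);  // second pass: Y axis
    std::printf("resized to %zu px\n", pass2.size());
    return 0;
}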
30b828b09ca67e1316e37db70579c3731ad07707.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/psroi_pool_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/common/place.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaximumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaximumNumBlocks); } template <typename T> __global__ void GPUPSROIPoolForward(const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* output_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int input_channel = (c * pooled_height + ph) * pooled_width + pw; const T* offset_input_data = input_data + (roi_batch_id * input_channels + input_channel) * height * width; T outsum = 0; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; outsum += offset_input_data[input_index]; } } T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); output_data[i] = is_empty ? 0. : outsum / bin_area; } } template <typename T, typename Context> void PsroiPoolKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& rois, const paddle::optional<DenseTensor>& rois_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale, DenseTensor* out) { auto in_dims = x.dims(); int batch_size = in_dims[0]; int input_channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; PADDLE_ENFORCE_EQ( input_channels, output_channels * pooled_height * pooled_width, errors::InvalidArgument( "The channels %d of input X should equal the product of " "output_channels %d x pooled_height %d x pooled_width %d.", input_channels, output_channels, pooled_height, pooled_width)); int rois_num_t = rois.dims()[0]; if (rois_num_t == 0) return; int rois_batch_size; DenseTensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num_t}); int* rois_batch_id_data = ctx.template HostAlloc<int>(&rois_batch_id_list); if (rois_num.get_ptr()) { rois_batch_size = rois_num->numel(); auto* rois_num_data = rois_num->data<int>(); PADDLE_ENFORCE_EQ(rois_batch_size, batch_size, errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); std::vector<int> rois_num_list(rois_batch_size); memory_utils::Copy(CPUPlace(), rois_num_list.data(), ctx.GetPlace(), rois_num_data, sizeof(int) * rois_batch_size, 0); int rois_num_count = 0; for (int i = 0; i < rois_batch_size; ++i) { rois_num_count += rois_num_list[i]; } PADDLE_ENFORCE_EQ( rois_num_count, rois_num_t, errors::InvalidArgument( "the rois_num from input and RoisNum must be the same")); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois.lod().back(); rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ(rois_batch_size, batch_size, errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); int rois_num_with_lod = 
rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num_t, rois_num_with_lod, errors::InvalidArgument( "The number of rois from input(ROIs) and its LOD " "must be the same. Received rois %d of input(ROIs) " "but the number of rois %d from its LOD is %d", rois_num, rois_num_with_lod)); // set rois batch id for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } DenseTensor rois_batch_id_list_gpu; Copy(ctx, rois_batch_id_list, ctx.GetPlace(), false, &rois_batch_id_list_gpu); int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; // call cuda kernel function hipLaunchKernelGGL(( GPUPSROIPoolForward<T>) , dim3(blocks), dim3(threads), 0, ctx.stream(), output_size, x.data<T>(), rois.data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), ctx.template Alloc<T>(out)); } } // namespace phi PD_REGISTER_KERNEL( psroi_pool, GPU, ALL_LAYOUT, phi::PsroiPoolKernel, float, double) { kernel->InputAt(2).SetDataType(phi::CppTypeToDataType<int>::Type()); }
30b828b09ca67e1316e37db70579c3731ad07707.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/psroi_pool_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/common/place.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaximumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaximumNumBlocks); } template <typename T> __global__ void GPUPSROIPoolForward(const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int input_channels, const int height, const int width, const int output_channels, const int pooled_height, const int pooled_width, const int* rois_batch_id_data, T* output_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { // The output is in order (n, c, ph, pw) int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % output_channels; int n = i / pooled_width / pooled_height / output_channels; // set roi_batch_id int roi_batch_id = rois_batch_id_data[n]; // [start, end) interval for spatial sampling const T* offset_input_rois = input_rois + n * 4; T roi_start_w = static_cast<T>(round(offset_input_rois[0])) * spatial_scale; T roi_start_h = static_cast<T>(round(offset_input_rois[1])) * spatial_scale; T roi_end_w = static_cast<T>(round(offset_input_rois[2]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>(round(offset_input_rois[3]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 T roi_height = max(roi_end_h - roi_start_h, (T)0.1); // avoid 0 T roi_width = max(roi_end_w - roi_start_w, (T)0.1); // Compute w and h at input feature map T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor(bin_size_h * static_cast<T>(ph) + roi_start_h); int wstart = floor(bin_size_w * static_cast<T>(pw) + roi_start_w); int hend = ceil(bin_size_h * static_cast<T>(ph + 1) + roi_start_h); int wend = ceil(bin_size_w * static_cast<T>(pw + 1) + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int input_channel = (c * pooled_height + ph) * pooled_width + pw; const T* offset_input_data = input_data + (roi_batch_id * input_channels + input_channel) * height * width; T outsum = 0; for (int ih = hstart; ih < hend; ++ih) { for (int iw = wstart; iw < wend; ++iw) { int input_index = ih * width + iw; outsum += offset_input_data[input_index]; } } T bin_area = static_cast<T>((hend - hstart) * (wend - wstart)); output_data[i] = is_empty ? 0. : outsum / bin_area; } } template <typename T, typename Context> void PsroiPoolKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& rois, const paddle::optional<DenseTensor>& rois_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale, DenseTensor* out) { auto in_dims = x.dims(); int batch_size = in_dims[0]; int input_channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; PADDLE_ENFORCE_EQ( input_channels, output_channels * pooled_height * pooled_width, errors::InvalidArgument( "The channels %d of input X should equal the product of " "output_channels %d x pooled_height %d x pooled_width %d.", input_channels, output_channels, pooled_height, pooled_width)); int rois_num_t = rois.dims()[0]; if (rois_num_t == 0) return; int rois_batch_size; DenseTensor rois_batch_id_list; rois_batch_id_list.Resize({rois_num_t}); int* rois_batch_id_data = ctx.template HostAlloc<int>(&rois_batch_id_list); if (rois_num.get_ptr()) { rois_batch_size = rois_num->numel(); auto* rois_num_data = rois_num->data<int>(); PADDLE_ENFORCE_EQ(rois_batch_size, batch_size, errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); std::vector<int> rois_num_list(rois_batch_size); memory_utils::Copy(CPUPlace(), rois_num_list.data(), ctx.GetPlace(), rois_num_data, sizeof(int) * rois_batch_size, 0); int rois_num_count = 0; for (int i = 0; i < rois_batch_size; ++i) { rois_num_count += rois_num_list[i]; } PADDLE_ENFORCE_EQ( rois_num_count, rois_num_t, errors::InvalidArgument( "the rois_num from input and RoisNum must be the same")); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { rois_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois.lod().back(); rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ(rois_batch_size, batch_size, errors::InvalidArgument( "The batch size of input(ROIs) and input(X) must be " "the same but received batch size of input(ROIs) and " "input(X) is %d and %d respectively.", rois_batch_size, batch_size)); int rois_num_with_lod = 
rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num_t, rois_num_with_lod, errors::InvalidArgument( "The number of rois from input(ROIs) and its LOD " "must be the same. Received rois %d of input(ROIs) " "but the number of rois %d from its LOD is %d", rois_num, rois_num_with_lod)); // set rois batch id for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { rois_batch_id_data[i] = n; } } } DenseTensor rois_batch_id_list_gpu; Copy(ctx, rois_batch_id_list, ctx.GetPlace(), false, &rois_batch_id_list_gpu); int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; // call cuda kernel function GPUPSROIPoolForward<T> <<<blocks, threads, 0, ctx.stream()>>>(output_size, x.data<T>(), rois.data<T>(), spatial_scale, input_channels, height, width, output_channels, pooled_height, pooled_width, rois_batch_id_list_gpu.data<int>(), ctx.template Alloc<T>(out)); } } // namespace phi PD_REGISTER_KERNEL( psroi_pool, GPU, ALL_LAYOUT, phi::PsroiPoolKernel, float, double) { kernel->InputAt(2).SetDataType(phi::CppTypeToDataType<int>::Type()); }
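The heart of GPUPSROIPoolForward above is the per-bin average: output bin (ph, pw) of output channel c reads its own dedicated input channel, (c * pooled_height + ph) * pooled_width + pw, and averages the feature-map values inside its sub-window of the ROI. The host sketch below shows that bin computation for one already-scaled ROI; it is not part of the original file, the names and values are illustrative, and the kernel's ROI rounding details are simplified.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// average of one (ph, pw) bin over a single input channel of size h x w;
// the ROI corners are assumed to already be in feature-map coordinates
static float psroi_bin_average(const std::vector<float>& channel, int h, int w,
                               float y0, float x0, float y1, float x1,
                               int pooled_h, int pooled_w, int ph, int pw)
{
    float bin_h = (y1 - y0) / pooled_h;
    float bin_w = (x1 - x0) / pooled_w;
    int hstart = std::min(std::max((int)std::floor(y0 + ph * bin_h), 0), h);
    int hend   = std::min(std::max((int)std::ceil (y0 + (ph + 1) * bin_h), 0), h);
    int wstart = std::min(std::max((int)std::floor(x0 + pw * bin_w), 0), w);
    int wend   = std::min(std::max((int)std::ceil (x0 + (pw + 1) * bin_w), 0), w);
    if (hend <= hstart || wend <= wstart) return 0.f;   // empty bin
    float sum = 0.f;
    for (int y = hstart; y < hend; ++y)
        for (int x = wstart; x < wend; ++x)
            sum += channel[y * w + x];
    return sum / static_cast<float>((hend - hstart) * (wend - wstart));
}

int main()
{
    std::vector<float> channel(16 * 16, 1.0f);          // constant feature map
    float v = psroi_bin_average(channel, 16, 16,
                                /*y0=*/2.f, /*x0=*/2.f, /*y1=*/10.f, /*x1=*/10.f,
                                /*pooled_h=*/4, /*pooled_w=*/4, /*ph=*/1, /*pw=*/2);
    std::printf("bin average = %g\n", v);               // 1 for a constant map
    return 0;
}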
111ebe78551cd27b07ed43726cbf7405a023d663.hip
// !!! This is a file automatically generated by hipify!!! // generated by gen_batch_cuda_conv_bias_kern_impls.py #include "../batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4.cuinl" template void megdnn::cuda::batch_conv_bias:: do_batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4< PerChannelBiasVisitor, IConvEpilogue<Activation< megdnn::param_enumv::BatchConvBias::NonlineMode::RELU>>>( const int8_t* d_src, const int8_t* d_filter, int* d_workspace, PerChannelBiasVisitor bias, IConvEpilogue<Activation< megdnn::param_enumv::BatchConvBias::NonlineMode::RELU>> epilogue, const ConvParam& param, float alpha, float beta, hipStream_t stream);
111ebe78551cd27b07ed43726cbf7405a023d663.cu
// generated by gen_batch_cuda_conv_bias_kern_impls.py #include "../batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4.cuinl" template void megdnn::cuda::batch_conv_bias:: do_batch_conv_bias_int8_implicit_gemm_precomp_ncdiv4hw4< PerChannelBiasVisitor, IConvEpilogue<Activation< megdnn::param_enumv::BatchConvBias::NonlineMode::RELU>>>( const int8_t* d_src, const int8_t* d_filter, int* d_workspace, PerChannelBiasVisitor bias, IConvEpilogue<Activation< megdnn::param_enumv::BatchConvBias::NonlineMode::RELU>> epilogue, const ConvParam& param, float alpha, float beta, cudaStream_t stream);
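This pair of generated files does nothing but explicitly instantiate one template parameter combination, so the many combinations compile in separate translation units. The snippet below is a generic, self-contained sketch of that pattern and is not taken from MegDNN; every name in it is illustrative.

#include <cstdio>

// template definition that would normally live in a shared .cuinl/.inl header
template <typename Epilogue>
void run_kernel(float x, Epilogue epi) { std::printf("%f\n", epi(x)); }

struct Relu { float operator()(float x) const { return x > 0.f ? x : 0.f; } };

// explicit instantiation: forces code generation for this combination in this
// translation unit, exactly like the single "template void ...;" line above
template void run_kernel<Relu>(float, Relu);

int main() { run_kernel(-1.5f, Relu{}); return 0; }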
8813f7f62f69aeccd9e0033dcf8b3621869fdc65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHGrid.h" #include "common_hip.cuh" #include "THHNumerics.cuh" template<typename T> __global__ void gridKernel(int64_t *self, TensorInfo<T> posInfo, T *size, int64_t *count, ptrdiff_t nNodes) { KERNEL_LOOP(i, nNodes) { T *pos = posInfo.data + i * posInfo.stride[0]; int64_t coef = 1, value = 0; for (ptrdiff_t d = 0; d < posInfo.size[1]; d += posInfo.stride[1]) { value += coef * ScalarConvert<T, int64_t>::to(THCNumerics<T>::div(pos[d], size[d])); coef *= count[d]; } self[i] = value; } } #include "generic/THCGrid.cu" #include "THH/THHGenerateAllTypes.h"
8813f7f62f69aeccd9e0033dcf8b3621869fdc65.cu
#include "THCGrid.h" #include "common.cuh" #include "THCNumerics.cuh" template<typename T> __global__ void gridKernel(int64_t *self, TensorInfo<T> posInfo, T *size, int64_t *count, ptrdiff_t nNodes) { KERNEL_LOOP(i, nNodes) { T *pos = posInfo.data + i * posInfo.stride[0]; int64_t coef = 1, value = 0; for (ptrdiff_t d = 0; d < posInfo.size[1]; d += posInfo.stride[1]) { value += coef * ScalarConvert<T, int64_t>::to(THCNumerics<T>::div(pos[d], size[d])); coef *= count[d]; } self[i] = value; } } #include "generic/THCGrid.cu" #include "THC/THCGenerateAllTypes.h"
b2c20eee72824cb6043a01b3f06a0f638bb9407b.hip
// !!! This is a file automatically generated by hipify!!! /* * DoG3DFilter.cu * based on NVIDIA Toolkit Samples: convolutionSeparable * */ #include <assert.h> #include <hip/hip_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include "DoG3DFilter.cuh" // diameter should be odd number #define MAX_KERNEL_LENGTH 80 #define TRUNCATE 2.0 // max sigma = MAX_KERNEL_LENGTH/TRUNCATE = 20.0 /* * Convolution kernel storage * c_Kernel[0:MAX_KERNEL_LENGTH] : 1st XY * c_Kernel[MAX_KERNEL_LENGTH:MAX_KERNEL_LENGTH*2] : 1st Z * c_Kernel[MAX_KERNEL_LENGTH*2:MAX_KERNEL_LENGTH*3] : 2nd XY * c_Kernel[MAX_KERNEL_LENGTH*3:MAX_KERNEL_LENGTH*4] : 2nd Z */ __constant__ float c_Kernel[MAX_KERNEL_LENGTH * 4]; int radius_kernel[4]; float sigma_kernel[4]; // Row convolution // width = 2560 = 5 * 2^9 = divisible by 8*16 // radius should be <= 16*2 // sigma should be < 16*2 / 2 #define ROWS_BLOCKDIM_X 16 #define ROWS_BLOCKDIM_Y 4 #define ROWS_BLOCKDIM_Z 4 #define ROWS_RESULT_STEPS 8 #define ROWS_HALO_STEPS 2 // Column convolution // height = 2160 = 3^3 * 5 * 2^4 = divisible by 9*16 // radius should be <= 16*2 // sigma should be < 16*2/2 #define COLUMNS_BLOCKDIM_X 4 #define COLUMNS_BLOCKDIM_Y 16 #define COLUMNS_BLOCKDIM_Z 4 #define COLUMNS_RESULT_STEPS 9 #define COLUMNS_HALO_STEPS 2 // Layer convolution // depth = 32 = 2^5 = divisible by 8*4 // radius should be <= 8*2 // sigma should be < 8*2/2 #define LAYERS_BLOCKDIM_X 8 #define LAYERS_BLOCKDIM_Y 8 #define LAYERS_BLOCKDIM_Z 4 #define LAYERS_RESULT_STEPS 4 #define LAYERS_HALO_STEPS 2 // Subtract filter #define SUB_BLOCKDIM_X 8 #define SUB_BLOCKDIM_Y 8 #define SUB_BLOCKDIM_Z 8 /* * Row convolution filter */ __global__ void convolutionRows3DKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD, int kernel_index, int kernel_radius ) { __shared__ float s_Data[ROWS_BLOCKDIM_Z][ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; //Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * ROWS_BLOCKDIM_Z + threadIdx.z; d_Src += (baseZ * imageH + baseY) * imageW + baseX; d_Dst += (baseZ * imageH + baseY) * imageW + baseX; const float* kernel = &c_Kernel[kernel_index*MAX_KERNEL_LENGTH]; //Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } //Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } //Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X < imageW) ? 
            d_Src[i * ROWS_BLOCKDIM_X] : 0;
    }

    //Compute and store results
    __syncthreads();
#pragma unroll

    for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        float sum = 0;
        //#pragma unroll
        for (int j = -kernel_radius; j <= kernel_radius; j++)
        {
            sum += kernel[kernel_radius - j] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
        }

        d_Dst[i * ROWS_BLOCKDIM_X] = sum;
    }
}

extern "C" void convolutionRows3D(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= kernel_radius);
    assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0);
    assert(imageH % ROWS_BLOCKDIM_Y == 0);
    assert(imageD % ROWS_BLOCKDIM_Z == 0);

    dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y, imageD / ROWS_BLOCKDIM_Z);
    dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y, ROWS_BLOCKDIM_Z);

    hipLaunchKernelGGL(( convolutionRows3DKernel), dim3(blocks), dim3(threads), 0, 0,
        d_Dst,
        d_Src,
        imageW,
        imageH,
        imageD,
        kernel_index,
        kernel_radius
    );
    getLastCudaError("convolutionRows3DKernel() execution failed\n");
}

/*
 * Column convolution filter
 */
__global__ void convolutionColumns3DKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    __shared__ float s_Data[COLUMNS_BLOCKDIM_Z][COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];

    //Offset to the upper halo edge
    const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
    const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * COLUMNS_BLOCKDIM_Z + threadIdx.z;
    d_Src += (baseZ * imageH + baseY) * imageW + baseX;
    d_Dst += (baseZ * imageH + baseY) * imageW + baseX;

    const float* kernel = &c_Kernel[kernel_index*MAX_KERNEL_LENGTH];

    //Main data
#pragma unroll

    for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * imageW];
    }

    //Upper halo
#pragma unroll

    for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY + i * COLUMNS_BLOCKDIM_Y >= 0) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
    }

    //Lower halo
#pragma unroll

    for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (baseY + i * COLUMNS_BLOCKDIM_Y < imageH) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
    }

    //Compute and store results
    __syncthreads();
#pragma unroll

    for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
    {
        float sum = 0;
        //#pragma unroll
        for (int j = -kernel_radius; j <= kernel_radius; j++)
        {
            sum += kernel[kernel_radius - j] * s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
        }

        d_Dst[i * COLUMNS_BLOCKDIM_Y * imageW] = sum;
    }
}

extern "C" void convolutionColumns3D(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= kernel_radius);
    assert(imageW % COLUMNS_BLOCKDIM_X == 0);
    assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0);
    assert(imageD % COLUMNS_BLOCKDIM_Z == 0);

    dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y), imageD / COLUMNS_BLOCKDIM_Z);
    dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y, COLUMNS_BLOCKDIM_Z);

    hipLaunchKernelGGL(( convolutionColumns3DKernel), dim3(blocks), dim3(threads), 0, 0,
        d_Dst,
        d_Src,
        imageW,
        imageH,
        imageD,
        kernel_index,
        kernel_radius
    );
    getLastCudaError("convolutionColumns3DKernel() execution failed\n");
}

/*
 * Layer convolution filter
 */
__global__ void convolutionLayers3DKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    __shared__ float s_Data[LAYERS_BLOCKDIM_X][LAYERS_BLOCKDIM_Y][(LAYERS_RESULT_STEPS + 2 * LAYERS_HALO_STEPS) * LAYERS_BLOCKDIM_Z + 1];

    //Offset to the upper halo edge
    const int baseX = blockIdx.x * LAYERS_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * LAYERS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = (blockIdx.z * LAYERS_RESULT_STEPS - LAYERS_HALO_STEPS) * LAYERS_BLOCKDIM_Z + threadIdx.z;
    d_Src += (baseZ * imageH + baseY) * imageW + baseX;
    d_Dst += (baseZ * imageH + baseY) * imageW + baseX;

    const int pitch = imageW*imageH;
    const float* kernel = &c_Kernel[kernel_index*MAX_KERNEL_LENGTH];

    //Main data
#pragma unroll

    for (int i = LAYERS_HALO_STEPS; i < LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS; i++)
    {
        s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z] = d_Src[i * LAYERS_BLOCKDIM_Z * pitch];
    }

    //Upper halo
#pragma unroll

    for (int i = 0; i < LAYERS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z] = (baseZ + i * LAYERS_BLOCKDIM_Z >= 0) ? d_Src[i * LAYERS_BLOCKDIM_Z * pitch] : 0;
    }

    //Lower halo
#pragma unroll

    for (int i = LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS; i < LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS + LAYERS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z]= (baseZ + i * LAYERS_BLOCKDIM_Z < imageD) ? d_Src[i * LAYERS_BLOCKDIM_Z * pitch] : 0;
    }

    //Compute and store results
    __syncthreads();
#pragma unroll

    for (int i = LAYERS_HALO_STEPS; i < LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS; i++)
    {
        float sum = 0;
        //#pragma unroll
        for (int j = -kernel_radius; j <= kernel_radius; j++)
        {
            sum += kernel[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z + j];
        }

        d_Dst[i * LAYERS_BLOCKDIM_Z * pitch] = sum;
    }
}

extern "C" void convolutionLayers3D(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    assert(LAYERS_BLOCKDIM_Z * LAYERS_HALO_STEPS >= kernel_radius);
    assert(imageW % LAYERS_BLOCKDIM_X == 0);
    assert(imageH % LAYERS_BLOCKDIM_Y == 0);
    assert(imageD % (LAYERS_RESULT_STEPS * LAYERS_BLOCKDIM_Z) == 0);

    dim3 blocks(imageW / LAYERS_BLOCKDIM_X, imageH / LAYERS_BLOCKDIM_Y, imageD / (LAYERS_RESULT_STEPS * LAYERS_BLOCKDIM_Z));
    dim3 threads(LAYERS_BLOCKDIM_X, LAYERS_BLOCKDIM_Y, LAYERS_BLOCKDIM_Z);

    hipLaunchKernelGGL(( convolutionLayers3DKernel), dim3(blocks), dim3(threads), 0, 0,
        d_Dst,
        d_Src,
        imageW,
        imageH,
        imageD,
        kernel_index,
        kernel_radius
    );
    getLastCudaError("convolutionLayers3DKernel() execution failed\n");
}

__global__ void NormalizeSubtract3DKernel(float * img_src, const float * img_sub, const int width, const int height, const int depth, float normalizer)
{
    const int baseX = blockIdx.x * SUB_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * SUB_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * SUB_BLOCKDIM_Z + threadIdx.z;
    const int idx = (baseZ * height + baseY) * width + baseX;

    img_src[idx] = (img_src[idx] - img_sub[idx]) * normalizer;
}

extern "C" void NormalizeSubtract3DFilter(float *d_src, const float *d_sub, const int width, const int height, const int depth, float normalizer)
{
    assert(width % (SUB_BLOCKDIM_X) == 0);
    assert(height % (SUB_BLOCKDIM_Y) == 0);
    assert(depth % (SUB_BLOCKDIM_Z) == 0);

    dim3 blocks(width / (SUB_BLOCKDIM_X), height/(SUB_BLOCKDIM_Y), depth / (SUB_BLOCKDIM_Z));
    dim3 threads(SUB_BLOCKDIM_X, SUB_BLOCKDIM_Y, SUB_BLOCKDIM_Z);

    hipLaunchKernelGGL(( NormalizeSubtract3DKernel), dim3(blocks), dim3(threads), 0, 0, d_src, d_sub, width, height, depth, normalizer);
    getLastCudaError("Error: Subtract3DKernel() kernel execution FAILED!");
}

/*
 * Gaussian 3D filter
 */
extern "C" int calcGaussianWeight(float *h_kernel, float sigma)
{
    int lw = (int)(TRUNCATE * sigma + 0.5);
    int length = lw * 2 + 1;
    float p,sum;

    sum = 1.;
    h_kernel[lw] = 1.;
    sigma *= sigma;
    for(int i = 1; i <= lw; i++)
    {
        p = exp(-0.5 * i*i / sigma);
        h_kernel[lw-i] = p;
        h_kernel[lw+i] = p;
        sum += p * 2;
    }
    for(int i = 0; i < length; i++)
        h_kernel[i] /= sum;
    for(int i=length; i < MAX_KERNEL_LENGTH; i++)
        h_kernel[i] = 0;

    return lw;
}

extern "C" int initGaussian3DKernel(const float sigma_xy1, const float sigma_z1, const float sigma_xy2, const float sigma_z2){
    // save as global
    sigma_kernel[0] = sigma_xy1;
    sigma_kernel[1] = sigma_z1;
    sigma_kernel[2] = sigma_xy2;
    sigma_kernel[3] = sigma_z2;

    float *h_kernel = (float *)malloc(4*MAX_KERNEL_LENGTH*sizeof(float));
    radius_kernel[0] = calcGaussianWeight(&h_kernel[0], sigma_xy1);
    radius_kernel[1] = calcGaussianWeight(&h_kernel[MAX_KERNEL_LENGTH], sigma_z1);
    radius_kernel[2] = calcGaussianWeight(&h_kernel[MAX_KERNEL_LENGTH*2], sigma_xy2);
    radius_kernel[3] = calcGaussianWeight(&h_kernel[MAX_KERNEL_LENGTH*3], sigma_z2);

    hipMemcpyToSymbol(c_Kernel, h_kernel, 4*MAX_KERNEL_LENGTH * sizeof(float));
    getLastCudaError("Error: MemcpyToSymbol failed!");

    free(h_kernel);
    return radius_kernel[3];
}

extern "C" void Gaussian3DFilter(float *d_img, float *d_temp, float *d_result, const int sigma_num, const int width, const int height, const int depth)
{
    if(sigma_num == 1)
    {
        convolutionRows3D(d_result, d_img, width, height, depth, 0, radius_kernel[0]);
        convolutionColumns3D(d_temp, d_result, width, height, depth, 0, radius_kernel[0]);
        convolutionLayers3D(d_result, d_temp, width, height, depth, 1, radius_kernel[1]);
    }
    else if(sigma_num == 2){
        convolutionRows3D(d_result, d_img, width, height, depth, 2, radius_kernel[2]);
        convolutionColumns3D(d_temp, d_result, width, height, depth, 2, radius_kernel[2]);
        convolutionLayers3D(d_result, d_temp, width, height, depth, 3, radius_kernel[3]);
    }
    getLastCudaError("Error: convolutionRows3D() or convolutionColumns3D() or convolutionLayers3D() kernel execution FAILED!");
    //checkCudaErrors(hipDeviceSynchronize());
}

extern "C" void DoG3DFilter(float *d_img, float *d_temp1, float *d_temp2, float *d_result, const int width, const int height, const int depth, float gamma)
{
    float normalizer = powf(sigma_kernel[0], gamma*2)*powf(sigma_kernel[1], gamma);

    Gaussian3DFilter(d_img, d_temp1, d_temp2, 2, width, height, depth);
    Gaussian3DFilter(d_img, d_temp1, d_result, 1, width, height, depth);
    NormalizeSubtract3DFilter(d_result, d_temp2, width, height, depth, normalizer);
    //checkCudaErrors(hipDeviceSynchronize());
}
b2c20eee72824cb6043a01b3f06a0f638bb9407b.cu
/*
 * DoG3DFilter.cu
 * based on NVIDIA Toolkit Samples: convolutionSeparable
 *
 */

#include <assert.h>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "DoG3DFilter.cuh"

// diameter should be odd number
#define MAX_KERNEL_LENGTH 80
#define TRUNCATE 2.0 // max sigma = MAX_KERNEL_LENGTH/TRUNCATE = 20.0

/*
 * Convolution kernel storage
 * c_Kernel[0:MAX_KERNEL_LENGTH]                    : 1st XY
 * c_Kernel[MAX_KERNEL_LENGTH:MAX_KERNEL_LENGTH*2]  : 1st Z
 * c_Kernel[MAX_KERNEL_LENGTH*2:MAX_KERNEL_LENGTH*3]: 2nd XY
 * c_Kernel[MAX_KERNEL_LENGTH*3:MAX_KERNEL_LENGTH*4]: 2nd Z
 */
__constant__ float c_Kernel[MAX_KERNEL_LENGTH * 4];

int radius_kernel[4];
float sigma_kernel[4];

// Row convolution
// width = 2560 = 5 * 2^9 = divisible by 8*16
// radius should be <= 16*2
// sigma should be < 16*2 / 2
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 4
#define ROWS_BLOCKDIM_Z 4
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 2

// Column convolution
// height = 2160 = 3^3 * 5 * 2^4 = divisible by 9*16
// radius should be <= 16*2
// sigma should be < 16*2/2
#define COLUMNS_BLOCKDIM_X 4
#define COLUMNS_BLOCKDIM_Y 16
#define COLUMNS_BLOCKDIM_Z 4
#define COLUMNS_RESULT_STEPS 9
#define COLUMNS_HALO_STEPS 2

// Layer convolution
// depth = 32 = 2^5 = divisible by 8*4
// radius should be <= 8*2
// sigma should be < 8*2/2
#define LAYERS_BLOCKDIM_X 8
#define LAYERS_BLOCKDIM_Y 8
#define LAYERS_BLOCKDIM_Z 4
#define LAYERS_RESULT_STEPS 4
#define LAYERS_HALO_STEPS 2

// Subtract filter
#define SUB_BLOCKDIM_X 8
#define SUB_BLOCKDIM_Y 8
#define SUB_BLOCKDIM_Z 8

/*
 * Row convolution filter
 */
__global__ void convolutionRows3DKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    __shared__ float s_Data[ROWS_BLOCKDIM_Z][ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];

    //Offset to the left halo edge
    const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * ROWS_BLOCKDIM_Z + threadIdx.z;
    d_Src += (baseZ * imageH + baseY) * imageW + baseX;
    d_Dst += (baseZ * imageH + baseY) * imageW + baseX;

    const float* kernel = &c_Kernel[kernel_index*MAX_KERNEL_LENGTH];

    //Load main data
#pragma unroll

    for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
    }

    //Load left halo
#pragma unroll

    for (int i = 0; i < ROWS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
    }

    //Load right halo
#pragma unroll

    for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X < imageW) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
    }

    //Compute and store results
    __syncthreads();
#pragma unroll

    for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
    {
        float sum = 0;
        //#pragma unroll
        for (int j = -kernel_radius; j <= kernel_radius; j++)
        {
            sum += kernel[kernel_radius - j] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
        }

        d_Dst[i * ROWS_BLOCKDIM_X] = sum;
    }
}

extern "C" void convolutionRows3D(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= kernel_radius);
    assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0);
    assert(imageH % ROWS_BLOCKDIM_Y == 0);
    assert(imageD % ROWS_BLOCKDIM_Z == 0);

    dim3 blocks(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), imageH / ROWS_BLOCKDIM_Y, imageD / ROWS_BLOCKDIM_Z);
    dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y, ROWS_BLOCKDIM_Z);

    convolutionRows3DKernel<<<blocks, threads>>>(
        d_Dst,
        d_Src,
        imageW,
        imageH,
        imageD,
        kernel_index,
        kernel_radius
    );
    getLastCudaError("convolutionRows3DKernel() execution failed\n");
}

/*
 * Column convolution filter
 */
__global__ void convolutionColumns3DKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    __shared__ float s_Data[COLUMNS_BLOCKDIM_Z][COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];

    //Offset to the upper halo edge
    const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
    const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * COLUMNS_BLOCKDIM_Z + threadIdx.z;
    d_Src += (baseZ * imageH + baseY) * imageW + baseX;
    d_Dst += (baseZ * imageH + baseY) * imageW + baseX;

    const float* kernel = &c_Kernel[kernel_index*MAX_KERNEL_LENGTH];

    //Main data
#pragma unroll

    for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * imageW];
    }

    //Upper halo
#pragma unroll

    for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY + i * COLUMNS_BLOCKDIM_Y >= 0) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
    }

    //Lower halo
#pragma unroll

    for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (baseY + i * COLUMNS_BLOCKDIM_Y < imageH) ? d_Src[i * COLUMNS_BLOCKDIM_Y * imageW] : 0;
    }

    //Compute and store results
    __syncthreads();
#pragma unroll

    for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
    {
        float sum = 0;
        //#pragma unroll
        for (int j = -kernel_radius; j <= kernel_radius; j++)
        {
            sum += kernel[kernel_radius - j] * s_Data[threadIdx.z][threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
        }

        d_Dst[i * COLUMNS_BLOCKDIM_Y * imageW] = sum;
    }
}

extern "C" void convolutionColumns3D(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= kernel_radius);
    assert(imageW % COLUMNS_BLOCKDIM_X == 0);
    assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0);
    assert(imageD % COLUMNS_BLOCKDIM_Z == 0);

    dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y), imageD / COLUMNS_BLOCKDIM_Z);
    dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y, COLUMNS_BLOCKDIM_Z);

    convolutionColumns3DKernel<<<blocks, threads>>>(
        d_Dst,
        d_Src,
        imageW,
        imageH,
        imageD,
        kernel_index,
        kernel_radius
    );
    getLastCudaError("convolutionColumns3DKernel() execution failed\n");
}

/*
 * Layer convolution filter
 */
__global__ void convolutionLayers3DKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    __shared__ float s_Data[LAYERS_BLOCKDIM_X][LAYERS_BLOCKDIM_Y][(LAYERS_RESULT_STEPS + 2 * LAYERS_HALO_STEPS) * LAYERS_BLOCKDIM_Z + 1];

    //Offset to the upper halo edge
    const int baseX = blockIdx.x * LAYERS_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * LAYERS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = (blockIdx.z * LAYERS_RESULT_STEPS - LAYERS_HALO_STEPS) * LAYERS_BLOCKDIM_Z + threadIdx.z;
    d_Src += (baseZ * imageH + baseY) * imageW + baseX;
    d_Dst += (baseZ * imageH + baseY) * imageW + baseX;

    const int pitch = imageW*imageH;
    const float* kernel = &c_Kernel[kernel_index*MAX_KERNEL_LENGTH];

    //Main data
#pragma unroll

    for (int i = LAYERS_HALO_STEPS; i < LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS; i++)
    {
        s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z] = d_Src[i * LAYERS_BLOCKDIM_Z * pitch];
    }

    //Upper halo
#pragma unroll

    for (int i = 0; i < LAYERS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z] = (baseZ + i * LAYERS_BLOCKDIM_Z >= 0) ? d_Src[i * LAYERS_BLOCKDIM_Z * pitch] : 0;
    }

    //Lower halo
#pragma unroll

    for (int i = LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS; i < LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS + LAYERS_HALO_STEPS; i++)
    {
        s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z]= (baseZ + i * LAYERS_BLOCKDIM_Z < imageD) ? d_Src[i * LAYERS_BLOCKDIM_Z * pitch] : 0;
    }

    //Compute and store results
    __syncthreads();
#pragma unroll

    for (int i = LAYERS_HALO_STEPS; i < LAYERS_HALO_STEPS + LAYERS_RESULT_STEPS; i++)
    {
        float sum = 0;
        //#pragma unroll
        for (int j = -kernel_radius; j <= kernel_radius; j++)
        {
            sum += kernel[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_BLOCKDIM_Z + j];
        }

        d_Dst[i * LAYERS_BLOCKDIM_Z * pitch] = sum;
    }
}

extern "C" void convolutionLayers3D(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_index,
    int kernel_radius
)
{
    assert(LAYERS_BLOCKDIM_Z * LAYERS_HALO_STEPS >= kernel_radius);
    assert(imageW % LAYERS_BLOCKDIM_X == 0);
    assert(imageH % LAYERS_BLOCKDIM_Y == 0);
    assert(imageD % (LAYERS_RESULT_STEPS * LAYERS_BLOCKDIM_Z) == 0);

    dim3 blocks(imageW / LAYERS_BLOCKDIM_X, imageH / LAYERS_BLOCKDIM_Y, imageD / (LAYERS_RESULT_STEPS * LAYERS_BLOCKDIM_Z));
    dim3 threads(LAYERS_BLOCKDIM_X, LAYERS_BLOCKDIM_Y, LAYERS_BLOCKDIM_Z);

    convolutionLayers3DKernel<<<blocks, threads>>>(
        d_Dst,
        d_Src,
        imageW,
        imageH,
        imageD,
        kernel_index,
        kernel_radius
    );
    getLastCudaError("convolutionLayers3DKernel() execution failed\n");
}

__global__ void NormalizeSubtract3DKernel(float * img_src, const float * img_sub, const int width, const int height, const int depth, float normalizer)
{
    const int baseX = blockIdx.x * SUB_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * SUB_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * SUB_BLOCKDIM_Z + threadIdx.z;
    const int idx = (baseZ * height + baseY) * width + baseX;

    img_src[idx] = (img_src[idx] - img_sub[idx]) * normalizer;
}

extern "C" void NormalizeSubtract3DFilter(float *d_src, const float *d_sub, const int width, const int height, const int depth, float normalizer)
{
    assert(width % (SUB_BLOCKDIM_X) == 0);
    assert(height % (SUB_BLOCKDIM_Y) == 0);
    assert(depth % (SUB_BLOCKDIM_Z) == 0);

    dim3 blocks(width / (SUB_BLOCKDIM_X), height/(SUB_BLOCKDIM_Y), depth / (SUB_BLOCKDIM_Z));
    dim3 threads(SUB_BLOCKDIM_X, SUB_BLOCKDIM_Y, SUB_BLOCKDIM_Z);

    NormalizeSubtract3DKernel<<<blocks, threads>>>(d_src, d_sub, width, height, depth, normalizer);
    getLastCudaError("Error: Subtract3DKernel() kernel execution FAILED!");
}

/*
 * Gaussian 3D filter
 */
extern "C" int calcGaussianWeight(float *h_kernel, float sigma)
{
    int lw = (int)(TRUNCATE * sigma + 0.5);
    int length = lw * 2 + 1;
    float p,sum;

    sum = 1.;
    h_kernel[lw] = 1.;
    sigma *= sigma;
    for(int i = 1; i <= lw; i++)
    {
        p = exp(-0.5 * i*i / sigma);
        h_kernel[lw-i] = p;
        h_kernel[lw+i] = p;
        sum += p * 2;
    }
    for(int i = 0; i < length; i++)
        h_kernel[i] /= sum;
    for(int i=length; i < MAX_KERNEL_LENGTH; i++)
        h_kernel[i] = 0;

    return lw;
}

extern "C" int initGaussian3DKernel(const float sigma_xy1, const float sigma_z1, const float sigma_xy2, const float sigma_z2){
    // save as global
    sigma_kernel[0] = sigma_xy1;
    sigma_kernel[1] = sigma_z1;
    sigma_kernel[2] = sigma_xy2;
    sigma_kernel[3] = sigma_z2;

    float *h_kernel = (float *)malloc(4*MAX_KERNEL_LENGTH*sizeof(float));
    radius_kernel[0] = calcGaussianWeight(&h_kernel[0], sigma_xy1);
    radius_kernel[1] = calcGaussianWeight(&h_kernel[MAX_KERNEL_LENGTH], sigma_z1);
    radius_kernel[2] = calcGaussianWeight(&h_kernel[MAX_KERNEL_LENGTH*2], sigma_xy2);
    radius_kernel[3] = calcGaussianWeight(&h_kernel[MAX_KERNEL_LENGTH*3], sigma_z2);

    cudaMemcpyToSymbol(c_Kernel, h_kernel, 4*MAX_KERNEL_LENGTH * sizeof(float));
    getLastCudaError("Error: MemcpyToSymbol failed!");

    free(h_kernel);
    return radius_kernel[3];
}

extern "C" void Gaussian3DFilter(float *d_img, float *d_temp, float *d_result, const int sigma_num, const int width, const int height, const int depth)
{
    if(sigma_num == 1)
    {
        convolutionRows3D(d_result, d_img, width, height, depth, 0, radius_kernel[0]);
        convolutionColumns3D(d_temp, d_result, width, height, depth, 0, radius_kernel[0]);
        convolutionLayers3D(d_result, d_temp, width, height, depth, 1, radius_kernel[1]);
    }
    else if(sigma_num == 2){
        convolutionRows3D(d_result, d_img, width, height, depth, 2, radius_kernel[2]);
        convolutionColumns3D(d_temp, d_result, width, height, depth, 2, radius_kernel[2]);
        convolutionLayers3D(d_result, d_temp, width, height, depth, 3, radius_kernel[3]);
    }
    getLastCudaError("Error: convolutionRows3D() or convolutionColumns3D() or convolutionLayers3D() kernel execution FAILED!");
    //checkCudaErrors(cudaDeviceSynchronize());
}

extern "C" void DoG3DFilter(float *d_img, float *d_temp1, float *d_temp2, float *d_result, const int width, const int height, const int depth, float gamma)
{
    float normalizer = powf(sigma_kernel[0], gamma*2)*powf(sigma_kernel[1], gamma);

    Gaussian3DFilter(d_img, d_temp1, d_temp2, 2, width, height, depth);
    Gaussian3DFilter(d_img, d_temp1, d_result, 1, width, height, depth);
    NormalizeSubtract3DFilter(d_result, d_temp2, width, height, depth, normalizer);
    //checkCudaErrors(cudaDeviceSynchronize());
}
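// Illustrative usage sketch (not part of the original DoG3DFilter.cu; added here as a hedged
// example of how the two public entry points above fit together). It assumes hypothetical
// device buffers d_img, d_tmp1, d_tmp2 and d_out of width*height*depth floats that the caller
// has already allocated and filled. Per the asserts in the host wrappers, width must be a
// multiple of 128 (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), height a multiple of 144
// (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) and depth a multiple of 16
// (LAYERS_RESULT_STEPS * LAYERS_BLOCKDIM_Z); the sigma values are arbitrary and only need to
// keep the kernel radius (about TRUNCATE * sigma) within the halo limits checked above.
//
//     initGaussian3DKernel(2.0f, 1.5f, 3.2f, 2.4f);   // upload both XY/Z Gaussian kernels to c_Kernel
//     DoG3DFilter(d_img, d_tmp1, d_tmp2, d_out,       // difference-of-Gaussians written to d_out
//                 width, height, depth, 1.0f);        // gamma = 1.0 scales the normalizer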
07209a8f31393d7d7679a69f12e7c68070017235.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_pr.cu * * @brief Simple test driver program for computing Pagerank. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <iostream> #include <cstdlib> #include <fstream> #include <algorithm> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // PR includes #include <gunrock/app/pr/pr_enactor.cuh> #include <gunrock/app/pr/pr_problem.cuh> #include <gunrock/app/pr/pr_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <moderngpu.cuh> // boost includes #include <boost/config.hpp> #include <boost/utility.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/page_rank.hpp> using namespace gunrock; using namespace gunrock::app; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::pr; /****************************************************************************** * Defines, constants, globals ******************************************************************************/ template <typename VertexId, typename Value> struct RankPair { VertexId vertex_id; Value page_rank; RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {} }; template<typename RankPair> bool PRCompare( RankPair elem1, RankPair elem2) { return elem1.page_rank > elem2.page_rank; } /****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( "test <graph-type> [graph-type-arguments]\n" "Graph type and graph type arguments:\n" " market <matrix-market-file-name>\n" " Reads a Matrix-Market coordinate-formatted graph of\n" " directed/undirected edges from STDIN (or from the\n" " optionally-specified file).\n" " rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n" " Generate R-MAT graph as input\n" " --rmat_scale=<vertex-scale>\n" " --rmat_nodes=<number-nodes>\n" " --rmat_edgefactor=<edge-factor>\n" " --rmat_edges=<number-edges>\n" " --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n" " --rmat_seed=<seed>\n" " rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n" " Generate Random Geometry Graph as input\n" " --rgg_scale=<vertex-scale>\n" " --rgg_nodes=<number-nodes>\n" " --rgg_thfactor=<threshold-factor>\n" " --rgg_threshold=<threshold>\n" " --rgg_vmultipiler=<vmultipiler>\n" " --rgg_seed=<seed>\n\n" "Optional arguments:\n" "[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n" "[--instrumented] Keep kernels statics [Default: Disable].\n" " total_queued, search_depth and barrier duty.\n" " (a relative indicator of load imbalance.)\n" "[--quick] Skip the CPU reference validation process.\n" "[--disable-size-check] Disable frontier queue size check.\n" "[--grid-size=<grid size>] Maximum allowed grid size setting.\n" "[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). 
(Default: 1.0)\n" "[--in-sizing=<in/out_queue_scale_factor>]\n" " Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). (Default: 1.0)\n" "[--v] Print verbose per iteration debug info.\n" "[--iteration-num=<num>] Number of runs to perform the test.\n" "[--max-iter=<num>] Max iteration for rank score distribution\n" " before one round of PageRank run end.\n" "[--partition-method=<random|biasrandom|clustered|metis>]\n" " Choose partitioner (Default use random).\n" "[--delta=<delta>] Delta for PageRank (Default 0.85f).\n" "[--error=<error>] Error threshold for PageRank (Default 0.01f).\n" "[--quiet] No output (unless --json is specified).\n" "[--json] Output JSON-format statistics to STDOUT.\n" "[--jsonfile=<name>] Output JSON-format statistics to file <name>\n" "[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n" " where name is auto-generated.\n" ); } /** * @brief Displays the PageRank result * * @param[in] node Node vertex Id * @param[in] rank Rank value for the node * @param[in] nodes Number of nodes in the graph. */ template<typename VertexId, typename SizeT, typename Value> void DisplaySolution(VertexId *node, Value *rank, SizeT nodes) { SizeT top = (nodes < 10) ? nodes : 10; // at most top 10 ranked nodes printf("\nTop %lld Ranked Vertices and PageRanks:\n", (long long)top); for (SizeT i = 0; i < top; ++i) { printf("Vertex ID: %lld, PageRank: %.8le\n", (long long)node[i], (double)rank[i]); } } /** * @brief Compares the equivalence of two arrays. If incorrect, print the location * of the first incorrect value appears, the incorrect value, and the reference * value. * * @tparam T datatype of the values being compared with. * @tparam SizeT datatype of the array length. * * @param[in] computed Vector of values to be compared. * @param[in] reference Vector of reference values. * @param[in] len Vector length. * @param[in] verbose Whether to print values around the incorrect one. * @param[in] quiet Don't print out anything to stdout. * @param[in] threshold Results error checking threshold. * * \return Zero if two vectors are exactly the same, non-zero if there is any difference. */ template <typename SizeT, typename Value> int CompareResults_( Value* computed, Value* reference, SizeT len, bool verbose = true, bool quiet = false, Value threshold = 0.05f) { int flag = 0; for (SizeT i = 0; i < len; i++) { // Use relative error rate here. bool is_right = true; if (fabs(computed[i]) < 0.01f && fabs(reference[i] - 1) < 0.01f) continue; if (fabs(computed[i] - 0.0) < 0.01f) { if (fabs(computed[i] - reference[i]) > threshold) is_right = false; } else { if (fabs((computed[i] - reference[i]) / reference[i]) > threshold) is_right = false; } if (!is_right && flag == 0) { if (!quiet) { printf("\nINCORRECT: [%lu]: ", (unsigned long) i); PrintValue<Value>(computed[i]); printf(" != "); PrintValue<Value>(reference[i]); if (verbose) { printf("\nresult[..."); for (SizeT j = (i >= 5) ? i - 5 : 0; (j < i + 5) && (j < len); j++) { PrintValue<Value>(computed[j]); printf(", "); } printf("...]"); printf("\nreference[..."); for (SizeT j = (i >= 5) ? 
i - 5 : 0; (j < i + 5) && (j < len); j++) { PrintValue<Value>(reference[j]); printf(", "); } printf("...]"); } } flag += 1; } if (!is_right && flag > 0) flag += 1; } if (!quiet) { printf("\n"); if (!flag) { printf("CORRECT"); } } return flag; } /****************************************************************************** * PageRank Testing Routines *****************************************************************************/ /** * @brief A simple CPU-based reference Page Rank implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph we process on * @param[in] node_id Source node for personalized PageRank (if any) * @param[in] rank Host-side vector to store CPU computed labels for each node * @param[in] delta Delta for computing PR * @param[in] error Error threshold * @param[in] max_iteration Maximum iteration to go * @param[in] directed Whether the graph is directed * @param[in] quiet Don't print out anything to stdout */ template < typename VertexId, typename SizeT, typename Value > void ReferencePageRank( const Csr<VertexId, SizeT, Value> &graph, VertexId *node_id, Value *rank, Value delta, Value error, SizeT max_iteration, bool directed, bool quiet = false) { using namespace boost; // preparation typedef adjacency_list< vecS, vecS, bidirectionalS, no_property, property<edge_index_t, int> > Graph; Graph g; for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j) { Graph::edge_descriptor e = add_edge(i, graph.column_indices[j], g).first; put(edge_index, g, e, i); } } // compute PageRank CpuTimer cpu_timer; cpu_timer.Start(); std::vector<Value> ranks(num_vertices(g)); page_rank(g, make_iterator_property_map( ranks.begin(), get(boost::vertex_index, g)), boost::graph::n_iterations(max_iteration)); cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); for (std::size_t i = 0; i < num_vertices(g); ++i) { rank[i] = ranks[i]; } // Sort the top ranked vertices RankPair<SizeT, Value> *pr_list = (RankPair<SizeT, Value>*)malloc( sizeof(RankPair<SizeT, Value>) * num_vertices(g)); for (int i = 0; i < num_vertices(g); ++i) { pr_list[i].vertex_id = i; pr_list[i].page_rank = rank[i]; } std::stable_sort(pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >); for (int i = 0; i < num_vertices(g); ++i) { node_id[i] = pr_list[i].vertex_id; rank[i] = pr_list[i].page_rank; } free(pr_list); if (!quiet) { printf("CPU PageRank finished in %lf msec.\n", elapsed); } } /*template < typename VertexId, typename Value> class Sort_Pair { public: VertexId v; Value val; }; template < typename VertexId, typename Value> inline bool operator< (const Sort_Pair<VertexId, Value>& lhs, const Sort_Pair<VertexId, Value>& rhs) { if (lhs.val < rhs.val) return true; if (rhs.val < lhs.val) return false; return false; }*/ /** * @brief A simple CPU-based reference Page Rank implementation. 
* * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph we process on * @param[in] node_id Source node for personalized PageRank (if any) * @param[in] rank Host-side vector to store CPU computed labels for each node * @param[in] delta Delta for computing PR * @param[in] error Error threshold * @param[in] max_iteration Maximum iteration to go * @param[in] directed Whether the graph is directed * @param[in] quiet Don't print out anything to stdout * @param[in] scaled Normalized flag */ template < typename VertexId, typename SizeT, typename Value > void ReferencePageRank_Normalized( const Csr<VertexId, SizeT, Value> &graph, VertexId *node_id, Value *rank, Value delta, Value error, SizeT max_iteration, bool directed, bool quiet = false, bool scaled = false) { SizeT nodes = graph.nodes; Value *rank_current = (Value*) malloc (sizeof(Value) * nodes); Value *rank_next = (Value*) malloc (sizeof(Value) * nodes); bool to_continue = true; SizeT iteration = 0; Value reset_value = scaled ? 1.0 - delta : ((1.0 - delta) / (Value)nodes); CpuTimer cpu_timer; cpu_timer.Start(); //#pragma omp parallel { #pragma omp parallel for for (VertexId v=0; v<nodes; v++) { rank_current[v] = scaled ? 1.0 : (1.0 / (Value)nodes); rank_next [v] = 0; } while (to_continue) { to_continue = false; #pragma omp parallel for for (VertexId src=0; src<nodes; src++) { SizeT start_e = graph.row_offsets[src]; SizeT end_e = graph.row_offsets[src+1]; if (start_e == end_e) continue; // 0 out degree vertex Value dist_rank = rank_current[src] / (Value)(end_e - start_e); if (!isfinite(dist_rank)) continue; for (SizeT e = start_e; e < end_e; e++) { VertexId dest = graph.column_indices[e]; #pragma omp atomic rank_next[dest] += dist_rank; } } iteration ++; #pragma omp parallel for for (VertexId v=0; v<nodes; v++) { Value rank_new = delta * rank_next[v]; if (!isfinite(rank_new)) rank_new = 0; rank_new = rank_new + reset_value; if (iteration <= max_iteration && fabs(rank_new - rank_current[v]) > error * rank_current[v]) { to_continue = true; } rank_current[v] = rank_new; rank_next [v] = 0; } //#pragma omp single //{ // iteration ++; //} } } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); // Sort the top ranked vertices RankPair<SizeT, Value> *pr_list = (RankPair<SizeT, Value>*)malloc( sizeof(RankPair<SizeT, Value>) * nodes); #pragma omp parallel for for (VertexId i = 0; i < nodes; ++i) { pr_list[i].vertex_id = i; pr_list[i].page_rank = rank_current[i]; } std::stable_sort(pr_list, pr_list + nodes, PRCompare<RankPair<SizeT, Value> >); #pragma omp parallel for for (VertexId i = 0; i < nodes; ++i) { node_id[i] = pr_list[i].vertex_id; rank[i] = scaled ? (pr_list[i].page_rank / (Value)nodes) : pr_list[i].page_rank; } free(pr_list ); pr_list = NULL; free(rank_current); rank_current = NULL; free(rank_next ); rank_next = NULL; if (!quiet) { printf("CPU iteration : %lld\n", (long long)iteration); printf("CPU PageRank finished in %lf msec.\n", elapsed); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to info contains parameters and statistics. * * \return hipError_t object which indicates the success of * all CUDA function calls. 
*/ template < typename VertexId, typename SizeT, typename Value, bool NORMALIZED> hipError_t RunTests(Info<VertexId, SizeT, Value> *info) { typedef PRProblem <VertexId, SizeT, Value, NORMALIZED> Problem; typedef PREnactor <Problem> //INSTRUMENT, //DEBUG, //SIZE_CHECK > Enactor; // parse configurations from mObject info Csr<VertexId, SizeT, Value> *graph = info->csr_ptr; VertexId src = info->info["source_vertex" ].get_int64(); bool undirected = info->info["undirected" ].get_bool (); bool quiet_mode = info->info["quiet_mode" ].get_bool (); bool quick_mode = info->info["quick_mode" ].get_bool (); bool stream_from_host = info->info["stream_from_host" ].get_bool (); int max_grid_size = info->info["max_grid_size" ].get_int (); int num_gpus = info->info["num_gpus" ].get_int (); int max_iteration = info->info["max_iteration" ].get_int (); double max_queue_sizing = 0.0; //info->info["max_queue_sizing" ].get_real (); double max_queue_sizing1 = 0.0; //info->info["max_queue_sizing1"].get_real (); double max_in_sizing = 1.0; //info->info["max_in_sizing" ].get_real (); std::string partition_method = info->info["partition_method" ].get_str (); double partition_factor = info->info["partition_factor" ].get_real (); int partition_seed = info->info["partition_seed" ].get_int (); bool instrument = info->info["instrument" ].get_bool (); bool debug = info->info["debug_mode" ].get_bool (); bool size_check = info->info["size_check" ].get_bool (); int iterations = info->info["num_iteration" ].get_int (); std::string traversal_mode = info->info["traversal_mode" ].get_str (); std::string ref_filename = info->info["ref_filename" ].get_str (); Value delta = info->info["delta" ].get_real (); Value error = info->info["error" ].get_real (); bool scaled = info->info["scaled" ].get_bool (); bool compensate = info->info["compensate" ].get_bool (); int communicate_latency = info->info["communicate_latency"].get_int (); float communicate_multipy = info->info["communicate_multipy"].get_real(); int expand_latency = info->info["expand_latency" ].get_int (); int subqueue_latency = info->info["subqueue_latency" ].get_int (); int fullqueue_latency = info->info["fullqueue_latency" ].get_int (); int makeout_latency = info->info["makeout_latency" ].get_int (); if (communicate_multipy > 1) max_in_sizing *= communicate_multipy; CpuTimer cpu_timer; hipError_t retval = hipSuccess; cpu_timer.Start(); json_spirit::mArray device_list = info->info["device_list"].get_array(); int* gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int(); // TODO: remove after merge mgpu-cq ContextPtr *context = (ContextPtr*) info->context; hipStream_t *streams = (hipStream_t*)info->streams; size_t *org_size = new size_t[num_gpus]; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; if (retval = hipSetDevice(gpu_idx[gpu])) return retval; if (retval = hipMemGetInfo(&(org_size[gpu]), &dummy)) return retval; } if (compensate) { util::Array1D<SizeT, VertexId> zero_out_vertices; zero_out_vertices.Allocate(graph -> nodes, util::HOST); SizeT counter = 0; for (VertexId v = 0; v< graph->nodes; v++) if (graph -> row_offsets[v+1] == graph -> row_offsets[v]) { zero_out_vertices[counter] = v; counter ++; } if (counter != 0) { if (!quiet_mode) printf("Adding 1 vertex and %lld edges to compensate 0 degree vertices\n", (long long)counter + (long long)graph -> nodes); util::Array1D<SizeT, VertexId> new_column_indices; util::Array1D<SizeT, SizeT > new_row_offsets; new_column_indices.Allocate(graph -> edges + counter + graph -> 
nodes, util::HOST); new_row_offsets .Allocate(graph -> nodes + 2); SizeT edge_counter = 0; for (VertexId v = 0; v < graph->nodes; v++) { new_row_offsets[v] = edge_counter; if (graph -> row_offsets[v+1] == graph -> row_offsets[v]) { new_column_indices[edge_counter] = graph -> nodes; edge_counter ++; } else { SizeT num_neighbors = graph -> row_offsets[v+1] - graph -> row_offsets[v]; for (SizeT e = 0; e < num_neighbors; e++) new_column_indices[edge_counter + e] = graph -> column_indices[graph -> row_offsets[v] + e]; edge_counter += num_neighbors; } } for (VertexId v = 0; v< graph -> nodes; v++) new_column_indices[edge_counter + v] = v; new_row_offsets[graph -> nodes] = edge_counter; edge_counter += graph -> nodes; new_row_offsets[graph -> nodes + 1] = edge_counter; free(graph -> column_indices); graph -> column_indices = (VertexId*) malloc((long long)edge_counter * sizeof(VertexId)); memcpy(graph -> column_indices, new_column_indices.GetPointer(util::HOST), sizeof(VertexId) * (long long)edge_counter); new_column_indices.Release(); free(graph -> row_offsets); graph -> row_offsets = (SizeT*) malloc (((long long)graph -> nodes + 2) * sizeof(SizeT)); memcpy(graph -> row_offsets, new_row_offsets.GetPointer(util::HOST), sizeof(SizeT) * ((long long)graph -> nodes + 2)); graph -> edges = edge_counter; graph -> nodes +=1; } } // Allocate host-side array (for both reference and GPU-computed results) Value *ref_rank = new Value [graph->nodes]; Value *h_rank = new Value [graph->nodes]; VertexId *h_node_id = new VertexId[graph->nodes]; VertexId *ref_node_id = new VertexId[graph->nodes]; //Value *ref_check = (quick_mode) ? NULL : ref_rank; Problem *problem = new Problem(scaled); // allocate problem on GPU if (retval = util::GRError(problem->Init( stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, context, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "PR Problem Init failed", __FILE__, __LINE__)) return retval; Enactor *enactor = new Enactor( num_gpus, gpu_idx, instrument, debug, size_check); // enactor map if (retval = util::GRError(enactor->Init( context, problem, traversal_mode, max_grid_size), "PR Enactor Init failed", __FILE__, __LINE__)) return retval; enactor -> communicate_latency = communicate_latency; enactor -> communicate_multipy = communicate_multipy; enactor -> expand_latency = expand_latency; enactor -> subqueue_latency = subqueue_latency; enactor -> fullqueue_latency = fullqueue_latency; enactor -> makeout_latency = makeout_latency; if (retval = util::SetDevice(gpu_idx[0])) return retval; if (retval = util::latency::Test( streams[0], problem -> data_slices[0] -> latency_data, communicate_latency, communicate_multipy, expand_latency, subqueue_latency, fullqueue_latency, makeout_latency)) return retval; cpu_timer.Stop(); info -> info["preprocess_time"] = cpu_timer.ElapsedMillis(); // perform PageRank double total_elapsed = 0.0; double single_elapsed = 0.0; double max_elapsed = 0.0; double min_elapsed = 1e10; json_spirit::mArray process_times; if (!quiet_mode) printf("Using traversal mode %s\n", traversal_mode.c_str()); for (int iter = 0; iter < iterations; ++iter) { if (retval = util::GRError(problem->Reset( src, delta, error, max_iteration, enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1, traversal_mode == "TWC" ? 
true : false), "PR Problem Data Reset Failed", __FILE__, __LINE__)) return retval; if (retval = util::GRError(enactor->Reset(traversal_mode), "PR Enactor Reset Reset failed", __FILE__, __LINE__)) return retval; if (!quiet_mode) { printf("__________________________\n"); fflush(stdout); } cpu_timer.Start(); if (retval = util::GRError(enactor->Enact(traversal_mode), "PR Problem Enact Failed", __FILE__, __LINE__)) return retval; cpu_timer.Stop(); single_elapsed = cpu_timer.ElapsedMillis(); total_elapsed += single_elapsed; process_times.push_back(single_elapsed); if (single_elapsed > max_elapsed) max_elapsed = single_elapsed; if (single_elapsed < min_elapsed) min_elapsed = single_elapsed; if (!quiet_mode) { printf("--------------------------\n" "iteration %d elapsed: %lf ms\n", iter, single_elapsed); fflush(stdout); } } total_elapsed /= iterations; info -> info["process_times"] = process_times; info -> info["min_process_time"] = min_elapsed; info -> info["max_process_time"] = max_elapsed; cpu_timer.Start(); // copy out results if (retval = util::GRError(enactor->Extract(), "PR Enactor extract failed", __FILE__, __LINE__)) return retval; if (retval = util::GRError(problem->Extract(h_rank, h_node_id), "PR Problem Data Extraction Failed", __FILE__, __LINE__)) return retval; if (!quiet_mode) { double total_pr = 0; for (SizeT i = 0; i < graph->nodes; ++i) { total_pr += h_rank[i]; } printf("Total rank : %.10lf\n", total_pr); } // compute reference CPU solution if (!quick_mode) { if (!quiet_mode) { printf("Computing reference value ...\n"); } if (NORMALIZED) ReferencePageRank_Normalized <VertexId, SizeT, Value>( *graph, ref_node_id, ref_rank, delta, error, max_iteration, !undirected, quiet_mode, scaled); else ReferencePageRank <VertexId, SizeT, Value>( *graph, ref_node_id, ref_rank, delta, error, max_iteration, !undirected, quiet_mode); if (!quiet_mode) { printf("\n"); } // Verify the result if (!quiet_mode) { printf("Validity Rank: \n"); } Value *unorder_rank = new Value[graph->nodes]; SizeT *v_count = new SizeT[graph->nodes]; SizeT error_count = 0; for (VertexId i=0; i<graph->nodes; i++) v_count[i] = 0; for (VertexId i=0; i<graph->nodes; i++) { VertexId v = h_node_id[i]; if (v < 0 || v >= graph->nodes) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : node_id[%lld] (%lld) is out of bound\n", (long long)i, (long long)v); error_count ++; continue; } if (v_count[v] > 0) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : node_id[%lld] (%lld) appears more than once\n", (long long)i, (long long)v); error_count ++; continue; } v_count[v] ++; unorder_rank[v] = h_rank[i]; } for (VertexId v=0; v<graph->nodes; v++) if (v_count[v] == 0) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : vertex %lld does not appear in result\n", (long long)v); error_count ++; } double ref_total_rank = 0; double max_diff = 0; VertexId max_diff_pos = graph->nodes; double max_rdiff = 0; VertexId max_rdiff_pos= graph->nodes; for (VertexId i=0; i<graph->nodes; i++) { VertexId v = ref_node_id[i]; if (v < 0 || v >= graph->nodes) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : ref_node_id[%lld] = %lld, out of bound\n", (long long)i, (long long)v); error_count ++; continue; } ref_total_rank += ref_rank[i]; Value diff = fabs(ref_rank[i] - unorder_rank[v]); if ((ref_rank[i] > 1e-12 && diff > error * ref_rank[i]) || (ref_rank[i] <= 1e-12 && diff > error)) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : rank[%lld] (%.8le) != %.8le\n", (long long)v, (double)unorder_rank[v], (double)ref_rank[i]); 
error_count ++; } if (diff > max_diff) { max_diff = diff; max_diff_pos = i; } if (ref_rank[i] > 1e-12) { Value rdiff = diff / ref_rank[i]; if (rdiff > max_rdiff) { max_rdiff = rdiff; max_rdiff_pos = i; } } } if (error_count == 0 && !quiet_mode) printf("CORRECT\n"); else if (!quiet_mode) printf("number of errors : %lld\n", (long long) error_count); printf("Reference total rank : %.10lf\n", ref_total_rank); fflush(stdout); printf("Maximum difference : "); if (max_diff_pos < graph->nodes) printf("rank[%lld] %.8le vs. %.8le, ", (long long)ref_node_id[max_diff_pos], (double)unorder_rank[ref_node_id[max_diff_pos]], (double)ref_rank[max_diff_pos]); printf("%.8le\n", (double)max_diff); printf("Maximum relative difference :"); if (max_rdiff_pos < graph->nodes) printf("rank[%lld] %.8le vs. %.8le, ", (long long)ref_node_id[max_rdiff_pos], (double)unorder_rank[ref_node_id[max_rdiff_pos]], (double)ref_rank[max_rdiff_pos]); printf("%.8lf %%\n", (double)max_rdiff * 100); if (!quiet_mode) { printf("Validity Order: \n"); } error_count = 0; for (SizeT i=0; i<graph->nodes-1; i++) if (h_rank[i] < h_rank[i+1]) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : rank[%lld] (%.8le), place %lld < rank[%lld] (%.8le), place %lld\n", (long long)h_node_id[i ], (double)h_rank[i ], (long long)i, (long long)h_node_id[i+1], (double)h_rank[i+1], (long long)i+1); error_count ++; } if (error_count == 0 && !quiet_mode) printf("CORRECT\n"); else if (!quiet_mode) printf("number of errors : %lld\n", (long long) error_count); delete[] unorder_rank; unorder_rank = NULL; /*SizeT errors_count = CompareResults_( h_rank, ref_check, graph->nodes, true, quiet_mode, error); if (errors_count > 0) { if (!quiet_mode) { printf("number of errors : %lld\n", (long long) errors_count); } }*/ } if (!quiet_mode) { //printf("\nFirst 40 labels of the GPU result."); // Display Solution DisplaySolution(h_node_id, h_rank, graph->nodes); } info->ComputeCommonStats( // compute running statistics enactor->enactor_stats.GetPointer(), total_elapsed, (VertexId*)NULL, true); if (!quiet_mode) { printf("\n\tMemory Usage(B)\t"); for (int gpu = 0; gpu < num_gpus; gpu++) if (num_gpus > 1) {if (gpu != 0) printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu); else printf(" #keys%d,0\t #keys%d,1", gpu, gpu);} else printf(" #keys%d,0\t #keys%d,1", gpu, gpu); if (num_gpus > 1) printf(" #keys%d", num_gpus); printf("\n"); double max_queue_sizing_[2] = {0, 0}, max_in_sizing_ = 0; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t gpu_free, dummy; hipSetDevice(gpu_idx[gpu]); hipMemGetInfo(&gpu_free, &dummy); printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free); for (int i = 0; i < num_gpus; i++) { for (int j = 0; j < 2; j++) { SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / (num_gpus > 1 ? 
problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes); if (factor > max_queue_sizing_[j]) max_queue_sizing_[j] = factor; } if (num_gpus > 1 && i != 0 ) for (int t = 0; t < 2; t++) { SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i]; if (factor > max_in_sizing_) max_in_sizing_ = factor; } } if (num_gpus > 1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize())); printf("\n"); } printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]); if (num_gpus > 1) printf("\t in_sizing =\t %lf", max_in_sizing_); printf("\n"); } // Clean up if (org_size ) { delete[] org_size ; org_size = NULL; } if (enactor ) { if (retval = util::GRError(enactor -> Release(), "BFS Enactor Release failed", __FILE__, __LINE__)) return retval; delete enactor ; enactor = NULL; } if (problem ) { if (retval = util::GRError(problem -> Release(), "BFS Problem Release failed", __FILE__, __LINE__)) return retval; delete problem ; problem = NULL; } if (ref_rank ) { delete[] ref_rank ; ref_rank = NULL; } if (ref_node_id) { delete[] ref_node_id; ref_node_id = NULL; } cpu_timer.Stop(); info->info["postprocess_time"] = cpu_timer.ElapsedMillis(); if (h_rank ) { if (info->info["output_filename"].get_str() !="") { cpu_timer.Start(); std::ofstream fout; size_t buf_size = 1024 * 1024 * 16; char *fout_buf = new char[buf_size]; fout.rdbuf() -> pubsetbuf(fout_buf, buf_size); fout.open(info->info["output_filename"].get_str().c_str()); for (VertexId i=0; i<graph->nodes; i++) { fout<< h_node_id[i]+1 << "," << h_rank[i] << std::endl; } fout.close(); delete[] fout_buf; fout_buf = NULL; cpu_timer.Stop(); info->info["write_time"] = cpu_timer.ElapsedMillis(); } delete[] h_rank ; h_rank = NULL; } if (h_node_id ) { delete[] h_node_id ; h_node_id = NULL; } cpu_timer.Stop(); info->info["postprocess_time"] = cpu_timer.ElapsedMillis(); return retval; } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to info contains parameters and statistics. * * \return hipError_t object which indicates the success of * all CUDA function calls. 
 */
template <
    typename VertexId,
    typename SizeT,
    typename Value>
hipError_t RunTests_normalized(Info<VertexId, SizeT, Value> *info)
{
    if (info->info["normalized"].get_bool())
        return RunTests<VertexId, SizeT, Value, true>(info);
    else
        return RunTests<VertexId, SizeT, Value, false>(info);
}

/******************************************************************************
 * Main
 ******************************************************************************/

template<
    typename VertexId,
    typename SizeT,
    typename Value>
int main_(CommandLineArgs *args)
{
    hipError_t retval = hipSuccess;
    CpuTimer cpu_timer, cpu_timer2;
    cpu_timer.Start();
    Csr <VertexId, SizeT, Value> csr(false);  // graph we process on
    Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>;

    // graph construction or generation related parameters
    if (args -> CheckCmdLineFlag("normalized"))
        info->info["undirected"] = args -> CheckCmdLineFlag("undirected");
    else info->info["undirected"] = true;  // require undirected input graph when unnormalized

    cpu_timer2.Start();
    info->Init("PageRank", *args, csr);  // initialize Info structure
    cpu_timer2.Stop();
    info->info["load_time"] = cpu_timer2.ElapsedMillis();

    retval = RunTests_normalized<VertexId, SizeT, Value>(info);  // run test

    cpu_timer.Stop();
    info->info["total_time"] = cpu_timer.ElapsedMillis();
    if (!(info->info["quiet_mode"].get_bool()))
    {
        info->DisplayStats();  // display collected statistics
    }
    info->CollectInfo();  // collected all the info and put into JSON mObject

    return retval;
}

template <
    typename VertexId, // the vertex identifier type, usually int or long long
    typename SizeT>
int main_Value(CommandLineArgs *args)
{
    // can be disabled to reduce compile time
//    if (args -> CheckCmdLineFlag("64bit-Value"))
//        return main_<VertexId, SizeT, double>(args);
//    else
        return main_<VertexId, SizeT, float >(args);
}

template <
    typename VertexId>
int main_SizeT(CommandLineArgs *args)
{
    // can be disabled to reduce compile time
    if (args -> CheckCmdLineFlag("64bit-SizeT") || sizeof(VertexId) > 4)
        return main_Value<VertexId, long long>(args);
    else
        return main_Value<VertexId, int >(args);
}

int main_VertexId(CommandLineArgs *args)
{
    // can be disabled to reduce compile time
    if (args -> CheckCmdLineFlag("64bit-VertexId"))
        return main_SizeT<long long>(args);
    else
        return main_SizeT<int >(args);
}

int main(int argc, char** argv)
{
    CommandLineArgs args(argc, argv);
    int graph_args = argc - args.ParsedArgc() - 1;
    if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help"))
    {
        Usage();
        return 1;
    }

    return main_VertexId(&args);
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
07209a8f31393d7d7679a69f12e7c68070017235.cu
// ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_pr.cu * * @brief Simple test driver program for computing Pagerank. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <iostream> #include <cstdlib> #include <fstream> #include <algorithm> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // PR includes #include <gunrock/app/pr/pr_enactor.cuh> #include <gunrock/app/pr/pr_problem.cuh> #include <gunrock/app/pr/pr_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <moderngpu.cuh> // boost includes #include <boost/config.hpp> #include <boost/utility.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/graph/page_rank.hpp> using namespace gunrock; using namespace gunrock::app; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::pr; /****************************************************************************** * Defines, constants, globals ******************************************************************************/ template <typename VertexId, typename Value> struct RankPair { VertexId vertex_id; Value page_rank; RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {} }; template<typename RankPair> bool PRCompare( RankPair elem1, RankPair elem2) { return elem1.page_rank > elem2.page_rank; } /****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( "test <graph-type> [graph-type-arguments]\n" "Graph type and graph type arguments:\n" " market <matrix-market-file-name>\n" " Reads a Matrix-Market coordinate-formatted graph of\n" " directed/undirected edges from STDIN (or from the\n" " optionally-specified file).\n" " rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n" " Generate R-MAT graph as input\n" " --rmat_scale=<vertex-scale>\n" " --rmat_nodes=<number-nodes>\n" " --rmat_edgefactor=<edge-factor>\n" " --rmat_edges=<number-edges>\n" " --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n" " --rmat_seed=<seed>\n" " rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n" " Generate Random Geometry Graph as input\n" " --rgg_scale=<vertex-scale>\n" " --rgg_nodes=<number-nodes>\n" " --rgg_thfactor=<threshold-factor>\n" " --rgg_threshold=<threshold>\n" " --rgg_vmultipiler=<vmultipiler>\n" " --rgg_seed=<seed>\n\n" "Optional arguments:\n" "[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n" "[--instrumented] Keep kernels statics [Default: Disable].\n" " total_queued, search_depth and barrier duty.\n" " (a relative indicator of load imbalance.)\n" "[--quick] Skip the CPU reference validation process.\n" "[--disable-size-check] Disable frontier queue size check.\n" "[--grid-size=<grid size>] Maximum allowed grid size setting.\n" "[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). (Default: 1.0)\n" "[--in-sizing=<in/out_queue_scale_factor>]\n" " Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). 
(Default: 1.0)\n" "[--v] Print verbose per iteration debug info.\n" "[--iteration-num=<num>] Number of runs to perform the test.\n" "[--max-iter=<num>] Max iteration for rank score distribution\n" " before one round of PageRank run end.\n" "[--partition-method=<random|biasrandom|clustered|metis>]\n" " Choose partitioner (Default use random).\n" "[--delta=<delta>] Delta for PageRank (Default 0.85f).\n" "[--error=<error>] Error threshold for PageRank (Default 0.01f).\n" "[--quiet] No output (unless --json is specified).\n" "[--json] Output JSON-format statistics to STDOUT.\n" "[--jsonfile=<name>] Output JSON-format statistics to file <name>\n" "[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n" " where name is auto-generated.\n" ); } /** * @brief Displays the PageRank result * * @param[in] node Node vertex Id * @param[in] rank Rank value for the node * @param[in] nodes Number of nodes in the graph. */ template<typename VertexId, typename SizeT, typename Value> void DisplaySolution(VertexId *node, Value *rank, SizeT nodes) { SizeT top = (nodes < 10) ? nodes : 10; // at most top 10 ranked nodes printf("\nTop %lld Ranked Vertices and PageRanks:\n", (long long)top); for (SizeT i = 0; i < top; ++i) { printf("Vertex ID: %lld, PageRank: %.8le\n", (long long)node[i], (double)rank[i]); } } /** * @brief Compares the equivalence of two arrays. If incorrect, print the location * of the first incorrect value appears, the incorrect value, and the reference * value. * * @tparam T datatype of the values being compared with. * @tparam SizeT datatype of the array length. * * @param[in] computed Vector of values to be compared. * @param[in] reference Vector of reference values. * @param[in] len Vector length. * @param[in] verbose Whether to print values around the incorrect one. * @param[in] quiet Don't print out anything to stdout. * @param[in] threshold Results error checking threshold. * * \return Zero if two vectors are exactly the same, non-zero if there is any difference. */ template <typename SizeT, typename Value> int CompareResults_( Value* computed, Value* reference, SizeT len, bool verbose = true, bool quiet = false, Value threshold = 0.05f) { int flag = 0; for (SizeT i = 0; i < len; i++) { // Use relative error rate here. bool is_right = true; if (fabs(computed[i]) < 0.01f && fabs(reference[i] - 1) < 0.01f) continue; if (fabs(computed[i] - 0.0) < 0.01f) { if (fabs(computed[i] - reference[i]) > threshold) is_right = false; } else { if (fabs((computed[i] - reference[i]) / reference[i]) > threshold) is_right = false; } if (!is_right && flag == 0) { if (!quiet) { printf("\nINCORRECT: [%lu]: ", (unsigned long) i); PrintValue<Value>(computed[i]); printf(" != "); PrintValue<Value>(reference[i]); if (verbose) { printf("\nresult[..."); for (SizeT j = (i >= 5) ? i - 5 : 0; (j < i + 5) && (j < len); j++) { PrintValue<Value>(computed[j]); printf(", "); } printf("...]"); printf("\nreference[..."); for (SizeT j = (i >= 5) ? i - 5 : 0; (j < i + 5) && (j < len); j++) { PrintValue<Value>(reference[j]); printf(", "); } printf("...]"); } } flag += 1; } if (!is_right && flag > 0) flag += 1; } if (!quiet) { printf("\n"); if (!flag) { printf("CORRECT"); } } return flag; } /****************************************************************************** * PageRank Testing Routines *****************************************************************************/ /** * @brief A simple CPU-based reference Page Rank implementation. 
* * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph we process on * @param[in] node_id Source node for personalized PageRank (if any) * @param[in] rank Host-side vector to store CPU computed labels for each node * @param[in] delta Delta for computing PR * @param[in] error Error threshold * @param[in] max_iteration Maximum iteration to go * @param[in] directed Whether the graph is directed * @param[in] quiet Don't print out anything to stdout */ template < typename VertexId, typename SizeT, typename Value > void ReferencePageRank( const Csr<VertexId, SizeT, Value> &graph, VertexId *node_id, Value *rank, Value delta, Value error, SizeT max_iteration, bool directed, bool quiet = false) { using namespace boost; // preparation typedef adjacency_list< vecS, vecS, bidirectionalS, no_property, property<edge_index_t, int> > Graph; Graph g; for (int i = 0; i < graph.nodes; ++i) { for (int j = graph.row_offsets[i]; j < graph.row_offsets[i + 1]; ++j) { Graph::edge_descriptor e = add_edge(i, graph.column_indices[j], g).first; put(edge_index, g, e, i); } } // compute PageRank CpuTimer cpu_timer; cpu_timer.Start(); std::vector<Value> ranks(num_vertices(g)); page_rank(g, make_iterator_property_map( ranks.begin(), get(boost::vertex_index, g)), boost::graph::n_iterations(max_iteration)); cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); for (std::size_t i = 0; i < num_vertices(g); ++i) { rank[i] = ranks[i]; } // Sort the top ranked vertices RankPair<SizeT, Value> *pr_list = (RankPair<SizeT, Value>*)malloc( sizeof(RankPair<SizeT, Value>) * num_vertices(g)); for (int i = 0; i < num_vertices(g); ++i) { pr_list[i].vertex_id = i; pr_list[i].page_rank = rank[i]; } std::stable_sort(pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >); for (int i = 0; i < num_vertices(g); ++i) { node_id[i] = pr_list[i].vertex_id; rank[i] = pr_list[i].page_rank; } free(pr_list); if (!quiet) { printf("CPU PageRank finished in %lf msec.\n", elapsed); } } /*template < typename VertexId, typename Value> class Sort_Pair { public: VertexId v; Value val; }; template < typename VertexId, typename Value> inline bool operator< (const Sort_Pair<VertexId, Value>& lhs, const Sort_Pair<VertexId, Value>& rhs) { if (lhs.val < rhs.val) return true; if (rhs.val < lhs.val) return false; return false; }*/ /** * @brief A simple CPU-based reference Page Rank implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] graph Reference to the CSR graph we process on * @param[in] node_id Source node for personalized PageRank (if any) * @param[in] rank Host-side vector to store CPU computed labels for each node * @param[in] delta Delta for computing PR * @param[in] error Error threshold * @param[in] max_iteration Maximum iteration to go * @param[in] directed Whether the graph is directed * @param[in] quiet Don't print out anything to stdout * @param[in] scaled Normalized flag */ template < typename VertexId, typename SizeT, typename Value > void ReferencePageRank_Normalized( const Csr<VertexId, SizeT, Value> &graph, VertexId *node_id, Value *rank, Value delta, Value error, SizeT max_iteration, bool directed, bool quiet = false, bool scaled = false) { SizeT nodes = graph.nodes; Value *rank_current = (Value*) malloc (sizeof(Value) * nodes); Value *rank_next = (Value*) malloc (sizeof(Value) * nodes); bool to_continue = true; SizeT iteration = 0; Value reset_value = scaled ? 
1.0 - delta : ((1.0 - delta) / (Value)nodes); CpuTimer cpu_timer; cpu_timer.Start(); //#pragma omp parallel { #pragma omp parallel for for (VertexId v=0; v<nodes; v++) { rank_current[v] = scaled ? 1.0 : (1.0 / (Value)nodes); rank_next [v] = 0; } while (to_continue) { to_continue = false; #pragma omp parallel for for (VertexId src=0; src<nodes; src++) { SizeT start_e = graph.row_offsets[src]; SizeT end_e = graph.row_offsets[src+1]; if (start_e == end_e) continue; // 0 out degree vertex Value dist_rank = rank_current[src] / (Value)(end_e - start_e); if (!isfinite(dist_rank)) continue; for (SizeT e = start_e; e < end_e; e++) { VertexId dest = graph.column_indices[e]; #pragma omp atomic rank_next[dest] += dist_rank; } } iteration ++; #pragma omp parallel for for (VertexId v=0; v<nodes; v++) { Value rank_new = delta * rank_next[v]; if (!isfinite(rank_new)) rank_new = 0; rank_new = rank_new + reset_value; if (iteration <= max_iteration && fabs(rank_new - rank_current[v]) > error * rank_current[v]) { to_continue = true; } rank_current[v] = rank_new; rank_next [v] = 0; } //#pragma omp single //{ // iteration ++; //} } } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); // Sort the top ranked vertices RankPair<SizeT, Value> *pr_list = (RankPair<SizeT, Value>*)malloc( sizeof(RankPair<SizeT, Value>) * nodes); #pragma omp parallel for for (VertexId i = 0; i < nodes; ++i) { pr_list[i].vertex_id = i; pr_list[i].page_rank = rank_current[i]; } std::stable_sort(pr_list, pr_list + nodes, PRCompare<RankPair<SizeT, Value> >); #pragma omp parallel for for (VertexId i = 0; i < nodes; ++i) { node_id[i] = pr_list[i].vertex_id; rank[i] = scaled ? (pr_list[i].page_rank / (Value)nodes) : pr_list[i].page_rank; } free(pr_list ); pr_list = NULL; free(rank_current); rank_current = NULL; free(rank_next ); rank_next = NULL; if (!quiet) { printf("CPU iteration : %lld\n", (long long)iteration); printf("CPU PageRank finished in %lf msec.\n", elapsed); } } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to info contains parameters and statistics. * * \return cudaError_t object which indicates the success of * all CUDA function calls. 
*/ template < typename VertexId, typename SizeT, typename Value, bool NORMALIZED> cudaError_t RunTests(Info<VertexId, SizeT, Value> *info) { typedef PRProblem <VertexId, SizeT, Value, NORMALIZED> Problem; typedef PREnactor <Problem> //INSTRUMENT, //DEBUG, //SIZE_CHECK > Enactor; // parse configurations from mObject info Csr<VertexId, SizeT, Value> *graph = info->csr_ptr; VertexId src = info->info["source_vertex" ].get_int64(); bool undirected = info->info["undirected" ].get_bool (); bool quiet_mode = info->info["quiet_mode" ].get_bool (); bool quick_mode = info->info["quick_mode" ].get_bool (); bool stream_from_host = info->info["stream_from_host" ].get_bool (); int max_grid_size = info->info["max_grid_size" ].get_int (); int num_gpus = info->info["num_gpus" ].get_int (); int max_iteration = info->info["max_iteration" ].get_int (); double max_queue_sizing = 0.0; //info->info["max_queue_sizing" ].get_real (); double max_queue_sizing1 = 0.0; //info->info["max_queue_sizing1"].get_real (); double max_in_sizing = 1.0; //info->info["max_in_sizing" ].get_real (); std::string partition_method = info->info["partition_method" ].get_str (); double partition_factor = info->info["partition_factor" ].get_real (); int partition_seed = info->info["partition_seed" ].get_int (); bool instrument = info->info["instrument" ].get_bool (); bool debug = info->info["debug_mode" ].get_bool (); bool size_check = info->info["size_check" ].get_bool (); int iterations = info->info["num_iteration" ].get_int (); std::string traversal_mode = info->info["traversal_mode" ].get_str (); std::string ref_filename = info->info["ref_filename" ].get_str (); Value delta = info->info["delta" ].get_real (); Value error = info->info["error" ].get_real (); bool scaled = info->info["scaled" ].get_bool (); bool compensate = info->info["compensate" ].get_bool (); int communicate_latency = info->info["communicate_latency"].get_int (); float communicate_multipy = info->info["communicate_multipy"].get_real(); int expand_latency = info->info["expand_latency" ].get_int (); int subqueue_latency = info->info["subqueue_latency" ].get_int (); int fullqueue_latency = info->info["fullqueue_latency" ].get_int (); int makeout_latency = info->info["makeout_latency" ].get_int (); if (communicate_multipy > 1) max_in_sizing *= communicate_multipy; CpuTimer cpu_timer; cudaError_t retval = cudaSuccess; cpu_timer.Start(); json_spirit::mArray device_list = info->info["device_list"].get_array(); int* gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int(); // TODO: remove after merge mgpu-cq ContextPtr *context = (ContextPtr*) info->context; cudaStream_t *streams = (cudaStream_t*)info->streams; size_t *org_size = new size_t[num_gpus]; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; if (retval = cudaSetDevice(gpu_idx[gpu])) return retval; if (retval = cudaMemGetInfo(&(org_size[gpu]), &dummy)) return retval; } if (compensate) { util::Array1D<SizeT, VertexId> zero_out_vertices; zero_out_vertices.Allocate(graph -> nodes, util::HOST); SizeT counter = 0; for (VertexId v = 0; v< graph->nodes; v++) if (graph -> row_offsets[v+1] == graph -> row_offsets[v]) { zero_out_vertices[counter] = v; counter ++; } if (counter != 0) { if (!quiet_mode) printf("Adding 1 vertex and %lld edges to compensate 0 degree vertices\n", (long long)counter + (long long)graph -> nodes); util::Array1D<SizeT, VertexId> new_column_indices; util::Array1D<SizeT, SizeT > new_row_offsets; new_column_indices.Allocate(graph -> edges + counter + 
graph -> nodes, util::HOST); new_row_offsets .Allocate(graph -> nodes + 2); SizeT edge_counter = 0; for (VertexId v = 0; v < graph->nodes; v++) { new_row_offsets[v] = edge_counter; if (graph -> row_offsets[v+1] == graph -> row_offsets[v]) { new_column_indices[edge_counter] = graph -> nodes; edge_counter ++; } else { SizeT num_neighbors = graph -> row_offsets[v+1] - graph -> row_offsets[v]; for (SizeT e = 0; e < num_neighbors; e++) new_column_indices[edge_counter + e] = graph -> column_indices[graph -> row_offsets[v] + e]; edge_counter += num_neighbors; } } for (VertexId v = 0; v< graph -> nodes; v++) new_column_indices[edge_counter + v] = v; new_row_offsets[graph -> nodes] = edge_counter; edge_counter += graph -> nodes; new_row_offsets[graph -> nodes + 1] = edge_counter; free(graph -> column_indices); graph -> column_indices = (VertexId*) malloc((long long)edge_counter * sizeof(VertexId)); memcpy(graph -> column_indices, new_column_indices.GetPointer(util::HOST), sizeof(VertexId) * (long long)edge_counter); new_column_indices.Release(); free(graph -> row_offsets); graph -> row_offsets = (SizeT*) malloc (((long long)graph -> nodes + 2) * sizeof(SizeT)); memcpy(graph -> row_offsets, new_row_offsets.GetPointer(util::HOST), sizeof(SizeT) * ((long long)graph -> nodes + 2)); graph -> edges = edge_counter; graph -> nodes +=1; } } // Allocate host-side array (for both reference and GPU-computed results) Value *ref_rank = new Value [graph->nodes]; Value *h_rank = new Value [graph->nodes]; VertexId *h_node_id = new VertexId[graph->nodes]; VertexId *ref_node_id = new VertexId[graph->nodes]; //Value *ref_check = (quick_mode) ? NULL : ref_rank; Problem *problem = new Problem(scaled); // allocate problem on GPU if (retval = util::GRError(problem->Init( stream_from_host, graph, NULL, num_gpus, gpu_idx, partition_method, streams, context, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "PR Problem Init failed", __FILE__, __LINE__)) return retval; Enactor *enactor = new Enactor( num_gpus, gpu_idx, instrument, debug, size_check); // enactor map if (retval = util::GRError(enactor->Init( context, problem, traversal_mode, max_grid_size), "PR Enactor Init failed", __FILE__, __LINE__)) return retval; enactor -> communicate_latency = communicate_latency; enactor -> communicate_multipy = communicate_multipy; enactor -> expand_latency = expand_latency; enactor -> subqueue_latency = subqueue_latency; enactor -> fullqueue_latency = fullqueue_latency; enactor -> makeout_latency = makeout_latency; if (retval = util::SetDevice(gpu_idx[0])) return retval; if (retval = util::latency::Test( streams[0], problem -> data_slices[0] -> latency_data, communicate_latency, communicate_multipy, expand_latency, subqueue_latency, fullqueue_latency, makeout_latency)) return retval; cpu_timer.Stop(); info -> info["preprocess_time"] = cpu_timer.ElapsedMillis(); // perform PageRank double total_elapsed = 0.0; double single_elapsed = 0.0; double max_elapsed = 0.0; double min_elapsed = 1e10; json_spirit::mArray process_times; if (!quiet_mode) printf("Using traversal mode %s\n", traversal_mode.c_str()); for (int iter = 0; iter < iterations; ++iter) { if (retval = util::GRError(problem->Reset( src, delta, error, max_iteration, enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1, traversal_mode == "TWC" ? 
true : false), "PR Problem Data Reset Failed", __FILE__, __LINE__)) return retval; if (retval = util::GRError(enactor->Reset(traversal_mode), "PR Enactor Reset Reset failed", __FILE__, __LINE__)) return retval; if (!quiet_mode) { printf("__________________________\n"); fflush(stdout); } cpu_timer.Start(); if (retval = util::GRError(enactor->Enact(traversal_mode), "PR Problem Enact Failed", __FILE__, __LINE__)) return retval; cpu_timer.Stop(); single_elapsed = cpu_timer.ElapsedMillis(); total_elapsed += single_elapsed; process_times.push_back(single_elapsed); if (single_elapsed > max_elapsed) max_elapsed = single_elapsed; if (single_elapsed < min_elapsed) min_elapsed = single_elapsed; if (!quiet_mode) { printf("--------------------------\n" "iteration %d elapsed: %lf ms\n", iter, single_elapsed); fflush(stdout); } } total_elapsed /= iterations; info -> info["process_times"] = process_times; info -> info["min_process_time"] = min_elapsed; info -> info["max_process_time"] = max_elapsed; cpu_timer.Start(); // copy out results if (retval = util::GRError(enactor->Extract(), "PR Enactor extract failed", __FILE__, __LINE__)) return retval; if (retval = util::GRError(problem->Extract(h_rank, h_node_id), "PR Problem Data Extraction Failed", __FILE__, __LINE__)) return retval; if (!quiet_mode) { double total_pr = 0; for (SizeT i = 0; i < graph->nodes; ++i) { total_pr += h_rank[i]; } printf("Total rank : %.10lf\n", total_pr); } // compute reference CPU solution if (!quick_mode) { if (!quiet_mode) { printf("Computing reference value ...\n"); } if (NORMALIZED) ReferencePageRank_Normalized <VertexId, SizeT, Value>( *graph, ref_node_id, ref_rank, delta, error, max_iteration, !undirected, quiet_mode, scaled); else ReferencePageRank <VertexId, SizeT, Value>( *graph, ref_node_id, ref_rank, delta, error, max_iteration, !undirected, quiet_mode); if (!quiet_mode) { printf("\n"); } // Verify the result if (!quiet_mode) { printf("Validity Rank: \n"); } Value *unorder_rank = new Value[graph->nodes]; SizeT *v_count = new SizeT[graph->nodes]; SizeT error_count = 0; for (VertexId i=0; i<graph->nodes; i++) v_count[i] = 0; for (VertexId i=0; i<graph->nodes; i++) { VertexId v = h_node_id[i]; if (v < 0 || v >= graph->nodes) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : node_id[%lld] (%lld) is out of bound\n", (long long)i, (long long)v); error_count ++; continue; } if (v_count[v] > 0) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : node_id[%lld] (%lld) appears more than once\n", (long long)i, (long long)v); error_count ++; continue; } v_count[v] ++; unorder_rank[v] = h_rank[i]; } for (VertexId v=0; v<graph->nodes; v++) if (v_count[v] == 0) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : vertex %lld does not appear in result\n", (long long)v); error_count ++; } double ref_total_rank = 0; double max_diff = 0; VertexId max_diff_pos = graph->nodes; double max_rdiff = 0; VertexId max_rdiff_pos= graph->nodes; for (VertexId i=0; i<graph->nodes; i++) { VertexId v = ref_node_id[i]; if (v < 0 || v >= graph->nodes) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : ref_node_id[%lld] = %lld, out of bound\n", (long long)i, (long long)v); error_count ++; continue; } ref_total_rank += ref_rank[i]; Value diff = fabs(ref_rank[i] - unorder_rank[v]); if ((ref_rank[i] > 1e-12 && diff > error * ref_rank[i]) || (ref_rank[i] <= 1e-12 && diff > error)) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : rank[%lld] (%.8le) != %.8le\n", (long long)v, (double)unorder_rank[v], (double)ref_rank[i]); 
error_count ++; } if (diff > max_diff) { max_diff = diff; max_diff_pos = i; } if (ref_rank[i] > 1e-12) { Value rdiff = diff / ref_rank[i]; if (rdiff > max_rdiff) { max_rdiff = rdiff; max_rdiff_pos = i; } } } if (error_count == 0 && !quiet_mode) printf("CORRECT\n"); else if (!quiet_mode) printf("number of errors : %lld\n", (long long) error_count); printf("Reference total rank : %.10lf\n", ref_total_rank); fflush(stdout); printf("Maximum difference : "); if (max_diff_pos < graph->nodes) printf("rank[%lld] %.8le vs. %.8le, ", (long long)ref_node_id[max_diff_pos], (double)unorder_rank[ref_node_id[max_diff_pos]], (double)ref_rank[max_diff_pos]); printf("%.8le\n", (double)max_diff); printf("Maximum relative difference :"); if (max_rdiff_pos < graph->nodes) printf("rank[%lld] %.8le vs. %.8le, ", (long long)ref_node_id[max_rdiff_pos], (double)unorder_rank[ref_node_id[max_rdiff_pos]], (double)ref_rank[max_rdiff_pos]); printf("%.8lf %%\n", (double)max_rdiff * 100); if (!quiet_mode) { printf("Validity Order: \n"); } error_count = 0; for (SizeT i=0; i<graph->nodes-1; i++) if (h_rank[i] < h_rank[i+1]) { if (error_count == 0 && !quiet_mode) printf("INCORRECT : rank[%lld] (%.8le), place %lld < rank[%lld] (%.8le), place %lld\n", (long long)h_node_id[i ], (double)h_rank[i ], (long long)i, (long long)h_node_id[i+1], (double)h_rank[i+1], (long long)i+1); error_count ++; } if (error_count == 0 && !quiet_mode) printf("CORRECT\n"); else if (!quiet_mode) printf("number of errors : %lld\n", (long long) error_count); delete[] unorder_rank; unorder_rank = NULL; /*SizeT errors_count = CompareResults_( h_rank, ref_check, graph->nodes, true, quiet_mode, error); if (errors_count > 0) { if (!quiet_mode) { printf("number of errors : %lld\n", (long long) errors_count); } }*/ } if (!quiet_mode) { //printf("\nFirst 40 labels of the GPU result."); // Display Solution DisplaySolution(h_node_id, h_rank, graph->nodes); } info->ComputeCommonStats( // compute running statistics enactor->enactor_stats.GetPointer(), total_elapsed, (VertexId*)NULL, true); if (!quiet_mode) { printf("\n\tMemory Usage(B)\t"); for (int gpu = 0; gpu < num_gpus; gpu++) if (num_gpus > 1) {if (gpu != 0) printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu); else printf(" #keys%d,0\t #keys%d,1", gpu, gpu);} else printf(" #keys%d,0\t #keys%d,1", gpu, gpu); if (num_gpus > 1) printf(" #keys%d", num_gpus); printf("\n"); double max_queue_sizing_[2] = {0, 0}, max_in_sizing_ = 0; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t gpu_free, dummy; cudaSetDevice(gpu_idx[gpu]); cudaMemGetInfo(&gpu_free, &dummy); printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free); for (int i = 0; i < num_gpus; i++) { for (int j = 0; j < 2; j++) { SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / (num_gpus > 1 ? 
problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes); if (factor > max_queue_sizing_[j]) max_queue_sizing_[j] = factor; } if (num_gpus > 1 && i != 0 ) for (int t = 0; t < 2; t++) { SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i]; if (factor > max_in_sizing_) max_in_sizing_ = factor; } } if (num_gpus > 1) printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize())); printf("\n"); } printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]); if (num_gpus > 1) printf("\t in_sizing =\t %lf", max_in_sizing_); printf("\n"); } // Clean up if (org_size ) { delete[] org_size ; org_size = NULL; } if (enactor ) { if (retval = util::GRError(enactor -> Release(), "BFS Enactor Release failed", __FILE__, __LINE__)) return retval; delete enactor ; enactor = NULL; } if (problem ) { if (retval = util::GRError(problem -> Release(), "BFS Problem Release failed", __FILE__, __LINE__)) return retval; delete problem ; problem = NULL; } if (ref_rank ) { delete[] ref_rank ; ref_rank = NULL; } if (ref_node_id) { delete[] ref_node_id; ref_node_id = NULL; } cpu_timer.Stop(); info->info["postprocess_time"] = cpu_timer.ElapsedMillis(); if (h_rank ) { if (info->info["output_filename"].get_str() !="") { cpu_timer.Start(); std::ofstream fout; size_t buf_size = 1024 * 1024 * 16; char *fout_buf = new char[buf_size]; fout.rdbuf() -> pubsetbuf(fout_buf, buf_size); fout.open(info->info["output_filename"].get_str().c_str()); for (VertexId i=0; i<graph->nodes; i++) { fout<< h_node_id[i]+1 << "," << h_rank[i] << std::endl; } fout.close(); delete[] fout_buf; fout_buf = NULL; cpu_timer.Stop(); info->info["write_time"] = cpu_timer.ElapsedMillis(); } delete[] h_rank ; h_rank = NULL; } if (h_node_id ) { delete[] h_node_id ; h_node_id = NULL; } cpu_timer.Stop(); info->info["postprocess_time"] = cpu_timer.ElapsedMillis(); return retval; } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to info contains parameters and statistics. * * \return cudaError_t object which indicates the success of * all CUDA function calls. 
*/ template < typename VertexId, typename SizeT, typename Value> cudaError_t RunTests_normalized(Info<VertexId, SizeT, Value> *info) { if (info->info["normalized"].get_bool()) return RunTests<VertexId, SizeT, Value, true>(info); else return RunTests<VertexId, SizeT, Value, false>(info); } /****************************************************************************** * Main ******************************************************************************/ template< typename VertexId, typename SizeT, typename Value> int main_(CommandLineArgs *args) { cudaError_t retval = cudaSuccess; CpuTimer cpu_timer, cpu_timer2; cpu_timer.Start(); Csr <VertexId, SizeT, Value> csr(false); // graph we process on Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>; // graph construction or generation related parameters if (args -> CheckCmdLineFlag("normalized")) info->info["undirected"] = args -> CheckCmdLineFlag("undirected"); else info->info["undirected"] = true; // require undirected input graph when unnormalized cpu_timer2.Start(); info->Init("PageRank", *args, csr); // initialize Info structure cpu_timer2.Stop(); info->info["load_time"] = cpu_timer2.ElapsedMillis(); retval = RunTests_normalized<VertexId, SizeT, Value>(info); // run test cpu_timer.Stop(); info->info["total_time"] = cpu_timer.ElapsedMillis(); if (!(info->info["quiet_mode"].get_bool())) { info->DisplayStats(); // display collected statistics } info->CollectInfo(); // collected all the info and put into JSON mObject return retval; } template < typename VertexId, // the vertex identifier type, usually int or long long typename SizeT> int main_Value(CommandLineArgs *args) { // can be disabled to reduce compile time // if (args -> CheckCmdLineFlag("64bit-Value")) // return main_<VertexId, SizeT, double>(args); // else return main_<VertexId, SizeT, float >(args); } template < typename VertexId> int main_SizeT(CommandLineArgs *args) { // can be disabled to reduce compile time if (args -> CheckCmdLineFlag("64bit-SizeT") || sizeof(VertexId) > 4) return main_Value<VertexId, long long>(args); else return main_Value<VertexId, int >(args); } int main_VertexId(CommandLineArgs *args) { // can be disabled to reduce compile time if (args -> CheckCmdLineFlag("64bit-VertexId")) return main_SizeT<long long>(args); else return main_SizeT<int >(args); } int main(int argc, char** argv) { CommandLineArgs args(argc, argv); int graph_args = argc - args.ParsedArgc() - 1; if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help")) { Usage(); return 1; } return main_VertexId(&args); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
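// ---------------------------------------------------------------------------
// Editor's sketch (not part of the Gunrock build above): the CPU checker
// ReferencePageRank_Normalized applies the standard power-iteration update
//   rank_next[v] = (1 - delta)/N + delta * sum over edges (u -> v) of rank[u]/out_degree(u)
// The minimal standalone function below shows one such round over a CSR graph.
// The container names (row_offsets, column_indices) and the helper name are
// illustrative assumptions, not the tester's own API.
// ---------------------------------------------------------------------------
#include <vector>
#include <algorithm>
#include <cstddef>

static void pagerank_iteration_sketch(
    const std::vector<std::size_t> &row_offsets,    // CSR offsets, size N+1
    const std::vector<int>         &column_indices, // CSR edge destinations
    const std::vector<float>       &rank,           // current ranks, size N
    std::vector<float>             &rank_next,      // next ranks, size N
    float delta)                                    // damping factor, e.g. 0.85f
{
    std::size_t N = rank.size();
    // teleport term shared by every vertex
    std::fill(rank_next.begin(), rank_next.end(), (1.0f - delta) / (float)N);
    for (std::size_t src = 0; src < N; ++src) {
        std::size_t begin = row_offsets[src], end = row_offsets[src + 1];
        if (begin == end) continue;                 // 0 out-degree vertex: nothing to distribute
        float share = delta * rank[src] / (float)(end - begin);
        for (std::size_t e = begin; e < end; ++e)
            rank_next[column_indices[e]] += share;  // push rank along each out-edge
    }
    // Repeating this until the per-vertex change drops below error * rank[v]
    // mirrors the convergence test used by the reference checker above.
}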
825e374ffc5288a0c6fe1a8092773b6df877030c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Purpose: Times 1-d Stencil on CPU/GPU for array size n (integer)
 *
 * Author: Gurpal Singh
 * Date: 3/22/2017
 * To Compile: nvcc task3.cu -arch=sm_30 -o task3.exe
 * To Run: ./task3.exe
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/resource.h>
#include "timer.h"

//Defining the Radius and BlockSize
#define RADIUS 3
#define BLOCK_SIZE 256

//GPU Kernel
__global__ void stencil_GPU(float *in, float *out) {
    __shared__ float temp[BLOCK_SIZE + 2 * RADIUS]; //Shared variable (float, to match the input/output type)
    int gindex = threadIdx.x + blockIdx.x * blockDim.x; //Global index of this thread's element
    int lindex = threadIdx.x + RADIUS;                  //Local index inside the shared tile

    //Read input elements into shared memory
    temp[lindex] = in[gindex];
    if (threadIdx.x < RADIUS) {
        temp[lindex - RADIUS]     = in[gindex - RADIUS];     //left halo
        temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; //right halo
    }

    //Synchronize to ensure all data is available
    __syncthreads();

    //Apply the Stencil
    float result = 0.f;
    for (int offset = -RADIUS; offset <= RADIUS; offset++)
        result += temp[lindex + offset];

    //Store the Result
    out[gindex] = result;
}

//CPU Function
void stencil_CPU(int n, float *a, float *b) {
    int i, j;
    //Stencil 1-d Computation
    for (i = RADIUS; i < n - RADIUS; i++) {
        float stencil_sum = 0.f;
        for (j = -RADIUS; j <= RADIUS; j++) {
            stencil_sum += a[i + j];
        }
        b[i] = stencil_sum;
    }
}

int main(void) {
    //Scan for array size
    int n;
    printf("Enter the integer value for n: ");
    scanf("%d", &n);

    //Initialize pointers
    float *x, *y;

    //Allocate Memory: n elements plus RADIUS halo cells on each side
    hipMallocManaged(&x, (n + 2 * RADIUS) * sizeof(float));
    hipMallocManaged(&y, (n + 2 * RADIUS) * sizeof(float));

    //Initialize Array Values (including the halo cells)
    for (int i = 0; i < n + 2 * RADIUS; i++) {
        x[i] = 1;
        y[i] = 0;
    }

    //Timing the CPU Function
    StartTimer();
    stencil_CPU(n, x, y);
    float CPU_time = GetTimer();  //Get the elapsed CPU time
    CPU_time = CPU_time * 1000;   //Converting seconds to ms
    printf("CPU y[100] = %.2f\n", y[100]);
    printf("elapsed wall time (CPU): %.2f ms\n", CPU_time);

    //Timing the GPU Kernel
    hipEvent_t timeStart, timeStop; //WARNING!!! use events only to time the device
    hipEventCreate(&timeStart);
    hipEventCreate(&timeStop);
    float elapsedTime; // make sure it is of type float, precision is milliseconds (ms) !!!

    int blockSize = 256;
    int nBlocks = (n + blockSize - 1) / blockSize; //round up if n is not a multiple of blocksize

    hipEventRecord(timeStart, 0); //don't worry for the 2nd argument zero, it is about cuda streams

    hipLaunchKernelGGL(stencil_GPU, dim3(nBlocks), dim3(blockSize), 0, 0, x + RADIUS, y + RADIUS);
    hipDeviceSynchronize();
    printf("GPU y[100] = %.2f\n", y[100]);

    hipEventRecord(timeStop, 0);
    hipEventSynchronize(timeStop);

    //WARNING!!! do not simply print (timeStop-timeStart)!!
    hipEventElapsedTime(&elapsedTime, timeStart, timeStop);
    printf("elapsed wall time (GPU) = %.2f ms\n", elapsedTime);
    hipEventDestroy(timeStart);
    hipEventDestroy(timeStop);

    //Verify the results are correct
    int i;
    for (i = 3; i < n - 3; ++i) {
        if (y[i] != 7) {
            printf("Element y[%d] == %g != 7\n", i, y[i]);
            break;
        }
    }
    if (i == n - 3) {
        printf("SUCCESS!\n");
    }

    //Writing the Results to a File
    FILE *fptr = fopen("Task3_Result.txt", "a+");
    if (fptr == NULL) {
        printf("Error!");
        exit(1);
    }
    fprintf(fptr, "\n");
    fprintf(fptr, "Vector Size: %d\n", n);
    fprintf(fptr, "elapsed wall time (CPU) = %.2f ms\n", CPU_time);
    fprintf(fptr, "elapsed wall time (GPU) = %.2f ms\n", elapsedTime);
    fclose(fptr);

    //Cleaning Up
    hipFree(x);
    hipFree(y);

    return EXIT_SUCCESS;
}
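// ---------------------------------------------------------------------------
// Editor's sketch (an aside, not part of the original assignment): once the
// kernel above is launched on the offset pointer x + RADIUS, each block of
// BLOCK_SIZE threads reads its own BLOCK_SIZE elements plus a RADIUS-wide halo
// on each side. With BLOCK_SIZE = 256 and RADIUS = 3, block b reads
// in[256*b - 3 .. 256*b + 258]. The helper below just prints those ranges so
// the halo arithmetic can be inspected on the host; its name and signature are
// illustrative assumptions.
// ---------------------------------------------------------------------------
#include <cstdio>

static void print_block_halo_ranges(int n, int block_size, int radius)
{
    int nBlocks = (n + block_size - 1) / block_size;
    for (int b = 0; b < nBlocks; ++b) {
        int first = b * block_size - radius;                   // leftmost element read (left halo)
        int last  = b * block_size + block_size - 1 + radius;  // rightmost element read (right halo)
        printf("block %d reads in[%d .. %d]\n", b, first, last);
    }
}
// Usage sketch: print_block_halo_ranges(n, BLOCK_SIZE, RADIUS);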
825e374ffc5288a0c6fe1a8092773b6df877030c.cu
/*
 * Purpose: Times 1-d Stencil on CPU/GPU for array size n (integer)
 *
 * Author: Gurpal Singh
 * Date: 3/22/2017
 * To Compile: nvcc task3.cu -arch=sm_30 -o task3.exe
 * To Run: ./task3.exe
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/resource.h>
#include "timer.h"

//Defining the Radius and BlockSize
#define RADIUS 3
#define BLOCK_SIZE 256

//GPU Kernel
__global__ void stencil_GPU(float *in, float *out) {
    __shared__ float temp[BLOCK_SIZE + 2 * RADIUS]; //Shared variable (float, to match the input/output type)
    int gindex = threadIdx.x + blockIdx.x * blockDim.x; //Global index of this thread's element
    int lindex = threadIdx.x + RADIUS;                  //Local index inside the shared tile

    //Read input elements into shared memory
    temp[lindex] = in[gindex];
    if (threadIdx.x < RADIUS) {
        temp[lindex - RADIUS]     = in[gindex - RADIUS];     //left halo
        temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; //right halo
    }

    //Synchronize to ensure all data is available
    __syncthreads();

    //Apply the Stencil
    float result = 0.f;
    for (int offset = -RADIUS; offset <= RADIUS; offset++)
        result += temp[lindex + offset];

    //Store the Result
    out[gindex] = result;
}

//CPU Function
void stencil_CPU(int n, float *a, float *b) {
    int i, j;
    //Stencil 1-d Computation
    for (i = RADIUS; i < n - RADIUS; i++) {
        float stencil_sum = 0.f;
        for (j = -RADIUS; j <= RADIUS; j++) {
            stencil_sum += a[i + j];
        }
        b[i] = stencil_sum;
    }
}

int main(void) {
    //Scan for array size
    int n;
    printf("Enter the integer value for n: ");
    scanf("%d", &n);

    //Initialize pointers
    float *x, *y;

    //Allocate Memory: n elements plus RADIUS halo cells on each side
    cudaMallocManaged(&x, (n + 2 * RADIUS) * sizeof(float));
    cudaMallocManaged(&y, (n + 2 * RADIUS) * sizeof(float));

    //Initialize Array Values (including the halo cells)
    for (int i = 0; i < n + 2 * RADIUS; i++) {
        x[i] = 1;
        y[i] = 0;
    }

    //Timing the CPU Function
    StartTimer();
    stencil_CPU(n, x, y);
    float CPU_time = GetTimer();  //Get the elapsed CPU time
    CPU_time = CPU_time * 1000;   //Converting seconds to ms
    printf("CPU y[100] = %.2f\n", y[100]);
    printf("elapsed wall time (CPU): %.2f ms\n", CPU_time);

    //Timing the GPU Kernel
    cudaEvent_t timeStart, timeStop; //WARNING!!! use events only to time the device
    cudaEventCreate(&timeStart);
    cudaEventCreate(&timeStop);
    float elapsedTime; // make sure it is of type float, precision is milliseconds (ms) !!!

    int blockSize = 256;
    int nBlocks = (n + blockSize - 1) / blockSize; //round up if n is not a multiple of blocksize

    cudaEventRecord(timeStart, 0); //don't worry for the 2nd argument zero, it is about cuda streams

    stencil_GPU <<< nBlocks, blockSize >>> (x + RADIUS, y + RADIUS);
    cudaDeviceSynchronize();
    printf("GPU y[100] = %.2f\n", y[100]);

    cudaEventRecord(timeStop, 0);
    cudaEventSynchronize(timeStop);

    //WARNING!!! do not simply print (timeStop-timeStart)!!
    cudaEventElapsedTime(&elapsedTime, timeStart, timeStop);
    printf("elapsed wall time (GPU) = %.2f ms\n", elapsedTime);
    cudaEventDestroy(timeStart);
    cudaEventDestroy(timeStop);

    //Verify the results are correct
    int i;
    for (i = 3; i < n - 3; ++i) {
        if (y[i] != 7) {
            printf("Element y[%d] == %g != 7\n", i, y[i]);
            break;
        }
    }
    if (i == n - 3) {
        printf("SUCCESS!\n");
    }

    //Writing the Results to a File
    FILE *fptr = fopen("Task3_Result.txt", "a+");
    if (fptr == NULL) {
        printf("Error!");
        exit(1);
    }
    fprintf(fptr, "\n");
    fprintf(fptr, "Vector Size: %d\n", n);
    fprintf(fptr, "elapsed wall time (CPU) = %.2f ms\n", CPU_time);
    fprintf(fptr, "elapsed wall time (GPU) = %.2f ms\n", elapsedTime);
    fclose(fptr);

    //Cleaning Up
    cudaFree(x);
    cudaFree(y);

    return EXIT_SUCCESS;
}
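// ---------------------------------------------------------------------------
// Editor's sketch explaining the "!= 7" check in the verification loop above
// (a minimal aside, not part of the original file): with every input element
// set to 1 and a symmetric stencil of radius RADIUS, each interior output is
// the sum of 2*RADIUS + 1 ones, i.e. 7 when RADIUS == 3. The helper below
// derives that value and scans the interior; its name is an assumption.
// ---------------------------------------------------------------------------
static int verify_interior(const float *y, int n, int radius)
{
    float expected = (float)(2 * radius + 1); // all-ones input => window sum = window width
    for (int i = radius; i < n - radius; ++i)
        if (y[i] != expected)
            return i;                          // index of the first mismatch
    return -1;                                 // -1 means every interior element matched
}
// Usage sketch: if (verify_interior(y, n, RADIUS) < 0) printf("SUCCESS!\n");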
b42d68ccac5a2e4091a49476298943fd1a28c16e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/Dispatch.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Pool.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/NumericLimits.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <c10/macros/Macros.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/max_pool3d_with_indices_native.h> #include <ATen/ops/max_pool3d_with_indices_backward_native.h> #endif namespace at::native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( const scalar_t* inputData, scalar_t* outputData, int64_t* indicesData, int features, int itime, int iheight, int iwidth, int obatch, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ, bool channels_last) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = 0; // used only for channels-first indexing int64_t slice = 0; // used only for channels-last indexing int batch = 0; int channel = 0; if (!channels_last) { // indexing order: batch, channel, time oFrame = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) % otime; // output frame/time slice = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) / otime; // output slice/feature } else { // indexing order: batch, time, channel channel = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) % features; // output feature (channel) slice = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) / features; // output slice (batch + time) batch = slice / otime; oFrame = slice % otime; } // For int64_t data type, see https://github.com/pytorch/pytorch/issues/52822 if (oRow < oheight && oColumn < owidth && oFrame < otime && channel < features && batch < obatch) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; // maxIndex remains in "channels-first"/contiguous int64_t maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart; if (!channels_last) { inputData += (int64_t) slice * itime * iheight * iwidth; } else { inputData += ((int64_t) batch * itime * iheight * iwidth * features) + channel; } scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { scalar_t val; int index = t * iheight * iwidth + h * iwidth + w; if (!channels_last) { val = inputData[index]; } else { int64_t index_channels_last = index*features; val = inputData[index_channels_last]; } if ((max < val) || at::_isnan(val)) { max = val; maxIndex = index; } } } } int64_t out_index; if 
(!channels_last) { out_index = (int64_t) slice*otime*oheight*owidth + oFrame*oheight*owidth + oRow*owidth + oColumn; } else { out_index = ((int64_t) batch*otime*oheight*owidth + oFrame*oheight*owidth + oRow*owidth + oColumn)*features + channel; } outputData[out_index] = max; indicesData[out_index] = maxIndex; } } template <typename scalar_t> void max_pool3d_with_indices_out_frame( const scalar_t* input_data, const Tensor& output, const Tensor& indices, int features, int64_t totalZ, int itime, int iheight, int iwidth, int obatch, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, bool channels_last) { int offsetZ = 0; int threadX = 32; int threadY = 8; int threadZ = 1; int stepZ = 65535; if (channels_last) { threadX = 2; threadY = 4; threadZ = 64; } dim3 block(threadX, threadY, threadZ); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > stepZ*threadZ ? stepZ : ceil_div(totalZ, static_cast<int64_t>(threadZ))); hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output.mutable_data_ptr<scalar_t>(), indices.mutable_data_ptr<int64_t>(), features, itime, iheight, iwidth, obatch, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ, channels_last); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= threadZ*stepZ; offsetZ += threadZ*stepZ; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, const scalar_t *gradOutputData, const int64_t *indicesData, int features, int itime, int iheight, int iwidth, int obatch, int otime, int oheight, int owidth, int offsetZ, bool channels_last) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = 0; // used only for channels-first indexing int64_t slice = 0; // used only for channels-last indexing int batch = 0; int channel = 0; if (!channels_last) { // indexing order: batch, channel, time oFrame = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) % otime; // output frame/time slice = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) / otime; // output slice/feature } else { // indexing order: batch, time, channel channel = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) % features; // output feature (channel) slice = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) / features; // output slice (batch + time) batch = slice / otime; oFrame = slice % otime; } if (oRow < oheight && oColumn < owidth && oFrame < otime && batch < obatch && channel < features) { int64_t out_index; if (!channels_last) { out_index = (int64_t) slice*otime*oheight*owidth + oFrame*oheight*owidth + oRow*owidth + oColumn; } else { out_index = ((int64_t) batch*otime*oheight*owidth + oFrame*oheight*owidth + oRow*owidth + oColumn)*features + channel; } int64_t maxIndex = indicesData[out_index]; if (maxIndex != -1) { if (!channels_last) { gpuAtomicAddNoReturn(&gradInputData[(int64_t) slice * itime * iheight * iwidth + maxIndex], gradOutputData[out_index]); } else { gpuAtomicAddNoReturn(&gradInputData[((int64_t) batch * itime * iheight * iwidth + maxIndex) * features + channel], gradOutputData[out_index]); } } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t 
*gradInputData, const Tensor& gradOutput, const Tensor& indices, int features, int64_t totalZ, int itime, int iheight, int iwidth, int obatch, int otime, int oheight, int owidth, bool channels_last) { int offsetZ = 0; int threadX = 32; int threadY = 8; int threadZ = 1; int stepZ = 65535; if (channels_last) { threadX = 2; threadY = 4; threadZ = 64; } dim3 block(threadX, threadY, threadZ); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > stepZ*threadZ ? stepZ : ceil_div(totalZ, static_cast<int64_t>(block.z))); hipLaunchKernelGGL(( max_pool3d_with_indices_backward_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInputData, gradOutput.const_data_ptr<scalar_t>(), indices.const_data_ptr<int64_t>(), features, itime, iheight, iwidth, obatch, otime, oheight, owidth, offsetZ, channels_last); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= threadZ*stepZ; offsetZ += threadZ*stepZ; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? 
input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_out_cuda_template()"); bool channels_last = input.ndimension() == 5 && input.suggest_memory_format() == at::MemoryFormat::ChannelsLast3d; Tensor _input = input; if (input.ndimension() == 4) { Tensor input_channels_last_check = input.unsqueeze(0); // work around buggy behavior of suggest_memory_format here where // suggested format of unsqueezed tensor is contiguous while it is // really only contiguous in ChannelsLast3d channels_last = (!input_channels_last_check.is_contiguous()) && input_channels_last_check.is_contiguous(at::MemoryFormat::ChannelsLast3d); if (!channels_last) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { _input = input_channels_last_check; output.resize_({1, nslices, otime, oheight, owidth}, at::MemoryFormat::ChannelsLast3d); indices.resize_({1, nslices, otime, oheight, owidth}, at::MemoryFormat::ChannelsLast3d); output = output.squeeze(0); indices = indices.squeeze(0); } } else { if (!channels_last) { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}, at::MemoryFormat::ChannelsLast3d); indices.resize_({nbatch, nslices, otime, oheight, owidth}, at::MemoryFormat::ChannelsLast3d); } } if (input.numel() == 0) { return; } Tensor work_input; Tensor work_output = output; if (!channels_last) { work_input = input.contiguous(); } else { work_input = _input.contiguous(at::MemoryFormat::ChannelsLast3d); } Tensor work_indices = indices; AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ const scalar_t *input_data = work_input.const_data_ptr<scalar_t>(); const int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, nslices, // features totalZ, itime, iheight, iwidth, nbatch, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, channels_last); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU(__func__, {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? 
kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D input tensor, but got ", input.sizes()); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D gradOutput tensor, but got ", gradOutput.sizes()); // Resize and initialize result tensor. bool channels_last = input.ndimension() == 5 && input.suggest_memory_format() == at::MemoryFormat::ChannelsLast3d; Tensor _input = input; if (input.ndimension() == 4) { Tensor input_channels_last_check = input.unsqueeze(0); // work around buggy behavior of suggest_memory_format here where // suggested format of unsqueezed tensor is contiguous while it is // really only contiguous in ChannelsLast3d channels_last = (!input_channels_last_check.is_contiguous()) && input_channels_last_check.is_contiguous(at::MemoryFormat::ChannelsLast3d); if (channels_last) { _input = input_channels_last_check; } } if (!channels_last) { gradInput.resize_as_(input); } else { gradInput.resize_as_(_input, at::MemoryFormat::ChannelsLast3d); } gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? 
input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_backward_out_cuda_template()"); if (gradOutput.numel() == 0) { return; } Tensor work_grad_input = gradInput; Tensor work_grad_output; Tensor work_indices; if (!channels_last) { work_grad_output = gradOutput.contiguous(); work_indices = indices.contiguous(); } else { if (input.ndimension() == 4) { work_grad_output = gradOutput.unsqueeze(0).contiguous(at::MemoryFormat::ChannelsLast3d); work_indices = indices.unsqueeze(0).contiguous(at::MemoryFormat::ChannelsLast3d); } else { work_grad_output = gradOutput.contiguous(at::MemoryFormat::ChannelsLast3d); work_indices = indices.contiguous(at::MemoryFormat::ChannelsLast3d); } } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.mutable_data_ptr<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, nslices, totalZ, itime, iheight, iwidth, nbatch, otime, oheight, owidth, channels_last); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor& output, Tensor& indices) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda"); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage 
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda"); auto gradInput = at::empty(input.sizes(), input.options()); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native
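// ---------------------------------------------------------------------------
// Editor's sketch: max_pool3d_with_indices_out_cuda_template above derives
// otime/oheight/owidth via pooling_output_shape. The arithmetic per spatial
// dimension is, in essence,
//
//   out = floor((in + 2*pad - dilation*(kernel - 1) - 1) / stride) + 1   (ceil_mode == false)
//   out = ceil ((in + 2*pad - dilation*(kernel - 1) - 1) / stride) + 1   (ceil_mode == true)
//
// with a ceil_mode correction so the last window still starts inside the padded
// input. This is a hedged standalone sketch; the real helper lives in
// ATen/native/Pool.h and may handle edge cases slightly differently.
// ---------------------------------------------------------------------------
#include <cstdint>

static int64_t pooling_output_size_sketch(
    int64_t in, int64_t kernel, int64_t pad, int64_t stride, int64_t dilation, bool ceil_mode)
{
    // Integer division truncates; adding (stride - 1) turns floor into ceil when ceil_mode is set.
    int64_t numerator = in + 2 * pad - dilation * (kernel - 1) - 1 + (ceil_mode ? stride - 1 : 0);
    int64_t out = numerator / stride + 1;
    if (ceil_mode && (out - 1) * stride >= in + pad)
        --out; // drop a window that would start entirely inside the padding
    return out;
}
// Example: in = 13, kernel = 3, pad = 1, stride = 2, dilation = 1, ceil_mode = false  ->  out = 7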
b42d68ccac5a2e4091a49476298943fd1a28c16e.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/Dispatch.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Pool.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/macros/Macros.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/max_pool3d_with_indices_native.h> #include <ATen/ops/max_pool3d_with_indices_backward_native.h> #endif namespace at::native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( const scalar_t* inputData, scalar_t* outputData, int64_t* indicesData, int features, int itime, int iheight, int iwidth, int obatch, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ, bool channels_last) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = 0; // used only for channels-first indexing int64_t slice = 0; // used only for channels-last indexing int batch = 0; int channel = 0; if (!channels_last) { // indexing order: batch, channel, time oFrame = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) % otime; // output frame/time slice = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) / otime; // output slice/feature } else { // indexing order: batch, time, channel channel = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) % features; // output feature (channel) slice = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) / features; // output slice (batch + time) batch = slice / otime; oFrame = slice % otime; } // For int64_t data type, see https://github.com/pytorch/pytorch/issues/52822 if (oRow < oheight && oColumn < owidth && oFrame < otime && channel < features && batch < obatch) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; // maxIndex remains in "channels-first"/contiguous int64_t maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart; if (!channels_last) { inputData += (int64_t) slice * itime * iheight * iwidth; } else { inputData += ((int64_t) batch * itime * iheight * iwidth * features) + channel; } scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { scalar_t val; int index = t * iheight * iwidth + h * iwidth + w; if (!channels_last) { val = inputData[index]; } else { int64_t index_channels_last = index*features; val = inputData[index_channels_last]; } if ((max < val) || at::_isnan(val)) { max = val; maxIndex = index; } } } } int64_t out_index; if (!channels_last) { out_index = (int64_t) slice*otime*oheight*owidth + oFrame*oheight*owidth + 
oRow*owidth + oColumn; } else { out_index = ((int64_t) batch*otime*oheight*owidth + oFrame*oheight*owidth + oRow*owidth + oColumn)*features + channel; } outputData[out_index] = max; indicesData[out_index] = maxIndex; } } template <typename scalar_t> void max_pool3d_with_indices_out_frame( const scalar_t* input_data, const Tensor& output, const Tensor& indices, int features, int64_t totalZ, int itime, int iheight, int iwidth, int obatch, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, bool channels_last) { int offsetZ = 0; int threadX = 32; int threadY = 8; int threadZ = 1; int stepZ = 65535; if (channels_last) { threadX = 2; threadY = 4; threadZ = 64; } dim3 block(threadX, threadY, threadZ); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > stepZ*threadZ ? stepZ : ceil_div(totalZ, static_cast<int64_t>(threadZ))); max_pool3d_with_indices_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output.mutable_data_ptr<scalar_t>(), indices.mutable_data_ptr<int64_t>(), features, itime, iheight, iwidth, obatch, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ, channels_last); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= threadZ*stepZ; offsetZ += threadZ*stepZ; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, const scalar_t *gradOutputData, const int64_t *indicesData, int features, int itime, int iheight, int iwidth, int obatch, int otime, int oheight, int owidth, int offsetZ, bool channels_last) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = 0; // used only for channels-first indexing int64_t slice = 0; // used only for channels-last indexing int batch = 0; int channel = 0; if (!channels_last) { // indexing order: batch, channel, time oFrame = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) % otime; // output frame/time slice = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) / otime; // output slice/feature } else { // indexing order: batch, time, channel channel = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) % features; // output feature (channel) slice = (blockIdx.z * blockDim.z + threadIdx.z + offsetZ) / features; // output slice (batch + time) batch = slice / otime; oFrame = slice % otime; } if (oRow < oheight && oColumn < owidth && oFrame < otime && batch < obatch && channel < features) { int64_t out_index; if (!channels_last) { out_index = (int64_t) slice*otime*oheight*owidth + oFrame*oheight*owidth + oRow*owidth + oColumn; } else { out_index = ((int64_t) batch*otime*oheight*owidth + oFrame*oheight*owidth + oRow*owidth + oColumn)*features + channel; } int64_t maxIndex = indicesData[out_index]; if (maxIndex != -1) { if (!channels_last) { gpuAtomicAddNoReturn(&gradInputData[(int64_t) slice * itime * iheight * iwidth + maxIndex], gradOutputData[out_index]); } else { gpuAtomicAddNoReturn(&gradInputData[((int64_t) batch * itime * iheight * iwidth + maxIndex) * features + channel], gradOutputData[out_index]); } } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t *gradInputData, const Tensor& gradOutput, const Tensor& indices, int features, int64_t totalZ, int itime, int iheight, int iwidth, int obatch, 
int otime, int oheight, int owidth, bool channels_last) { int offsetZ = 0; int threadX = 32; int threadY = 8; int threadZ = 1; int stepZ = 65535; if (channels_last) { threadX = 2; threadY = 4; threadZ = 64; } dim3 block(threadX, threadY, threadZ); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > stepZ*threadZ ? stepZ : ceil_div(totalZ, static_cast<int64_t>(block.z))); max_pool3d_with_indices_backward_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( gradInputData, gradOutput.const_data_ptr<scalar_t>(), indices.const_data_ptr<int64_t>(), features, itime, iheight, iwidth, obatch, otime, oheight, owidth, offsetZ, channels_last); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= threadZ*stepZ; offsetZ += threadZ*stepZ; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? 
input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_out_cuda_template()"); bool channels_last = input.ndimension() == 5 && input.suggest_memory_format() == at::MemoryFormat::ChannelsLast3d; Tensor _input = input; if (input.ndimension() == 4) { Tensor input_channels_last_check = input.unsqueeze(0); // work around buggy behavior of suggest_memory_format here where // suggested format of unsqueezed tensor is contiguous while it is // really only contiguous in ChannelsLast3d channels_last = (!input_channels_last_check.is_contiguous()) && input_channels_last_check.is_contiguous(at::MemoryFormat::ChannelsLast3d); if (!channels_last) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { _input = input_channels_last_check; output.resize_({1, nslices, otime, oheight, owidth}, at::MemoryFormat::ChannelsLast3d); indices.resize_({1, nslices, otime, oheight, owidth}, at::MemoryFormat::ChannelsLast3d); output = output.squeeze(0); indices = indices.squeeze(0); } } else { if (!channels_last) { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}, at::MemoryFormat::ChannelsLast3d); indices.resize_({nbatch, nslices, otime, oheight, owidth}, at::MemoryFormat::ChannelsLast3d); } } if (input.numel() == 0) { return; } Tensor work_input; Tensor work_output = output; if (!channels_last) { work_input = input.contiguous(); } else { work_input = _input.contiguous(at::MemoryFormat::ChannelsLast3d); } Tensor work_indices = indices; AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ const scalar_t *input_data = work_input.const_data_ptr<scalar_t>(); const int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, nslices, // features totalZ, itime, iheight, iwidth, nbatch, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, channels_last); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU(__func__, {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? 
kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D input tensor, but got ", input.sizes()); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D gradOutput tensor, but got ", gradOutput.sizes()); // Resize and initialize result tensor. bool channels_last = input.ndimension() == 5 && input.suggest_memory_format() == at::MemoryFormat::ChannelsLast3d; Tensor _input = input; if (input.ndimension() == 4) { Tensor input_channels_last_check = input.unsqueeze(0); // work around buggy behavior of suggest_memory_format here where // suggested format of unsqueezed tensor is contiguous while it is // really only contiguous in ChannelsLast3d channels_last = (!input_channels_last_check.is_contiguous()) && input_channels_last_check.is_contiguous(at::MemoryFormat::ChannelsLast3d); if (channels_last) { _input = input_channels_last_check; } } if (!channels_last) { gradInput.resize_as_(input); } else { gradInput.resize_as_(_input, at::MemoryFormat::ChannelsLast3d); } gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? 
input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_backward_out_cuda_template()"); if (gradOutput.numel() == 0) { return; } Tensor work_grad_input = gradInput; Tensor work_grad_output; Tensor work_indices; if (!channels_last) { work_grad_output = gradOutput.contiguous(); work_indices = indices.contiguous(); } else { if (input.ndimension() == 4) { work_grad_output = gradOutput.unsqueeze(0).contiguous(at::MemoryFormat::ChannelsLast3d); work_indices = indices.unsqueeze(0).contiguous(at::MemoryFormat::ChannelsLast3d); } else { work_grad_output = gradOutput.contiguous(at::MemoryFormat::ChannelsLast3d); work_indices = indices.contiguous(at::MemoryFormat::ChannelsLast3d); } } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.mutable_data_ptr<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, nslices, totalZ, itime, iheight, iwidth, nbatch, otime, oheight, owidth, channels_last); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor& output, Tensor& indices) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda"); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage 
globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda"); auto gradInput = at::empty(input.sizes(), input.options()); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native
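
// The launchers above walk the flattened (batch * feature * otime) dimension in
// chunks because gridDim.z is capped at 65535. The following is a minimal,
// self-contained sketch of that chunking pattern (not taken from ATen; kernel
// and variable names here are illustrative only).
#include <cuda_runtime.h>
#include <cstdio>

static int ceil_div_host(int a, int b) { return (a + b - 1) / b; }

__global__ void touch_slices(int* out, int totalZ, int offsetZ) {
  int z = blockIdx.z * blockDim.z + threadIdx.z + offsetZ;   // global slice index
  if (z < totalZ && threadIdx.x == 0 && threadIdx.y == 0) {
    out[z] = z;                                              // one write per slice
  }
}

int main() {
  const int totalZ_full = 200000;          // larger than the 65535 grid.z limit
  int* d_out = nullptr;
  cudaMalloc((void**)&d_out, totalZ_full * sizeof(int));

  dim3 block(32, 8, 1);
  const int stepZ = 65535;
  int offsetZ = 0;
  long long remaining = totalZ_full;
  while (remaining > 0) {
    dim3 grid(1, 1,
              remaining > (long long)stepZ * block.z
                  ? stepZ
                  : ceil_div_host((int)remaining, (int)block.z));
    touch_slices<<<grid, block>>>(d_out, totalZ_full, offsetZ);
    remaining -= (long long)stepZ * block.z;
    offsetZ   += stepZ * block.z;
  }
  cudaDeviceSynchronize();
  cudaFree(d_out);
  printf("done\n");
  return 0;
}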
49ec048b1920cc1f19a50ab76af63b8a8b81a6c9.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <mutex>

#include "ExecutionWorker.h"

ExecutionWorker::ExecutionWorker(std::vector<int>& gpu_ids) {
  this->gpu_ids = gpu_ids;
  this->initialize(gpu_ids.size());
}

ExecutionWorker::~ExecutionWorker() {
  delete this->pool;
}

void ExecutionWorker::initialize(int gpu_num) {
  this->gpu_num = gpu_num;
  this->pool = new BS::thread_pool(gpu_num);

  // Test sending data between GPUs
  for (int d = 0; d < gpu_num; ++d) {
    size_t device_no = gpu_ids[d];
    hipSetDevice(device_no);

    // Check that this device can directly send data to the other devices
    for (size_t other_idx = 0; other_idx < gpu_num; other_idx++) {
      int other_id = gpu_ids[other_idx];
      if (device_no != other_id) {
        int canAccessPeer = 0;
        hipDeviceCanAccessPeer(&canAccessPeer, device_no, other_id);
        if (canAccessPeer) {
          // The second argument is a flags field and must be 0.
          hipDeviceEnablePeerAccess(other_id, 0);
        } else {
          std::cerr << "P2P from " << device_no << " to " << other_id
                    << " is not accessible\n";
        }
      }
    }
  }
}

BS::thread_pool *ExecutionWorker::getPool() {
  return this->pool;
}

std::vector<int> ExecutionWorker::getGpuIds() const {
  return this->gpu_ids;
}

int ExecutionWorker::getGpuId(int i) const {
  return this->gpu_ids[i];
}

size_t ExecutionWorker::size() const {
  return this->gpu_num;
}
49ec048b1920cc1f19a50ab76af63b8a8b81a6c9.cu
#include <iostream>
#include <mutex>

#include "ExecutionWorker.h"

ExecutionWorker::ExecutionWorker(std::vector<int>& gpu_ids) {
  this->gpu_ids = gpu_ids;
  this->initialize(gpu_ids.size());
}

ExecutionWorker::~ExecutionWorker() {
  delete this->pool;
}

void ExecutionWorker::initialize(int gpu_num) {
  this->gpu_num = gpu_num;
  this->pool = new BS::thread_pool(gpu_num);

  // Test sending data between GPUs
  for (int d = 0; d < gpu_num; ++d) {
    size_t device_no = gpu_ids[d];
    cudaSetDevice(device_no);

    // Check that this device can directly send data to the other devices
    for (size_t other_idx = 0; other_idx < gpu_num; other_idx++) {
      int other_id = gpu_ids[other_idx];
      if (device_no != other_id) {
        int canAccessPeer = 0;
        cudaDeviceCanAccessPeer(&canAccessPeer, device_no, other_id);
        if (canAccessPeer) {
          // The second argument is a flags field and must be 0.
          cudaDeviceEnablePeerAccess(other_id, 0);
        } else {
          std::cerr << "P2P from " << device_no << " to " << other_id
                    << " is not accessible\n";
        }
      }
    }
  }
}

BS::thread_pool *ExecutionWorker::getPool() {
  return this->pool;
}

std::vector<int> ExecutionWorker::getGpuIds() const {
  return this->gpu_ids;
}

int ExecutionWorker::getGpuId(int i) const {
  return this->gpu_ids[i];
}

size_t ExecutionWorker::size() const {
  return this->gpu_num;
}
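
// Minimal standalone sketch of the peer-access probe performed in
// ExecutionWorker::initialize(), without the thread-pool dependency.
// It uses only the public CUDA runtime API; device ids are whatever
// cudaGetDeviceCount reports on the machine at hand.
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  int n = 0;
  cudaGetDeviceCount(&n);
  for (int src = 0; src < n; ++src) {
    cudaSetDevice(src);
    for (int dst = 0; dst < n; ++dst) {
      if (src == dst) continue;
      int canAccess = 0;
      cudaDeviceCanAccessPeer(&canAccess, src, dst);
      if (canAccess) {
        // Second argument is a flags field and must be 0.
        cudaError_t err = cudaDeviceEnablePeerAccess(dst, 0);
        printf("P2P %d -> %d: %s\n", src, dst,
               err == cudaSuccess ? "enabled" : cudaGetErrorString(err));
      } else {
        printf("P2P %d -> %d: not accessible\n", src, dst);
      }
    }
  }
  return 0;
}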
d01fd83ce9f9225e1138edfdf55f19594586e239.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMode.cu" #else THC_API void THCTensor_(calculateMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position) { THAssert(THCTensor_(isContiguous)(state, input)); // Because the input is contiguous, we want to get a reference to the // location of the buffer at the innermost dimension that we are going // to calculate the mode for --> we do this by manually doing the stride // calculations to get an offset real *data = THCTensor_(data)(state, input); for (int i = 0; i < THLongStorage_size(position); ++i) { data += THLongStorage_data(position)[i] * THCTensor_(stride)(state, input, i); } int64_t nElement = THCTensor_(size)(state, input, THCTensor_(nDimension)(state, input) - 1); THCThrustAllocator thrustAlloc(state); // Wrap input data, sortBuffer, in Thrust device vectors thrust::device_ptr<real> vecPtr = thrust::device_pointer_cast(data); thrust::device_vector<real> iter(vecPtr, vecPtr + nElement); thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer)); thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement); // Fill sortBuffer with [0, 1, 2, ... nElement - 1] thrust::sequence( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif seq.begin(), seq.end()); // Sort the input data. The original indices of the data are stored in seq thrust::sort_by_key( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), seq.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfLess() #endif ); // Count # of unique elements via an inner product between adjacent elements. // Add 1 if two neighboring element are not equal. 
int unique = 1 + thrust::inner_product( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(), #if defined(THC_REAL_IS_HALF) ThrustHalfNotEqualTo() #else thrust::not_equal_to<real>() #endif ); // Count frequency of each element thrust::device_vector<real> keys(unique); thrust::device_vector<int> counts(unique); thrust::reduce_by_key( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), thrust::constant_iterator<int>(1), keys.begin(), counts.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfEqualTo() #endif ); // Find index of maximum count thrust::device_vector<int>::iterator it = thrust::max_element( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif counts.begin(), counts.end()); real mode = keys[it - counts.begin()]; // Find first index within which it occurs #if defined(THC_REAL_IS_HALF) thrust::device_vector<real>::iterator positionIter = thrust::find_if( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode)); #else thrust::device_vector<real>::iterator positionIter = thrust::find( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), mode); #endif THAssert(positionIter != iter.end()); int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()]; // Place mode, index in output ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values); int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices); for (int i = 0; i < THLongStorage_size(position); ++i) { int64_t pos = THLongStorage_data(position)[i]; valuesOffset += THCTensor_(stride)(state, values, i) * pos; indicesOffset += THCudaLongTensor_stride(state, indices, i) * pos; } THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode); THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index); } // this probably could be a loop, not a recursive algorithm THC_API void THCTensor_(dimApplyMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position, int curDim) { int64_t ndim = THCTensor_(nDimension)(state, input); // Because we have transposed the Tensor, the data for the dimension we are mode'ing along // is always in the innermost dimension if (curDim == ndim - 1) { THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position); } else { // Loop through the values and recurse for (int i = 0; i < THCTensor_(size)(state, input, curDim); ++i) { position->data[curDim] = i; THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1); } } } #define MAX_GRID_SIZE 65535 #define MAX_BLOCK_SIZE 1024 THC_API void THCTensor_(mode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, int dimension) { THLongStorage *dim; THCTensor *transposed, *contiguous, *valuesTransposed; THLongStorage *position; THCudaLongStorage *sortBuffer; THCudaLongTensor *indicesTransposed; int64_t ndim, sliceSize, slices; THAssert(THCTensor_(checkGPU)(state, 1, 
values)); // Verify they are asking for a valid dimension ndim = THCTensor_(nDimension)(state, input); THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension of out bounds"); sliceSize = THCTensor_(size)(state, input, dimension); slices = THCTensor_(nElement)(state, input) / sliceSize; // Resize output value, index Tensors to appropriate sizes (i.e. the same as // the input Tensor, except at dim=dimension, the size is 1) dim = THCTensor_(newSizeOf)(state, input); THLongStorage_set(dim, dimension, 1); THCTensor_(resize)(state, values, dim, NULL); THCudaLongTensor_resize(state, indices, dim, NULL); THLongStorage_free(dim); // If sliceSize is 1, copy input to values and set indices if (sliceSize == 1) { THCTensor_(copy)(state, values, input); THCudaLongTensor_fill(state, indices, TH_INDEX_BASE); return; } // Requirements for fused kernel implementation: // // 1. sliceSize <= 2 * max threads per block // 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for // a kernel launch // 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed) if (sliceSize <= MAX_BLOCK_SIZE && slices <= MAX_GRID_SIZE && TensorUtils<THCTensor>::canUse32BitIndexMath(state, input)) { // Beginning our optimized implementation. First thing we want to do is to transpose // the input Tensor along the sort dimension, and then make it contiguous transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1); // Set-up TensorInfo structs for passing to kernel TensorInfo<real, unsigned int> tiValues = getTensorInfo<THCTensor, unsigned int>(state, valuesTransposed); TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<THCudaLongTensor, unsigned int>(state, indicesTransposed); // The number of blocks is the number of slices that we need to calculate the mode for. Each block // is responsible for computing a single mode dim3 grid; THC_getGridFromTiles(slices, grid); // The blocksize is two elements per thread, rounded up to the nearest power of 2 int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize); // Macro that calls kernel --> note that we set the block dimensions here, and // the amount of shared memory #define HANDLE_MODE(SIZE) \ { \ dim3 blockSize(SIZE / 2); \ \ int memsize = (sizeof(real) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \ hipLaunchKernelGGL(( computeMode<real, SIZE>) \ , dim3(grid), dim3(blockSize), memsize, THCState_getCurrentStream(state), \ THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \ } // Tradeoff between compilation time and the number of specializations. 
Ideally we would have // one HANDLE_MODE for each power of 2 switch(ceilPowerOf2) { case 2048: HANDLE_MODE(2048) break; case 1024: case 512: case 256: HANDLE_MODE(1024) break; case 128: case 64: HANDLE_MODE(128) break; case 32: case 16: case 8: case 4: case 2: HANDLE_MODE(32) break; case 1: default: assert(false); } THCudaCheck(hipGetLastError()); THCTensor_(free)(state, transposed); THCTensor_(free)(state, contiguous); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); } else { // Beginning our naive implementation: We don't want to mutate the input Tensor, but // we need to be able to sort the inputs along the dimension in order to calculate the // mode. Additionally, its ideal if the data along the dimension is contiguous. So // we transpose the dimension with the innermost dimension and make a new contiguous // version that we can use. transposed = THCTensor_(newClone)(state, input); THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); THCTensor_(free)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1); // Position is a Storage that will store the dimension values we are processing position = THLongStorage_newWithSize(ndim - 1); // Sort Buffer is a Storage that will be used in the internal sort required to calculate // the mode efficiently sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize); // Call mode THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0); THCTensor_(free)(state, contiguous); THLongStorage_free(position); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); THCudaLongStorage_free(state, sortBuffer); } } #undef MAX_GRID_SIZE #undef MAX_BLOCK_SIZE #endif
d01fd83ce9f9225e1138edfdf55f19594586e239.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMode.cu" #else THC_API void THCTensor_(calculateMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position) { THAssert(THCTensor_(isContiguous)(state, input)); // Because the input is contiguous, we want to get a reference to the // location of the buffer at the innermost dimension that we are going // to calculate the mode for --> we do this by manually doing the stride // calculations to get an offset real *data = THCTensor_(data)(state, input); for (int i = 0; i < THLongStorage_size(position); ++i) { data += THLongStorage_data(position)[i] * THCTensor_(stride)(state, input, i); } int64_t nElement = THCTensor_(size)(state, input, THCTensor_(nDimension)(state, input) - 1); THCThrustAllocator thrustAlloc(state); // Wrap input data, sortBuffer, in Thrust device vectors thrust::device_ptr<real> vecPtr = thrust::device_pointer_cast(data); thrust::device_vector<real> iter(vecPtr, vecPtr + nElement); thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer)); thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement); // Fill sortBuffer with [0, 1, 2, ... nElement - 1] thrust::sequence( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif seq.begin(), seq.end()); // Sort the input data. The original indices of the data are stored in seq thrust::sort_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), seq.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfLess() #endif ); // Count # of unique elements via an inner product between adjacent elements. // Add 1 if two neighboring element are not equal. 
int unique = 1 + thrust::inner_product( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(), #if defined(THC_REAL_IS_HALF) ThrustHalfNotEqualTo() #else thrust::not_equal_to<real>() #endif ); // Count frequency of each element thrust::device_vector<real> keys(unique); thrust::device_vector<int> counts(unique); thrust::reduce_by_key( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), thrust::constant_iterator<int>(1), keys.begin(), counts.begin() #if defined(THC_REAL_IS_HALF) , ThrustHalfEqualTo() #endif ); // Find index of maximum count thrust::device_vector<int>::iterator it = thrust::max_element( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif counts.begin(), counts.end()); real mode = keys[it - counts.begin()]; // Find first index within which it occurs #if defined(THC_REAL_IS_HALF) thrust::device_vector<real>::iterator positionIter = thrust::find_if( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode)); #else thrust::device_vector<real>::iterator positionIter = thrust::find( #if CUDA_VERSION >= 7000 thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)), #else thrust::device, #endif iter.begin(), iter.end(), mode); #endif THAssert(positionIter != iter.end()); int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()]; // Place mode, index in output ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values); int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices); for (int i = 0; i < THLongStorage_size(position); ++i) { int64_t pos = THLongStorage_data(position)[i]; valuesOffset += THCTensor_(stride)(state, values, i) * pos; indicesOffset += THCudaLongTensor_stride(state, indices, i) * pos; } THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode); THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index); } // this probably could be a loop, not a recursive algorithm THC_API void THCTensor_(dimApplyMode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, THCudaLongStorage *sortBuffer, int dimension, THLongStorage *position, int curDim) { int64_t ndim = THCTensor_(nDimension)(state, input); // Because we have transposed the Tensor, the data for the dimension we are mode'ing along // is always in the innermost dimension if (curDim == ndim - 1) { THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position); } else { // Loop through the values and recurse for (int i = 0; i < THCTensor_(size)(state, input, curDim); ++i) { position->data[curDim] = i; THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1); } } } #define MAX_GRID_SIZE 65535 #define MAX_BLOCK_SIZE 1024 THC_API void THCTensor_(mode)(THCState *state, THCTensor *values, THCudaLongTensor *indices, THCTensor *input, int dimension) { THLongStorage *dim; THCTensor *transposed, *contiguous, *valuesTransposed; THLongStorage *position; THCudaLongStorage *sortBuffer; THCudaLongTensor *indicesTransposed; int64_t ndim, sliceSize, slices; THAssert(THCTensor_(checkGPU)(state, 1, values)); // Verify they are 
asking for a valid dimension ndim = THCTensor_(nDimension)(state, input); THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension of out bounds"); sliceSize = THCTensor_(size)(state, input, dimension); slices = THCTensor_(nElement)(state, input) / sliceSize; // Resize output value, index Tensors to appropriate sizes (i.e. the same as // the input Tensor, except at dim=dimension, the size is 1) dim = THCTensor_(newSizeOf)(state, input); THLongStorage_set(dim, dimension, 1); THCTensor_(resize)(state, values, dim, NULL); THCudaLongTensor_resize(state, indices, dim, NULL); THLongStorage_free(dim); // If sliceSize is 1, copy input to values and set indices if (sliceSize == 1) { THCTensor_(copy)(state, values, input); THCudaLongTensor_fill(state, indices, TH_INDEX_BASE); return; } // Requirements for fused kernel implementation: // // 1. sliceSize <= 2 * max threads per block // 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for // a kernel launch // 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed) if (sliceSize <= MAX_BLOCK_SIZE && slices <= MAX_GRID_SIZE && TensorUtils<THCTensor>::canUse32BitIndexMath(state, input)) { // Beginning our optimized implementation. First thing we want to do is to transpose // the input Tensor along the sort dimension, and then make it contiguous transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1); // Set-up TensorInfo structs for passing to kernel TensorInfo<real, unsigned int> tiValues = getTensorInfo<THCTensor, unsigned int>(state, valuesTransposed); TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<THCudaLongTensor, unsigned int>(state, indicesTransposed); // The number of blocks is the number of slices that we need to calculate the mode for. Each block // is responsible for computing a single mode dim3 grid; THC_getGridFromTiles(slices, grid); // The blocksize is two elements per thread, rounded up to the nearest power of 2 int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize); // Macro that calls kernel --> note that we set the block dimensions here, and // the amount of shared memory #define HANDLE_MODE(SIZE) \ { \ dim3 blockSize(SIZE / 2); \ \ int memsize = (sizeof(real) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \ computeMode<real, SIZE> \ <<<grid, blockSize, memsize, THCState_getCurrentStream(state)>>>( \ THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \ } // Tradeoff between compilation time and the number of specializations. 
Ideally we would have // one HANDLE_MODE for each power of 2 switch(ceilPowerOf2) { case 2048: HANDLE_MODE(2048) break; case 1024: case 512: case 256: HANDLE_MODE(1024) break; case 128: case 64: HANDLE_MODE(128) break; case 32: case 16: case 8: case 4: case 2: HANDLE_MODE(32) break; case 1: default: assert(false); } THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, transposed); THCTensor_(free)(state, contiguous); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); } else { // Beginning our naive implementation: We don't want to mutate the input Tensor, but // we need to be able to sort the inputs along the dimension in order to calculate the // mode. Additionally, its ideal if the data along the dimension is contiguous. So // we transpose the dimension with the innermost dimension and make a new contiguous // version that we can use. transposed = THCTensor_(newClone)(state, input); THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1); contiguous = THCTensor_(newContiguous)(state, transposed); THCTensor_(free)(state, transposed); // We also need to view the values and indices Tensors as transposed in order to // properly determine the offset into the underlying storage in which to place the // mode and index for a particular set of dimension values valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1); indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1); // Position is a Storage that will store the dimension values we are processing position = THLongStorage_newWithSize(ndim - 1); // Sort Buffer is a Storage that will be used in the internal sort required to calculate // the mode efficiently sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize); // Call mode THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0); THCTensor_(free)(state, contiguous); THLongStorage_free(position); THCTensor_(free)(state, valuesTransposed); THCudaLongTensor_free(state, indicesTransposed); THCudaLongStorage_free(state, sortBuffer); } } #undef MAX_GRID_SIZE #undef MAX_BLOCK_SIZE #endif
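
// Self-contained sketch (independent of the THC generics above) of the same
// Thrust pipeline calculateMode uses on one slice: sort the values, collapse
// equal runs with reduce_by_key over a constant_iterator of ones, then pick
// the key whose count is largest. Values here are arbitrary example data.
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/iterator/constant_iterator.h>
#include <cstdio>

int main() {
  float h[] = {3.f, 1.f, 2.f, 3.f, 3.f, 1.f, 2.f, 3.f};
  const int n = sizeof(h) / sizeof(h[0]);
  thrust::device_vector<float> vals(h, h + n);

  thrust::sort(vals.begin(), vals.end());

  // Worst case: every element is unique, so size the outputs to n.
  thrust::device_vector<float> keys(n);
  thrust::device_vector<int>   counts(n);
  auto ends = thrust::reduce_by_key(vals.begin(), vals.end(),
                                    thrust::constant_iterator<int>(1),
                                    keys.begin(), counts.begin());
  int unique = static_cast<int>(ends.first - keys.begin());

  // The mode is the key with the maximum run length.
  auto max_it = thrust::max_element(counts.begin(), counts.begin() + unique);
  float mode  = keys[max_it - counts.begin()];
  printf("mode = %f (count = %d of %d unique values)\n",
         mode, (int)*max_it, unique);
  return 0;
}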
538d1dcc8845f85d1cbfb650877f51fc4c5e2aed.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void sobel_gpu(QUANTUM_TYPE *img_out, QUANTUM_TYPE *img_in, int WIDTH, int HEIGHT) {
  int xc, yc;
  xc = blockDim.x*blockIdx.x + threadIdx.x;
  yc = blockDim.y*blockIdx.y + threadIdx.y;

  QUANTUM_TYPE LUp, LCnt, LDw, RUp, RCnt, RDw;
  int pixel;
  int xm1 = xc-1;
  int xp1 = xc+1;
  int ym1 = yc-1;
  int yp1 = yc+1;

  if (xc < WIDTH && yc < HEIGHT) {
    // Only read a neighbour when it lies inside the image; out-of-range
    // neighbours contribute 0, so border pixels never read outside the buffer.
    LUp  = (xm1 >= 0 && ym1 >= 0)          ? img_in[xm1 + ym1*WIDTH] : 0;
    LCnt = (xm1 >= 0)                      ? img_in[xm1 + yc*WIDTH]  : 0;
    LDw  = (xm1 >= 0 && yp1 < HEIGHT)      ? img_in[xm1 + yp1*WIDTH] : 0;
    RUp  = (xp1 < WIDTH && ym1 >= 0)       ? img_in[xp1 + ym1*WIDTH] : 0;
    RCnt = (xp1 < WIDTH)                   ? img_in[xp1 + yc*WIDTH]  : 0;
    RDw  = (xp1 < WIDTH && yp1 < HEIGHT)   ? img_in[xp1 + yp1*WIDTH] : 0;

    // Horizontal Sobel response, clamped to [0, MAXRGB].
    pixel = -1*LUp + 1*RUp + -2*LCnt + 2*RCnt + -1*LDw + 1*RDw;
    pixel = (pixel < 0) ? 0 : pixel;
    pixel = (pixel > MAXRGB) ? MAXRGB : pixel;
    img_out[xc + yc*WIDTH] = pixel;
  }
}
538d1dcc8845f85d1cbfb650877f51fc4c5e2aed.cu
__global__ void sobel_gpu(QUANTUM_TYPE *img_out, QUANTUM_TYPE *img_in, int WIDTH, int HEIGHT) {
  int xc, yc;
  xc = blockDim.x*blockIdx.x + threadIdx.x;
  yc = blockDim.y*blockIdx.y + threadIdx.y;

  QUANTUM_TYPE LUp, LCnt, LDw, RUp, RCnt, RDw;
  int pixel;
  int xm1 = xc-1;
  int xp1 = xc+1;
  int ym1 = yc-1;
  int yp1 = yc+1;

  if (xc < WIDTH && yc < HEIGHT) {
    // Only read a neighbour when it lies inside the image; out-of-range
    // neighbours contribute 0, so border pixels never read outside the buffer.
    LUp  = (xm1 >= 0 && ym1 >= 0)          ? img_in[xm1 + ym1*WIDTH] : 0;
    LCnt = (xm1 >= 0)                      ? img_in[xm1 + yc*WIDTH]  : 0;
    LDw  = (xm1 >= 0 && yp1 < HEIGHT)      ? img_in[xm1 + yp1*WIDTH] : 0;
    RUp  = (xp1 < WIDTH && ym1 >= 0)       ? img_in[xp1 + ym1*WIDTH] : 0;
    RCnt = (xp1 < WIDTH)                   ? img_in[xp1 + yc*WIDTH]  : 0;
    RDw  = (xp1 < WIDTH && yp1 < HEIGHT)   ? img_in[xp1 + yp1*WIDTH] : 0;

    // Horizontal Sobel response, clamped to [0, MAXRGB].
    pixel = -1*LUp + 1*RUp + -2*LCnt + 2*RCnt + -1*LDw + 1*RDw;
    pixel = (pixel < 0) ? 0 : pixel;
    pixel = (pixel > MAXRGB) ? MAXRGB : pixel;
    img_out[xc + yc*WIDTH] = pixel;
  }
}
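
// Hypothetical host-side launch for the kernel above (not from the original
// project). QUANTUM_TYPE and MAXRGB are not defined in this fragment; the
// sketch assumes they are provided elsewhere in the real build (e.g. 8-bit
// samples with MAXRGB == 255). d_in/d_out are device buffers of WIDTH*HEIGHT
// samples.
#include <cuda_runtime.h>

void launch_sobel(QUANTUM_TYPE *d_out, QUANTUM_TYPE *d_in, int width, int height) {
  dim3 block(16, 16);
  dim3 grid((width  + block.x - 1) / block.x,   // ceil(width  / 16)
            (height + block.y - 1) / block.y);  // ceil(height / 16)
  sobel_gpu<<<grid, block>>>(d_out, d_in, width, height);
  cudaDeviceSynchronize();                      // or synchronize on a stream
}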
f3a174449041bc0a606c8da76dea25bedfaef6bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hipfft.h> typedef float2 CComplex; typedef double2 ZComplex; template<typename T> static __device__ __host__ inline T operator+(const T a, const T b); template<typename T> static __device__ __host__ inline T operator*(const T a, const T b); // Complex addition template<typename T> static __device__ __host__ inline T operator+(const T a, const T b) { T c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } // Complex multiplication template<typename T> static __device__ __host__ inline T operator*(const T a, const T b) { T c; c.x = a.x*b.x - a.y*b.y; c.y = a.x*b.y + a.y*b.x; return c; } /// @brief Computes: \f$ X \leftarrow B X \f$. /// @param[in,out] x On input this is the spectra of the waveform. /// On exit, this is the spectra multiplied with the /// spectra of the filter - i.e., the convolution. /// This is an array whose dimension is [nw]. /// @param[in] b The spectra of the filter coefficients. /// This is an array whose dimension is [nw]. /// @param[in] nw The number of frequencies. static __global__ void multiplySpectra(CComplex *x, const CComplex *b, const int nw) { const int numThreads = blockDim.x*gridDim.x; const int threadID = blockIdx.x*blockDim.x + threadIdx.x; for (int i=threadID; i<nw; i=i+numThreads) { x[i] = x[i]*b[i]; } } struct cufft32z_struct { hipfftHandle mPlan; }; extern "C" void clear(struct cufft32z_struct *cuft); extern "C" void initialize(struct cufft32z_struct *cuft); void clear(struct cufft32z_struct *cuft) { hipfftDestroy(cuft->mPlan); } void initialize(struct cufft32z_struct *cuft) { //checkCudaErrors(hipfftMakePlan1d(plan_input, new_size, HIPFFT_C2C, 1, worksize)); }
f3a174449041bc0a606c8da76dea25bedfaef6bb.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cufft.h> typedef float2 CComplex; typedef double2 ZComplex; template<typename T> static __device__ __host__ inline T operator+(const T a, const T b); template<typename T> static __device__ __host__ inline T operator*(const T a, const T b); // Complex addition template<typename T> static __device__ __host__ inline T operator+(const T a, const T b) { T c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } // Complex multiplication template<typename T> static __device__ __host__ inline T operator*(const T a, const T b) { T c; c.x = a.x*b.x - a.y*b.y; c.y = a.x*b.y + a.y*b.x; return c; } /// @brief Computes: \f$ X \leftarrow B X \f$. /// @param[in,out] x On input this is the spectra of the waveform. /// On exit, this is the spectra multiplied with the /// spectra of the filter - i.e., the convolution. /// This is an array whose dimension is [nw]. /// @param[in] b The spectra of the filter coefficients. /// This is an array whose dimension is [nw]. /// @param[in] nw The number of frequencies. static __global__ void multiplySpectra(CComplex *x, const CComplex *b, const int nw) { const int numThreads = blockDim.x*gridDim.x; const int threadID = blockIdx.x*blockDim.x + threadIdx.x; for (int i=threadID; i<nw; i=i+numThreads) { x[i] = x[i]*b[i]; } } struct cufft32z_struct { cufftHandle mPlan; }; extern "C" void clear(struct cufft32z_struct *cuft); extern "C" void initialize(struct cufft32z_struct *cuft); void clear(struct cufft32z_struct *cuft) { cufftDestroy(cuft->mPlan); } void initialize(struct cufft32z_struct *cuft) { //checkCudaErrors(cufftMakePlan1d(plan_input, new_size, CUFFT_C2C, 1, worksize)); }
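
// Illustrative only: initialize() above is left stubbed, so this sketch shows
// one way a 1-D complex-to-complex plan could be created and executed with the
// public cuFFT API, reusing the cufft32z_struct declared above. The helper
// names and the sample count are assumptions made for this sketch.
static bool make_forward_plan(struct cufft32z_struct *cuft, int nSamples)
{
    // One batch, single-precision complex-to-complex transform.
    return cufftPlan1d(&cuft->mPlan, nSamples, CUFFT_C2C, 1) == CUFFT_SUCCESS;
}

static bool forward_transform(struct cufft32z_struct *cuft,
                              cufftComplex *d_inout)
{
    // In-place forward FFT on device memory.
    return cufftExecC2C(cuft->mPlan, d_inout, d_inout, CUFFT_FORWARD)
           == CUFFT_SUCCESS;
}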
00507e271d683ab587175e3ab00e17e4470f9ce1.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstddef> #include <cassert> #include <cmath> #include <stdio.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include "cutil.h" #include "cutil_inline_runtime.h" #include "common.h" #include "black_scholes.cuh" #include "timer.h" #include "bsconfig.h" using namespace std; __global__ void setup_rnd_kernel ( hiprandState_t * state, time_t seed ) { long id = (blockIdx.x * WINDOW_WIDTH) + threadIdx.x; hiprand_init ( seed, id, 0, &state[id] ); } __device__ double black_scholes_value (BSConfig cf, const double random_number) { const double current_value = cf.S * exp ( (cf.r - (cf.sigma*cf.sigma) / 2.0) * cf.T + cf.sigma * sqrt (cf.T) * random_number ); return exp (-cf.r * cf.T) * ((current_value - cf.E < 0.0) ? 0.0 : current_value - cf.E); } // standard normal distributed random number [0~1] __device__ gaussrand_result_t gaussrand (hiprandState_t* localState) { gaussrand_result_t result; double v1, v2, s; do { v1 = 2.0 * hiprand_uniform(localState) - 1.0; v2 = 2.0 * hiprand_uniform(localState) - 1.0; s = v1 * v1 + v2 * v2; } while (s >= 1 || s== 0); double w = sqrt ( (-2.0 * log (s)) / s); result.grand1 = v1 * w; result.grand2 = v2 * w; return result; } #ifdef __GOGO_DEBUG__ __global__ void black_scholes_kernel(double* blockMeans, double* cudaTrials, hiprandState_t* randStates, const double* fixedRands, double* debug, BSConfig config) { #else __global__ void black_scholes_kernel(double* blockMeans, double* cudaTrials, hiprandState_t* randStates, BSConfig config) { #endif __shared__ double means[WINDOW_WIDTH]; const long LOOP_SIZE = (long)1 < (config.M / (BLOCK_SIZE * WINDOW_WIDTH)) ? config.M / (BLOCK_SIZE * WINDOW_WIDTH) : 1; const unsigned int GID = (blockIdx.x * blockDim.x) * LOOP_SIZE + threadIdx.x * LOOP_SIZE; const unsigned int TID = threadIdx.x; hiprandState_t localState = randStates[(blockIdx.x * blockDim.x) + threadIdx.x]; gaussrand_result_t gresult; means[TID] = 0.0; // Do the Black-Scholes iterations for(long trial = 0; trial < LOOP_SIZE; trial++) { double value = 0.0; if (trial%2 == 0) { if (config.RND_MODE == 1) { gresult.grand1 = 1.0; gresult.grand2 = 1.0; } #ifdef __GOGO_DEBUG__ // use pre-generated random number else if (config.RND_MODE == 2) { gresult.grand1 = fixedRands[GID + trial]; gresult.grand2 = fixedRands[GID + trial+1]; if(config.DEBUG_LEVEL == 2) { debug[GID + trial] = gresult.grand1; debug[GID + trial + 1] = gresult.grand2; } } #endif // use gaussian random number (standard normal distributed) else { gresult = gaussrand (&localState); } value = black_scholes_value (config, gresult.grand1); } else { value = black_scholes_value (config, gresult.grand2); } // we need to keep origianl trial values for calculatng standard deviation // for current calculation, we use trials // Also, to prevent overflow caused by adding, divide the value by M in advance means[TID] += value/config.M; cudaTrials[GID + trial] = value; } for(unsigned int stride = blockDim.x>>1; stride > 0; stride >>= 1) { __syncthreads(); if (TID < stride) means[TID] += means[TID + stride]; } if(TID == 0) { blockMeans[blockIdx.x] = means[0]; } } __device__ void trunc(double* target) { if (*target < 0.0000000005 && *target > 0) *target = 0.0; else if (*target > -0.0000000005 && *target < 0) *target = 0.0; } #ifdef __GOGO_DEBUG__ __global__ void black_scholes_variance_kernel(const long M, const double mean, double* cudaTrials, double* cudaVariances, double* debug) { #else __global__ void 
black_scholes_variance_kernel(const long M, const double mean, double* cudaTrials, double* cudaVariances) { #endif __shared__ double variances[WINDOW_WIDTH]; const long LOOP_SIZE = (long)1 < (M / (BLOCK_SIZE * WINDOW_WIDTH)) ? M / (BLOCK_SIZE * WINDOW_WIDTH) : 1; const unsigned int GID = (blockIdx.x * blockDim.x) * LOOP_SIZE + threadIdx.x * LOOP_SIZE; const unsigned int TID = threadIdx.x; // Do the Black-Scholes iterations variances[TID] = 0; for(long trial = 0; trial < LOOP_SIZE; trial++) { double v = cudaTrials[GID + trial]; v = v - mean; // Meaningless value such as 1.1E-15 could lead invalid result // when number of trial is so high. Thus, truncate all after the 10th // decimal place. Even though we truncate them, the result still in // acceptable valid range trunc(&v); variances[TID] += (v * v) / (double)(M-1); #ifdef __GOGO_DEBUG__ debug[GID + trial] = v; #endif } for(unsigned int stride = WINDOW_WIDTH>>1; stride > 0; stride >>= 1) { __syncthreads(); if (stride > TID) variances[TID] += variances[TID + stride]; } if(TID == 0) { cudaVariances[blockIdx.x] = variances[0]; } } #ifdef __GOGO_DEBUG__ Result black_scholes(double* cudafixedRands, BSConfig config) { #else Result black_scholes(BSConfig config) { #endif Result result; double* means = new double[config.totalNumOfBlocks()]; double conf_width = 0.0; double t1, t2; assert (config.M > 0); long size = config.M * sizeof(double); dim3 dimGrid(config.totalNumOfBlocks()); dim3 dimBlock(WINDOW_WIDTH); // part5_start t1 = get_seconds(); hiprandState_t* randStates; cutilSafeCall(hipMalloc((void **) &randStates, config.totalNumOfThread() * sizeof(hiprandState_t))); hipLaunchKernelGGL(( setup_rnd_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, randStates, time(NULL)); t2 = get_seconds(); result.init_seeds_setup_time = t2 - t1; // part5_end // part3_begin t1 = get_seconds(); double* blockMeans; cutilSafeCall(hipMalloc((void**) &blockMeans, config.totalNumOfBlocks() * sizeof(double))); double* cudaTrials; cutilSafeCall(hipMalloc((void**) &cudaTrials, size)); #ifdef __GOGO_DEBUG__ double* hostDebug = new double[config.M]; double* cudaDebug; cutilSafeCall(hipMalloc((void**) &cudaDebug, size)); hipLaunchKernelGGL(( black_scholes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, blockMeans, cudaTrials, randStates, cudafixedRands, cudaDebug, config); #else hipLaunchKernelGGL(( black_scholes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, blockMeans, cudaTrials, randStates, config); #endif cutilSafeCall(hipMemcpy(means, blockMeans, config.totalNumOfBlocks() * sizeof(double), hipMemcpyDeviceToHost)); #ifdef __GOGO_DEBUG__ if (config.DEBUG_LEVEL == 2) { hipMemcpy(hostDebug, cudaDebug, size, hipMemcpyDeviceToHost); for (int i = 0; i < config.M; i++) { if(i < 10 || i > (config.M - 10)) printf("RND[%d]: %lf\n", i, hostDebug[i]); } puts("\n"); for (int i = 0; i < config.totalNumOfBlocks(); i++) { if(i < 10 || i > (config.M - 10)) printf("MEAN[%d]: %lf\n", i, means[i]); } puts(""); double* t = new double[config.M]; cutilSafeCall(hipMemcpy(t, cudaTrials, size, hipMemcpyDeviceToHost)); for (int i = 0; i < config.M; i++) { if(i < 10 || i > (config.M - 10)) printf("TRIAL[%d]: %lf\n", i, t[i]); } puts(""); delete [] t; } #endif t2 = get_seconds(); result.black_sholes_kernel_time = t2 - t1; // part3_end // part4_begin t1 = get_seconds(); result.mean = 0.0; // combine results from each blocks for (long i = 0; i < config.totalNumOfBlocks(); i++) { result.mean += means[i]; } result.stddev = black_scholes_stddev(result.mean, config, cudaTrials); t2 = get_seconds(); 
result.calc_stddev_time = t2 - t1; // part4_end // confidence interval conf_width = 1.96 * result.stddev / sqrt ((double) config.M); result.min = result.mean - conf_width; result.max = result.mean + conf_width; /* clean up */ #ifdef __GOGO_DEBUG__ hipFree(cudaDebug); #endif hipFree(cudaTrials); hipFree(blockMeans); hipFree(randStates); #ifdef __GOGO_DEBUG__ if(hostDebug != NULL) delete [] hostDebug; #endif if(means != NULL) delete [] means; return result; } double black_scholes_stddev (const double mean, BSConfig config, double* cudaTrials) { double* variances = new double[config.totalNumOfBlocks()]; double* cudaVariances; cutilSafeCall(hipMalloc((void**) &cudaVariances, (config.totalNumOfBlocks()) * sizeof(double))); dim3 dimGrid(config.totalNumOfBlocks()); dim3 dimBlock(WINDOW_WIDTH); double variance = 0.0; #ifdef __GOGO_DEBUG__ double* debug = new double[config.M]; double* cudaDebug; cutilSafeCall(hipMalloc((void**) &cudaDebug, config.M * sizeof(double))); hipLaunchKernelGGL(( black_scholes_variance_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, config.M, mean, cudaTrials, cudaVariances, cudaDebug); cutilSafeCall(hipMemcpy(variances, cudaVariances, config.totalNumOfBlocks() * sizeof(double), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(debug, cudaDebug, config.M * sizeof(double), hipMemcpyDeviceToHost)); for(long idx=0; idx<config.M; idx++) { if(config.DEBUG_LEVEL == 2) { if(idx < 10 || idx > (config.M - 10)) cout << "THR_VAR[" << idx << "]: " << debug[idx] << endl; } } cout << endl; hipFree(cudaDebug); delete [] debug; #else hipLaunchKernelGGL(( black_scholes_variance_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, config.M, mean, cudaTrials, cudaVariances); #endif cutilSafeCall(hipMemcpy(variances, cudaVariances, config.totalNumOfBlocks() * sizeof(double), hipMemcpyDeviceToHost)); for(long idx=0; idx<config.totalNumOfBlocks(); idx++) { #ifdef __GOGO_DEBUG__ if(config.DEBUG_LEVEL == 2) cout << "BLK_VARI[" << idx << "]: " << variances[idx] << endl; #endif variance += variances[idx]; } #ifdef __GOGO_DEBUG__ cout << endl; #endif hipFree(cudaVariances); delete [] variances; return sqrt(variance); }
00507e271d683ab587175e3ab00e17e4470f9ce1.cu
#include <iostream> #include <cstddef> #include <cassert> #include <cmath> #include <stdio.h> #include <cuda.h> #include <curand_kernel.h> #include "cutil.h" #include "cutil_inline_runtime.h" #include "common.h" #include "black_scholes.cuh" #include "timer.h" #include "bsconfig.h" using namespace std; __global__ void setup_rnd_kernel ( curandState * state, time_t seed ) { long id = (blockIdx.x * WINDOW_WIDTH) + threadIdx.x; curand_init ( seed, id, 0, &state[id] ); } __device__ double black_scholes_value (BSConfig cf, const double random_number) { const double current_value = cf.S * exp ( (cf.r - (cf.sigma*cf.sigma) / 2.0) * cf.T + cf.sigma * sqrt (cf.T) * random_number ); return exp (-cf.r * cf.T) * ((current_value - cf.E < 0.0) ? 0.0 : current_value - cf.E); } // standard normal distributed random number [0~1] __device__ gaussrand_result_t gaussrand (curandState* localState) { gaussrand_result_t result; double v1, v2, s; do { v1 = 2.0 * curand_uniform(localState) - 1.0; v2 = 2.0 * curand_uniform(localState) - 1.0; s = v1 * v1 + v2 * v2; } while (s >= 1 || s== 0); double w = sqrt ( (-2.0 * log (s)) / s); result.grand1 = v1 * w; result.grand2 = v2 * w; return result; } #ifdef __GOGO_DEBUG__ __global__ void black_scholes_kernel(double* blockMeans, double* cudaTrials, curandState* randStates, const double* fixedRands, double* debug, BSConfig config) { #else __global__ void black_scholes_kernel(double* blockMeans, double* cudaTrials, curandState* randStates, BSConfig config) { #endif __shared__ double means[WINDOW_WIDTH]; const long LOOP_SIZE = (long)1 < (config.M / (BLOCK_SIZE * WINDOW_WIDTH)) ? config.M / (BLOCK_SIZE * WINDOW_WIDTH) : 1; const unsigned int GID = (blockIdx.x * blockDim.x) * LOOP_SIZE + threadIdx.x * LOOP_SIZE; const unsigned int TID = threadIdx.x; curandState localState = randStates[(blockIdx.x * blockDim.x) + threadIdx.x]; gaussrand_result_t gresult; means[TID] = 0.0; // Do the Black-Scholes iterations for(long trial = 0; trial < LOOP_SIZE; trial++) { double value = 0.0; if (trial%2 == 0) { if (config.RND_MODE == 1) { gresult.grand1 = 1.0; gresult.grand2 = 1.0; } #ifdef __GOGO_DEBUG__ // use pre-generated random number else if (config.RND_MODE == 2) { gresult.grand1 = fixedRands[GID + trial]; gresult.grand2 = fixedRands[GID + trial+1]; if(config.DEBUG_LEVEL == 2) { debug[GID + trial] = gresult.grand1; debug[GID + trial + 1] = gresult.grand2; } } #endif // use gaussian random number (standard normal distributed) else { gresult = gaussrand (&localState); } value = black_scholes_value (config, gresult.grand1); } else { value = black_scholes_value (config, gresult.grand2); } // we need to keep origianl trial values for calculatng standard deviation // for current calculation, we use trials // Also, to prevent overflow caused by adding, divide the value by M in advance means[TID] += value/config.M; cudaTrials[GID + trial] = value; } for(unsigned int stride = blockDim.x>>1; stride > 0; stride >>= 1) { __syncthreads(); if (TID < stride) means[TID] += means[TID + stride]; } if(TID == 0) { blockMeans[blockIdx.x] = means[0]; } } __device__ void trunc(double* target) { if (*target < 0.0000000005 && *target > 0) *target = 0.0; else if (*target > -0.0000000005 && *target < 0) *target = 0.0; } #ifdef __GOGO_DEBUG__ __global__ void black_scholes_variance_kernel(const long M, const double mean, double* cudaTrials, double* cudaVariances, double* debug) { #else __global__ void black_scholes_variance_kernel(const long M, const double mean, double* cudaTrials, double* cudaVariances) { #endif 
__shared__ double variances[WINDOW_WIDTH]; const long LOOP_SIZE = (long)1 < (M / (BLOCK_SIZE * WINDOW_WIDTH)) ? M / (BLOCK_SIZE * WINDOW_WIDTH) : 1; const unsigned int GID = (blockIdx.x * blockDim.x) * LOOP_SIZE + threadIdx.x * LOOP_SIZE; const unsigned int TID = threadIdx.x; // Do the Black-Scholes iterations variances[TID] = 0; for(long trial = 0; trial < LOOP_SIZE; trial++) { double v = cudaTrials[GID + trial]; v = v - mean; // Meaningless value such as 1.1E-15 could lead invalid result // when number of trial is so high. Thus, truncate all after the 10th // decimal place. Even though we truncate them, the result still in // acceptable valid range trunc(&v); variances[TID] += (v * v) / (double)(M-1); #ifdef __GOGO_DEBUG__ debug[GID + trial] = v; #endif } for(unsigned int stride = WINDOW_WIDTH>>1; stride > 0; stride >>= 1) { __syncthreads(); if (stride > TID) variances[TID] += variances[TID + stride]; } if(TID == 0) { cudaVariances[blockIdx.x] = variances[0]; } } #ifdef __GOGO_DEBUG__ Result black_scholes(double* cudafixedRands, BSConfig config) { #else Result black_scholes(BSConfig config) { #endif Result result; double* means = new double[config.totalNumOfBlocks()]; double conf_width = 0.0; double t1, t2; assert (config.M > 0); long size = config.M * sizeof(double); dim3 dimGrid(config.totalNumOfBlocks()); dim3 dimBlock(WINDOW_WIDTH); // part5_start t1 = get_seconds(); curandState* randStates; cutilSafeCall(cudaMalloc((void **) &randStates, config.totalNumOfThread() * sizeof(curandState))); setup_rnd_kernel<<<dimGrid, dimBlock>>>(randStates, time(NULL)); t2 = get_seconds(); result.init_seeds_setup_time = t2 - t1; // part5_end // part3_begin t1 = get_seconds(); double* blockMeans; cutilSafeCall(cudaMalloc((void**) &blockMeans, config.totalNumOfBlocks() * sizeof(double))); double* cudaTrials; cutilSafeCall(cudaMalloc((void**) &cudaTrials, size)); #ifdef __GOGO_DEBUG__ double* hostDebug = new double[config.M]; double* cudaDebug; cutilSafeCall(cudaMalloc((void**) &cudaDebug, size)); black_scholes_kernel<<<dimGrid, dimBlock>>>(blockMeans, cudaTrials, randStates, cudafixedRands, cudaDebug, config); #else black_scholes_kernel<<<dimGrid, dimBlock>>>(blockMeans, cudaTrials, randStates, config); #endif cutilSafeCall(cudaMemcpy(means, blockMeans, config.totalNumOfBlocks() * sizeof(double), cudaMemcpyDeviceToHost)); #ifdef __GOGO_DEBUG__ if (config.DEBUG_LEVEL == 2) { cudaMemcpy(hostDebug, cudaDebug, size, cudaMemcpyDeviceToHost); for (int i = 0; i < config.M; i++) { if(i < 10 || i > (config.M - 10)) printf("RND[%d]: %lf\n", i, hostDebug[i]); } puts("\n"); for (int i = 0; i < config.totalNumOfBlocks(); i++) { if(i < 10 || i > (config.M - 10)) printf("MEAN[%d]: %lf\n", i, means[i]); } puts(""); double* t = new double[config.M]; cutilSafeCall(cudaMemcpy(t, cudaTrials, size, cudaMemcpyDeviceToHost)); for (int i = 0; i < config.M; i++) { if(i < 10 || i > (config.M - 10)) printf("TRIAL[%d]: %lf\n", i, t[i]); } puts(""); delete [] t; } #endif t2 = get_seconds(); result.black_sholes_kernel_time = t2 - t1; // part3_end // part4_begin t1 = get_seconds(); result.mean = 0.0; // combine results from each blocks for (long i = 0; i < config.totalNumOfBlocks(); i++) { result.mean += means[i]; } result.stddev = black_scholes_stddev(result.mean, config, cudaTrials); t2 = get_seconds(); result.calc_stddev_time = t2 - t1; // part4_end // confidence interval conf_width = 1.96 * result.stddev / sqrt ((double) config.M); result.min = result.mean - conf_width; result.max = result.mean + conf_width; /* clean up */ #ifdef 
__GOGO_DEBUG__ cudaFree(cudaDebug); #endif cudaFree(cudaTrials); cudaFree(blockMeans); cudaFree(randStates); #ifdef __GOGO_DEBUG__ if(hostDebug != NULL) delete [] hostDebug; #endif if(means != NULL) delete [] means; return result; } double black_scholes_stddev (const double mean, BSConfig config, double* cudaTrials) { double* variances = new double[config.totalNumOfBlocks()]; double* cudaVariances; cutilSafeCall(cudaMalloc((void**) &cudaVariances, (config.totalNumOfBlocks()) * sizeof(double))); dim3 dimGrid(config.totalNumOfBlocks()); dim3 dimBlock(WINDOW_WIDTH); double variance = 0.0; #ifdef __GOGO_DEBUG__ double* debug = new double[config.M]; double* cudaDebug; cutilSafeCall(cudaMalloc((void**) &cudaDebug, config.M * sizeof(double))); black_scholes_variance_kernel<<<dimGrid, dimBlock>>>(config.M, mean, cudaTrials, cudaVariances, cudaDebug); cutilSafeCall(cudaMemcpy(variances, cudaVariances, config.totalNumOfBlocks() * sizeof(double), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(debug, cudaDebug, config.M * sizeof(double), cudaMemcpyDeviceToHost)); for(long idx=0; idx<config.M; idx++) { if(config.DEBUG_LEVEL == 2) { if(idx < 10 || idx > (config.M - 10)) cout << "THR_VAR[" << idx << "]: " << debug[idx] << endl; } } cout << endl; cudaFree(cudaDebug); delete [] debug; #else black_scholes_variance_kernel<<<dimGrid, dimBlock>>>(config.M, mean, cudaTrials, cudaVariances); #endif cutilSafeCall(cudaMemcpy(variances, cudaVariances, config.totalNumOfBlocks() * sizeof(double), cudaMemcpyDeviceToHost)); for(long idx=0; idx<config.totalNumOfBlocks(); idx++) { #ifdef __GOGO_DEBUG__ if(config.DEBUG_LEVEL == 2) cout << "BLK_VARI[" << idx << "]: " << variances[idx] << endl; #endif variance += variances[idx]; } #ifdef __GOGO_DEBUG__ cout << endl; #endif cudaFree(cudaVariances); delete [] variances; return sqrt(variance); }
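
// Host-side reference (not part of the original benchmark) for the two device
// helpers above: the Marsaglia polar Gaussian sampler and the discounted call
// payoff. It can be used to sanity-check kernel results for small M.
// Parameter names (S, E, r, sigma, T) mirror BSConfig; the values are examples.
#include <cmath>
#include <cstdlib>
#include <cstdio>

static double gaussrand_host() {
  double v1, v2, s;
  do {
    v1 = 2.0 * ((double)rand() / RAND_MAX) - 1.0;
    v2 = 2.0 * ((double)rand() / RAND_MAX) - 1.0;
    s = v1 * v1 + v2 * v2;
  } while (s >= 1.0 || s == 0.0);
  return v1 * std::sqrt(-2.0 * std::log(s) / s);   // one of the two variates
}

static double payoff(double S, double E, double r, double sigma, double T,
                     double z) {
  double ST = S * std::exp((r - sigma * sigma / 2.0) * T
                           + sigma * std::sqrt(T) * z);
  double p = ST - E;
  return std::exp(-r * T) * (p < 0.0 ? 0.0 : p);   // discounted call payoff
}

int main() {
  const long M = 1000000;
  double S = 100.0, E = 100.0, r = 0.05, sigma = 0.2, T = 1.0;
  double mean = 0.0;
  for (long i = 0; i < M; ++i)
    mean += payoff(S, E, r, sigma, T, gaussrand_host()) / M;
  printf("Monte Carlo call price ~ %f\n", mean);
  return 0;
}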
3679513d3a28377b397149da344628ed4b841cfb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Drawer.h" #include "CudaError.hpp" __global__ void redrawOnDevice(GameOfLife game, uchar4* pixels) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(game.withinBounds(i,j)){ int index = game.findIndex(i,j); // RED if(game.cellDied(i,j)) pixels[index].x = 127; else if(pixels[index].x > 0) --pixels[index].x; // GREEN pixels[index].y = (game.isAliveOnDevice(i,j)) ? 255 : 0; // BLUE if(game.isAliveOnDevice(i,j) && pixels[index].z < 255) ++pixels[index].z; // ALPHA pixels[index].w = 255; } } Drawer::Drawer(GameOfLife& argGame) : game(argGame), speed(1) {} Drawer::~Drawer() { }; void Drawer::redraw(uchar4* pixels, int ticks) { dim3 threadGrid(16,16); dim3 blockGrid(game.getWidth()/16+1,game.getHeight()/16+1); for(int i = 0; i < speed; ++i) { game.evolve(); hipLaunchKernelGGL(( redrawOnDevice), dim3(blockGrid),dim3(threadGrid), 0, 0, game, pixels); } }
3679513d3a28377b397149da344628ed4b841cfb.cu
#include "Drawer.h" #include "CudaError.hpp" __global__ void redrawOnDevice(GameOfLife game, uchar4* pixels) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(game.withinBounds(i,j)){ int index = game.findIndex(i,j); // RED if(game.cellDied(i,j)) pixels[index].x = 127; else if(pixels[index].x > 0) --pixels[index].x; // GREEN pixels[index].y = (game.isAliveOnDevice(i,j)) ? 255 : 0; // BLUE if(game.isAliveOnDevice(i,j) && pixels[index].z < 255) ++pixels[index].z; // ALPHA pixels[index].w = 255; } } Drawer::Drawer(GameOfLife& argGame) : game(argGame), speed(1) {} Drawer::~Drawer() { }; void Drawer::redraw(uchar4* pixels, int ticks) { dim3 threadGrid(16,16); dim3 blockGrid(game.getWidth()/16+1,game.getHeight()/16+1); for(int i = 0; i < speed; ++i) { game.evolve(); redrawOnDevice<<<blockGrid,threadGrid>>>(game, pixels); } }
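Drawer::redraw launches a 16x16 thread block with a grid sized from the board dimensions and relies on a bounds check inside the kernel. The sketch below isolates that 2D bounds-guarded launch; touch_pixels, the row-major stand-in for findIndex, and the 640x480 size are assumptions for illustration, not part of the GameOfLife API.

#include <cuda_runtime.h>

// Stand-in for redrawOnDevice: write only to in-bounds pixels.
__global__ void touch_pixels(uchar4* pixels, int width, int height)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= width || j >= height)
        return;                        // same role as game.withinBounds(i, j)

    int index = j * width + i;         // row-major layout; one plausible findIndex()
    pixels[index].w = 255;             // opaque alpha, as redrawOnDevice sets it
}

void launch_touch_pixels(uchar4* d_pixels, int width, int height)
{
    dim3 block(16, 16);
    // (n + 15) / 16 avoids the extra empty block that n / 16 + 1 launches when n
    // is already a multiple of 16; both are safe given the bounds check above.
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    touch_pixels<<<grid, block>>>(d_pixels, width, height);
}

int main()
{
    const int width = 640, height = 480;   // made-up dimensions
    uchar4* d_pixels;
    cudaMalloc(&d_pixels, width * height * sizeof(uchar4));
    launch_touch_pixels(d_pixels, width, height);
    cudaDeviceSynchronize();
    cudaFree(d_pixels);
    return 0;
}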
441dfed20c0ebac66046e14a7fd2e889d3648500.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void constrain_weight_updates_kernel(int N, float coef, float *weights_gpu, float *weight_updates_gpu) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < N) { const float w = weights_gpu[i]; const float wu = weight_updates_gpu[i]; const float wu_sign = (wu == 0) ? 0 : (fabs(wu) / wu); const float abs_limit = fabs(w * coef); if (fabs(wu) > abs_limit) weight_updates_gpu[i] = abs_limit * wu_sign; } }
441dfed20c0ebac66046e14a7fd2e889d3648500.cu
#include "includes.h" __global__ void constrain_weight_updates_kernel(int N, float coef, float *weights_gpu, float *weight_updates_gpu) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < N) { const float w = weights_gpu[i]; const float wu = weight_updates_gpu[i]; const float wu_sign = (wu == 0) ? 0 : (fabs(wu) / wu); const float abs_limit = fabs(w * coef); if (fabs(wu) > abs_limit) weight_updates_gpu[i] = abs_limit * wu_sign; } }
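The kernel above limits each weight update to +/-|coef * w| while keeping its sign. A small host-only reference of that clamping rule can be handy for spot-checking the GPU path; constrain_update and the test values below are illustrative, not part of the original code.

#include <cmath>
#include <cstdio>

// Host reference for the rule in constrain_weight_updates_kernel:
// an update larger in magnitude than |coef * w| is clamped to that
// magnitude while keeping its original sign.
static float constrain_update(float coef, float w, float wu)
{
    const float abs_limit = std::fabs(w * coef);
    if (std::fabs(wu) > abs_limit)
        return (wu > 0.0f) ? abs_limit : -abs_limit;  // abs_limit * sign(wu)
    return wu;
}

int main()
{
    // Made-up spot checks: the first update is clamped to 0.2, the second passes through.
    printf("%f\n", constrain_update(0.1f, 2.0f, 5.0f));
    printf("%f\n", constrain_update(0.1f, 2.0f, -0.05f));
    return 0;
}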
e5005e53a3dadbcafe55cd1f684c29fedc3d7253.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/hip/TensorTopK.h> #include <ATen/core/TensorBase.h> #include <ATen/ceil_div.h> #include <ATen/Dispatch.h> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/hip/ScanUtils.cuh> #include <ATen/hip/AsmUtils.cuh> #include <ATen/hip/DeviceUtils.cuh> #include <ATen/native/hip/SortingCommon.cuh> #include <ATen/native/hip/SortingRadixSelect.cuh> #include <ATen/native/hip/SortUtils.cuh> #include <ATen/hip/cub.cuh> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include <c10/macros/Macros.h> using namespace at::native; namespace at { namespace native { namespace sbtopk { // single_block_topk template <typename T> struct AddOp { __device__ __forceinline__ T operator()(T const &lhs, T const &rhs) { return (lhs + rhs); } }; template <typename T, typename IndexType, int Dim, bool WithKthValues> C10_LAUNCH_BOUNDS_1(1024) __global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input, IndexType inputSliceSize, IndexType outputSliceSize, // aka `k` bool largest, IndexType numInputSlices, IndexType inputWithinSliceStride, at::cuda::detail::TensorInfo<T, IndexType> topK, IndexType topKWithinSliceStride, at::cuda::detail::TensorInfo<int64_t, IndexType> indices, IndexType indicesWithinSliceStride, T* kthValues) { // Indices are limited to integer fp precision, so counts can fit in // int32, regardless of IndexType #if defined(USE_ROCM) __shared__ int smem[64]; #else __shared__ int smem[32]; // one per each warp, up to warp limit #endif IndexType slice = getLinearBlockId<IndexType>(); if (slice >= numInputSlices) { return; } // Find the start offset for our slice IndexType sliceStartIndex = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input); IndexType topKSliceStartIndex = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK); IndexType indicesSliceStartIndex = at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices); T* inputSliceStart = &input.data[sliceStartIndex]; T* topKSliceStart = &topK.data[topKSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; // Find the k-th highest element in our input T topKValue; if (WithKthValues){ topKValue = kthValues[slice]; } else { topKValue = static_cast<T>(0); radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType>( inputSliceStart, outputSliceSize, largest, inputSliceSize, inputWithinSliceStride, smem, &topKValue); } const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue); // Every value that is strictly less/greater than `pattern` // (depending on sort dir) in sorted int format is in the top-K. // The top-K value itself might not be unique. // // Since there are a variable number of elements that we see that // are within the top-k, we don't know at what index to write out // the resulting values. // In order to get this, we perform an exclusive prefix sum of // `hasTopK`. This will return the resulting index into which we // need to write the result, if a thread has a result. // All threads need to participate in the loop and the prefix sum, // but not necessarily in the load; hence loop bounds being rounded // up to a multiple of the block dim. 
IndexType numIterations = round_up(inputSliceSize, (IndexType) blockDim.x); IndexType writeIndexStart = 0; for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) { bool inRange = (i < inputSliceSize); T v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0); const auto convertedV = at::native::TopKTypeConfig<T>::convert(v); bool hasTopK; if (largest) { hasTopK = inRange && (convertedV > topKConverted); } else { hasTopK = inRange && (convertedV < topKConverted); } int index; int carry; at::cuda::exclusiveBinaryPrefixScan<int, true>( smem, hasTopK, &index, &carry, AddOp<int>()); if (hasTopK) { int writeIndex = writeIndexStart + index; CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize); IndexType topKOffset = writeIndex * topKWithinSliceStride; IndexType indexOffset = writeIndex * indicesWithinSliceStride; topKSliceStart[topKOffset] = v; indicesSliceStart[indexOffset] = i; } writeIndexStart += carry; } // We need to fill in the rest with actual == top-K values. // The number that we need is outputSliceSize - // writeIndexStart. There might be more than that number available, // in which case we have to choose the first seen set. We do this // via a prefix sum to calculate indices for writing results. CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart); IndexType topKRemaining = (outputSliceSize - writeIndexStart); for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) { bool inRange = (i < inputSliceSize); T v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0); const auto convertedV = at::native::TopKTypeConfig<T>::convert(v); bool hasTopK = inRange && (convertedV == topKConverted); int index; int carry; at::cuda::exclusiveBinaryPrefixScan<int, true>( smem, hasTopK, &index, &carry, AddOp<int>()); if (hasTopK && index < topKRemaining) { int writeIndex = writeIndexStart + index; CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize); IndexType topKOffset = writeIndex * topKWithinSliceStride; IndexType indexOffset = writeIndex * indicesWithinSliceStride; topKSliceStart[topKOffset] = v; indicesSliceStart[indexOffset] = i; } if (carry >= topKRemaining) { break; } topKRemaining -= carry; writeIndexStart += carry; } }; template <typename T, typename IndexType, int Dim> void launch( at::cuda::detail::TensorInfo<T, IndexType> input, IndexType inputSliceSize, IndexType outputSliceSize, // aka `k` bool largest, IndexType numInputSlices, IndexType inputWithinSliceStride, at::cuda::detail::TensorInfo<T, IndexType> topK, IndexType topKWithinSliceStride, at::cuda::detail::TensorInfo<int64_t, IndexType> indices, IndexType indicesWithinSliceStride) { dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(numInputSlices, grid), "Too many slices for topk"); dim3 block(::min(at::ceil_div((int64_t)inputSliceSize, (int64_t)C10_WARP_SIZE) * (int64_t)C10_WARP_SIZE, (int64_t)1024)); hipLaunchKernelGGL(( gatherTopK<T, IndexType, Dim, /* WithKthValues= */false>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input, inputSliceSize, outputSliceSize, largest, numInputSlices, inputWithinSliceStride, topK, topKWithinSliceStride, indices, indicesWithinSliceStride, nullptr); C10_HIP_KERNEL_LAUNCH_CHECK(); } } // namespace sbtopk namespace mbtopk { // multi_block_topk constexpr int BLOCK_THREADS = 256; // Over what radix we are selecting values constexpr int RADIX_BITS = 8; constexpr int RADIX_DIGITS = 1 << RADIX_BITS; // 2 ^ RADIX_BITS constexpr int RADIX_MASK = (RADIX_DIGITS - 1); static_assert(RADIX_DIGITS <= 
BLOCK_THREADS, "radixFindKthValues kernel requires RADIX_DIGITS <= BLOCK_THREADS"); template <typename T, typename IndexType> __global__ void fill(T* x, T value, IndexType size) { IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; for (IndexType i = idx; i < size; i += gridDim.x * blockDim.x) { x[i] = value; } } // find the kth smallest value, // for largest topk, k_to_find = slice_size - k + 1 template <typename T, typename IndexType, typename Bitwise, int Dim> C10_LAUNCH_BOUNDS_1(BLOCK_THREADS) __global__ void radixFindKthValues( at::cuda::detail::TensorInfo<T, IndexType> input, IndexType slice_size, IndexType* ks_to_find, // size: num_slices IndexType num_slices, IndexType withinSliceStride, int current_bit, int items_per_thread, IndexType blocks_per_slice, Bitwise desiredMask, // outputs uint32_t* semaphores, // size: num_slices Bitwise* desires, // size: num_slices IndexType* counts, // size: num_slices * blocks_per_slice * radix_digits T* kthValues // size: num_slices, only write when current_bit reaches 0 ) { int items_per_block = items_per_thread * BLOCK_THREADS; int tidx = threadIdx.x; IndexType block_idx = getLinearBlockId<IndexType>(); IndexType slice_idx = block_idx / blocks_per_slice; IndexType blk_idx_in_slice = block_idx % blocks_per_slice; if (slice_idx >= num_slices) { return; } Bitwise desired = desires[slice_idx]; IndexType k_to_find = ks_to_find[slice_idx]; IndexType slice_start_index = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice_idx, input); T* data = &input.data[slice_start_index]; typedef hipcub::BlockScan<IndexType, BLOCK_THREADS> BlockScan; union __align__(16) TempStorage { uint32_t digit_counters[RADIX_DIGITS]; IndexType digit_count_cumsum[RADIX_DIGITS]; // only used if this it the last block for this slice typename BlockScan::TempStorage scan_storage; }; __shared__ TempStorage temp_storage; // fill digit_counters with zeros if (tidx < RADIX_DIGITS) { temp_storage.digit_counters[tidx] = 0; } __syncthreads(); items_per_thread = (blk_idx_in_slice + 1 < blocks_per_slice) ? 
items_per_thread : at::ceil_div((int64_t)(slice_size - blk_idx_in_slice * items_per_block), (int64_t)BLOCK_THREADS); // collect digit counts and store in shared memorey for (int i = 0; i < items_per_thread; ++i) { // Find the start offset for this slice IndexType idx = blk_idx_in_slice * items_per_block + i * BLOCK_THREADS + tidx; if (idx < slice_size) { idx *= withinSliceStride; Bitwise val = TopKTypeConfig<T>::convert(doLdg(&data[idx])); bool has_val = ((val & desiredMask) == (desired & desiredMask)); Bitwise digit = at::cuda::Bitfield<Bitwise>::getBitfield(val, current_bit, RADIX_BITS); if (has_val) { atomicAdd(&temp_storage.digit_counters[digit], 1); } } } __syncthreads(); // load digit counter to register, one digit per thread static_assert(RADIX_DIGITS <= BLOCK_THREADS, "this kernel requires RADIX_DIGITS <= BLOCK_THREADS"); IndexType digit_count = 0; if (tidx < RADIX_DIGITS) { digit_count = temp_storage.digit_counters[tidx]; } // if blocks_per_slice == 1, there is no need to do cross-block reduction // in this case counts saved at registers instead of global memory if (blocks_per_slice > 1) { if (tidx < RADIX_DIGITS) { counts[block_idx * RADIX_DIGITS + tidx] = digit_count; } __threadfence(); // make sure writes are globally visible __syncthreads(); // make sure all writes are finished before update semaphores } // the last block of each slice accumulates counters from multiple blocks and updates desired and ks_to_find __shared__ bool s_is_last_block_done; if (tidx == 0) { if (blocks_per_slice == 1) { s_is_last_block_done = true; } else { uint32_t blocks_finished_old = atomicAdd(&semaphores[slice_idx], 1); s_is_last_block_done = (blocks_finished_old == blocks_per_slice - 1); } } __syncthreads(); if (!s_is_last_block_done) return; // accumulates counters from multiple blocks if (tidx < RADIX_DIGITS && blocks_per_slice > 1) { digit_count = 0; for (int blk = 0; blk < blocks_per_slice; ++blk) { digit_count += counts[(slice_idx * blocks_per_slice + blk) * RADIX_DIGITS + tidx]; } } // compute the block-wide inclusive prefix sum IndexType digit_count_cumsum; BlockScan(temp_storage.scan_storage).InclusiveSum(digit_count, digit_count_cumsum); __syncthreads(); // every thread also need the perfix_sum of it's left value for comparison, so save a copy in shared mem if (tidx < RADIX_DIGITS) { temp_storage.digit_count_cumsum[tidx] = digit_count_cumsum; } __syncthreads(); if (tidx < RADIX_DIGITS) { IndexType digit_count_cumsum_left = (tidx == 0) ? 
0 : temp_storage.digit_count_cumsum[tidx - 1]; // if not the last pass: update desired and ks_to_find // if last pass: write out the kth value if (digit_count_cumsum_left < k_to_find && k_to_find <= digit_count_cumsum) { desired = at::cuda::Bitfield<Bitwise>::setBitfield(desired, tidx, current_bit, RADIX_BITS); if (current_bit > 0) { desires[slice_idx] = desired; ks_to_find[slice_idx] = k_to_find - digit_count_cumsum_left; } else { kthValues[slice_idx] = TopKTypeConfig<T>::deconvert(desired); } } } // reset semaphores for the next pass if (tidx == 0) { semaphores[slice_idx] = 0; } }; int get_items_per_thread(uint64_t num_slices, uint64_t slice_size) { // occupancy of this kernel is limited by registers per threads constexpr int REGS_PER_THREAD = 40; // from nsight launch statistics constexpr int REGS_PER_BLOCK = REGS_PER_THREAD * BLOCK_THREADS; hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties(); int mpc = prop->multiProcessorCount; #if defined(USE_ROCM) int regs_per_mp = prop->regsPerBlock; int max_blocks_per_mp = 32; #else int regs_per_mp = prop->regsPerMultiprocessor; #if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000 int max_blocks_per_mp = prop->maxBlocksPerMultiProcessor; #else int max_blocks_per_mp = 32; #endif #endif int blocks_per_mp = ::min(regs_per_mp / REGS_PER_BLOCK, max_blocks_per_mp); int64_t items_per_thread = at::ceil_div((int64_t)(slice_size * num_slices), (int64_t)(mpc * blocks_per_mp * BLOCK_THREADS)); items_per_thread = ::max(4, ::min((int)items_per_thread, 64)); // clamp to (4, 64) return items_per_thread; } template <typename T, typename IndexType, int Dim> void launch( at::cuda::detail::TensorInfo<T, IndexType> input, IndexType inputSliceSize, IndexType outputSliceSize, // aka `k` bool largest, IndexType numInputSlices, IndexType inputWithinSliceStride, at::cuda::detail::TensorInfo<T, IndexType> topK, IndexType topKWithinSliceStride, at::cuda::detail::TensorInfo<int64_t, IndexType> indices, IndexType indicesWithinSliceStride) { // configure items_per_thread based on device architecture and input size int items_per_thread = get_items_per_thread(numInputSlices, inputSliceSize); int items_per_block = items_per_thread * BLOCK_THREADS; using Bitwise = typename TopKTypeConfig<T>::RadixType; int64_t blocks_per_slice = at::ceil_div((int64_t)inputSliceSize, (int64_t)items_per_block); int64_t num_blocks = numInputSlices * blocks_per_slice; // temporary storage auto& allocator = *c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get(); auto kthValues_buffer = allocator.allocate(numInputSlices * sizeof(T)); T* kthValues = reinterpret_cast<T*>(kthValues_buffer.get()); TORCH_CHECK(blocks_per_slice <= std::numeric_limits<uint32_t>::max(), "blocks_per_slice larger than uint32 maximum is not supported"); auto semaphores_buffer = allocator.allocate(numInputSlices * sizeof(uint32_t)); uint32_t* semaphores = reinterpret_cast<uint32_t*>(semaphores_buffer.get()); AT_CUDA_CHECK(hipMemsetAsync(semaphores, 0, numInputSlices * sizeof(uint32_t), c10::hip::getCurrentHIPStreamMasqueradingAsCUDA())); auto ks_to_find_buffer = allocator.allocate(numInputSlices * sizeof(IndexType)); IndexType* ks_to_find = reinterpret_cast<IndexType*>(ks_to_find_buffer.get()); IndexType k_to_find = largest ? 
inputSliceSize - outputSliceSize + 1: outputSliceSize; hipLaunchKernelGGL(( fill<IndexType>), dim3(::min((numInputSlices + 511) / 512, (IndexType)65535)), dim3(512), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), ks_to_find, k_to_find, numInputSlices); C10_HIP_KERNEL_LAUNCH_CHECK(); auto desired_buffer = allocator.allocate(numInputSlices * sizeof(Bitwise)); Bitwise* desired = reinterpret_cast<Bitwise*>(desired_buffer.get()); auto counts_buffer = allocator.allocate(num_blocks * RADIX_DIGITS * sizeof(IndexType)); IndexType* counts = reinterpret_cast<IndexType*>(counts_buffer.get()); Bitwise desiredMask = 0; dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(num_blocks, grid), "Too many slices for topk"); dim3 block(BLOCK_THREADS); // iterate radix bits for multiple passes for (int current_bit = sizeof(T) * 8 - RADIX_BITS; current_bit >= 0; current_bit -= RADIX_BITS) { hipLaunchKernelGGL(( radixFindKthValues<T, IndexType, Bitwise, Dim>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input, inputSliceSize, ks_to_find, numInputSlices, inputWithinSliceStride, current_bit, items_per_thread, blocks_per_slice, desiredMask, semaphores, desired, counts, kthValues); C10_HIP_KERNEL_LAUNCH_CHECK(); desiredMask = at::cuda::Bitfield<Bitwise>::setBitfield(desiredMask, RADIX_MASK, current_bit, RADIX_BITS); } // Find topk values based on kth values { dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(numInputSlices, grid), "Too many slices for topk"); dim3 block(::min(at::ceil_div((int64_t)inputSliceSize, (int64_t)C10_WARP_SIZE) * (int64_t)C10_WARP_SIZE, (int64_t)1024)); hipLaunchKernelGGL(( sbtopk::gatherTopK<T, IndexType, Dim, /* WithKthValues= */true>), dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input, inputSliceSize, outputSliceSize, largest, numInputSlices, inputWithinSliceStride, topK, topKWithinSliceStride, indices, indicesWithinSliceStride, kthValues); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } // namespace mbtopk bool should_use_multiblock(int64_t num_slices, int64_t slice_size) { // This heuristics is based on the experiment in https://github.com/pytorch/pytorch/pull/71081 return (num_slices <= 400 && slice_size >= 5000) || (num_slices >= 400 && num_slices < 4000 && slice_size >= 1000) || (num_slices >= 4000 && slice_size >= 300); } void launch_gather_topk_kernel( const TensorBase& self, int64_t k, int64_t dim, bool largest, const TensorBase& values, const TensorBase& indices) { int numDims = self.dim(); numDims = numDims == 0 ? 1 : numDims; TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions"); int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim); auto input = self.contiguous(); // static_cast is required to ensure that the correct type (INDEX_T) // is provided to the kernel for the arguments. 
#define RUN_K(INDEX_T, DIM, LAUNCH_FUNCTION_NAME) \ LAUNCH_FUNCTION_NAME<scalar_t, INDEX_T, DIM>( \ inputInfo, \ static_cast<INDEX_T>(sliceSize), \ static_cast<INDEX_T>(k), \ largest, \ static_cast<INDEX_T>(numInputSlices), \ /* The actual dimension that the k-selection is running in */ \ /* may have changed from collapseDims() */ \ static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \ topKInfo, \ static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \ indicesInfo, \ static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); #define RUN_MB(INDEX_T, DIM) \ if (should_use_multiblock(numInputSlices, sliceSize)) { \ RUN_K(INDEX_T, DIM, mbtopk::launch); \ } else { \ RUN_K(INDEX_T, DIM, sbtopk::launch); \ } #define RUN_DIM(INDEX_T) \ if (allDims == 1) { \ RUN_MB(INDEX_T, 1); \ } else if (allDims == 2) { \ RUN_MB(INDEX_T, 2); \ } else if (allDims == 3) { \ RUN_MB(INDEX_T, 3); \ } else { \ RUN_MB(INDEX_T, -1); \ } #define RUN_T(INDEX_T) \ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \ at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \ at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \ at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \ at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \ at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \ at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \ /* tensorInfoLegacyIfScalar*/ \ if (!input.dim()) { \ inputInfo.dims = 1; \ inputInfo.sizes[0] = 1; \ inputInfo.strides[0] = 1; \ topKInfo.dims = 1; \ topKInfo.sizes[0] = 1; \ topKInfo.strides[0] = 1; \ indicesInfo.dims = 1; \ indicesInfo.sizes[0] = 1; \ indicesInfo.strides[0] = 1; \ } \ /* We use these structures solely to find the offset to */ \ /* each slice we are operating on */ \ inputInfo.sizes[dim] = 1; \ topKInfo.sizes[dim] = 1; \ indicesInfo.sizes[dim] = 1; \ /* stash the stride of dim because it can be accidentally collapsed */ \ auto strideTopK = topKInfo.strides[dim]; \ auto strideIndices = indicesInfo.strides[dim]; \ /* Collapse all other dims */ \ int collapseInputDim = inputInfo.collapseDims(dim); \ int collapseTopKDim = topKInfo.collapseDims(dim); \ int collapseIndicesDim = indicesInfo.collapseDims(dim); \ /* restore stride in case it was collapsed */ \ topKInfo.strides[collapseTopKDim] = strideTopK; \ indicesInfo.strides[collapseIndicesDim] = strideIndices; \ int64_t numInputSlices = 1; \ for (int i = 0; i < inputInfo.dims; ++i) { \ numInputSlices *= inputInfo.sizes[i]; \ } \ \ /* This is used as a template parameter to calculate indices. */ \ /* We only specialize it if all collapsed dim sizes are the */ \ /* same; otherwise, we use -1 which is the specialization */ \ /* parameter for arbitrary dimensions */ \ int allDims = inputInfo.dims; \ if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \ allDims = -1; \ } \ \ RUN_DIM(INDEX_T); \ }); // the below is safe with 0-dimensional tensors because it is based on // TensorInfo which implicitly expands to 1-dimensional. if (input.numel() > 0) { // Based on required index size, run the algorithm with the // appropriate index type if (at::cuda::detail::canUse32BitIndexMath(input) && at::cuda::detail::canUse32BitIndexMath(values) && at::cuda::detail::canUse32BitIndexMath(indices)) { RUN_T(uint32_t); } else { RUN_T(uint64_t); } } #undef RUN_T #undef RUN_DIM #undef RUN_K } } // at::native } // at
e5005e53a3dadbcafe55cd1f684c29fedc3d7253.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/cuda/TensorTopK.h> #include <ATen/core/TensorBase.h> #include <ATen/ceil_div.h> #include <ATen/Dispatch.h> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/cuda/ScanUtils.cuh> #include <ATen/cuda/AsmUtils.cuh> #include <ATen/cuda/DeviceUtils.cuh> #include <ATen/native/cuda/SortingCommon.cuh> #include <ATen/native/cuda/SortingRadixSelect.cuh> #include <ATen/native/cuda/SortUtils.cuh> #include <ATen/cuda/cub.cuh> #include <c10/cuda/CUDACachingAllocator.h> #include <c10/macros/Macros.h> using namespace at::native; namespace at { namespace native { namespace sbtopk { // single_block_topk template <typename T> struct AddOp { __device__ __forceinline__ T operator()(T const &lhs, T const &rhs) { return (lhs + rhs); } }; template <typename T, typename IndexType, int Dim, bool WithKthValues> C10_LAUNCH_BOUNDS_1(1024) __global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input, IndexType inputSliceSize, IndexType outputSliceSize, // aka `k` bool largest, IndexType numInputSlices, IndexType inputWithinSliceStride, at::cuda::detail::TensorInfo<T, IndexType> topK, IndexType topKWithinSliceStride, at::cuda::detail::TensorInfo<int64_t, IndexType> indices, IndexType indicesWithinSliceStride, T* kthValues) { // Indices are limited to integer fp precision, so counts can fit in // int32, regardless of IndexType #if defined(USE_ROCM) __shared__ int smem[64]; #else __shared__ int smem[32]; // one per each warp, up to warp limit #endif IndexType slice = getLinearBlockId<IndexType>(); if (slice >= numInputSlices) { return; } // Find the start offset for our slice IndexType sliceStartIndex = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input); IndexType topKSliceStartIndex = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK); IndexType indicesSliceStartIndex = at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices); T* inputSliceStart = &input.data[sliceStartIndex]; T* topKSliceStart = &topK.data[topKSliceStartIndex]; int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex]; // Find the k-th highest element in our input T topKValue; if (WithKthValues){ topKValue = kthValues[slice]; } else { topKValue = static_cast<T>(0); radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType>( inputSliceStart, outputSliceSize, largest, inputSliceSize, inputWithinSliceStride, smem, &topKValue); } const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue); // Every value that is strictly less/greater than `pattern` // (depending on sort dir) in sorted int format is in the top-K. // The top-K value itself might not be unique. // // Since there are a variable number of elements that we see that // are within the top-k, we don't know at what index to write out // the resulting values. // In order to get this, we perform an exclusive prefix sum of // `hasTopK`. This will return the resulting index into which we // need to write the result, if a thread has a result. // All threads need to participate in the loop and the prefix sum, // but not necessarily in the load; hence loop bounds being rounded // up to a multiple of the block dim. IndexType numIterations = round_up(inputSliceSize, (IndexType) blockDim.x); IndexType writeIndexStart = 0; for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) { bool inRange = (i < inputSliceSize); T v = inRange ? 
doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0); const auto convertedV = at::native::TopKTypeConfig<T>::convert(v); bool hasTopK; if (largest) { hasTopK = inRange && (convertedV > topKConverted); } else { hasTopK = inRange && (convertedV < topKConverted); } int index; int carry; at::cuda::exclusiveBinaryPrefixScan<int, true>( smem, hasTopK, &index, &carry, AddOp<int>()); if (hasTopK) { int writeIndex = writeIndexStart + index; CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize); IndexType topKOffset = writeIndex * topKWithinSliceStride; IndexType indexOffset = writeIndex * indicesWithinSliceStride; topKSliceStart[topKOffset] = v; indicesSliceStart[indexOffset] = i; } writeIndexStart += carry; } // We need to fill in the rest with actual == top-K values. // The number that we need is outputSliceSize - // writeIndexStart. There might be more than that number available, // in which case we have to choose the first seen set. We do this // via a prefix sum to calculate indices for writing results. CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart); IndexType topKRemaining = (outputSliceSize - writeIndexStart); for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) { bool inRange = (i < inputSliceSize); T v = inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0); const auto convertedV = at::native::TopKTypeConfig<T>::convert(v); bool hasTopK = inRange && (convertedV == topKConverted); int index; int carry; at::cuda::exclusiveBinaryPrefixScan<int, true>( smem, hasTopK, &index, &carry, AddOp<int>()); if (hasTopK && index < topKRemaining) { int writeIndex = writeIndexStart + index; CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize); IndexType topKOffset = writeIndex * topKWithinSliceStride; IndexType indexOffset = writeIndex * indicesWithinSliceStride; topKSliceStart[topKOffset] = v; indicesSliceStart[indexOffset] = i; } if (carry >= topKRemaining) { break; } topKRemaining -= carry; writeIndexStart += carry; } }; template <typename T, typename IndexType, int Dim> void launch( at::cuda::detail::TensorInfo<T, IndexType> input, IndexType inputSliceSize, IndexType outputSliceSize, // aka `k` bool largest, IndexType numInputSlices, IndexType inputWithinSliceStride, at::cuda::detail::TensorInfo<T, IndexType> topK, IndexType topKWithinSliceStride, at::cuda::detail::TensorInfo<int64_t, IndexType> indices, IndexType indicesWithinSliceStride) { dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(numInputSlices, grid), "Too many slices for topk"); dim3 block(std::min(at::ceil_div((int64_t)inputSliceSize, (int64_t)C10_WARP_SIZE) * (int64_t)C10_WARP_SIZE, (int64_t)1024)); gatherTopK<T, IndexType, Dim, /* WithKthValues= */false><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( input, inputSliceSize, outputSliceSize, largest, numInputSlices, inputWithinSliceStride, topK, topKWithinSliceStride, indices, indicesWithinSliceStride, nullptr); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } // namespace sbtopk namespace mbtopk { // multi_block_topk constexpr int BLOCK_THREADS = 256; // Over what radix we are selecting values constexpr int RADIX_BITS = 8; constexpr int RADIX_DIGITS = 1 << RADIX_BITS; // 2 ^ RADIX_BITS constexpr int RADIX_MASK = (RADIX_DIGITS - 1); static_assert(RADIX_DIGITS <= BLOCK_THREADS, "radixFindKthValues kernel requires RADIX_DIGITS <= BLOCK_THREADS"); template <typename T, typename IndexType> __global__ void fill(T* x, T value, IndexType size) { IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; for (IndexType i = idx; i < size; i += 
gridDim.x * blockDim.x) { x[i] = value; } } // find the kth smallest value, // for largest topk, k_to_find = slice_size - k + 1 template <typename T, typename IndexType, typename Bitwise, int Dim> C10_LAUNCH_BOUNDS_1(BLOCK_THREADS) __global__ void radixFindKthValues( at::cuda::detail::TensorInfo<T, IndexType> input, IndexType slice_size, IndexType* ks_to_find, // size: num_slices IndexType num_slices, IndexType withinSliceStride, int current_bit, int items_per_thread, IndexType blocks_per_slice, Bitwise desiredMask, // outputs uint32_t* semaphores, // size: num_slices Bitwise* desires, // size: num_slices IndexType* counts, // size: num_slices * blocks_per_slice * radix_digits T* kthValues // size: num_slices, only write when current_bit reaches 0 ) { int items_per_block = items_per_thread * BLOCK_THREADS; int tidx = threadIdx.x; IndexType block_idx = getLinearBlockId<IndexType>(); IndexType slice_idx = block_idx / blocks_per_slice; IndexType blk_idx_in_slice = block_idx % blocks_per_slice; if (slice_idx >= num_slices) { return; } Bitwise desired = desires[slice_idx]; IndexType k_to_find = ks_to_find[slice_idx]; IndexType slice_start_index = at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice_idx, input); T* data = &input.data[slice_start_index]; typedef cub::BlockScan<IndexType, BLOCK_THREADS> BlockScan; union __align__(16) TempStorage { uint32_t digit_counters[RADIX_DIGITS]; IndexType digit_count_cumsum[RADIX_DIGITS]; // only used if this it the last block for this slice typename BlockScan::TempStorage scan_storage; }; __shared__ TempStorage temp_storage; // fill digit_counters with zeros if (tidx < RADIX_DIGITS) { temp_storage.digit_counters[tidx] = 0; } __syncthreads(); items_per_thread = (blk_idx_in_slice + 1 < blocks_per_slice) ? items_per_thread : at::ceil_div((int64_t)(slice_size - blk_idx_in_slice * items_per_block), (int64_t)BLOCK_THREADS); // collect digit counts and store in shared memorey for (int i = 0; i < items_per_thread; ++i) { // Find the start offset for this slice IndexType idx = blk_idx_in_slice * items_per_block + i * BLOCK_THREADS + tidx; if (idx < slice_size) { idx *= withinSliceStride; Bitwise val = TopKTypeConfig<T>::convert(doLdg(&data[idx])); bool has_val = ((val & desiredMask) == (desired & desiredMask)); Bitwise digit = at::cuda::Bitfield<Bitwise>::getBitfield(val, current_bit, RADIX_BITS); if (has_val) { atomicAdd(&temp_storage.digit_counters[digit], 1); } } } __syncthreads(); // load digit counter to register, one digit per thread static_assert(RADIX_DIGITS <= BLOCK_THREADS, "this kernel requires RADIX_DIGITS <= BLOCK_THREADS"); IndexType digit_count = 0; if (tidx < RADIX_DIGITS) { digit_count = temp_storage.digit_counters[tidx]; } // if blocks_per_slice == 1, there is no need to do cross-block reduction // in this case counts saved at registers instead of global memory if (blocks_per_slice > 1) { if (tidx < RADIX_DIGITS) { counts[block_idx * RADIX_DIGITS + tidx] = digit_count; } __threadfence(); // make sure writes are globally visible __syncthreads(); // make sure all writes are finished before update semaphores } // the last block of each slice accumulates counters from multiple blocks and updates desired and ks_to_find __shared__ bool s_is_last_block_done; if (tidx == 0) { if (blocks_per_slice == 1) { s_is_last_block_done = true; } else { uint32_t blocks_finished_old = atomicAdd(&semaphores[slice_idx], 1); s_is_last_block_done = (blocks_finished_old == blocks_per_slice - 1); } } __syncthreads(); if (!s_is_last_block_done) return; // 
accumulates counters from multiple blocks if (tidx < RADIX_DIGITS && blocks_per_slice > 1) { digit_count = 0; for (int blk = 0; blk < blocks_per_slice; ++blk) { digit_count += counts[(slice_idx * blocks_per_slice + blk) * RADIX_DIGITS + tidx]; } } // compute the block-wide inclusive prefix sum IndexType digit_count_cumsum; BlockScan(temp_storage.scan_storage).InclusiveSum(digit_count, digit_count_cumsum); __syncthreads(); // every thread also need the perfix_sum of it's left value for comparison, so save a copy in shared mem if (tidx < RADIX_DIGITS) { temp_storage.digit_count_cumsum[tidx] = digit_count_cumsum; } __syncthreads(); if (tidx < RADIX_DIGITS) { IndexType digit_count_cumsum_left = (tidx == 0) ? 0 : temp_storage.digit_count_cumsum[tidx - 1]; // if not the last pass: update desired and ks_to_find // if last pass: write out the kth value if (digit_count_cumsum_left < k_to_find && k_to_find <= digit_count_cumsum) { desired = at::cuda::Bitfield<Bitwise>::setBitfield(desired, tidx, current_bit, RADIX_BITS); if (current_bit > 0) { desires[slice_idx] = desired; ks_to_find[slice_idx] = k_to_find - digit_count_cumsum_left; } else { kthValues[slice_idx] = TopKTypeConfig<T>::deconvert(desired); } } } // reset semaphores for the next pass if (tidx == 0) { semaphores[slice_idx] = 0; } }; int get_items_per_thread(uint64_t num_slices, uint64_t slice_size) { // occupancy of this kernel is limited by registers per threads constexpr int REGS_PER_THREAD = 40; // from nsight launch statistics constexpr int REGS_PER_BLOCK = REGS_PER_THREAD * BLOCK_THREADS; cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties(); int mpc = prop->multiProcessorCount; #if defined(USE_ROCM) int regs_per_mp = prop->regsPerBlock; int max_blocks_per_mp = 32; #else int regs_per_mp = prop->regsPerMultiprocessor; #if defined(CUDA_VERSION) && CUDA_VERSION >= 11000 int max_blocks_per_mp = prop->maxBlocksPerMultiProcessor; #else int max_blocks_per_mp = 32; #endif #endif int blocks_per_mp = std::min(regs_per_mp / REGS_PER_BLOCK, max_blocks_per_mp); int64_t items_per_thread = at::ceil_div((int64_t)(slice_size * num_slices), (int64_t)(mpc * blocks_per_mp * BLOCK_THREADS)); items_per_thread = std::max(4, std::min((int)items_per_thread, 64)); // clamp to (4, 64) return items_per_thread; } template <typename T, typename IndexType, int Dim> void launch( at::cuda::detail::TensorInfo<T, IndexType> input, IndexType inputSliceSize, IndexType outputSliceSize, // aka `k` bool largest, IndexType numInputSlices, IndexType inputWithinSliceStride, at::cuda::detail::TensorInfo<T, IndexType> topK, IndexType topKWithinSliceStride, at::cuda::detail::TensorInfo<int64_t, IndexType> indices, IndexType indicesWithinSliceStride) { // configure items_per_thread based on device architecture and input size int items_per_thread = get_items_per_thread(numInputSlices, inputSliceSize); int items_per_block = items_per_thread * BLOCK_THREADS; using Bitwise = typename TopKTypeConfig<T>::RadixType; int64_t blocks_per_slice = at::ceil_div((int64_t)inputSliceSize, (int64_t)items_per_block); int64_t num_blocks = numInputSlices * blocks_per_slice; // temporary storage auto& allocator = *c10::cuda::CUDACachingAllocator::get(); auto kthValues_buffer = allocator.allocate(numInputSlices * sizeof(T)); T* kthValues = reinterpret_cast<T*>(kthValues_buffer.get()); TORCH_CHECK(blocks_per_slice <= std::numeric_limits<uint32_t>::max(), "blocks_per_slice larger than uint32 maximum is not supported"); auto semaphores_buffer = allocator.allocate(numInputSlices * 
sizeof(uint32_t)); uint32_t* semaphores = reinterpret_cast<uint32_t*>(semaphores_buffer.get()); AT_CUDA_CHECK(cudaMemsetAsync(semaphores, 0, numInputSlices * sizeof(uint32_t), c10::cuda::getCurrentCUDAStream())); auto ks_to_find_buffer = allocator.allocate(numInputSlices * sizeof(IndexType)); IndexType* ks_to_find = reinterpret_cast<IndexType*>(ks_to_find_buffer.get()); IndexType k_to_find = largest ? inputSliceSize - outputSliceSize + 1: outputSliceSize; fill<IndexType><<<std::min((numInputSlices + 511) / 512, (IndexType)65535), 512, 0, c10::cuda::getCurrentCUDAStream()>>>( ks_to_find, k_to_find, numInputSlices); C10_CUDA_KERNEL_LAUNCH_CHECK(); auto desired_buffer = allocator.allocate(numInputSlices * sizeof(Bitwise)); Bitwise* desired = reinterpret_cast<Bitwise*>(desired_buffer.get()); auto counts_buffer = allocator.allocate(num_blocks * RADIX_DIGITS * sizeof(IndexType)); IndexType* counts = reinterpret_cast<IndexType*>(counts_buffer.get()); Bitwise desiredMask = 0; dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(num_blocks, grid), "Too many slices for topk"); dim3 block(BLOCK_THREADS); // iterate radix bits for multiple passes for (int current_bit = sizeof(T) * 8 - RADIX_BITS; current_bit >= 0; current_bit -= RADIX_BITS) { radixFindKthValues<T, IndexType, Bitwise, Dim><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( input, inputSliceSize, ks_to_find, numInputSlices, inputWithinSliceStride, current_bit, items_per_thread, blocks_per_slice, desiredMask, semaphores, desired, counts, kthValues); C10_CUDA_KERNEL_LAUNCH_CHECK(); desiredMask = at::cuda::Bitfield<Bitwise>::setBitfield(desiredMask, RADIX_MASK, current_bit, RADIX_BITS); } // Find topk values based on kth values { dim3 grid; TORCH_INTERNAL_ASSERT(getGridFromTiles(numInputSlices, grid), "Too many slices for topk"); dim3 block(std::min(at::ceil_div((int64_t)inputSliceSize, (int64_t)C10_WARP_SIZE) * (int64_t)C10_WARP_SIZE, (int64_t)1024)); sbtopk::gatherTopK<T, IndexType, Dim, /* WithKthValues= */true><<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( input, inputSliceSize, outputSliceSize, largest, numInputSlices, inputWithinSliceStride, topK, topKWithinSliceStride, indices, indicesWithinSliceStride, kthValues); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } // namespace mbtopk bool should_use_multiblock(int64_t num_slices, int64_t slice_size) { // This heuristics is based on the experiment in https://github.com/pytorch/pytorch/pull/71081 return (num_slices <= 400 && slice_size >= 5000) || (num_slices >= 400 && num_slices < 4000 && slice_size >= 1000) || (num_slices >= 4000 && slice_size >= 300); } void launch_gather_topk_kernel( const TensorBase& self, int64_t k, int64_t dim, bool largest, const TensorBase& values, const TensorBase& indices) { int numDims = self.dim(); numDims = numDims == 0 ? 1 : numDims; TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions"); int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim); auto input = self.contiguous(); // static_cast is required to ensure that the correct type (INDEX_T) // is provided to the kernel for the arguments. 
#define RUN_K(INDEX_T, DIM, LAUNCH_FUNCTION_NAME) \ LAUNCH_FUNCTION_NAME<scalar_t, INDEX_T, DIM>( \ inputInfo, \ static_cast<INDEX_T>(sliceSize), \ static_cast<INDEX_T>(k), \ largest, \ static_cast<INDEX_T>(numInputSlices), \ /* The actual dimension that the k-selection is running in */ \ /* may have changed from collapseDims() */ \ static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \ topKInfo, \ static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \ indicesInfo, \ static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); #define RUN_MB(INDEX_T, DIM) \ if (should_use_multiblock(numInputSlices, sliceSize)) { \ RUN_K(INDEX_T, DIM, mbtopk::launch); \ } else { \ RUN_K(INDEX_T, DIM, sbtopk::launch); \ } #define RUN_DIM(INDEX_T) \ if (allDims == 1) { \ RUN_MB(INDEX_T, 1); \ } else if (allDims == 2) { \ RUN_MB(INDEX_T, 2); \ } else if (allDims == 3) { \ RUN_MB(INDEX_T, 3); \ } else { \ RUN_MB(INDEX_T, -1); \ } #define RUN_T(INDEX_T) \ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \ at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \ at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \ at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \ at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \ at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \ at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \ /* tensorInfoLegacyIfScalar*/ \ if (!input.dim()) { \ inputInfo.dims = 1; \ inputInfo.sizes[0] = 1; \ inputInfo.strides[0] = 1; \ topKInfo.dims = 1; \ topKInfo.sizes[0] = 1; \ topKInfo.strides[0] = 1; \ indicesInfo.dims = 1; \ indicesInfo.sizes[0] = 1; \ indicesInfo.strides[0] = 1; \ } \ /* We use these structures solely to find the offset to */ \ /* each slice we are operating on */ \ inputInfo.sizes[dim] = 1; \ topKInfo.sizes[dim] = 1; \ indicesInfo.sizes[dim] = 1; \ /* stash the stride of dim because it can be accidentally collapsed */ \ auto strideTopK = topKInfo.strides[dim]; \ auto strideIndices = indicesInfo.strides[dim]; \ /* Collapse all other dims */ \ int collapseInputDim = inputInfo.collapseDims(dim); \ int collapseTopKDim = topKInfo.collapseDims(dim); \ int collapseIndicesDim = indicesInfo.collapseDims(dim); \ /* restore stride in case it was collapsed */ \ topKInfo.strides[collapseTopKDim] = strideTopK; \ indicesInfo.strides[collapseIndicesDim] = strideIndices; \ int64_t numInputSlices = 1; \ for (int i = 0; i < inputInfo.dims; ++i) { \ numInputSlices *= inputInfo.sizes[i]; \ } \ \ /* This is used as a template parameter to calculate indices. */ \ /* We only specialize it if all collapsed dim sizes are the */ \ /* same; otherwise, we use -1 which is the specialization */ \ /* parameter for arbitrary dimensions */ \ int allDims = inputInfo.dims; \ if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \ allDims = -1; \ } \ \ RUN_DIM(INDEX_T); \ }); // the below is safe with 0-dimensional tensors because it is based on // TensorInfo which implicitly expands to 1-dimensional. if (input.numel() > 0) { // Based on required index size, run the algorithm with the // appropriate index type if (at::cuda::detail::canUse32BitIndexMath(input) && at::cuda::detail::canUse32BitIndexMath(values) && at::cuda::detail::canUse32BitIndexMath(indices)) { RUN_T(uint32_t); } else { RUN_T(uint64_t); } } #undef RUN_T #undef RUN_DIM #undef RUN_K } } // at::native } // at
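radixFindKthValues coordinates its blocks with a per-slice semaphore: every block increments it, and only the block that observes the final count performs the cross-block accumulation and resets the counter. The sketch below reduces that idea to a single toy kernel; last_block_sums, the 8-block launch, and the precomputed partial sums are assumptions for illustration and leave out the per-slice bookkeeping of the real kernel.

#include <cstdio>
#include <cuda_runtime.h>

// Toy version of the "last block finishes the job" pattern: each block bumps a
// semaphore, and the block that observes the final value sums the per-block
// partials (precomputed here) and resets the semaphore for a later pass.
__global__ void last_block_sums(const float* partial, float* out,
                                unsigned int* semaphore, int num_blocks)
{
    __shared__ bool is_last;

    __threadfence();   // in the real kernel this orders the block's counter writes before the semaphore bump
    if (threadIdx.x == 0) {
        unsigned int done = atomicAdd(semaphore, 1u);
        is_last = (done == (unsigned int)(num_blocks - 1));
    }
    __syncthreads();
    if (!is_last)
        return;

    if (threadIdx.x == 0) {
        float sum = 0.f;
        for (int b = 0; b < num_blocks; ++b)
            sum += partial[b];
        *out = sum;
        *semaphore = 0;                // reset, as radixFindKthValues does per radix pass
    }
}

int main()
{
    const int num_blocks = 8;
    float h_partial[num_blocks];
    for (int i = 0; i < num_blocks; ++i)
        h_partial[i] = 1.0f;           // expected total: 8.0

    float *d_partial, *d_out;
    unsigned int* d_sem;
    cudaMalloc(&d_partial, num_blocks * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMalloc(&d_sem, sizeof(unsigned int));
    cudaMemcpy(d_partial, h_partial, sizeof(h_partial), cudaMemcpyHostToDevice);
    cudaMemset(d_sem, 0, sizeof(unsigned int));

    last_block_sums<<<num_blocks, 32>>>(d_partial, d_out, d_sem, num_blocks);

    float h_out = 0.f;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_out);

    cudaFree(d_partial);
    cudaFree(d_out);
    cudaFree(d_sem);
    return 0;
}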
73bc818b3f96b45713a2574c90ebdf1dfcca11d6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This sample demonstrates stream ordered memory allocation on a GPU using * hipMallocAsync and cudaMemPool family of APIs. * * basicStreamOrderedAllocation(): demonstrates stream ordered allocation using * hipMallocAsync/hipFreeAsync APIs with default settings. * * streamOrderedAllocationPostSync(): demonstrates if there's a synchronization in between allocations, * then setting the release threshold on the pool will make sure the synchronize will not * free memory back to the OS. */ // System includes #include <stdio.h> #include <assert.h> #include <climits> // CUDA runtime #include <hip/hip_runtime.h> // helper functions and utilities to work with CUDA #include <helper_functions.h> #include <helper_cuda.h> #define MAX_ITER 20 /* Add two vectors on the GPU */ __global__ void vectorAddGPU(const float *a, const float *b, float *c, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } int basicStreamOrderedAllocation(const int dev, const int nelem, const float *a, const float *b, float *c) { float *d_a, *d_b, *d_c; // Device buffers float errorNorm, refNorm, ref, diff; size_t bytes = nelem * sizeof(float); hipStream_t stream; printf("Starting basicStreamOrderedAllocation()\n"); checkCudaErrors(hipSetDevice(dev)); checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); checkCudaErrors(hipMallocAsync(&d_a, bytes, stream)); checkCudaErrors(hipMallocAsync(&d_b, bytes, stream)); checkCudaErrors(hipMallocAsync(&d_c, bytes, stream)); checkCudaErrors(hipMemcpyAsync(d_a, a, bytes, hipMemcpyHostToDevice, stream)); checkCudaErrors(hipMemcpyAsync(d_b, b, bytes, hipMemcpyHostToDevice, stream)); dim3 block(256); dim3 grid((unsigned int)ceil(nelem/(float)block.x)); hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, stream, d_a, d_b, d_c, nelem); checkCudaErrors(hipFreeAsync(d_a, stream)); checkCudaErrors(hipFreeAsync(d_b, stream)); checkCudaErrors(hipMemcpyAsync(c, d_c, bytes, hipMemcpyDeviceToHost, stream)); checkCudaErrors(hipFreeAsync(d_c, stream)); checkCudaErrors(hipStreamSynchronize(stream)); /* Compare the results */ printf("> Checking the results from vectorAddGPU() ...\n"); errorNorm = 0.f; refNorm = 0.f; for (int n = 0; n < nelem; n++) { ref = a[n] + b[n]; diff = c[n] - ref; errorNorm += diff*diff; refNorm += ref*ref; } errorNorm = (float)sqrt((double)errorNorm); refNorm = (float)sqrt((double)refNorm); if (errorNorm/refNorm < 1.e-6f) printf("basicStreamOrderedAllocation PASSED\n"); checkCudaErrors(hipStreamDestroy(stream)); return errorNorm/refNorm < 1.e-6f ? EXIT_SUCCESS : EXIT_FAILURE; } // streamOrderedAllocationPostSync(): demonstrates If the application wants the memory to persist in the pool beyond // synchronization, then it sets the release threshold on the pool. This way, when the application reaches the "steady state", // it is no longer allocating/freeing memory from the OS. 
int streamOrderedAllocationPostSync(const int dev, const int nelem, const float *a, const float *b, float *c) { float *d_a, *d_b, *d_c; // Device buffers float errorNorm, refNorm, ref, diff; size_t bytes = nelem * sizeof(float); hipStream_t stream; hipMemPool_t memPool; hipEvent_t start, end; printf("Starting streamOrderedAllocationPostSync()\n"); checkCudaErrors(hipSetDevice(dev)); checkCudaErrors(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&end)); checkCudaErrors(hipDeviceGetDefaultMemPool(&memPool, dev)); uint64_t thresholdVal = ULONG_MAX; // set high release threshold on the default pool so that hipFreeAsync will not actually release memory to the system. // By default, the release threshold for a memory pool is set to zero. This implies that the CUDA driver is // allowed to release a memory chunk back to the system as long as it does not contain any active suballocations. checkCudaErrors(hipMemPoolSetAttribute(memPool, hipMemPoolAttrReleaseThreshold, (void*)&thresholdVal)); // Record the start event checkCudaErrors(hipEventRecord(start, stream)); for (int i = 0; i < MAX_ITER; i++) { checkCudaErrors(hipMallocAsync(&d_a, bytes, stream)); checkCudaErrors(hipMallocAsync(&d_b, bytes, stream)); checkCudaErrors(hipMallocAsync(&d_c, bytes, stream)); checkCudaErrors(hipMemcpyAsync(d_a, a, bytes, hipMemcpyHostToDevice, stream)); checkCudaErrors(hipMemcpyAsync(d_b, b, bytes, hipMemcpyHostToDevice, stream)); dim3 block(256); dim3 grid((unsigned int)ceil(nelem/(float)block.x)); hipLaunchKernelGGL(( vectorAddGPU), dim3(grid), dim3(block), 0, stream, d_a, d_b, d_c, nelem); checkCudaErrors(hipFreeAsync(d_a, stream)); checkCudaErrors(hipFreeAsync(d_b, stream)); checkCudaErrors(hipMemcpyAsync(c, d_c, bytes, hipMemcpyDeviceToHost, stream)); checkCudaErrors(hipFreeAsync(d_c, stream)); checkCudaErrors(hipStreamSynchronize(stream)); } checkCudaErrors(hipEventRecord(end, stream)); // Wait for the end event to complete checkCudaErrors(hipEventSynchronize(end)); float msecTotal = 0.0f; checkCudaErrors(hipEventElapsedTime(&msecTotal, start, end)); printf("Total elapsed time = %f ms over %d iterations\n", msecTotal, MAX_ITER); /* Compare the results */ printf("> Checking the results from vectorAddGPU() ...\n"); errorNorm = 0.f; refNorm = 0.f; for (int n = 0; n < nelem; n++) { ref = a[n] + b[n]; diff = c[n] - ref; errorNorm += diff*diff; refNorm += ref*ref; } errorNorm = (float)sqrt((double)errorNorm); refNorm = (float)sqrt((double)refNorm); if (errorNorm/refNorm < 1.e-6f) printf("streamOrderedAllocationPostSync PASSED\n"); checkCudaErrors(hipStreamDestroy(stream)); return errorNorm/refNorm < 1.e-6f ? EXIT_SUCCESS : EXIT_FAILURE; } int main(int argc, char **argv) { int nelem; int dev = 0; // use default device 0 size_t bytes; float *a, *b, *c; // Host if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printf("Usage: streamOrderedAllocation [OPTION]\n\n"); printf("Options:\n"); printf(" --device=[device #] Specify the device to be used\n"); return EXIT_SUCCESS; } dev = findCudaDevice(argc, (const char **)argv); int isMemPoolSupported = 0; checkCudaErrors(hipDeviceGetAttribute(&isMemPoolSupported, hipDeviceAttributeMemoryPoolsSupported, dev)); if (!isMemPoolSupported) { printf("Waiving execution as device does not support Memory Pools\n"); exit(EXIT_WAIVED); } // Allocate CPU memory. 
nelem = 1048576; bytes = nelem*sizeof(float); a = (float*) malloc(bytes); b = (float*) malloc(bytes); c = (float*) malloc(bytes); /* Initialize the vectors. */ for (int n = 0; n < nelem; n++) { a[n] = rand() / (float)RAND_MAX; b[n] = rand() / (float)RAND_MAX; } int ret1 = basicStreamOrderedAllocation(dev, nelem, a, b, c); int ret2 = streamOrderedAllocationPostSync(dev, nelem, a, b, c); /* Memory clean up */ free(a); free(b); free(c); return ((ret1 == EXIT_SUCCESS && ret2 == EXIT_SUCCESS) ? EXIT_SUCCESS : EXIT_FAILURE); }
73bc818b3f96b45713a2574c90ebdf1dfcca11d6.cu
/*
 * Copyright 2020 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

/*
 * This sample demonstrates stream ordered memory allocation on a GPU using
 * cudaMallocAsync and the cudaMemPool family of APIs.
 *
 * basicStreamOrderedAllocation(): demonstrates stream ordered allocation using
 * cudaMallocAsync/cudaFreeAsync APIs with default settings.
 *
 * streamOrderedAllocationPostSync(): demonstrates that if there is a
 * synchronization in between allocations, setting a release threshold on the
 * pool ensures the synchronization does not free memory back to the OS.
 */

// System includes
#include <stdio.h>
#include <assert.h>
#include <climits>

// CUDA runtime
#include <cuda_runtime.h>

// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>

#define MAX_ITER 20

/* Add two vectors on the GPU */
__global__ void vectorAddGPU(const float *a, const float *b, float *c, int N) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;

  if (idx < N) {
    c[idx] = a[idx] + b[idx];
  }
}

int basicStreamOrderedAllocation(const int dev, const int nelem,
                                 const float *a, const float *b, float *c) {
  float *d_a, *d_b, *d_c;  // Device buffers
  float errorNorm, refNorm, ref, diff;
  size_t bytes = nelem * sizeof(float);
  cudaStream_t stream;

  printf("Starting basicStreamOrderedAllocation()\n");
  checkCudaErrors(cudaSetDevice(dev));
  checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));

  checkCudaErrors(cudaMallocAsync(&d_a, bytes, stream));
  checkCudaErrors(cudaMallocAsync(&d_b, bytes, stream));
  checkCudaErrors(cudaMallocAsync(&d_c, bytes, stream));
  checkCudaErrors(cudaMemcpyAsync(d_a, a, bytes, cudaMemcpyHostToDevice, stream));
  checkCudaErrors(cudaMemcpyAsync(d_b, b, bytes, cudaMemcpyHostToDevice, stream));

  dim3 block(256);
  dim3 grid((unsigned int)ceil(nelem / (float)block.x));
  vectorAddGPU<<<grid, block, 0, stream>>>(d_a, d_b, d_c, nelem);

  checkCudaErrors(cudaFreeAsync(d_a, stream));
  checkCudaErrors(cudaFreeAsync(d_b, stream));
  checkCudaErrors(cudaMemcpyAsync(c, d_c, bytes, cudaMemcpyDeviceToHost, stream));
  checkCudaErrors(cudaFreeAsync(d_c, stream));
  checkCudaErrors(cudaStreamSynchronize(stream));

  /* Compare the results */
  printf("> Checking the results from vectorAddGPU() ...\n");
  errorNorm = 0.f;
  refNorm = 0.f;

  for (int n = 0; n < nelem; n++) {
    ref = a[n] + b[n];
    diff = c[n] - ref;
    errorNorm += diff * diff;
    refNorm += ref * ref;
  }

  errorNorm = (float)sqrt((double)errorNorm);
  refNorm = (float)sqrt((double)refNorm);
  if (errorNorm / refNorm < 1.e-6f)
    printf("basicStreamOrderedAllocation PASSED\n");

  checkCudaErrors(cudaStreamDestroy(stream));

  return errorNorm / refNorm < 1.e-6f ? EXIT_SUCCESS : EXIT_FAILURE;
}

// streamOrderedAllocationPostSync(): demonstrates that if the application wants
// the memory to persist in the pool beyond synchronization, it sets the release
// threshold on the pool. This way, when the application reaches its "steady
// state", it is no longer allocating/freeing memory from the OS.
int streamOrderedAllocationPostSync(const int dev, const int nelem,
                                    const float *a, const float *b, float *c) {
  float *d_a, *d_b, *d_c;  // Device buffers
  float errorNorm, refNorm, ref, diff;
  size_t bytes = nelem * sizeof(float);
  cudaStream_t stream;
  cudaMemPool_t memPool;
  cudaEvent_t start, end;

  printf("Starting streamOrderedAllocationPostSync()\n");
  checkCudaErrors(cudaSetDevice(dev));
  checkCudaErrors(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking));
  checkCudaErrors(cudaEventCreate(&start));
  checkCudaErrors(cudaEventCreate(&end));

  checkCudaErrors(cudaDeviceGetDefaultMemPool(&memPool, dev));
  uint64_t thresholdVal = ULONG_MAX;
  // Set a high release threshold on the default pool so that cudaFreeAsync will
  // not actually release memory to the system. By default, the release
  // threshold for a memory pool is set to zero. This implies that the CUDA
  // driver is allowed to release a memory chunk back to the system as long as
  // it does not contain any active suballocations.
  checkCudaErrors(cudaMemPoolSetAttribute(
      memPool, cudaMemPoolAttrReleaseThreshold, (void *)&thresholdVal));

  // Record the start event
  checkCudaErrors(cudaEventRecord(start, stream));
  for (int i = 0; i < MAX_ITER; i++) {
    checkCudaErrors(cudaMallocAsync(&d_a, bytes, stream));
    checkCudaErrors(cudaMallocAsync(&d_b, bytes, stream));
    checkCudaErrors(cudaMallocAsync(&d_c, bytes, stream));
    checkCudaErrors(cudaMemcpyAsync(d_a, a, bytes, cudaMemcpyHostToDevice, stream));
    checkCudaErrors(cudaMemcpyAsync(d_b, b, bytes, cudaMemcpyHostToDevice, stream));

    dim3 block(256);
    dim3 grid((unsigned int)ceil(nelem / (float)block.x));
    vectorAddGPU<<<grid, block, 0, stream>>>(d_a, d_b, d_c, nelem);

    checkCudaErrors(cudaFreeAsync(d_a, stream));
    checkCudaErrors(cudaFreeAsync(d_b, stream));
    checkCudaErrors(cudaMemcpyAsync(c, d_c, bytes, cudaMemcpyDeviceToHost, stream));
    checkCudaErrors(cudaFreeAsync(d_c, stream));
    checkCudaErrors(cudaStreamSynchronize(stream));
  }
  checkCudaErrors(cudaEventRecord(end, stream));
  // Wait for the end event to complete
  checkCudaErrors(cudaEventSynchronize(end));

  float msecTotal = 0.0f;
  checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, end));
  printf("Total elapsed time = %f ms over %d iterations\n", msecTotal, MAX_ITER);

  /* Compare the results */
  printf("> Checking the results from vectorAddGPU() ...\n");
  errorNorm = 0.f;
  refNorm = 0.f;

  for (int n = 0; n < nelem; n++) {
    ref = a[n] + b[n];
    diff = c[n] - ref;
    errorNorm += diff * diff;
    refNorm += ref * ref;
  }

  errorNorm = (float)sqrt((double)errorNorm);
  refNorm = (float)sqrt((double)refNorm);
  if (errorNorm / refNorm < 1.e-6f)
    printf("streamOrderedAllocationPostSync PASSED\n");

  checkCudaErrors(cudaStreamDestroy(stream));

  return errorNorm / refNorm < 1.e-6f ? EXIT_SUCCESS : EXIT_FAILURE;
}

int main(int argc, char **argv) {
  int nelem;
  int dev = 0;  // use default device 0
  size_t bytes;
  float *a, *b, *c;  // Host

  if (checkCmdLineFlag(argc, (const char **)argv, "help")) {
    printf("Usage: streamOrderedAllocation [OPTION]\n\n");
    printf("Options:\n");
    printf(" --device=[device #] Specify the device to be used\n");
    return EXIT_SUCCESS;
  }

  dev = findCudaDevice(argc, (const char **)argv);

  int isMemPoolSupported = 0;
  checkCudaErrors(cudaDeviceGetAttribute(&isMemPoolSupported,
                                         cudaDevAttrMemoryPoolsSupported, dev));
  if (!isMemPoolSupported) {
    printf("Waiving execution as device does not support Memory Pools\n");
    exit(EXIT_WAIVED);
  }

  // Allocate CPU memory.
  nelem = 1048576;
  bytes = nelem * sizeof(float);

  a = (float *)malloc(bytes);
  b = (float *)malloc(bytes);
  c = (float *)malloc(bytes);

  /* Initialize the vectors. */
  for (int n = 0; n < nelem; n++) {
    a[n] = rand() / (float)RAND_MAX;
    b[n] = rand() / (float)RAND_MAX;
  }

  int ret1 = basicStreamOrderedAllocation(dev, nelem, a, b, c);
  int ret2 = streamOrderedAllocationPostSync(dev, nelem, a, b, c);

  /* Memory clean up */
  free(a);
  free(b);
  free(c);

  return ((ret1 == EXIT_SUCCESS && ret2 == EXIT_SUCCESS) ? EXIT_SUCCESS
                                                         : EXIT_FAILURE);
}
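For reference, the pool configuration used in streamOrderedAllocationPostSync() can be written as a small standalone helper. The following is only a sketch of my own (assuming a CUDA 11.2+ toolkit where the cudaMemPool APIs are available), not part of the sample:

// Sketch: raise the release threshold of a device's default memory pool so that
// cudaFreeAsync keeps freed memory cached in the pool across synchronizations.
#include <cuda_runtime.h>
#include <cstdint>

cudaError_t keepPoolMemoryResident(int device) {
  cudaMemPool_t pool;
  cudaError_t err = cudaDeviceGetDefaultMemPool(&pool, device);
  if (err != cudaSuccess) return err;
  // UINT64_MAX effectively disables releasing unused pool memory back to the OS.
  uint64_t threshold = UINT64_MAX;
  return cudaMemPoolSetAttribute(pool, cudaMemPoolAttrReleaseThreshold, &threshold);
}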
f836d3c8a57c35eba0d3f1cbd7b4f8604652de3b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

#define N 20

struct node {
  struct node *next;
  int data;
};

struct node *createNode(int ii) {
  struct node *nn = (struct node *)malloc(sizeof(struct node));
  nn->data = ii;
  nn->next = NULL;
  return nn;
}

struct node *createList() {
  struct node *head = NULL;
  for (int ii = 20; ii > 0; --ii) {
    struct node *nn = createNode(ii);
    nn->next = head;
    head = nn;
  }
  return head;
}

__device__ __host__ void printList(struct node *head) {
  if (head) {
    printf("%d ", head->data);
    printList(head->next);
  } else {
    printf("\n");
  }
}

__global__ void printListGPU(struct node *head) {
  printList(head);
}

struct node *copyNode(struct node *nn) {
  struct node *nngpu;
  hipMalloc(&nngpu, sizeof(struct node));
  hipMemcpy(nngpu, nn, sizeof(struct node), hipMemcpyHostToDevice);
  return nngpu;
}

struct node *copyList(struct node *head) {
  if (!head) return NULL;
  struct node nn;
  nn.next = copyList(head->next);
  nn.data = head->data;
  return copyNode(&nn);
}

int main() {
  struct node *head = createList();
  struct node *gpuhead = copyList(head);
  printList(head);
  hipLaunchKernelGGL(( printListGPU), dim3(1), dim3(1), 0, 0, gpuhead);
  hipDeviceSynchronize();
  return 0;
}
f836d3c8a57c35eba0d3f1cbd7b4f8604652de3b.cu
#include <stdio.h>
#include <cuda.h>

#define N 20

struct node {
  struct node *next;
  int data;
};

struct node *createNode(int ii) {
  struct node *nn = (struct node *)malloc(sizeof(struct node));
  nn->data = ii;
  nn->next = NULL;
  return nn;
}

struct node *createList() {
  struct node *head = NULL;
  for (int ii = 20; ii > 0; --ii) {
    struct node *nn = createNode(ii);
    nn->next = head;
    head = nn;
  }
  return head;
}

__device__ __host__ void printList(struct node *head) {
  if (head) {
    printf("%d ", head->data);
    printList(head->next);
  } else {
    printf("\n");
  }
}

__global__ void printListGPU(struct node *head) {
  printList(head);
}

struct node *copyNode(struct node *nn) {
  struct node *nngpu;
  cudaMalloc(&nngpu, sizeof(struct node));
  cudaMemcpy(nngpu, nn, sizeof(struct node), cudaMemcpyHostToDevice);
  return nngpu;
}

struct node *copyList(struct node *head) {
  if (!head) return NULL;
  struct node nn;
  nn.next = copyList(head->next);
  nn.data = head->data;
  return copyNode(&nn);
}

int main() {
  struct node *head = createList();
  struct node *gpuhead = copyList(head);
  printList(head);
  printListGPU<<<1, 1>>>(gpuhead);
  cudaDeviceSynchronize();
  return 0;
}
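A side note on the linked-list pair above: copyList() allocates one device node per host node, but nothing releases that memory. A hypothetical cleanup helper (my own sketch, not in either file; shown in CUDA to match the .cu version — the hipified form would swap in hipMemcpy/hipFree) could walk the device list by copying each node back to the host to recover its next pointer:

// Hypothetical helper (assumes the struct node definition from the files above):
// frees the device-side list built by copyList(). The next pointers live in
// device memory, so each node is copied back to the host first to learn the
// address of the following node before it is freed.
void freeDeviceList(struct node *gpuhead) {
  while (gpuhead) {
    struct node tmp;
    cudaMemcpy(&tmp, gpuhead, sizeof(struct node), cudaMemcpyDeviceToHost);
    cudaFree(gpuhead);
    gpuhead = tmp.next;
  }
}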
ddf2a56556af68af01b2db495ac83fd3a9b0aa8f.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <torch/extension.h> #include "timer.h" #include "sphconv/debug_utils.h" #include "assert.h" using namespace std; using namespace torch::indexing; namespace sphconv { template <typename T, int N> using GpuTensor = torch::PackedTensorAccessor32<T, N, torch::RestrictPtrTraits>; int huristic_tile_n_max(int inTileSizeH, int inTileSizeW, int B, int H, int W, int D, int NNZ) { double sparsity = (double)NNZ / (double)(B * H * W * D); double size = B * inTileSizeH * inTileSizeW * D * sparsity * 2; return (int)(size + 64) & (-1 << 4); } __host__ __device__ __forceinline__ int getInTileSize(int outTileSize, int stride, int kernelSize) { assert(stride <= kernelSize); return stride * (outTileSize - 1) + kernelSize; } template <typename IType> __device__ __forceinline__ IType OutSpatial(IType k, IType x, IType s, IType d, IType pad) { // forgive me. do nothing with the dillation // TODO: dilation if ((x + pad - k) % s == 0) return (x + pad - k) / s; return -1; } /** * @brief init the grid[B, H, W, D], * grid is a mapping from spatial location to its target glaobal physical location * * fill grid with global indices * * @return __global__ */ template <typename IType> __global__ void prepareSubMGridKernel( const GpuTensor<IType, 1> zIndices, // [NNZ] const GpuTensor<IType, 3> zPtr, // [B, H, W] // TODO replace zPtr with exclusiveScan GpuTensor<IType, 4> grid) // [B, H, W, D] { int B = zPtr.size(0); int H = zPtr.size(1); int W = zPtr.size(2); int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= H || y >= W) return; // for each voxel for (int b = 0; b < B; b++) { int zEnd = zPtr[b][x][y]; int zStart = (b == 0 && x == 0 && y == 0) ? 0 : *(&zPtr[b][x][y] - 1); // diverge here, but we assume it's quick for (int pos = zStart; pos < zEnd; pos++) { int z = zIndices[pos]; grid[b][x][y][z] = pos; } } } /** * @brief init the grid[B, oH, oW, oD], * grid is a mapping from spatial location to its target global physical location * * fill grid with global indices, * * we first fill it with 0, * fill output cell to ones * sum along D * * TODO: fill local indices * * @return __global__ */ template <typename IType> __global__ void prepareGridKernel( const GpuTensor<IType, 1> zIndices, // [NNZ] const GpuTensor<IType, 3> zPtr, // [B, H, W] GpuTensor<IType, 4> grid, // [B, oH, oW, oD] int KH, int KW, int KD, int sH, int sW, int sD, int padH, int padW, int padD, int dH, int dW, int dD) { int B = zPtr.size(0); int H = zPtr.size(1); int W = zPtr.size(2); int oH = grid.size(1); int oW = grid.size(2); int oD = grid.size(3); int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int k = threadIdx.z; if (x >= H || y >= W) return; // (KH, KW, KD) // k = kx * KH ky kz int k_H = k / (KW * KD); int k_W = (k / KD) % KW; int k_D = k % KD; int oX = OutSpatial(k_H, x, sH, dH, padH); if (oX < 0 || oX >= oH) return; int oY = OutSpatial(k_W, y, sW, dW, padW); if (oY < 0 || oY >= oW) return; /// for each input voxel, fill its output to 1 for (int b = 0; b < B; b++) { int zEnd = zPtr[b][x][y]; int zStart = (b == 0 && x == 0 && y == 0) ? 
0 : zPtr[b][x][y - 1]; // diverge here, but we assume it's quick for (int pos = zStart; pos < zEnd; pos++) { int z = zIndices[pos]; int oZ = OutSpatial(k_D, z, sD, dD, padD); if (oZ < 0 || oZ >= oD) continue; grid[b][oX][oY][oZ] = 1; } } } /** * for std conv * create ozIndices and Rule s. */ template <typename IType> __global__ void getOzIndicesAndRulesKernel( const GpuTensor<IType, 1> zIndices, // [NNZ] GpuTensor<IType, 1> ozIndices, // [NNZ'] const GpuTensor<IType, 3> zPtr, // [B, H, W] const GpuTensor<IType, 3> ozPtr, // [B, oH, oW] const GpuTensor<IType, 4> grid, // [B, oH, oW, oD] GpuTensor<IType, 4> rules, // [NTile, KKK, 2, DMax] GpuTensor<IType, 2> ruleSize, // number active index, [NTile, KKK] int D, int KH, int KW, int KD, int sH, int sW, int sD, int padH, int padW, int padD, int dH, int dW, int dD, int outTileH, int outTileW) { int B = zPtr.size(0); int H = zPtr.size(1); int W = zPtr.size(2); int oH = grid.size(1); int oW = grid.size(2); int oD = grid.size(3); int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int k = threadIdx.z + blockDim.z * blockIdx.z; if (x >= H || y >= W) return; int k_H = k / (KW * KD); int k_W = (k / KD) % KW; int k_D = k % KD; int oX = OutSpatial(k_H, x, sH, dH, padH); if (oX < 0 || oX >= oH) return; int oY = OutSpatial(k_W, y, sW, dW, padW); if (oY < 0 || oY >= oW) return; int TileGridW = divUp(oW, outTileW); int nTile = getLinearTileIdx(outTileH, outTileW, oX, oY, TileGridW); // printf("nTile(%d), oxy(%d,%d)\n", nTile, oX, oY); // nTile = 1; int tile_n_max = rules.size(3); for (int b = 0; b < B; b++) { int zEnd = zPtr[b][x][y]; int zStart = (b == 0 && x == 0 && y == 0) ? 0 : zPtr[b][x][y - 1]; for (int globalInIdx = zStart; globalInIdx < zEnd; globalInIdx++) { int z = zIndices[globalInIdx]; int oZ = OutSpatial(k_D, z, sD, dD, padD); if (oZ < 0 || oZ >= oD) continue; int globalOutIdx = grid[b][oX][oY][oZ] - 1; int counter = atomicAdd(&ruleSize[nTile][k], 1); if (counter < tile_n_max) { rules[nTile][k][0][counter] = globalInIdx; rules[nTile][k][1][counter] = globalOutIdx; } else { printf("overflow counter:(%d/%d), global i/o:%d/%d, nTile:%d, x:%d, y:%d, k:%d, Tile(%d,%d), oShape(%d,%d,%d), std\n", counter, tile_n_max, globalInIdx, globalOutIdx, nTile, x, y, k, outTileH, outTileW, oH, oW, oD); } ozIndices[globalOutIdx] = oZ; } } // b } /*** * fill rules, rules: [NTile, K*K*K, 4, DMax] */ template <typename IType> __global__ void getSubMRulesKernel( const GpuTensor<IType, 1> zIndices, // [NNZ] const GpuTensor<IType, 3> zPtr, // [B, H, W] const GpuTensor<IType, 4> grid, // [B, oH, oW, oD] GpuTensor<IType, 4> rules, // [NTile, KKK, 2, DMax] GpuTensor<IType, 2> ruleSize, // number active index, [NTile, KKK] int D, int KH, int KW, int KD, int sH, int sW, int sD, int padH, int padW, int padD, int dH, int dW, int dD, int outTileH, int outTileW) { int tile_n_max = rules.size(3); int B = zPtr.size(0); int H = zPtr.size(1); int W = zPtr.size(2); int oH = grid.size(1); int oW = grid.size(2); int oD = grid.size(3); int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= H || y >= W) return; int k = threadIdx.z + blockDim.z * blockIdx.z; int k_H = k / (KW * KD); int k_W = (k / KD) % KW; int k_D = k % KD; int oX = OutSpatial(k_H, x, sH, dH, padH); if (oX < 0 || oX >= oH) return; int oY = OutSpatial(k_W, y, sW, dW, padW); if (oY < 0 || oY >= oW) return; // int TileGridH = divUp(oH, outTileH); int TileGridW = divUp(oW, outTileW); int nTile = getLinearTileIdx(outTileH, outTileW, oX, 
oY, TileGridW); // printf("nTile(%d), oxy(%d,%d)\n", nTile, oX, oY); // nTile = 1; for (int b = 0; b < B; b++) { int zEnd = zPtr[b][x][y]; int zStart = (b == 0 && x == 0 && y == 0) ? 0 : zPtr[b][x][y - 1]; // diverge here for (int globalInIdx = zStart; globalInIdx < zEnd; globalInIdx++) { int z = zIndices[globalInIdx]; int oZ = OutSpatial(k_D, z, sD, dD, padD); if (oZ < 0 || oZ >= oD) continue; int globalOutIdx = grid[b][oX][oY][oZ]; if (globalOutIdx < 0) continue; // printf(" Tile(%d,%d), in(%d,%d,%d), out(%d,%d,%d), pair(%d,%d), k=%d\n", // oX / outTileH, oY / outTileW, // x, y, z, oX, oY, oZ, // globalInIdx, globalOutIdx, k); int counter = atomicAdd(&ruleSize[nTile][k], 1); if (counter < tile_n_max) { rules[nTile][k][0][counter] = globalInIdx; rules[nTile][k][1][counter] = globalOutIdx; } else { printf("overflow counter:(%d/%d), global i/o:%d/%d, nTile:%d, x:%d, y:%d, k:%d, Tile(%d,%d), inShape(%d,%d,%d), std\n", counter, tile_n_max, globalInIdx, globalOutIdx, nTile, x, y, k, outTileH, outTileW, oH, oW, oD); } } } // b } /** * tile_size: tile_size is on the output feature map. * * * return rules * rules: [NTile, K*K*K, 4, DMax] * * ROADMAP: * 1. only generate global indices * 2. generate both local indices and global indices * * ref: getIndicePair */ std::vector<torch::Tensor> get_rules_subm(const torch::Tensor zIndices, // [NNZ] const torch::Tensor zPtr, // [B, H, W] int batchSize, std::vector<int64_t> spatialShape, // H, W, D std::vector<int64_t> outSpatialShape, // H, W, D std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> tileSize) { torch::Tensor grid = torch::full({batchSize, outSpatialShape[0], outSpatialShape[1], outSpatialShape[2]}, /*value=*/-1, torch::dtype(torch::kInt32).device(zIndices.device())); dim3 gridSize(divUp(spatialShape[0], 16), divUp(spatialShape[1], 32)); dim3 blockSize(16, 32, 1); hipLaunchKernelGGL(( prepareSubMGridKernel<int32_t>), dim3(gridSize), dim3(blockSize), 0, 0, zIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), zPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), grid.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>()); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); int64_t kernelVolume = std::accumulate(kernelSize.begin(), kernelSize.end(), 1, std::multiplies<int64_t>()); // number of tiles int NTile = divUp(outSpatialShape[0], tileSize[0]) * divUp(outSpatialShape[1], tileSize[1]); // allocate rules and indice Num int tile_n_max = huristic_tile_n_max( getInTileSize(tileSize[0], stride[0], kernelSize[0]), getInTileSize(tileSize[1], stride[1], kernelSize[1]), batchSize, spatialShape[0], spatialShape[1], spatialShape[2], zIndices.size(0)); // printf("tile_n_max = %d\n", tile_n_max); torch::Tensor rules = torch::full( {NTile, kernelVolume, 2, tile_n_max}, /*value=*/-1, torch::dtype(torch::kInt32).device(zIndices.device())); torch::Tensor ruleSize = torch::zeros( {NTile, kernelVolume}, torch::dtype(torch::kInt32).device(zIndices.device())); gridSize = dim3(divUp(spatialShape[0], 4), divUp(spatialShape[1], 8), kernelVolume); blockSize = dim3(4, 8, 1); hipLaunchKernelGGL(( getSubMRulesKernel<int32_t>), dim3(gridSize), dim3(blockSize), 0, 0, zIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), zPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), grid.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), rules.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), 
ruleSize.packed_accessor32<int32_t, 2, torch::RestrictPtrTraits>(), spatialShape[2], kernelSize[0], kernelSize[1], kernelSize[2], stride[0], stride[1], stride[2], padding[0], padding[1], padding[2], dilation[0], dilation[1], dilation[2], tileSize[0], tileSize[1]); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); return {zIndices, zPtr, rules, ruleSize}; } /** * tile_size: tile_size is on the output feature map. * * * return rules * rules: [NTile, K*K*K, 4, DMax] * * ROADMAP: * 1. only generate global indices * 2. generate both local indices and global indices * * ref: getIndicePair */ std::vector<torch::Tensor> get_rules(const torch::Tensor zIndices, // [NNZ] const torch::Tensor zPtr, // [B, H, W] int batchSize, std::vector<int64_t> spatialShape, // H, W, D std::vector<int64_t> outSpatialShape, // oH, oW, oD std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> tileSize) { torch::Tensor grid = torch::zeros({batchSize, outSpatialShape[0], outSpatialShape[1], outSpatialShape[2]}, torch::dtype(torch::kInt32).device(zIndices.device())); int64_t kernelVolume = std::accumulate(kernelSize.begin(), kernelSize.end(), 1, std::multiplies<int64_t>()); dim3 gridSize(divUp(spatialShape[0], 2), divUp(spatialShape[1], 16), 1); dim3 blockSize(2, 16, kernelVolume); hipLaunchKernelGGL(( prepareGridKernel<int32_t>), dim3(gridSize), dim3(blockSize), 0, 0, zIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), zPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), grid.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), kernelSize[0], kernelSize[1], kernelSize[2], stride[0], stride[1], stride[2], padding[0], padding[1], padding[2], dilation[0], dilation[1], dilation[2]); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); grid = torch::cumsum(grid, 3, torch::kInt32); // [B, oH, oW, oD] // std::cout << "grid(2) = " << grid << std::endl; // here we want non inclusive scan, but pytorch only provides this. 
torch::Tensor ozPtr = torch::cumsum(grid.index({Slice(), Slice(), Slice(), -1}).reshape({-1}), 0, torch::kInt32) .reshape({batchSize, outSpatialShape[0], outSpatialShape[1]}); // [B, oH, oW] torch::Tensor exclusiveScan = ozPtr.roll(1); exclusiveScan.index_put_({0, 0, 0}, 0); grid += exclusiveScan.unsqueeze(-1); // now grid is filled with global output index // std::cout << "grid(3) = " << grid << std::endl; int NTile = divUp(outSpatialShape[0], tileSize[0]) * divUp(outSpatialShape[1], tileSize[1]); int tile_n_max = huristic_tile_n_max( getInTileSize(tileSize[0], stride[0], kernelSize[0]), getInTileSize(tileSize[1], stride[1], kernelSize[1]), batchSize, spatialShape[0], spatialShape[1], spatialShape[2], zIndices.size(0)); torch::Tensor rules = torch::full( {NTile, kernelVolume, 2, tile_n_max}, /*value=*/-1, torch::dtype(torch::kInt32).device(zIndices.device())); torch::Tensor ruleSize = torch::zeros( {NTile, kernelVolume}, torch::dtype(torch::kInt32).device(zIndices.device())); // PRINT_SHAPE(ruleSize); int outNNZ = ozPtr.view({-1}).index({-1}).item<int>(); torch::Tensor ozIndices = torch::empty({outNNZ}, torch::dtype(torch::kInt32).device(zIndices.device())); // PRINT_SHAPE(ozIndices); gridSize = dim3(divUp(spatialShape[0], 2), divUp(spatialShape[1], 16), kernelVolume); blockSize = dim3(2, 16, 1); hipLaunchKernelGGL(( getOzIndicesAndRulesKernel<int32_t>), dim3(gridSize), dim3(blockSize), 0, 0, zIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), ozIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), zPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), ozPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), grid.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), rules.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), ruleSize.packed_accessor32<int32_t, 2, torch::RestrictPtrTraits>(), spatialShape[2], kernelSize[0], kernelSize[1], kernelSize[2], stride[0], stride[1], stride[2], padding[0], padding[1], padding[2], dilation[0], dilation[1], dilation[2], tileSize[0], tileSize[1]); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); return {ozIndices, ozPtr, rules, ruleSize}; } } // namespace sphconv
ddf2a56556af68af01b2db495ac83fd3a9b0aa8f.cu
#include <cstdio> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> #include <torch/extension.h> #include "timer.h" #include "sphconv/debug_utils.h" #include "assert.h" using namespace std; using namespace torch::indexing; namespace sphconv { template <typename T, int N> using GpuTensor = torch::PackedTensorAccessor32<T, N, torch::RestrictPtrTraits>; int huristic_tile_n_max(int inTileSizeH, int inTileSizeW, int B, int H, int W, int D, int NNZ) { double sparsity = (double)NNZ / (double)(B * H * W * D); double size = B * inTileSizeH * inTileSizeW * D * sparsity * 2; return (int)(size + 64) & (-1 << 4); } __host__ __device__ __forceinline__ int getInTileSize(int outTileSize, int stride, int kernelSize) { assert(stride <= kernelSize); return stride * (outTileSize - 1) + kernelSize; } template <typename IType> __device__ __forceinline__ IType OutSpatial(IType k, IType x, IType s, IType d, IType pad) { // forgive me. do nothing with the dillation // TODO: dilation if ((x + pad - k) % s == 0) return (x + pad - k) / s; return -1; } /** * @brief init the grid[B, H, W, D], * grid is a mapping from spatial location to its target glaobal physical location * * fill grid with global indices * * @return __global__ */ template <typename IType> __global__ void prepareSubMGridKernel( const GpuTensor<IType, 1> zIndices, // [NNZ] const GpuTensor<IType, 3> zPtr, // [B, H, W] // TODO replace zPtr with exclusiveScan GpuTensor<IType, 4> grid) // [B, H, W, D] { int B = zPtr.size(0); int H = zPtr.size(1); int W = zPtr.size(2); int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= H || y >= W) return; // for each voxel for (int b = 0; b < B; b++) { int zEnd = zPtr[b][x][y]; int zStart = (b == 0 && x == 0 && y == 0) ? 0 : *(&zPtr[b][x][y] - 1); // diverge here, but we assume it's quick for (int pos = zStart; pos < zEnd; pos++) { int z = zIndices[pos]; grid[b][x][y][z] = pos; } } } /** * @brief init the grid[B, oH, oW, oD], * grid is a mapping from spatial location to its target global physical location * * fill grid with global indices, * * we first fill it with 0, * fill output cell to ones * sum along D * * TODO: fill local indices * * @return __global__ */ template <typename IType> __global__ void prepareGridKernel( const GpuTensor<IType, 1> zIndices, // [NNZ] const GpuTensor<IType, 3> zPtr, // [B, H, W] GpuTensor<IType, 4> grid, // [B, oH, oW, oD] int KH, int KW, int KD, int sH, int sW, int sD, int padH, int padW, int padD, int dH, int dW, int dD) { int B = zPtr.size(0); int H = zPtr.size(1); int W = zPtr.size(2); int oH = grid.size(1); int oW = grid.size(2); int oD = grid.size(3); int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int k = threadIdx.z; if (x >= H || y >= W) return; // (KH, KW, KD) // k = kx * KH ky kz int k_H = k / (KW * KD); int k_W = (k / KD) % KW; int k_D = k % KD; int oX = OutSpatial(k_H, x, sH, dH, padH); if (oX < 0 || oX >= oH) return; int oY = OutSpatial(k_W, y, sW, dW, padW); if (oY < 0 || oY >= oW) return; /// for each input voxel, fill its output to 1 for (int b = 0; b < B; b++) { int zEnd = zPtr[b][x][y]; int zStart = (b == 0 && x == 0 && y == 0) ? 0 : zPtr[b][x][y - 1]; // diverge here, but we assume it's quick for (int pos = zStart; pos < zEnd; pos++) { int z = zIndices[pos]; int oZ = OutSpatial(k_D, z, sD, dD, padD); if (oZ < 0 || oZ >= oD) continue; grid[b][oX][oY][oZ] = 1; } } } /** * for std conv * create ozIndices and Rule s. 
*/ template <typename IType> __global__ void getOzIndicesAndRulesKernel( const GpuTensor<IType, 1> zIndices, // [NNZ] GpuTensor<IType, 1> ozIndices, // [NNZ'] const GpuTensor<IType, 3> zPtr, // [B, H, W] const GpuTensor<IType, 3> ozPtr, // [B, oH, oW] const GpuTensor<IType, 4> grid, // [B, oH, oW, oD] GpuTensor<IType, 4> rules, // [NTile, KKK, 2, DMax] GpuTensor<IType, 2> ruleSize, // number active index, [NTile, KKK] int D, int KH, int KW, int KD, int sH, int sW, int sD, int padH, int padW, int padD, int dH, int dW, int dD, int outTileH, int outTileW) { int B = zPtr.size(0); int H = zPtr.size(1); int W = zPtr.size(2); int oH = grid.size(1); int oW = grid.size(2); int oD = grid.size(3); int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; int k = threadIdx.z + blockDim.z * blockIdx.z; if (x >= H || y >= W) return; int k_H = k / (KW * KD); int k_W = (k / KD) % KW; int k_D = k % KD; int oX = OutSpatial(k_H, x, sH, dH, padH); if (oX < 0 || oX >= oH) return; int oY = OutSpatial(k_W, y, sW, dW, padW); if (oY < 0 || oY >= oW) return; int TileGridW = divUp(oW, outTileW); int nTile = getLinearTileIdx(outTileH, outTileW, oX, oY, TileGridW); // printf("nTile(%d), oxy(%d,%d)\n", nTile, oX, oY); // nTile = 1; int tile_n_max = rules.size(3); for (int b = 0; b < B; b++) { int zEnd = zPtr[b][x][y]; int zStart = (b == 0 && x == 0 && y == 0) ? 0 : zPtr[b][x][y - 1]; for (int globalInIdx = zStart; globalInIdx < zEnd; globalInIdx++) { int z = zIndices[globalInIdx]; int oZ = OutSpatial(k_D, z, sD, dD, padD); if (oZ < 0 || oZ >= oD) continue; int globalOutIdx = grid[b][oX][oY][oZ] - 1; int counter = atomicAdd(&ruleSize[nTile][k], 1); if (counter < tile_n_max) { rules[nTile][k][0][counter] = globalInIdx; rules[nTile][k][1][counter] = globalOutIdx; } else { printf("overflow counter:(%d/%d), global i/o:%d/%d, nTile:%d, x:%d, y:%d, k:%d, Tile(%d,%d), oShape(%d,%d,%d), std\n", counter, tile_n_max, globalInIdx, globalOutIdx, nTile, x, y, k, outTileH, outTileW, oH, oW, oD); } ozIndices[globalOutIdx] = oZ; } } // b } /*** * fill rules, rules: [NTile, K*K*K, 4, DMax] */ template <typename IType> __global__ void getSubMRulesKernel( const GpuTensor<IType, 1> zIndices, // [NNZ] const GpuTensor<IType, 3> zPtr, // [B, H, W] const GpuTensor<IType, 4> grid, // [B, oH, oW, oD] GpuTensor<IType, 4> rules, // [NTile, KKK, 2, DMax] GpuTensor<IType, 2> ruleSize, // number active index, [NTile, KKK] int D, int KH, int KW, int KD, int sH, int sW, int sD, int padH, int padW, int padD, int dH, int dW, int dD, int outTileH, int outTileW) { int tile_n_max = rules.size(3); int B = zPtr.size(0); int H = zPtr.size(1); int W = zPtr.size(2); int oH = grid.size(1); int oW = grid.size(2); int oD = grid.size(3); int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x >= H || y >= W) return; int k = threadIdx.z + blockDim.z * blockIdx.z; int k_H = k / (KW * KD); int k_W = (k / KD) % KW; int k_D = k % KD; int oX = OutSpatial(k_H, x, sH, dH, padH); if (oX < 0 || oX >= oH) return; int oY = OutSpatial(k_W, y, sW, dW, padW); if (oY < 0 || oY >= oW) return; // int TileGridH = divUp(oH, outTileH); int TileGridW = divUp(oW, outTileW); int nTile = getLinearTileIdx(outTileH, outTileW, oX, oY, TileGridW); // printf("nTile(%d), oxy(%d,%d)\n", nTile, oX, oY); // nTile = 1; for (int b = 0; b < B; b++) { int zEnd = zPtr[b][x][y]; int zStart = (b == 0 && x == 0 && y == 0) ? 
0 : zPtr[b][x][y - 1]; // diverge here for (int globalInIdx = zStart; globalInIdx < zEnd; globalInIdx++) { int z = zIndices[globalInIdx]; int oZ = OutSpatial(k_D, z, sD, dD, padD); if (oZ < 0 || oZ >= oD) continue; int globalOutIdx = grid[b][oX][oY][oZ]; if (globalOutIdx < 0) continue; // printf(" Tile(%d,%d), in(%d,%d,%d), out(%d,%d,%d), pair(%d,%d), k=%d\n", // oX / outTileH, oY / outTileW, // x, y, z, oX, oY, oZ, // globalInIdx, globalOutIdx, k); int counter = atomicAdd(&ruleSize[nTile][k], 1); if (counter < tile_n_max) { rules[nTile][k][0][counter] = globalInIdx; rules[nTile][k][1][counter] = globalOutIdx; } else { printf("overflow counter:(%d/%d), global i/o:%d/%d, nTile:%d, x:%d, y:%d, k:%d, Tile(%d,%d), inShape(%d,%d,%d), std\n", counter, tile_n_max, globalInIdx, globalOutIdx, nTile, x, y, k, outTileH, outTileW, oH, oW, oD); } } } // b } /** * tile_size: tile_size is on the output feature map. * * * return rules * rules: [NTile, K*K*K, 4, DMax] * * ROADMAP: * 1. only generate global indices * 2. generate both local indices and global indices * * ref: getIndicePair */ std::vector<torch::Tensor> get_rules_subm(const torch::Tensor zIndices, // [NNZ] const torch::Tensor zPtr, // [B, H, W] int batchSize, std::vector<int64_t> spatialShape, // H, W, D std::vector<int64_t> outSpatialShape, // H, W, D std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> tileSize) { torch::Tensor grid = torch::full({batchSize, outSpatialShape[0], outSpatialShape[1], outSpatialShape[2]}, /*value=*/-1, torch::dtype(torch::kInt32).device(zIndices.device())); dim3 gridSize(divUp(spatialShape[0], 16), divUp(spatialShape[1], 32)); dim3 blockSize(16, 32, 1); prepareSubMGridKernel<int32_t><<<gridSize, blockSize>>>( zIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), zPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), grid.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>()); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); int64_t kernelVolume = std::accumulate(kernelSize.begin(), kernelSize.end(), 1, std::multiplies<int64_t>()); // number of tiles int NTile = divUp(outSpatialShape[0], tileSize[0]) * divUp(outSpatialShape[1], tileSize[1]); // allocate rules and indice Num int tile_n_max = huristic_tile_n_max( getInTileSize(tileSize[0], stride[0], kernelSize[0]), getInTileSize(tileSize[1], stride[1], kernelSize[1]), batchSize, spatialShape[0], spatialShape[1], spatialShape[2], zIndices.size(0)); // printf("tile_n_max = %d\n", tile_n_max); torch::Tensor rules = torch::full( {NTile, kernelVolume, 2, tile_n_max}, /*value=*/-1, torch::dtype(torch::kInt32).device(zIndices.device())); torch::Tensor ruleSize = torch::zeros( {NTile, kernelVolume}, torch::dtype(torch::kInt32).device(zIndices.device())); gridSize = dim3(divUp(spatialShape[0], 4), divUp(spatialShape[1], 8), kernelVolume); blockSize = dim3(4, 8, 1); getSubMRulesKernel<int32_t><<<gridSize, blockSize>>>( zIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), zPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), grid.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), rules.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), ruleSize.packed_accessor32<int32_t, 2, torch::RestrictPtrTraits>(), spatialShape[2], kernelSize[0], kernelSize[1], kernelSize[2], stride[0], stride[1], stride[2], padding[0], padding[1], padding[2], dilation[0], dilation[1], dilation[2], tileSize[0], tileSize[1]); 
gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); return {zIndices, zPtr, rules, ruleSize}; } /** * tile_size: tile_size is on the output feature map. * * * return rules * rules: [NTile, K*K*K, 4, DMax] * * ROADMAP: * 1. only generate global indices * 2. generate both local indices and global indices * * ref: getIndicePair */ std::vector<torch::Tensor> get_rules(const torch::Tensor zIndices, // [NNZ] const torch::Tensor zPtr, // [B, H, W] int batchSize, std::vector<int64_t> spatialShape, // H, W, D std::vector<int64_t> outSpatialShape, // oH, oW, oD std::vector<int64_t> kernelSize, std::vector<int64_t> stride, std::vector<int64_t> padding, std::vector<int64_t> dilation, std::vector<int64_t> tileSize) { torch::Tensor grid = torch::zeros({batchSize, outSpatialShape[0], outSpatialShape[1], outSpatialShape[2]}, torch::dtype(torch::kInt32).device(zIndices.device())); int64_t kernelVolume = std::accumulate(kernelSize.begin(), kernelSize.end(), 1, std::multiplies<int64_t>()); dim3 gridSize(divUp(spatialShape[0], 2), divUp(spatialShape[1], 16), 1); dim3 blockSize(2, 16, kernelVolume); prepareGridKernel<int32_t><<<gridSize, blockSize>>>( zIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), zPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), grid.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), kernelSize[0], kernelSize[1], kernelSize[2], stride[0], stride[1], stride[2], padding[0], padding[1], padding[2], dilation[0], dilation[1], dilation[2]); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); grid = torch::cumsum(grid, 3, torch::kInt32); // [B, oH, oW, oD] // std::cout << "grid(2) = " << grid << std::endl; // here we want non inclusive scan, but pytorch only provides this. torch::Tensor ozPtr = torch::cumsum(grid.index({Slice(), Slice(), Slice(), -1}).reshape({-1}), 0, torch::kInt32) .reshape({batchSize, outSpatialShape[0], outSpatialShape[1]}); // [B, oH, oW] torch::Tensor exclusiveScan = ozPtr.roll(1); exclusiveScan.index_put_({0, 0, 0}, 0); grid += exclusiveScan.unsqueeze(-1); // now grid is filled with global output index // std::cout << "grid(3) = " << grid << std::endl; int NTile = divUp(outSpatialShape[0], tileSize[0]) * divUp(outSpatialShape[1], tileSize[1]); int tile_n_max = huristic_tile_n_max( getInTileSize(tileSize[0], stride[0], kernelSize[0]), getInTileSize(tileSize[1], stride[1], kernelSize[1]), batchSize, spatialShape[0], spatialShape[1], spatialShape[2], zIndices.size(0)); torch::Tensor rules = torch::full( {NTile, kernelVolume, 2, tile_n_max}, /*value=*/-1, torch::dtype(torch::kInt32).device(zIndices.device())); torch::Tensor ruleSize = torch::zeros( {NTile, kernelVolume}, torch::dtype(torch::kInt32).device(zIndices.device())); // PRINT_SHAPE(ruleSize); int outNNZ = ozPtr.view({-1}).index({-1}).item<int>(); torch::Tensor ozIndices = torch::empty({outNNZ}, torch::dtype(torch::kInt32).device(zIndices.device())); // PRINT_SHAPE(ozIndices); gridSize = dim3(divUp(spatialShape[0], 2), divUp(spatialShape[1], 16), kernelVolume); blockSize = dim3(2, 16, 1); getOzIndicesAndRulesKernel<int32_t><<<gridSize, blockSize>>>( zIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), ozIndices.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>(), zPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), ozPtr.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(), grid.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), rules.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>(), 
ruleSize.packed_accessor32<int32_t, 2, torch::RestrictPtrTraits>(), spatialShape[2], kernelSize[0], kernelSize[1], kernelSize[2], stride[0], stride[1], stride[2], padding[0], padding[1], padding[2], dilation[0], dilation[1], dilation[2], tileSize[0], tileSize[1]); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); return {ozIndices, ozPtr, rules, ruleSize}; } } // namespace sphconv
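A reading note for the kernels in this pair: as I understand it, zPtr holds an inclusive prefix sum of per-(b, x, y) voxel counts in flattened (b, x, y) order, so the voxels of one cell occupy the half-open range [previous zPtr entry, current zPtr entry) of zIndices, with 0 used for the very first cell. A tiny host-side illustration of that indexing (my own sketch, not sphconv code):

// Illustration: deriving zStart/zEnd from an inclusive-scan zPtr.
#include <cstdio>

int main() {
  // One batch, a 2 x 2 (x, y) grid; per-cell voxel counts are {2, 0, 1, 3}.
  int zPtr[4] = {2, 2, 3, 6};  // inclusive prefix sum of the counts
  for (int cell = 0; cell < 4; ++cell) {
    int zEnd = zPtr[cell];
    int zStart = (cell == 0) ? 0 : zPtr[cell - 1];
    std::printf("cell %d owns zIndices[%d, %d)\n", cell, zStart, zEnd);
  }
  return 0;
}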
a5ecb9fab3341d6acd895026b5ad73ebd008610d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "test_hashmap.cuh" #include "../hashmap.cuh" #include "../random.cuh" #include "../error.cuh" #include "assert.h" using gpulda::FileLine; using gpulda::f32; using gpulda::i32; using gpulda::u32; using gpulda::u64; // #define GPULDA_HASH_DEBUG 1 #ifdef GPULDA_HASH_DEBUG #include <cstdio> #endif namespace gpulda_test { #ifdef GPULDA_HASH_DEBUG __device__ inline void debug_print_slot(gpulda::HashMap* m, i32 slot) { printf("\nhl:s\tk\tv\tis:st:d\n"); for(i32 s = slot; s < slot + warpSize/2; ++s) { u64 entry = m->data[s % m->capacity]; printf("%d:%d\t%u\t%d\t", s % 16, s % m->capacity, m->key(entry), m->value(entry)); if(entry != m->empty()) printf("%d:%d:%d", m->hash_slot(m->key(entry)), m->hash_stride(m->key(entry)), m->key_distance(m->key(entry), slot)); printf("\n"); } printf("\n"); } #endif __global__ void test_hashmap_init(i32 initial_capacity, i32* map_returned_size) { // allocate map __shared__ hiprandStatePhilox4_32_10_t rng; if(threadIdx.x == 0) { hiprand_init((unsigned long long) 0, (unsigned long long) 0, (unsigned long long) 0, &rng); } __syncthreads(); __shared__ gpulda::HashMap m; m.init(initial_capacity, &rng); __syncthreads(); // check returned capacity if(threadIdx.x == 0) { map_returned_size[0] = m.capacity; } // deallocate map m.deallocate(); } __global__ void test_hashmap_insert_print_steps() { #ifdef GPULDA_HASH_DEBUG // initialize __shared__ hiprandStatePhilox4_32_10_t rng; if(threadIdx.x == 0) { hiprand_init((unsigned long long) 0, (unsigned long long) 0, (unsigned long long) 0, &rng); } __syncthreads(); __shared__ gpulda::HashMap m; m.init(96, &rng); m.a=26; m.b=1; m.c=30; m.d=13; __syncthreads(); if(threadIdx.x < warpSize) { if(threadIdx.x == 0) { debug_print_slot(&m, 0); } if(threadIdx.x == 0) { debug_print_slot(&m, 16); } if(threadIdx.x == 0) { debug_print_slot(&m, 32); } if(threadIdx.x == 0) { debug_print_slot(&m, 48); } if(threadIdx.x == 0) { debug_print_slot(&m, 64); } if(threadIdx.x == 0) { debug_print_slot(&m, 80); } if(threadIdx.x == 0) { printf("------------------------------------------------------------\n"); } // 16 m.insert2(threadIdx.x < 16 ? 0 : 3, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 6 : 9, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 12 : 15, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 18 : 21, 1);if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 24 : 27, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 30 : 33, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 36 : 39, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 42 : 45, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } // 16->32 m.insert2(threadIdx.x < 16 ? 
48 : 51, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 54 : 57, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 60 : 63, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 66 : 69, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 72 : 75, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 78 : 81, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 84 : 87, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 90 : 93, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } // 48 m.insert2(threadIdx.x < 16 ? 1 : 4, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 7 : 10, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 13 : 16, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 19 : 22, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 25 : 28, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 31 : 34, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 37 : 40, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 43 : 46, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } // 16->48 evict m.insert2(threadIdx.x < 16 ? 96 : 99, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 102 : 105, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 108 : 111, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 114 : 117, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 120 : 123, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 
126 : 129, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 132 : 135, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 138 : 141, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } if(threadIdx.x == 0) { debug_print_slot(&m, 0); } if(threadIdx.x == 0) { debug_print_slot(&m, 16); } if(threadIdx.x == 0) { debug_print_slot(&m, 32); } if(threadIdx.x == 0) { debug_print_slot(&m, 48); } if(threadIdx.x == 0) { debug_print_slot(&m, 64); } if(threadIdx.x == 0) { debug_print_slot(&m, 80); } } m.deallocate(); #endif } __global__ void test_hashmap_insert2(i32 num_unique_elements, i32 num_elements, i32 max_size, i32* out, i32 rebuild) { __shared__ hiprandStatePhilox4_32_10_t rng; if(threadIdx.x == 0) { hiprand_init((unsigned long long) 0, (unsigned long long) 0, (unsigned long long) 0, &rng); } __syncthreads(); __shared__ gpulda::HashMap m; u32 initial_size = rebuild ? num_elements : max_size; m.init(initial_size, &rng); i32 dim = blockDim.x / (warpSize / 2); i32 half_warp_idx = threadIdx.x / (warpSize / 2); i32 half_lane_idx = threadIdx.x % (warpSize / 2); __syncthreads(); // accumulate elements for(i32 offset = 0; offset < num_elements / dim + 1; ++offset) { u32 i = offset * dim + half_warp_idx; m.insert2(i % num_unique_elements, i < num_elements ? 1 : 0); } // ensure insertion finished __syncthreads(); // rebuild if needed if(rebuild == true) { m.resize(0,0); } // ensure rebuild finished __syncthreads(); // retrieve elements for(i32 offset = 0; offset < num_elements / dim + 1; ++offset) { i32 i = offset * dim + half_warp_idx; if(i < num_unique_elements) { u32 element = m.get2(i); if(half_lane_idx == 0) { out[i] = element; } } } // deallocate map m.deallocate(); } void test_hashmap() { constexpr i32 max_size = 100; // will round down to 96 for cache alignment constexpr i32 num_elements = 90; // large contention to ensure collisions occur constexpr i32 num_unique_elements = 9; constexpr i32 warpSize = 32; constexpr i32 expected_size = ((((i32) (((f32) max_size) * GPULDA_HASH_GROWTH_RATE)) + 3*warpSize) / GPULDA_HASH_LINE_SIZE) * GPULDA_HASH_LINE_SIZE; i32* out; hipMalloc(&out, num_elements * sizeof(i32)) >> GPULDA_CHECK; i32* out_host = new i32[num_elements]; // init<warp> hipLaunchKernelGGL(( test_hashmap_init), dim3(1),dim3(warpSize), 0, 0, max_size, out); hipDeviceSynchronize() >> GPULDA_CHECK; hipMemcpy(out_host, out, sizeof(i32), hipMemcpyDeviceToHost) >> GPULDA_CHECK; assert(out_host[0] == expected_size); out_host[0] = 0; // init<block> hipLaunchKernelGGL(( test_hashmap_init), dim3(1),dim3(GPULDA_POLYA_URN_SAMPLE_BLOCKDIM), 0, 0, max_size, out); hipDeviceSynchronize() >> GPULDA_CHECK; hipMemcpy(out_host, out, sizeof(i32), hipMemcpyDeviceToHost) >> GPULDA_CHECK; assert(out_host[0] == expected_size); out_host[0] = 0; // print steps #ifdef GPULDA_HASH_DEBUG hipLaunchKernelGGL(( test_hashmap_insert_print_steps), dim3(1),dim3(warpSize), 0, 0, ); hipDeviceSynchronize() >> GPULDA_CHECK; return; #endif // insert2: warp, no rebuild hipLaunchKernelGGL(( test_hashmap_insert2), dim3(1),dim3(warpSize), 0, 0, num_unique_elements, num_elements, max_size, out, false); hipDeviceSynchronize() >> GPULDA_CHECK; hipMemcpy(out_host, out, num_elements * sizeof(i32), hipMemcpyDeviceToHost) >> GPULDA_CHECK; for(i32 i = 0; i < 
num_unique_elements; ++i) { assert(out_host[i] == num_elements / num_unique_elements); out_host[i] = 0; } // insert2: block, no rebuild hipLaunchKernelGGL(( test_hashmap_insert2), dim3(1),dim3(GPULDA_POLYA_URN_SAMPLE_BLOCKDIM), 0, 0, num_unique_elements, num_elements, max_size, out, false); hipDeviceSynchronize() >> GPULDA_CHECK; hipMemcpy(out_host, out, num_elements * sizeof(i32), hipMemcpyDeviceToHost) >> GPULDA_CHECK; for(i32 i = 0; i < num_unique_elements; ++i) { assert(out_host[i] == num_elements / num_unique_elements); out_host[i] = 0; } // insert2: warp, rebuild hipLaunchKernelGGL(( test_hashmap_insert2), dim3(1),dim3(warpSize), 0, 0, num_unique_elements, num_elements, max_size, out, true); hipDeviceSynchronize() >> GPULDA_CHECK; hipMemcpy(out_host, out, num_elements * sizeof(u32), hipMemcpyDeviceToHost) >> GPULDA_CHECK; for(i32 i = 0; i < num_unique_elements; ++i) { assert(out_host[i] == num_elements / num_unique_elements); out_host[i] = 0; } // insert2: block, rebuild hipLaunchKernelGGL(( test_hashmap_insert2), dim3(1),dim3(GPULDA_POLYA_URN_SAMPLE_BLOCKDIM), 0, 0, num_unique_elements, num_elements, max_size, out, true); hipDeviceSynchronize() >> GPULDA_CHECK; hipMemcpy(out_host, out, num_elements * sizeof(u32), hipMemcpyDeviceToHost) >> GPULDA_CHECK; for(i32 i = 0; i < num_unique_elements; ++i) { assert(out_host[i] == num_elements / num_unique_elements); out_host[i] = 0; } // cleanup hipFree(out); delete[] out_host; } }
a5ecb9fab3341d6acd895026b5ad73ebd008610d.cu
#include "test_hashmap.cuh" #include "../hashmap.cuh" #include "../random.cuh" #include "../error.cuh" #include "assert.h" using gpulda::FileLine; using gpulda::f32; using gpulda::i32; using gpulda::u32; using gpulda::u64; // #define GPULDA_HASH_DEBUG 1 #ifdef GPULDA_HASH_DEBUG #include <cstdio> #endif namespace gpulda_test { #ifdef GPULDA_HASH_DEBUG __device__ inline void debug_print_slot(gpulda::HashMap* m, i32 slot) { printf("\nhl:s\tk\tv\tis:st:d\n"); for(i32 s = slot; s < slot + warpSize/2; ++s) { u64 entry = m->data[s % m->capacity]; printf("%d:%d\t%u\t%d\t", s % 16, s % m->capacity, m->key(entry), m->value(entry)); if(entry != m->empty()) printf("%d:%d:%d", m->hash_slot(m->key(entry)), m->hash_stride(m->key(entry)), m->key_distance(m->key(entry), slot)); printf("\n"); } printf("\n"); } #endif __global__ void test_hashmap_init(i32 initial_capacity, i32* map_returned_size) { // allocate map __shared__ curandStatePhilox4_32_10_t rng; if(threadIdx.x == 0) { curand_init((unsigned long long) 0, (unsigned long long) 0, (unsigned long long) 0, &rng); } __syncthreads(); __shared__ gpulda::HashMap m; m.init(initial_capacity, &rng); __syncthreads(); // check returned capacity if(threadIdx.x == 0) { map_returned_size[0] = m.capacity; } // deallocate map m.deallocate(); } __global__ void test_hashmap_insert_print_steps() { #ifdef GPULDA_HASH_DEBUG // initialize __shared__ curandStatePhilox4_32_10_t rng; if(threadIdx.x == 0) { curand_init((unsigned long long) 0, (unsigned long long) 0, (unsigned long long) 0, &rng); } __syncthreads(); __shared__ gpulda::HashMap m; m.init(96, &rng); m.a=26; m.b=1; m.c=30; m.d=13; __syncthreads(); if(threadIdx.x < warpSize) { if(threadIdx.x == 0) { debug_print_slot(&m, 0); } if(threadIdx.x == 0) { debug_print_slot(&m, 16); } if(threadIdx.x == 0) { debug_print_slot(&m, 32); } if(threadIdx.x == 0) { debug_print_slot(&m, 48); } if(threadIdx.x == 0) { debug_print_slot(&m, 64); } if(threadIdx.x == 0) { debug_print_slot(&m, 80); } if(threadIdx.x == 0) { printf("------------------------------------------------------------\n"); } // 16 m.insert2(threadIdx.x < 16 ? 0 : 3, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 6 : 9, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 12 : 15, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 18 : 21, 1);if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 24 : 27, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 30 : 33, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 36 : 39, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 42 : 45, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 16); printf("------------------------------------------------------------\n"); } // 16->32 m.insert2(threadIdx.x < 16 ? 
48 : 51, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 54 : 57, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 60 : 63, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 66 : 69, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 72 : 75, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 78 : 81, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 84 : 87, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 90 : 93, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 32); printf("------------------------------------------------------------\n"); } // 48 m.insert2(threadIdx.x < 16 ? 1 : 4, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 7 : 10, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 13 : 16, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 19 : 22, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 25 : 28, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 31 : 34, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 37 : 40, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 43 : 46, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } // 16->48 evict m.insert2(threadIdx.x < 16 ? 96 : 99, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 102 : 105, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 108 : 111, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 114 : 117, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 120 : 123, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 
126 : 129, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 132 : 135, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } m.insert2(threadIdx.x < 16 ? 138 : 141, 1); if(threadIdx.x == 0) { debug_print_slot(&m, 48); printf("------------------------------------------------------------\n"); } if(threadIdx.x == 0) { debug_print_slot(&m, 0); } if(threadIdx.x == 0) { debug_print_slot(&m, 16); } if(threadIdx.x == 0) { debug_print_slot(&m, 32); } if(threadIdx.x == 0) { debug_print_slot(&m, 48); } if(threadIdx.x == 0) { debug_print_slot(&m, 64); } if(threadIdx.x == 0) { debug_print_slot(&m, 80); } } m.deallocate(); #endif } __global__ void test_hashmap_insert2(i32 num_unique_elements, i32 num_elements, i32 max_size, i32* out, i32 rebuild) { __shared__ curandStatePhilox4_32_10_t rng; if(threadIdx.x == 0) { curand_init((unsigned long long) 0, (unsigned long long) 0, (unsigned long long) 0, &rng); } __syncthreads(); __shared__ gpulda::HashMap m; u32 initial_size = rebuild ? num_elements : max_size; m.init(initial_size, &rng); i32 dim = blockDim.x / (warpSize / 2); i32 half_warp_idx = threadIdx.x / (warpSize / 2); i32 half_lane_idx = threadIdx.x % (warpSize / 2); __syncthreads(); // accumulate elements for(i32 offset = 0; offset < num_elements / dim + 1; ++offset) { u32 i = offset * dim + half_warp_idx; m.insert2(i % num_unique_elements, i < num_elements ? 1 : 0); } // ensure insertion finished __syncthreads(); // rebuild if needed if(rebuild == true) { m.resize(0,0); } // ensure rebuild finished __syncthreads(); // retrieve elements for(i32 offset = 0; offset < num_elements / dim + 1; ++offset) { i32 i = offset * dim + half_warp_idx; if(i < num_unique_elements) { u32 element = m.get2(i); if(half_lane_idx == 0) { out[i] = element; } } } // deallocate map m.deallocate(); } void test_hashmap() { constexpr i32 max_size = 100; // will round down to 96 for cache alignment constexpr i32 num_elements = 90; // large contention to ensure collisions occur constexpr i32 num_unique_elements = 9; constexpr i32 warpSize = 32; constexpr i32 expected_size = ((((i32) (((f32) max_size) * GPULDA_HASH_GROWTH_RATE)) + 3*warpSize) / GPULDA_HASH_LINE_SIZE) * GPULDA_HASH_LINE_SIZE; i32* out; cudaMalloc(&out, num_elements * sizeof(i32)) >> GPULDA_CHECK; i32* out_host = new i32[num_elements]; // init<warp> test_hashmap_init<<<1,warpSize>>>(max_size, out); cudaDeviceSynchronize() >> GPULDA_CHECK; cudaMemcpy(out_host, out, sizeof(i32), cudaMemcpyDeviceToHost) >> GPULDA_CHECK; assert(out_host[0] == expected_size); out_host[0] = 0; // init<block> test_hashmap_init<<<1,GPULDA_POLYA_URN_SAMPLE_BLOCKDIM>>>(max_size, out); cudaDeviceSynchronize() >> GPULDA_CHECK; cudaMemcpy(out_host, out, sizeof(i32), cudaMemcpyDeviceToHost) >> GPULDA_CHECK; assert(out_host[0] == expected_size); out_host[0] = 0; // print steps #ifdef GPULDA_HASH_DEBUG test_hashmap_insert_print_steps<<<1,warpSize>>>(); cudaDeviceSynchronize() >> GPULDA_CHECK; return; #endif // insert2: warp, no rebuild test_hashmap_insert2<<<1,warpSize>>>(num_unique_elements, num_elements, max_size, out, false); cudaDeviceSynchronize() >> GPULDA_CHECK; cudaMemcpy(out_host, out, num_elements * sizeof(i32), cudaMemcpyDeviceToHost) >> GPULDA_CHECK; for(i32 i = 0; i < num_unique_elements; ++i) { assert(out_host[i] == num_elements / num_unique_elements); out_host[i] = 0; } // insert2: block, no rebuild 
test_hashmap_insert2<<<1,GPULDA_POLYA_URN_SAMPLE_BLOCKDIM>>>(num_unique_elements, num_elements, max_size, out, false); cudaDeviceSynchronize() >> GPULDA_CHECK; cudaMemcpy(out_host, out, num_elements * sizeof(i32), cudaMemcpyDeviceToHost) >> GPULDA_CHECK; for(i32 i = 0; i < num_unique_elements; ++i) { assert(out_host[i] == num_elements / num_unique_elements); out_host[i] = 0; } // insert2: warp, rebuild test_hashmap_insert2<<<1,warpSize>>>(num_unique_elements, num_elements, max_size, out, true); cudaDeviceSynchronize() >> GPULDA_CHECK; cudaMemcpy(out_host, out, num_elements * sizeof(u32), cudaMemcpyDeviceToHost) >> GPULDA_CHECK; for(i32 i = 0; i < num_unique_elements; ++i) { assert(out_host[i] == num_elements / num_unique_elements); out_host[i] = 0; } // insert2: block, rebuild test_hashmap_insert2<<<1,GPULDA_POLYA_URN_SAMPLE_BLOCKDIM>>>(num_unique_elements, num_elements, max_size, out, true); cudaDeviceSynchronize() >> GPULDA_CHECK; cudaMemcpy(out_host, out, num_elements * sizeof(u32), cudaMemcpyDeviceToHost) >> GPULDA_CHECK; for(i32 i = 0; i < num_unique_elements; ++i) { assert(out_host[i] == num_elements / num_unique_elements); out_host[i] = 0; } // cleanup cudaFree(out); delete[] out_host; } }
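One more illustration for the tests above: test_hashmap_insert2() distributes work to half-warps, so with warpSize == 32 every group of 16 consecutive threads cooperates on one insert. A small host-side sketch of that index arithmetic (mine, not gpulda code):

// Illustration: half-warp indexing as used in test_hashmap_insert2, printed for
// a few representative threads of a 64-thread block.
#include <cstdio>

int main() {
  const int warpSize = 32;
  const int blockDimX = 64;                    // e.g. a block of 64 threads
  const int dim = blockDimX / (warpSize / 2);  // number of cooperating half-warps
  for (int tid = 0; tid < blockDimX; tid += 8) {
    int half_warp_idx = tid / (warpSize / 2);
    int half_lane_idx = tid % (warpSize / 2);
    std::printf("thread %2d -> half-warp %d (lane %d) of %d\n",
                tid, half_warp_idx, half_lane_idx, dim);
  }
  return 0;
}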
dbf5328db6ce103c22b1f52b27bc02c3b28a6254.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_ROCM template<typename Dtype> __global__ void SoftmaxLossForwardGPU(const int_tp nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, const int_tp ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; const int_tp label_value = static_cast<int_tp>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log( max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } #endif // USE_ROCM template<typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() >= 2) { top[1]->ShareData(prob_); } #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); cl_mem prob_data = (cl_mem) (prob_.gpu_data()); cl_mem label = (cl_mem) (bottom[1]->gpu_data()); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; cl_mem loss_data = (cl_mem) (bottom[0]->mutable_gpu_diff()); cl_mem counts = (cl_mem) (prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_forward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_forward")); viennacl::ocl::enqueue( oclk_softmax_loss_forward(nthreads, WrapHandle(prob_data, &ctx), WrapHandle(label, &ctx), WrapHandle(loss_data, &ctx), outer_num_, dim, inner_num_, has_ignore_label_ ? 
1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); Dtype loss; greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, loss_data, 0, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, counts, 0, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() >= 2) { top[1]->ShareData(prob_); } #endif // USE_GREENTEA } } #ifdef USE_ROCM template<typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int_tp nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, const int_tp ignore_label_, Dtype* counts) { const int_tp channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; const int_tp label_value = static_cast<int_tp>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int_tp c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } #endif // USE_ROCM template<typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. 
Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS) (nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); cl_mem prob_data = (cl_mem)(prob_.gpu_data()); cl_mem top_data = (cl_mem)(top[0]->gpu_data()); greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, 0, bottom_diff, 0, &ctx); cl_mem label = (cl_mem)(bottom[1]->gpu_data()); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; cl_mem counts = (cl_mem)(prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_backward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_backward")); viennacl::ocl::enqueue( oclk_softmax_loss_backward(nthreads, WrapHandle(top_data, &ctx), WrapHandle(label, &ctx), WrapHandle(bottom_diff, &ctx), outer_num_, dim, inner_num_, has_ignore_label_ ? 1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); Dtype valid_count = -1; if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, counts, 0, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); greentea_gpu_scal<Dtype>(this->device_->id(), prob_.count(), loss_weight, bottom_diff, 0); #endif // USE_GREENTEA } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
dbf5328db6ce103c22b1f52b27bc02c3b28a6254.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_CUDA template<typename Dtype> __global__ void SoftmaxLossForwardGPU(const int_tp nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, const int_tp ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; const int_tp label_value = static_cast<int_tp>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log( max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } #endif // USE_CUDA template<typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS)(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() >= 2) { top[1]->ShareData(prob_); } #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); cl_mem prob_data = (cl_mem) (prob_.gpu_data()); cl_mem label = (cl_mem) (bottom[1]->gpu_data()); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; cl_mem loss_data = (cl_mem) (bottom[0]->mutable_gpu_diff()); cl_mem counts = (cl_mem) (prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_forward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_forward")); viennacl::ocl::enqueue( oclk_softmax_loss_forward(nthreads, WrapHandle(prob_data, &ctx), WrapHandle(label, &ctx), WrapHandle(loss_data, &ctx), outer_num_, dim, inner_num_, has_ignore_label_ ? 
1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); Dtype loss; greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, loss_data, 0, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, counts, 0, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() >= 2) { top[1]->ShareData(prob_); } #endif // USE_GREENTEA } } #ifdef USE_CUDA template<typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int_tp nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int_tp num, const int_tp dim, const int_tp spatial_dim, const bool has_ignore_label_, const int_tp ignore_label_, Dtype* counts) { const int_tp channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int_tp n = index / spatial_dim; const int_tp s = index % spatial_dim; const int_tp label_value = static_cast<int_tp>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int_tp c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } #endif // USE_CUDA template<typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. 
Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS) (nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = this->device_->program(); cl_mem bottom_diff = (cl_mem)(bottom[0]->mutable_gpu_diff()); cl_mem prob_data = (cl_mem)(prob_.gpu_data()); cl_mem top_data = (cl_mem)(top[0]->gpu_data()); greentea_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, 0, bottom_diff, 0, &ctx); cl_mem label = (cl_mem)(bottom[1]->gpu_data()); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; cl_mem counts = (cl_mem)(prob_.mutable_gpu_diff()); viennacl::ocl::kernel &oclk_softmax_loss_backward = program.get_kernel( CL_KERNEL_SELECT("softmax_loss_backward")); viennacl::ocl::enqueue( oclk_softmax_loss_backward(nthreads, WrapHandle(top_data, &ctx), WrapHandle(label, &ctx), WrapHandle(bottom_diff, &ctx), outer_num_, dim, inner_num_, has_ignore_label_ ? 1 : 0, ignore_label_, WrapHandle(counts, &ctx)), ctx.get_queue()); Dtype valid_count = -1; if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { greentea_gpu_asum<Dtype>(this->device_->id(), nthreads, counts, 0, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); greentea_gpu_scal<Dtype>(this->device_->id(), prob_.count(), loss_weight, bottom_diff, 0); #endif // USE_GREENTEA } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
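The forward pass in both versions of this pair computes the same quantity: a per-position negative log-likelihood, summed with caffe_gpu_asum and divided by a normalizer that, under VALID normalization with an ignore label, is the count of non-ignored positions. A minimal host-side C++ sketch of that reduction (illustrative only, not part of Caffe's API; it assumes the same prob/label layout the kernels above index into, and needs <cmath>, <cfloat>, <algorithm>):

// Reference for what SoftmaxLossForwardGPU + caffe_gpu_asum + get_normalizer compute together.
float softmax_loss_reference(const float* prob, const float* label,
                             int outer_num, int channels, int inner_num,
                             bool has_ignore, int ignore_label) {
  const int dim = channels * inner_num;            // matches dim = prob_.count() / outer_num_
  double loss = 0.0;
  long valid = 0;
  for (int n = 0; n < outer_num; ++n) {
    for (int s = 0; s < inner_num; ++s) {
      const int lv = static_cast<int>(label[n * inner_num + s]);
      if (has_ignore && lv == ignore_label) continue;          // kernel writes counts[index] = 0
      const float p = prob[n * dim + lv * inner_num + s];
      loss += -std::log(std::max(p, FLT_MIN));                 // same clamp as the kernel
      ++valid;                                                 // kernel writes counts[index] = 1
    }
  }
  return static_cast<float>(loss / (valid > 0 ? valid : 1));   // VALID normalization
}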
d383d10e7014169e9fadee252027da24cde3afc5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include <iostream> #include <assert.h> #include <stdlib.h> #include <random> #define show(x) std::cout << #x ": " << x << std::endl; #define BLOCKSIZE 128 __global__ void pi(float *blockSums, int stepsPerThread, float dx) { __shared__ float threadSums[BLOCKSIZE]; int id = threadIdx.x + blockDim.x * blockIdx.x; int istart = id * stepsPerThread; int istop = istart + stepsPerThread; float accum = 0.0f; for (int i = istart; i < istop; i++) { float x = (i + 0.5f) * dx; accum += 4.0f / (1.0f + x*x); } threadSums[threadIdx.x] = accum; __syncthreads(); if (threadIdx.x == 0) { float blockSum = 0.0f; for (int j = 0; j < blockDim.x; j++) { blockSum += threadSums[j]; } blockSums[blockIdx.x] = blockSum; } } int main() { hipError_t err; const int stepsPerThread = 512 * 2 * 2; const int blockSize = BLOCKSIZE; const int numBlocks = 256; const int numSteps = blockSize * numBlocks * stepsPerThread; const float dx = 1.0f / numSteps; float *h_blockSums = (float *)malloc(sizeof(float) * numBlocks); float *d_blockSums; err = hipMalloc((void**)&d_blockSums, sizeof(float) * numBlocks); assert(err == hipSuccess); err = hipMemcpy(d_blockSums, h_blockSums, sizeof(float) * numBlocks, hipMemcpyHostToDevice); assert(err == hipSuccess); hipLaunchKernelGGL(( pi), dim3(numBlocks), dim3(blockSize), 0, 0, d_blockSums, stepsPerThread, dx); err = hipMemcpy(h_blockSums, d_blockSums, sizeof(float) * numBlocks, hipMemcpyDeviceToHost); assert(err == hipSuccess); float pi = 0.0f; for (int i = 0; i < numBlocks; i++) pi += h_blockSums[i]; pi *= dx; printf("pi approximately equals: %f\n", pi); hipFree(d_blockSums); free(h_blockSums); return 0; }
d383d10e7014169e9fadee252027da24cde3afc5.cu
#include <fstream> #include <iostream> #include <assert.h> #include <stdlib.h> #include <random> #define show(x) std::cout << #x ": " << x << std::endl; #define BLOCKSIZE 128 __global__ void pi(float *blockSums, int stepsPerThread, float dx) { __shared__ float threadSums[BLOCKSIZE]; int id = threadIdx.x + blockDim.x * blockIdx.x; int istart = id * stepsPerThread; int istop = istart + stepsPerThread; float accum = 0.0f; for (int i = istart; i < istop; i++) { float x = (i + 0.5f) * dx; accum += 4.0f / (1.0f + x*x); } threadSums[threadIdx.x] = accum; __syncthreads(); if (threadIdx.x == 0) { float blockSum = 0.0f; for (int j = 0; j < blockDim.x; j++) { blockSum += threadSums[j]; } blockSums[blockIdx.x] = blockSum; } } int main() { cudaError_t err; const int stepsPerThread = 512 * 2 * 2; const int blockSize = BLOCKSIZE; const int numBlocks = 256; const int numSteps = blockSize * numBlocks * stepsPerThread; const float dx = 1.0f / numSteps; float *h_blockSums = (float *)malloc(sizeof(float) * numBlocks); float *d_blockSums; err = cudaMalloc((void**)&d_blockSums, sizeof(float) * numBlocks); assert(err == cudaSuccess); err = cudaMemcpy(d_blockSums, h_blockSums, sizeof(float) * numBlocks, cudaMemcpyHostToDevice); assert(err == cudaSuccess); pi<<<numBlocks, blockSize>>> (d_blockSums, stepsPerThread, dx); err = cudaMemcpy(h_blockSums, d_blockSums, sizeof(float) * numBlocks, cudaMemcpyDeviceToHost); assert(err == cudaSuccess); float pi = 0.0f; for (int i = 0; i < numBlocks; i++) pi += h_blockSums[i]; pi *= dx; printf("pi approximately equals: %f\n", pi); cudaFree(d_blockSums); free(h_blockSums); return 0; }
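Apart from renamed runtime calls (cudaMalloc/hipMalloc, cudaMemcpy/hipMemcpy, and so on), the only structural change hipify makes in this pair is the kernel launch. A sketch of the general mapping, using a generic kernel k and illustrative argument names, not taken from the files above:

// CUDA triple-chevron launch
k<<<blocks, threads, sharedBytes, stream>>>(arg0, arg1);

// Equivalent HIP launch emitted by hipify
hipLaunchKernelGGL(k, dim3(blocks), dim3(threads), sharedBytes, stream, arg0, arg1);

Note also that the pi example copies the uninitialized h_blockSums buffer to the device before the launch even though the kernel overwrites every element of d_blockSums; that initial memcpy is harmless but unnecessary.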
f95cea636f523571dddf75e1367e2f632d0c54f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> int I = 500; int J = 500; int K = 500; __global__ void mul(int I, int J, int K, float *x, float *y, float *z) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int q=index; q<I*K; q+=stride) { int i = q / K; int k = q % K; z[q] = 0.0f; for(int j=0; j<J; j++) { z[q] += x[i*J+j] * y[j*K+k]; } } } int main(void) { setbuf(stdout, NULL); printf("Start\n"); float *x, *y, *z; hipMallocManaged(&x, I*J*sizeof(float)); hipMallocManaged(&y, J*K*sizeof(float)); hipMallocManaged(&z, I*K*sizeof(float)); for(int i = 0; i < I*J; i++) { x[i] = 1.0f; } for(int i = 0; i < J*K; i++) { y[i] = 2.0f; } int blockSize = 1; //512; int numBlocks = 1; //min(65535, (I*K + blockSize - 1) / blockSize); printf("Number %f\n", x[0]); hipLaunchKernelGGL(( mul), dim3(numBlocks), dim3(blockSize), 0, 0, I, J, K, x, y, z); printf("Number %f\n", x[0]); hipError_t cudaerr = hipDeviceSynchronize(); if (cudaerr != hipSuccess) printf("kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr)); // for(int i = 0; i < N; i++) // { // if(fabs(y[i] - 3.0f)>0.000000001) { // printf("Wrong! %d %f", i, y[i]); // break; // } // } printf("Number %f\n", x[0]); hipFree(x); hipFree(y); hipFree(z); }
f95cea636f523571dddf75e1367e2f632d0c54f9.cu
#include <iostream> #include <math.h> int I = 500; int J = 500; int K = 500; __global__ void mul(int I, int J, int K, float *x, float *y, float *z) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int q=index; q<I*K; q+=stride) { int i = q / K; int k = q % K; z[q] = 0.0f; for(int j=0; j<J; j++) { z[q] += x[i*J+j] * y[j*K+k]; } } } int main(void) { setbuf(stdout, NULL); printf("Start\n"); float *x, *y, *z; cudaMallocManaged(&x, I*J*sizeof(float)); cudaMallocManaged(&y, J*K*sizeof(float)); cudaMallocManaged(&z, I*K*sizeof(float)); for(int i = 0; i < I*J; i++) { x[i] = 1.0f; } for(int i = 0; i < J*K; i++) { y[i] = 2.0f; } int blockSize = 1; //512; int numBlocks = 1; //min(65535, (I*K + blockSize - 1) / blockSize); printf("Number %f\n", x[0]); mul<<<numBlocks, blockSize>>>(I, J, K, x, y, z); printf("Number %f\n", x[0]); cudaError_t cudaerr = cudaDeviceSynchronize(); if (cudaerr != cudaSuccess) printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); // for(int i = 0; i < N; i++) // { // if(fabs(y[i] - 3.0f)>0.000000001) { // printf("Wrong! %d %f", i, y[i]); // break; // } // } printf("Number %f\n", x[0]); cudaFree(x); cudaFree(y); cudaFree(z); }
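The mul kernel uses a grid-stride loop over the flattened I*K output, so correctness does not depend on the launch configuration; the file as written launches a single thread (blockSize = 1, numBlocks = 1), which makes the multiply run serially. Re-enabling the commented-out configuration parallelizes it; a sketch of that choice and of the index mapping the kernel relies on (CUDA form shown; the HIP file uses hipLaunchKernelGGL):

int blockSize = 512;                                            // the value commented out in the file
int numBlocks = min(65535, (I * K + blockSize - 1) / blockSize);
mul<<<numBlocks, blockSize>>>(I, J, K, x, y, z);

// Inside the kernel, each flattened index q maps back to one output element:
//   i = q / K  (row of z),  k = q % K  (column of z),
//   z[q] = sum over j of x[i*J + j] * y[j*K + k].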
b0db0300f360bf954460a42e46996df1c06d54f1.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #define tam 1.0 #define dx 0.00001 #define dt 0.000001 #define T 0.01 #define kappa 0.000045 __global__ void Inicializacao( double *uprev, const int n ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; double x = idx * dx; if( idx < n + 1 ) { if( x <= 0.5 ) { uprev[ idx ] = 200 * x; } else { uprev[ idx ] = 200 * ( 1. - x ); } } } __global__ void Atualiza( double *u, double *u_prev, const int n ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if( idx == 0 ) { u[ 0 ] = u[ n ] = 0.; /* forca condicao de contorno */ } else if( idx < n ) { u[ idx ] = u_prev[ idx ] + kappa * dt / ( dx * dx ) * ( u_prev[ idx - 1 ] - 2 * u_prev[ idx ] + u_prev[ idx + 1 ] ); } } __global__ void Maximo( double *input, double *results, int n ) { extern __shared__ double sdata[]; int idx = blockIdx.x * blockDim.x + threadIdx.x, tx = threadIdx.x; double x = 0.; if( idx < n ) { x = input[ idx ]; } sdata[ tx ] = x; __syncthreads( ); for( int offset = blockDim.x / 2; offset > 0; offset >>= 1 ) { if( tx < offset ) { if( sdata[ tx ] < sdata[ tx + offset ] ) { sdata[ tx ] = sdata[ tx + offset ]; } } __syncthreads( ); } if( threadIdx.x == 0 ) { results[ blockIdx.x ] = sdata[ 0 ]; } } int main( void ) { double *tmp; double *u_prev_d, *u_d, *max_d; double t; long int n; /* Claculando quantidade de pontos */ n = tam / dx; int blockSize = 256; int gridSize = ceil( n / ( float ) blockSize ); printf( "Size: %d, numBlks: %d, numThds: %d, mult: %d\n", n, gridSize, blockSize, gridSize * blockSize ); hipMalloc( ( void** ) &u_d, ( n + 1 ) * sizeof( double ) ); hipMalloc( ( void** ) &u_prev_d, ( n + 1 ) * sizeof( double ) ); hipMalloc( ( void** ) &max_d, (gridSize + 1 ) * sizeof( double ) ); printf( "Inicio: qtde=%ld, dt=%g, dx=%g, dx=%g, kappa=%f, const=%f\n", ( n + 1 ), dt, dx, dx * dx, kappa, kappa * dt / ( dx * dx ) ); printf( "Iteracoes previstas: %g\n", T / dt ); double start = omp_get_wtime( ); hipLaunchKernelGGL(( Inicializacao) , dim3(gridSize), dim3(blockSize) , 0, 0, u_prev_d, n ); printf( "\tInicializacao: %f\n", omp_get_wtime( ) - start ); /* hipMemcpy( u_prev, u_prev_d, ( n + 1 ) * sizeof( double ), hipMemcpyDeviceToHost ); */ /* * for( i = 0; i < n + 1; i++ ) { * printf( "%.2f ", u_prev[ i ] ); * } * printf( "\n" ); */ /* * x = ( n + 1 ) * dx; * printf( "%f\n", x ); * printf( "dx=%g, x=%g, x-dx=%g\n", dx, x, x - dx ); * printf( "u_prev[0,1]=%g, %g\n", u_prev[ 0 ], u_prev[ 1 ] ); * printf( "u_prev[n-1,n]=%g, %g\n", u_prev[ n - 1 ], u_prev[ n ] ); */ t = 0.; while( t < T ) { hipLaunchKernelGGL(( Atualiza) , dim3(gridSize), dim3(blockSize) , 0, 0, u_d, u_prev_d, n ); tmp = u_prev_d; u_prev_d = u_d; u_d = tmp; /* troca entre ponteiros */ t += dt; } int smem_sz = blockSize * sizeof( double ); hipLaunchKernelGGL(( Maximo) , dim3(gridSize), dim3(blockSize), smem_sz , 0, u_d, max_d, n ); hipLaunchKernelGGL(( Maximo) , dim3(1), dim3(blockSize), smem_sz , 0, max_d, max_d + gridSize, gridSize ); double maxval; hipMemcpy( &maxval, max_d + gridSize, sizeof( double ), hipMemcpyDeviceToHost ); printf( "\tTempo total : %f \n", omp_get_wtime( ) - start ); printf( "Maior valor = %g\n", maxval ); return 0; }
b0db0300f360bf954460a42e46996df1c06d54f1.cu
#include <cuda.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #define tam 1.0 #define dx 0.00001 #define dt 0.000001 #define T 0.01 #define kappa 0.000045 __global__ void Inicializacao( double *uprev, const int n ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; double x = idx * dx; if( idx < n + 1 ) { if( x <= 0.5 ) { uprev[ idx ] = 200 * x; } else { uprev[ idx ] = 200 * ( 1. - x ); } } } __global__ void Atualiza( double *u, double *u_prev, const int n ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if( idx == 0 ) { u[ 0 ] = u[ n ] = 0.; /* forca condicao de contorno */ } else if( idx < n ) { u[ idx ] = u_prev[ idx ] + kappa * dt / ( dx * dx ) * ( u_prev[ idx - 1 ] - 2 * u_prev[ idx ] + u_prev[ idx + 1 ] ); } } __global__ void Maximo( double *input, double *results, int n ) { extern __shared__ double sdata[]; int idx = blockIdx.x * blockDim.x + threadIdx.x, tx = threadIdx.x; double x = 0.; if( idx < n ) { x = input[ idx ]; } sdata[ tx ] = x; __syncthreads( ); for( int offset = blockDim.x / 2; offset > 0; offset >>= 1 ) { if( tx < offset ) { if( sdata[ tx ] < sdata[ tx + offset ] ) { sdata[ tx ] = sdata[ tx + offset ]; } } __syncthreads( ); } if( threadIdx.x == 0 ) { results[ blockIdx.x ] = sdata[ 0 ]; } } int main( void ) { double *tmp; double *u_prev_d, *u_d, *max_d; double t; long int n; /* Claculando quantidade de pontos */ n = tam / dx; int blockSize = 256; int gridSize = ceil( n / ( float ) blockSize ); printf( "Size: %d, numBlks: %d, numThds: %d, mult: %d\n", n, gridSize, blockSize, gridSize * blockSize ); cudaMalloc( ( void** ) &u_d, ( n + 1 ) * sizeof( double ) ); cudaMalloc( ( void** ) &u_prev_d, ( n + 1 ) * sizeof( double ) ); cudaMalloc( ( void** ) &max_d, (gridSize + 1 ) * sizeof( double ) ); printf( "Inicio: qtde=%ld, dt=%g, dx=%g, dx²=%g, kappa=%f, const=%f\n", ( n + 1 ), dt, dx, dx * dx, kappa, kappa * dt / ( dx * dx ) ); printf( "Iteracoes previstas: %g\n", T / dt ); double start = omp_get_wtime( ); Inicializacao <<< gridSize, blockSize >>> ( u_prev_d, n ); printf( "\tInicializacao: %f\n", omp_get_wtime( ) - start ); /* cudaMemcpy( u_prev, u_prev_d, ( n + 1 ) * sizeof( double ), cudaMemcpyDeviceToHost ); */ /* * for( i = 0; i < n + 1; i++ ) { * printf( "%.2f ", u_prev[ i ] ); * } * printf( "\n" ); */ /* * x = ( n + 1 ) * dx; * printf( "%f\n", x ); * printf( "dx=%g, x=%g, x-dx=%g\n", dx, x, x - dx ); * printf( "u_prev[0,1]=%g, %g\n", u_prev[ 0 ], u_prev[ 1 ] ); * printf( "u_prev[n-1,n]=%g, %g\n", u_prev[ n - 1 ], u_prev[ n ] ); */ t = 0.; while( t < T ) { Atualiza <<< gridSize, blockSize >>> ( u_d, u_prev_d, n ); tmp = u_prev_d; u_prev_d = u_d; u_d = tmp; /* troca entre ponteiros */ t += dt; } int smem_sz = blockSize * sizeof( double ); Maximo <<< gridSize, blockSize, smem_sz >>> ( u_d, max_d, n ); Maximo <<< 1, blockSize, smem_sz >>> ( max_d, max_d + gridSize, gridSize ); double maxval; cudaMemcpy( &maxval, max_d + gridSize, sizeof( double ), cudaMemcpyDeviceToHost ); printf( "\tTempo total : %f \n", omp_get_wtime( ) - start ); printf( "Maior valor = %g\n", maxval ); return 0; }
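The Atualiza kernel in this pair is the explicit (FTCS) finite-difference update for the 1-D heat equation; in the notation of the file's constants it computes

u_i^{n+1} = u_i^n + \kappa \frac{\Delta t}{\Delta x^2} \left( u_{i-1}^n - 2 u_i^n + u_{i+1}^n \right)

The scheme is stable when \kappa \Delta t / \Delta x^2 \le 1/2; with kappa = 0.000045, dt = 1e-6 and dx = 1e-5 the coefficient the program prints as "const" is 0.45, so the chosen step sizes satisfy the condition.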
758155ea9287f42b9d5e30851c03adbcb36b1634.hip
// !!! This is a file automatically generated by hipify!!! #include "cuda_helper.h" #include "ffthelper.h" #include "utils.h" #include <hip/hip_runtime.h> #include <hipfft.h> #include <hipfftXt.h> #include <math.h> #include "hip/hip_complex.h" #include <complex> #include <vector> using ComplexVec = std::vector<std::complex<float>>; namespace refft { // Modular multiplication a * N mod p // In: a[np][N] __device__ cuFloatComplex twiddle(const float expr) { cuFloatComplex res; float s, c; sincosf(expr, &s, &c); res.x = c; res.y = s; return res; } __device__ void butt_fft(cuFloatComplex *a, cuFloatComplex *b, cuFloatComplex w) { cuFloatComplex U = cuCmulf(*b, w); *b = cuCsubf(*a, U); *a = cuCaddf(*a, U); } __global__ void Fft(cuFloatComplex *a, const int m, const int N, const int num_images) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (N / 2) * num_images; i += blockDim.x * gridDim.x) { // index in N/2 range int N_idx = i % (N / 2); int image_idx = i / (N / 2); // i'th block int m_idx = N_idx / m; // base address cuFloatComplex *a_np = a + image_idx * N; int t_idx = N_idx % m; cuFloatComplex *a_x = a_np + 2 * m_idx * m + t_idx; cuFloatComplex *a_y = a_x + m; cuFloatComplex w = twiddle(-M_PI * (double)t_idx / (double)m); butt_fft(a_x, a_y, w); } } __device__ void butt_ifft(cuFloatComplex *a, cuFloatComplex *b, cuFloatComplex w) { cuFloatComplex T = cuCsubf(*a, *b); *a = cuCaddf(*a, *b); (*a).x /= 2.0; (*a).y /= 2.0; *b = cuCmulf(T, w); (*b).x /= 2.0; (*b).y /= 2.0; } __global__ void Ifft(cuFloatComplex *a, const int m, const int N, const int num_images) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (N / 2) * num_images; i += blockDim.x * gridDim.x) { // index in N/2 range int N_idx = i % (N / 2); int image_idx = i / (N / 2); // i'th block int m_idx = N_idx / m; // base address cuFloatComplex *a_np = a + image_idx * N; int t_idx = N_idx % m; cuFloatComplex *a_x = a_np + 2 * m_idx * m + t_idx; cuFloatComplex *a_y = a_x + m; cuFloatComplex w = twiddle(M_PI * (double)t_idx / (double)m); butt_ifft(a_x, a_y, w); } } __global__ void bitReverse(std::complex<float> *a, int N, int num_images) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (N) * num_images; i += blockDim.x * gridDim.x) { int logN = __log2f(N); int N_idx = i % N; int image_idx = i / N; std::complex<float> *a_x = a + N * image_idx; int revN = __brev(N_idx) >> (32 - logN); if (revN > N_idx) { std::complex<float> temp = a_x[N_idx]; a_x[N_idx] = a_x[revN]; a_x[revN] = temp; } } } __device__ cuFloatComplex Cmul(cuFloatComplex a, cuFloatComplex b) { float temp = double(a.x) * b.x - double(a.y) * b.y; float temp2 = double(a.x) * b.y + double(a.y) * b.x; cuFloatComplex res; res.x = temp; res.y = temp2; return res; } __global__ void Hadamard(cuFloatComplex *a, cuFloatComplex *b, int N) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (N); i += blockDim.x * gridDim.x) { int N_idx = i % N; cuFloatComplex *a_x = a; cuFloatComplex *b_x = b; a_x[N_idx] = Cmul(a_x[N_idx], b_x[N_idx]); } } void FftHelper::ExecFft(std::complex<float> *a, const int N, const int num_images) { dim3 blockDim(refft::FFTblocksize); dim3 gridDim(N/2/refft::FFTblocksize); hipLaunchKernelGGL(( bitReverse), dim3(gridDim), dim3(blockDim), 0, 0, a,N, num_images); for (int i = 1; i < N; i *= 2) { hipLaunchKernelGGL(( Fft), dim3(gridDim), dim3(blockDim), 0, 0, (cuFloatComplex *)a, i, N, num_images); CudaCheckError(); } CudaCheckError(); } void FftHelper::ExecIfft(std::complex<float> *a, const int N, const int num_images) { dim3 
blockDim(refft::iFFTblocksize); dim3 gridDim(N/2/refft::iFFTblocksize); for (int i = N / 2; i > 0; i >>= 1) { hipLaunchKernelGGL(( Ifft), dim3(gridDim), dim3(blockDim), 0, 0, (cuFloatComplex *)a, i, N, num_images); } hipLaunchKernelGGL(( bitReverse), dim3(gridDim), dim3(blockDim), 0, 0, a, N, num_images); CudaCheckError(); } void FftHelper::Mult(std::complex<float> *a, std::complex<float> *b, int N) { dim3 blockDim(refft::iFFTblocksize); dim3 gridDim(N/refft::iFFTblocksize); hipLaunchKernelGGL(( Hadamard), dim3(gridDim), dim3(blockDim), 0, 0, (cuFloatComplex*)a,(cuFloatComplex*)b, N); CudaCheckError(); } void FftHelper::ExecCUFFT(std::complex<float> *a, const int N, const int num_images) { hipfftHandle plan; //int dim[1] = {N}; //hipfftPlanMany(&plan,1,dim,NULL,1,1,NULL,1,1,HIPFFT_C2C,num_images); { CudaTimer t("Planning"); if(hipfftPlan1d(&plan,N,HIPFFT_C2C,num_images)!=HIPFFT_SUCCESS){ std::cout << "CUFFT ERROR : PLAN ERROR" << std::endl; return; } } { CudaTimer t("Execution"); if(hipfftExecC2C(plan,(hipComplex *) a, (hipComplex *) a, HIPFFT_FORWARD)!=HIPFFT_SUCCESS){ std::cout << "CUFFT ERROR : CUFFT ERROR" << std::endl; return; } } } void FftHelper::ExecCUIFFT(std::complex<float> *a, const int N, const int num_images) { hipfftHandle plan; if(hipfftPlan1d(&plan,N,HIPFFT_C2C,num_images)!=HIPFFT_SUCCESS){ std::cout << "CUFFT ERROR : PLAN ERROR" << std::endl; return; } if(hipfftExecC2C(plan,(hipComplex *) a, (hipComplex *) a, HIPFFT_BACKWARD)!=HIPFFT_SUCCESS){ std::cout << "CUFFT ERROR : CUFFT ERROR" << std::endl; return; } } } // namespace refft
758155ea9287f42b9d5e30851c03adbcb36b1634.cu
#include "cuda_helper.h" #include "ffthelper.h" #include "utils.h" #include <cuda_runtime.h> #include <cufft.h> #include <cufftXt.h> #include <math.h> #include "cuComplex.h" #include <complex> #include <vector> using ComplexVec = std::vector<std::complex<float>>; namespace refft { // Modular multiplication a * N mod p // In: a[np][N] __device__ cuFloatComplex twiddle(const float expr) { cuFloatComplex res; float s, c; sincosf(expr, &s, &c); res.x = c; res.y = s; return res; } __device__ void butt_fft(cuFloatComplex *a, cuFloatComplex *b, cuFloatComplex w) { cuFloatComplex U = cuCmulf(*b, w); *b = cuCsubf(*a, U); *a = cuCaddf(*a, U); } __global__ void Fft(cuFloatComplex *a, const int m, const int N, const int num_images) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (N / 2) * num_images; i += blockDim.x * gridDim.x) { // index in N/2 range int N_idx = i % (N / 2); int image_idx = i / (N / 2); // i'th block int m_idx = N_idx / m; // base address cuFloatComplex *a_np = a + image_idx * N; int t_idx = N_idx % m; cuFloatComplex *a_x = a_np + 2 * m_idx * m + t_idx; cuFloatComplex *a_y = a_x + m; cuFloatComplex w = twiddle(-M_PI * (double)t_idx / (double)m); butt_fft(a_x, a_y, w); } } __device__ void butt_ifft(cuFloatComplex *a, cuFloatComplex *b, cuFloatComplex w) { cuFloatComplex T = cuCsubf(*a, *b); *a = cuCaddf(*a, *b); (*a).x /= 2.0; (*a).y /= 2.0; *b = cuCmulf(T, w); (*b).x /= 2.0; (*b).y /= 2.0; } __global__ void Ifft(cuFloatComplex *a, const int m, const int N, const int num_images) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (N / 2) * num_images; i += blockDim.x * gridDim.x) { // index in N/2 range int N_idx = i % (N / 2); int image_idx = i / (N / 2); // i'th block int m_idx = N_idx / m; // base address cuFloatComplex *a_np = a + image_idx * N; int t_idx = N_idx % m; cuFloatComplex *a_x = a_np + 2 * m_idx * m + t_idx; cuFloatComplex *a_y = a_x + m; cuFloatComplex w = twiddle(M_PI * (double)t_idx / (double)m); butt_ifft(a_x, a_y, w); } } __global__ void bitReverse(std::complex<float> *a, int N, int num_images) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (N) * num_images; i += blockDim.x * gridDim.x) { int logN = __log2f(N); int N_idx = i % N; int image_idx = i / N; std::complex<float> *a_x = a + N * image_idx; int revN = __brev(N_idx) >> (32 - logN); if (revN > N_idx) { std::complex<float> temp = a_x[N_idx]; a_x[N_idx] = a_x[revN]; a_x[revN] = temp; } } } __device__ cuFloatComplex Cmul(cuFloatComplex a, cuFloatComplex b) { float temp = double(a.x) * b.x - double(a.y) * b.y; float temp2 = double(a.x) * b.y + double(a.y) * b.x; cuFloatComplex res; res.x = temp; res.y = temp2; return res; } __global__ void Hadamard(cuFloatComplex *a, cuFloatComplex *b, int N) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (N); i += blockDim.x * gridDim.x) { int N_idx = i % N; cuFloatComplex *a_x = a; cuFloatComplex *b_x = b; a_x[N_idx] = Cmul(a_x[N_idx], b_x[N_idx]); } } void FftHelper::ExecFft(std::complex<float> *a, const int N, const int num_images) { dim3 blockDim(refft::FFTblocksize); dim3 gridDim(N/2/refft::FFTblocksize); bitReverse<<<gridDim, blockDim>>>(a,N, num_images); for (int i = 1; i < N; i *= 2) { Fft<<<gridDim, blockDim>>>((cuFloatComplex *)a, i, N, num_images); CudaCheckError(); } CudaCheckError(); } void FftHelper::ExecIfft(std::complex<float> *a, const int N, const int num_images) { dim3 blockDim(refft::iFFTblocksize); dim3 gridDim(N/2/refft::iFFTblocksize); for (int i = N / 2; i > 0; i >>= 1) { Ifft<<<gridDim, blockDim>>>((cuFloatComplex 
*)a, i, N, num_images); } bitReverse<<<gridDim, blockDim>>>(a, N, num_images); CudaCheckError(); } void FftHelper::Mult(std::complex<float> *a, std::complex<float> *b, int N) { dim3 blockDim(refft::iFFTblocksize); dim3 gridDim(N/refft::iFFTblocksize); Hadamard<<<gridDim, blockDim>>>((cuFloatComplex*)a,(cuFloatComplex*)b, N); CudaCheckError(); } void FftHelper::ExecCUFFT(std::complex<float> *a, const int N, const int num_images) { cufftHandle plan; //int dim[1] = {N}; //cufftPlanMany(&plan,1,dim,NULL,1,1,NULL,1,1,CUFFT_C2C,num_images); { CudaTimer t("Planning"); if(cufftPlan1d(&plan,N,CUFFT_C2C,num_images)!=CUFFT_SUCCESS){ std::cout << "CUFFT ERROR : PLAN ERROR" << std::endl; return; } } { CudaTimer t("Execution"); if(cufftExecC2C(plan,(cuComplex *) a, (cuComplex *) a, CUFFT_FORWARD)!=CUFFT_SUCCESS){ std::cout << "CUFFT ERROR : CUFFT ERROR" << std::endl; return; } } } void FftHelper::ExecCUIFFT(std::complex<float> *a, const int N, const int num_images) { cufftHandle plan; if(cufftPlan1d(&plan,N,CUFFT_C2C,num_images)!=CUFFT_SUCCESS){ std::cout << "CUFFT ERROR : PLAN ERROR" << std::endl; return; } if(cufftExecC2C(plan,(cuComplex *) a, (cuComplex *) a, CUFFT_INVERSE)!=CUFFT_SUCCESS){ std::cout << "CUFFT ERROR : CUFFT ERROR" << std::endl; return; } } } // namespace refft
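The CUFFT wrappers above follow the standard plan-then-execute pattern but never release their plans, and ExecCUFFT builds a fresh plan on every call, so repeated use accumulates plan handles. A sketch of the usual complete sequence (standard cuFFT API; error handling shortened):

cufftHandle plan;
if (cufftPlan1d(&plan, N, CUFFT_C2C, num_images) != CUFFT_SUCCESS) { /* handle plan error */ }
if (cufftExecC2C(plan, (cuComplex*)a, (cuComplex*)a, CUFFT_FORWARD) != CUFFT_SUCCESS) { /* handle exec error */ }
cudaDeviceSynchronize();   // cufftExecC2C is asynchronous with respect to the host
cufftDestroy(plan);        // release the plan; this call is absent in the file above

On the hand-written path, ExecFft sizes its grid as N/2/FFTblocksize, so it assumes N/2 is at least FFTblocksize and divisible by it; the grid-stride loops inside Fft, Ifft and bitReverse then cover all num_images transforms.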
2ab2521840034c93377f84bbeb71f1bbdd9bfbf1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendona Lopes This file is part of GPUMLib. GPUMLib is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "NMFkernels.h" namespace GPUMLib { //! \addtogroup nmfkernels Non-negative Matrix Factorization kernels //! @{ // NMF_AditiveEuclidianDistance kernels KERNEL UpdateMatrix_AE(cudafloat * X, cudafloat * deltaX1, cudafloat * deltaX2, int elements) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < elements) { cudafloat v = X[idx] + (X[idx] / deltaX2[idx]) * (deltaX1[idx] - deltaX2[idx]); if (v < CUDA_VALUE(0.0)) v = CUDA_VALUE(0.0); X[idx] = v; } } // NMF_MultiplicativeEuclidianDistance kernels KERNEL UpdateMatrix_ME(cudafloat * nm, cudafloat * dm, cudafloat * m, int elements) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < elements) m[idx] *= nm[idx] / (dm[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR); } // NMF_MultiplicativeDivergence kernels #ifdef ROW_MAJOR_H #define HMATRIX(_ROW, _COL, _R, _M) (H[(_ROW) * (_M) + (_COL)]) #else #define HMATRIX(_ROW, _COL, _R, _M) (H[(_COL) * (_R) + (_ROW)]) #endif #ifdef ROW_MAJOR_W #define WMATRIX(_ROW, _COL, _N, _R) (W[(_ROW) * (_R) + (_COL)]) #else #define WMATRIX(_ROW, _COL, _N, _R) (W[(_COL) * (_N) + (_ROW)]) #endif template <int blockSize> KERNEL SumW(cudafloat * W, int n, cudafloat * sumW) { extern __shared__ cudafloat w[]; w[threadIdx.x] = CUDA_VALUE(0.0); for(int k = threadIdx.x; k < n; k += blockSize) { w[threadIdx.x] += WMATRIX(k, blockIdx.x, n, gridDim.x); } __syncthreads(); if (blockSize >= 1024) { if (threadIdx.x < 512) w[threadIdx.x] += w[threadIdx.x + 512]; __syncthreads(); } if (blockSize >= 512) { if (threadIdx.x < 256) w[threadIdx.x] += w[threadIdx.x + 256]; __syncthreads(); } if (blockSize >= 256) { if (threadIdx.x < 128) w[threadIdx.x] += w[threadIdx.x + 128]; __syncthreads(); } if (blockSize >= 128) { if (threadIdx.x < 64) w[threadIdx.x] += w[threadIdx.x + 64]; __syncthreads(); } if (threadIdx.x < 32) { volatile cudafloat * _w = w; if (blockSize >= 64) _w[threadIdx.x] += _w[threadIdx.x + 32]; if (blockSize >= 32) _w[threadIdx.x] += _w[threadIdx.x + 16]; if (blockSize >= 16) _w[threadIdx.x] += _w[threadIdx.x + 8]; if (blockSize >= 8) _w[threadIdx.x] += _w[threadIdx.x + 4]; if (blockSize >= 4) _w[threadIdx.x] += _w[threadIdx.x + 2]; if (blockSize >= 2) _w[threadIdx.x] += _w[threadIdx.x + 1]; if (threadIdx.x == 0) { cudafloat sum = w[0]; if (sum < SMALL_VALUE_TO_ADD_DENOMINATOR) sum = SMALL_VALUE_TO_ADD_DENOMINATOR; sumW[blockIdx.x] = sum; } } } void KernelSumW(int blockSize, cudafloat * W, int n, int r, cudafloat * sumW) { switch(blockSize) { #ifdef FERMI case 1024: hipLaunchKernelGGL(( SumW<1024>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; #endif case 512: hipLaunchKernelGGL(( SumW<512>), dim3(r), 
dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; case 256: hipLaunchKernelGGL(( SumW<256>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; case 128: hipLaunchKernelGGL(( SumW<128>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; case 64: hipLaunchKernelGGL(( SumW<64>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; case 32: hipLaunchKernelGGL(( SumW<32>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; case 16: hipLaunchKernelGGL(( SumW<16>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; case 8: hipLaunchKernelGGL(( SumW<8>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; case 4: hipLaunchKernelGGL(( SumW<4>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; case 2: hipLaunchKernelGGL(( SumW<2>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; case 1: hipLaunchKernelGGL(( SumW<1>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, W, n, sumW); break; } } template <int blockSize> KERNEL SumH(cudafloat * H, int m, cudafloat * sumH) { extern __shared__ cudafloat h[]; h[threadIdx.x] = CUDA_VALUE(0.0); for(int k = threadIdx.x; k < m; k += blockSize) { h[threadIdx.x] += HMATRIX(blockIdx.x, k, gridDim.x, m); } __syncthreads(); if (blockSize >= 1024) { if (threadIdx.x < 512) h[threadIdx.x] += h[threadIdx.x + 512]; __syncthreads(); } if (blockSize >= 512) { if (threadIdx.x < 256) h[threadIdx.x] += h[threadIdx.x + 256]; __syncthreads(); } if (blockSize >= 256) { if (threadIdx.x < 128) h[threadIdx.x] += h[threadIdx.x + 128]; __syncthreads(); } if (blockSize >= 128) { if (threadIdx.x < 64) h[threadIdx.x] += h[threadIdx.x + 64]; __syncthreads(); } if (threadIdx.x < 32) { volatile cudafloat * _h = h; if (blockSize >= 64) _h[threadIdx.x] += _h[threadIdx.x + 32]; if (blockSize >= 32) _h[threadIdx.x] += _h[threadIdx.x + 16]; if (blockSize >= 16) _h[threadIdx.x] += _h[threadIdx.x + 8]; if (blockSize >= 8) _h[threadIdx.x] += _h[threadIdx.x + 4]; if (blockSize >= 4) _h[threadIdx.x] += _h[threadIdx.x + 2]; if (blockSize >= 2) _h[threadIdx.x] += _h[threadIdx.x + 1]; if (threadIdx.x == 0) { cudafloat sum = h[0]; if (sum < SMALL_VALUE_TO_ADD_DENOMINATOR) sum = SMALL_VALUE_TO_ADD_DENOMINATOR; sumH[blockIdx.x] = sum; } } } void KernelSumH(int blockSize, cudafloat * H, int r, int m, cudafloat * sumH) { switch(blockSize) { #ifdef FERMI case 1024: hipLaunchKernelGGL(( SumH<1024>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; #endif case 512: hipLaunchKernelGGL(( SumH<512>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; case 256: hipLaunchKernelGGL(( SumH<256>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; case 128: hipLaunchKernelGGL(( SumH<128>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; case 64: hipLaunchKernelGGL(( SumH<64>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; case 32: hipLaunchKernelGGL(( SumH<32>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; case 16: hipLaunchKernelGGL(( SumH<16>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; case 8: hipLaunchKernelGGL(( SumH<8>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; case 4: hipLaunchKernelGGL(( 
SumH<4>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; case 2: hipLaunchKernelGGL(( SumH<2>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; case 1: hipLaunchKernelGGL(( SumH<1>), dim3(r), dim3(blockSize), blockSize * sizeof(cudafloat), 0, H, m, sumH); break; } } //#define SW(_R, _C) sw[(_R)][(_C)] #define SW(_R, _C) (sw[(_C)][(_R)]) #define SVH(_R, _C) svh[(_R)][(_C)] //#define SVH(_R, _C) (svh[(_C)][(_R)]) //#define SH(_R, _C) sh[(_R)][(_C)] #define SH(_R, _C) sh[(_C)][(_R)] #define SVW(_R, _C) svw[(_R)][(_C)] //#define SVW(_R, _C) svw[(_C)][(_R)] KERNEL UpdateW_MD(cudafloat * W, cudafloat * H, cudafloat * V, cudafloat * WH, cudafloat * sumH, int n, int m, int r) { __shared__ cudafloat SH(32, 32); __shared__ cudafloat SVW(32, 32); int x = blockIdx.x * 32 + threadIdx.x; int y = blockIdx.y * 32 + threadIdx.y; cudafloat sum1 = CUDA_VALUE(0.0); cudafloat sum2 = CUDA_VALUE(0.0); for(int k = 0; k < m; k += 32) { int tx = threadIdx.x + 16; if (x < r && threadIdx.y + k < m) { int ky = k + threadIdx.y; SH(threadIdx.x, threadIdx.y) = HMATRIX(x, ky, r, m); SH(tx, threadIdx.y) = (x + 16 < r) ? HMATRIX(x + 16, ky, r, m) : CUDA_VALUE(0.0); } else { SH(threadIdx.x, threadIdx.y) = CUDA_VALUE(0.0); SH(tx, threadIdx.y) = CUDA_VALUE(0.0); } if (y < n && k + threadIdx.x < m) { int idx = (k + threadIdx.x) * n + y; SVW(threadIdx.y, threadIdx.x) = (V[idx] / (WH[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR)); idx += (n << 4); SVW(threadIdx.y, tx) = (k + tx < m) ? (V[idx] / (WH[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR)) : CUDA_VALUE(0.0); } else { SVW(threadIdx.y, threadIdx.x) = CUDA_VALUE(0.0); SVW(threadIdx.y, tx) = CUDA_VALUE(0.0); } __syncthreads(); for(int i = 0; i < 32; i++) { sum1 += SH(threadIdx.x, i) * SVW(threadIdx.y, i); sum2 += SH(tx, i) * SVW(threadIdx.y, i); } __syncthreads(); } if (y < n && x < r) { WMATRIX(y, x, n, r) *= (sum1 / sumH[x]); x += 16; if (x < r) WMATRIX(y, x, n, r) *= (sum2 / sumH[x]); } } KERNEL UpdateH_MD(cudafloat * H, cudafloat * W, cudafloat * V, cudafloat * WH, cudafloat * sumW, int n, int m, int r) { __shared__ cudafloat SW(32, 32); __shared__ cudafloat SVH(32, 32); int x = blockIdx.x * 32 + threadIdx.x; int y = blockIdx.y * 32 + threadIdx.y; cudafloat sum1 = CUDA_VALUE(0.0); cudafloat sum2 = CUDA_VALUE(0.0); for(int k = 0; k < n; k += 32) { int ty = threadIdx.y + 16; if (y < r && k + threadIdx.x < n) { int kx = k + threadIdx.x; SW(threadIdx.x, threadIdx.y) = WMATRIX(kx, y, n, r); SW(threadIdx.x, ty) = (y + 16 < r) ? WMATRIX(kx, y + 16, n, r) : CUDA_VALUE(0.0); } else { SW(threadIdx.x, threadIdx.y) = CUDA_VALUE(0.0); SW(threadIdx.x, ty) = CUDA_VALUE(0.0); } if (x < m && k + threadIdx.y < n) { int idx = x * n + (k + threadIdx.y); SVH(threadIdx.y, threadIdx.x) = V[idx] / (WH[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR); idx += 16; SVH(ty, threadIdx.x) = (k + ty < n) ? (V[idx] / (WH[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR)) : CUDA_VALUE(0.0); } else { SVH(threadIdx.y, threadIdx.x) = CUDA_VALUE(0.0); SVH(ty, threadIdx.x) = CUDA_VALUE(0.0); } __syncthreads(); for(int i = 0; i < 32; i++) { sum1 += SW(i, threadIdx.y) * SVH(i, threadIdx.x); sum2 += SW(i, ty) * SVH(i, threadIdx.x); } __syncthreads(); } if (y < r && x < m) { HMATRIX(y, x, r, m) *= (sum1 / sumW[y]); y += 16; if (y < r) HMATRIX(y, x, r, m) *= (sum2 / sumW[y]); } } //! @} }
2ab2521840034c93377f84bbeb71f1bbdd9bfbf1.cu
/* Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendonša Lopes This file is part of GPUMLib. GPUMLib is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "NMFkernels.h" namespace GPUMLib { //! \addtogroup nmfkernels Non-negative Matrix Factorization kernels //! @{ // NMF_AditiveEuclidianDistance kernels KERNEL UpdateMatrix_AE(cudafloat * X, cudafloat * deltaX1, cudafloat * deltaX2, int elements) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < elements) { cudafloat v = X[idx] + (X[idx] / deltaX2[idx]) * (deltaX1[idx] - deltaX2[idx]); if (v < CUDA_VALUE(0.0)) v = CUDA_VALUE(0.0); X[idx] = v; } } // NMF_MultiplicativeEuclidianDistance kernels KERNEL UpdateMatrix_ME(cudafloat * nm, cudafloat * dm, cudafloat * m, int elements) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < elements) m[idx] *= nm[idx] / (dm[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR); } // NMF_MultiplicativeDivergence kernels #ifdef ROW_MAJOR_H #define HMATRIX(_ROW, _COL, _R, _M) (H[(_ROW) * (_M) + (_COL)]) #else #define HMATRIX(_ROW, _COL, _R, _M) (H[(_COL) * (_R) + (_ROW)]) #endif #ifdef ROW_MAJOR_W #define WMATRIX(_ROW, _COL, _N, _R) (W[(_ROW) * (_R) + (_COL)]) #else #define WMATRIX(_ROW, _COL, _N, _R) (W[(_COL) * (_N) + (_ROW)]) #endif template <int blockSize> KERNEL SumW(cudafloat * W, int n, cudafloat * sumW) { extern __shared__ cudafloat w[]; w[threadIdx.x] = CUDA_VALUE(0.0); for(int k = threadIdx.x; k < n; k += blockSize) { w[threadIdx.x] += WMATRIX(k, blockIdx.x, n, gridDim.x); } __syncthreads(); if (blockSize >= 1024) { if (threadIdx.x < 512) w[threadIdx.x] += w[threadIdx.x + 512]; __syncthreads(); } if (blockSize >= 512) { if (threadIdx.x < 256) w[threadIdx.x] += w[threadIdx.x + 256]; __syncthreads(); } if (blockSize >= 256) { if (threadIdx.x < 128) w[threadIdx.x] += w[threadIdx.x + 128]; __syncthreads(); } if (blockSize >= 128) { if (threadIdx.x < 64) w[threadIdx.x] += w[threadIdx.x + 64]; __syncthreads(); } if (threadIdx.x < 32) { volatile cudafloat * _w = w; if (blockSize >= 64) _w[threadIdx.x] += _w[threadIdx.x + 32]; if (blockSize >= 32) _w[threadIdx.x] += _w[threadIdx.x + 16]; if (blockSize >= 16) _w[threadIdx.x] += _w[threadIdx.x + 8]; if (blockSize >= 8) _w[threadIdx.x] += _w[threadIdx.x + 4]; if (blockSize >= 4) _w[threadIdx.x] += _w[threadIdx.x + 2]; if (blockSize >= 2) _w[threadIdx.x] += _w[threadIdx.x + 1]; if (threadIdx.x == 0) { cudafloat sum = w[0]; if (sum < SMALL_VALUE_TO_ADD_DENOMINATOR) sum = SMALL_VALUE_TO_ADD_DENOMINATOR; sumW[blockIdx.x] = sum; } } } void KernelSumW(int blockSize, cudafloat * W, int n, int r, cudafloat * sumW) { switch(blockSize) { #ifdef FERMI case 1024: SumW<1024><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; #endif case 512: SumW<512><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; case 256: SumW<256><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; case 128: 
SumW<128><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; case 64: SumW<64><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; case 32: SumW<32><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; case 16: SumW<16><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; case 8: SumW<8><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; case 4: SumW<4><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; case 2: SumW<2><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; case 1: SumW<1><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(W, n, sumW); break; } } template <int blockSize> KERNEL SumH(cudafloat * H, int m, cudafloat * sumH) { extern __shared__ cudafloat h[]; h[threadIdx.x] = CUDA_VALUE(0.0); for(int k = threadIdx.x; k < m; k += blockSize) { h[threadIdx.x] += HMATRIX(blockIdx.x, k, gridDim.x, m); } __syncthreads(); if (blockSize >= 1024) { if (threadIdx.x < 512) h[threadIdx.x] += h[threadIdx.x + 512]; __syncthreads(); } if (blockSize >= 512) { if (threadIdx.x < 256) h[threadIdx.x] += h[threadIdx.x + 256]; __syncthreads(); } if (blockSize >= 256) { if (threadIdx.x < 128) h[threadIdx.x] += h[threadIdx.x + 128]; __syncthreads(); } if (blockSize >= 128) { if (threadIdx.x < 64) h[threadIdx.x] += h[threadIdx.x + 64]; __syncthreads(); } if (threadIdx.x < 32) { volatile cudafloat * _h = h; if (blockSize >= 64) _h[threadIdx.x] += _h[threadIdx.x + 32]; if (blockSize >= 32) _h[threadIdx.x] += _h[threadIdx.x + 16]; if (blockSize >= 16) _h[threadIdx.x] += _h[threadIdx.x + 8]; if (blockSize >= 8) _h[threadIdx.x] += _h[threadIdx.x + 4]; if (blockSize >= 4) _h[threadIdx.x] += _h[threadIdx.x + 2]; if (blockSize >= 2) _h[threadIdx.x] += _h[threadIdx.x + 1]; if (threadIdx.x == 0) { cudafloat sum = h[0]; if (sum < SMALL_VALUE_TO_ADD_DENOMINATOR) sum = SMALL_VALUE_TO_ADD_DENOMINATOR; sumH[blockIdx.x] = sum; } } } void KernelSumH(int blockSize, cudafloat * H, int r, int m, cudafloat * sumH) { switch(blockSize) { #ifdef FERMI case 1024: SumH<1024><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; #endif case 512: SumH<512><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; case 256: SumH<256><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; case 128: SumH<128><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; case 64: SumH<64><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; case 32: SumH<32><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; case 16: SumH<16><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; case 8: SumH<8><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; case 4: SumH<4><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; case 2: SumH<2><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; case 1: SumH<1><<<r, blockSize, blockSize * sizeof(cudafloat)>>>(H, m, sumH); break; } } //#define SW(_R, _C) sw[(_R)][(_C)] #define SW(_R, _C) (sw[(_C)][(_R)]) #define SVH(_R, _C) svh[(_R)][(_C)] //#define SVH(_R, _C) (svh[(_C)][(_R)]) //#define SH(_R, _C) sh[(_R)][(_C)] #define SH(_R, _C) sh[(_C)][(_R)] #define SVW(_R, _C) svw[(_R)][(_C)] //#define SVW(_R, _C) svw[(_C)][(_R)] KERNEL UpdateW_MD(cudafloat * W, cudafloat * H, cudafloat * V, cudafloat * WH, cudafloat * sumH, int n, int m, int r) { __shared__ cudafloat SH(32, 32); __shared__ cudafloat SVW(32, 32); int x = blockIdx.x * 32 + threadIdx.x; 
int y = blockIdx.y * 32 + threadIdx.y; cudafloat sum1 = CUDA_VALUE(0.0); cudafloat sum2 = CUDA_VALUE(0.0); for(int k = 0; k < m; k += 32) { int tx = threadIdx.x + 16; if (x < r && threadIdx.y + k < m) { int ky = k + threadIdx.y; SH(threadIdx.x, threadIdx.y) = HMATRIX(x, ky, r, m); SH(tx, threadIdx.y) = (x + 16 < r) ? HMATRIX(x + 16, ky, r, m) : CUDA_VALUE(0.0); } else { SH(threadIdx.x, threadIdx.y) = CUDA_VALUE(0.0); SH(tx, threadIdx.y) = CUDA_VALUE(0.0); } if (y < n && k + threadIdx.x < m) { int idx = (k + threadIdx.x) * n + y; SVW(threadIdx.y, threadIdx.x) = (V[idx] / (WH[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR)); idx += (n << 4); SVW(threadIdx.y, tx) = (k + tx < m) ? (V[idx] / (WH[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR)) : CUDA_VALUE(0.0); } else { SVW(threadIdx.y, threadIdx.x) = CUDA_VALUE(0.0); SVW(threadIdx.y, tx) = CUDA_VALUE(0.0); } __syncthreads(); for(int i = 0; i < 32; i++) { sum1 += SH(threadIdx.x, i) * SVW(threadIdx.y, i); sum2 += SH(tx, i) * SVW(threadIdx.y, i); } __syncthreads(); } if (y < n && x < r) { WMATRIX(y, x, n, r) *= (sum1 / sumH[x]); x += 16; if (x < r) WMATRIX(y, x, n, r) *= (sum2 / sumH[x]); } } KERNEL UpdateH_MD(cudafloat * H, cudafloat * W, cudafloat * V, cudafloat * WH, cudafloat * sumW, int n, int m, int r) { __shared__ cudafloat SW(32, 32); __shared__ cudafloat SVH(32, 32); int x = blockIdx.x * 32 + threadIdx.x; int y = blockIdx.y * 32 + threadIdx.y; cudafloat sum1 = CUDA_VALUE(0.0); cudafloat sum2 = CUDA_VALUE(0.0); for(int k = 0; k < n; k += 32) { int ty = threadIdx.y + 16; if (y < r && k + threadIdx.x < n) { int kx = k + threadIdx.x; SW(threadIdx.x, threadIdx.y) = WMATRIX(kx, y, n, r); SW(threadIdx.x, ty) = (y + 16 < r) ? WMATRIX(kx, y + 16, n, r) : CUDA_VALUE(0.0); } else { SW(threadIdx.x, threadIdx.y) = CUDA_VALUE(0.0); SW(threadIdx.x, ty) = CUDA_VALUE(0.0); } if (x < m && k + threadIdx.y < n) { int idx = x * n + (k + threadIdx.y); SVH(threadIdx.y, threadIdx.x) = V[idx] / (WH[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR); idx += 16; SVH(ty, threadIdx.x) = (k + ty < n) ? (V[idx] / (WH[idx] + SMALL_VALUE_TO_ADD_DENOMINATOR)) : CUDA_VALUE(0.0); } else { SVH(threadIdx.y, threadIdx.x) = CUDA_VALUE(0.0); SVH(ty, threadIdx.x) = CUDA_VALUE(0.0); } __syncthreads(); for(int i = 0; i < 32; i++) { sum1 += SW(i, threadIdx.y) * SVH(i, threadIdx.x); sum2 += SW(i, ty) * SVH(i, threadIdx.x); } __syncthreads(); } if (y < r && x < m) { HMATRIX(y, x, r, m) *= (sum1 / sumW[y]); y += 16; if (y < r) HMATRIX(y, x, r, m) *= (sum2 / sumW[y]); } } //! @} }
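UpdateW_MD and UpdateH_MD implement the standard multiplicative update rules for NMF under the (generalized) KL divergence, with SumW and SumH supplying the column sums of W and row sums of H that appear in the denominators. Written out in the usual notation (a sketch; V is n x m, W is n x r, H is r x m, and V is approximated by WH):

W_{ia} \leftarrow W_{ia} \, \frac{\sum_{\mu} H_{a\mu} \, V_{i\mu} / (WH)_{i\mu}}{\sum_{\nu} H_{a\nu}}, \qquad
H_{a\mu} \leftarrow H_{a\mu} \, \frac{\sum_{i} W_{ia} \, V_{i\mu} / (WH)_{i\mu}}{\sum_{k} W_{ka}}

SMALL_VALUE_TO_ADD_DENOMINATOR guards both the (WH) entries and the clamped sums against division by zero, matching the floors applied in SumW and SumH.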
d5b21ffaefb58347f205bbba992caa7838d9e861.hip
// !!! This is a file automatically generated by hipify!!! #include <hipcub/hipcub.hpp> #include <cstdio> void b() { printf("b() called\n"); hipcub::DoubleBuffer<unsigned int> d_keys; hipcub::DoubleBuffer<hipcub::NullType> d_values; size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys, d_values, 1024); }
d5b21ffaefb58347f205bbba992caa7838d9e861.cu
#include <cub/cub.cuh> void b() { printf("b() called\n"); cub::DoubleBuffer<unsigned int> d_keys; cub::DoubleBuffer<cub::NullType> d_values; size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs(NULL, temp_storage_bytes, d_keys, d_values, 1024); }
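Calling SortPairs with a null temp-storage pointer, as b() does, only writes the required workspace size into temp_storage_bytes; nothing is sorted. The usual two-phase pattern looks like this (a sketch with the same DoubleBuffer arguments, assuming d_keys and d_values wrap valid device arrays of num_items elements; error checking omitted):

size_t temp_storage_bytes = 0;
cub::DeviceRadixSort::SortPairs(nullptr, temp_storage_bytes, d_keys, d_values, num_items);         // phase 1: size query
void* d_temp_storage = nullptr;
cudaMalloc(&d_temp_storage, temp_storage_bytes);                                                   // allocate workspace
cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items);  // phase 2: sort
cudaFree(d_temp_storage);
// d_keys.Current() / d_values.Current() then point at the sorted buffers.

In the HIP version the same pattern applies with hipcub::DeviceRadixSort and hipMalloc/hipFree.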
6872017fc55dbf7cb191850841439b04d7245518.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void printSuccessForCorrectExecutionConfiguration() { if(threadIdx.x == 1023 && blockIdx.x == 255) { printf("Success!\n"); } } int main() { /* * Update the execution configuration so that the kernel * will print `"Success!"`. */ hipLaunchKernelGGL(( printSuccessForCorrectExecutionConfiguration), dim3(256), dim3(1024), 0, 0, ); hipDeviceSynchronize(); }
6872017fc55dbf7cb191850841439b04d7245518.cu
#include <stdio.h> __global__ void printSuccessForCorrectExecutionConfiguration() { if(threadIdx.x == 1023 && blockIdx.x == 255) { printf("Success!\n"); } } int main() { /* * Update the execution configuration so that the kernel * will print `"Success!"`. */ printSuccessForCorrectExecutionConfiguration<<<256, 1024>>>(); cudaDeviceSynchronize(); }
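The guard in the kernel fires only for threadIdx.x == 1023 inside blockIdx.x == 255, so the minimal configuration that prints "Success!" is exactly the one used above: 256 blocks of 1024 threads, i.e. the very last thread of the grid.

// Flattened, that thread's global index is
int global = blockIdx.x * blockDim.x + threadIdx.x;   // 255 * 1024 + 1023 == 262143, the last of 262144 threads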
e91a6104570e28ab358b639a9ef74125a28d147c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*----------------------------*/ /* REDUCTION KERNEL FUNCTIONS */ /* IMPLEMENTATION */ /*----------------------------*/ #include "GlobalDeclarations.cuh" #include "ReductionBase.cuh" #include "ReductionKernels.cuh" #include "Algorithms.cuh" #include <cstdio> using namespace std; /********************************* * DECLARATIONS * * ____________ * *********************************/ __device__ datatype* experimental_array; /************************************ * BATCHED REDUCTION WRAPPERS * * __________________________ * ************************************/ /*--------------------------------*/ /* 2-STAGE MULTIBLOCK REDUCTION */ /* *STAGE 1* */ /*--------------------------------*/ __global__ void multi_reduction_squared_sum_STAGE1( datatype* in, datatype* out, datatype* counter, int stride) { // blockIdx.y = row = reduction id // blockIdx.x = reduction axis int N = *((int*)(counter + blockIdx.y)); datatype sum = 0; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) { // Retrieve value filtered through function sum = sqval(in[blockIdx.y*stride + index]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output if (gridDim.x > 1) { experimental_array[blockIdx.y * 1024 + blockIdx.x] = sum; } else { *(out + blockIdx.y) = sum; } } } /*--------------------------------*/ /* 2-STAGE MULTIBLOCK REDUCTION */ /* *STAGE 2* */ /* ALSO COLLINCOMB FUSED IN KERNEL*/ /* TO INCREASE CONCURRENCY. */ /*--------------------------------*/ __global__ void multi_reduction_squared_sum_STAGE2_together_collincomb( datatype* in, datatype* out, datatype* counter, int stride, datatype *A, datatype* columns, datatype* out2, unsigned int rowsX) { if (blockIdx.y == 0) { /**/ /* *STAGE 2* */ /**/ int N = (*((int*)(counter + blockIdx.x)) + 1024 - 1) / 1024; datatype sum = 0; if (threadIdx.x < N) { sum = experimental_array[blockIdx.x * 1024 + threadIdx.x]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *(out + blockIdx.x) = sum; } } else { /**/ /* *COLLINCOMB* */ /**/ if (threadIdx.x < rowsX) { // Multi-Reduction sub-id: blockIdx.x columns += blockIdx.x * stride; in += blockIdx.x * stride; out2 += blockIdx.x * rowsX; unsigned int N = *((int*)(counter + blockIdx.x)); datatype sum = 0; for (int i = 0; i < N; i++) { sum += A[((int)columns[i])*rowsX + threadIdx.x] * in[i]; } *(out2 + threadIdx.x) = sum; } } } /*------------------------------------*/ /* 2D + 2-STAGE REDUCTION FOR MATRIX- */ /* VECTOR PRODUCT FILTERED BY COLUMNS */ /* *STAGE 1* */ /*------------------------------------*/ __global__ void multi_reduction_smallGamma_times_gammaJ_STAGE1( datatype *Gamma, datatype* columns, datatype* gammaJ, datatype* out, datatype* counters, unsigned int pJ) { // blockIdx.x - reduction axis // blockIdx.y - reduction ID axis int N = *((int*)counters); if (N != 0) { if (pJ == blockIdx.y) { if (blockIdx.x == 0 && threadIdx.x == 0) { *(out + pJ) = *(out + pJ - gridDim.y); } return; } datatype sum = 0; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) { // Retrieve value filtered through columns sum = Gamma[((int)columns[index])*gridDim.y + blockIdx.y] * gammaJ[index]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output N = (N + 1024 - 1) / 1024; if (N > 1) { experimental_array[blockIdx.y * 1024 + blockIdx.x] = sum; } else { if (blockIdx.x == 0) { *(out + blockIdx.y) = sum; } } } } } /*------------------------------------*/ /* 2D + 2-STAGE REDUCTION FOR MATRIX- */ 
/* VECTOR PRODUCT FILTERED BY COLUMNS */ /* *STAGE 2* */ /*------------------------------------*/ __global__ void multi_reduction_smallGamma_times_gammaJ_STAGE2( datatype* out, datatype* counters, unsigned int pJ) { int N = *((int*)counters); if (N != 0) { if (pJ == blockIdx.x) { return; } N = (N + 1024 - 1) / 1024; if (N > 1) { datatype sum = 0; if (threadIdx.x < N) { // Retrieve partial sum sum = experimental_array[blockIdx.x * 1024 + threadIdx.x]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *(out + blockIdx.x) = sum; } } } } /************************************ * REDUCTION STAGES FUNCTIONS * * __________________________ * ************************************/ ////////////////////////////////////// // Reduction across multiple blocks // ////////////////////////////////////// __global__ void deviceReduceKernel_2D_square_values(datatype *in, int N, datatype* out) { datatype sum = 0; if (threadIdx.x < N) { // Retrieve value filtered through function sum = sqval(in[blockIdx.x*N + threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out[blockIdx.x] = invSQRT(sum); } } ////////////////////////////////////// // - 1 STAGE MULTIBLOCK REDUCTION - // // - LINEAR COMBINATION OF ROWS - // ////////////////////////////////////// __global__ void rowlincomb( datatype *A, datatype* x, datatype* out, datatype* cols, unsigned int rowsX, datatype* counters, datatype* out2, datatype* D, datatype* out3, unsigned int colsX) { unsigned int N; if ( (N = *((int*)counters)) != 0) { if (blockIdx.x < N) { datatype sum = 0; if (threadIdx.x < rowsX) { // Retrieve value filtered through columns and rows sum = A[(int)cols[blockIdx.x] * rowsX + threadIdx.x] * x[threadIdx.x]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out[blockIdx.x] = sum; } } else if (blockIdx.x >= colsX) { if (blockIdx.x == gridDim.x - 1) { // Calculate the norm of the new atom // by combining the previous results. 
// datatype sum = 0; if (threadIdx.x < rowsX) { // Retrieve value filtered through sqval sum = sqval(x[threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *out2 = sqrt(sum); } } else { // We now calculate the following // vector-matrix multiplication in // parallel: // (atom'*D) // datatype sum = 0; if (threadIdx.x < rowsX) { // Retrieve column sum = D[(blockIdx.x - colsX)*rowsX + threadIdx.x] * x[threadIdx.x]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out3[blockIdx.x - colsX] = sum; } } } } } ////////////////////////////////////// // - 3 STAGE MULTIBLOCK REDUCTION - // // - RMSE CALCULATION - // // - *STAGE 1* - // ////////////////////////////////////// __global__ void RMSE_stage1(datatype* X, datatype* Xappr, unsigned int N, datatype* out) { datatype sum = 0; if (threadIdx.x < N) { // Retrieve value and compute difference sum = sqval(X[blockIdx.x*N + threadIdx.x] - Xappr[blockIdx.x*N + threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out[blockIdx.x] = sum; } } ////////////////////////////////////// // - 3 STAGE MULTIBLOCK REDUCTION - // // - RMSE CALCULATION - // // - *STAGE 2* - // ////////////////////////////////////// __global__ void RMSE_stage2( datatype* in, unsigned int N, unsigned int size, unsigned int iter, datatype* bitmap) { datatype sum = 0; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) { // Retrieve value sum = in[index]; in[index] = sum * bitmap[index]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { //Write to output if (gridDim.x > 1) { experimental_array[blockIdx.x] = sum; } else { printf("Iteration %02d / %d complete, RMSE = %.5f\n", iter, NUMBERofITERATIONS, sqrt(sum / size)); } } } ////////////////////////////////////// // - 3 STAGE MULTIBLOCK REDUCTION - // // - RMSE CALCULATION - // // - *STAGE 3* - // ////////////////////////////////////// __global__ void RMSE_stage3(unsigned int N, unsigned int size, unsigned int iter) { datatype sum = 0; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) { // Retrieve value sum = experimental_array[index]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { //Write to output if (gridDim.x > 1) { experimental_array[blockIdx.x] = sum; } else { printf("Iteration %02d / %d complete, RMSE = %.5f\n", iter, NUMBERofITERATIONS, sqrt(sum / size)); } } } ////////////////////////////////////////// // - MULTI-STAGE REDUCTION TO BUFFER -// // - BLOCK-WIDE MAXIMUM VALUE & INDEX -// // - *STAGE 1* - // ////////////////////////////////////////// __global__ void device_max_err_STAGE1(datatype* in, int N, datatype* out) { if (!(*out)) { // Our atom does not need replacement return; } DoubleReductionType myElement; myElement.value = 0; int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < N) { // Retrieve value myElement.value = in[index]; // Store index myElement.index = index; } myElement = blockReduceMaximumIndex(myElement); if (threadIdx.x == 0) { // Write to output if (gridDim.x > 1) { ((DoubleReductionType*)experimental_array)[blockIdx.x] = myElement; } else { *(out + 1) = myElement.index; in[myElement.index] = 0; } } } ////////////////////////////////////////// // - MULTI-STAGE REDUCTION TO BUFFER -// // - BLOCK-WIDE MAXIMUM VALUE & INDEX -// // - *STAGE 2* - // ////////////////////////////////////////// __global__ void device_max_err_STAGE2(datatype* in, int N, datatype* out) { if (!(*out)) { // Our atom does not need replacement return; } DoubleReductionType myElement; myElement.value = 0; 
int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < N) { // Retrieve value myElement = ((DoubleReductionType*)experimental_array)[index]; } myElement = blockReduceMaximumIndex(myElement); if (threadIdx.x == 0) { // Write to output if (gridDim.x > 1) { ((DoubleReductionType*)experimental_array)[blockIdx.x] = myElement; } else { *(out + 1) = myElement.index; in[myElement.index] = 0; } } } /////////////////////////////////////////// // - SINGLE-STAGE IN-PLACE REDUCTION -// // - COLUMN-WISE MAXIMUM SQUARE VALUE -// // - SINGLE WARP -// /////////////////////////////////////////// __global__ void device_max( datatype* G, unsigned int colsD, datatype* usecount, datatype* replaced, unsigned int j) { datatype max = 0; if (threadIdx.x < colsD && threadIdx.x != j) { // Retrieve value max = sqval( G[threadIdx.x] ); } max = warpReduceMax(max); if (threadIdx.x == 0) { // Write to output *G = ( ( max > SQR_muTHRESH ) || ( *((unsigned int*)usecount) < USE_THRESH ) ) && (*replaced == 0); } } ////////////////////////////////////////// // - 1 STAGE EUCLEDIAN NORM REDUCTION - // ////////////////////////////////////////// __global__ void EUnorm(datatype* in, datatype* out, unsigned int N) { if (!(*out)) { // Our atom does not need replacement return; } in += (unsigned int)(*(out + 1))*N; datatype sum = 0; if (threadIdx.x < N) { // Retrieve value filtered through sqval sum = sqval(in[threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *(out + 2) = sqrt(sum); } } //////////////////////////////////////// // - 1 STAGE SPECIAL CASE REDUCTION - // // - ERROR CALCULATION - // //////////////////////////////////////// __global__ void SCase_err_stage1( datatype* X, datatype* Xappr, unsigned int N, unsigned int colsX, datatype* out, datatype* counters, datatype* unused, datatype* UScounter) { if (*((int*)counters) == 0) { if (blockIdx.x < (colsX - *UScounter)) { unsigned int column = unused[myGenerator(blockIdx.x, blockDim.x)]; datatype sum = 0; if (threadIdx.x < N) { // Retrieve value and compute difference sum = sqval(X[column*N + threadIdx.x] - Xappr[column*N + threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out[blockIdx.x] = sum; } } } } //////////////////////////////////////// // - 2 STAGE SPECIAL CASE REDUCTION - // // - MAXIMUM CALCULATION - // //////////////////////////////////////// __global__ void SCase_err_stage2( datatype* in, int N, datatype* out, unsigned int colsX, datatype* counters, datatype* UScounter) { if (*((int*)counters) == 0) { DoubleReductionType myElement; myElement.value = 0; int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < N && index < (colsX - *UScounter) ) { // Retrieve value myElement.value = in[index]; // Store index myElement.index = index; } myElement = blockReduceMaximumIndex(myElement); if (threadIdx.x == 0) { // Write to output if (gridDim.x > 1) { ((DoubleReductionType*)experimental_array)[blockIdx.x] = myElement; } else { *out = myElement.index; } } } } //////////////////////////////////////// // - 2 STAGE SPECIAL CASE REDUCTION - // // - MAXIMUM CALCULATION - // //////////////////////////////////////// __global__ void SCase_err_stage3( int N, datatype* out, datatype* counters) { if (*((int*)counters) == 0) { DoubleReductionType myElement; myElement.value = 0; int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < N) { // Retrieve value myElement = ((DoubleReductionType*)experimental_array)[index]; } myElement = blockReduceMaximumIndex(myElement); if (threadIdx.x == 0) { // Write to 
output if (gridDim.x > 1) { ((DoubleReductionType*)experimental_array)[blockIdx.x] = myElement; } else { *out = myElement.index; } } } } ////////////////////////////////////////// // - 1 STAGE EUCLEDIAN NORM REDUCTION - // ////////////////////////////////////////// __global__ void SCase_norm( datatype* in, datatype* out, unsigned int N, datatype* counters, datatype* unused, unsigned int dim) { if(*((int*)counters) == 0) { in += (unsigned int)unused[myGenerator((unsigned int)(*out), dim)] * N; datatype sum = 0; if (threadIdx.x < N) { // Retrieve value filtered through sqval sum = sqval(in[threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *(out + 1) = sqrt(sum); } } }
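The kernels in the row above all call block-level helpers (blockReduceSum, warpReduceMax, blockReduceMaximumIndex) declared in ReductionBase.cuh, which is not included in this row, and they operate on a datatype typedef from GlobalDeclarations.cuh, also not shown. For orientation only, here is a minimal warp-shuffle sketch of the kind of blockReduceSum those call sites assume; the name matches the calls above, but the body, the 32-entry scratch array, and the assumption that datatype is a plain float or double are mine, not the project's.

// Hedged sketch of a block-wide sum reduction (assumes blockDim.x is a
// multiple of warpSize and at most 1024, as the launches above imply).
__inline__ __device__ datatype warpReduceSum(datatype val)
{
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        val += __shfl_down(val, offset);      // lane i accumulates lane i+offset
    return val;
}

__inline__ __device__ datatype blockReduceSum(datatype val)
{
    static __shared__ datatype shared[32];    // one partial sum per warp
    const int lane = threadIdx.x % warpSize;
    const int wid  = threadIdx.x / warpSize;

    val = warpReduceSum(val);                 // reduce inside each warp
    if (lane == 0) shared[wid] = val;         // warp leaders publish partials
    __syncthreads();

    // The first warp reduces the published partials; the result is valid in
    // thread 0, which is exactly where the kernels above read it.
    val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
    if (wid == 0) val = warpReduceSum(val);
    return val;
}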
e91a6104570e28ab358b639a9ef74125a28d147c.cu
/*----------------------------*/ /* REDUCTION KERNEL FUNCTIONS */ /* IMPLEMENTATION */ /*----------------------------*/ #include "GlobalDeclarations.cuh" #include "ReductionBase.cuh" #include "ReductionKernels.cuh" #include "Algorithms.cuh" #include <cstdio> using namespace std; /********************************* * DECLARATIONS * * ____________ * *********************************/ __device__ datatype* experimental_array; /************************************ * BATCHED REDUCTION WRAPPERS * * __________________________ * ************************************/ /*--------------------------------*/ /* 2-STAGE MULTIBLOCK REDUCTION */ /* *STAGE 1* */ /*--------------------------------*/ __global__ void multi_reduction_squared_sum_STAGE1( datatype* in, datatype* out, datatype* counter, int stride) { // blockIdx.y = row = reduction id // blockIdx.x = reduction axis int N = *((int*)(counter + blockIdx.y)); datatype sum = 0; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) { // Retrieve value filtered through function sum = sqval(in[blockIdx.y*stride + index]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output if (gridDim.x > 1) { experimental_array[blockIdx.y * 1024 + blockIdx.x] = sum; } else { *(out + blockIdx.y) = sum; } } } /*--------------------------------*/ /* 2-STAGE MULTIBLOCK REDUCTION */ /* *STAGE 2* */ /* ALSO COLLINCOMB FUSED IN KERNEL*/ /* TO INCREASE CONCURRENCY. */ /*--------------------------------*/ __global__ void multi_reduction_squared_sum_STAGE2_together_collincomb( datatype* in, datatype* out, datatype* counter, int stride, datatype *A, datatype* columns, datatype* out2, unsigned int rowsX) { if (blockIdx.y == 0) { /**/ /* *STAGE 2* */ /**/ int N = (*((int*)(counter + blockIdx.x)) + 1024 - 1) / 1024; datatype sum = 0; if (threadIdx.x < N) { sum = experimental_array[blockIdx.x * 1024 + threadIdx.x]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *(out + blockIdx.x) = sum; } } else { /**/ /* *COLLINCOMB* */ /**/ if (threadIdx.x < rowsX) { // Multi-Reduction sub-id: blockIdx.x columns += blockIdx.x * stride; in += blockIdx.x * stride; out2 += blockIdx.x * rowsX; unsigned int N = *((int*)(counter + blockIdx.x)); datatype sum = 0; for (int i = 0; i < N; i++) { sum += A[((int)columns[i])*rowsX + threadIdx.x] * in[i]; } *(out2 + threadIdx.x) = sum; } } } /*------------------------------------*/ /* 2D + 2-STAGE REDUCTION FOR MATRIX- */ /* VECTOR PRODUCT FILTERED BY COLUMNS */ /* *STAGE 1* */ /*------------------------------------*/ __global__ void multi_reduction_smallGamma_times_gammaJ_STAGE1( datatype *Gamma, datatype* columns, datatype* gammaJ, datatype* out, datatype* counters, unsigned int pJ) { // blockIdx.x - reduction axis // blockIdx.y - reduction ID axis int N = *((int*)counters); if (N != 0) { if (pJ == blockIdx.y) { if (blockIdx.x == 0 && threadIdx.x == 0) { *(out + pJ) = *(out + pJ - gridDim.y); } return; } datatype sum = 0; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) { // Retrieve value filtered through columns sum = Gamma[((int)columns[index])*gridDim.y + blockIdx.y] * gammaJ[index]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output N = (N + 1024 - 1) / 1024; if (N > 1) { experimental_array[blockIdx.y * 1024 + blockIdx.x] = sum; } else { if (blockIdx.x == 0) { *(out + blockIdx.y) = sum; } } } } } /*------------------------------------*/ /* 2D + 2-STAGE REDUCTION FOR MATRIX- */ /* VECTOR PRODUCT FILTERED BY COLUMNS */ /* *STAGE 2* */ 
/*------------------------------------*/ __global__ void multi_reduction_smallGamma_times_gammaJ_STAGE2( datatype* out, datatype* counters, unsigned int pJ) { int N = *((int*)counters); if (N != 0) { if (pJ == blockIdx.x) { return; } N = (N + 1024 - 1) / 1024; if (N > 1) { datatype sum = 0; if (threadIdx.x < N) { // Retrieve partial sum sum = experimental_array[blockIdx.x * 1024 + threadIdx.x]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *(out + blockIdx.x) = sum; } } } } /************************************ * REDUCTION STAGES FUNCTIONS * * __________________________ * ************************************/ ////////////////////////////////////// // Reduction across multiple blocks // ////////////////////////////////////// __global__ void deviceReduceKernel_2D_square_values(datatype *in, int N, datatype* out) { datatype sum = 0; if (threadIdx.x < N) { // Retrieve value filtered through function sum = sqval(in[blockIdx.x*N + threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out[blockIdx.x] = invSQRT(sum); } } ////////////////////////////////////// // - 1 STAGE MULTIBLOCK REDUCTION - // // - LINEAR COMBINATION OF ROWS - // ////////////////////////////////////// __global__ void rowlincomb( datatype *A, datatype* x, datatype* out, datatype* cols, unsigned int rowsX, datatype* counters, datatype* out2, datatype* D, datatype* out3, unsigned int colsX) { unsigned int N; if ( (N = *((int*)counters)) != 0) { if (blockIdx.x < N) { datatype sum = 0; if (threadIdx.x < rowsX) { // Retrieve value filtered through columns and rows sum = A[(int)cols[blockIdx.x] * rowsX + threadIdx.x] * x[threadIdx.x]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out[blockIdx.x] = sum; } } else if (blockIdx.x >= colsX) { if (blockIdx.x == gridDim.x - 1) { // Calculate the norm of the new atom // by combining the previous results. 
// datatype sum = 0; if (threadIdx.x < rowsX) { // Retrieve value filtered through sqval sum = sqval(x[threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *out2 = sqrt(sum); } } else { // We now calculate the following // vector-matrix multiplication in // parallel: // (atom'*D) // datatype sum = 0; if (threadIdx.x < rowsX) { // Retrieve column sum = D[(blockIdx.x - colsX)*rowsX + threadIdx.x] * x[threadIdx.x]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out3[blockIdx.x - colsX] = sum; } } } } } ////////////////////////////////////// // - 3 STAGE MULTIBLOCK REDUCTION - // // - RMSE CALCULATION - // // - *STAGE 1* - // ////////////////////////////////////// __global__ void RMSE_stage1(datatype* X, datatype* Xappr, unsigned int N, datatype* out) { datatype sum = 0; if (threadIdx.x < N) { // Retrieve value and compute difference sum = sqval(X[blockIdx.x*N + threadIdx.x] - Xappr[blockIdx.x*N + threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out[blockIdx.x] = sum; } } ////////////////////////////////////// // - 3 STAGE MULTIBLOCK REDUCTION - // // - RMSE CALCULATION - // // - *STAGE 2* - // ////////////////////////////////////// __global__ void RMSE_stage2( datatype* in, unsigned int N, unsigned int size, unsigned int iter, datatype* bitmap) { datatype sum = 0; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) { // Retrieve value sum = in[index]; in[index] = sum * bitmap[index]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { //Write to output if (gridDim.x > 1) { experimental_array[blockIdx.x] = sum; } else { printf("Iteration %02d / %d complete, RMSE = %.5f\n", iter, NUMBERofITERATIONS, sqrt(sum / size)); } } } ////////////////////////////////////// // - 3 STAGE MULTIBLOCK REDUCTION - // // - RMSE CALCULATION - // // - *STAGE 3* - // ////////////////////////////////////// __global__ void RMSE_stage3(unsigned int N, unsigned int size, unsigned int iter) { datatype sum = 0; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) { // Retrieve value sum = experimental_array[index]; } sum = blockReduceSum(sum); if (threadIdx.x == 0) { //Write to output if (gridDim.x > 1) { experimental_array[blockIdx.x] = sum; } else { printf("Iteration %02d / %d complete, RMSE = %.5f\n", iter, NUMBERofITERATIONS, sqrt(sum / size)); } } } ////////////////////////////////////////// // - MULTI-STAGE REDUCTION TO BUFFER -// // - BLOCK-WIDE MAXIMUM VALUE & INDEX -// // - *STAGE 1* - // ////////////////////////////////////////// __global__ void device_max_err_STAGE1(datatype* in, int N, datatype* out) { if (!(*out)) { // Our atom does not need replacement return; } DoubleReductionType myElement; myElement.value = 0; int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < N) { // Retrieve value myElement.value = in[index]; // Store index myElement.index = index; } myElement = blockReduceMaximumIndex(myElement); if (threadIdx.x == 0) { // Write to output if (gridDim.x > 1) { ((DoubleReductionType*)experimental_array)[blockIdx.x] = myElement; } else { *(out + 1) = myElement.index; in[myElement.index] = 0; } } } ////////////////////////////////////////// // - MULTI-STAGE REDUCTION TO BUFFER -// // - BLOCK-WIDE MAXIMUM VALUE & INDEX -// // - *STAGE 2* - // ////////////////////////////////////////// __global__ void device_max_err_STAGE2(datatype* in, int N, datatype* out) { if (!(*out)) { // Our atom does not need replacement return; } DoubleReductionType myElement; myElement.value = 0; 
int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < N) { // Retrieve value myElement = ((DoubleReductionType*)experimental_array)[index]; } myElement = blockReduceMaximumIndex(myElement); if (threadIdx.x == 0) { // Write to output if (gridDim.x > 1) { ((DoubleReductionType*)experimental_array)[blockIdx.x] = myElement; } else { *(out + 1) = myElement.index; in[myElement.index] = 0; } } } /////////////////////////////////////////// // - SINGLE-STAGE IN-PLACE REDUCTION -// // - COLUMN-WISE MAXIMUM SQUARE VALUE -// // - SINGLE WARP -// /////////////////////////////////////////// __global__ void device_max( datatype* G, unsigned int colsD, datatype* usecount, datatype* replaced, unsigned int j) { datatype max = 0; if (threadIdx.x < colsD && threadIdx.x != j) { // Retrieve value max = sqval( G[threadIdx.x] ); } max = warpReduceMax(max); if (threadIdx.x == 0) { // Write to output *G = ( ( max > SQR_muTHRESH ) || ( *((unsigned int*)usecount) < USE_THRESH ) ) && (*replaced == 0); } } ////////////////////////////////////////// // - 1 STAGE EUCLEDIAN NORM REDUCTION - // ////////////////////////////////////////// __global__ void EUnorm(datatype* in, datatype* out, unsigned int N) { if (!(*out)) { // Our atom does not need replacement return; } in += (unsigned int)(*(out + 1))*N; datatype sum = 0; if (threadIdx.x < N) { // Retrieve value filtered through sqval sum = sqval(in[threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *(out + 2) = sqrt(sum); } } //////////////////////////////////////// // - 1 STAGE SPECIAL CASE REDUCTION - // // - ERROR CALCULATION - // //////////////////////////////////////// __global__ void SCase_err_stage1( datatype* X, datatype* Xappr, unsigned int N, unsigned int colsX, datatype* out, datatype* counters, datatype* unused, datatype* UScounter) { if (*((int*)counters) == 0) { if (blockIdx.x < (colsX - *UScounter)) { unsigned int column = unused[myGenerator(blockIdx.x, blockDim.x)]; datatype sum = 0; if (threadIdx.x < N) { // Retrieve value and compute difference sum = sqval(X[column*N + threadIdx.x] - Xappr[column*N + threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output out[blockIdx.x] = sum; } } } } //////////////////////////////////////// // - 2 STAGE SPECIAL CASE REDUCTION - // // - MAXIMUM CALCULATION - // //////////////////////////////////////// __global__ void SCase_err_stage2( datatype* in, int N, datatype* out, unsigned int colsX, datatype* counters, datatype* UScounter) { if (*((int*)counters) == 0) { DoubleReductionType myElement; myElement.value = 0; int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < N && index < (colsX - *UScounter) ) { // Retrieve value myElement.value = in[index]; // Store index myElement.index = index; } myElement = blockReduceMaximumIndex(myElement); if (threadIdx.x == 0) { // Write to output if (gridDim.x > 1) { ((DoubleReductionType*)experimental_array)[blockIdx.x] = myElement; } else { *out = myElement.index; } } } } //////////////////////////////////////// // - 2 STAGE SPECIAL CASE REDUCTION - // // - MAXIMUM CALCULATION - // //////////////////////////////////////// __global__ void SCase_err_stage3( int N, datatype* out, datatype* counters) { if (*((int*)counters) == 0) { DoubleReductionType myElement; myElement.value = 0; int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < N) { // Retrieve value myElement = ((DoubleReductionType*)experimental_array)[index]; } myElement = blockReduceMaximumIndex(myElement); if (threadIdx.x == 0) { // Write to 
output if (gridDim.x > 1) { ((DoubleReductionType*)experimental_array)[blockIdx.x] = myElement; } else { *out = myElement.index; } } } } ////////////////////////////////////////// // - 1 STAGE EUCLEDIAN NORM REDUCTION - // ////////////////////////////////////////// __global__ void SCase_norm( datatype* in, datatype* out, unsigned int N, datatype* counters, datatype* unused, unsigned int dim) { if(*((int*)counters) == 0) { in += (unsigned int)unused[myGenerator((unsigned int)(*out), dim)] * N; datatype sum = 0; if (threadIdx.x < N) { // Retrieve value filtered through sqval sum = sqval(in[threadIdx.x]); } sum = blockReduceSum(sum); if (threadIdx.x == 0) { // Write to output *(out + 1) = sqrt(sum); } } }
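The CUDA row above mirrors the HIP row kernel for kernel, so one host-side detail applies to both: the partial sums travel through the __device__ pointer experimental_array, which the host must point at real storage before any two-stage reduction runs, and stage 1 expects a 2-D grid of 1024-thread blocks (reduction axis on blockIdx.x, reduction id on blockIdx.y). The launcher itself is not part of either row; the sketch below shows one launch consistent with that indexing, where the buffer names, the max_count bound and the scratch size (num_reductions x 1024 partials) are assumptions rather than the project's code.

// Hypothetical host-side setup for the two-stage squared-sum reduction.
void run_squared_sum_stage1(datatype *in, datatype *out, datatype *counter,
                            int stride, int num_reductions, int max_count)
{
    // Point the __device__ symbol experimental_array at scratch storage;
    // stage 1 writes one partial per block at [blockIdx.y * 1024 + blockIdx.x].
    datatype *scratch = nullptr;
    cudaMalloc(&scratch, sizeof(datatype) * num_reductions * 1024);
    cudaMemcpyToSymbol(experimental_array, &scratch, sizeof(datatype *));

    const int threads = 1024;                       // matches the "/ 1024" rounding in stage 2
    dim3 grid((max_count + threads - 1) / threads,  // blocks along the reduction axis
              num_reductions);                      // one block row per reduction id
    multi_reduction_squared_sum_STAGE1<<<grid, threads>>>(in, out, counter, stride);

    // Stage 2 then runs the fused STAGE2 + collincomb kernel on a
    // dim3(num_reductions, 2) grid of 1024-thread blocks (arguments omitted here).
}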
f7f759ca52fbe0054f3bd57892bd2a9e2b2dfa5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <numeric> #include <thrust/device_vector.h> #include <pybind11/numpy.h> #include <torch/extension.h> #include <quiver/common.hpp> #include <quiver/functor.cu.hpp> #include <quiver/quiver.cu.hpp> #include <quiver/reindex.cu.hpp> #include <quiver/stream_pool.hpp> #include <quiver/trace.hpp> #include <quiver/zip.hpp> #include <thrust/remove.h> template <typename IdType> HostOrderedHashTable<IdType> * FillWithDuplicates(const IdType *const input, const size_t num_input, hipStream_t stream, thrust::device_vector<IdType> &unique_items) { const auto policy = thrust::hip::par.on(stream); const int64_t num_tiles = (num_input + TILE_SIZE - 1) / TILE_SIZE; const dim3 grid(num_tiles); const dim3 block(BLOCK_SIZE); auto host_table = new HostOrderedHashTable<IdType>(num_input, 1); DeviceOrderedHashTable<IdType> device_table = host_table->DeviceHandle(); hipLaunchKernelGGL(( generate_hashmap_duplicates<IdType, BLOCK_SIZE, TILE_SIZE>) , dim3(grid), dim3(block), 0, stream, input, num_input, device_table); thrust::device_vector<int> item_prefix(num_input + 1, 0); using it = thrust::counting_iterator<IdType>; using Mapping = typename DeviceOrderedHashTable<IdType>::Mapping; thrust::for_each(it(0), it(num_input), [count = thrust::raw_pointer_cast(item_prefix.data()), table = device_table, in = input] __device__(IdType i) mutable { Mapping &mapping = *(table.Search(in[i])); if (mapping.index == i) { count[i] = 1; } }); thrust::exclusive_scan(item_prefix.begin(), item_prefix.end(), item_prefix.begin()); size_t tot = item_prefix[num_input]; unique_items.resize(tot); thrust::for_each(it(0), it(num_input), [prefix = thrust::raw_pointer_cast(item_prefix.data()), table = device_table, in = input, u = thrust::raw_pointer_cast( unique_items.data())] __device__(IdType i) mutable { Mapping &mapping = *(table.Search(in[i])); if (mapping.index == i) { mapping.local = prefix[i]; u[prefix[i]] = in[i]; } }); return host_table; } namespace quiver { template <typename T> void replicate_fill(size_t n, const T *counts, const T *values, T *outputs) { for (size_t i = 0; i < n; ++i) { const size_t c = counts[i]; std::fill(outputs, outputs + c, values[i]); outputs += c; } } class TorchQuiver { using torch_quiver_t = quiver<int64_t, CUDA>; torch_quiver_t quiver_; stream_pool pool_; public: TorchQuiver(torch_quiver_t quiver, int device = 0, int num_workers = 4) : quiver_(std::move(quiver)) { pool_ = stream_pool(num_workers); } using T = int64_t; using W = float; // deprecated, not compatible with AliGraph std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> sample_sub(const torch::Tensor &vertices, int k) const { return sample_sub_with_stream(0, vertices, k); } std::tuple<torch::Tensor, torch::Tensor> sample_neighbor(int stream_num, const torch::Tensor &vertices, int k) { hipStream_t stream = 0; if (!pool_.empty()) { stream = (pool_)[stream_num]; } const auto policy = thrust::hip::par.on(stream); const size_t bs = vertices.size(0); thrust::device_vector<T> inputs; thrust::device_vector<T> outputs; thrust::device_vector<T> output_counts; sample_kernel(stream, vertices, k, inputs, outputs, output_counts); torch::Tensor neighbors = torch::empty(outputs.size(), vertices.options()); torch::Tensor counts = torch::empty(vertices.size(0), vertices.options()); thrust::copy(outputs.begin(), outputs.end(), neighbors.data_ptr<T>()); thrust::copy(output_counts.begin(), output_counts.end(), counts.data_ptr<T>()); return 
std::make_tuple(neighbors, counts); } std::tuple<torch::Tensor, torch::Tensor> sample_kernel(const hipStream_t stream, const torch::Tensor &vertices, int k, thrust::device_vector<T> &inputs, thrust::device_vector<T> &outputs, thrust::device_vector<T> &output_counts) const { T tot = 0; const auto policy = thrust::hip::par.on(stream); thrust::device_vector<T> output_ptr; thrust::device_vector<T> output_idx; const T *p = vertices.data_ptr<T>(); const size_t bs = vertices.size(0); { TRACE_SCOPE("alloc_1"); inputs.resize(bs); output_counts.resize(bs); output_ptr.resize(bs); } // output_ptr is exclusive prefix sum of output_counts(neighbor counts // <= k) { TRACE_SCOPE("prepare"); thrust::copy(p, p + bs, inputs.begin()); // quiver_.to_local(stream, inputs); quiver_.degree(stream, inputs.data(), inputs.data() + inputs.size(), output_counts.data()); if (k >= 0) { thrust::transform(policy, output_counts.begin(), output_counts.end(), output_counts.begin(), cap_by<T>(k)); } thrust::exclusive_scan(policy, output_counts.begin(), output_counts.end(), output_ptr.begin()); tot = thrust::reduce(policy, output_counts.begin(), output_counts.end()); } { TRACE_SCOPE("alloc_2"); outputs.resize(tot); output_idx.resize(tot); } // outputs[outptr[i], outptr[i + 1]) are unique neighbors of inputs[i] // { // TRACE_SCOPE("sample"); // quiver_.sample(stream, inputs.begin(), inputs.end(), // output_ptr.begin(), output_counts.begin(), // outputs.data(), output_eid.data()); // } { TRACE_SCOPE("sample"); quiver_.new_sample( stream, k, thrust::raw_pointer_cast(inputs.data()), inputs.size(), thrust::raw_pointer_cast(output_ptr.data()), thrust::raw_pointer_cast(output_counts.data()), thrust::raw_pointer_cast(outputs.data()), thrust::raw_pointer_cast(output_idx.data())); } torch::Tensor out_neighbor; torch::Tensor out_eid; // thrust::copy(outputs.begin(), outputs.end(), // out_neighbor.data_ptr<T>()); // thrust::copy(output_eid.begin(), output_eid.end(), // out_eid.data_ptr<T>()); return std::make_tuple(out_neighbor, out_eid); } static void reindex_kernel(const hipStream_t stream, thrust::device_vector<T> &inputs, thrust::device_vector<T> &outputs, thrust::device_vector<T> &subset) { const auto policy = thrust::hip::par.on(stream); HostOrderedHashTable<T> *table; // reindex { { TRACE_SCOPE("reindex 0"); subset.resize(inputs.size() + outputs.size()); thrust::copy(policy, inputs.begin(), inputs.end(), subset.begin()); thrust::copy(policy, outputs.begin(), outputs.end(), subset.begin() + inputs.size()); thrust::device_vector<T> unique_items; unique_items.clear(); table = FillWithDuplicates(thrust::raw_pointer_cast(subset.data()), subset.size(), stream, unique_items); subset.resize(unique_items.size()); thrust::copy(policy, unique_items.begin(), unique_items.end(), subset.begin()); // thrust::sort(policy, subset.begin(), subset.end()); // subset.erase( // thrust::unique(policy, subset.begin(), subset.end()), // subset.end()); // _reindex_with(policy, outputs, subset, outputs); } { TRACE_SCOPE("permute"); // thrust::device_vector<T> s1; // s1.reserve(subset.size()); // _reindex_with(policy, inputs, subset, s1); // complete_permutation(s1, subset.size(), stream); // subset = permute(s1, subset, stream); // thrust::device_vector<T> s2; // inverse_permutation(s1, s2, stream); // permute_value(s2, outputs, stream); DeviceOrderedHashTable<T> device_table = table->DeviceHandle(); thrust::for_each( policy, outputs.begin(), outputs.end(), [device_table] __device__(T & id) mutable { using Iterator = typename 
DeviceOrderedHashTable<T>::Iterator; Iterator iter = device_table.Search(id); id = static_cast<T>((*iter).local); }); } delete table; } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> sample_sub_with_stream(int stream_num, const torch::Tensor &vertices, int k) const { TRACE_SCOPE(__func__); hipStream_t stream = 0; if (!pool_.empty()) { stream = (pool_)[stream_num]; } const auto policy = thrust::hip::par.on(stream); thrust::device_vector<T> inputs; thrust::device_vector<T> outputs; thrust::device_vector<T> output_counts; thrust::device_vector<T> subset; sample_kernel(stream, vertices, k, inputs, outputs, output_counts); int tot = outputs.size(); reindex_kernel(stream, inputs, outputs, subset); torch::Tensor out_vertices = torch::empty(subset.size(), vertices.options()); torch::Tensor row_idx = torch::empty(tot, vertices.options()); torch::Tensor col_idx = torch::empty(tot, vertices.options()); { TRACE_SCOPE("prepare output"); thrust::device_vector<T> prefix_count(output_counts.size()); thrust::device_vector<T> seq(output_counts.size()); thrust::sequence(policy, seq.begin(), seq.end()); thrust::exclusive_scan(policy, output_counts.begin(), output_counts.end(), prefix_count.begin()); const size_t m = inputs.size(); using it = thrust::counting_iterator<T>; thrust::for_each( policy, it(0), it(m), [prefix = thrust::raw_pointer_cast(prefix_count.data()), count = thrust::raw_pointer_cast(output_counts.data()), in = thrust::raw_pointer_cast(seq.data()), out = thrust::raw_pointer_cast( row_idx.data_ptr<T>())] __device__(T i) { for (int j = 0; j < count[i]; j++) { out[prefix[i] + j] = in[i]; } }); thrust::copy(subset.begin(), subset.end(), out_vertices.data_ptr<T>()); thrust::copy(outputs.begin(), outputs.end(), col_idx.data_ptr<T>()); } return std::make_tuple(out_vertices, row_idx, col_idx); } }; std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> reindex_single(torch::Tensor inputs, torch::Tensor outputs, torch::Tensor count) { using T = int64_t; hipStream_t stream = 0; const auto policy = thrust::hip::par.on(stream); thrust::device_vector<T> total_inputs(inputs.size(0)); thrust::device_vector<T> total_outputs(outputs.size(0)); thrust::device_vector<T> input_prefix(inputs.size(0)); const T *ptr; size_t bs; ptr = count.data_ptr<T>(); bs = inputs.size(0); thrust::copy(ptr, ptr + bs, input_prefix.begin()); ptr = inputs.data_ptr<T>(); thrust::copy(ptr, ptr + bs, total_inputs.begin()); thrust::exclusive_scan(policy, input_prefix.begin(), input_prefix.end(), input_prefix.begin()); ptr = outputs.data_ptr<T>(); bs = outputs.size(0); thrust::copy(ptr, ptr + bs, total_outputs.begin()); const size_t m = inputs.size(0); using it = thrust::counting_iterator<T>; thrust::device_vector<T> subset; TorchQuiver::reindex_kernel(stream, total_inputs, total_outputs, subset); int tot = total_outputs.size(); torch::Tensor out_vertices = torch::empty(subset.size(), inputs.options()); torch::Tensor row_idx = torch::empty(tot, inputs.options()); torch::Tensor col_idx = torch::empty(tot, inputs.options()); { thrust::device_vector<T> seq(count.size(0)); thrust::sequence(policy, seq.begin(), seq.end()); thrust::for_each( policy, it(0), it(m), [prefix = thrust::raw_pointer_cast(input_prefix.data()), count = count.data_ptr<T>(), in = thrust::raw_pointer_cast(seq.data()), out = thrust::raw_pointer_cast( row_idx.data_ptr<T>())] __device__(T i) { for (int j = 0; j < count[i]; j++) { out[prefix[i] + j] = in[i]; } }); thrust::copy(subset.begin(), subset.end(), out_vertices.data_ptr<T>()); 
thrust::copy(total_outputs.begin(), total_outputs.end(), col_idx.data_ptr<T>()); } return std::make_tuple(out_vertices, row_idx, col_idx); } TorchQuiver new_quiver_from_csr_array(torch::Tensor &input_indptr, torch::Tensor &input_indices, torch::Tensor &input_edge_idx, int device = 0, bool cuda = false) { hipSetDevice(device); TRACE_SCOPE(__func__); using T = typename TorchQuiver::T; check_eq<int64_t>(input_indptr.dim(), 1); const size_t node_count = input_indptr.size(0); check_eq<int64_t>(input_indices.dim(), 1); const size_t edge_count = input_indices.size(0); bool use_eid = input_edge_idx.size(0) == edge_count; /* In Zero-Copy Mode, We Do These Steps: 0. Copy The Data If Needed 1. Register Buffer As Mapped Pinned Memory 2. Get Device Pointer In GPU Memory Space 3. Intiliaze A Quiver Instance And Return */ T *indptr_device_pointer = nullptr; T *indices_device_pointer = nullptr; T *edge_id_device_pointer = nullptr; {/*if (!cuda) { const T *indptr_original = reinterpret_cast<const T *>(input_indptr.data_ptr<T>()); // Register Buffer As Mapped Pinned Memory quiverRegister((void *)indptr_original, sizeof(T) * node_count, hipHostRegisterMapped); // Get Device Pointer In GPU Memory Space hipHostGetDevicePointer((void **)&indptr_device_pointer, (void *)indptr_original, 0); } else */ {const T *indptr_original = reinterpret_cast<const T *>(input_indptr.data_ptr<T>()); T *indptr_copy; hipMalloc((void **)&indptr_copy, sizeof(T) * node_count); hipMemcpy((void *)indptr_copy, (void *)indptr_original, sizeof(T) * node_count, hipMemcpyDefault); indptr_device_pointer = indptr_copy; } } // namespace quiver // std::cout<<"mapped indptr"<<std::endl; { if (!cuda) { const T *indices_original = reinterpret_cast<const T *>(input_indices.data_ptr<T>()); // Register Buffer As Mapped Pinned Memory quiverRegister((void *)indices_original, sizeof(T) * edge_count, hipHostRegisterMapped); // Get Device Pointer In GPU Memory Space hipHostGetDevicePointer((void **)&indices_device_pointer, (void *)indices_original, 0); } else { const T *indices_original = reinterpret_cast<const T *>(input_indices.data_ptr<T>()); T *indices_copy; hipMalloc((void **)&indices_copy, sizeof(T) * edge_count); hipMemcpy((void *)indices_copy, (void *)indices_original, sizeof(T) * edge_count, hipMemcpyDefault); indices_device_pointer = indices_copy; } } // std::cout<<"mapped indices"<<std::endl; if (use_eid) { if (!cuda) { const T *id_original = reinterpret_cast<const T *>(input_edge_idx.data_ptr<T>()); // Register Buffer As Mapped Pinned Memory quiverRegister((void *)id_original, sizeof(T) * edge_count, hipHostRegisterMapped); // Get Device Pointer In GPU Memory Space hipHostGetDevicePointer((void **)&edge_id_device_pointer, (void *)id_original, 0); } else { const T *id_original = reinterpret_cast<const T *>(input_edge_idx.data_ptr<T>()); T *id_copy; hipMalloc((void **)&id_copy, sizeof(T) * edge_count); hipMemcpy((void *)id_copy, (void *)id_original, sizeof(T) * edge_count, hipMemcpyDefault); edge_id_device_pointer = id_copy; } } // std::cout<<"mapped edge id "<<std::endl; // initialize Quiver instance using Q = quiver<int64_t, CUDA>; Q quiver = Q::New(indptr_device_pointer, indices_device_pointer, edge_id_device_pointer, node_count - 1, edge_count); return TorchQuiver(std::move(quiver), device); } TorchQuiver new_quiver_from_edge_index(size_t n, py::array_t<int64_t> &input_edges, py::array_t<int64_t> &input_edge_idx, int device = 0) { hipSetDevice(device); TRACE_SCOPE(__func__); using T = typename TorchQuiver::T; py::buffer_info edges = 
input_edges.request(); py::buffer_info edge_idx = input_edge_idx.request(); check_eq<int64_t>(edges.ndim, 2); check_eq<int64_t>(edges.shape[0], 2); const size_t m = edges.shape[1]; check_eq<int64_t>(edge_idx.ndim, 1); bool use_eid = edge_idx.shape[0] == m; thrust::device_vector<T> row_idx(m); thrust::device_vector<T> col_idx(m); { const T *p = reinterpret_cast<const T *>(edges.ptr); thrust::copy(p, p + m, row_idx.begin()); thrust::copy(p + m, p + m * 2, col_idx.begin()); } thrust::device_vector<T> edge_idx_; if (use_eid) { edge_idx_.resize(m); const T *p = reinterpret_cast<const T *>(edge_idx.ptr); thrust::copy(p, p + m, edge_idx_.begin()); } using Q = quiver<int64_t, CUDA>; Q quiver = Q::New(static_cast<T>(n), std::move(row_idx), std::move(col_idx), std::move(edge_idx_)); return TorchQuiver(std::move(quiver), device); } } // namespace quiver void register_cuda_quiver_sample(pybind11::module &m) { m.def("reindex_single", &quiver::reindex_single); m.def("new_quiver_from_edge_index", &quiver::new_quiver_from_edge_index); m.def("new_quiver_from_csr_array", &quiver::new_quiver_from_csr_array); py::class_<quiver::TorchQuiver>(m, "Quiver") .def("sample_sub", &quiver::TorchQuiver::sample_sub_with_stream, py::call_guard<py::gil_scoped_release>()) .def("sample_neighbor", &quiver::TorchQuiver::sample_neighbor, py::call_guard<py::gil_scoped_release>()); }
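Both rows of this quiver pair turn per-vertex neighbour counts into the flat (row_idx, col_idx) output the same way: an exclusive_scan over the counts yields write offsets, then a for_each over vertex ids writes vertex i into row_idx exactly count[i] times. The standalone sketch below isolates that pattern with illustrative names; it is not the project's code, it compiles against either CUDA Thrust or rocThrust, and the __device__ lambda needs nvcc's --extended-lambda (or hipcc) to build.

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/for_each.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>

// counts[i] = number of sampled neighbours of vertex i.
// On return, row_idx holds i repeated counts[i] times, in CSR order.
void expand_counts(const thrust::device_vector<int64_t> &counts,
                   thrust::device_vector<int64_t> &row_idx)
{
    if (counts.empty()) { row_idx.clear(); return; }

    thrust::device_vector<int64_t> prefix(counts.size());
    thrust::exclusive_scan(thrust::device, counts.begin(), counts.end(),
                           prefix.begin());

    row_idx.resize(prefix.back() + counts.back());    // total neighbour count

    thrust::for_each(
        thrust::device,
        thrust::counting_iterator<int64_t>(0),
        thrust::counting_iterator<int64_t>((int64_t)counts.size()),
        [cnt = thrust::raw_pointer_cast(counts.data()),
         pre = thrust::raw_pointer_cast(prefix.data()),
         out = thrust::raw_pointer_cast(row_idx.data())] __device__(int64_t i) {
            for (int64_t j = 0; j < cnt[i]; ++j)
                out[pre[i] + j] = i;                   // source vertex id, repeated
        });
}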
f7f759ca52fbe0054f3bd57892bd2a9e2b2dfa5e.cu
#include <algorithm> #include <numeric> #include <thrust/device_vector.h> #include <pybind11/numpy.h> #include <torch/extension.h> #include <quiver/common.hpp> #include <quiver/functor.cu.hpp> #include <quiver/quiver.cu.hpp> #include <quiver/reindex.cu.hpp> #include <quiver/stream_pool.hpp> #include <quiver/trace.hpp> #include <quiver/zip.hpp> #include <thrust/remove.h> template <typename IdType> HostOrderedHashTable<IdType> * FillWithDuplicates(const IdType *const input, const size_t num_input, cudaStream_t stream, thrust::device_vector<IdType> &unique_items) { const auto policy = thrust::cuda::par.on(stream); const int64_t num_tiles = (num_input + TILE_SIZE - 1) / TILE_SIZE; const dim3 grid(num_tiles); const dim3 block(BLOCK_SIZE); auto host_table = new HostOrderedHashTable<IdType>(num_input, 1); DeviceOrderedHashTable<IdType> device_table = host_table->DeviceHandle(); generate_hashmap_duplicates<IdType, BLOCK_SIZE, TILE_SIZE> <<<grid, block, 0, stream>>>(input, num_input, device_table); thrust::device_vector<int> item_prefix(num_input + 1, 0); using it = thrust::counting_iterator<IdType>; using Mapping = typename DeviceOrderedHashTable<IdType>::Mapping; thrust::for_each(it(0), it(num_input), [count = thrust::raw_pointer_cast(item_prefix.data()), table = device_table, in = input] __device__(IdType i) mutable { Mapping &mapping = *(table.Search(in[i])); if (mapping.index == i) { count[i] = 1; } }); thrust::exclusive_scan(item_prefix.begin(), item_prefix.end(), item_prefix.begin()); size_t tot = item_prefix[num_input]; unique_items.resize(tot); thrust::for_each(it(0), it(num_input), [prefix = thrust::raw_pointer_cast(item_prefix.data()), table = device_table, in = input, u = thrust::raw_pointer_cast( unique_items.data())] __device__(IdType i) mutable { Mapping &mapping = *(table.Search(in[i])); if (mapping.index == i) { mapping.local = prefix[i]; u[prefix[i]] = in[i]; } }); return host_table; } namespace quiver { template <typename T> void replicate_fill(size_t n, const T *counts, const T *values, T *outputs) { for (size_t i = 0; i < n; ++i) { const size_t c = counts[i]; std::fill(outputs, outputs + c, values[i]); outputs += c; } } class TorchQuiver { using torch_quiver_t = quiver<int64_t, CUDA>; torch_quiver_t quiver_; stream_pool pool_; public: TorchQuiver(torch_quiver_t quiver, int device = 0, int num_workers = 4) : quiver_(std::move(quiver)) { pool_ = stream_pool(num_workers); } using T = int64_t; using W = float; // deprecated, not compatible with AliGraph std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> sample_sub(const torch::Tensor &vertices, int k) const { return sample_sub_with_stream(0, vertices, k); } std::tuple<torch::Tensor, torch::Tensor> sample_neighbor(int stream_num, const torch::Tensor &vertices, int k) { cudaStream_t stream = 0; if (!pool_.empty()) { stream = (pool_)[stream_num]; } const auto policy = thrust::cuda::par.on(stream); const size_t bs = vertices.size(0); thrust::device_vector<T> inputs; thrust::device_vector<T> outputs; thrust::device_vector<T> output_counts; sample_kernel(stream, vertices, k, inputs, outputs, output_counts); torch::Tensor neighbors = torch::empty(outputs.size(), vertices.options()); torch::Tensor counts = torch::empty(vertices.size(0), vertices.options()); thrust::copy(outputs.begin(), outputs.end(), neighbors.data_ptr<T>()); thrust::copy(output_counts.begin(), output_counts.end(), counts.data_ptr<T>()); return std::make_tuple(neighbors, counts); } std::tuple<torch::Tensor, torch::Tensor> sample_kernel(const cudaStream_t stream, 
const torch::Tensor &vertices, int k, thrust::device_vector<T> &inputs, thrust::device_vector<T> &outputs, thrust::device_vector<T> &output_counts) const { T tot = 0; const auto policy = thrust::cuda::par.on(stream); thrust::device_vector<T> output_ptr; thrust::device_vector<T> output_idx; const T *p = vertices.data_ptr<T>(); const size_t bs = vertices.size(0); { TRACE_SCOPE("alloc_1"); inputs.resize(bs); output_counts.resize(bs); output_ptr.resize(bs); } // output_ptr is exclusive prefix sum of output_counts(neighbor counts // <= k) { TRACE_SCOPE("prepare"); thrust::copy(p, p + bs, inputs.begin()); // quiver_.to_local(stream, inputs); quiver_.degree(stream, inputs.data(), inputs.data() + inputs.size(), output_counts.data()); if (k >= 0) { thrust::transform(policy, output_counts.begin(), output_counts.end(), output_counts.begin(), cap_by<T>(k)); } thrust::exclusive_scan(policy, output_counts.begin(), output_counts.end(), output_ptr.begin()); tot = thrust::reduce(policy, output_counts.begin(), output_counts.end()); } { TRACE_SCOPE("alloc_2"); outputs.resize(tot); output_idx.resize(tot); } // outputs[outptr[i], outptr[i + 1]) are unique neighbors of inputs[i] // { // TRACE_SCOPE("sample"); // quiver_.sample(stream, inputs.begin(), inputs.end(), // output_ptr.begin(), output_counts.begin(), // outputs.data(), output_eid.data()); // } { TRACE_SCOPE("sample"); quiver_.new_sample( stream, k, thrust::raw_pointer_cast(inputs.data()), inputs.size(), thrust::raw_pointer_cast(output_ptr.data()), thrust::raw_pointer_cast(output_counts.data()), thrust::raw_pointer_cast(outputs.data()), thrust::raw_pointer_cast(output_idx.data())); } torch::Tensor out_neighbor; torch::Tensor out_eid; // thrust::copy(outputs.begin(), outputs.end(), // out_neighbor.data_ptr<T>()); // thrust::copy(output_eid.begin(), output_eid.end(), // out_eid.data_ptr<T>()); return std::make_tuple(out_neighbor, out_eid); } static void reindex_kernel(const cudaStream_t stream, thrust::device_vector<T> &inputs, thrust::device_vector<T> &outputs, thrust::device_vector<T> &subset) { const auto policy = thrust::cuda::par.on(stream); HostOrderedHashTable<T> *table; // reindex { { TRACE_SCOPE("reindex 0"); subset.resize(inputs.size() + outputs.size()); thrust::copy(policy, inputs.begin(), inputs.end(), subset.begin()); thrust::copy(policy, outputs.begin(), outputs.end(), subset.begin() + inputs.size()); thrust::device_vector<T> unique_items; unique_items.clear(); table = FillWithDuplicates(thrust::raw_pointer_cast(subset.data()), subset.size(), stream, unique_items); subset.resize(unique_items.size()); thrust::copy(policy, unique_items.begin(), unique_items.end(), subset.begin()); // thrust::sort(policy, subset.begin(), subset.end()); // subset.erase( // thrust::unique(policy, subset.begin(), subset.end()), // subset.end()); // _reindex_with(policy, outputs, subset, outputs); } { TRACE_SCOPE("permute"); // thrust::device_vector<T> s1; // s1.reserve(subset.size()); // _reindex_with(policy, inputs, subset, s1); // complete_permutation(s1, subset.size(), stream); // subset = permute(s1, subset, stream); // thrust::device_vector<T> s2; // inverse_permutation(s1, s2, stream); // permute_value(s2, outputs, stream); DeviceOrderedHashTable<T> device_table = table->DeviceHandle(); thrust::for_each( policy, outputs.begin(), outputs.end(), [device_table] __device__(T & id) mutable { using Iterator = typename DeviceOrderedHashTable<T>::Iterator; Iterator iter = device_table.Search(id); id = static_cast<T>((*iter).local); }); } delete table; } } 
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> sample_sub_with_stream(int stream_num, const torch::Tensor &vertices, int k) const { TRACE_SCOPE(__func__); cudaStream_t stream = 0; if (!pool_.empty()) { stream = (pool_)[stream_num]; } const auto policy = thrust::cuda::par.on(stream); thrust::device_vector<T> inputs; thrust::device_vector<T> outputs; thrust::device_vector<T> output_counts; thrust::device_vector<T> subset; sample_kernel(stream, vertices, k, inputs, outputs, output_counts); int tot = outputs.size(); reindex_kernel(stream, inputs, outputs, subset); torch::Tensor out_vertices = torch::empty(subset.size(), vertices.options()); torch::Tensor row_idx = torch::empty(tot, vertices.options()); torch::Tensor col_idx = torch::empty(tot, vertices.options()); { TRACE_SCOPE("prepare output"); thrust::device_vector<T> prefix_count(output_counts.size()); thrust::device_vector<T> seq(output_counts.size()); thrust::sequence(policy, seq.begin(), seq.end()); thrust::exclusive_scan(policy, output_counts.begin(), output_counts.end(), prefix_count.begin()); const size_t m = inputs.size(); using it = thrust::counting_iterator<T>; thrust::for_each( policy, it(0), it(m), [prefix = thrust::raw_pointer_cast(prefix_count.data()), count = thrust::raw_pointer_cast(output_counts.data()), in = thrust::raw_pointer_cast(seq.data()), out = thrust::raw_pointer_cast( row_idx.data_ptr<T>())] __device__(T i) { for (int j = 0; j < count[i]; j++) { out[prefix[i] + j] = in[i]; } }); thrust::copy(subset.begin(), subset.end(), out_vertices.data_ptr<T>()); thrust::copy(outputs.begin(), outputs.end(), col_idx.data_ptr<T>()); } return std::make_tuple(out_vertices, row_idx, col_idx); } }; std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> reindex_single(torch::Tensor inputs, torch::Tensor outputs, torch::Tensor count) { using T = int64_t; cudaStream_t stream = 0; const auto policy = thrust::cuda::par.on(stream); thrust::device_vector<T> total_inputs(inputs.size(0)); thrust::device_vector<T> total_outputs(outputs.size(0)); thrust::device_vector<T> input_prefix(inputs.size(0)); const T *ptr; size_t bs; ptr = count.data_ptr<T>(); bs = inputs.size(0); thrust::copy(ptr, ptr + bs, input_prefix.begin()); ptr = inputs.data_ptr<T>(); thrust::copy(ptr, ptr + bs, total_inputs.begin()); thrust::exclusive_scan(policy, input_prefix.begin(), input_prefix.end(), input_prefix.begin()); ptr = outputs.data_ptr<T>(); bs = outputs.size(0); thrust::copy(ptr, ptr + bs, total_outputs.begin()); const size_t m = inputs.size(0); using it = thrust::counting_iterator<T>; thrust::device_vector<T> subset; TorchQuiver::reindex_kernel(stream, total_inputs, total_outputs, subset); int tot = total_outputs.size(); torch::Tensor out_vertices = torch::empty(subset.size(), inputs.options()); torch::Tensor row_idx = torch::empty(tot, inputs.options()); torch::Tensor col_idx = torch::empty(tot, inputs.options()); { thrust::device_vector<T> seq(count.size(0)); thrust::sequence(policy, seq.begin(), seq.end()); thrust::for_each( policy, it(0), it(m), [prefix = thrust::raw_pointer_cast(input_prefix.data()), count = count.data_ptr<T>(), in = thrust::raw_pointer_cast(seq.data()), out = thrust::raw_pointer_cast( row_idx.data_ptr<T>())] __device__(T i) { for (int j = 0; j < count[i]; j++) { out[prefix[i] + j] = in[i]; } }); thrust::copy(subset.begin(), subset.end(), out_vertices.data_ptr<T>()); thrust::copy(total_outputs.begin(), total_outputs.end(), col_idx.data_ptr<T>()); } return std::make_tuple(out_vertices, row_idx, col_idx); } TorchQuiver 
new_quiver_from_csr_array(torch::Tensor &input_indptr, torch::Tensor &input_indices, torch::Tensor &input_edge_idx, int device = 0, bool cuda = false) { cudaSetDevice(device); TRACE_SCOPE(__func__); using T = typename TorchQuiver::T; check_eq<int64_t>(input_indptr.dim(), 1); const size_t node_count = input_indptr.size(0); check_eq<int64_t>(input_indices.dim(), 1); const size_t edge_count = input_indices.size(0); bool use_eid = input_edge_idx.size(0) == edge_count; /* In Zero-Copy Mode, We Do These Steps: 0. Copy The Data If Needed 1. Register Buffer As Mapped Pinned Memory 2. Get Device Pointer In GPU Memory Space 3. Intiliaze A Quiver Instance And Return */ T *indptr_device_pointer = nullptr; T *indices_device_pointer = nullptr; T *edge_id_device_pointer = nullptr; {/*if (!cuda) { const T *indptr_original = reinterpret_cast<const T *>(input_indptr.data_ptr<T>()); // Register Buffer As Mapped Pinned Memory quiverRegister((void *)indptr_original, sizeof(T) * node_count, cudaHostRegisterMapped); // Get Device Pointer In GPU Memory Space cudaHostGetDevicePointer((void **)&indptr_device_pointer, (void *)indptr_original, 0); } else */ {const T *indptr_original = reinterpret_cast<const T *>(input_indptr.data_ptr<T>()); T *indptr_copy; cudaMalloc((void **)&indptr_copy, sizeof(T) * node_count); cudaMemcpy((void *)indptr_copy, (void *)indptr_original, sizeof(T) * node_count, cudaMemcpyDefault); indptr_device_pointer = indptr_copy; } } // namespace quiver // std::cout<<"mapped indptr"<<std::endl; { if (!cuda) { const T *indices_original = reinterpret_cast<const T *>(input_indices.data_ptr<T>()); // Register Buffer As Mapped Pinned Memory quiverRegister((void *)indices_original, sizeof(T) * edge_count, cudaHostRegisterMapped); // Get Device Pointer In GPU Memory Space cudaHostGetDevicePointer((void **)&indices_device_pointer, (void *)indices_original, 0); } else { const T *indices_original = reinterpret_cast<const T *>(input_indices.data_ptr<T>()); T *indices_copy; cudaMalloc((void **)&indices_copy, sizeof(T) * edge_count); cudaMemcpy((void *)indices_copy, (void *)indices_original, sizeof(T) * edge_count, cudaMemcpyDefault); indices_device_pointer = indices_copy; } } // std::cout<<"mapped indices"<<std::endl; if (use_eid) { if (!cuda) { const T *id_original = reinterpret_cast<const T *>(input_edge_idx.data_ptr<T>()); // Register Buffer As Mapped Pinned Memory quiverRegister((void *)id_original, sizeof(T) * edge_count, cudaHostRegisterMapped); // Get Device Pointer In GPU Memory Space cudaHostGetDevicePointer((void **)&edge_id_device_pointer, (void *)id_original, 0); } else { const T *id_original = reinterpret_cast<const T *>(input_edge_idx.data_ptr<T>()); T *id_copy; cudaMalloc((void **)&id_copy, sizeof(T) * edge_count); cudaMemcpy((void *)id_copy, (void *)id_original, sizeof(T) * edge_count, cudaMemcpyDefault); edge_id_device_pointer = id_copy; } } // std::cout<<"mapped edge id "<<std::endl; // initialize Quiver instance using Q = quiver<int64_t, CUDA>; Q quiver = Q::New(indptr_device_pointer, indices_device_pointer, edge_id_device_pointer, node_count - 1, edge_count); return TorchQuiver(std::move(quiver), device); } TorchQuiver new_quiver_from_edge_index(size_t n, py::array_t<int64_t> &input_edges, py::array_t<int64_t> &input_edge_idx, int device = 0) { cudaSetDevice(device); TRACE_SCOPE(__func__); using T = typename TorchQuiver::T; py::buffer_info edges = input_edges.request(); py::buffer_info edge_idx = input_edge_idx.request(); check_eq<int64_t>(edges.ndim, 2); check_eq<int64_t>(edges.shape[0], 
2); const size_t m = edges.shape[1]; check_eq<int64_t>(edge_idx.ndim, 1); bool use_eid = edge_idx.shape[0] == m; thrust::device_vector<T> row_idx(m); thrust::device_vector<T> col_idx(m); { const T *p = reinterpret_cast<const T *>(edges.ptr); thrust::copy(p, p + m, row_idx.begin()); thrust::copy(p + m, p + m * 2, col_idx.begin()); } thrust::device_vector<T> edge_idx_; if (use_eid) { edge_idx_.resize(m); const T *p = reinterpret_cast<const T *>(edge_idx.ptr); thrust::copy(p, p + m, edge_idx_.begin()); } using Q = quiver<int64_t, CUDA>; Q quiver = Q::New(static_cast<T>(n), std::move(row_idx), std::move(col_idx), std::move(edge_idx_)); return TorchQuiver(std::move(quiver), device); } } // namespace quiver void register_cuda_quiver_sample(pybind11::module &m) { m.def("reindex_single", &quiver::reindex_single); m.def("new_quiver_from_edge_index", &quiver::new_quiver_from_edge_index); m.def("new_quiver_from_csr_array", &quiver::new_quiver_from_csr_array); py::class_<quiver::TorchQuiver>(m, "Quiver") .def("sample_sub", &quiver::TorchQuiver::sample_sub_with_stream, py::call_guard<py::gil_scoped_release>()) .def("sample_neighbor", &quiver::TorchQuiver::sample_neighbor, py::call_guard<py::gil_scoped_release>()); }
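new_quiver_from_csr_array above documents a "Zero-Copy Mode" (register an existing host buffer as mapped pinned memory, then fetch a device-side alias), but routes the registration through quiverRegister, whose definition is not in this row. The sketch below writes the same two steps directly against the CUDA runtime API; treating quiverRegister as a thin wrapper over cudaHostRegister is an assumption on my part.

#include <cuda_runtime.h>

// Map an existing host buffer into the GPU's address space without copying it.
int64_t *map_host_buffer(int64_t *host_ptr, size_t count)
{
    // 1. Register the host allocation as mapped pinned memory.
    cudaHostRegister(host_ptr, count * sizeof(int64_t), cudaHostRegisterMapped);

    // 2. Obtain a device pointer that aliases the same memory.
    int64_t *dev_ptr = nullptr;
    cudaHostGetDevicePointer((void **)&dev_ptr, (void *)host_ptr, 0);

    // Kernels may now dereference dev_ptr directly (reads travel over the bus);
    // release the mapping later with cudaHostUnregister(host_ptr).
    return dev_ptr;
}

The trade-off, and presumably why the code above also keeps a cudaMalloc + cudaMemcpy path, is that zero-copy reads pay bus latency on every access, which is usually only worthwhile when the graph does not fit in device memory or is touched sparsely.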
101e7a44a12ee4f8decd7a3ccdc50e7037a727e3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void cycfold_multichannel(const float2 *pol0, const float2 *pol1,
        const double *phase, const double *step, const int fftlen,
        const int overlap, const int nbin, const int nlag, const int num_fft,
        float2 *xx, float2 *yy, float2 *xy, unsigned *hits)
{
    // Lag is specified with threadIdx.x and blockIdx.x since there could be
    // more lags than allowed threads.
    const int ilaga = threadIdx.x;
    const int nlaga = blockDim.x;
    const int ilagb = blockIdx.x;
    const int ilag = ilagb*nlaga + ilaga;

    // Phase bin is blockIdx.y
    const int ibin = blockIdx.y;

    // Filterbank channel is blockIdx.z
    const int ichan = blockIdx.z;

    const int num_valid_samples = fftlen - overlap;

    // accumulators for the various lag terms
    float2 foldxxlag = make_float2(0,0);
    float2 foldyylag = make_float2(0,0);
    float2 foldxylag = make_float2(0,0);

    __shared__ int samp0;
    __shared__ int samp1;

    // Number of hits for this phase/lag bin
    int foldcount = 0;

    for (int ifft=0; ifft < num_fft; ifft++){
        // Pointers to the first valid sample for this channel and fft
        const float2 *ptr0 = pol0 + ichan*fftlen*num_fft + ifft*fftlen + overlap/2;
        const float2 *ptr1 = pol1 + ichan*fftlen*num_fft + ifft*fftlen + overlap/2;

        // Fold info
        const double bin0 = phase[ifft];
        const double bins_per_sample = step[ifft];           // bins/sample
        const double samples_per_bin = 1.0/bins_per_sample;  // samples/bin
        const int num_turns = ((double)num_valid_samples*bins_per_sample)/(double)nbin + 2;

        // Loop over number of pulse periods in data block
        for (int iturn=0; iturn<num_turns; iturn++) {

            // Determine range of samples needed for this bin, turn
            if(ilaga == 0){
                samp0 = samples_per_bin*((double)ibin-bin0+(double)iturn*nbin)+0.5;
                samp1 = samples_per_bin*((double)ibin-bin0+(double)iturn*nbin+1)+0.5;

                // Range checks
                if (samp0<0) { samp0=0; }
                if (samp1<0) { samp1=0; }
                if (samp0>num_valid_samples) { samp0=num_valid_samples; }
                if (samp1>num_valid_samples) { samp1=num_valid_samples; }
            }
            __syncthreads();

            // Read in and add samples
            int lag_index;
            for (int isamp=samp0; isamp<samp1; isamp++) {
                lag_index = isamp + ilag - nlag/2;
                if((lag_index >= 0) && (lag_index < num_valid_samples)){
                    float2 p0 = ptr0[isamp];
                    float2 p0lag = ptr0[lag_index];
                    float2 p1 = ptr1[isamp];
                    float2 p1lag = ptr1[lag_index];

                    // <Pol0 x Pol0_lag*>
                    foldxxlag.x += p0.x*p0lag.x + p0.y*p0lag.y;
                    foldxxlag.y += p0.y*p0lag.x - p0.x*p0lag.y;

                    // <Pol1 x Pol1_lag*>
                    foldyylag.x += p1.x*p1lag.x + p1.y*p1lag.y;
                    foldyylag.y += p1.y*p1lag.x - p1.x*p1lag.y;

                    // <Pol0 x Pol1_lag*>
                    foldxylag.x += p0.x*p1lag.x + p0.y*p1lag.y;
                    foldxylag.y += p0.y*p1lag.x - p0.x*p1lag.y;

                    foldcount++;
                }
            }
        }
    }

    xx[ichan*nlag*nbin+nlag*ibin+ilag] = foldxxlag;
    yy[ichan*nlag*nbin+nlag*ibin+ilag] = foldyylag;
    xy[ichan*nlag*nbin+nlag*ibin+ilag] = foldxylag;
    hits[ichan*nlag*nbin+nlag*ibin+ilag] = foldcount;
}
101e7a44a12ee4f8decd7a3ccdc50e7037a727e3.cu
#include <stdio.h>

__global__ void cycfold_multichannel(const float2 *pol0, const float2 *pol1,
                                     const double *phase, const double *step,
                                     const int fftlen, const int overlap,
                                     const int nbin, const int nlag, const int num_fft,
                                     float2 *xx, float2 *yy, float2 *xy, unsigned *hits)
{
    // Lag is specified with threadIdx.x and blockIdx.x since there could be
    // more lags than allowed threads.
    const int ilaga = threadIdx.x;
    const int nlaga = blockDim.x;
    const int ilagb = blockIdx.x;
    const int ilag = ilagb*nlaga + ilaga;

    // Phase bin is blockIdx.y
    const int ibin = blockIdx.y;

    // Filterbank channel is blockIdx.z
    const int ichan = blockIdx.z;

    const int num_valid_samples = fftlen - overlap;

    // accumulators for the various lag terms
    float2 foldxxlag = make_float2(0,0);
    float2 foldyylag = make_float2(0,0);
    float2 foldxylag = make_float2(0,0);

    __shared__ int samp0;
    __shared__ int samp1;

    // Number of hits for this phase/lag bin
    int foldcount = 0;

    for (int ifft=0; ifft < num_fft; ifft++){
        // Pointers to the first valid sample for this channel and fft
        const float2 *ptr0 = pol0 + ichan*fftlen*num_fft + ifft*fftlen + overlap/2;
        const float2 *ptr1 = pol1 + ichan*fftlen*num_fft + ifft*fftlen + overlap/2;

        // Fold info
        const double bin0 = phase[ifft];
        const double bins_per_sample = step[ifft];           // bins/sample
        const double samples_per_bin = 1.0/bins_per_sample;  // samples/bin
        const int num_turns = ((double)num_valid_samples*bins_per_sample)/(double)nbin + 2;

        // Loop over number of pulse periods in data block
        for (int iturn=0; iturn<num_turns; iturn++) {

            // Determine range of samples needed for this bin, turn
            if(ilaga == 0){
                samp0 = samples_per_bin*((double)ibin-bin0+(double)iturn*nbin)+0.5;
                samp1 = samples_per_bin*((double)ibin-bin0+(double)iturn*nbin+1)+0.5;

                // Range checks
                if (samp0<0) { samp0=0; }
                if (samp1<0) { samp1=0; }
                if (samp0>num_valid_samples) { samp0=num_valid_samples; }
                if (samp1>num_valid_samples) { samp1=num_valid_samples; }
            }
            __syncthreads();

            // Read in and add samples
            int lag_index;
            for (int isamp=samp0; isamp<samp1; isamp++) {
                lag_index = isamp + ilag - nlag/2;
                if((lag_index >= 0) && (lag_index < num_valid_samples)){
                    float2 p0 = ptr0[isamp];
                    float2 p0lag = ptr0[lag_index];
                    float2 p1 = ptr1[isamp];
                    float2 p1lag = ptr1[lag_index];

                    // <Pol0 x Pol0_lag*>
                    foldxxlag.x += p0.x*p0lag.x + p0.y*p0lag.y;
                    foldxxlag.y += p0.y*p0lag.x - p0.x*p0lag.y;

                    // <Pol1 x Pol1_lag*>
                    foldyylag.x += p1.x*p1lag.x + p1.y*p1lag.y;
                    foldyylag.y += p1.y*p1lag.x - p1.x*p1lag.y;

                    // <Pol0 x Pol1_lag*>
                    foldxylag.x += p0.x*p1lag.x + p0.y*p1lag.y;
                    foldxylag.y += p0.y*p1lag.x - p0.x*p1lag.y;

                    foldcount++;
                }
            }
        }
    }

    xx[ichan*nlag*nbin+nlag*ibin+ilag] = foldxxlag;
    yy[ichan*nlag*nbin+nlag*ibin+ilag] = foldyylag;
    xy[ichan*nlag*nbin+nlag*ibin+ilag] = foldxylag;
    hits[ichan*nlag*nbin+nlag*ibin+ilag] = foldcount;
}
adcebdd92c70b94b2f366459981f128b3f77b184.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include "bmp.h" #define ITERATIONS 256 #define DOTS_PER_UNIT 2500 #define THREADX 16 #define THREADY 16 __constant__ COLORTRIPLE black = {0, 0, 0}; __constant__ COLORTRIPLE c0 = {0, 0, 51}; __constant__ COLORTRIPLE c1 = {0, 0, 153}; __constant__ COLORTRIPLE c2 = {0, 51, 255}; __constant__ COLORTRIPLE c3 = {51, 153, 255}; __constant__ COLORTRIPLE c4 = {0, 204, 255}; __constant__ COLORTRIPLE c5 = {51, 255, 255}; __constant__ COLORTRIPLE c6 = {0, 204, 0}; __constant__ COLORTRIPLE c7 = {255, 255, 0}; __constant__ COLORTRIPLE c8 = {255, 0, 0}; __global__ void mandelb (COLORTRIPLE *pixelPtr) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; double re, im; re = -2.0 + i*(1.0/DOTS_PER_UNIT); im = -1.0 + j*(1.0/DOTS_PER_UNIT); if((re >= 1) || (im >= 1)){ return; } if ( (re*re + im*im) > 4){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c0; return; }else{ double zkr = re; double zki = im; double zkr_prev , zki_prev; for (int n = 1; n <= ITERATIONS; n++){ zkr_prev = zkr; zki_prev = zki; zkr = re + zkr_prev*zkr_prev - zki_prev*zki_prev ; zki = im + 2*zkr_prev*zki_prev ; if ((zkr*zkr + zki*zki) > 4){ if(n == 1){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c1; }else if(n == 2){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c2; }else if(n == 3){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c3; }else if((n >= 4) && (n < 8)){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c4; }else if((n >= 8) && (n < 16)){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c5; }else if((n >= 16) && (n < 32)){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c6; }else if((n >= 32) && (n < 64)){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c7; }else if(n >= 64){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c8; } return; } } *(pixelPtr + j*3*DOTS_PER_UNIT + i) = black; return; } } void WriteBitmap (BITMAP bitmap, FILE *fp) { COLORTRIPLE triple; unsigned char fillbyte = 0; int nstep; int i, j, k, fill; fwrite (&bitmap.fileheader, sizeof (FILEHEADER), 1, fp); fwrite (&bitmap.bmpheader, sizeof (BMPHEADER), 1, fp); /* number of bytes in a row must be multiple of 4 */ fill = bitmap.width % 4; for (i = 0; i < bitmap.height; i++) { for (j = 0; j < bitmap.width; j++) { nstep = j + (i * bitmap.width); triple = bitmap.pixel [nstep]; fwrite (&triple, sizeof(COLORTRIPLE), 1, fp); } for (k = 0; k < fill; k++) fwrite (&fillbyte, sizeof(unsigned char), 1, fp); } #ifdef BMPSHOWALL printf ("%d pixels written\n", nstep + 1); #endif return; } void ReleaseBitmapData (BITMAP *bitmap) { hipFree ((*bitmap).pixel); (*bitmap).bmpheader.ImageHeight = (*bitmap).height = 0; (*bitmap).bmpheader.ImageWidth = (*bitmap).width = 0; (*bitmap).pixel = NULL; return; } BITMAP CreateEmptyBitmap (dword height, dword width) { BITMAP bitmap; #ifdef BMPSHOWALL printf ("Creating empty bitmap %d x %d pixels\n", height, width); #endif /* bitmap header */ bitmap.fileheader.ImageFileType = BMPFILETYPE; /* magic number! 
*/ bitmap.fileheader.FileSize = 14 + 40 + height * width * 3; bitmap.fileheader.Reserved1 = 0; bitmap.fileheader.Reserved2 = 0; bitmap.fileheader.ImageDataOffset = 14 + 40; /* bmp header */ bitmap.bmpheader.HeaderSize = 40; bitmap.bmpheader.ImageWidth = bitmap.width = width; bitmap.bmpheader.ImageHeight = bitmap.height = height; bitmap.bmpheader.NumberOfImagePlanes = 1; bitmap.bmpheader.BitsPerPixel = 24; /* the only supported format */ bitmap.bmpheader.CompressionMethod = 0; /* compression is not supported */ bitmap.bmpheader.SizeOfBitmap = 0; /* conventional value for uncompressed images */ bitmap.bmpheader.HorizonalResolution = 0; /* currently unused */ bitmap.bmpheader.VerticalResolution = 0; /* currently unused */ bitmap.bmpheader.NumberOfColorsUsed = 0; /* dummy value */ bitmap.bmpheader.NumberOfSignificantColors = 0; /* every color is important */ //bitmap.pixel = (COLORTRIPLE *) malloc (sizeof (COLORTRIPLE) * width * height); hipMallocManaged (&(bitmap.pixel), sizeof (COLORTRIPLE) * width * height); if (bitmap.pixel == NULL) { printf ("Memory allocation error\n"); exit (EXIT_FAILURE); } return bitmap; } int main(void){ BITMAP bitmap; FILE *fpout; COLORTRIPLE *pixelPtr; hipEvent_t start, end; float gpuTime; fpout = fopen("mandelbrot_cuda.bmp" , "wb"); bitmap = CreateEmptyBitmap ((int)(2*DOTS_PER_UNIT), (int)(3*DOTS_PER_UNIT)); pixelPtr = bitmap.pixel; hipDeviceSynchronize(); dim3 blocksPerGrid (1 + 3*DOTS_PER_UNIT/THREADX, 1 + 2*DOTS_PER_UNIT/THREADY); dim3 threadsPerBlock (THREADX, THREADY); hipEventCreate (&start); hipEventCreate (&end); hipEventRecord (start); hipLaunchKernelGGL(( mandelb) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, pixelPtr); hipEventRecord (end); hipDeviceSynchronize (); hipEventElapsedTime (&gpuTime, start, end); printf ("Tempo impiegato: %.2f ms\n", gpuTime); WriteBitmap (bitmap, fpout); ReleaseBitmapData (&bitmap); fclose (fpout); return 0; }
adcebdd92c70b94b2f366459981f128b3f77b184.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "bmp.h" #define ITERATIONS 256 #define DOTS_PER_UNIT 2500 #define THREADX 16 #define THREADY 16 __constant__ COLORTRIPLE black = {0, 0, 0}; __constant__ COLORTRIPLE c0 = {0, 0, 51}; __constant__ COLORTRIPLE c1 = {0, 0, 153}; __constant__ COLORTRIPLE c2 = {0, 51, 255}; __constant__ COLORTRIPLE c3 = {51, 153, 255}; __constant__ COLORTRIPLE c4 = {0, 204, 255}; __constant__ COLORTRIPLE c5 = {51, 255, 255}; __constant__ COLORTRIPLE c6 = {0, 204, 0}; __constant__ COLORTRIPLE c7 = {255, 255, 0}; __constant__ COLORTRIPLE c8 = {255, 0, 0}; __global__ void mandelb (COLORTRIPLE *pixelPtr) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; double re, im; re = -2.0 + i*(1.0/DOTS_PER_UNIT); im = -1.0 + j*(1.0/DOTS_PER_UNIT); if((re >= 1) || (im >= 1)){ return; } if ( (re*re + im*im) > 4){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c0; return; }else{ double zkr = re; double zki = im; double zkr_prev , zki_prev; for (int n = 1; n <= ITERATIONS; n++){ zkr_prev = zkr; zki_prev = zki; zkr = re + zkr_prev*zkr_prev - zki_prev*zki_prev ; zki = im + 2*zkr_prev*zki_prev ; if ((zkr*zkr + zki*zki) > 4){ if(n == 1){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c1; }else if(n == 2){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c2; }else if(n == 3){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c3; }else if((n >= 4) && (n < 8)){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c4; }else if((n >= 8) && (n < 16)){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c5; }else if((n >= 16) && (n < 32)){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c6; }else if((n >= 32) && (n < 64)){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c7; }else if(n >= 64){ *(pixelPtr + j*3*DOTS_PER_UNIT + i) = c8; } return; } } *(pixelPtr + j*3*DOTS_PER_UNIT + i) = black; return; } } void WriteBitmap (BITMAP bitmap, FILE *fp) { COLORTRIPLE triple; unsigned char fillbyte = 0; int nstep; int i, j, k, fill; fwrite (&bitmap.fileheader, sizeof (FILEHEADER), 1, fp); fwrite (&bitmap.bmpheader, sizeof (BMPHEADER), 1, fp); /* number of bytes in a row must be multiple of 4 */ fill = bitmap.width % 4; for (i = 0; i < bitmap.height; i++) { for (j = 0; j < bitmap.width; j++) { nstep = j + (i * bitmap.width); triple = bitmap.pixel [nstep]; fwrite (&triple, sizeof(COLORTRIPLE), 1, fp); } for (k = 0; k < fill; k++) fwrite (&fillbyte, sizeof(unsigned char), 1, fp); } #ifdef BMPSHOWALL printf ("%d pixels written\n", nstep + 1); #endif return; } void ReleaseBitmapData (BITMAP *bitmap) { cudaFree ((*bitmap).pixel); (*bitmap).bmpheader.ImageHeight = (*bitmap).height = 0; (*bitmap).bmpheader.ImageWidth = (*bitmap).width = 0; (*bitmap).pixel = NULL; return; } BITMAP CreateEmptyBitmap (dword height, dword width) { BITMAP bitmap; #ifdef BMPSHOWALL printf ("Creating empty bitmap %d x %d pixels\n", height, width); #endif /* bitmap header */ bitmap.fileheader.ImageFileType = BMPFILETYPE; /* magic number! 
*/ bitmap.fileheader.FileSize = 14 + 40 + height * width * 3; bitmap.fileheader.Reserved1 = 0; bitmap.fileheader.Reserved2 = 0; bitmap.fileheader.ImageDataOffset = 14 + 40; /* bmp header */ bitmap.bmpheader.HeaderSize = 40; bitmap.bmpheader.ImageWidth = bitmap.width = width; bitmap.bmpheader.ImageHeight = bitmap.height = height; bitmap.bmpheader.NumberOfImagePlanes = 1; bitmap.bmpheader.BitsPerPixel = 24; /* the only supported format */ bitmap.bmpheader.CompressionMethod = 0; /* compression is not supported */ bitmap.bmpheader.SizeOfBitmap = 0; /* conventional value for uncompressed images */ bitmap.bmpheader.HorizonalResolution = 0; /* currently unused */ bitmap.bmpheader.VerticalResolution = 0; /* currently unused */ bitmap.bmpheader.NumberOfColorsUsed = 0; /* dummy value */ bitmap.bmpheader.NumberOfSignificantColors = 0; /* every color is important */ //bitmap.pixel = (COLORTRIPLE *) malloc (sizeof (COLORTRIPLE) * width * height); cudaMallocManaged (&(bitmap.pixel), sizeof (COLORTRIPLE) * width * height); if (bitmap.pixel == NULL) { printf ("Memory allocation error\n"); exit (EXIT_FAILURE); } return bitmap; } int main(void){ BITMAP bitmap; FILE *fpout; COLORTRIPLE *pixelPtr; cudaEvent_t start, end; float gpuTime; fpout = fopen("mandelbrot_cuda.bmp" , "wb"); bitmap = CreateEmptyBitmap ((int)(2*DOTS_PER_UNIT), (int)(3*DOTS_PER_UNIT)); pixelPtr = bitmap.pixel; cudaDeviceSynchronize(); dim3 blocksPerGrid (1 + 3*DOTS_PER_UNIT/THREADX, 1 + 2*DOTS_PER_UNIT/THREADY); dim3 threadsPerBlock (THREADX, THREADY); cudaEventCreate (&start); cudaEventCreate (&end); cudaEventRecord (start); mandelb <<< blocksPerGrid, threadsPerBlock>>> (pixelPtr); cudaEventRecord (end); cudaDeviceSynchronize (); cudaEventElapsedTime (&gpuTime, start, end); printf ("Tempo impiegato: %.2f ms\n", gpuTime); WriteBitmap (bitmap, fpout); ReleaseBitmapData (&bitmap); fclose (fpout); return 0; }
9115757799c434a269e084865fa9bb29a19884b2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
#include "utils.h"

const int N= 1024;  // matrix size is NxN
const int K= 1;     // TODO, set K to the correct value and tile size will be KxK

// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void transpose_parallel_per_element(float in[], float out[])
{
    //TODO
}

//The following functions and kernels are for your reference
void transpose_CPU(float in[], float out[])
{
    for(int j=0; j < N; j++)
        for(int i=0; i < N; i++)
            out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}

// to be launched on a single thread
__global__ void transpose_serial(float in[], float out[])
{
    for(int j=0; j < N; j++)
        for(int i=0; i < N; i++)
            out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}

// to be launched with one thread per row of output matrix
__global__ void transpose_parallel_per_row(float in[], float out[])
{
    int i = threadIdx.x;

    for(int j=0; j < N; j++)
        out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}

int main(int argc, char **argv)
{
    int numbytes = N * N * sizeof(float);

    float *in = (float *) malloc(numbytes);
    float *out = (float *) malloc(numbytes);
    float *gold = (float *) malloc(numbytes);

    fill_matrix(in, N);
    transpose_CPU(in, gold);

    float *d_in, *d_out;

    hipMalloc(&d_in, numbytes);
    hipMalloc(&d_out, numbytes);
    hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);

    GpuTimer timer;

    /*
     * Now time each kernel and verify that it produces the correct result.
     *
     * To be really careful about benchmarking purposes, we should run every kernel once
     * to "warm" the system and avoid any compilation or code-caching effects, then run
     * every kernel 10 or 100 times and average the timings to smooth out any variance.
     * But this makes for messy code and our goal is teaching, not detailed benchmarking.
     */

    dim3 blocks(1, 1);  // TODO, you need to define the correct blocks per grid
    dim3 threads(1, 1); // TODO, you need to define the correct threads per block

    timer.Start();
    hipLaunchKernelGGL(( transpose_parallel_per_element), dim3(blocks),dim3(threads), 0, 0, d_in, d_out);
    timer.Stop();
    hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
    printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
           timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");

    hipFree(d_in);
    hipFree(d_out);
}
9115757799c434a269e084865fa9bb29a19884b2.cu
#include <stdio.h>
#include "gputimer.h"
#include "utils.h"

const int N= 1024;  // matrix size is NxN
const int K= 1;     // TODO, set K to the correct value and tile size will be KxK

// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void transpose_parallel_per_element(float in[], float out[])
{
    //TODO
}

//The following functions and kernels are for your reference
void transpose_CPU(float in[], float out[])
{
    for(int j=0; j < N; j++)
        for(int i=0; i < N; i++)
            out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}

// to be launched on a single thread
__global__ void transpose_serial(float in[], float out[])
{
    for(int j=0; j < N; j++)
        for(int i=0; i < N; i++)
            out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}

// to be launched with one thread per row of output matrix
__global__ void transpose_parallel_per_row(float in[], float out[])
{
    int i = threadIdx.x;

    for(int j=0; j < N; j++)
        out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}

int main(int argc, char **argv)
{
    int numbytes = N * N * sizeof(float);

    float *in = (float *) malloc(numbytes);
    float *out = (float *) malloc(numbytes);
    float *gold = (float *) malloc(numbytes);

    fill_matrix(in, N);
    transpose_CPU(in, gold);

    float *d_in, *d_out;

    cudaMalloc(&d_in, numbytes);
    cudaMalloc(&d_out, numbytes);
    cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);

    GpuTimer timer;

    /*
     * Now time each kernel and verify that it produces the correct result.
     *
     * To be really careful about benchmarking purposes, we should run every kernel once
     * to "warm" the system and avoid any compilation or code-caching effects, then run
     * every kernel 10 or 100 times and average the timings to smooth out any variance.
     * But this makes for messy code and our goal is teaching, not detailed benchmarking.
     */

    dim3 blocks(1, 1);  // TODO, you need to define the correct blocks per grid
    dim3 threads(1, 1); // TODO, you need to define the correct threads per block

    timer.Start();
    transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out);
    timer.Stop();
    cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
    printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
           timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");

    cudaFree(d_in);
    cudaFree(d_out);
}
db085cafef379d80316565c53b4695e00ce7b550.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Open sourced multi-head attention **/ #include "fastertransformer/open_decoder.h" #include "hipcub/hipcub.hpp" namespace fastertransformer{ const int WARP_SIZE = 32; const bool ATTENION_OPT = true; const int ATTENTION_BLOCK_SIZE = 256; /////////////////////////////////////////////////////////////////////////////////////////////////// template <int HALF_ELEMENTS_PER_WARP_LOAD> using Copy_half_t = typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 32, half, typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int, typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4 >::type >::type >::type; template <typename T, int ELEMENTS_PER_WARP_LOAD> using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>; /////////////////////////////////////////////////////////////////////////////////////////////////// /** masked multi-head attention */ #define FINAL_MASK 0xffffffff template <typename T> __inline__ __device__ T warpReduceSum(T val) { for(int mask = 16; mask > 0; mask >>= 1) val += __shfl_xor_sync(FINAL_MASK, val, mask, 32); return val; } /* Calculate the sum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceSum(T val) { static __shared__ T shared[32]; // __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; int wid = threadIdx.x >> 5; val = warpReduceSum<T>(val); if(lane == 0) shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f); val = warpReduceSum<T>(val); return val; } template <typename T> __global__ void add_bias_relu(T* out, const T* bias, int m, int n) { T val, reg_bias; int row_id = blockIdx.x; int ite = n / blockDim.x; int tid = threadIdx.x; for(int i = 0; i < ite; ++i) { reg_bias = __ldg(&bias[i * blockDim.x + tid]); row_id = blockIdx.x; while(row_id < m) { val = out[tid + i * blockDim.x + row_id * n] + reg_bias; out[tid + i * blockDim.x + row_id * n] = (T)(val > 0.0f ? val : 0.0f); row_id += gridDim.x; } } } template <> __global__ void add_bias_relu(half* out, const half* bias, int m, int n) { half2 val, reg_bias; int row_id = blockIdx.x; int ite = n / blockDim.x / 2; int tid = threadIdx.x; half2* out_ptr = (half2*) out; const half2* bias_ptr = (half2*) bias; for(int i = 0; i < ite; ++i) { reg_bias = __ldg(&bias_ptr[i * blockDim.x + tid]); row_id = blockIdx.x; while(row_id < m) { val = out_ptr[tid + i * blockDim.x + row_id * n / 2]; val = __hadd2(val, reg_bias); val.x = val.x > (half)0.0f ? val.x : (half)0.0f; val.y = val.y > (half)0.0f ? 
val.y : (half)0.0f; out_ptr[tid + i * blockDim.x + row_id * n / 2] = val; row_id += gridDim.x; } } } template <typename T> __inline__ __device__ T warpReduceMax(T val) { for(int mask = 16; mask > 0; mask >>= 1) val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32)); return val; } /* Calculate the maximum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceMax(T val) { static __shared__ T shared[32]; // __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; // in-warp idx int wid = threadIdx.x >> 5; // warp idx val = warpReduceMax(val); // get maxx in each warp if(lane == 0) // record in-warp maxx by warp Idx shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f; val = warpReduceMax(val); return val; } template <int size_per_head, int block_sz, typename T> __global__ void masked_attention_kernel_opt( T* __restrict key_buf, T* __restrict value_buf, T* __restrict query_buf, const T* __restrict self_Q_bias, T* __restrict key_cache, const T* __restrict self_K_bias, T* __restrict value_cache, const T* __restrict self_V_bias, T* __restrict context_buf, int batch_size, int head_num, const int step, const T scalar) { typedef Copy_t<T, size_per_head> copy_t; const int elems_per_thread = size_per_head / WARP_SIZE; union Access_t { copy_t v; T x[elems_per_thread]; // supported size 1,2,4 }; typedef struct Float_n_t { T x[elems_per_thread]; // supported size 1,2,4 } float_n_t; __shared__ float_n_t sq[block_sz]; __shared__ float logits[1024]; // only use [0 ~ step-1], the step should be smaller than 1024 const int tid = threadIdx.x; const int warp_num = block_sz / WARP_SIZE; const int bid = blockIdx.x; const int head_id = blockIdx.x % head_num; const int warp_id = tid / WARP_SIZE; // warp_id in block const int lane_id = tid % WARP_SIZE; // lane_id in warp typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce; typedef hipcub::BlockReduce<float, block_sz> BlockReduce; __shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage; __shared__ typename BlockReduce::TempStorage block_temp_storage; __shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num]; int qkv_id = bid * size_per_head; int qkv_bias_id = head_id * size_per_head; query_buf = &query_buf[qkv_id]; key_buf = &key_buf[qkv_id]; value_buf = &value_buf[qkv_id]; self_K_bias = &self_K_bias[qkv_bias_id]; key_cache = &key_cache[qkv_id]; self_Q_bias = &self_Q_bias[qkv_bias_id]; self_V_bias = &self_V_bias[qkv_bias_id]; value_cache = &value_cache[qkv_id]; context_buf = &context_buf[qkv_id]; Access_t bias_r, query_buf_r; Access_t key_val_r, key_buf_r; Access_t value_val_r, value_buf_r; // each warp will have its own copy of sq query_buf_r.v = *((copy_t *)query_buf + lane_id); key_buf_r.v = *((copy_t *)key_buf + lane_id); bias_r.v = *((copy_t *)self_Q_bias + lane_id); float qb_r[elems_per_thread]; for (int i = 0; i < elems_per_thread; ++i) { qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i]; } //offset for each step int offset = batch_size * head_num * size_per_head; bias_r.v = *((copy_t *) self_K_bias + lane_id); for(int ite = warp_id; ite < step; ite += warp_num) { key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id); //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i]; } *((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v; } float val = 0.f; for (int i = 0; i < 
elems_per_thread; i++) { val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar; } float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val); if (lane_id == 0) { logits[ite] = qk; } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = -1e20f; for(int i = tid; i < step; i += blockDim.x) local_i = max(local_i, logits[i]); float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max()); if(tid == 0) s_max_val = max_val; __syncthreads(); float local_o = 0.0f; for(int i = tid; i < step; i += blockDim.x) { logits[i] = __expf(logits[i] - s_max_val); local_o += logits[i]; } float val = BlockReduce(block_temp_storage).Sum(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); float s_sum_inverse = __fdividef(1.0f, s_sum); for(int i = tid; i < step; i += blockDim.x) { logits[i] = logits[i] * s_sum_inverse; } __syncthreads(); // This optimization introduces discrepancy because of different order in FP32 summation float sum_r[elems_per_thread] = {0.f}; bias_r.v = *((copy_t *) self_V_bias + lane_id); value_buf_r.v = *((copy_t *)value_buf + lane_id); for(int ite = warp_id; ite < step; ite += warp_num) { value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id); //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { for (int i = 0; i < elems_per_thread; i++) { value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i]; } *((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v; } for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] += (float)value_val_r.x[i] * logits[ite]; } } for (int i = 0; i < elems_per_thread; i++) { sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i]; } __syncthreads(); if (warp_id == 0) { #pragma unroll for (int j = 1; j < warp_num; j++) { for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < elems_per_thread; i++) { value_val_r.x[i] = sum_r[i]; } if (warp_id == 0) { *((copy_t *)context_buf + lane_id) = value_val_r.v; } } // only use for compile template <int size_per_head, int block_sz> __global__ void masked_attention_kernel_opt_half2( float* __restrict key_buf, float* __restrict value_buf, float* __restrict query_buf, const float* __restrict self_Q_bias, float* __restrict key_cache, const float* __restrict self_K_bias, float* __restrict value_cache, const float* __restrict self_V_bias, float* __restrict context_buf, int batch_size, int head_num, const int step, const float scalar) {} template <int size_per_head, int block_sz> __global__ void masked_attention_kernel_opt_half2( half* __restrict key_buf, half* __restrict value_buf, half* __restrict query_buf, const half* __restrict self_Q_bias, half* __restrict key_cache, const half* __restrict self_K_bias, half* __restrict value_cache, const half* __restrict self_V_bias, half* __restrict context_buf, int batch_size, int head_num, const int step, const half scalar) { half2* key_buf_ptr = (half2*)key_buf; half2* value_buf_ptr = (half2*)value_buf; half2* query_buf_ptr = (half2*)query_buf; half2* key_cache_ptr = (half2*)key_cache; half2* value_cache_ptr = (half2*)value_cache; const half2* self_Q_bias_ptr = (const half2*)self_Q_bias; const half2* self_K_bias_ptr = (const half2*)self_K_bias; const half2* self_V_bias_ptr = (const half2*)self_V_bias; half2* context_buf_ptr = (half2*)context_buf; typedef Copy_t<half2, size_per_head/2> copy_t; const int elems_per_thread = size_per_head / 2 / WARP_SIZE; union Access_t { 
copy_t v; half2 x[elems_per_thread]; // supported size 1,2,4 }; typedef struct Half_n_t { half2 x[elems_per_thread]; // supported size 1,2,4 } half_n_t; __shared__ half_n_t sq[block_sz]; __shared__ float logits[1024]; // only use [0 ~ step-1] const int tid = threadIdx.x; const int warp_num = block_sz / WARP_SIZE; const int bid = blockIdx.x; const int head_id = blockIdx.x % head_num; const int warp_id = tid / WARP_SIZE; // warp_id in block const int lane_id = tid % WARP_SIZE; // lane_id in warp typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce; typedef hipcub::BlockReduce<float, block_sz> BlockReduce; __shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage; __shared__ typename BlockReduce::TempStorage block_temp_storage; __shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num]; int qkv_id = bid * size_per_head / 2; int qkv_bias_id = head_id * size_per_head / 2; query_buf_ptr = &query_buf_ptr[qkv_id]; key_buf_ptr = &key_buf_ptr[qkv_id]; value_buf_ptr = &value_buf_ptr[qkv_id]; self_K_bias_ptr = &self_K_bias_ptr[qkv_bias_id]; key_cache_ptr = &key_cache_ptr[qkv_id]; self_Q_bias_ptr = &self_Q_bias_ptr[qkv_bias_id]; self_V_bias_ptr = &self_V_bias_ptr[qkv_bias_id]; value_cache_ptr = &value_cache_ptr[qkv_id]; context_buf_ptr = &context_buf_ptr[qkv_id]; Access_t bias_r, query_buf_r; Access_t key_val_r, key_buf_r; Access_t value_val_r, value_buf_r; // each warp will have its own copy of sq query_buf_r.v = *((copy_t *)query_buf_ptr + lane_id); key_buf_r.v = *((copy_t *)key_buf_ptr + lane_id); bias_r.v = *((copy_t *)self_Q_bias_ptr + lane_id); half2 qb_r[elems_per_thread]; for (int i = 0; i < elems_per_thread; ++i) { qb_r[i] = __hadd2(query_buf_r.x[i], bias_r.x[i]); } //offset for each step int offset = batch_size * head_num * size_per_head / 2; bias_r.v = *((copy_t *) self_K_bias + lane_id); for(int ite = warp_id; ite < step; ite += warp_num) { key_val_r.v = *((copy_t *)&key_cache_ptr[ite * offset] + lane_id); //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = __hadd2(key_buf_r.x[i], bias_r.x[i]); } *((copy_t *)&key_cache_ptr[ite * offset] + lane_id) = key_val_r.v; } float val = 0.f; for (int i = 0; i < elems_per_thread; i++) { half2 val2 = __hmul2(key_val_r.x[i], qb_r[i]); val = val + (float)((val2.x + val2.y) * scalar); } float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val); if (lane_id == 0) { logits[ite] = qk; } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = -1e20f; for(int i = tid; i < step; i += blockDim.x) local_i = max(local_i, logits[i]); float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max()); if(tid == 0) s_max_val = max_val; __syncthreads(); float local_o = 0.0f; for(int i = tid; i < step; i += blockDim.x) { logits[i] = __expf(logits[i] - s_max_val); local_o += logits[i]; } float val = BlockReduce(block_temp_storage).Sum(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); float s_sum_inverse = __fdividef(1.0f, s_sum); for(int i = tid; i < step; i += blockDim.x) { logits[i] = logits[i] * s_sum_inverse; } __syncthreads(); // This optimization introduces discrepancy because of different order in FP32 summation half2 sum_r[elems_per_thread]; for(int i = 0; i < elems_per_thread; i++) { sum_r[i].x = (half)0.f; sum_r[i].y = (half)0.f; } bias_r.v = *((copy_t *) self_V_bias_ptr + lane_id); value_buf_r.v = *((copy_t *)value_buf_ptr + lane_id); for(int ite = 
warp_id; ite < step; ite += warp_num) { value_val_r.v = *((copy_t *)&value_cache_ptr[ite * offset] + lane_id); //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { for (int i = 0; i < elems_per_thread; i++) { value_val_r.x[i] = __hadd2(value_buf_r.x[i], bias_r.x[i]); } *((copy_t *)&value_cache_ptr[ite * offset] + lane_id) = value_val_r.v; } for (int i = 0; i < elems_per_thread; ++i) { half2 logit2_val; logit2_val.x = (half)logits[ite]; logit2_val.y = (half)logits[ite]; sum_r[i] = __hadd2(sum_r[i], __hmul2(value_val_r.x[i], logit2_val)); } } for (int i = 0; i < elems_per_thread; i++) { sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i]; } __syncthreads(); if (warp_id == 0) { #pragma unroll for (int j = 1; j < warp_num; j++) { for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] = __hadd2(sum_r[i], sq[j * WARP_SIZE + tid].x[i]); } } } __syncthreads(); #pragma unroll for (int i = 0; i < elems_per_thread; i++) { value_val_r.x[i] = sum_r[i]; } if (warp_id == 0) { *((copy_t *)context_buf_ptr + lane_id) = value_val_r.v; } } template <typename T> __global__ void masked_attention_kernel( T* key_buf, T* value_buf, T* query_buf, const T* self_Q_bias, T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias, T* context_buf, int batch_size, int head_num, int size_per_head, const int step, const T scalar) { extern __shared__ __align__(sizeof(T)) unsigned s_buf[]; T* sq = reinterpret_cast<T *>(s_buf); T* logits = reinterpret_cast<T *>(&sq[size_per_head]); int tid = threadIdx.x; int bid = blockIdx.x / head_num; int head_id = blockIdx.x % head_num; int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid; int qkv_bias_id = head_id * size_per_head + tid; if(tid < size_per_head) sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id]; __syncthreads(); //offset for each step int offset = batch_size * head_num * size_per_head; for(int ite = 0; ite < step; ++ite) { T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f; //for the last step, we should update K + bias_K to the cache if(ite == step - 1 && tid < size_per_head) { key = key_buf[qkv_id] + self_K_bias[qkv_bias_id]; key_cache[ite * offset + qkv_id] = key; } T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f); T qk = blockReduceSum(val); if(threadIdx.x == 0) logits[ite] = qk; __syncthreads(); //try to remove } __syncthreads(); //try to remove __shared__ float s_max_val, s_sum; float local_i = tid < step ? (float)logits[tid] : -1e20f; float max_val = blockReduceMax<float>(local_i); if(tid == 0) s_max_val = max_val; __syncthreads(); local_i -= s_max_val; float local_o = tid < step ? 
__expf(local_i) : 0.0f; float val = blockReduceSum<float>(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); if(tid < step) logits[tid] = local_o / s_sum; __syncthreads(); if(tid < size_per_head) { T sum = (T)0.0f; for(int ite = 0; ite < step; ++ite) { T value = value_cache[ite * offset + qkv_id]; //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { value = value_buf[qkv_id] + self_V_bias[qkv_bias_id]; value_cache[ite * offset + qkv_id] = value; } sum += value * logits[ite]; } context_buf[qkv_id] = sum; } } template <typename T> __global__ void masked_attention_kernel_v2(T* query_buf, const T* self_Q_bias, T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias, T* context_buf, int batch_size, int head_num, int size_per_head, const int step, const T scalar) { extern __shared__ __align__(sizeof(T)) unsigned s_buf[]; T* sq = reinterpret_cast<T *>(s_buf); T* logits = reinterpret_cast<T *>(&sq[size_per_head]); int tid = threadIdx.x; int bid = blockIdx.x / head_num; int head_id = blockIdx.x % head_num; int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid; int qkv_bias_id = head_id * size_per_head + tid; if(tid < size_per_head) sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id]; __syncthreads(); int warp_size = 32; int offset = batch_size * head_num * size_per_head; int warp_ite = size_per_head / warp_size; T qk = (T)0.0f; //each warp process one step int step_id = threadIdx.x >> 5; if(step_id < step) { for(int wite = 0; wite < warp_ite; ++wite) { T key = key_cache[step_id * offset + bid * head_num * size_per_head + head_id * size_per_head + tid % warp_size + wite * warp_size]; //for the last step, we should update K + bias_K to the cache if(step_id == step - 1) { key += self_K_bias[bid * head_num * size_per_head + head_id * size_per_head + tid % warp_size + wite * warp_size]; key_cache[step_id * offset + bid * head_num * size_per_head + head_id * size_per_head + tid % warp_size + wite * warp_size] = key; } qk += key * sq[tid % warp_size + wite * warp_size]; } qk = warpReduceSum(qk * scalar); if(threadIdx.x % warp_size == 0) { logits[step_id] = qk; printf("step_id %d %f\n", step_id, qk); } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = tid < step ? (float)logits[tid] : -1e20f; float max_val = blockReduceMax<float>(local_i); if(tid == 0) s_max_val = max_val; __syncthreads(); local_i -= s_max_val; float local_o = tid < step ? __expf(local_i) : 0.0f; float val = blockReduceSum<float>(local_o); if(tid == 0) s_sum = val; __syncthreads(); if(tid < step) logits[tid] = local_o / s_sum; __syncthreads(); if(tid < size_per_head) { T sum = (T)0.0f; for(int ite = 0; ite < step; ++ite) { T value = value_cache[ite * offset + qkv_id]; //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { value += self_V_bias[qkv_bias_id]; value_cache[ite * offset + qkv_id] = value; } sum += value * logits[ite]; } context_buf[qkv_id] = sum; } } template <typename T> void masked_attention_dispatch( T* key_buf, T* value_buf, T* query_buf, const T* self_Q_bias, T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias, T* context_buf, int batch_size, int head_num, int size_per_head, const int step, hipStream_t stream) { const int block_sz = ATTENTION_BLOCK_SIZE; T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f)); dim3 grid(batch_size * head_num); int cond = size_per_head * ((ATTENION_OPT)? 
1:0); switch (cond) { case 32: hipLaunchKernelGGL(( masked_attention_kernel_opt<32, block_sz, T>), dim3(grid), dim3(block_sz), 0, stream, key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); break; case 64: if(sizeof(T) == 2) hipLaunchKernelGGL(( masked_attention_kernel_opt_half2<64, block_sz>), dim3(grid), dim3(block_sz), 0, stream, key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); else hipLaunchKernelGGL(( masked_attention_kernel_opt<64, block_sz, T>), dim3(grid), dim3(block_sz), 0, stream, key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); break; case 128: if(sizeof(T) == 2) hipLaunchKernelGGL(( masked_attention_kernel_opt_half2<128, block_sz>), dim3(grid), dim3(block_sz), 0, stream, key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); else hipLaunchKernelGGL(( masked_attention_kernel_opt<128, block_sz, T>), dim3(grid), dim3(block_sz), 0, stream, key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); break; default: // default path int block_size = 128; //suppose size_per_head <= 128 if(step <= 64) block_size = 64; else if(step <= 128 && step > size_per_head) block_size = 128; else if(step > 128 && step <= 256) block_size = 256; else if(step > 256 && step <= 512) block_size = 512; else block_size = 1024; if((int)block_size < size_per_head) block_size = size_per_head; assert(block_size <= 1024); dim3 block(block_size); T scalar = 1 / sqrtf(size_per_head * 1.0f); int shared_size = sizeof(T) * (size_per_head + step); hipLaunchKernelGGL(( masked_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream, key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, size_per_head, step, scalar); } } template<OperationType OpType_> void OpenDecoder<OpType_>::masked_multi_head_attention( const DataType_* from_tensor, DataType_* key_cache_, DataType_* value_cache_, DataType_* decoder_output, const int step) { int m = batch_size_; int n = hidden_units_; int k = hidden_units_; DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f; if(is_fuse_QKV == true) { check_cuda_error(hipblasGemmBatchedEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, (const void* const*) qkv_kernel_, AType_, n, (const void* const*) qkv_input_, BType_, k, &beta, (void* const*)qkv_buf_, CType_, n, 3, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[4]))); } else { key_buf_ = key_cache_ + (step - 1) * m * n; value_buf_ = value_cache_ + (step - 1) * m * n; check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, param_.self_attention.query_weight.kernel , AType_, n, from_tensor, BType_, k, &beta, query_buf_, CType_, n, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0]))); check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, param_.self_attention.key_weight.kernel, AType_, n, from_tensor, BType_, k, &beta, key_buf_, CType_, n, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0]))); check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, 
&alpha, param_.self_attention.value_weight.kernel, AType_, n, from_tensor, BType_, k, &beta, value_buf_, CType_, n, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0]))); } masked_attention_dispatch<DataType_>( key_buf_, value_buf_, query_buf_, param_.self_attention.query_weight.bias, key_cache_, param_.self_attention.key_weight.bias, value_cache_, param_.self_attention.value_weight.bias, context_buf_, batch_size_, head_num_, size_per_head_, step, param_.stream); check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, param_.self_attention.attention_output_weight.kernel, AType_, n, context_buf_, BType_, k, &beta, decoder_output, CType_, n, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0]))); } template <typename T, int size_per_head, int block_sz> __global__ void cross_attention_kernel_opt( T* __restrict query_buf, const T* __restrict Q_bias, T* __restrict key_cache, const T* __restrict K_bias, T* __restrict value_cache, const T* __restrict V_bias, const int* length_per_sample, T* __restrict context_buf, int batch_size, int head_num, const int step, const int seq_len, const float scalar) { typedef Copy_t<T, size_per_head> copy_t; const int elems_per_thread = size_per_head / WARP_SIZE; union Access_t { copy_t v; T x[elems_per_thread]; // supported size 1,2,4 }; typedef struct Float_n_t { float x[elems_per_thread]; // supported size 1,2,4 } float_n_t; __shared__ float_n_t sq[block_sz]; __shared__ float logits[1024]; const int warp_id = threadIdx.x / WARP_SIZE; const int warp_num = block_sz / WARP_SIZE; typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce; typedef hipcub::BlockReduce<float, block_sz> BlockReduce; __shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage; __shared__ typename BlockReduce::TempStorage block_temp_storage; __shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num]; const int tid = threadIdx.x; const int bid = blockIdx.x / head_num; const int head_id = blockIdx.x % head_num; int length = __ldg(&length_per_sample[bid]); const int lane_id = tid % WARP_SIZE; int qkv_id = bid * head_num * size_per_head + head_id * size_per_head; int qkv_bias_id = head_id * size_per_head; int key_value_id = bid * (seq_len * head_num * size_per_head) + + head_id * size_per_head; query_buf = &query_buf[qkv_id]; K_bias = &K_bias[qkv_bias_id]; key_cache = &key_cache[key_value_id]; Q_bias = &Q_bias[qkv_bias_id]; V_bias = &V_bias[qkv_bias_id]; value_cache = &value_cache[key_value_id]; context_buf = &context_buf[qkv_id]; Access_t bias_r, key_val_r, query_buf_r; // each warp will have its own copy of sq query_buf_r.v = *((copy_t *)query_buf + lane_id); bias_r.v = *((copy_t *)Q_bias + lane_id); float qb_r[elems_per_thread]; for (int i = 0; i < elems_per_thread; ++i) { qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i]; } //offset for each step int offset = head_num * size_per_head; bias_r.v = *((copy_t *) K_bias + lane_id); for(int ite = warp_id; ite < length; ite += warp_num) { key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id); //For the first step, we should add bias to key memory cache. //The KV memory cache only need to be updated at the first step. 
if (step == 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i]; } *((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v; } float val = 0.f; for (int i = 0; i < elems_per_thread; i++) { val = val + (float)key_val_r.x[i] * qb_r[i] * scalar; } float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val); if (lane_id == 0) { logits[ite] = qk; } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = -1e20f; for(int i = tid; i < length; i += blockDim.x) local_i = max(local_i, logits[i]); float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max()); if(tid == 0) s_max_val = max_val; __syncthreads(); float local_o = 0.0f; for(int i = tid; i < length; i += blockDim.x) { logits[i] = __expf(logits[i] - s_max_val); local_o += logits[i]; } float val = BlockReduce(block_temp_storage).Sum(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); float s_sum_inverse = __fdividef(1.0f, s_sum); for(int i = tid; i < length; i += blockDim.x) { logits[i] = logits[i] * s_sum_inverse; } __syncthreads(); // This optimization introduces discrepancy because of different order in FP32 summation float sum_r[elems_per_thread] = {0.f}; bias_r.v = *((copy_t *) V_bias + lane_id); for(int ite = warp_id; ite < length; ite += warp_num) { key_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id); //For the first step, we should add bias to key memory cache. if(step == 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i]; } *((copy_t *)&value_cache[ite * offset] + lane_id) = key_val_r.v; } for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] += (float)key_val_r.x[i] * logits[ite]; } } for (int i = 0; i < elems_per_thread; i++) { sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i]; } __syncthreads(); if (threadIdx.x < WARP_SIZE) { #pragma unroll for (int j = 1; j < warp_num; j++) { for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = sum_r[i]; } if (threadIdx.x < WARP_SIZE) { *((copy_t *)context_buf + lane_id) = key_val_r.v; } } template<typename T> __global__ void cross_attention_kernel( T* query_buf, const T* Q_bias, T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length_per_sample, T* context_buf, int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar) { int tid = threadIdx.x; int bid = blockIdx.x / head_num; int head_id = blockIdx.x % head_num; extern __shared__ __align__(sizeof(T)) unsigned s_buf[]; T* sq = reinterpret_cast<T *>(s_buf); T* logits = reinterpret_cast<T *>(&sq[size_per_head]); int length = __ldg(&length_per_sample[bid]); int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid; int qkv_bias_id = head_id * size_per_head + tid; if(tid < size_per_head) sq[tid] = query_buf[qkv_id] + Q_bias[qkv_bias_id]; __syncthreads(); for(int ite = 0; ite < length; ++ite) { int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head) + head_id * size_per_head + tid; T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f); //For the first step, we should add bias to key memory cache. //The KV memory cache only need to be updated at the first step. 
if(step == 1 && tid < size_per_head) { key += K_bias[head_id * size_per_head + tid]; key_cache[key_id] = key; } T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f); T qk = blockReduceSum(val); if(threadIdx.x == 0) logits[ite] = qk; __syncthreads(); //try to remove } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = tid < length ? (float)logits[tid] : -1e20f; float max_val = blockReduceMax<float>(local_i); if(tid == 0) s_max_val = max_val; __syncthreads(); local_i -= s_max_val; float local_o = tid < length ? __expf(local_i) : 0.0f; float val = blockReduceSum<float>(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); if(tid < length) logits[tid] = local_o / s_sum; __syncthreads(); if(tid < size_per_head) { T sum = (T)0.0f; for(int ite = 0; ite < length; ++ite) { int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head + head_id * size_per_head + tid; T value = value_cache[value_id]; //for the first step, we should add bias to key memory cache if(step == 1) { value += V_bias[head_id * size_per_head + tid]; value_cache[value_id] = value; } sum += value * logits[ite]; } context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum; } } template <typename T> void cross_attention_dispatch(T* query_buf, const T* Q_bias, T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length, T* context_buf, int batch_size, int head_num, int size_per_head, int step, int seq_len, hipStream_t stream) { const int block_sz = ATTENTION_BLOCK_SIZE; float scalar = 1.f / sqrtf(size_per_head * 1.0f); dim3 grid(batch_size * head_num); int cond = size_per_head * ((ATTENION_OPT)? 1:0); switch (cond) { case 32: hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 32, block_sz>), dim3(grid), dim3(block_sz), 0, stream, query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, batch_size, head_num, step, seq_len, scalar); break; case 64: hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 64, block_sz>), dim3(grid), dim3(block_sz), 0, stream, query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, batch_size, head_num, step, seq_len, scalar); break; case 128: hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 128, block_sz>), dim3(grid), dim3(block_sz), 0, stream, query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, batch_size, head_num, step, seq_len, scalar); break; default: // default path int block_size = 128; if(seq_len <= 64) block_size = 64; else if(seq_len <= 128 && seq_len > size_per_head) block_size = 128; else if(seq_len > 128 && seq_len <= 256) block_size = 256; else if(seq_len > 256 && seq_len <= 512) block_size = 512; else block_size = 1024; if(block_size < size_per_head) block_size = size_per_head; assert(block_size <= 1024); dim3 block(block_size); int shared_size = sizeof(T) * (size_per_head + seq_len); hipLaunchKernelGGL(( cross_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream, query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, batch_size, head_num, size_per_head, step, seq_len, scalar); } } /* attention with source sentence */ template<OperationType OpType_> void OpenDecoder<OpType_>::cross_multi_head_attention( const DataType_* from_tensor, const DataType_* memory_tensor, DataType_* key_mem_cache, DataType_* value_mem_cache, DataType_* decoder_output, const int* length, const int seq_len, const int step) { int m = batch_size_; int n = hidden_units_; int k = hidden_units_; 
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f; //reuse the query_buf check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, param_.cross_attention.query_weight.kernel, AType_, n, from_tensor, BType_, k, &beta, query_buf_, CType_, n, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0]))); if(step == 1) { m *= seq_len; k = memory_hidden_units_; check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, param_.cross_attention.key_weight.kernel, AType_, n, memory_tensor, BType_, k, &beta, key_mem_cache, CType_, n, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[1]))); check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, param_.cross_attention.value_weight.kernel, AType_, n, memory_tensor, BType_, k, &beta, value_mem_cache, CType_, n, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[1]))); k = hidden_units_; } cross_attention_dispatch<DataType_>( query_buf_, param_.cross_attention.query_weight.bias, key_mem_cache, param_.cross_attention.key_weight.bias, value_mem_cache, param_.cross_attention.value_weight.bias, length, context_buf_, batch_size_, head_num_, size_per_head_, step, seq_len, param_.stream); m = batch_size_; n = head_num_ * size_per_head_; k = n; check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, param_.cross_attention.attention_output_weight.kernel, AType_, n, context_buf_, BType_, k, &beta, decoder_output, CType_, n, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0]))); } template <typename T> __global__ void decoder_norm1_kernel(const T* __restrict input, const T* __restrict gamma, const T* __restrict beta, T* output, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out = tid < n ? (float)(__ldg(&input[blockIdx.x * n + tid])) : 0.0f; mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>(tid < n ? (local_out - s_mean) * (local_out - s_mean) : 0.0f); if(threadIdx.x == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); if(tid < n) output[blockIdx.x * n + tid] = (T)(((local_out - s_mean) * s_variance) * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid]))); } template <> __global__ void decoder_norm1_kernel(const half* __restrict input, const half* __restrict gamma, const half* __restrict beta, half* output, int m, int n) { const int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float2 local_out_fp2; const half2* input_ptr = (const half2*)input; const half2* gamma_ptr = (const half2*)gamma; const half2* beta_ptr = (const half2*)beta; half2* output_ptr = (half2*)output; float local_out = 0.0f; int id = blockIdx.x * blockDim.x + tid; if(tid < blockDim.x) { local_out_fp2 = __half22float2(__ldg(&input_ptr[id])); local_out += local_out_fp2.x; local_out += local_out_fp2.y; } mean = blockReduceSum<float>(local_out); if(tid == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>(tid < blockDim.x ? 
(local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean) + (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean) : 0.0f); if(tid == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); if(tid < blockDim.x) { float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid])); float2 beta_val = __half22float2(__ldg(&beta_ptr[tid])); local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x; local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y; output_ptr[id] = __float22half2_rn(local_out_fp2); } } template <typename T> __global__ void decoder_norm2_kernel(const T* __restrict input, const T* __restrict gamma, const T* __restrict beta, const T* __restrict bias, T* output, T* norm_output, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out = 0.0f; if(tid < n) { local_out = (float)(__ldg(&input[blockIdx.x * n + tid])); local_out += (float)(output[blockIdx.x * n + tid]); local_out += (float)(__ldg(&bias[tid])); output[blockIdx.x * n + tid] = (T)local_out; } mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>(tid < n ? (local_out - s_mean) * (local_out - s_mean) : 0.0f); if(threadIdx.x == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); if(tid < n) norm_output[blockIdx.x * n + tid] = (T)((local_out - s_mean) * s_variance * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid]))); } template <> __global__ void decoder_norm2_kernel(const half* __restrict input, const half* __restrict gamma, const half* __restrict beta, const half* __restrict bias, half* output, half* norm_output, int m, int n) { const int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float2 local_out_fp2; const half2* input_ptr = (const half2*)input; const half2* gamma_ptr = (const half2*)gamma; const half2* beta_ptr = (const half2*)beta; const half2* bias_ptr = (const half2*)bias; half2* output_ptr = (half2*)output; half2* norm_output_ptr = (half2*)norm_output; float local_out = 0.0f; int id = blockIdx.x * blockDim.x + tid; if(tid < blockDim.x) { output_ptr[id] = __hadd2(__hadd2(output_ptr[id], __ldg(&input_ptr[id])), __ldg(&bias_ptr[tid])); local_out_fp2 = __half22float2(output_ptr[id]); local_out += local_out_fp2.x; local_out += local_out_fp2.y; } mean = blockReduceSum<float>(local_out); if(tid == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>(tid < blockDim.x ? (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean) + (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean) : 0.0f); if(tid == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); if(tid < blockDim.x) { float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid])); float2 beta_val = __half22float2(__ldg(&beta_ptr[tid])); local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x; local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y; norm_output_ptr[id] = __float22half2_rn(local_out_fp2); } } template<OperationType OpType_> void OpenDecoder<OpType_>::decoder_norm1( const DataType_* input, const DataType_* gamma, const DataType_* beta, DataType_* output, int m, int n) { dim3 grid(m); dim3 block(min(n, 1024)); /* For general cases, n is equal to hidden_units, e.g., 512/1024. Since we have warp shuffle inside the code, block.x % 32 should be 0. 
*/ if(n % 32 != 0) block.x = 1024; block.x = block.x / (4 / sizeof(DataType_)); // if using half, only need half of block.x assert(block.x <= 1024); /* should pay attention to the rsqrt precision*/ hipLaunchKernelGGL(( decoder_norm1_kernel<DataType_>), dim3(grid), dim3(block), 0, param_.stream, input, gamma, beta, output, m, n); } template<OperationType OpType_> void OpenDecoder<OpType_>::decoder_norm2( const DataType_* input, const DataType_* gamma, const DataType_* beta, const DataType_* bias, DataType_* output, DataType_* norm_output, int m, int n) { dim3 grid(m); dim3 block(min(n, 1024)); /* For general cases, n is equal to hidden_units, e.g., 512/1024. Since we have warp shuffle inside the code, block.x % 32 should be 0. */ if(n % 32 != 0) block.x = 1024; block.x = block.x / (4 / sizeof(DataType_)); // if using half, only need half of block.x assert(block.x <= 1024); /* should pay attention to the rsqrt precision*/ hipLaunchKernelGGL(( decoder_norm2_kernel<DataType_>), dim3(grid), dim3(block), 0, param_.stream, input, gamma, beta, bias, output, norm_output, m, n); } template<OperationType OpType_> void OpenDecoder<OpType_>::ffn( const DataType_* input, DataType_* ffn_inner, DataType_* output, const int m, const int inner_size, const int n) { int m1 = m, k1 = n, n1 = inner_size; DataType_ alpha = (DataType_)1.0f; DataType_ beta = (DataType_)0.0f; check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n1, m1, k1, &alpha, param_.ffn.intermediate_weight.kernel, AType_, n1, input, BType_, k1, &beta, ffn_inner, CType_, n1, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[2]))); dim3 grid(m1); dim3 block(n1 / 4); assert(block.x <= 1024); hipLaunchKernelGGL(( add_bias_relu<DataType_>), dim3(grid), dim3(block), 0, param_.stream, ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1); int m2 = m, n2 = n, k2 = inner_size; check_cuda_error(hipblasGemmEx(param_.cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n2, m2, k2, &alpha, param_.ffn.output_weight.kernel, AType_, n2, ffn_inner, BType_, k2, &beta, output, CType_, n2, computeType_, static_cast<hipblasGemmAlgo_t>(cublasAlgo_[3]))); } template <typename T> __global__ void add_bias_input_kernel(T* output, const T* input, const T* bias, const int m, const int n) { int id = blockIdx.x * n + threadIdx.x; output[id] = output[id] + input[id] + __ldg(&bias[threadIdx.x]); } template<OperationType OpType_> void OpenDecoder<OpType_>::add_bias_input(DataType_* output, const DataType_* input, const int m, const int n) { dim3 grid(m); dim3 block(n); assert(n <= 1024); hipLaunchKernelGGL(( add_bias_input_kernel), dim3(grid), dim3(block), 0, param_.stream, output, input, param_.ffn.output_weight.bias, m, n); } template void OpenDecoder<OperationType::FP32>::masked_multi_head_attention( const float* from_tensor, float* key_cache, float* value_cache, float* decoder_output, const int step); template void OpenDecoder<OperationType::FP16>::masked_multi_head_attention( const half* from_tensor, half* key_cache, half* value_cache, half* decoder_output, const int step); template void OpenDecoder<OperationType::FP32>::cross_multi_head_attention( const float* from_tensor, const float* memory_tensor, float* key_mem_cache, float* value_mem_cache, float* decoder_output, const int* length, const int max_seq_len, const int step); template void OpenDecoder<OperationType::FP16>::cross_multi_head_attention( const half* from_tensor, const half* memory_tensor, half* key_mem_cache, half* value_mem_cache, half* decoder_output, const int* length, const 
int max_seq_len, const int step); template void OpenDecoder<OperationType::FP32>::ffn( const float* input, float* ffn_inner, float* output, const int m, const int inner_size, const int n); template void OpenDecoder<OperationType::FP16>::ffn( const half* input, half* ffn_inner, half* output, const int m, const int inner_size, const int n); template void OpenDecoder<OperationType::FP32>::decoder_norm1( const float* input, const float* gamma, const float* beta, float* output, int m, int n); template void OpenDecoder<OperationType::FP16>::decoder_norm1( const half* input, const half* gamma, const half* beta, half* output, int m, int n); template void OpenDecoder<OperationType::FP32>::decoder_norm2( const float* input, const float* gamma, const float* beta, const float* bias, float* output, float* norm_output, int m, int n); template void OpenDecoder<OperationType::FP16>::decoder_norm2( const half* input, const half* gamma, const half* beta, const half* bias, half* output, half* norm_output, int m, int n); template void OpenDecoder<OperationType::FP32>::add_bias_input( float* output, const float* input, const int m, const int n); template void OpenDecoder<OperationType::FP16>::add_bias_input( half* output, const half* input, const int m, const int n); }//namespace FasterTransformer
db085cafef379d80316565c53b4695e00ce7b550.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Open sourced multi-head attention **/ #include "fastertransformer/open_decoder.h" #include "cub/cub.cuh" namespace fastertransformer{ const int WARP_SIZE = 32; const bool ATTENION_OPT = true; const int ATTENTION_BLOCK_SIZE = 256; /////////////////////////////////////////////////////////////////////////////////////////////////// template <int HALF_ELEMENTS_PER_WARP_LOAD> using Copy_half_t = typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 32, half, typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int, typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4 >::type >::type >::type; template <typename T, int ELEMENTS_PER_WARP_LOAD> using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>; /////////////////////////////////////////////////////////////////////////////////////////////////// /** masked multi-head attention */ #define FINAL_MASK 0xffffffff template <typename T> __inline__ __device__ T warpReduceSum(T val) { for(int mask = 16; mask > 0; mask >>= 1) val += __shfl_xor_sync(FINAL_MASK, val, mask, 32); return val; } /* Calculate the sum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceSum(T val) { static __shared__ T shared[32]; // __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; int wid = threadIdx.x >> 5; val = warpReduceSum<T>(val); if(lane == 0) shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f); val = warpReduceSum<T>(val); return val; } template <typename T> __global__ void add_bias_relu(T* out, const T* bias, int m, int n) { T val, reg_bias; int row_id = blockIdx.x; int ite = n / blockDim.x; int tid = threadIdx.x; for(int i = 0; i < ite; ++i) { reg_bias = __ldg(&bias[i * blockDim.x + tid]); row_id = blockIdx.x; while(row_id < m) { val = out[tid + i * blockDim.x + row_id * n] + reg_bias; out[tid + i * blockDim.x + row_id * n] = (T)(val > 0.0f ? val : 0.0f); row_id += gridDim.x; } } } template <> __global__ void add_bias_relu(half* out, const half* bias, int m, int n) { half2 val, reg_bias; int row_id = blockIdx.x; int ite = n / blockDim.x / 2; int tid = threadIdx.x; half2* out_ptr = (half2*) out; const half2* bias_ptr = (half2*) bias; for(int i = 0; i < ite; ++i) { reg_bias = __ldg(&bias_ptr[i * blockDim.x + tid]); row_id = blockIdx.x; while(row_id < m) { val = out_ptr[tid + i * blockDim.x + row_id * n / 2]; val = __hadd2(val, reg_bias); val.x = val.x > (half)0.0f ? val.x : (half)0.0f; val.y = val.y > (half)0.0f ? 
val.y : (half)0.0f; out_ptr[tid + i * blockDim.x + row_id * n / 2] = val; row_id += gridDim.x; } } } template <typename T> __inline__ __device__ T warpReduceMax(T val) { for(int mask = 16; mask > 0; mask >>= 1) val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32)); return val; } /* Calculate the maximum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceMax(T val) { static __shared__ T shared[32]; // __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; // in-warp idx int wid = threadIdx.x >> 5; // warp idx val = warpReduceMax(val); // get maxx in each warp if(lane == 0) // record in-warp maxx by warp Idx shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f; val = warpReduceMax(val); return val; } template <int size_per_head, int block_sz, typename T> __global__ void masked_attention_kernel_opt( T* __restrict key_buf, T* __restrict value_buf, T* __restrict query_buf, const T* __restrict self_Q_bias, T* __restrict key_cache, const T* __restrict self_K_bias, T* __restrict value_cache, const T* __restrict self_V_bias, T* __restrict context_buf, int batch_size, int head_num, const int step, const T scalar) { typedef Copy_t<T, size_per_head> copy_t; const int elems_per_thread = size_per_head / WARP_SIZE; union Access_t { copy_t v; T x[elems_per_thread]; // supported size 1,2,4 }; typedef struct Float_n_t { T x[elems_per_thread]; // supported size 1,2,4 } float_n_t; __shared__ float_n_t sq[block_sz]; __shared__ float logits[1024]; // only use [0 ~ step-1], the step should be smaller than 1024 const int tid = threadIdx.x; const int warp_num = block_sz / WARP_SIZE; const int bid = blockIdx.x; const int head_id = blockIdx.x % head_num; const int warp_id = tid / WARP_SIZE; // warp_id in block const int lane_id = tid % WARP_SIZE; // lane_id in warp typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce; typedef cub::BlockReduce<float, block_sz> BlockReduce; __shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage; __shared__ typename BlockReduce::TempStorage block_temp_storage; __shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num]; int qkv_id = bid * size_per_head; int qkv_bias_id = head_id * size_per_head; query_buf = &query_buf[qkv_id]; key_buf = &key_buf[qkv_id]; value_buf = &value_buf[qkv_id]; self_K_bias = &self_K_bias[qkv_bias_id]; key_cache = &key_cache[qkv_id]; self_Q_bias = &self_Q_bias[qkv_bias_id]; self_V_bias = &self_V_bias[qkv_bias_id]; value_cache = &value_cache[qkv_id]; context_buf = &context_buf[qkv_id]; Access_t bias_r, query_buf_r; Access_t key_val_r, key_buf_r; Access_t value_val_r, value_buf_r; // each warp will have its own copy of sq query_buf_r.v = *((copy_t *)query_buf + lane_id); key_buf_r.v = *((copy_t *)key_buf + lane_id); bias_r.v = *((copy_t *)self_Q_bias + lane_id); float qb_r[elems_per_thread]; for (int i = 0; i < elems_per_thread; ++i) { qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i]; } //offset for each step int offset = batch_size * head_num * size_per_head; bias_r.v = *((copy_t *) self_K_bias + lane_id); for(int ite = warp_id; ite < step; ite += warp_num) { key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id); //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i]; } *((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v; } float val = 0.f; for (int i = 0; i < 
elems_per_thread; i++) { val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar; } float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val); if (lane_id == 0) { logits[ite] = qk; } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = -1e20f; for(int i = tid; i < step; i += blockDim.x) local_i = max(local_i, logits[i]); float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max()); if(tid == 0) s_max_val = max_val; __syncthreads(); float local_o = 0.0f; for(int i = tid; i < step; i += blockDim.x) { logits[i] = __expf(logits[i] - s_max_val); local_o += logits[i]; } float val = BlockReduce(block_temp_storage).Sum(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); float s_sum_inverse = __fdividef(1.0f, s_sum); for(int i = tid; i < step; i += blockDim.x) { logits[i] = logits[i] * s_sum_inverse; } __syncthreads(); // This optimization introduces discrepancy because of different order in FP32 summation float sum_r[elems_per_thread] = {0.f}; bias_r.v = *((copy_t *) self_V_bias + lane_id); value_buf_r.v = *((copy_t *)value_buf + lane_id); for(int ite = warp_id; ite < step; ite += warp_num) { value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id); //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { for (int i = 0; i < elems_per_thread; i++) { value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i]; } *((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v; } for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] += (float)value_val_r.x[i] * logits[ite]; } } for (int i = 0; i < elems_per_thread; i++) { sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i]; } __syncthreads(); if (warp_id == 0) { #pragma unroll for (int j = 1; j < warp_num; j++) { for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < elems_per_thread; i++) { value_val_r.x[i] = sum_r[i]; } if (warp_id == 0) { *((copy_t *)context_buf + lane_id) = value_val_r.v; } } // only use for compile template <int size_per_head, int block_sz> __global__ void masked_attention_kernel_opt_half2( float* __restrict key_buf, float* __restrict value_buf, float* __restrict query_buf, const float* __restrict self_Q_bias, float* __restrict key_cache, const float* __restrict self_K_bias, float* __restrict value_cache, const float* __restrict self_V_bias, float* __restrict context_buf, int batch_size, int head_num, const int step, const float scalar) {} template <int size_per_head, int block_sz> __global__ void masked_attention_kernel_opt_half2( half* __restrict key_buf, half* __restrict value_buf, half* __restrict query_buf, const half* __restrict self_Q_bias, half* __restrict key_cache, const half* __restrict self_K_bias, half* __restrict value_cache, const half* __restrict self_V_bias, half* __restrict context_buf, int batch_size, int head_num, const int step, const half scalar) { half2* key_buf_ptr = (half2*)key_buf; half2* value_buf_ptr = (half2*)value_buf; half2* query_buf_ptr = (half2*)query_buf; half2* key_cache_ptr = (half2*)key_cache; half2* value_cache_ptr = (half2*)value_cache; const half2* self_Q_bias_ptr = (const half2*)self_Q_bias; const half2* self_K_bias_ptr = (const half2*)self_K_bias; const half2* self_V_bias_ptr = (const half2*)self_V_bias; half2* context_buf_ptr = (half2*)context_buf; typedef Copy_t<half2, size_per_head/2> copy_t; const int elems_per_thread = size_per_head / 2 / WARP_SIZE; union Access_t { copy_t 
v; half2 x[elems_per_thread]; // supported size 1,2,4 }; typedef struct Half_n_t { half2 x[elems_per_thread]; // supported size 1,2,4 } half_n_t; __shared__ half_n_t sq[block_sz]; __shared__ float logits[1024]; // only use [0 ~ step-1] const int tid = threadIdx.x; const int warp_num = block_sz / WARP_SIZE; const int bid = blockIdx.x; const int head_id = blockIdx.x % head_num; const int warp_id = tid / WARP_SIZE; // warp_id in block const int lane_id = tid % WARP_SIZE; // lane_id in warp typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce; typedef cub::BlockReduce<float, block_sz> BlockReduce; __shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage; __shared__ typename BlockReduce::TempStorage block_temp_storage; __shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num]; int qkv_id = bid * size_per_head / 2; int qkv_bias_id = head_id * size_per_head / 2; query_buf_ptr = &query_buf_ptr[qkv_id]; key_buf_ptr = &key_buf_ptr[qkv_id]; value_buf_ptr = &value_buf_ptr[qkv_id]; self_K_bias_ptr = &self_K_bias_ptr[qkv_bias_id]; key_cache_ptr = &key_cache_ptr[qkv_id]; self_Q_bias_ptr = &self_Q_bias_ptr[qkv_bias_id]; self_V_bias_ptr = &self_V_bias_ptr[qkv_bias_id]; value_cache_ptr = &value_cache_ptr[qkv_id]; context_buf_ptr = &context_buf_ptr[qkv_id]; Access_t bias_r, query_buf_r; Access_t key_val_r, key_buf_r; Access_t value_val_r, value_buf_r; // each warp will have its own copy of sq query_buf_r.v = *((copy_t *)query_buf_ptr + lane_id); key_buf_r.v = *((copy_t *)key_buf_ptr + lane_id); bias_r.v = *((copy_t *)self_Q_bias_ptr + lane_id); half2 qb_r[elems_per_thread]; for (int i = 0; i < elems_per_thread; ++i) { qb_r[i] = __hadd2(query_buf_r.x[i], bias_r.x[i]); } //offset for each step int offset = batch_size * head_num * size_per_head / 2; bias_r.v = *((copy_t *) self_K_bias + lane_id); for(int ite = warp_id; ite < step; ite += warp_num) { key_val_r.v = *((copy_t *)&key_cache_ptr[ite * offset] + lane_id); //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = __hadd2(key_buf_r.x[i], bias_r.x[i]); } *((copy_t *)&key_cache_ptr[ite * offset] + lane_id) = key_val_r.v; } float val = 0.f; for (int i = 0; i < elems_per_thread; i++) { half2 val2 = __hmul2(key_val_r.x[i], qb_r[i]); val = val + (float)((val2.x + val2.y) * scalar); } float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val); if (lane_id == 0) { logits[ite] = qk; } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = -1e20f; for(int i = tid; i < step; i += blockDim.x) local_i = max(local_i, logits[i]); float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max()); if(tid == 0) s_max_val = max_val; __syncthreads(); float local_o = 0.0f; for(int i = tid; i < step; i += blockDim.x) { logits[i] = __expf(logits[i] - s_max_val); local_o += logits[i]; } float val = BlockReduce(block_temp_storage).Sum(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); float s_sum_inverse = __fdividef(1.0f, s_sum); for(int i = tid; i < step; i += blockDim.x) { logits[i] = logits[i] * s_sum_inverse; } __syncthreads(); // This optimization introduces discrepancy because of different order in FP32 summation half2 sum_r[elems_per_thread]; for(int i = 0; i < elems_per_thread; i++) { sum_r[i].x = (half)0.f; sum_r[i].y = (half)0.f; } bias_r.v = *((copy_t *) self_V_bias_ptr + lane_id); value_buf_r.v = *((copy_t *)value_buf_ptr + lane_id); for(int ite = warp_id; ite < step; ite += 
warp_num) { value_val_r.v = *((copy_t *)&value_cache_ptr[ite * offset] + lane_id); //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { for (int i = 0; i < elems_per_thread; i++) { value_val_r.x[i] = __hadd2(value_buf_r.x[i], bias_r.x[i]); } *((copy_t *)&value_cache_ptr[ite * offset] + lane_id) = value_val_r.v; } for (int i = 0; i < elems_per_thread; ++i) { half2 logit2_val; logit2_val.x = (half)logits[ite]; logit2_val.y = (half)logits[ite]; sum_r[i] = __hadd2(sum_r[i], __hmul2(value_val_r.x[i], logit2_val)); } } for (int i = 0; i < elems_per_thread; i++) { sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i]; } __syncthreads(); if (warp_id == 0) { #pragma unroll for (int j = 1; j < warp_num; j++) { for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] = __hadd2(sum_r[i], sq[j * WARP_SIZE + tid].x[i]); } } } __syncthreads(); #pragma unroll for (int i = 0; i < elems_per_thread; i++) { value_val_r.x[i] = sum_r[i]; } if (warp_id == 0) { *((copy_t *)context_buf_ptr + lane_id) = value_val_r.v; } } template <typename T> __global__ void masked_attention_kernel( T* key_buf, T* value_buf, T* query_buf, const T* self_Q_bias, T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias, T* context_buf, int batch_size, int head_num, int size_per_head, const int step, const T scalar) { extern __shared__ __align__(sizeof(T)) unsigned s_buf[]; T* sq = reinterpret_cast<T *>(s_buf); T* logits = reinterpret_cast<T *>(&sq[size_per_head]); int tid = threadIdx.x; int bid = blockIdx.x / head_num; int head_id = blockIdx.x % head_num; int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid; int qkv_bias_id = head_id * size_per_head + tid; if(tid < size_per_head) sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id]; __syncthreads(); //offset for each step int offset = batch_size * head_num * size_per_head; for(int ite = 0; ite < step; ++ite) { T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f; //for the last step, we should update K + bias_K to the cache if(ite == step - 1 && tid < size_per_head) { key = key_buf[qkv_id] + self_K_bias[qkv_bias_id]; key_cache[ite * offset + qkv_id] = key; } T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f); T qk = blockReduceSum(val); if(threadIdx.x == 0) logits[ite] = qk; __syncthreads(); //try to remove } __syncthreads(); //try to remove __shared__ float s_max_val, s_sum; float local_i = tid < step ? (float)logits[tid] : -1e20f; float max_val = blockReduceMax<float>(local_i); if(tid == 0) s_max_val = max_val; __syncthreads(); local_i -= s_max_val; float local_o = tid < step ? 
__expf(local_i) : 0.0f; float val = blockReduceSum<float>(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); if(tid < step) logits[tid] = local_o / s_sum; __syncthreads(); if(tid < size_per_head) { T sum = (T)0.0f; for(int ite = 0; ite < step; ++ite) { T value = value_cache[ite * offset + qkv_id]; //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { value = value_buf[qkv_id] + self_V_bias[qkv_bias_id]; value_cache[ite * offset + qkv_id] = value; } sum += value * logits[ite]; } context_buf[qkv_id] = sum; } } template <typename T> __global__ void masked_attention_kernel_v2(T* query_buf, const T* self_Q_bias, T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias, T* context_buf, int batch_size, int head_num, int size_per_head, const int step, const T scalar) { extern __shared__ __align__(sizeof(T)) unsigned s_buf[]; T* sq = reinterpret_cast<T *>(s_buf); T* logits = reinterpret_cast<T *>(&sq[size_per_head]); int tid = threadIdx.x; int bid = blockIdx.x / head_num; int head_id = blockIdx.x % head_num; int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid; int qkv_bias_id = head_id * size_per_head + tid; if(tid < size_per_head) sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id]; __syncthreads(); int warp_size = 32; int offset = batch_size * head_num * size_per_head; int warp_ite = size_per_head / warp_size; T qk = (T)0.0f; //each warp process one step int step_id = threadIdx.x >> 5; if(step_id < step) { for(int wite = 0; wite < warp_ite; ++wite) { T key = key_cache[step_id * offset + bid * head_num * size_per_head + head_id * size_per_head + tid % warp_size + wite * warp_size]; //for the last step, we should update K + bias_K to the cache if(step_id == step - 1) { key += self_K_bias[bid * head_num * size_per_head + head_id * size_per_head + tid % warp_size + wite * warp_size]; key_cache[step_id * offset + bid * head_num * size_per_head + head_id * size_per_head + tid % warp_size + wite * warp_size] = key; } qk += key * sq[tid % warp_size + wite * warp_size]; } qk = warpReduceSum(qk * scalar); if(threadIdx.x % warp_size == 0) { logits[step_id] = qk; printf("step_id %d %f\n", step_id, qk); } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = tid < step ? (float)logits[tid] : -1e20f; float max_val = blockReduceMax<float>(local_i); if(tid == 0) s_max_val = max_val; __syncthreads(); local_i -= s_max_val; float local_o = tid < step ? __expf(local_i) : 0.0f; float val = blockReduceSum<float>(local_o); if(tid == 0) s_sum = val; __syncthreads(); if(tid < step) logits[tid] = local_o / s_sum; __syncthreads(); if(tid < size_per_head) { T sum = (T)0.0f; for(int ite = 0; ite < step; ++ite) { T value = value_cache[ite * offset + qkv_id]; //for the last step, we should update K + bias_K to the cache if(ite == step - 1) { value += self_V_bias[qkv_bias_id]; value_cache[ite * offset + qkv_id] = value; } sum += value * logits[ite]; } context_buf[qkv_id] = sum; } } template <typename T> void masked_attention_dispatch( T* key_buf, T* value_buf, T* query_buf, const T* self_Q_bias, T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias, T* context_buf, int batch_size, int head_num, int size_per_head, const int step, cudaStream_t stream) { const int block_sz = ATTENTION_BLOCK_SIZE; T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f)); dim3 grid(batch_size * head_num); int cond = size_per_head * ((ATTENION_OPT)? 
1:0); switch (cond) { case 32: masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz, 0, stream>>>( key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); break; case 64: if(sizeof(T) == 2) masked_attention_kernel_opt_half2<64, block_sz><<<grid, block_sz, 0, stream>>>( key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); else masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz, 0, stream>>>( key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); break; case 128: if(sizeof(T) == 2) masked_attention_kernel_opt_half2<128, block_sz><<<grid, block_sz, 0, stream>>>( key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); else masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz, 0, stream>>>( key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, step, scalar); break; default: // default path int block_size = 128; //suppose size_per_head <= 128 if(step <= 64) block_size = 64; else if(step <= 128 && step > size_per_head) block_size = 128; else if(step > 128 && step <= 256) block_size = 256; else if(step > 256 && step <= 512) block_size = 512; else block_size = 1024; if((int)block_size < size_per_head) block_size = size_per_head; assert(block_size <= 1024); dim3 block(block_size); T scalar = 1 / sqrtf(size_per_head * 1.0f); int shared_size = sizeof(T) * (size_per_head + step); masked_attention_kernel<T><<<grid, block, shared_size, stream>>>( key_buf, value_buf, query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf, batch_size, head_num, size_per_head, step, scalar); } } template<OperationType OpType_> void OpenDecoder<OpType_>::masked_multi_head_attention( const DataType_* from_tensor, DataType_* key_cache_, DataType_* value_cache_, DataType_* decoder_output, const int step) { int m = batch_size_; int n = hidden_units_; int k = hidden_units_; DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f; if(is_fuse_QKV == true) { check_cuda_error(cublasGemmBatchedEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, (const void* const*) qkv_kernel_, AType_, n, (const void* const*) qkv_input_, BType_, k, &beta, (void* const*)qkv_buf_, CType_, n, 3, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[4]))); } else { key_buf_ = key_cache_ + (step - 1) * m * n; value_buf_ = value_cache_ + (step - 1) * m * n; check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, param_.self_attention.query_weight.kernel , AType_, n, from_tensor, BType_, k, &beta, query_buf_, CType_, n, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[0]))); check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, param_.self_attention.key_weight.kernel, AType_, n, from_tensor, BType_, k, &beta, key_buf_, CType_, n, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[0]))); check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, param_.self_attention.value_weight.kernel, AType_, n, from_tensor, BType_, k, &beta, value_buf_, CType_, n, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[0]))); } 
masked_attention_dispatch<DataType_>( key_buf_, value_buf_, query_buf_, param_.self_attention.query_weight.bias, key_cache_, param_.self_attention.key_weight.bias, value_cache_, param_.self_attention.value_weight.bias, context_buf_, batch_size_, head_num_, size_per_head_, step, param_.stream); check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, param_.self_attention.attention_output_weight.kernel, AType_, n, context_buf_, BType_, k, &beta, decoder_output, CType_, n, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[0]))); } template <typename T, int size_per_head, int block_sz> __global__ void cross_attention_kernel_opt( T* __restrict query_buf, const T* __restrict Q_bias, T* __restrict key_cache, const T* __restrict K_bias, T* __restrict value_cache, const T* __restrict V_bias, const int* length_per_sample, T* __restrict context_buf, int batch_size, int head_num, const int step, const int seq_len, const float scalar) { typedef Copy_t<T, size_per_head> copy_t; const int elems_per_thread = size_per_head / WARP_SIZE; union Access_t { copy_t v; T x[elems_per_thread]; // supported size 1,2,4 }; typedef struct Float_n_t { float x[elems_per_thread]; // supported size 1,2,4 } float_n_t; __shared__ float_n_t sq[block_sz]; __shared__ float logits[1024]; const int warp_id = threadIdx.x / WARP_SIZE; const int warp_num = block_sz / WARP_SIZE; typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce; typedef cub::BlockReduce<float, block_sz> BlockReduce; __shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage; __shared__ typename BlockReduce::TempStorage block_temp_storage; __shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num]; const int tid = threadIdx.x; const int bid = blockIdx.x / head_num; const int head_id = blockIdx.x % head_num; int length = __ldg(&length_per_sample[bid]); const int lane_id = tid % WARP_SIZE; int qkv_id = bid * head_num * size_per_head + head_id * size_per_head; int qkv_bias_id = head_id * size_per_head; int key_value_id = bid * (seq_len * head_num * size_per_head) + + head_id * size_per_head; query_buf = &query_buf[qkv_id]; K_bias = &K_bias[qkv_bias_id]; key_cache = &key_cache[key_value_id]; Q_bias = &Q_bias[qkv_bias_id]; V_bias = &V_bias[qkv_bias_id]; value_cache = &value_cache[key_value_id]; context_buf = &context_buf[qkv_id]; Access_t bias_r, key_val_r, query_buf_r; // each warp will have its own copy of sq query_buf_r.v = *((copy_t *)query_buf + lane_id); bias_r.v = *((copy_t *)Q_bias + lane_id); float qb_r[elems_per_thread]; for (int i = 0; i < elems_per_thread; ++i) { qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i]; } //offset for each step int offset = head_num * size_per_head; bias_r.v = *((copy_t *) K_bias + lane_id); for(int ite = warp_id; ite < length; ite += warp_num) { key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id); //For the first step, we should add bias to key memory cache. //The KV memory cache only need to be updated at the first step. 
if (step == 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i]; } *((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v; } float val = 0.f; for (int i = 0; i < elems_per_thread; i++) { val = val + (float)key_val_r.x[i] * qb_r[i] * scalar; } float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val); if (lane_id == 0) { logits[ite] = qk; } } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = -1e20f; for(int i = tid; i < length; i += blockDim.x) local_i = max(local_i, logits[i]); float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max()); if(tid == 0) s_max_val = max_val; __syncthreads(); float local_o = 0.0f; for(int i = tid; i < length; i += blockDim.x) { logits[i] = __expf(logits[i] - s_max_val); local_o += logits[i]; } float val = BlockReduce(block_temp_storage).Sum(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); float s_sum_inverse = __fdividef(1.0f, s_sum); for(int i = tid; i < length; i += blockDim.x) { logits[i] = logits[i] * s_sum_inverse; } __syncthreads(); // This optimization introduces discrepancy because of different order in FP32 summation float sum_r[elems_per_thread] = {0.f}; bias_r.v = *((copy_t *) V_bias + lane_id); for(int ite = warp_id; ite < length; ite += warp_num) { key_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id); //For the first step, we should add bias to key memory cache. if(step == 1) { for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i]; } *((copy_t *)&value_cache[ite * offset] + lane_id) = key_val_r.v; } for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] += (float)key_val_r.x[i] * logits[ite]; } } for (int i = 0; i < elems_per_thread; i++) { sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i]; } __syncthreads(); if (threadIdx.x < WARP_SIZE) { #pragma unroll for (int j = 1; j < warp_num; j++) { for (int i = 0; i < elems_per_thread; ++i) { sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < elems_per_thread; i++) { key_val_r.x[i] = sum_r[i]; } if (threadIdx.x < WARP_SIZE) { *((copy_t *)context_buf + lane_id) = key_val_r.v; } } template<typename T> __global__ void cross_attention_kernel( T* query_buf, const T* Q_bias, T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length_per_sample, T* context_buf, int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar) { int tid = threadIdx.x; int bid = blockIdx.x / head_num; int head_id = blockIdx.x % head_num; extern __shared__ __align__(sizeof(T)) unsigned s_buf[]; T* sq = reinterpret_cast<T *>(s_buf); T* logits = reinterpret_cast<T *>(&sq[size_per_head]); int length = __ldg(&length_per_sample[bid]); int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid; int qkv_bias_id = head_id * size_per_head + tid; if(tid < size_per_head) sq[tid] = query_buf[qkv_id] + Q_bias[qkv_bias_id]; __syncthreads(); for(int ite = 0; ite < length; ++ite) { int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head) + head_id * size_per_head + tid; T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f); //For the first step, we should add bias to key memory cache. //The KV memory cache only need to be updated at the first step. 
if(step == 1 && tid < size_per_head) { key += K_bias[head_id * size_per_head + tid]; key_cache[key_id] = key; } T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f); T qk = blockReduceSum(val); if(threadIdx.x == 0) logits[ite] = qk; __syncthreads(); //try to remove } __syncthreads(); __shared__ float s_max_val, s_sum; float local_i = tid < length ? (float)logits[tid] : -1e20f; float max_val = blockReduceMax<float>(local_i); if(tid == 0) s_max_val = max_val; __syncthreads(); local_i -= s_max_val; float local_o = tid < length ? __expf(local_i) : 0.0f; float val = blockReduceSum<float>(local_o); if(tid == 0) s_sum = val + 1e-6; __syncthreads(); if(tid < length) logits[tid] = local_o / s_sum; __syncthreads(); if(tid < size_per_head) { T sum = (T)0.0f; for(int ite = 0; ite < length; ++ite) { int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head + head_id * size_per_head + tid; T value = value_cache[value_id]; //for the first step, we should add bias to key memory cache if(step == 1) { value += V_bias[head_id * size_per_head + tid]; value_cache[value_id] = value; } sum += value * logits[ite]; } context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum; } } template <typename T> void cross_attention_dispatch(T* query_buf, const T* Q_bias, T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length, T* context_buf, int batch_size, int head_num, int size_per_head, int step, int seq_len, cudaStream_t stream) { const int block_sz = ATTENTION_BLOCK_SIZE; float scalar = 1.f / sqrtf(size_per_head * 1.0f); dim3 grid(batch_size * head_num); int cond = size_per_head * ((ATTENION_OPT)? 1:0); switch (cond) { case 32: cross_attention_kernel_opt<T, 32, block_sz><<<grid, block_sz, 0, stream>>>( query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, batch_size, head_num, step, seq_len, scalar); break; case 64: cross_attention_kernel_opt<T, 64, block_sz><<<grid, block_sz, 0, stream>>>( query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, batch_size, head_num, step, seq_len, scalar); break; case 128: cross_attention_kernel_opt<T, 128, block_sz><<<grid, block_sz, 0, stream>>>( query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, batch_size, head_num, step, seq_len, scalar); break; default: // default path int block_size = 128; if(seq_len <= 64) block_size = 64; else if(seq_len <= 128 && seq_len > size_per_head) block_size = 128; else if(seq_len > 128 && seq_len <= 256) block_size = 256; else if(seq_len > 256 && seq_len <= 512) block_size = 512; else block_size = 1024; if(block_size < size_per_head) block_size = size_per_head; assert(block_size <= 1024); dim3 block(block_size); int shared_size = sizeof(T) * (size_per_head + seq_len); cross_attention_kernel<T><<<grid, block, shared_size, stream>>>( query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf, batch_size, head_num, size_per_head, step, seq_len, scalar); } } /* attention with source sentence */ template<OperationType OpType_> void OpenDecoder<OpType_>::cross_multi_head_attention( const DataType_* from_tensor, const DataType_* memory_tensor, DataType_* key_mem_cache, DataType_* value_mem_cache, DataType_* decoder_output, const int* length, const int seq_len, const int step) { int m = batch_size_; int n = hidden_units_; int k = hidden_units_; DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f; //reuse the query_buf 
check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, param_.cross_attention.query_weight.kernel, AType_, n, from_tensor, BType_, k, &beta, query_buf_, CType_, n, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[0]))); if(step == 1) { m *= seq_len; k = memory_hidden_units_; check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, param_.cross_attention.key_weight.kernel, AType_, n, memory_tensor, BType_, k, &beta, key_mem_cache, CType_, n, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[1]))); check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, param_.cross_attention.value_weight.kernel, AType_, n, memory_tensor, BType_, k, &beta, value_mem_cache, CType_, n, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[1]))); k = hidden_units_; } cross_attention_dispatch<DataType_>( query_buf_, param_.cross_attention.query_weight.bias, key_mem_cache, param_.cross_attention.key_weight.bias, value_mem_cache, param_.cross_attention.value_weight.bias, length, context_buf_, batch_size_, head_num_, size_per_head_, step, seq_len, param_.stream); m = batch_size_; n = head_num_ * size_per_head_; k = n; check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, param_.cross_attention.attention_output_weight.kernel, AType_, n, context_buf_, BType_, k, &beta, decoder_output, CType_, n, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[0]))); } template <typename T> __global__ void decoder_norm1_kernel(const T* __restrict input, const T* __restrict gamma, const T* __restrict beta, T* output, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out = tid < n ? (float)(__ldg(&input[blockIdx.x * n + tid])) : 0.0f; mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>(tid < n ? (local_out - s_mean) * (local_out - s_mean) : 0.0f); if(threadIdx.x == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); if(tid < n) output[blockIdx.x * n + tid] = (T)(((local_out - s_mean) * s_variance) * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid]))); } template <> __global__ void decoder_norm1_kernel(const half* __restrict input, const half* __restrict gamma, const half* __restrict beta, half* output, int m, int n) { const int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float2 local_out_fp2; const half2* input_ptr = (const half2*)input; const half2* gamma_ptr = (const half2*)gamma; const half2* beta_ptr = (const half2*)beta; half2* output_ptr = (half2*)output; float local_out = 0.0f; int id = blockIdx.x * blockDim.x + tid; if(tid < blockDim.x) { local_out_fp2 = __half22float2(__ldg(&input_ptr[id])); local_out += local_out_fp2.x; local_out += local_out_fp2.y; } mean = blockReduceSum<float>(local_out); if(tid == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>(tid < blockDim.x ? 
(local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean) + (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean) : 0.0f); if(tid == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); if(tid < blockDim.x) { float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid])); float2 beta_val = __half22float2(__ldg(&beta_ptr[tid])); local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x; local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y; output_ptr[id] = __float22half2_rn(local_out_fp2); } } template <typename T> __global__ void decoder_norm2_kernel(const T* __restrict input, const T* __restrict gamma, const T* __restrict beta, const T* __restrict bias, T* output, T* norm_output, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out = 0.0f; if(tid < n) { local_out = (float)(__ldg(&input[blockIdx.x * n + tid])); local_out += (float)(output[blockIdx.x * n + tid]); local_out += (float)(__ldg(&bias[tid])); output[blockIdx.x * n + tid] = (T)local_out; } mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>(tid < n ? (local_out - s_mean) * (local_out - s_mean) : 0.0f); if(threadIdx.x == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); if(tid < n) norm_output[blockIdx.x * n + tid] = (T)((local_out - s_mean) * s_variance * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid]))); } template <> __global__ void decoder_norm2_kernel(const half* __restrict input, const half* __restrict gamma, const half* __restrict beta, const half* __restrict bias, half* output, half* norm_output, int m, int n) { const int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float2 local_out_fp2; const half2* input_ptr = (const half2*)input; const half2* gamma_ptr = (const half2*)gamma; const half2* beta_ptr = (const half2*)beta; const half2* bias_ptr = (const half2*)bias; half2* output_ptr = (half2*)output; half2* norm_output_ptr = (half2*)norm_output; float local_out = 0.0f; int id = blockIdx.x * blockDim.x + tid; if(tid < blockDim.x) { output_ptr[id] = __hadd2(__hadd2(output_ptr[id], __ldg(&input_ptr[id])), __ldg(&bias_ptr[tid])); local_out_fp2 = __half22float2(output_ptr[id]); local_out += local_out_fp2.x; local_out += local_out_fp2.y; } mean = blockReduceSum<float>(local_out); if(tid == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>(tid < blockDim.x ? (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean) + (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean) : 0.0f); if(tid == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); if(tid < blockDim.x) { float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid])); float2 beta_val = __half22float2(__ldg(&beta_ptr[tid])); local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x; local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y; norm_output_ptr[id] = __float22half2_rn(local_out_fp2); } } template<OperationType OpType_> void OpenDecoder<OpType_>::decoder_norm1( const DataType_* input, const DataType_* gamma, const DataType_* beta, DataType_* output, int m, int n) { dim3 grid(m); dim3 block(min(n, 1024)); /* For general cases, n is equal to hidden_units, e.g., 512/1024. Since we have warp shuffle inside the code, block.x % 32 should be 0. 
*/ if(n % 32 != 0) block.x = 1024; block.x = block.x / (4 / sizeof(DataType_)); // if using half, only need half of block.x assert(block.x <= 1024); /* should pay attention to the rsqrt precision*/ decoder_norm1_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input, gamma, beta, output, m, n); } template<OperationType OpType_> void OpenDecoder<OpType_>::decoder_norm2( const DataType_* input, const DataType_* gamma, const DataType_* beta, const DataType_* bias, DataType_* output, DataType_* norm_output, int m, int n) { dim3 grid(m); dim3 block(min(n, 1024)); /* For general cases, n is equal to hidden_units, e.g., 512/1024. Since we have warp shuffle inside the code, block.x % 32 should be 0. */ if(n % 32 != 0) block.x = 1024; block.x = block.x / (4 / sizeof(DataType_)); // if using half, only need half of block.x assert(block.x <= 1024); /* should pay attention to the rsqrt precision*/ decoder_norm2_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input, gamma, beta, bias, output, norm_output, m, n); } template<OperationType OpType_> void OpenDecoder<OpType_>::ffn( const DataType_* input, DataType_* ffn_inner, DataType_* output, const int m, const int inner_size, const int n) { int m1 = m, k1 = n, n1 = inner_size; DataType_ alpha = (DataType_)1.0f; DataType_ beta = (DataType_)0.0f; check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n1, m1, k1, &alpha, param_.ffn.intermediate_weight.kernel, AType_, n1, input, BType_, k1, &beta, ffn_inner, CType_, n1, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[2]))); dim3 grid(m1); dim3 block(n1 / 4); assert(block.x <= 1024); add_bias_relu<DataType_><<<grid, block, 0, param_.stream>>>(ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1); int m2 = m, n2 = n, k2 = inner_size; check_cuda_error(cublasGemmEx(param_.cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n2, m2, k2, &alpha, param_.ffn.output_weight.kernel, AType_, n2, ffn_inner, BType_, k2, &beta, output, CType_, n2, computeType_, static_cast<cublasGemmAlgo_t>(cublasAlgo_[3]))); } template <typename T> __global__ void add_bias_input_kernel(T* output, const T* input, const T* bias, const int m, const int n) { int id = blockIdx.x * n + threadIdx.x; output[id] = output[id] + input[id] + __ldg(&bias[threadIdx.x]); } template<OperationType OpType_> void OpenDecoder<OpType_>::add_bias_input(DataType_* output, const DataType_* input, const int m, const int n) { dim3 grid(m); dim3 block(n); assert(n <= 1024); add_bias_input_kernel<<<grid, block, 0, param_.stream>>>(output, input, param_.ffn.output_weight.bias, m, n); } template void OpenDecoder<OperationType::FP32>::masked_multi_head_attention( const float* from_tensor, float* key_cache, float* value_cache, float* decoder_output, const int step); template void OpenDecoder<OperationType::FP16>::masked_multi_head_attention( const half* from_tensor, half* key_cache, half* value_cache, half* decoder_output, const int step); template void OpenDecoder<OperationType::FP32>::cross_multi_head_attention( const float* from_tensor, const float* memory_tensor, float* key_mem_cache, float* value_mem_cache, float* decoder_output, const int* length, const int max_seq_len, const int step); template void OpenDecoder<OperationType::FP16>::cross_multi_head_attention( const half* from_tensor, const half* memory_tensor, half* key_mem_cache, half* value_mem_cache, half* decoder_output, const int* length, const int max_seq_len, const int step); template void OpenDecoder<OperationType::FP32>::ffn( const float* input, float* ffn_inner, float* 
output, const int m, const int inner_size, const int n); template void OpenDecoder<OperationType::FP16>::ffn( const half* input, half* ffn_inner, half* output, const int m, const int inner_size, const int n); template void OpenDecoder<OperationType::FP32>::decoder_norm1( const float* input, const float* gamma, const float* beta, float* output, int m, int n); template void OpenDecoder<OperationType::FP16>::decoder_norm1( const half* input, const half* gamma, const half* beta, half* output, int m, int n); template void OpenDecoder<OperationType::FP32>::decoder_norm2( const float* input, const float* gamma, const float* beta, const float* bias, float* output, float* norm_output, int m, int n); template void OpenDecoder<OperationType::FP16>::decoder_norm2( const half* input, const half* gamma, const half* beta, const half* bias, half* output, half* norm_output, int m, int n); template void OpenDecoder<OperationType::FP32>::add_bias_input( float* output, const float* input, const int m, const int n); template void OpenDecoder<OperationType::FP16>::add_bias_input( half* output, const half* input, const int m, const int n); }//namespace FasterTransformer
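A note on the GEMM calls in the decoder file above: cuBLAS assumes column-major storage, so every cublasGemmEx call passes the dimensions as (n, m, k) with both operands untransposed, which yields the row-major product output[m x n] = input[m x k] * weight[k x n] directly in the output buffer. The standalone sketch below illustrates that convention for FP32; the matrix sizes, the host program, and the CUBLAS_GEMM_DEFAULT algorithm choice are illustrative assumptions rather than values taken from the file, and the CUDA_R_32F compute-type argument mirrors the pre-CUDA-11 cublasGemmEx signature that the surrounding code relies on (on CUDA 11+ it would be CUBLAS_COMPUTE_32F).

// Hypothetical standalone sketch (not part of FasterTransformer): row-major C = A * B
// via cublasGemmEx, passing dimensions as (n, m, k) exactly as the decoder GEMMs above do.
// Sizes, names, and the algorithm choice are assumptions for illustration only.
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main() {
    const int m = 2, k = 3, n = 4;            // C is m x n, stored row-major
    std::vector<float> hA(m * k, 1.0f), hB(k * n, 1.0f), hC(m * n, 0.0f);

    float *dA, *dB, *dC;
    cudaMalloc(&dA, hA.size() * sizeof(float));
    cudaMalloc(&dB, hB.size() * sizeof(float));
    cudaMalloc(&dC, hC.size() * sizeof(float));
    cudaMemcpy(dA, hA.data(), hA.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB.data(), hB.size() * sizeof(float), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.0f, beta = 0.0f;
    // Row-major A(m x k) * B(k x n): hand cuBLAS the column-major view
    // B^T(n x k) * A^T(k x m) by swapping the operands and passing (n, m, k).
    cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                 n, m, k,
                 &alpha,
                 dB, CUDA_R_32F, n,
                 dA, CUDA_R_32F, k,
                 &beta,
                 dC, CUDA_R_32F, n,
                 CUDA_R_32F, CUBLAS_GEMM_DEFAULT);

    cudaMemcpy(hC.data(), dC, hC.size() * sizeof(float), cudaMemcpyDeviceToHost);
    printf("C[0] = %f (expected %d)\n", hC[0], k);   // all-ones inputs: every entry equals k

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}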
a1c4a66c4e351fcc53d5136708863db2179c0273.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // SDSC SCC Training - GPU Computing and Programming // May 3, 2019 // Andreas Goetz ([email protected]) // CUDA program to add two vectors in parallel on the GPU // launch all kernels at once // #include<stdio.h> // define vector length and threads per block #define N (255*4096) #define TPB 512 // // CUDA device function that adds two integer vectors // __global__ void add(int *a, int *b, int *c, int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < n) c[tid] = a[tid] + b[tid]; } // // main program // int main(void){ int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = N * sizeof(int); int i, nblock, err; // allocate host memory h_a = (int *) malloc(size); h_b = (int *) malloc(size); h_c = (int *) malloc(size); // allocate device memory hipMalloc((void **)&d_a, size); hipMalloc((void **)&d_b, size); hipMalloc((void **)&d_c, size); // initialize vectors for (i=0; i<N; i++){ h_a[i] = i+1; h_b[i] = i+1; } // copy input data to device hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice); // add vectors by launching a sufficient number of blocks of the add() kernel nblock = (N+TPB-1)/TPB; printf("\nLaunching vector addition kernel...\n"); printf("Vector length = %d\n",N); printf("Blocks = %d\n",nblock); printf("Threads per block = %d\n",TPB); printf("Kernel copies = %d\n",nblock*TPB); hipLaunchKernelGGL(( add), dim3(nblock),dim3(TPB), 0, 0, d_a, d_b, d_c, N); // copy results back to host hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost); // deallocate memory hipFree(d_a); hipFree(d_b); hipFree(d_c); // check results err = 0; for (i=0; i<N; i++){ if (h_c[i] != 2*(i+1)) err += 1; } if (err != 0){ printf("\n Error, %d elements do not match!\n\n", err); } else { printf("\n Success! All elements match.\n\n"); } // deallocate host memory free(h_a); free(h_b); free(h_c); return err; }
a1c4a66c4e351fcc53d5136708863db2179c0273.cu
// SDSC SCC Training - GPU Computing and Programming // May 3, 2019 // Andreas Goetz ([email protected]) // CUDA program to add two vectors in parallel on the GPU // launch all kernels at once // #include<stdio.h> // define vector length and threads per block #define N (255*4096) #define TPB 512 // // CUDA device function that adds two integer vectors // __global__ void add(int *a, int *b, int *c, int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < n) c[tid] = a[tid] + b[tid]; } // // main program // int main(void){ int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; int size = N * sizeof(int); int i, nblock, err; // allocate host memory h_a = (int *) malloc(size); h_b = (int *) malloc(size); h_c = (int *) malloc(size); // allocate device memory cudaMalloc((void **)&d_a, size); cudaMalloc((void **)&d_b, size); cudaMalloc((void **)&d_c, size); // initialize vectors for (i=0; i<N; i++){ h_a[i] = i+1; h_b[i] = i+1; } // copy input data to device cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice); // add vectors by launching a sufficient number of blocks of the add() kernel nblock = (N+TPB-1)/TPB; printf("\nLaunching vector addition kernel...\n"); printf("Vector length = %d\n",N); printf("Blocks = %d\n",nblock); printf("Threads per block = %d\n",TPB); printf("Kernel copies = %d\n",nblock*TPB); add<<<nblock,TPB>>>(d_a, d_b, d_c, N); // copy results back to host cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost); // deallocate memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); // check results err = 0; for (i=0; i<N; i++){ if (h_c[i] != 2*(i+1)) err += 1; } if (err != 0){ printf("\n Error, %d elements do not match!\n\n", err); } else { printf("\n Success! All elements match.\n\n"); } // deallocate host memory free(h_a); free(h_b); free(h_c); return err; }
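The vector-addition pair above verifies results on the host but never checks the CUDA API return codes. A common companion pattern, sketched below under the assumption of the same add() kernel and problem size, wraps each runtime call in an error-checking macro so that allocation, copy, or launch failures are reported immediately; the CUDA_CHECK name is an illustrative choice, not something defined in the files above.

// Hypothetical error-checking companion to the vector-add example above.
// CUDA_CHECK is an assumed helper name; the kernel and sizes mirror the file.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                     \
  do {                                                                       \
    cudaError_t err_ = (call);                                               \
    if (err_ != cudaSuccess) {                                               \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                            \
              cudaGetErrorString(err_), __FILE__, __LINE__);                 \
      exit(EXIT_FAILURE);                                                    \
    }                                                                        \
  } while (0)

__global__ void add(int *a, int *b, int *c, int n) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < n) c[tid] = a[tid] + b[tid];
}

int main(void) {
  const int n = 255 * 4096, tpb = 512;
  const size_t size = n * sizeof(int);
  int *h_a = (int *)malloc(size), *h_b = (int *)malloc(size), *h_c = (int *)malloc(size);
  for (int i = 0; i < n; i++) { h_a[i] = i + 1; h_b[i] = i + 1; }

  int *d_a, *d_b, *d_c;
  CUDA_CHECK(cudaMalloc((void **)&d_a, size));
  CUDA_CHECK(cudaMalloc((void **)&d_b, size));
  CUDA_CHECK(cudaMalloc((void **)&d_c, size));
  CUDA_CHECK(cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice));

  add<<<(n + tpb - 1) / tpb, tpb>>>(d_a, d_b, d_c, n);
  CUDA_CHECK(cudaGetLastError());        // catches launch-configuration errors
  CUDA_CHECK(cudaDeviceSynchronize());   // catches errors raised during kernel execution

  CUDA_CHECK(cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost));
  printf("c[0] = %d, c[n-1] = %d\n", h_c[0], h_c[n - 1]);

  CUDA_CHECK(cudaFree(d_a)); CUDA_CHECK(cudaFree(d_b)); CUDA_CHECK(cudaFree(d_c));
  free(h_a); free(h_b); free(h_c);
  return 0;
}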
9c59c893cf6235aea455be8aeb653d9cd0546aad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" namespace cv { namespace gpu { namespace device { namespace imgproc { template <typename T> __global__ void pyrUp(const PtrStepSz<T> src, PtrStepSz<T> dst) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ sum_t s_srcPatch[10][10]; __shared__ sum_t s_dstPatch[20][16]; if (threadIdx.x < 10 && threadIdx.y < 10) { int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1; int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1; srcx = ::abs(srcx); srcx = ::min(src.cols - 1, srcx); srcy = ::abs(srcy); srcy = ::min(src.rows - 1, srcy); s_srcPatch[threadIdx.y][threadIdx.x] = saturate_cast<sum_t>(src(srcy, srcx)); } __syncthreads(); sum_t sum = VecTraits<sum_t>::all(0); const int evenFlag = static_cast<int>((threadIdx.x & 1) == 0); const int oddFlag = static_cast<int>((threadIdx.x & 1) != 0); const bool eveny = ((threadIdx.y & 1) == 0); const int tidx = threadIdx.x; if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 2) >> 1)]; } s_dstPatch[2 + threadIdx.y][threadIdx.x] = sum; if (threadIdx.y < 2) { sum = VecTraits<sum_t>::all(0); if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[0][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx + 2) >> 1)]; } s_dstPatch[threadIdx.y][threadIdx.x] = sum; } if (threadIdx.y > 13) { sum = VecTraits<sum_t>::all(0); if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[9][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx + 2) >> 1)]; } s_dstPatch[4 + threadIdx.y][threadIdx.x] = sum; } __syncthreads(); sum = VecTraits<sum_t>::all(0); const int tidy = threadIdx.y; sum = sum + 0.0625f * s_dstPatch[2 + tidy - 2][threadIdx.x]; sum = sum + 0.25f * s_dstPatch[2 + tidy - 1][threadIdx.x]; sum = sum + 0.375f * s_dstPatch[2 + tidy ][threadIdx.x]; sum = sum + 0.25f * s_dstPatch[2 + tidy + 1][threadIdx.x]; sum = sum + 0.0625f * s_dstPatch[2 + tidy + 2][threadIdx.x]; if (x < dst.cols && y < dst.rows) dst(y, x) = saturate_cast<T>(4.0f * sum); } template <typename T> void pyrUp_caller(PtrStepSz<T> src, PtrStepSz<T> dst, hipStream_t stream) { const dim3 block(16, 16); const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); hipLaunchKernelGGL(( pyrUp), dim3(grid), dim3(block), 0, stream, src, dst); cudaSafeCall( hipGetLastError() ); if 
(stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <typename T> void pyrUp_gpu(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream) { pyrUp_caller<T>(static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(dst), stream); } template void pyrUp_gpu<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<uchar2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<uchar3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<uchar4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<char2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<char3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<char4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<ushort2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<ushort3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<ushort4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<short2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<short3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<short4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<int2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<int3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<int4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<float2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<float3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<float4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); } // namespace imgproc }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
9c59c893cf6235aea455be8aeb653d9cd0546aad.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/saturate_cast.hpp" namespace cv { namespace gpu { namespace device { namespace imgproc { template <typename T> __global__ void pyrUp(const PtrStepSz<T> src, PtrStepSz<T> dst) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ sum_t s_srcPatch[10][10]; __shared__ sum_t s_dstPatch[20][16]; if (threadIdx.x < 10 && threadIdx.y < 10) { int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1; int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1; srcx = ::abs(srcx); srcx = ::min(src.cols - 1, srcx); srcy = ::abs(srcy); srcy = ::min(src.rows - 1, srcy); s_srcPatch[threadIdx.y][threadIdx.x] = saturate_cast<sum_t>(src(srcy, srcx)); } __syncthreads(); sum_t sum = VecTraits<sum_t>::all(0); const int evenFlag = static_cast<int>((threadIdx.x & 1) == 0); const int oddFlag = static_cast<int>((threadIdx.x & 1) != 0); const bool eveny = ((threadIdx.y & 1) == 0); const int tidx = threadIdx.x; if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 2) >> 1)]; } s_dstPatch[2 + threadIdx.y][threadIdx.x] = sum; if (threadIdx.y < 2) { sum = VecTraits<sum_t>::all(0); if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[0][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx + 2) >> 1)]; } s_dstPatch[threadIdx.y][threadIdx.x] = sum; } if (threadIdx.y > 13) { sum = VecTraits<sum_t>::all(0); if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[9][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx + 2) >> 1)]; } s_dstPatch[4 + threadIdx.y][threadIdx.x] = sum; } __syncthreads(); sum = VecTraits<sum_t>::all(0); const int tidy = threadIdx.y; sum = sum + 0.0625f * s_dstPatch[2 + tidy - 2][threadIdx.x]; sum = sum + 0.25f * s_dstPatch[2 + tidy - 1][threadIdx.x]; sum = sum + 0.375f * s_dstPatch[2 + tidy ][threadIdx.x]; sum = sum + 0.25f * s_dstPatch[2 + tidy + 1][threadIdx.x]; sum = sum + 0.0625f * s_dstPatch[2 + tidy + 2][threadIdx.x]; if (x < dst.cols && y < dst.rows) dst(y, x) = saturate_cast<T>(4.0f * sum); } template <typename T> void pyrUp_caller(PtrStepSz<T> src, PtrStepSz<T> dst, cudaStream_t stream) { const dim3 block(16, 16); const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); pyrUp<<<grid, block, 0, stream>>>(src, dst); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( 
cudaDeviceSynchronize() ); } template <typename T> void pyrUp_gpu(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream) { pyrUp_caller<T>(static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(dst), stream); } template void pyrUp_gpu<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<uchar2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<uchar3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<uchar4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<char2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<char3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<char4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<ushort2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<ushort3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<ushort4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<short2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<short3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<short4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<int2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<int3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<int4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<float2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<float3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<float4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); } // namespace imgproc }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
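// Illustrative sketch (not part of the OpenCV sources above): the .hip and .cu
// versions of this pyrUp file differ mainly in the runtime prefix (hip*/cuda*)
// and in the kernel-launch syntax. The toy `scaleKernel` below is hypothetical
// and only shows that launch mapping.
#include <cuda_runtime.h>

__global__ void scaleKernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor; // simple element-wise scale
}

void launch_scale(float* d_data, float factor, int n, cudaStream_t stream)
{
    const dim3 block(256);
    const dim3 grid((n + block.x - 1) / block.x);

    // CUDA spelling, as used in the .cu file above:
    scaleKernel<<<grid, block, 0, stream>>>(d_data, factor, n);

    // HIP spelling produced by hipify, as used in the .hip file above:
    //   hipLaunchKernelGGL(scaleKernel, dim3(grid), dim3(block), 0, stream,
    //                      d_data, factor, n);
}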
b11a9c05003ecbe2a46e7b71f93c20afbcab3fee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) GeneralOCR. All rights reserved #include <hip/hip_fp16.h> #include "common_cuda_helper.hpp" #include "deform_conv_cuda_kernel.cuh" #include "trt_cuda_helper.cuh" #include "trt_plugin_helper.hpp" template <typename T> void trt_deformable_im2col(const T* data_input, const T* data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, T* data_col, hipStream_t stream) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<T>) , dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, data_input, data_offset, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col); cudaCheckError(); } template <typename scalar_t> void DeformConvForwardCUDAKernelLauncher( const scalar_t* input, const scalar_t* weight, const scalar_t* offset, scalar_t* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, hipblasHandle_t cublas_handle, hipStream_t stream) { size_t word_size = sizeof(scalar_t); im2col_step = ::min(int(batchSize), im2col_step); long outputWidth = (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; long long columns_size = general_ocr::getAlignedSize(nInputPlane * kW * kH * im2col_step * outputHeight * outputWidth * word_size); // column buffer for img2col scalar_t* columns = (scalar_t*)workspace; workspace = workspace + columns_size; scalar_t* output_buffer; long long output_buffer_size = 0; if (im2col_step == 1) { output_buffer = output; } else { // output need permute when im2col_step!=1 output_buffer = (scalar_t*)workspace; output_buffer_size = batchSize * nOutputPlane * outputWidth * outputHeight; } long long input_elt_step = im2col_step * nInputPlane * inputHeight * inputWidth; long long offset_elt_step = im2col_step * deformable_group * 2 * kH * kW * outputHeight * outputWidth; long long out_buffer_step = nOutputPlane * im2col_step * outputHeight * outputWidth; long long col_g_step = nInputPlane * kW * kH / group * im2col_step * outputHeight * outputWidth; long long weight_g_step = nOutputPlane / group * nInputPlane / group * kH * kW; long long out_buffer_g_step = nOutputPlane / group * im2col_step * outputHeight * outputWidth; int m = nOutputPlane / group; int n = im2col_step * outputHeight * outputWidth; int k = nInputPlane / group * kH * kW; scalar_t alpha = 1.; scalar_t beta = 0.; for (int elt = 0; elt < batchSize / im2col_step; elt++) { const scalar_t* input_start = input + elt * input_elt_step; const scalar_t* offset_start = offset + elt * offset_elt_step; trt_deformable_im2col<scalar_t>(input_start, offset_start, nInputPlane, 
inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, im2col_step, deformable_group, columns, stream); for (int g = 0; g < group; ++g) { const scalar_t* weight_start = weight + g * weight_g_step; scalar_t* col_start = columns + g * col_g_step; scalar_t* out_buffer_start = output_buffer + elt * out_buffer_step + g * out_buffer_g_step; cublasGemmWrap<scalar_t>(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, col_start, n, weight_start, k, &beta, out_buffer_start, n); cudaCheckError(); } } if (im2col_step != 1) { int output_buffer_shape[5] = {batchSize / im2col_step, nOutputPlane, im2col_step, outputHeight, outputWidth}; int output_buffer_permute[5] = {0, 2, 1, 3, 4}; memcpyPermute<scalar_t>(output, output_buffer, &output_buffer_shape[0], &output_buffer_permute[0], 5, stream); } } void DeformConvForwardCUDAKernelLauncher_float( const float* input, const float* weight, const float* offset, float* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, hipblasHandle_t cublas_handle, hipStream_t stream) { DeformConvForwardCUDAKernelLauncher<float>( input, weight, offset, output, workspace, batchSize, nInputPlane, inputHeight, inputWidth, nOutputPlane, kW, kH, dW, dH, padW, padH, dilationW, dilationH, group, deformable_group, im2col_step, cublas_handle, stream); }
b11a9c05003ecbe2a46e7b71f93c20afbcab3fee.cu
// Copyright (c) GeneralOCR. All rights reserved #include <cuda_fp16.h> #include "common_cuda_helper.hpp" #include "deform_conv_cuda_kernel.cuh" #include "trt_cuda_helper.cuh" #include "trt_plugin_helper.hpp" template <typename T> void trt_deformable_im2col(const T* data_input, const T* data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, T* data_col, cudaStream_t stream) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; deformable_im2col_gpu_kernel<T> <<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, data_input, data_offset, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col); cudaCheckError(); } template <typename scalar_t> void DeformConvForwardCUDAKernelLauncher( const scalar_t* input, const scalar_t* weight, const scalar_t* offset, scalar_t* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, cublasHandle_t cublas_handle, cudaStream_t stream) { size_t word_size = sizeof(scalar_t); im2col_step = std::min(int(batchSize), im2col_step); long outputWidth = (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; long outputHeight = (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; long long columns_size = general_ocr::getAlignedSize(nInputPlane * kW * kH * im2col_step * outputHeight * outputWidth * word_size); // column buffer for img2col scalar_t* columns = (scalar_t*)workspace; workspace = workspace + columns_size; scalar_t* output_buffer; long long output_buffer_size = 0; if (im2col_step == 1) { output_buffer = output; } else { // output need permute when im2col_step!=1 output_buffer = (scalar_t*)workspace; output_buffer_size = batchSize * nOutputPlane * outputWidth * outputHeight; } long long input_elt_step = im2col_step * nInputPlane * inputHeight * inputWidth; long long offset_elt_step = im2col_step * deformable_group * 2 * kH * kW * outputHeight * outputWidth; long long out_buffer_step = nOutputPlane * im2col_step * outputHeight * outputWidth; long long col_g_step = nInputPlane * kW * kH / group * im2col_step * outputHeight * outputWidth; long long weight_g_step = nOutputPlane / group * nInputPlane / group * kH * kW; long long out_buffer_g_step = nOutputPlane / group * im2col_step * outputHeight * outputWidth; int m = nOutputPlane / group; int n = im2col_step * outputHeight * outputWidth; int k = nInputPlane / group * kH * kW; scalar_t alpha = 1.; scalar_t beta = 0.; for (int elt = 0; elt < batchSize / im2col_step; elt++) { const scalar_t* input_start = input + elt * input_elt_step; const scalar_t* offset_start = offset + elt * offset_elt_step; trt_deformable_im2col<scalar_t>(input_start, offset_start, nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, dilationH, dilationW, im2col_step, deformable_group, columns, stream); 
for (int g = 0; g < group; ++g) { const scalar_t* weight_start = weight + g * weight_g_step; scalar_t* col_start = columns + g * col_g_step; scalar_t* out_buffer_start = output_buffer + elt * out_buffer_step + g * out_buffer_g_step; cublasGemmWrap<scalar_t>(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, col_start, n, weight_start, k, &beta, out_buffer_start, n); cudaCheckError(); } } if (im2col_step != 1) { int output_buffer_shape[5] = {batchSize / im2col_step, nOutputPlane, im2col_step, outputHeight, outputWidth}; int output_buffer_permute[5] = {0, 2, 1, 3, 4}; memcpyPermute<scalar_t>(output, output_buffer, &output_buffer_shape[0], &output_buffer_permute[0], 5, stream); } } void DeformConvForwardCUDAKernelLauncher_float( const float* input, const float* weight, const float* offset, float* output, void* workspace, int batchSize, int nInputPlane, int inputHeight, int inputWidth, int nOutputPlane, int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH, int group, int deformable_group, int im2col_step, cublasHandle_t cublas_handle, cudaStream_t stream) { DeformConvForwardCUDAKernelLauncher<float>( input, weight, offset, output, workspace, batchSize, nInputPlane, inputHeight, inputWidth, nOutputPlane, kW, kH, dW, dH, padW, padH, dilationW, dilationH, group, deformable_group, im2col_step, cublas_handle, stream); }
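// Illustrative sketch (not part of the sources above): the deformable-conv
// launcher differs between the .hip and .cu versions mainly in the BLAS
// spellings (cublas*/CUBLAS_OP_N versus hipblas*/HIPBLAS_OP_N) behind the
// cublasGemmWrap<> helper. The stand-alone GEMM call below uses made-up sizes
// and omits error checking, so treat it as an illustration only.
#include <cublas_v2.h>
#include <cuda_runtime.h>

void tiny_gemm_example()
{
    const int m = 4, n = 8, k = 16;            // arbitrary sizes for illustration
    float *A, *B, *C;                           // column-major, as cuBLAS expects
    cudaMalloc(&A, sizeof(float) * m * k);
    cudaMalloc(&B, sizeof(float) * k * n);
    cudaMalloc(&C, sizeof(float) * m * n);

    cublasHandle_t handle;
    cublasCreate(&handle);

    const float alpha = 1.f, beta = 0.f;
    // C = A * B; the HIP build would call hipblasSgemm with HIPBLAS_OP_N instead.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                m, n, k, &alpha, A, m, B, k, &beta, C, m);

    cublasDestroy(handle);
    cudaFree(A); cudaFree(B); cudaFree(C);
}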
8ec3084b80ff7768c61c24fb4500f2fc8a03d34b.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef PADDLE_WITH_HIP // HIP not support cusolver #include "paddle/phi/kernels/matrix_rank_tol_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/backends/dynload/cusolver.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/abs_kernel.h" #include "paddle/phi/kernels/elementwise_multiply_kernel.h" #include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/compare_functors.h" #include "paddle/phi/kernels/impl/matrix_rank_kernel_impl.h" #include "paddle/phi/kernels/reduce_max_kernel.h" #include "paddle/phi/kernels/reduce_sum_kernel.h" namespace phi { template <typename T> static void GesvdjBatched(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, T* A, T* U, T* V, T* S, int* info, int thin_UV = 1); template <typename T> void SyevjBatched(const phi::GPUContext& dev_ctx, int batchSize, int n, T* A, T* W, int* info); template <> void GesvdjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, float* A, float* U, float* V, float* S, int* info, int thin_UV) { // do not compute singular vectors const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; hipsolverGesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnSgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(float), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); int error_info; memory_utils::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void GesvdjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, double* A, double* U, double* V, double* S, int* info, int thin_UV) { // do not compute singular vectors const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; hipsolverGesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(double), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; ++i) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); // check the error info int error_info; memory_utils::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void SyevjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int n, float* A, float* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; // matrix is saved as column-major in cusolver. // numpy and torch use lower triangle to compute eigenvalues, so here use // upper triangle hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; hipsolverSyevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(float), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; memory_utils::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDestroySyevjInfo(params)); } template <> void SyevjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int n, double* A, double* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; // upper triangle of A is stored hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; hipsolverSyevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(double), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; memory_utils::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDestroySyevjInfo(params)); } template <typename T, typename Context> void MatrixRankTolKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& atol_tensor, bool use_default_tol, bool hermitian, DenseTensor* out) { auto* x_data = x.data<T>(); dev_ctx.template Alloc<int64_t>(out); auto dim_x = x.dims(); auto dim_out = out->dims(); int rows = dim_x[dim_x.size() - 2]; int cols = dim_x[dim_x.size() - 1]; int k = ::min(rows, cols); auto numel = x.numel(); int batches = numel / (rows * cols); T rtol_T = 0; if (use_default_tol) { rtol_T = std::numeric_limits<T>::epsilon() * ::max(rows, cols); } // Must Copy X once, because the gesvdj will destory the content when exit. 
DenseTensor x_tmp; phi::Copy(dev_ctx, x, dev_ctx.GetPlace(), false, &x_tmp); auto info = phi::memory_utils::Alloc( dev_ctx.GetPlace(), sizeof(int) * batches, phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); int* info_ptr = reinterpret_cast<int*>(info->ptr()); DenseTensor eigenvalue_tensor; eigenvalue_tensor.Resize(detail::GetEigenvalueDim(dim_x, k)); auto* eigenvalue_data = dev_ctx.template Alloc<T>(&eigenvalue_tensor); if (hermitian) { SyevjBatched<T>( dev_ctx, batches, rows, x_tmp.data<T>(), eigenvalue_data, info_ptr); phi::AbsKernel<T, Context>(dev_ctx, eigenvalue_tensor, &eigenvalue_tensor); } else { DenseTensor U, VH; U.Resize(detail::GetUDDim(dim_x, k)); VH.Resize(detail::GetVHDDim(dim_x, k)); auto* u_data = dev_ctx.template Alloc<T>(&U); auto* vh_data = dev_ctx.template Alloc<T>(&VH); GesvdjBatched<T>(dev_ctx, batches, cols, rows, k, x_tmp.data<T>(), vh_data, u_data, eigenvalue_data, info_ptr, 1); } DenseTensor max_eigenvalue_tensor; dev_ctx.template Alloc<T>(&max_eigenvalue_tensor); max_eigenvalue_tensor.Resize(detail::RemoveLastDim(eigenvalue_tensor.dims())); phi::MaxKernel<T, Context>(dev_ctx, eigenvalue_tensor, phi::IntArray({-1}), false, &max_eigenvalue_tensor); DenseTensor temp_rtol_tensor; temp_rtol_tensor = phi::Full<T, Context>(dev_ctx, {1}, static_cast<T>(rtol_T)); DenseTensor rtol_tensor = phi::Multiply<T>(dev_ctx, temp_rtol_tensor, max_eigenvalue_tensor); DenseTensor tol_tensor; tol_tensor.Resize(dim_out); dev_ctx.template Alloc<T>(&tol_tensor); funcs::ElementwiseCompute<GreaterElementFunctor<T>, T>( dev_ctx, atol_tensor, rtol_tensor, GreaterElementFunctor<T>(), &tol_tensor); tol_tensor.Resize(detail::NewAxisDim(tol_tensor.dims(), 1)); DenseTensor compare_result; compare_result.Resize(detail::NewAxisDim(dim_out, k)); dev_ctx.template Alloc<int64_t>(&compare_result); funcs::ElementwiseCompute<funcs::GreaterThanFunctor<T, int64_t>, T, int64_t>( dev_ctx, eigenvalue_tensor, tol_tensor, funcs::GreaterThanFunctor<T, int64_t>(), &compare_result); phi::SumKernel<int64_t>(dev_ctx, compare_result, std::vector<int64_t>{-1}, compare_result.dtype(), false, out); } } // namespace phi PD_REGISTER_KERNEL(matrix_rank_tol, // cuda_only GPU, ALL_LAYOUT, phi::MatrixRankTolKernel, float, double) { kernel->OutputAt(0).SetDataType(phi::DataType::INT64); } #endif // not PADDLE_WITH_HIP
8ec3084b80ff7768c61c24fb4500f2fc8a03d34b.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef PADDLE_WITH_HIP // HIP not support cusolver #include "paddle/phi/kernels/matrix_rank_tol_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/backends/dynload/cusolver.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/abs_kernel.h" #include "paddle/phi/kernels/elementwise_multiply_kernel.h" #include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/compare_functors.h" #include "paddle/phi/kernels/impl/matrix_rank_kernel_impl.h" #include "paddle/phi/kernels/reduce_max_kernel.h" #include "paddle/phi/kernels/reduce_sum_kernel.h" namespace phi { template <typename T> static void GesvdjBatched(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, T* A, T* U, T* V, T* S, int* info, int thin_UV = 1); template <typename T> void SyevjBatched(const phi::GPUContext& dev_ctx, int batchSize, int n, T* A, T* W, int* info); template <> void GesvdjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, float* A, float* U, float* V, float* S, int* info, int thin_UV) { // do not compute singular vectors const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; gesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnSgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(float), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); int error_info; memory_utils::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void GesvdjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, double* A, double* U, double* V, double* S, int* info, int thin_UV) { // do not compute singular vectors const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; gesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(double), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; ++i) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); // check the error info int error_info; memory_utils::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void SyevjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int n, float* A, float* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; // matrix is saved as column-major in cusolver. // numpy and torch use lower triangle to compute eigenvalues, so here use // upper triangle cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; syevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(float), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; memory_utils::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDestroySyevjInfo(params)); } template <> void SyevjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int n, double* A, double* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; // upper triangle of A is stored cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; syevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(double), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; memory_utils::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDestroySyevjInfo(params)); } template <typename T, typename Context> void MatrixRankTolKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& atol_tensor, bool use_default_tol, bool hermitian, DenseTensor* out) { auto* x_data = x.data<T>(); dev_ctx.template Alloc<int64_t>(out); auto dim_x = x.dims(); auto dim_out = out->dims(); int rows = dim_x[dim_x.size() - 2]; int cols = dim_x[dim_x.size() - 1]; int k = std::min(rows, cols); auto numel = x.numel(); int batches = numel / (rows * cols); T rtol_T = 0; if (use_default_tol) { rtol_T = std::numeric_limits<T>::epsilon() * std::max(rows, cols); } // Must Copy X once, because the gesvdj will destory the content when exit. 
DenseTensor x_tmp; phi::Copy(dev_ctx, x, dev_ctx.GetPlace(), false, &x_tmp); auto info = phi::memory_utils::Alloc( dev_ctx.GetPlace(), sizeof(int) * batches, phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); int* info_ptr = reinterpret_cast<int*>(info->ptr()); DenseTensor eigenvalue_tensor; eigenvalue_tensor.Resize(detail::GetEigenvalueDim(dim_x, k)); auto* eigenvalue_data = dev_ctx.template Alloc<T>(&eigenvalue_tensor); if (hermitian) { SyevjBatched<T>( dev_ctx, batches, rows, x_tmp.data<T>(), eigenvalue_data, info_ptr); phi::AbsKernel<T, Context>(dev_ctx, eigenvalue_tensor, &eigenvalue_tensor); } else { DenseTensor U, VH; U.Resize(detail::GetUDDim(dim_x, k)); VH.Resize(detail::GetVHDDim(dim_x, k)); auto* u_data = dev_ctx.template Alloc<T>(&U); auto* vh_data = dev_ctx.template Alloc<T>(&VH); GesvdjBatched<T>(dev_ctx, batches, cols, rows, k, x_tmp.data<T>(), vh_data, u_data, eigenvalue_data, info_ptr, 1); } DenseTensor max_eigenvalue_tensor; dev_ctx.template Alloc<T>(&max_eigenvalue_tensor); max_eigenvalue_tensor.Resize(detail::RemoveLastDim(eigenvalue_tensor.dims())); phi::MaxKernel<T, Context>(dev_ctx, eigenvalue_tensor, phi::IntArray({-1}), false, &max_eigenvalue_tensor); DenseTensor temp_rtol_tensor; temp_rtol_tensor = phi::Full<T, Context>(dev_ctx, {1}, static_cast<T>(rtol_T)); DenseTensor rtol_tensor = phi::Multiply<T>(dev_ctx, temp_rtol_tensor, max_eigenvalue_tensor); DenseTensor tol_tensor; tol_tensor.Resize(dim_out); dev_ctx.template Alloc<T>(&tol_tensor); funcs::ElementwiseCompute<GreaterElementFunctor<T>, T>( dev_ctx, atol_tensor, rtol_tensor, GreaterElementFunctor<T>(), &tol_tensor); tol_tensor.Resize(detail::NewAxisDim(tol_tensor.dims(), 1)); DenseTensor compare_result; compare_result.Resize(detail::NewAxisDim(dim_out, k)); dev_ctx.template Alloc<int64_t>(&compare_result); funcs::ElementwiseCompute<funcs::GreaterThanFunctor<T, int64_t>, T, int64_t>( dev_ctx, eigenvalue_tensor, tol_tensor, funcs::GreaterThanFunctor<T, int64_t>(), &compare_result); phi::SumKernel<int64_t>(dev_ctx, compare_result, std::vector<int64_t>{-1}, compare_result.dtype(), false, out); } } // namespace phi PD_REGISTER_KERNEL(matrix_rank_tol, // cuda_only GPU, ALL_LAYOUT, phi::MatrixRankTolKernel, float, double) { kernel->OutputAt(0).SetDataType(phi::DataType::INT64); } #endif // not PADDLE_WITH_HIP
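// Illustrative sketch (not part of the Paddle sources above): both versions of
// the matrix_rank kernel follow the cuSOLVER convention of writing a per-batch
// status into a device-side `info` integer and copying it to the host to check
// that it is zero. A minimal form of that pattern, independent of Paddle's
// memory_utils, is sketched below.
#include <cuda_runtime.h>
#include <cstdio>

bool check_solver_info(const int* d_info, cudaStream_t stream)
{
    int h_info = 0;
    // Copy the status flag written by the solver back to the host.
    cudaMemcpyAsync(&h_info, d_info, sizeof(int), cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    if (h_info != 0)
        std::fprintf(stderr, "solver reported info = %d (0 means success)\n", h_info);
    return h_info == 0;
}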
a86c127ef0f0471402753bd511af69aabe0721db.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void blockcopyToOpenMM( float *target, float *source, int *blocks, int numblocks, int setnum, int N )
{
    int blockNum = blockIdx.x * blockDim.x + threadIdx.x;
    int dof = 3 * blocks[blockNum] + setnum;
    int atom = dof / 3;
    if( atom >= N || ( blockNum != numblocks && atom >= blocks[blockNum + 1] ) ) {
        return; // Out of bounds
    }

    *( target + ( dof + atom + 1 )*sizeof( float ) ) = source[dof]; // Save the old
}
a86c127ef0f0471402753bd511af69aabe0721db.cu
#include "includes.h"

__global__ void blockcopyToOpenMM( float *target, float *source, int *blocks, int numblocks, int setnum, int N )
{
    int blockNum = blockIdx.x * blockDim.x + threadIdx.x;
    int dof = 3 * blocks[blockNum] + setnum;
    int atom = dof / 3;
    if( atom >= N || ( blockNum != numblocks && atom >= blocks[blockNum + 1] ) ) {
        return; // Out of bounds
    }

    *( target + ( dof + atom + 1 )*sizeof( float ) ) = source[dof]; // Save the old
}
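// Editorial sketch (not part of the sources above, kept verbatim in both files):
// `target` is a float*, yet the store scales the index by sizeof(float), which
// on a typed pointer multiplies the element offset by four. That may be
// intentional depending on how the caller lays out `target`, so the sources are
// left as they are; for reference, the usual element- versus byte-addressed
// spellings are shown below.
#include <cuda_runtime.h>

__device__ void write_element(float* buf, int i, float value)
{
    buf[i] = value;      // element indexing on a typed pointer
    *(buf + i) = value;  // identical meaning, pointer form
    // Byte-based indexing would instead go through a char* first:
    // *reinterpret_cast<float*>(reinterpret_cast<char*>(buf) + i * sizeof(float)) = value;
}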
d6c79363a4ba5ab522b1d3b21c304785acb94c7d.hip
// !!! This is a file automatically generated by hipify!!!
#include <GL/glew.h>
#include <GL/gl.h>
#include <cuda_gl_interop.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>

extern "C" void sort_pixels(size_t num_pixels);
extern "C" void register_buffer(GLuint buffer);

static GLuint bufferObj;
static cudaGraphicsResource *resource;

struct sort_functor
{
    __host__ __device__
    bool operator()(float4 left, float4 right) const
    {
        return (left.z < right.z);
    }
};

extern "C" void sort_pixels(size_t num_pixels)
{
    hipGraphicsMapResources(1, &resource, NULL);

    float4* devPtr;
    size_t size;
    hipGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resource);

    thrust::device_ptr<float4> tptr = thrust::device_pointer_cast(devPtr);
    thrust::sort(tptr, tptr + (num_pixels), sort_functor());

    hipGraphicsUnmapResources(1, &resource, NULL);
}

extern "C" void register_buffer(GLuint buffer)
{
    bufferObj = buffer;
    hipGraphicsGLRegisterBuffer(&resource, bufferObj, hipGraphicsMapFlagsNone);
}
d6c79363a4ba5ab522b1d3b21c304785acb94c7d.cu
#include <GL/glew.h>
#include <GL/gl.h>
#include <cuda_gl_interop.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>

extern "C" void sort_pixels(size_t num_pixels);
extern "C" void register_buffer(GLuint buffer);

static GLuint bufferObj;
static cudaGraphicsResource *resource;

struct sort_functor
{
    __host__ __device__
    bool operator()(float4 left, float4 right) const
    {
        return (left.z < right.z);
    }
};

extern "C" void sort_pixels(size_t num_pixels)
{
    cudaGraphicsMapResources(1, &resource, NULL);

    float4* devPtr;
    size_t size;
    cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resource);

    thrust::device_ptr<float4> tptr = thrust::device_pointer_cast(devPtr);
    thrust::sort(tptr, tptr + (num_pixels), sort_functor());

    cudaGraphicsUnmapResources(1, &resource, NULL);
}

extern "C" void register_buffer(GLuint buffer)
{
    bufferObj = buffer;
    cudaGraphicsGLRegisterBuffer(&resource, bufferObj, cudaGraphicsMapFlagsNone);
}
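// Illustrative host-side usage of the two extern "C" entry points defined above
// (not part of the original sources; the buffer target, usage flags, and sizes
// are assumptions, and a valid GL context plus GLEW initialization are presumed
// to exist elsewhere).
#include <GL/glew.h>

extern "C" void sort_pixels(size_t num_pixels);
extern "C" void register_buffer(GLuint buffer);

void setup_and_sort(size_t num_pixels)
{
    GLuint pbo = 0;
    glGenBuffers(1, &pbo);
    glBindBuffer(GL_ARRAY_BUFFER, pbo);
    // One float4 per pixel, matching the element type used by the sort functor.
    glBufferData(GL_ARRAY_BUFFER, num_pixels * 4 * sizeof(float), nullptr, GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    register_buffer(pbo);    // register the GL buffer with the CUDA/HIP runtime
    sort_pixels(num_pixels); // depth-sort the float4 elements by their z value
}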
3fc0b93d8d37fded069f4dffbf05319681013985.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "math.hpp" #include "bbox_utils.hpp" #include "grid_stride_range.hpp" #include "block_stride_range.hpp" #include "execution.hpp" #include "vector_traits.hpp" #include "memory.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" #include "../cuda4dnn/csl/tensor.hpp" using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, bool SHARE_LOCATION, bool VARIANCE_ENCODED_IN_TARGET, bool CORNER_TRUE_CENTER_FALSE, bool CLIP_BBOX> __global__ void decode_bbox(Span<T> decoded_bboxes, View<T> locations, View<T> priors, bool transpose_location, bool normalized_bbox, size_type num_loc_classes, index_type background_class_id, float clip_width, float clip_height) { // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // locations: [batch_size, num_priors, num_loc_classes, 4] // priors: [1, C, num_priors, 4] // C = 2 if !VARIANCE_ENCODED_IN_TARGET; otherwise, 1 /* 4 bbox values + 4 variance values per prior */ constexpr int PRIOR_BOX_SIZE = VARIANCE_ENCODED_IN_TARGET ? 4 : 8; const size_type num_priors = priors.size() / PRIOR_BOX_SIZE; using vector_type = get_vector_type_t<T, 4>; auto locations_vPtr = vector_type::get_pointer(locations.data()); auto priors_vPtr = vector_type::get_pointer(priors.data()); auto decoded_bboxes_vPtr = vector_type::get_pointer(decoded_bboxes.data()); const auto boxes_per_batch = num_priors * num_loc_classes; for (auto idx : grid_stride_range(decoded_bboxes.size() / 4)) { index_type p; index_type c; if (SHARE_LOCATION) { // locations are shared across all classes => num_loc_classes = 1 p = idx % boxes_per_batch; c = 0; } else { p = (idx % boxes_per_batch) / num_loc_classes; c = idx % num_loc_classes; } if (!SHARE_LOCATION && c == background_class_id) continue; BoundingBox bbox; { vector_type location; v_load(location, locations_vPtr[idx]); if (transpose_location) { bbox.ymin = location.data[0]; bbox.xmin = location.data[1]; bbox.ymax = location.data[2]; bbox.xmax = location.data[3]; } else { bbox.xmin = location.data[0]; bbox.ymin = location.data[1]; bbox.xmax = location.data[2]; bbox.ymax = location.data[3]; } } if (!VARIANCE_ENCODED_IN_TARGET) { vector_type prior_variance; v_load_ldg(prior_variance, priors_vPtr[num_priors + p]); bbox.xmin *= static_cast<float>(prior_variance.data[0]); bbox.ymin *= static_cast<float>(prior_variance.data[1]); bbox.xmax *= static_cast<float>(prior_variance.data[2]); bbox.ymax *= static_cast<float>(prior_variance.data[3]); } BoundingBox prior; { vector_type prior_box; v_load_ldg(prior_box, priors_vPtr[p]); prior.xmin = prior_box.data[0]; prior.ymin = prior_box.data[1]; prior.xmax = prior_box.data[2]; prior.ymax = prior_box.data[3]; } BoundingBox decoded_bbox; if (CORNER_TRUE_CENTER_FALSE) { decoded_bbox.xmin = prior.xmin + bbox.xmin; decoded_bbox.ymin = prior.ymin + bbox.ymin; decoded_bbox.xmax = prior.xmax + bbox.xmax; decoded_bbox.ymax = prior.ymax + bbox.ymax; } else { auto prior_width = prior.xmax - prior.xmin; auto prior_height = prior.ymax - prior.ymin; if (!normalized_bbox) { prior_width += 1; prior_height += 1; } auto prior_center_x = prior.xmin + prior_width 
* 0.5f; auto prior_center_y = prior.ymin + prior_height * 0.5f; auto decode_bbox_center_x = bbox.xmin * prior_width + prior_center_x; auto decode_bbox_center_y = bbox.ymin * prior_height + prior_center_y; using device::exp; float decode_bbox_width = exp(bbox.xmax) * prior_width; float decode_bbox_height = exp(bbox.ymax) * prior_height; decoded_bbox.xmin = decode_bbox_center_x - decode_bbox_width * 0.5f; decoded_bbox.ymin = decode_bbox_center_y - decode_bbox_height * 0.5f; decoded_bbox.xmax = decode_bbox_center_x + decode_bbox_width * 0.5f; decoded_bbox.ymax = decode_bbox_center_y + decode_bbox_height * 0.5f; } vector_type decoded_bbox_vec; if (CLIP_BBOX) { decoded_bbox_vec.data[0] = clamp(decoded_bbox.xmin, 0.0f, clip_width); decoded_bbox_vec.data[1] = clamp(decoded_bbox.ymin, 0.0f, clip_height); decoded_bbox_vec.data[2] = clamp(decoded_bbox.xmax, 0.0f, clip_width); decoded_bbox_vec.data[3] = clamp(decoded_bbox.ymax, 0.0f, clip_height); } else { decoded_bbox_vec.data[0] = decoded_bbox.xmin; decoded_bbox_vec.data[1] = decoded_bbox.ymin; decoded_bbox_vec.data[2] = decoded_bbox.xmax; decoded_bbox_vec.data[3] = decoded_bbox.ymax; } v_store(decoded_bboxes_vPtr[idx], decoded_bbox_vec); } } template <class T, int BINS, int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ void findTopK(Span<int> indices_, Span<int> count_, View<T> scores_, float threshold, size_type classwise_topK, size_type num_classes, size_type num_priors, index_type background_class_id) { /* We need to sort boxes based on their confidence scores. The confidence scores fall in * the range [0.0, 1.0]. We break the range into bins and perform count sort. This is an * approximate algorithm. * * Each block handles a particular class of a particular batch item. */ const auto c = blockIdx.x; const auto b = blockIdx.y; if (c == background_class_id) return; // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // scores: [batch_size, num_classes, num_priors] auto count = count_.data() + b * num_classes + c; auto scores = scores_.data() + (b * num_classes + c) * num_priors; auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; /* We do not require a large number of bins to find the top K confidence scores. We will use * a reasonable number of bins which will fit in the shared memory. * * Note that smaller scores will have a smaller index, i.e. the `bins` are ordered in * ascending order. */ __shared__ int bins[BINS]; #pragma unroll for (int unroll = 0; unroll < BINS / BLOCK_SIZE; unroll++) bins[unroll * BLOCK_SIZE + threadIdx.x] = 0; __syncthreads(); for (auto i : block_stride_range<BLOCK_SIZE>(num_priors)) { const float confidence = load_ldg(scores[i]); if (confidence > threshold) { using device::fast_divide_ftz; auto conf_scaled = fast_divide_ftz(confidence - threshold, 1 - threshold); using device::clamp; int bin_index = conf_scaled * BINS; /* We store counts of confidence scores in the bins. Our ultimate goal is to store the indices * of the `classwise_topK` confidence values in the `indices` array. * * We use a little trick to parallelize the process of filling up the `indices` array. * We want every thread in the block to participate in the process. To do so, we want the * bins array to be shifted by one place to the left. We will be computing the suffix sum * of the bins array later. Details and reasons for doing so will be explained later. 
*/ bin_index = clamp<int>(bin_index, 0, BINS - 1) - 1; // shift left by one if (bin_index >= 0) atomicAdd(&bins[bin_index], 1); } } __syncthreads(); constexpr int WARP_SIZE = 32; /* must be equal to warpSize */ // FORWARD_COMPATIBILITY_TAG: WARP_SIZE_DEPENDENT_CODE if (threadIdx.x < WARP_SIZE) { /* We can compute suffix sum of an array in groups of N numbers. * Let N be 4 for this example. * * 1) Last 4 numbers * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * group suffix sum: 42 33 23 12 * * 2) Middle 4 numbers * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * group suffix sum: | 26 21 15 8 | * * We add `42` (first element in the previous group) to each element to get: * * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * | 68 63 57 50 | 42 33 23 12 * 3) First 4 numbers * * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * group suffix sum: 10 9 7 4 | * * We add `68` (first element in the previous group) to each element to get: * * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * group suffix sum: 78 77 75 72 | 68 63 57 50 | 42 33 23 12 * * What we are left with now is the suffix sum of the entire array. * * We use the aforementioned logic in the code below but work in groups of `warpSize`. */ /* We calculate suffix sums WARP_SIZE elements at a time starting from the right end. * Hence, we will need BINS / WARP_SIZE number of iterations. * * Each iteration uses shuffle instructions to exchange data between threads. Shuffle * instructions cannot be used in warp-divergent code. If the bins are a multiple of * the warpSize, all the threads in the warp will participate. */ static_assert(BINS % WARP_SIZE == 0, "number of bins must be a multiple of warp size"); const int thread_id = threadIdx.x; const int inverse_lane_id = WARP_SIZE - thread_id - 1; int previous_group_first_element = 0; for (int iter = BINS / WARP_SIZE - 1; iter >= 0; iter--) { const index_type idx = iter * WARP_SIZE + thread_id; auto value = bins[idx]; for (int i = 1; i < WARP_SIZE; i *= 2) { auto n = __shfl_down_sync(0xFFFFFFFF, value, i); if (inverse_lane_id >= i) value += n; } value += previous_group_first_element; bins[idx] = value; previous_group_first_element = __shfl_sync(0xFFFFFFFF, value, 0); } } if (threadIdx.x == 0) *count = 0; __syncthreads(); for (auto i : block_stride_range<BLOCK_SIZE>(num_priors)) { const float confidence = load_ldg(scores[i]); if (confidence > threshold) { using device::fast_divide_ftz; auto conf_scaled = fast_divide_ftz(confidence - threshold, 1 - threshold); int bin_index = conf_scaled * BINS; bin_index = clamp<int>(bin_index, 0, BINS - 1); /* This bounding box is eligible to be selected unless it does not fall in * the `classwise_topK`. If it did, we would have to compute the location where it needs * to be stored. * * Suppose we had just 4 bins and say the following were the counts: * BIN0 2 * BIN1 1 * BIN2 3 * BIN3 0 (last bin is always zero as we shift left by one while populating the bins) * * We will try our best to store the boxes in a sorted order in the `indices` array. * This requires that the boxes in later bins (higher confidence scores) must be * stored earlier. * * We compute the suffix sum of the array. This gives us: * BIN0 6 * BIN1 4 * BIN2 3 * BIN3 0 * * The bins now give us the location in the `indices` array from which the indices of the * scores corresponding to that bin would be stored. We atomically increment the bin count * everytime we store a box corresponding to that bin. Therefore, the value in the bins * gives the index in the `indices` array where the next box corresponding to that bin must * be put. 
*/ const index_type idx = atomicAdd(&bins[bin_index], 1); if (idx < classwise_topK) { indices[idx] = i; atomicAdd(&count[0], 1); } } } } template <class T> __global__ void box_collect(Span<T> collected_bboxes_, View<T> decoded_bboxes_, View<int> indices_, View<int> count_, bool share_location, size_type num_priors, size_type num_classes, size_type classwise_topK, index_type background_class_id) { const index_type c = blockIdx.x; if (c == background_class_id) return; const index_type b = blockIdx.y; // collected_bboxes: [batch_size, num_classes, classwise_topK, 4] // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] const auto num_loc_classes = share_location ? 1 : num_classes; auto collected_bboxes = collected_bboxes_.data() + (b * num_classes + c) * classwise_topK * 4; auto decoded_bboxes = decoded_bboxes_.data() + b * num_priors * num_loc_classes * 4; auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; auto count = count_.data() + b * num_classes + c; const auto boxes = load_ldg(&count[0]); if (boxes == 0) return; using vector_type = get_vector_type_t<T, 4>; auto decoded_bboxes_vPtr = vector_type::get_pointer(decoded_bboxes); auto collected_bboxes_vPtr = vector_type::get_pointer(collected_bboxes); for (auto i : block_stride_range<>(boxes)) { const auto prior_id = indices[i]; const index_type idx = share_location ? prior_id : (prior_id * num_classes + c); vector_type box; v_load(box, decoded_bboxes_vPtr[idx]); v_store(collected_bboxes_vPtr[i], box); } } template <class T, bool NORMALIZED_BBOX> __global__ void blockwise_class_nms(Span<int> indices_, Span<int> count_, View<T> collected_bboxes_, size_type num_classes, size_type classwise_topK, index_type background_class_id, float nms_threshold) { const index_type b = blockIdx.x / num_classes; const index_type c = blockIdx.x % num_classes; if (c == background_class_id) return; // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // collected_bboxes: [batch_size, num_classes, classwise_topK, 4] auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; auto count = count_.data() + b * num_classes + c; auto collected_bboxes = collected_bboxes_.data() + (b * num_classes + c) * classwise_topK * 4; const auto boxes = count[0]; if (boxes == 0) return; using vector_type = get_vector_type_t<T, 4>; auto collected_bboxes_vPtr = vector_type::get_pointer(collected_bboxes); for (int i = 0; i < boxes; i++) { auto prior_id = indices[i]; if (prior_id != -1) { BoundingBox bbox1; { vector_type box; v_load(box, collected_bboxes_vPtr[i]); bbox1.xmin = box.data[0]; bbox1.ymin = box.data[1]; bbox1.xmax = box.data[2]; bbox1.ymax = box.data[3]; } for (auto j : block_stride_range<>(i + 1, boxes)) { prior_id = indices[j]; if (prior_id == -1) continue; BoundingBox bbox2; { vector_type box; v_load_ldg(box, collected_bboxes_vPtr[j]); bbox2.xmin = box.data[0]; bbox2.ymin = box.data[1]; bbox2.xmax = box.data[2]; bbox2.ymax = box.data[3]; } using device::min; using device::max; BoundingBox intersect_bbox; intersect_bbox.xmin = max(bbox1.xmin, bbox2.xmin); intersect_bbox.ymin = max(bbox1.ymin, bbox2.ymin); intersect_bbox.xmax = min(bbox1.xmax, bbox2.xmax); intersect_bbox.ymax = min(bbox1.ymax, bbox2.ymax); float intersect_size = compute_bbox_size<NORMALIZED_BBOX>(intersect_bbox); float bbox1_size = compute_bbox_size<NORMALIZED_BBOX>(bbox1); float bbox2_size = compute_bbox_size<NORMALIZED_BBOX>(bbox2); 
using device::fast_divide_ftz; float iou = fast_divide_ftz(intersect_size, bbox1_size + bbox2_size - intersect_size); if (iou > nms_threshold) indices[j] = -1; } } __syncthreads(); } if (threadIdx.x == 0) count[0] = 0; __syncthreads(); for (auto i : block_stride_range<>(boxes)) { auto prior_id = indices[i]; if(prior_id != -1) { const index_type idx = atomicAdd(&count[0], 1); indices[idx] = prior_id; } } } template <class T, std::size_t BINS, int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ void nms_collect( Span<int> kept_indices, Span<int> kept_count, View<int> indices_, View<int> count, View<T> scores_, float threshold, size_type num_classes, size_type num_priors, size_type classwise_topK, size_type keepTopK, index_type background_class_id) { // sorting algorithm is documented in detail in findTopK kernel comments // no explanations are provided here // kept_indices: [batch_size, keepTopK] // kept_count: [batch_size] const auto b = blockIdx.x; __shared__ int bins[BINS]; #pragma unroll for (int unroll = 0; unroll < BINS / BLOCK_SIZE; unroll++) bins[unroll * BLOCK_SIZE + threadIdx.x] = 0; __syncthreads(); for (int c = 0; c < num_classes; c++) { if (c == background_class_id) continue; // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // scores: [batch_size, num_classes, num_priors] const auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; const auto scores = scores_.data() + (b * num_classes + c) * num_priors; auto boxes = count[b * num_classes + c]; for (auto i : block_stride_range<BLOCK_SIZE>(boxes)) { auto prior_id = indices[i]; const float confidence = load_ldg(scores[prior_id]); if (confidence > threshold) { using device::fast_divide_ftz; auto conf_scaled = fast_divide_ftz(confidence - threshold, 1 - threshold); using device::clamp; int bin_index = conf_scaled * BINS; bin_index = clamp<int>(bin_index, 0, BINS - 1) - 1; // shift left by one if (bin_index >= 0) atomicAdd(&bins[bin_index], 1); } } } __syncthreads(); constexpr int WARP_SIZE = 32; /* must be equal to warpSize */ // FORWARD_COMPATIBILITY_TAG: WARP_SIZE_DEPENDENT_CODE if (threadIdx.x < WARP_SIZE) { static_assert(BINS % WARP_SIZE == 0, "number of bins must be a multiple of warp size"); const int thread_id = threadIdx.x; const int inverse_lane_id = WARP_SIZE - thread_id - 1; int previous_group_first_element = 0; for (int iter = BINS / WARP_SIZE - 1; iter >= 0; iter--) { const index_type idx = iter * WARP_SIZE + thread_id; auto value = bins[idx]; for (int i = 1; i < WARP_SIZE; i *= 2) { auto n = __shfl_down_sync(0xFFFFFFFF, value, i); if (inverse_lane_id >= i) value += n; } value += previous_group_first_element; bins[idx] = value; previous_group_first_element = __shfl_sync(0xFFFFFFFF, value, 0); } } if (threadIdx.x == 0) kept_count[b] = 0; __syncthreads(); for (int c = 0; c < num_classes; c++) { if (c == background_class_id) continue; const auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; const auto scores = scores_.data() + (b * num_classes + c) * num_priors; auto boxes = count[b * num_classes + c]; for (auto i : block_stride_range<BLOCK_SIZE>(boxes)) { auto prior_id = indices[i]; const float confidence = load_ldg(scores[prior_id]); if (confidence > threshold) { using device::fast_divide_ftz; auto conf_scaled = fast_divide_ftz(confidence - threshold, 1 - threshold); using device::clamp; int bin_index = conf_scaled * BINS; bin_index = clamp<int>(bin_index, 0, BINS - 1); const index_type idx = atomicAdd(&bins[bin_index], 1); if (idx < 
keepTopK) { kept_indices[b * keepTopK + idx] = c * num_priors + prior_id; atomicAdd(&kept_count[b], 1); } } } } } template <class T> __global__ void consolidate_detections(Span<T> output, View<int> kept_indices, View<int> kept_count, View<T> decoded_bboxes, View<T> scores, bool share_location, size_type batch_size, size_type num_classes, size_type num_priors, size_type keepTopK, DevicePtr<int> num_detections) { using vector_type = get_vector_type_t<T, 4>; auto decoded_bboxes_vPtr = vector_type::get_pointer(decoded_bboxes.data()); // output: [1, 1, batch_size * keepTopK, 7] // kept_indices: [batch_size, keepTopK] // kept_count: [batch_size] // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // scores: [batch_size, num_classes, num_priors] for (int b = 0; b < batch_size; b++) { for (auto i : grid_stride_range(kept_count[b])) { auto score_id = kept_indices[b * keepTopK + i]; auto c = score_id / num_priors; auto prior_id = score_id % num_priors; const auto confidence = scores[b * num_classes * num_priors + score_id]; index_type bbox_id; if (share_location) { // decoded_bboxes: [batch_size, num_priors, 1, 4] bbox_id = b * num_priors + prior_id; } else { // decoded_bboxes: [batch_size, num_priors, num_classes, 4] bbox_id = (b * num_priors + prior_id) * num_classes + c; } vector_type bbox; v_load(bbox, decoded_bboxes_vPtr[bbox_id]); auto output_id = atomicAdd(num_detections.get(), 1); output[output_id * 7 + 0] = b; output[output_id * 7 + 1] = c; output[output_id * 7 + 2] = confidence; output[output_id * 7 + 3] = bbox.data[0]; output[output_id * 7 + 4] = bbox.data[1]; output[output_id * 7 + 5] = bbox.data[2]; output[output_id * 7 + 6] = bbox.data[3]; } } } } template <class T, bool SHARE_LOCATION, bool VARIANCE_ENCODED_IN_TARGET, bool CORNER_TRUE_CENTER_FALSE, bool CLIP_BBOX> static void launch_decode_boxes_kernel(const Stream& stream, Span<T> decoded_bboxes, View<T> locations, View<T> priors, bool transpose_location, bool normalized_bbox, size_type num_loc_classes, index_type background_class_id, float clip_width, float clip_height) { auto kernel = raw::decode_bbox<T, SHARE_LOCATION, VARIANCE_ENCODED_IN_TARGET, CORNER_TRUE_CENTER_FALSE, CLIP_BBOX>; auto policy = make_policy(kernel, decoded_bboxes.size() / 4, 0, stream); launch_kernel(kernel, policy, decoded_bboxes, locations, priors, transpose_location, normalized_bbox, num_loc_classes, background_class_id, clip_width, clip_height); } template <class T, unsigned int current, class ...Args> static typename std::enable_if<current == 0, void> ::type dispatch_decode_bboxes(int selector, Args&& ...args) { if(selector == 0) launch_decode_boxes_kernel<T, 0, 0, 0, 0>(std::forward<Args>(args)...); } template <class T, unsigned int current, class ...Args> static typename std::enable_if<current != 0, void> ::type dispatch_decode_bboxes(int selector, Args&& ...args) { if(selector == current) launch_decode_boxes_kernel<T, static_cast<bool>(current & 8), static_cast<bool>(current & 4), static_cast<bool>(current & 2), static_cast<bool>(current & 1)>(std::forward<Args>(args)...); else dispatch_decode_bboxes<T, current - 1, Args...>(selector, std::forward<Args>(args)...); } template <class T> void decode_bboxes(const Stream& stream, Span<T> output, View<T> locations, View<T> priors, std::size_t num_loc_classes, bool share_location, std::size_t background_class_id, bool transpose_location, bool variance_encoded_in_target, bool corner_true_or_center_false, bool normalized_bbox, bool clip_box, float clip_width, float clip_height) { /* `config` combines 
three kernel template options into one number using which a bit of TMP code can * run through all possible combinations and instantiate the correct template */ unsigned int config = (share_location << 3 | variance_encoded_in_target << 2 | corner_true_or_center_false << 1 | clip_box); dispatch_decode_bboxes<T, 15>(config, stream, output, locations, priors, transpose_location, normalized_bbox, num_loc_classes, background_class_id, clip_width, clip_height); } template void decode_bboxes(const Stream&, Span<__half>, View<__half>, View<__half>, std::size_t, bool, std::size_t, bool, bool, bool, bool, bool, float, float); template void decode_bboxes(const Stream&, Span<float>, View<float>, View<float>, std::size_t, bool, std::size_t, bool, bool, bool, bool, bool, float, float); template <class T> void findTopK(const Stream& stream, TensorSpan<int> indices, TensorSpan<int> count, TensorView<T> scores, std::size_t background_class_id, float threshold) { // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // scores: [batch_size, num_classes, num_priors] const auto batch_size = indices.get_axis_size(0); CV_Assert(count.get_axis_size(0) == batch_size); CV_Assert(scores.get_axis_size(0) == batch_size); const auto num_classes = indices.get_axis_size(1); CV_Assert(count.get_axis_size(1) == num_classes); CV_Assert(scores.get_axis_size(1) == num_classes); const auto classwise_topK = indices.get_axis_size(2); const auto num_priors = scores.get_axis_size(2); /* each block processes one class from each batch */ constexpr auto BLOCK_SIZE = 256; dim3 grid_size(num_classes, batch_size); dim3 block_size(BLOCK_SIZE); auto policy = execution_policy(grid_size, block_size, stream); auto kernel = raw::findTopK<T, 2048, BLOCK_SIZE>; launch_kernel(kernel, policy, indices, count, scores, threshold, classwise_topK, num_classes, num_priors, background_class_id); } template void findTopK(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<__half>, std::size_t, float); template void findTopK(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<float>, std::size_t, float); template <class T> void box_collect(const Stream& stream, TensorSpan<T> collected_bboxes, TensorView<T> decoded_bboxes, TensorView<int> indices, TensorView<int> count, bool share_location, std::size_t background_class_id) { // collected_bboxes: [batch_size, num_classes, classwise_topK, 4] // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] const auto batch_size = collected_bboxes.get_axis_size(0); CV_Assert(decoded_bboxes.get_axis_size(0) == batch_size); CV_Assert(indices.get_axis_size(0) == batch_size); CV_Assert(count.get_axis_size(0) == batch_size); const auto num_classes = collected_bboxes.get_axis_size(1); CV_Assert(indices.get_axis_size(1) == num_classes); CV_Assert(count.get_axis_size(1) == num_classes); const auto classwise_topK = collected_bboxes.get_axis_size(2); CV_Assert(indices.get_axis_size(2) == classwise_topK); const auto num_priors = decoded_bboxes.get_axis_size(1); CV_Assert(!share_location || decoded_bboxes.get_axis_size(2) == 1); constexpr int BLOCK_SIZE = 256; /* each block processes one class from each batch */ dim3 grid_size(num_classes, batch_size); dim3 block_size(BLOCK_SIZE); auto policy = execution_policy(grid_size, block_size, stream); auto kernel = raw::box_collect<T>; launch_kernel(kernel, policy, collected_bboxes, decoded_bboxes, indices, count, share_location, num_priors, 
num_classes, classwise_topK, background_class_id); } template void box_collect(const Stream&, TensorSpan<float>, TensorView<float>, TensorView<int>, TensorView<int>, bool, std::size_t); template void box_collect(const Stream&, TensorSpan<__half>, TensorView<__half>, TensorView<int>, TensorView<int>, bool, std::size_t); template <class T> void blockwise_class_nms(const Stream& stream, TensorSpan<int> indices, TensorSpan<int> count, TensorView<T> collected_bboxes, bool normalized_bbox, std::size_t background_class_id, float nms_threshold) { // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // collected_bboxes: [batch_size, num_classes, classwise_topK, 4] const auto batch_size = indices.get_axis_size(0); CV_Assert(count.get_axis_size(0) == batch_size); CV_Assert(collected_bboxes.get_axis_size(0) == batch_size); const auto num_classes = indices.get_axis_size(1); CV_Assert(count.get_axis_size(1) == num_classes); CV_Assert(collected_bboxes.get_axis_size(1) == num_classes); const auto classwise_topK = indices.get_axis_size(2); CV_Assert(collected_bboxes.get_axis_size(2) == classwise_topK); /* each block processes one class from each batch */ auto num_blocks = batch_size * num_classes; auto num_threads = std::max<std::size_t>(std::min<std::size_t>(1024, classwise_topK), 32); dim3 grid_size(num_blocks); dim3 block_size(num_threads); auto policy = execution_policy(grid_size, block_size, stream); if (normalized_bbox) { auto kernel = raw::blockwise_class_nms<T, true>; launch_kernel(kernel, policy, indices, count, collected_bboxes, num_classes, classwise_topK, background_class_id, nms_threshold); } else { auto kernel = raw::blockwise_class_nms<T, false>; launch_kernel(kernel, policy, indices, count, collected_bboxes, num_classes, classwise_topK, background_class_id, nms_threshold); } } template void blockwise_class_nms(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<__half>, bool, std::size_t, float); template void blockwise_class_nms(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<float>, bool, std::size_t, float); template <class T> void nms_collect(const Stream& stream, TensorSpan<int> kept_indices, TensorSpan<int> kept_count, TensorView<int> indices, TensorView<int> count, TensorView<T> scores, float threshold, std::size_t background_class_id) { // kept_indices: [batch_size, keepTopK] // kept_count: [batch_size] // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // scores: [batch_size, num_classes, num_priors] auto batch_size = kept_indices.get_axis_size(0); CV_Assert(kept_count.get_axis_size(0) == batch_size); CV_Assert(indices.get_axis_size(0) == batch_size); CV_Assert(count.get_axis_size(0) == batch_size); CV_Assert(scores.get_axis_size(0) == batch_size); auto keepTopK = kept_indices.get_axis_size(1); auto num_classes = indices.get_axis_size(1); CV_Assert(count.get_axis_size(1) == num_classes); CV_Assert(scores.get_axis_size(1) == num_classes); auto classwise_topK = indices.get_axis_size(2); auto num_priors = scores.get_axis_size(2); auto num_blocks = batch_size; constexpr int BLOCK_SIZE = 1024; dim3 grid_size(num_blocks); dim3 block_size(BLOCK_SIZE); auto policy = execution_policy(grid_size, block_size, stream); auto kernel = raw::nms_collect<T, 1024, BLOCK_SIZE>; launch_kernel(kernel, policy, kept_indices, kept_count, indices, count, scores, threshold, num_classes, num_priors, classwise_topK, keepTopK, background_class_id); } template void nms_collect(const Stream&, TensorSpan<int>, 
TensorSpan<int>, TensorView<int>, TensorView<int>, TensorView<__half>, float, std::size_t); template void nms_collect(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<int>, TensorView<int>, TensorView<float>, float, std::size_t); template <class T> void consolidate_detections(const Stream& stream, TensorSpan<T> output, TensorView<int> kept_indices, TensorView<int> kept_count, TensorView<T> decoded_bboxes, TensorView<T> scores, bool share_location, DevicePtr<int> num_detections) { // output: [1, 1, batch_size * keepTopK, 7] // kept_indices: [batch_size, keepTopK] // kept_count: [batch_size] // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // scores: [batch_size, num_classes, num_priors] auto batch_size = kept_indices.get_axis_size(0); CV_Assert(kept_count.get_axis_size(0) == batch_size); CV_Assert(decoded_bboxes.get_axis_size(0) == batch_size); CV_Assert(scores.get_axis_size(0) == batch_size); auto keepTopK = kept_indices.get_axis_size(1); auto num_classes = scores.get_axis_size(1); auto num_priors = scores.get_axis_size(2); CV_Assert(batch_size * keepTopK * 7 == output.size()); auto kernel = raw::consolidate_detections<T>; auto policy = make_policy(kernel, keepTopK, 0, stream); launch_kernel(kernel, policy, output, kept_indices, kept_count, decoded_bboxes, scores, share_location, batch_size, num_classes, num_priors, keepTopK, num_detections); } template void consolidate_detections(const Stream&, TensorSpan<__half>, TensorView<int>, TensorView<int>, TensorView<__half>, TensorView<__half>, bool, DevicePtr<int>); template void consolidate_detections(const Stream&, TensorSpan<float>, TensorView<int>, TensorView<int>, TensorView<float>, TensorView<float>, bool, DevicePtr<int>); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
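The decode_bboxes wrapper in the file above folds its boolean options into a bit-packed `config` value and walks a recursive template (`dispatch_decode_bboxes`) to instantiate the matching kernel specialization. The snippet below is a stripped-down, hypothetical illustration of that dispatch pattern using two flags instead of four; the names `run_impl` and `dispatch_impl` are invented for this sketch and are not part of the OpenCV sources.

#include <cstdio>
#include <type_traits>
#include <utility>

// Templated "kernel launcher" stand-in: one instantiation per flag combination.
template <bool FLAG_A, bool FLAG_B>
void run_impl(int x) {
    std::printf("run_impl<%d,%d>(%d)\n", int(FLAG_A), int(FLAG_B), x);
}

// Recursive dispatcher: compares the runtime selector against each possible
// bit pattern and forwards to the matching compile-time specialization.
template <unsigned int current, class ...Args>
typename std::enable_if<current == 0, void>::type
dispatch_impl(unsigned int selector, Args&& ...args) {
    if (selector == 0)
        run_impl<false, false>(std::forward<Args>(args)...);
}

template <unsigned int current, class ...Args>
typename std::enable_if<current != 0, void>::type
dispatch_impl(unsigned int selector, Args&& ...args) {
    if (selector == current)
        run_impl<static_cast<bool>(current & 2), static_cast<bool>(current & 1)>(std::forward<Args>(args)...);
    else
        dispatch_impl<current - 1, Args...>(selector, std::forward<Args>(args)...);
}

int main() {
    bool flag_a = true, flag_b = false;
    unsigned int config = (flag_a << 1) | flag_b;   // pack the runtime flags into a bitmask
    dispatch_impl<3>(config, 42);                   // 3 == highest possible bit pattern
    return 0;
}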
3fc0b93d8d37fded069f4dffbf05319681013985.cu
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <cuda_runtime.h> #include <cuda_fp16.h> #include "math.hpp" #include "bbox_utils.hpp" #include "grid_stride_range.hpp" #include "block_stride_range.hpp" #include "execution.hpp" #include "vector_traits.hpp" #include "memory.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" #include "../cuda4dnn/csl/tensor.hpp" using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, bool SHARE_LOCATION, bool VARIANCE_ENCODED_IN_TARGET, bool CORNER_TRUE_CENTER_FALSE, bool CLIP_BBOX> __global__ void decode_bbox(Span<T> decoded_bboxes, View<T> locations, View<T> priors, bool transpose_location, bool normalized_bbox, size_type num_loc_classes, index_type background_class_id, float clip_width, float clip_height) { // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // locations: [batch_size, num_priors, num_loc_classes, 4] // priors: [1, C, num_priors, 4] // C = 2 if !VARIANCE_ENCODED_IN_TARGET; otherwise, 1 /* 4 bbox values + 4 variance values per prior */ constexpr int PRIOR_BOX_SIZE = VARIANCE_ENCODED_IN_TARGET ? 4 : 8; const size_type num_priors = priors.size() / PRIOR_BOX_SIZE; using vector_type = get_vector_type_t<T, 4>; auto locations_vPtr = vector_type::get_pointer(locations.data()); auto priors_vPtr = vector_type::get_pointer(priors.data()); auto decoded_bboxes_vPtr = vector_type::get_pointer(decoded_bboxes.data()); const auto boxes_per_batch = num_priors * num_loc_classes; for (auto idx : grid_stride_range(decoded_bboxes.size() / 4)) { index_type p; index_type c; if (SHARE_LOCATION) { // locations are shared across all classes => num_loc_classes = 1 p = idx % boxes_per_batch; c = 0; } else { p = (idx % boxes_per_batch) / num_loc_classes; c = idx % num_loc_classes; } if (!SHARE_LOCATION && c == background_class_id) continue; BoundingBox bbox; { vector_type location; v_load(location, locations_vPtr[idx]); if (transpose_location) { bbox.ymin = location.data[0]; bbox.xmin = location.data[1]; bbox.ymax = location.data[2]; bbox.xmax = location.data[3]; } else { bbox.xmin = location.data[0]; bbox.ymin = location.data[1]; bbox.xmax = location.data[2]; bbox.ymax = location.data[3]; } } if (!VARIANCE_ENCODED_IN_TARGET) { vector_type prior_variance; v_load_ldg(prior_variance, priors_vPtr[num_priors + p]); bbox.xmin *= static_cast<float>(prior_variance.data[0]); bbox.ymin *= static_cast<float>(prior_variance.data[1]); bbox.xmax *= static_cast<float>(prior_variance.data[2]); bbox.ymax *= static_cast<float>(prior_variance.data[3]); } BoundingBox prior; { vector_type prior_box; v_load_ldg(prior_box, priors_vPtr[p]); prior.xmin = prior_box.data[0]; prior.ymin = prior_box.data[1]; prior.xmax = prior_box.data[2]; prior.ymax = prior_box.data[3]; } BoundingBox decoded_bbox; if (CORNER_TRUE_CENTER_FALSE) { decoded_bbox.xmin = prior.xmin + bbox.xmin; decoded_bbox.ymin = prior.ymin + bbox.ymin; decoded_bbox.xmax = prior.xmax + bbox.xmax; decoded_bbox.ymax = prior.ymax + bbox.ymax; } else { auto prior_width = prior.xmax - prior.xmin; auto prior_height = prior.ymax - prior.ymin; if (!normalized_bbox) { prior_width += 1; prior_height += 1; } auto prior_center_x = prior.xmin + prior_width * 0.5f; auto prior_center_y = prior.ymin + prior_height * 0.5f; 
auto decode_bbox_center_x = bbox.xmin * prior_width + prior_center_x; auto decode_bbox_center_y = bbox.ymin * prior_height + prior_center_y; using device::exp; float decode_bbox_width = exp(bbox.xmax) * prior_width; float decode_bbox_height = exp(bbox.ymax) * prior_height; decoded_bbox.xmin = decode_bbox_center_x - decode_bbox_width * 0.5f; decoded_bbox.ymin = decode_bbox_center_y - decode_bbox_height * 0.5f; decoded_bbox.xmax = decode_bbox_center_x + decode_bbox_width * 0.5f; decoded_bbox.ymax = decode_bbox_center_y + decode_bbox_height * 0.5f; } vector_type decoded_bbox_vec; if (CLIP_BBOX) { decoded_bbox_vec.data[0] = clamp(decoded_bbox.xmin, 0.0f, clip_width); decoded_bbox_vec.data[1] = clamp(decoded_bbox.ymin, 0.0f, clip_height); decoded_bbox_vec.data[2] = clamp(decoded_bbox.xmax, 0.0f, clip_width); decoded_bbox_vec.data[3] = clamp(decoded_bbox.ymax, 0.0f, clip_height); } else { decoded_bbox_vec.data[0] = decoded_bbox.xmin; decoded_bbox_vec.data[1] = decoded_bbox.ymin; decoded_bbox_vec.data[2] = decoded_bbox.xmax; decoded_bbox_vec.data[3] = decoded_bbox.ymax; } v_store(decoded_bboxes_vPtr[idx], decoded_bbox_vec); } } template <class T, int BINS, int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ void findTopK(Span<int> indices_, Span<int> count_, View<T> scores_, float threshold, size_type classwise_topK, size_type num_classes, size_type num_priors, index_type background_class_id) { /* We need to sort boxes based on their confidence scores. The confidence scores fall in * the range [0.0, 1.0]. We break the range into bins and perform count sort. This is an * approximate algorithm. * * Each block handles a particular class of a particular batch item. */ const auto c = blockIdx.x; const auto b = blockIdx.y; if (c == background_class_id) return; // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // scores: [batch_size, num_classes, num_priors] auto count = count_.data() + b * num_classes + c; auto scores = scores_.data() + (b * num_classes + c) * num_priors; auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; /* We do not require a large number of bins to find the top K confidence scores. We will use * a reasonable number of bins which will fit in the shared memory. * * Note that smaller scores will have a smaller index, i.e. the `bins` are ordered in * ascending order. */ __shared__ int bins[BINS]; #pragma unroll for (int unroll = 0; unroll < BINS / BLOCK_SIZE; unroll++) bins[unroll * BLOCK_SIZE + threadIdx.x] = 0; __syncthreads(); for (auto i : block_stride_range<BLOCK_SIZE>(num_priors)) { const float confidence = load_ldg(scores[i]); if (confidence > threshold) { using device::fast_divide_ftz; auto conf_scaled = fast_divide_ftz(confidence - threshold, 1 - threshold); using device::clamp; int bin_index = conf_scaled * BINS; /* We store counts of confidence scores in the bins. Our ultimate goal is to store the indices * of the `classwise_topK` confidence values in the `indices` array. * * We use a little trick to parallelize the process of filling up the `indices` array. * We want every thread in the block to participate in the process. To do so, we want the * bins array to be shifted by one place to the left. We will be computing the suffix sum * of the bins array later. Details and reasons for doing so will be explained later. 
*/ bin_index = clamp<int>(bin_index, 0, BINS - 1) - 1; // shift left by one if (bin_index >= 0) atomicAdd(&bins[bin_index], 1); } } __syncthreads(); constexpr int WARP_SIZE = 32; /* must be equal to warpSize */ // FORWARD_COMPATIBILITY_TAG: WARP_SIZE_DEPENDENT_CODE if (threadIdx.x < WARP_SIZE) { /* We can compute suffix sum of an array in groups of N numbers. * Let N be 4 for this example. * * 1) Last 4 numbers * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * group suffix sum: 42 33 23 12 * * 2) Middle 4 numbers * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * group suffix sum: | 26 21 15 8 | * * We add `42` (first element in the previous group) to each element to get: * * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * | 68 63 57 50 | 42 33 23 12 * 3) First 4 numbers * * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * group suffix sum: 10 9 7 4 | * * We add `68` (first element in the previous group) to each element to get: * * 1 2 3 4 | 5 6 7 8 | 9 10 11 12 * group suffix sum: 78 77 75 72 | 68 63 57 50 | 42 33 23 12 * * What we are left with now is the suffix sum of the entire array. * * We use the aforementioned logic in the code below but work in groups of `warpSize`. */ /* We calculate suffix sums WARP_SIZE elements at a time starting from the right end. * Hence, we will need BINS / WARP_SIZE number of iterations. * * Each iteration uses shuffle instructions to exchange data between threads. Shuffle * instructions cannot be used in warp-divergent code. If the bins are a multiple of * the warpSize, all the threads in the warp will participate. */ static_assert(BINS % WARP_SIZE == 0, "number of bins must be a multiple of warp size"); const int thread_id = threadIdx.x; const int inverse_lane_id = WARP_SIZE - thread_id - 1; int previous_group_first_element = 0; for (int iter = BINS / WARP_SIZE - 1; iter >= 0; iter--) { const index_type idx = iter * WARP_SIZE + thread_id; auto value = bins[idx]; for (int i = 1; i < WARP_SIZE; i *= 2) { auto n = __shfl_down_sync(0xFFFFFFFF, value, i); if (inverse_lane_id >= i) value += n; } value += previous_group_first_element; bins[idx] = value; previous_group_first_element = __shfl_sync(0xFFFFFFFF, value, 0); } } if (threadIdx.x == 0) *count = 0; __syncthreads(); for (auto i : block_stride_range<BLOCK_SIZE>(num_priors)) { const float confidence = load_ldg(scores[i]); if (confidence > threshold) { using device::fast_divide_ftz; auto conf_scaled = fast_divide_ftz(confidence - threshold, 1 - threshold); int bin_index = conf_scaled * BINS; bin_index = clamp<int>(bin_index, 0, BINS - 1); /* This bounding box is eligible to be selected unless it does not fall in * the `classwise_topK`. If it did, we would have to compute the location where it needs * to be stored. * * Suppose we had just 4 bins and say the following were the counts: * BIN0 2 * BIN1 1 * BIN2 3 * BIN3 0 (last bin is always zero as we shift left by one while populating the bins) * * We will try our best to store the boxes in a sorted order in the `indices` array. * This requires that the boxes in later bins (higher confidence scores) must be * stored earlier. * * We compute the suffix sum of the array. This gives us: * BIN0 6 * BIN1 4 * BIN2 3 * BIN3 0 * * The bins now give us the location in the `indices` array from which the indices of the * scores corresponding to that bin would be stored. We atomically increment the bin count * everytime we store a box corresponding to that bin. Therefore, the value in the bins * gives the index in the `indices` array where the next box corresponding to that bin must * be put. 
*/ const index_type idx = atomicAdd(&bins[bin_index], 1); if (idx < classwise_topK) { indices[idx] = i; atomicAdd(&count[0], 1); } } } } template <class T> __global__ void box_collect(Span<T> collected_bboxes_, View<T> decoded_bboxes_, View<int> indices_, View<int> count_, bool share_location, size_type num_priors, size_type num_classes, size_type classwise_topK, index_type background_class_id) { const index_type c = blockIdx.x; if (c == background_class_id) return; const index_type b = blockIdx.y; // collected_bboxes: [batch_size, num_classes, classwise_topK, 4] // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] const auto num_loc_classes = share_location ? 1 : num_classes; auto collected_bboxes = collected_bboxes_.data() + (b * num_classes + c) * classwise_topK * 4; auto decoded_bboxes = decoded_bboxes_.data() + b * num_priors * num_loc_classes * 4; auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; auto count = count_.data() + b * num_classes + c; const auto boxes = load_ldg(&count[0]); if (boxes == 0) return; using vector_type = get_vector_type_t<T, 4>; auto decoded_bboxes_vPtr = vector_type::get_pointer(decoded_bboxes); auto collected_bboxes_vPtr = vector_type::get_pointer(collected_bboxes); for (auto i : block_stride_range<>(boxes)) { const auto prior_id = indices[i]; const index_type idx = share_location ? prior_id : (prior_id * num_classes + c); vector_type box; v_load(box, decoded_bboxes_vPtr[idx]); v_store(collected_bboxes_vPtr[i], box); } } template <class T, bool NORMALIZED_BBOX> __global__ void blockwise_class_nms(Span<int> indices_, Span<int> count_, View<T> collected_bboxes_, size_type num_classes, size_type classwise_topK, index_type background_class_id, float nms_threshold) { const index_type b = blockIdx.x / num_classes; const index_type c = blockIdx.x % num_classes; if (c == background_class_id) return; // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // collected_bboxes: [batch_size, num_classes, classwise_topK, 4] auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; auto count = count_.data() + b * num_classes + c; auto collected_bboxes = collected_bboxes_.data() + (b * num_classes + c) * classwise_topK * 4; const auto boxes = count[0]; if (boxes == 0) return; using vector_type = get_vector_type_t<T, 4>; auto collected_bboxes_vPtr = vector_type::get_pointer(collected_bboxes); for (int i = 0; i < boxes; i++) { auto prior_id = indices[i]; if (prior_id != -1) { BoundingBox bbox1; { vector_type box; v_load(box, collected_bboxes_vPtr[i]); bbox1.xmin = box.data[0]; bbox1.ymin = box.data[1]; bbox1.xmax = box.data[2]; bbox1.ymax = box.data[3]; } for (auto j : block_stride_range<>(i + 1, boxes)) { prior_id = indices[j]; if (prior_id == -1) continue; BoundingBox bbox2; { vector_type box; v_load_ldg(box, collected_bboxes_vPtr[j]); bbox2.xmin = box.data[0]; bbox2.ymin = box.data[1]; bbox2.xmax = box.data[2]; bbox2.ymax = box.data[3]; } using device::min; using device::max; BoundingBox intersect_bbox; intersect_bbox.xmin = max(bbox1.xmin, bbox2.xmin); intersect_bbox.ymin = max(bbox1.ymin, bbox2.ymin); intersect_bbox.xmax = min(bbox1.xmax, bbox2.xmax); intersect_bbox.ymax = min(bbox1.ymax, bbox2.ymax); float intersect_size = compute_bbox_size<NORMALIZED_BBOX>(intersect_bbox); float bbox1_size = compute_bbox_size<NORMALIZED_BBOX>(bbox1); float bbox2_size = compute_bbox_size<NORMALIZED_BBOX>(bbox2); 
using device::fast_divide_ftz; float iou = fast_divide_ftz(intersect_size, bbox1_size + bbox2_size - intersect_size); if (iou > nms_threshold) indices[j] = -1; } } __syncthreads(); } if (threadIdx.x == 0) count[0] = 0; __syncthreads(); for (auto i : block_stride_range<>(boxes)) { auto prior_id = indices[i]; if(prior_id != -1) { const index_type idx = atomicAdd(&count[0], 1); indices[idx] = prior_id; } } } template <class T, std::size_t BINS, int BLOCK_SIZE> __launch_bounds__(BLOCK_SIZE) __global__ void nms_collect( Span<int> kept_indices, Span<int> kept_count, View<int> indices_, View<int> count, View<T> scores_, float threshold, size_type num_classes, size_type num_priors, size_type classwise_topK, size_type keepTopK, index_type background_class_id) { // sorting algorithm is documented in detail in findTopK kernel comments // no explanations are provided here // kept_indices: [batch_size, keepTopK] // kept_count: [batch_size] const auto b = blockIdx.x; __shared__ int bins[BINS]; #pragma unroll for (int unroll = 0; unroll < BINS / BLOCK_SIZE; unroll++) bins[unroll * BLOCK_SIZE + threadIdx.x] = 0; __syncthreads(); for (int c = 0; c < num_classes; c++) { if (c == background_class_id) continue; // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // scores: [batch_size, num_classes, num_priors] const auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; const auto scores = scores_.data() + (b * num_classes + c) * num_priors; auto boxes = count[b * num_classes + c]; for (auto i : block_stride_range<BLOCK_SIZE>(boxes)) { auto prior_id = indices[i]; const float confidence = load_ldg(scores[prior_id]); if (confidence > threshold) { using device::fast_divide_ftz; auto conf_scaled = fast_divide_ftz(confidence - threshold, 1 - threshold); using device::clamp; int bin_index = conf_scaled * BINS; bin_index = clamp<int>(bin_index, 0, BINS - 1) - 1; // shift left by one if (bin_index >= 0) atomicAdd(&bins[bin_index], 1); } } } __syncthreads(); constexpr int WARP_SIZE = 32; /* must be equal to warpSize */ // FORWARD_COMPATIBILITY_TAG: WARP_SIZE_DEPENDENT_CODE if (threadIdx.x < WARP_SIZE) { static_assert(BINS % WARP_SIZE == 0, "number of bins must be a multiple of warp size"); const int thread_id = threadIdx.x; const int inverse_lane_id = WARP_SIZE - thread_id - 1; int previous_group_first_element = 0; for (int iter = BINS / WARP_SIZE - 1; iter >= 0; iter--) { const index_type idx = iter * WARP_SIZE + thread_id; auto value = bins[idx]; for (int i = 1; i < WARP_SIZE; i *= 2) { auto n = __shfl_down_sync(0xFFFFFFFF, value, i); if (inverse_lane_id >= i) value += n; } value += previous_group_first_element; bins[idx] = value; previous_group_first_element = __shfl_sync(0xFFFFFFFF, value, 0); } } if (threadIdx.x == 0) kept_count[b] = 0; __syncthreads(); for (int c = 0; c < num_classes; c++) { if (c == background_class_id) continue; const auto indices = indices_.data() + (b * num_classes + c) * classwise_topK; const auto scores = scores_.data() + (b * num_classes + c) * num_priors; auto boxes = count[b * num_classes + c]; for (auto i : block_stride_range<BLOCK_SIZE>(boxes)) { auto prior_id = indices[i]; const float confidence = load_ldg(scores[prior_id]); if (confidence > threshold) { using device::fast_divide_ftz; auto conf_scaled = fast_divide_ftz(confidence - threshold, 1 - threshold); using device::clamp; int bin_index = conf_scaled * BINS; bin_index = clamp<int>(bin_index, 0, BINS - 1); const index_type idx = atomicAdd(&bins[bin_index], 1); if (idx < 
keepTopK) { kept_indices[b * keepTopK + idx] = c * num_priors + prior_id; atomicAdd(&kept_count[b], 1); } } } } } template <class T> __global__ void consolidate_detections(Span<T> output, View<int> kept_indices, View<int> kept_count, View<T> decoded_bboxes, View<T> scores, bool share_location, size_type batch_size, size_type num_classes, size_type num_priors, size_type keepTopK, DevicePtr<int> num_detections) { using vector_type = get_vector_type_t<T, 4>; auto decoded_bboxes_vPtr = vector_type::get_pointer(decoded_bboxes.data()); // output: [1, 1, batch_size * keepTopK, 7] // kept_indices: [batch_size, keepTopK] // kept_count: [batch_size] // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // scores: [batch_size, num_classes, num_priors] for (int b = 0; b < batch_size; b++) { for (auto i : grid_stride_range(kept_count[b])) { auto score_id = kept_indices[b * keepTopK + i]; auto c = score_id / num_priors; auto prior_id = score_id % num_priors; const auto confidence = scores[b * num_classes * num_priors + score_id]; index_type bbox_id; if (share_location) { // decoded_bboxes: [batch_size, num_priors, 1, 4] bbox_id = b * num_priors + prior_id; } else { // decoded_bboxes: [batch_size, num_priors, num_classes, 4] bbox_id = (b * num_priors + prior_id) * num_classes + c; } vector_type bbox; v_load(bbox, decoded_bboxes_vPtr[bbox_id]); auto output_id = atomicAdd(num_detections.get(), 1); output[output_id * 7 + 0] = b; output[output_id * 7 + 1] = c; output[output_id * 7 + 2] = confidence; output[output_id * 7 + 3] = bbox.data[0]; output[output_id * 7 + 4] = bbox.data[1]; output[output_id * 7 + 5] = bbox.data[2]; output[output_id * 7 + 6] = bbox.data[3]; } } } } template <class T, bool SHARE_LOCATION, bool VARIANCE_ENCODED_IN_TARGET, bool CORNER_TRUE_CENTER_FALSE, bool CLIP_BBOX> static void launch_decode_boxes_kernel(const Stream& stream, Span<T> decoded_bboxes, View<T> locations, View<T> priors, bool transpose_location, bool normalized_bbox, size_type num_loc_classes, index_type background_class_id, float clip_width, float clip_height) { auto kernel = raw::decode_bbox<T, SHARE_LOCATION, VARIANCE_ENCODED_IN_TARGET, CORNER_TRUE_CENTER_FALSE, CLIP_BBOX>; auto policy = make_policy(kernel, decoded_bboxes.size() / 4, 0, stream); launch_kernel(kernel, policy, decoded_bboxes, locations, priors, transpose_location, normalized_bbox, num_loc_classes, background_class_id, clip_width, clip_height); } template <class T, unsigned int current, class ...Args> static typename std::enable_if<current == 0, void> ::type dispatch_decode_bboxes(int selector, Args&& ...args) { if(selector == 0) launch_decode_boxes_kernel<T, 0, 0, 0, 0>(std::forward<Args>(args)...); } template <class T, unsigned int current, class ...Args> static typename std::enable_if<current != 0, void> ::type dispatch_decode_bboxes(int selector, Args&& ...args) { if(selector == current) launch_decode_boxes_kernel<T, static_cast<bool>(current & 8), static_cast<bool>(current & 4), static_cast<bool>(current & 2), static_cast<bool>(current & 1)>(std::forward<Args>(args)...); else dispatch_decode_bboxes<T, current - 1, Args...>(selector, std::forward<Args>(args)...); } template <class T> void decode_bboxes(const Stream& stream, Span<T> output, View<T> locations, View<T> priors, std::size_t num_loc_classes, bool share_location, std::size_t background_class_id, bool transpose_location, bool variance_encoded_in_target, bool corner_true_or_center_false, bool normalized_bbox, bool clip_box, float clip_width, float clip_height) { /* `config` combines 
three kernel template options into one number using which a bit of TMP code can * run through all possible combinations and instantiate the correct template */ unsigned int config = (share_location << 3 | variance_encoded_in_target << 2 | corner_true_or_center_false << 1 | clip_box); dispatch_decode_bboxes<T, 15>(config, stream, output, locations, priors, transpose_location, normalized_bbox, num_loc_classes, background_class_id, clip_width, clip_height); } template void decode_bboxes(const Stream&, Span<__half>, View<__half>, View<__half>, std::size_t, bool, std::size_t, bool, bool, bool, bool, bool, float, float); template void decode_bboxes(const Stream&, Span<float>, View<float>, View<float>, std::size_t, bool, std::size_t, bool, bool, bool, bool, bool, float, float); template <class T> void findTopK(const Stream& stream, TensorSpan<int> indices, TensorSpan<int> count, TensorView<T> scores, std::size_t background_class_id, float threshold) { // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // scores: [batch_size, num_classes, num_priors] const auto batch_size = indices.get_axis_size(0); CV_Assert(count.get_axis_size(0) == batch_size); CV_Assert(scores.get_axis_size(0) == batch_size); const auto num_classes = indices.get_axis_size(1); CV_Assert(count.get_axis_size(1) == num_classes); CV_Assert(scores.get_axis_size(1) == num_classes); const auto classwise_topK = indices.get_axis_size(2); const auto num_priors = scores.get_axis_size(2); /* each block processes one class from each batch */ constexpr auto BLOCK_SIZE = 256; dim3 grid_size(num_classes, batch_size); dim3 block_size(BLOCK_SIZE); auto policy = execution_policy(grid_size, block_size, stream); auto kernel = raw::findTopK<T, 2048, BLOCK_SIZE>; launch_kernel(kernel, policy, indices, count, scores, threshold, classwise_topK, num_classes, num_priors, background_class_id); } template void findTopK(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<__half>, std::size_t, float); template void findTopK(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<float>, std::size_t, float); template <class T> void box_collect(const Stream& stream, TensorSpan<T> collected_bboxes, TensorView<T> decoded_bboxes, TensorView<int> indices, TensorView<int> count, bool share_location, std::size_t background_class_id) { // collected_bboxes: [batch_size, num_classes, classwise_topK, 4] // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] const auto batch_size = collected_bboxes.get_axis_size(0); CV_Assert(decoded_bboxes.get_axis_size(0) == batch_size); CV_Assert(indices.get_axis_size(0) == batch_size); CV_Assert(count.get_axis_size(0) == batch_size); const auto num_classes = collected_bboxes.get_axis_size(1); CV_Assert(indices.get_axis_size(1) == num_classes); CV_Assert(count.get_axis_size(1) == num_classes); const auto classwise_topK = collected_bboxes.get_axis_size(2); CV_Assert(indices.get_axis_size(2) == classwise_topK); const auto num_priors = decoded_bboxes.get_axis_size(1); CV_Assert(!share_location || decoded_bboxes.get_axis_size(2) == 1); constexpr int BLOCK_SIZE = 256; /* each block processes one class from each batch */ dim3 grid_size(num_classes, batch_size); dim3 block_size(BLOCK_SIZE); auto policy = execution_policy(grid_size, block_size, stream); auto kernel = raw::box_collect<T>; launch_kernel(kernel, policy, collected_bboxes, decoded_bboxes, indices, count, share_location, num_priors, 
num_classes, classwise_topK, background_class_id); } template void box_collect(const Stream&, TensorSpan<float>, TensorView<float>, TensorView<int>, TensorView<int>, bool, std::size_t); template void box_collect(const Stream&, TensorSpan<__half>, TensorView<__half>, TensorView<int>, TensorView<int>, bool, std::size_t); template <class T> void blockwise_class_nms(const Stream& stream, TensorSpan<int> indices, TensorSpan<int> count, TensorView<T> collected_bboxes, bool normalized_bbox, std::size_t background_class_id, float nms_threshold) { // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // collected_bboxes: [batch_size, num_classes, classwise_topK, 4] const auto batch_size = indices.get_axis_size(0); CV_Assert(count.get_axis_size(0) == batch_size); CV_Assert(collected_bboxes.get_axis_size(0) == batch_size); const auto num_classes = indices.get_axis_size(1); CV_Assert(count.get_axis_size(1) == num_classes); CV_Assert(collected_bboxes.get_axis_size(1) == num_classes); const auto classwise_topK = indices.get_axis_size(2); CV_Assert(collected_bboxes.get_axis_size(2) == classwise_topK); /* each block processes one class from each batch */ auto num_blocks = batch_size * num_classes; auto num_threads = std::max<std::size_t>(std::min<std::size_t>(1024, classwise_topK), 32); dim3 grid_size(num_blocks); dim3 block_size(num_threads); auto policy = execution_policy(grid_size, block_size, stream); if (normalized_bbox) { auto kernel = raw::blockwise_class_nms<T, true>; launch_kernel(kernel, policy, indices, count, collected_bboxes, num_classes, classwise_topK, background_class_id, nms_threshold); } else { auto kernel = raw::blockwise_class_nms<T, false>; launch_kernel(kernel, policy, indices, count, collected_bboxes, num_classes, classwise_topK, background_class_id, nms_threshold); } } template void blockwise_class_nms(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<__half>, bool, std::size_t, float); template void blockwise_class_nms(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<float>, bool, std::size_t, float); template <class T> void nms_collect(const Stream& stream, TensorSpan<int> kept_indices, TensorSpan<int> kept_count, TensorView<int> indices, TensorView<int> count, TensorView<T> scores, float threshold, std::size_t background_class_id) { // kept_indices: [batch_size, keepTopK] // kept_count: [batch_size] // indices: [batch_size, num_classes, classwise_topK] // count: [batch_size, num_classes] // scores: [batch_size, num_classes, num_priors] auto batch_size = kept_indices.get_axis_size(0); CV_Assert(kept_count.get_axis_size(0) == batch_size); CV_Assert(indices.get_axis_size(0) == batch_size); CV_Assert(count.get_axis_size(0) == batch_size); CV_Assert(scores.get_axis_size(0) == batch_size); auto keepTopK = kept_indices.get_axis_size(1); auto num_classes = indices.get_axis_size(1); CV_Assert(count.get_axis_size(1) == num_classes); CV_Assert(scores.get_axis_size(1) == num_classes); auto classwise_topK = indices.get_axis_size(2); auto num_priors = scores.get_axis_size(2); auto num_blocks = batch_size; constexpr int BLOCK_SIZE = 1024; dim3 grid_size(num_blocks); dim3 block_size(BLOCK_SIZE); auto policy = execution_policy(grid_size, block_size, stream); auto kernel = raw::nms_collect<T, 1024, BLOCK_SIZE>; launch_kernel(kernel, policy, kept_indices, kept_count, indices, count, scores, threshold, num_classes, num_priors, classwise_topK, keepTopK, background_class_id); } template void nms_collect(const Stream&, TensorSpan<int>, 
TensorSpan<int>, TensorView<int>, TensorView<int>, TensorView<__half>, float, std::size_t); template void nms_collect(const Stream&, TensorSpan<int>, TensorSpan<int>, TensorView<int>, TensorView<int>, TensorView<float>, float, std::size_t); template <class T> void consolidate_detections(const Stream& stream, TensorSpan<T> output, TensorView<int> kept_indices, TensorView<int> kept_count, TensorView<T> decoded_bboxes, TensorView<T> scores, bool share_location, DevicePtr<int> num_detections) { // output: [1, 1, batch_size * keepTopK, 7] // kept_indices: [batch_size, keepTopK] // kept_count: [batch_size] // decoded_bboxes: [batch_size, num_priors, num_loc_classes, 4] // scores: [batch_size, num_classes, num_priors] auto batch_size = kept_indices.get_axis_size(0); CV_Assert(kept_count.get_axis_size(0) == batch_size); CV_Assert(decoded_bboxes.get_axis_size(0) == batch_size); CV_Assert(scores.get_axis_size(0) == batch_size); auto keepTopK = kept_indices.get_axis_size(1); auto num_classes = scores.get_axis_size(1); auto num_priors = scores.get_axis_size(2); CV_Assert(batch_size * keepTopK * 7 == output.size()); auto kernel = raw::consolidate_detections<T>; auto policy = make_policy(kernel, keepTopK, 0, stream); launch_kernel(kernel, policy, output, kept_indices, kept_count, decoded_bboxes, scores, share_location, batch_size, num_classes, num_priors, keepTopK, num_detections); } template void consolidate_detections(const Stream&, TensorSpan<__half>, TensorView<int>, TensorView<int>, TensorView<__half>, TensorView<__half>, bool, DevicePtr<int>); template void consolidate_detections(const Stream&, TensorSpan<float>, TensorView<int>, TensorView<int>, TensorView<float>, TensorView<float>, bool, DevicePtr<int>); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
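Both findTopK and nms_collect in the file above build their write offsets from a warp-wide suffix (reverse inclusive) scan implemented with __shfl_down_sync. The standalone kernel below is a minimal sketch of just that primitive for a single warp of 32 values; the kernel name and the host test harness are invented for illustration and are not part of the OpenCV sources.

#include <cstdio>
#include <cuda_runtime.h>

// Computes, for one warp, value[i] := value[i] + value[i+1] + ... + value[31]
// (an inclusive suffix sum), using only register shuffles.
__global__ void warp_suffix_scan(const int* in, int* out) {
    constexpr int WARP_SIZE = 32;
    const int lane = threadIdx.x;                      // 0..31
    const int inverse_lane = WARP_SIZE - lane - 1;     // distance from the right end

    int value = in[lane];
    for (int i = 1; i < WARP_SIZE; i *= 2) {
        // pull the partial sum held i lanes to the right
        int n = __shfl_down_sync(0xFFFFFFFF, value, i);
        if (inverse_lane >= i)   // lanes near the right edge have nothing left to add
            value += n;
    }
    out[lane] = value;
}

int main() {
    int h_in[32], h_out[32];
    for (int i = 0; i < 32; i++) h_in[i] = 1;          // suffix sums should be 32, 31, ..., 1

    int *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    warp_suffix_scan<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

    for (int i = 0; i < 32; i++) std::printf("%d ", h_out[i]);
    std::printf("\n");

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}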
ca9de848327f69171663dec9be706a7f64efb2e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void additionMatricesKernel(int* d_a, int* d_b, int* d_c) { // -:YOUR CODE HERE:- }
ca9de848327f69171663dec9be706a7f64efb2e1.cu
#include "includes.h" __global__ void additionMatricesKernel(int* d_a, int* d_b, int* d_c) { // -:YOUR CODE HERE:- }
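The additionMatricesKernel above is an exercise stub (`-:YOUR CODE HERE:-`) whose signature carries no size argument, so any completion has to assume how the matrix extent is communicated. The sketch below is one hypothetical way to fill it in, assuming the matrices are stored contiguously in row-major order and that the total element count is passed as an extra parameter; it is not the intended solution of the original exercise.

#include <cuda_runtime.h>

// Hypothetical completion: element-wise addition of two matrices flattened to 1-D.
// The `n` parameter (total element count) is an assumption added for this sketch;
// the original stub takes only the three pointers.
__global__ void additionMatricesKernelSketch(const int* d_a, const int* d_b, int* d_c, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        d_c[idx] = d_a[idx] + d_b[idx];
}

// Example launch for a rows x cols matrix:
//   int n = rows * cols;
//   int threads = 256;
//   int blocks = (n + threads - 1) / threads;
//   additionMatricesKernelSketch<<<blocks, threads>>>(d_a, d_b, d_c, n);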
92a02956119339109387e1eb27bcdf0924f546b5.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <math.h> namespace { template <typename scalar_t> __global__ void cvo_dense_angle_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> pts, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> pts_info, const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> grid_source, const torch::PackedTensorAccessor<bool,4,torch::RestrictPtrTraits,size_t> grid_valid, const int neighbor_range, const bool ignore_ib, torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> y) { const auto N = pts.size(2); const auto C = pts_info.size(1); const auto B = grid_source.size(0); const auto H = grid_source.size(2); const auto W = grid_source.size(3); const int NN_sqrt = 2 * neighbor_range + 1; //dim3 block[N, NN, 1] const auto in = blockIdx.x * blockDim.x + threadIdx.x; const int innh = blockIdx.y / NN_sqrt - neighbor_range; const int innw = blockIdx.y % NN_sqrt - neighbor_range; if (in < N ){ const int u = pts[0][0][in]; const int v = pts[0][1][in]; int ib; if (ignore_ib){ ib = 0; } else{ ib = pts[0][2][in]; } if (u+innw >= 0 && u+innw < W && v+innh >= 0 && v+innh < H){ if (grid_valid[ib][0][v+innh][u+innw] > 0){ for (int ic = 0; ic < C; ic++){ y[0][blockIdx.y][in] += pts_info[0][ic][in] * grid_source[ib][ic][v+innh][u+innw] ; } } } } } template <typename scalar_t> __global__ void cvo_dense_angle_cuda_backward_kernel_dx( torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> dx1, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> dx2, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> dy, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> pts, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> pts_info, const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> grid_source, const torch::PackedTensorAccessor<bool,4,torch::RestrictPtrTraits,size_t> grid_valid, const int neighbor_range, const bool ignore_ib, const int inn) { // dx1: 1*C*N // dx2: B*C*H*W // dy: 1*NN*N const auto N = pts.size(2); const auto C = pts_info.size(1); const auto B = grid_source.size(0); const auto H = grid_source.size(2); const auto W = grid_source.size(3); const auto NN = (2*neighbor_range+1)*(2*neighbor_range+1); const auto NN_sqrt = 2 * neighbor_range + 1; //dim3 block[N, C, 1] if (inn < NN){ const int in = blockIdx.x * blockDim.x + threadIdx.x; const int innh = inn / NN_sqrt - neighbor_range; const int innw = inn % NN_sqrt - neighbor_range; if (in < N ){ const int u = pts[0][0][in]; const int v = pts[0][1][in]; int ib; if (ignore_ib){ ib = 0; } else{ ib = pts[0][2][in]; } if (u+innw >= 0 && u+innw < W && v+innh >= 0 && v+innh < H){ if (grid_valid[ib][0][v+innh][u+innw] > 0){ dx1[0][blockIdx.y][in] += dy[0][inn][in] * grid_source[ib][blockIdx.y][v+innh][u+innw] ; dx2[ib][blockIdx.y][v+innh][u+innw] += dy[0][inn][in] * pts_info[0][blockIdx.y][in]; } } } } } } // namespace torch::Tensor cvo_dense_angle_cuda_forward( torch::Tensor pts, torch::Tensor pts_info, torch::Tensor grid_source, torch::Tensor grid_valid, int neighbor_range, bool ignore_ib ) { // pts: 1*2*N, pts_info: 1*C*N, grid_source: B*C*H*W (C could be xyz, rgb, ...), // grid_valid: B*1*H*W, neighbor_range: int const auto N = pts.size(2); const auto C = 
pts_info.size(1); const auto B = grid_source.size(0); const auto H = grid_source.size(2); const auto W = grid_source.size(3); const auto NN = (2*neighbor_range+1)*(2*neighbor_range+1); auto options = torch::TensorOptions().dtype(pts_info.dtype()).layout(torch::kStrided).device(pts_info.device()).requires_grad(true); auto y = torch::zeros({1, NN, N}, options); // printf("x1 device: %d \n", x1.device().type()); // printf("x1 index: %d \n", x1.device().index()); const int threads = 1024; // cannot parallize across channels, because it will case modifying the the location by multiple threads at the same time // const dim3 blocks((n1 * n2 * channel_size + threads - 1) / threads, batch_size); const dim3 blocks((N + threads - 1) / threads, NN); // const dim3 blocks(1, 1); int device_id = pts_info.device().index(); hipSetDevice(device_id); // AT_DISPATCH_FLOATING_TYPES // AT_DISPATCH_ALL_TYPES_AND_HALF AT_DISPATCH_FLOATING_TYPES(pts_info.type(), "cvo_dense_angle_forward_cuda", ([&] { hipLaunchKernelGGL(( cvo_dense_angle_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, pts.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), pts_info.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), grid_source.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), grid_valid.packed_accessor<bool,4,torch::RestrictPtrTraits,size_t>(), neighbor_range, ignore_ib, y.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>()); })); hipDeviceSynchronize(); return y; } std::vector<torch::Tensor> cvo_dense_angle_cuda_backward( torch::Tensor dy, torch::Tensor pts, torch::Tensor pts_info, torch::Tensor grid_source, torch::Tensor grid_valid, int neighbor_range, bool ignore_ib ) { // dy: 1*NN*N const auto N = pts.size(2); const auto C = pts_info.size(1); const auto B = grid_source.size(0); const auto H = grid_source.size(2); const auto W = grid_source.size(3); const auto NN = (2*neighbor_range+1)*(2*neighbor_range+1); auto dx1 = torch::zeros({1, C, N}, pts_info.device()); auto dx2 = torch::zeros({B, C, H, W}, pts_info.device()); const int threads = 1024; int device_id = pts_info.device().index(); hipSetDevice(device_id); const dim3 blocks_dx12(( N + threads - 1) / threads, C); for (int inn = 0; inn < NN; inn++){ AT_DISPATCH_FLOATING_TYPES(dy.type(), "cvo_dense_angle_backward_cuda_dx", ([&] { hipLaunchKernelGGL(( cvo_dense_angle_cuda_backward_kernel_dx<scalar_t>), dim3(blocks_dx12), dim3(threads), 0, 0, dx1.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), dx2.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), dy.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), pts.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), pts_info.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), grid_source.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), grid_valid.packed_accessor<bool,4,torch::RestrictPtrTraits,size_t>(), neighbor_range, ignore_ib, inn); })); hipDeviceSynchronize(); } return {dx1, dx2}; }
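In the forward kernel of the file above, the second grid dimension (`blockIdx.y`) enumerates the (2*neighbor_range+1)^2 window offsets, which each block decodes as `inn / NN_sqrt - neighbor_range` and `inn % NN_sqrt - neighbor_range`. The small host program below simply replays that index arithmetic so the mapping can be checked on the CPU; it is an illustrative sketch, not part of the extension.

#include <cstdio>

// Prints the (dh, dw) offset that a given flattened window index `inn`
// decodes to, for a window of radius `neighbor_range` (same arithmetic
// as the CUDA kernels above).
int main() {
    const int neighbor_range = 2;                       // example radius
    const int NN_sqrt = 2 * neighbor_range + 1;         // window side length: 5
    const int NN = NN_sqrt * NN_sqrt;                   // 25 offsets in total

    for (int inn = 0; inn < NN; inn++) {
        int dh = inn / NN_sqrt - neighbor_range;        // vertical offset, -2..2
        int dw = inn % NN_sqrt - neighbor_range;        // horizontal offset, -2..2
        std::printf("inn=%2d -> (dh=%+d, dw=%+d)\n", inn, dh, dw);
    }
    return 0;
}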
92a02956119339109387e1eb27bcdf0924f546b5.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <math.h> namespace { template <typename scalar_t> __global__ void cvo_dense_angle_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> pts, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> pts_info, const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> grid_source, const torch::PackedTensorAccessor<bool,4,torch::RestrictPtrTraits,size_t> grid_valid, const int neighbor_range, const bool ignore_ib, torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> y) { const auto N = pts.size(2); const auto C = pts_info.size(1); const auto B = grid_source.size(0); const auto H = grid_source.size(2); const auto W = grid_source.size(3); const int NN_sqrt = 2 * neighbor_range + 1; //dim3 block[N, NN, 1] const auto in = blockIdx.x * blockDim.x + threadIdx.x; const int innh = blockIdx.y / NN_sqrt - neighbor_range; const int innw = blockIdx.y % NN_sqrt - neighbor_range; if (in < N ){ const int u = pts[0][0][in]; const int v = pts[0][1][in]; int ib; if (ignore_ib){ ib = 0; } else{ ib = pts[0][2][in]; } if (u+innw >= 0 && u+innw < W && v+innh >= 0 && v+innh < H){ if (grid_valid[ib][0][v+innh][u+innw] > 0){ for (int ic = 0; ic < C; ic++){ y[0][blockIdx.y][in] += pts_info[0][ic][in] * grid_source[ib][ic][v+innh][u+innw] ; } } } } } template <typename scalar_t> __global__ void cvo_dense_angle_cuda_backward_kernel_dx( torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> dx1, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> dx2, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> dy, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> pts, const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> pts_info, const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> grid_source, const torch::PackedTensorAccessor<bool,4,torch::RestrictPtrTraits,size_t> grid_valid, const int neighbor_range, const bool ignore_ib, const int inn) { // dx1: 1*C*N // dx2: B*C*H*W // dy: 1*NN*N const auto N = pts.size(2); const auto C = pts_info.size(1); const auto B = grid_source.size(0); const auto H = grid_source.size(2); const auto W = grid_source.size(3); const auto NN = (2*neighbor_range+1)*(2*neighbor_range+1); const auto NN_sqrt = 2 * neighbor_range + 1; //dim3 block[N, C, 1] if (inn < NN){ const int in = blockIdx.x * blockDim.x + threadIdx.x; const int innh = inn / NN_sqrt - neighbor_range; const int innw = inn % NN_sqrt - neighbor_range; if (in < N ){ const int u = pts[0][0][in]; const int v = pts[0][1][in]; int ib; if (ignore_ib){ ib = 0; } else{ ib = pts[0][2][in]; } if (u+innw >= 0 && u+innw < W && v+innh >= 0 && v+innh < H){ if (grid_valid[ib][0][v+innh][u+innw] > 0){ dx1[0][blockIdx.y][in] += dy[0][inn][in] * grid_source[ib][blockIdx.y][v+innh][u+innw] ; dx2[ib][blockIdx.y][v+innh][u+innw] += dy[0][inn][in] * pts_info[0][blockIdx.y][in]; } } } } } } // namespace torch::Tensor cvo_dense_angle_cuda_forward( torch::Tensor pts, torch::Tensor pts_info, torch::Tensor grid_source, torch::Tensor grid_valid, int neighbor_range, bool ignore_ib ) { // pts: 1*2*N, pts_info: 1*C*N, grid_source: B*C*H*W (C could be xyz, rgb, ...), // grid_valid: B*1*H*W, neighbor_range: int const auto N = pts.size(2); const auto C = pts_info.size(1); const auto B = grid_source.size(0); const auto H = 
grid_source.size(2); const auto W = grid_source.size(3); const auto NN = (2*neighbor_range+1)*(2*neighbor_range+1); auto options = torch::TensorOptions().dtype(pts_info.dtype()).layout(torch::kStrided).device(pts_info.device()).requires_grad(true); auto y = torch::zeros({1, NN, N}, options); // printf("x1 device: %d \n", x1.device().type()); // printf("x1 index: %d \n", x1.device().index()); const int threads = 1024; // cannot parallize across channels, because it will case modifying the the location by multiple threads at the same time // const dim3 blocks((n1 * n2 * channel_size + threads - 1) / threads, batch_size); const dim3 blocks((N + threads - 1) / threads, NN); // const dim3 blocks(1, 1); int device_id = pts_info.device().index(); cudaSetDevice(device_id); // AT_DISPATCH_FLOATING_TYPES // AT_DISPATCH_ALL_TYPES_AND_HALF AT_DISPATCH_FLOATING_TYPES(pts_info.type(), "cvo_dense_angle_forward_cuda", ([&] { cvo_dense_angle_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( pts.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), pts_info.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), grid_source.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), grid_valid.packed_accessor<bool,4,torch::RestrictPtrTraits,size_t>(), neighbor_range, ignore_ib, y.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>()); })); cudaDeviceSynchronize(); return y; } std::vector<torch::Tensor> cvo_dense_angle_cuda_backward( torch::Tensor dy, torch::Tensor pts, torch::Tensor pts_info, torch::Tensor grid_source, torch::Tensor grid_valid, int neighbor_range, bool ignore_ib ) { // dy: 1*NN*N const auto N = pts.size(2); const auto C = pts_info.size(1); const auto B = grid_source.size(0); const auto H = grid_source.size(2); const auto W = grid_source.size(3); const auto NN = (2*neighbor_range+1)*(2*neighbor_range+1); auto dx1 = torch::zeros({1, C, N}, pts_info.device()); auto dx2 = torch::zeros({B, C, H, W}, pts_info.device()); const int threads = 1024; int device_id = pts_info.device().index(); cudaSetDevice(device_id); const dim3 blocks_dx12(( N + threads - 1) / threads, C); for (int inn = 0; inn < NN; inn++){ AT_DISPATCH_FLOATING_TYPES(dy.type(), "cvo_dense_angle_backward_cuda_dx", ([&] { cvo_dense_angle_cuda_backward_kernel_dx<scalar_t><<<blocks_dx12, threads>>>( dx1.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), dx2.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), dy.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), pts.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), pts_info.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(), grid_source.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), grid_valid.packed_accessor<bool,4,torch::RestrictPtrTraits,size_t>(), neighbor_range, ignore_ib, inn); })); cudaDeviceSynchronize(); } return {dx1, dx2}; }
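In the backward kernel above, the accumulation into dx2 is a plain read-modify-write; within a single launch two threads only target the same grid cell if two input points share the same pixel coordinates, which depends on the data. If such duplicates can occur, an atomic accumulation along the lines of the sketch below avoids lost updates. This is a generic illustration with invented names, not a statement about the extension's actual inputs.

#include <cstdio>
#include <cuda_runtime.h>

// Scatter-accumulate: several source values may map to the same output slot,
// so the addition must be atomic to give a correct total when targets collide.
__global__ void scatter_accumulate(const float* values, const int* target, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        atomicAdd(&out[target[i]], values[i]);   // safe even when targets collide
}

int main() {
    const int n = 8;
    float h_values[n] = {1, 1, 1, 1, 1, 1, 1, 1};
    int   h_target[n] = {0, 0, 1, 1, 1, 2, 2, 2};   // deliberate collisions
    float h_out[3] = {0, 0, 0};

    float *d_values, *d_out; int *d_target;
    cudaMalloc(&d_values, sizeof(h_values));
    cudaMalloc(&d_target, sizeof(h_target));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_values, h_values, sizeof(h_values), cudaMemcpyHostToDevice);
    cudaMemcpy(d_target, h_target, sizeof(h_target), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, h_out, sizeof(h_out), cudaMemcpyHostToDevice);

    scatter_accumulate<<<1, 32>>>(d_values, d_target, d_out, n);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

    std::printf("%g %g %g\n", h_out[0], h_out[1], h_out[2]);   // expected: 2 3 3
    cudaFree(d_values); cudaFree(d_target); cudaFree(d_out);
    return 0;
}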
5fbd8ee483a27884f499e0274c6814bfd9dbc670.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "getPoses.h"

#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/count.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>

#include "device_common.h"

struct isLessTest {
    __host__ __device__ bool operator()(const thrust::tuple<float4, float2, bool>& a ) {
        return (thrust::get<2>(a) == false);
    };
};

__global__ void isLess_kernel(bool* isEasLess, float* Eas, const float threshold, const int numPoses) {
    const int tIdx = threadIdx.x;
    const int Idx = blockIdx.x * BLOCK_SIZE + tIdx;

    if (Idx >= numPoses) return;

    isEasLess[Idx] = (Eas[Idx] < threshold)? true : false;
}

thrust::device_vector<float>::iterator findMin(thrust::device_vector<float>* Eas) {
    return thrust::min_element(Eas->begin(), Eas->end());
}

bool getPoses(thrust::device_vector<float4>* Poses4, thrust::device_vector<float2>* Poses2, thrust::device_vector<float>* Eas, float minEa, const float& delta, int* numPoses) {
    // get initial threhold
    const float thresh = 0.1869 * delta + 0.0161 - 0.002;
    minEa += thresh;

    // count reductions
    bool tooHighPercentage = false;
    bool first = true;
    int count = INT_MAX;

    thrust::device_vector<bool> isEasLess(*numPoses, false);

    const int BLOCK_NUM = (*numPoses - 1) / BLOCK_SIZE + 1;

    while (true) {
        hipLaunchKernelGGL(( isLess_kernel) , dim3(BLOCK_NUM), dim3(BLOCK_SIZE) , 0, 0, thrust::raw_pointer_cast(isEasLess.data()), thrust::raw_pointer_cast(Eas->data()), minEa, Eas->size());

        count = thrust::count(isEasLess.begin(), isEasLess.end(), true);

        if (first) tooHighPercentage = (count / *numPoses > 0.1);

        if (count < 27000) {
            // cut poses4 and poses2
            typedef thrust::tuple< thrust::device_vector< float4 >::iterator, thrust::device_vector< float2 >::iterator, thrust::device_vector< bool >::iterator > TupleIt;
            typedef thrust::zip_iterator< TupleIt > ZipIt;

            ZipIt Zend = thrust::remove_if(
                thrust::make_zip_iterator(thrust::make_tuple(Poses4->begin(), Poses2->begin(), isEasLess.begin())),
                thrust::make_zip_iterator(thrust::make_tuple(Poses4->end(), Poses2->end(), isEasLess.end())),
                isLessTest()
            );

            Poses4->erase(thrust::get<0>(Zend.get_iterator_tuple()), Poses4->end());
            Poses2->erase(thrust::get<1>(Zend.get_iterator_tuple()), Poses2->end());

            *numPoses = count;
            break;
        }

        minEa *= 0.99;
    }

    return tooHighPercentage;
}
5fbd8ee483a27884f499e0274c6814bfd9dbc670.cu
#include "getPoses.h" #include <thrust/functional.h> #include <thrust/extrema.h> #include <thrust/count.h> #include <thrust/remove.h> #include <thrust/device_vector.h> #include "device_common.h" struct isLessTest { __host__ __device__ bool operator()(const thrust::tuple<float4, float2, bool>& a ) { return (thrust::get<2>(a) == false); }; }; __global__ void isLess_kernel(bool* isEasLess, float* Eas, const float threshold, const int numPoses) { const int tIdx = threadIdx.x; const int Idx = blockIdx.x * BLOCK_SIZE + tIdx; if (Idx >= numPoses) return; isEasLess[Idx] = (Eas[Idx] < threshold)? true : false; } thrust::device_vector<float>::iterator findMin(thrust::device_vector<float>* Eas) { return thrust::min_element(Eas->begin(), Eas->end()); } bool getPoses(thrust::device_vector<float4>* Poses4, thrust::device_vector<float2>* Poses2, thrust::device_vector<float>* Eas, float minEa, const float& delta, int* numPoses) { // get initial threhold const float thresh = 0.1869 * delta + 0.0161 - 0.002; minEa += thresh; // count reductions bool tooHighPercentage = false; bool first = true; int count = INT_MAX; thrust::device_vector<bool> isEasLess(*numPoses, false); const int BLOCK_NUM = (*numPoses - 1) / BLOCK_SIZE + 1; while (true) { isLess_kernel <<< BLOCK_NUM, BLOCK_SIZE >>> (thrust::raw_pointer_cast(isEasLess.data()), thrust::raw_pointer_cast(Eas->data()), minEa, Eas->size()); count = thrust::count(isEasLess.begin(), isEasLess.end(), true); if (first) tooHighPercentage = (count / *numPoses > 0.1); if (count < 27000) { // cut poses4 and poses2 typedef thrust::tuple< thrust::device_vector< float4 >::iterator, thrust::device_vector< float2 >::iterator, thrust::device_vector< bool >::iterator > TupleIt; typedef thrust::zip_iterator< TupleIt > ZipIt; ZipIt Zend = thrust::remove_if( thrust::make_zip_iterator(thrust::make_tuple(Poses4->begin(), Poses2->begin(), isEasLess.begin())), thrust::make_zip_iterator(thrust::make_tuple(Poses4->end(), Poses2->end(), isEasLess.end())), isLessTest() ); Poses4->erase(thrust::get<0>(Zend.get_iterator_tuple()), Poses4->end()); Poses2->erase(thrust::get<1>(Zend.get_iterator_tuple()), Poses2->end()); *numPoses = count; break; } minEa *= 0.99; } return tooHighPercentage; }
6a8c80aa6d91c435bf723717dbe90a7ea3f96895.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/send_ue_recv_kernel.h" #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/elementwise_functor.h" #include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h" #include "paddle/phi/kernels/gpu/graph_send_ue_recv_funcs.h" #include "paddle/phi/kernels/impl/graph_message_passing_impl.h" namespace phi { template <typename Context, typename T, typename IndexT> void GraphSendUERecvOpCUDAKernelLaunchHelper(const Context& ctx, const DenseTensor& x, const DenseTensor& e, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& message_op, const std::string& reduce_op, int64_t out_size, DenseTensor* out, DenseTensor* dst_count = nullptr) { const int& index_size = src_index.dims()[0]; auto out_dims = out->dims(); int64_t memset_size = 1; std::vector<int64_t> dims_ = phi::vectorize(out_dims); if (out_size <= 0) { dims_[0] = x.dims()[0]; } else { dims_[0] = out_size; } out->Resize(phi::make_ddim(dims_)); for (size_t i = 0; i < dims_.size(); i++) { memset_size *= dims_[i]; } ctx.template Alloc<T>(out); T* out_data = out->data<T>(); const size_t& memset_bytes = memset_size * sizeof(T); if (reduce_op == "SUM" || reduce_op == "MEAN") { #ifdef PADDLE_WITH_HIP hipMemset(out_data, 0, memset_bytes); #else hipMemset(out_data, 0, memset_bytes); #endif } else if (reduce_op == "MAX") { thrust::device_ptr<T> out_data_ptr(out_data); thrust::fill(thrust::device, out_data_ptr, out_data_ptr + memset_size, std::numeric_limits<T>::lowest()); } else if (reduce_op == "MIN") { thrust::device_ptr<T> out_data_ptr(out_data); thrust::fill(thrust::device, out_data_ptr, out_data_ptr + memset_size, std::numeric_limits<T>::max()); } if (index_size == 0) return; const auto& bcast_info = phi::CalcBCastInfo(x.dims(), e.dims()); const T* x_data = x.data<T>(); const T* e_data = e.data<T>(); const IndexT* s_index = src_index.data<IndexT>(); const IndexT* d_index = dst_index.data<IndexT>(); thrust::device_vector<int64_t> x_bcastoff, e_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, &x_bcastoff, &e_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid(nbx, nby); const dim3 block(ntx, nty); int64_t input_size = x.dims()[0]; #ifdef PADDLE_WITH_HIP int block_ = 256; #else int block_ = 1024; #endif if (reduce_op == "SUM" || reduce_op == "MEAN") { GraphSendUERecvSumCUDAFunctor<T> sum_functor; if 
(message_op == "ADD") { funcs::AddFunctor<T> add_funtor; hipLaunchKernelGGL(( GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvSumCUDAFunctor<T>, funcs::AddFunctor<T>>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, add_funtor, sum_functor); } else if (message_op == "MUL") { funcs::MultiplyFunctor<T> mul_functor; hipLaunchKernelGGL(( GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvSumCUDAFunctor<T>, funcs::MultiplyFunctor<T>>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, sum_functor); } if (reduce_op == "MEAN") { input_size = out_size <= 0 ? x.dims()[0] : out_size; dst_count->Resize({input_size}); ctx.template Alloc<int>(dst_count); int* dst_count_data = dst_count->data<int>(); #ifdef PADDLE_WITH_HIP hipMemset(dst_count_data, 0, input_size * sizeof(int)); #else hipMemset(dst_count_data, 0, input_size * sizeof(int)); #endif int64_t grid_count = (index_size + block_ - 1) / block_; hipLaunchKernelGGL(( ComputeCountCUDAKernel<T, IndexT>) , dim3(grid_count), dim3(block_), 0, ctx.stream(), dst_count_data, d_index, index_size); int64_t grid_mean = (input_size * out_len + block_ - 1) / block_; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_mean_ = grid_mean < max_grid_dimx ? grid_mean : max_grid_dimx; hipLaunchKernelGGL(( ManipulateMeanCUDAKernel<T>), dim3(grid_mean_), dim3(block_), 0, ctx.stream(), out_data, dst_count_data, input_size, out_len); } } else if (reduce_op == "MAX") { GraphSendUERecvMaxCUDAFunctor<T> max_functor; if (message_op == "ADD") { funcs::AddFunctor<T> add_funtor; hipLaunchKernelGGL(( GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvMaxCUDAFunctor<T>, funcs::AddFunctor<T>>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, add_funtor, max_functor); } else if (message_op == "MUL") { funcs::MultiplyFunctor<T> mul_functor; hipLaunchKernelGGL(( GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvMaxCUDAFunctor<T>, funcs::MultiplyFunctor<T>>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, max_functor); } if (out_size > 0) { input_size = out_size; } int64_t grid_max = (input_size * out_len + block_ - 1) / block_; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_max_ = grid_max < max_grid_dimx ? 
grid_max : max_grid_dimx; hipLaunchKernelGGL(( InputResetMaxCUDAKernel<T>) , dim3(grid_max_), dim3(block_), 0, ctx.stream(), out_data, input_size, out_len); } else if (reduce_op == "MIN") { GraphSendUERecvMinCUDAFunctor<T> min_functor; if (message_op == "ADD") { funcs::AddFunctor<T> add_funtor; hipLaunchKernelGGL(( GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvMinCUDAFunctor<T>, funcs::AddFunctor<T>>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, add_funtor, min_functor); } else if (message_op == "MUL") { funcs::MultiplyFunctor<T> mul_functor; hipLaunchKernelGGL(( GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvMinCUDAFunctor<T>, funcs::MultiplyFunctor<T>>) , dim3(grid), dim3(block), 0, ctx.stream(), x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, min_functor); } if (out_size > 0) { input_size = out_size; } int64_t grid_min = (input_size * out_len + block_ - 1) / block_; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_min_ = grid_min < max_grid_dimx ? grid_min : max_grid_dimx; hipLaunchKernelGGL(( InputResetMinCUDAKernel<T>) , dim3(grid_min_), dim3(block_), 0, ctx.stream(), out_data, input_size, out_len); } } template <typename T, typename Context> void SendUERecvKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& message_op, const std::string& reduce_op, const IntArray& out_size, DenseTensor* out, DenseTensor* dst_count) { auto index_type = src_index.dtype(); auto& out_size_data = out_size.GetData(); if (index_type == phi::DataType::INT32) { GraphSendUERecvOpCUDAKernelLaunchHelper<Context, T, int32_t>( ctx, x, y, src_index, dst_index, message_op, reduce_op, out_size_data[0], out, dst_count); } else if (index_type == phi::DataType::INT64) { GraphSendUERecvOpCUDAKernelLaunchHelper<Context, T, int64_t>( ctx, x, y, src_index, dst_index, message_op, reduce_op, out_size_data[0], out, dst_count); } } } // namespace phi PD_REGISTER_KERNEL(send_ue_recv, GPU, ALL_LAYOUT, phi::SendUERecvKernel, float, double, int, int64_t, phi::dtype::float16) {}
6a8c80aa6d91c435bf723717dbe90a7ea3f96895.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/send_ue_recv_kernel.h" #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/hostdevice.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/elementwise_functor.h" #include "paddle/phi/kernels/gpu/graph_send_recv_funcs.h" #include "paddle/phi/kernels/gpu/graph_send_ue_recv_funcs.h" #include "paddle/phi/kernels/impl/graph_message_passing_impl.h" namespace phi { template <typename Context, typename T, typename IndexT> void GraphSendUERecvOpCUDAKernelLaunchHelper(const Context& ctx, const DenseTensor& x, const DenseTensor& e, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& message_op, const std::string& reduce_op, int64_t out_size, DenseTensor* out, DenseTensor* dst_count = nullptr) { const int& index_size = src_index.dims()[0]; auto out_dims = out->dims(); int64_t memset_size = 1; std::vector<int64_t> dims_ = phi::vectorize(out_dims); if (out_size <= 0) { dims_[0] = x.dims()[0]; } else { dims_[0] = out_size; } out->Resize(phi::make_ddim(dims_)); for (size_t i = 0; i < dims_.size(); i++) { memset_size *= dims_[i]; } ctx.template Alloc<T>(out); T* out_data = out->data<T>(); const size_t& memset_bytes = memset_size * sizeof(T); if (reduce_op == "SUM" || reduce_op == "MEAN") { #ifdef PADDLE_WITH_HIP hipMemset(out_data, 0, memset_bytes); #else cudaMemset(out_data, 0, memset_bytes); #endif } else if (reduce_op == "MAX") { thrust::device_ptr<T> out_data_ptr(out_data); thrust::fill(thrust::device, out_data_ptr, out_data_ptr + memset_size, std::numeric_limits<T>::lowest()); } else if (reduce_op == "MIN") { thrust::device_ptr<T> out_data_ptr(out_data); thrust::fill(thrust::device, out_data_ptr, out_data_ptr + memset_size, std::numeric_limits<T>::max()); } if (index_size == 0) return; const auto& bcast_info = phi::CalcBCastInfo(x.dims(), e.dims()); const T* x_data = x.data<T>(); const T* e_data = e.data<T>(); const IndexT* s_index = src_index.data<IndexT>(); const IndexT* d_index = dst_index.data<IndexT>(); thrust::device_vector<int64_t> x_bcastoff, e_bcastoff; if (bcast_info.use_bcast) { CopyBCastOff(bcast_info, &x_bcastoff, &e_bcastoff); } int64_t out_len = bcast_info.out_len; const int ntx = FindNumThreads(out_len, ctx.GetMaxThreadsPerBlock()); const int nty = ctx.GetMaxThreadsPerBlock() / ntx; const int nbx = (out_len + ntx - 1) / ntx; const int nby = FindNumBlocks('y', (index_size + nty - 1) / nty); const dim3 grid(nbx, nby); const dim3 block(ntx, nty); int64_t input_size = x.dims()[0]; #ifdef PADDLE_WITH_HIP int block_ = 256; #else int block_ = 1024; #endif if (reduce_op == "SUM" || reduce_op == "MEAN") { GraphSendUERecvSumCUDAFunctor<T> sum_functor; if (message_op == "ADD") { funcs::AddFunctor<T> add_funtor; GraphSendUERecvCUDAKernel<T, IndexT, 
GraphSendUERecvSumCUDAFunctor<T>, funcs::AddFunctor<T>> <<<grid, block, 0, ctx.stream()>>>( x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, add_funtor, sum_functor); } else if (message_op == "MUL") { funcs::MultiplyFunctor<T> mul_functor; GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvSumCUDAFunctor<T>, funcs::MultiplyFunctor<T>> <<<grid, block, 0, ctx.stream()>>>( x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, sum_functor); } if (reduce_op == "MEAN") { input_size = out_size <= 0 ? x.dims()[0] : out_size; dst_count->Resize({input_size}); ctx.template Alloc<int>(dst_count); int* dst_count_data = dst_count->data<int>(); #ifdef PADDLE_WITH_HIP hipMemset(dst_count_data, 0, input_size * sizeof(int)); #else cudaMemset(dst_count_data, 0, input_size * sizeof(int)); #endif int64_t grid_count = (index_size + block_ - 1) / block_; ComputeCountCUDAKernel<T, IndexT> <<<grid_count, block_, 0, ctx.stream()>>>( dst_count_data, d_index, index_size); int64_t grid_mean = (input_size * out_len + block_ - 1) / block_; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_mean_ = grid_mean < max_grid_dimx ? grid_mean : max_grid_dimx; ManipulateMeanCUDAKernel<T><<<grid_mean_, block_, 0, ctx.stream()>>>( out_data, dst_count_data, input_size, out_len); } } else if (reduce_op == "MAX") { GraphSendUERecvMaxCUDAFunctor<T> max_functor; if (message_op == "ADD") { funcs::AddFunctor<T> add_funtor; GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvMaxCUDAFunctor<T>, funcs::AddFunctor<T>> <<<grid, block, 0, ctx.stream()>>>( x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, add_funtor, max_functor); } else if (message_op == "MUL") { funcs::MultiplyFunctor<T> mul_functor; GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvMaxCUDAFunctor<T>, funcs::MultiplyFunctor<T>> <<<grid, block, 0, ctx.stream()>>>( x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, max_functor); } if (out_size > 0) { input_size = out_size; } int64_t grid_max = (input_size * out_len + block_ - 1) / block_; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_max_ = grid_max < max_grid_dimx ? 
grid_max : max_grid_dimx; InputResetMaxCUDAKernel<T> <<<grid_max_, block_, 0, ctx.stream()>>>(out_data, input_size, out_len); } else if (reduce_op == "MIN") { GraphSendUERecvMinCUDAFunctor<T> min_functor; if (message_op == "ADD") { funcs::AddFunctor<T> add_funtor; GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvMinCUDAFunctor<T>, funcs::AddFunctor<T>> <<<grid, block, 0, ctx.stream()>>>( x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, add_funtor, min_functor); } else if (message_op == "MUL") { funcs::MultiplyFunctor<T> mul_functor; GraphSendUERecvCUDAKernel<T, IndexT, GraphSendUERecvMinCUDAFunctor<T>, funcs::MultiplyFunctor<T>> <<<grid, block, 0, ctx.stream()>>>( x_data, e_data, s_index, d_index, thrust::raw_pointer_cast(x_bcastoff.data()), thrust::raw_pointer_cast(e_bcastoff.data()), out_data, index_size, bcast_info.l_len, bcast_info.r_len, out_len, bcast_info.use_bcast, mul_functor, min_functor); } if (out_size > 0) { input_size = out_size; } int64_t grid_min = (input_size * out_len + block_ - 1) / block_; int64_t max_grid_dimx = ctx.GetCUDAMaxGridDimSize()[0]; int64_t grid_min_ = grid_min < max_grid_dimx ? grid_min : max_grid_dimx; InputResetMinCUDAKernel<T> <<<grid_min_, block_, 0, ctx.stream()>>>(out_data, input_size, out_len); } } template <typename T, typename Context> void SendUERecvKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& y, const DenseTensor& src_index, const DenseTensor& dst_index, const std::string& message_op, const std::string& reduce_op, const IntArray& out_size, DenseTensor* out, DenseTensor* dst_count) { auto index_type = src_index.dtype(); auto& out_size_data = out_size.GetData(); if (index_type == phi::DataType::INT32) { GraphSendUERecvOpCUDAKernelLaunchHelper<Context, T, int32_t>( ctx, x, y, src_index, dst_index, message_op, reduce_op, out_size_data[0], out, dst_count); } else if (index_type == phi::DataType::INT64) { GraphSendUERecvOpCUDAKernelLaunchHelper<Context, T, int64_t>( ctx, x, y, src_index, dst_index, message_op, reduce_op, out_size_data[0], out, dst_count); } } } // namespace phi PD_REGISTER_KERNEL(send_ue_recv, GPU, ALL_LAYOUT, phi::SendUERecvKernel, float, double, int, int64_t, phi::dtype::float16) {}
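In the Paddle pair above the CUDA launches already pass an explicit shared-memory size and stream, so hipify only has to move them into the macro's argument list. A hedged sketch of that mapping with a placeholder kernel name and argument list (the real calls in these files use the templated GraphSendUERecv kernels with their full argument sets):

    // CUDA form used in the .cu file:
    SomeKernel<<<grid, block, 0, ctx.stream()>>>(args...);   // SomeKernel/args are placeholders
    // HIP form emitted in the .hip file:
    hipLaunchKernelGGL((SomeKernel), dim3(grid), dim3(block), 0, ctx.stream(), args...);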
826a7c9215b51c1aab75cb0a9a35cc14b45ace94.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 *
 * See COPYRIGHT.txt for license information
 */

#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include "utils.h"

#define MAX_MSG_SIZE 64 * 1024

#define THREADS_PER_WARP 32
#define THREADS_PER_BLOCK 1024

__global__ void latency(volatile int *data_d, volatile int *flag_d, int len, int pe, int iter, int skip) {
    long long int start, stop;
    double usec, time;
    int i, tid, peer;

    peer = !pe;
    tid = threadIdx.x;

    for (i = 0; i < (iter + skip); i++) {
        if (i == skip) {
            nvshmem_quiet();
            start = clock64();
        }

        nvshmem_int_get_nbi((int *)data_d, (int *)data_d, len, peer);
        nvshmem_quiet();
    }
    stop = clock64();

    if (!tid) {
        time = (stop - start) / iter;
        usec = time * 1000 / clockrate;
        printf("%7lu \t %8.2f \n", len * sizeof(int), usec);
    }
}

#define LATENCY_THREADGROUP(group) \
    __global__ void latency_##group(volatile int *data_d, volatile int *flag_d, int len, int pe, \
                                    int iter, int skip) { \
        long long int start, stop; \
        double usec, time; \
        int i, tid, peer; \
        \
        peer = !pe; \
        tid = threadIdx.x; \
        \
        for (i = 0; i < (iter + skip); i++) { \
            if (i == skip) { \
                __syncthreads(); \
                if (!tid) { \
                    nvshmem_quiet(); \
                    start = clock64(); \
                } \
                __syncthreads(); \
            } \
            \
            nvshmemx_int_get_nbi_##group((int *)data_d, (int *)data_d, len, peer); \
            \
            __syncthreads(); \
            if (!tid) nvshmem_quiet(); \
            __syncthreads(); \
        } \
        \
        if (!tid) { \
            stop = clock64(); \
            time = (stop - start) / iter; \
            usec = time * 1000 / clockrate; \
            printf("%7lu \t %8.2f \n", len * sizeof(int), usec); \
        } \
    }

LATENCY_THREADGROUP(warp)
LATENCY_THREADGROUP(block)

int main(int c, char *v[]) {
    int mype, npes, size;
    int *flag_d = NULL, *data_d = NULL;
    int iter = 200;
    int skip = 20;
    int max_msg_size = MAX_MSG_SIZE;

    init_wrapper(&c, &v);

    mype = nvshmem_my_pe();
    npes = nvshmem_n_pes();

    if (npes != 2) {
        fprintf(stderr, "This test requires exactly two processes \n");
        goto finalize;
    }

    data_d = (int *)nvshmem_malloc(max_msg_size);
    flag_d = (int *)nvshmem_malloc(sizeof(int));
    CUDA_CHECK(hipMemset(data_d, 0, max_msg_size));
    CUDA_CHECK(hipMemset(flag_d, 0, sizeof(int)));

    nvshmem_barrier_all();
    CUDA_CHECK(hipDeviceSynchronize());

    if (mype == 0) {
        printf(" Get with thread \n");
        printf(" size(bytes) \t latency(us)\n");
        fflush(stdout);
    }

    for (size = sizeof(int); size <= max_msg_size; size *= 2) {
        if (!mype) {
            int nelems;
            nelems = size / sizeof(int);
            hipLaunchKernelGGL(( latency), dim3(1), dim3(1), 0, 0, data_d, flag_d, nelems, mype, iter, skip);
            CUDA_CHECK(hipGetLastError());
            CUDA_CHECK(hipDeviceSynchronize());
        }

        nvshmem_barrier_all();
    }

    if (mype == 0) {
        printf(" Get with warp \n");
        printf(" size(bytes) \t latency(us)\n");
        fflush(stdout);
    }

    for (size = sizeof(int); size <= max_msg_size; size *= 2) {
        if (!mype) {
            int nelems;
            nelems = size / sizeof(int);
            hipLaunchKernelGGL(( latency_warp), dim3(1), dim3(THREADS_PER_WARP), 0, 0, data_d, flag_d, nelems, mype, iter, skip);
            CUDA_CHECK(hipGetLastError());
            CUDA_CHECK(hipDeviceSynchronize());
        }

        nvshmem_barrier_all();
    }

    if (mype == 0) {
        printf(" Get with block \n");
        printf(" size(bytes) \t latency(us)\n");
        fflush(stdout);
    }

    for (size = sizeof(int); size <= max_msg_size; size *= 2) {
        if (!mype) {
            int nelems;
            nelems = size / sizeof(int);
            hipLaunchKernelGGL(( latency_block), dim3(1), dim3(THREADS_PER_BLOCK), 0, 0, data_d, flag_d, nelems, mype, iter, skip);
            CUDA_CHECK(hipGetLastError());
            CUDA_CHECK(hipDeviceSynchronize());
        }

        nvshmem_barrier_all();
    }

finalize:

    if (data_d) nvshmem_free(data_d);
    if (flag_d) nvshmem_free(flag_d);

    finalize_wrapper();

    return 0;
}
826a7c9215b51c1aab75cb0a9a35cc14b45ace94.cu
/*
 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
 *
 * NVIDIA CORPORATION and its licensors retain all intellectual property
 * and proprietary rights in and to this software, related documentation
 * and any modifications thereto. Any use, reproduction, disclosure or
 * distribution of this software and related documentation without an express
 * license agreement from NVIDIA CORPORATION is strictly prohibited.
 *
 * See COPYRIGHT.txt for license information
 */

#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include "utils.h"

#define MAX_MSG_SIZE 64 * 1024

#define THREADS_PER_WARP 32
#define THREADS_PER_BLOCK 1024

__global__ void latency(volatile int *data_d, volatile int *flag_d, int len, int pe, int iter, int skip) {
    long long int start, stop;
    double usec, time;
    int i, tid, peer;

    peer = !pe;
    tid = threadIdx.x;

    for (i = 0; i < (iter + skip); i++) {
        if (i == skip) {
            nvshmem_quiet();
            start = clock64();
        }

        nvshmem_int_get_nbi((int *)data_d, (int *)data_d, len, peer);
        nvshmem_quiet();
    }
    stop = clock64();

    if (!tid) {
        time = (stop - start) / iter;
        usec = time * 1000 / clockrate;
        printf("%7lu \t %8.2f \n", len * sizeof(int), usec);
    }
}

#define LATENCY_THREADGROUP(group) \
    __global__ void latency_##group(volatile int *data_d, volatile int *flag_d, int len, int pe, \
                                    int iter, int skip) { \
        long long int start, stop; \
        double usec, time; \
        int i, tid, peer; \
        \
        peer = !pe; \
        tid = threadIdx.x; \
        \
        for (i = 0; i < (iter + skip); i++) { \
            if (i == skip) { \
                __syncthreads(); \
                if (!tid) { \
                    nvshmem_quiet(); \
                    start = clock64(); \
                } \
                __syncthreads(); \
            } \
            \
            nvshmemx_int_get_nbi_##group((int *)data_d, (int *)data_d, len, peer); \
            \
            __syncthreads(); \
            if (!tid) nvshmem_quiet(); \
            __syncthreads(); \
        } \
        \
        if (!tid) { \
            stop = clock64(); \
            time = (stop - start) / iter; \
            usec = time * 1000 / clockrate; \
            printf("%7lu \t %8.2f \n", len * sizeof(int), usec); \
        } \
    }

LATENCY_THREADGROUP(warp)
LATENCY_THREADGROUP(block)

int main(int c, char *v[]) {
    int mype, npes, size;
    int *flag_d = NULL, *data_d = NULL;
    int iter = 200;
    int skip = 20;
    int max_msg_size = MAX_MSG_SIZE;

    init_wrapper(&c, &v);

    mype = nvshmem_my_pe();
    npes = nvshmem_n_pes();

    if (npes != 2) {
        fprintf(stderr, "This test requires exactly two processes \n");
        goto finalize;
    }

    data_d = (int *)nvshmem_malloc(max_msg_size);
    flag_d = (int *)nvshmem_malloc(sizeof(int));
    CUDA_CHECK(cudaMemset(data_d, 0, max_msg_size));
    CUDA_CHECK(cudaMemset(flag_d, 0, sizeof(int)));

    nvshmem_barrier_all();
    CUDA_CHECK(cudaDeviceSynchronize());

    if (mype == 0) {
        printf(" Get with thread \n");
        printf(" size(bytes) \t latency(us)\n");
        fflush(stdout);
    }

    for (size = sizeof(int); size <= max_msg_size; size *= 2) {
        if (!mype) {
            int nelems;
            nelems = size / sizeof(int);
            latency<<<1, 1>>>(data_d, flag_d, nelems, mype, iter, skip);
            CUDA_CHECK(cudaGetLastError());
            CUDA_CHECK(cudaDeviceSynchronize());
        }

        nvshmem_barrier_all();
    }

    if (mype == 0) {
        printf(" Get with warp \n");
        printf(" size(bytes) \t latency(us)\n");
        fflush(stdout);
    }

    for (size = sizeof(int); size <= max_msg_size; size *= 2) {
        if (!mype) {
            int nelems;
            nelems = size / sizeof(int);
            latency_warp<<<1, THREADS_PER_WARP>>>(data_d, flag_d, nelems, mype, iter, skip);
            CUDA_CHECK(cudaGetLastError());
            CUDA_CHECK(cudaDeviceSynchronize());
        }

        nvshmem_barrier_all();
    }

    if (mype == 0) {
        printf(" Get with block \n");
        printf(" size(bytes) \t latency(us)\n");
        fflush(stdout);
    }

    for (size = sizeof(int); size <= max_msg_size; size *= 2) {
        if (!mype) {
            int nelems;
            nelems = size / sizeof(int);
            latency_block<<<1, THREADS_PER_BLOCK>>>(data_d, flag_d, nelems, mype, iter, skip);
            CUDA_CHECK(cudaGetLastError());
            CUDA_CHECK(cudaDeviceSynchronize());
        }

        nvshmem_barrier_all();
    }

finalize:

    if (data_d) nvshmem_free(data_d);
    if (flag_d) nvshmem_free(flag_d);

    finalize_wrapper();

    return 0;
}
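Besides the launch macro, this pair shows the runtime-API renames hipify applies while leaving the project's own CUDA_CHECK wrapper untouched. A minimal sketch pairing the calls exactly as they appear in the two files above:

    // .cu file                                          // .hip file
    CUDA_CHECK(cudaMemset(data_d, 0, max_msg_size));     CUDA_CHECK(hipMemset(data_d, 0, max_msg_size));
    CUDA_CHECK(cudaGetLastError());                      CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());                 CUDA_CHECK(hipDeviceSynchronize());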
8be8a31335ee010f7e6cbbf72e1f3ca22bd7eb52.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2019 XGBoost contributors */ #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <algorithm> #include <bitset> #include <string> #include <sstream> #include <set> #include "xgboost/logging.h" #include "xgboost/span.h" #include "constraints.cuh" #include "param.h" #include "../common/device_helpers.cuh" namespace xgboost { size_t FeatureInteractionConstraint::Features() const { return d_sets_ptr_.size() - 1; } void FeatureInteractionConstraint::Configure( tree::TrainParam const& param, int32_t const n_features) { has_constraint_ = true; if (param.interaction_constraints.length() == 0) { has_constraint_ = false; return; } // --- Parse interaction constraints std::istringstream iss(param.interaction_constraints); dmlc::JSONReader reader(&iss); // Interaction constraints parsed from string parameter. After // parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}. std::vector<std::vector<int32_t>> h_feature_constraints; try { reader.Read(&h_feature_constraints); } catch (dmlc::Error const& e) { LOG(FATAL) << "Failed to parse feature interaction constraint:\n" << param.interaction_constraints << "\n" << "With error:\n" << e.what(); } n_sets_ = h_feature_constraints.size(); size_t const n_feat_storage = LBitField64::ComputeStorageSize(n_features); if (n_feat_storage == 0 && n_features != 0) { LOG(FATAL) << "Wrong storage size, n_features: " << n_features; } // --- Initialize allowed features attached to nodes. if (param.max_depth == 0 && param.max_leaves == 0) { LOG(FATAL) << "Max leaves and max depth cannot both be unconstrained for gpu_hist."; } int32_t n_nodes {0}; if (param.max_depth != 0) { n_nodes = ::pow(2, param.max_depth + 1); } else { n_nodes = param.max_leaves * 2 - 1; } CHECK_NE(n_nodes, 0); node_constraints_.resize(n_nodes); node_constraints_storage_.resize(n_nodes); for (auto& n : node_constraints_storage_) { n.resize(LBitField64::ComputeStorageSize(n_features)); } for (size_t i = 0; i < node_constraints_storage_.size(); ++i) { auto span = dh::ToSpan(node_constraints_storage_[i]); node_constraints_[i] = LBitField64(span); } s_node_constraints_ = common::Span<LBitField64>(node_constraints_.data(), node_constraints_.size()); // Represent constraints as CSR format, flatten is the value vector, // ptr is row_ptr vector in CSR. std::vector<int32_t> h_feature_constraints_flatten; for (auto const& constraints : h_feature_constraints) { for (int32_t c : constraints) { h_feature_constraints_flatten.emplace_back(c); } } std::vector<int32_t> h_feature_constraints_ptr; size_t n_features_in_constraints = 0; h_feature_constraints_ptr.emplace_back(n_features_in_constraints); for (auto const& v : h_feature_constraints) { n_features_in_constraints += v.size(); h_feature_constraints_ptr.emplace_back(n_features_in_constraints); } // Copy the CSR to device. d_fconstraints_.resize(h_feature_constraints_flatten.size()); thrust::copy(h_feature_constraints_flatten.cbegin(), h_feature_constraints_flatten.cend(), d_fconstraints_.begin()); s_fconstraints_ = dh::ToSpan(d_fconstraints_); d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size()); thrust::copy(h_feature_constraints_ptr.cbegin(), h_feature_constraints_ptr.cend(), d_fconstraints_ptr_.begin()); s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_); // --- Compute interaction sets attached to each feature. // Use a set to eliminate duplicated entries. 
std::vector<std::set<int32_t> > h_features_set(n_features); int32_t cid = 0; for (auto const& constraints : h_feature_constraints) { for (auto const& feat : constraints) { h_features_set.at(feat).insert(cid); } cid++; } // Compute device sets. std::vector<int32_t> h_sets; int32_t ptr = 0; std::vector<int32_t> h_sets_ptr {ptr}; for (auto const& feature : h_features_set) { for (auto constraint_id : feature) { h_sets.emplace_back(constraint_id); } // empty set is well defined here. ptr += feature.size(); h_sets_ptr.emplace_back(ptr); } d_sets_ = h_sets; d_sets_ptr_ = h_sets_ptr; s_sets_ = dh::ToSpan(d_sets_); s_sets_ptr_ = dh::ToSpan(d_sets_ptr_); d_feature_buffer_storage_.resize(LBitField64::ComputeStorageSize(n_features)); feature_buffer_ = dh::ToSpan(d_feature_buffer_storage_); // --- Initialize result buffers. output_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features)); output_buffer_bits_ = LBitField64(dh::ToSpan(output_buffer_bits_storage_)); input_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features)); input_buffer_bits_ = LBitField64(dh::ToSpan(input_buffer_bits_storage_)); result_buffer_.resize(n_features); s_result_buffer_ = dh::ToSpan(result_buffer_); } FeatureInteractionConstraint::FeatureInteractionConstraint( tree::TrainParam const& param, int32_t const n_features) : has_constraint_{true}, n_sets_{0} { this->Configure(param, n_features); } void FeatureInteractionConstraint::Reset() { for (auto& node : node_constraints_storage_) { thrust::fill(node.begin(), node.end(), 0); } } __global__ void ClearBuffersKernel( LBitField64 result_buffer_output, LBitField64 result_buffer_input) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < result_buffer_output.Size()) { result_buffer_output.Clear(tid); } if (tid < result_buffer_input.Size()) { result_buffer_input.Clear(tid); } } void FeatureInteractionConstraint::ClearBuffers() { CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size()); CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size()); int constexpr kBlockThreads = 256; const int n_grids = static_cast<int>( common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads)); hipLaunchKernelGGL(( ClearBuffersKernel), dim3(n_grids), dim3(kBlockThreads), 0, 0, output_buffer_bits_, input_buffer_bits_); } common::Span<int32_t> FeatureInteractionConstraint::QueryNode(int32_t node_id) { if (!has_constraint_) { return {}; } CHECK_LT(node_id, s_node_constraints_.size()); ClearBuffers(); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); auto p_result_buffer = result_buffer_.data(); LBitField64 node_constraints = s_node_constraints_[node_id]; thrust::device_ptr<int32_t> const out_end = thrust::copy_if( thrust::device, begin, end, p_result_buffer, [=]__device__(int32_t pos) { bool res = node_constraints.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); return {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; } __global__ void SetInputBufferKernel(common::Span<int32_t> feature_list_input, LBitField64 result_buffer_input) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < feature_list_input.size()) { result_buffer_input.Set(feature_list_input[tid]); } } __global__ void QueryFeatureListKernel(LBitField64 node_constraints, LBitField64 result_buffer_input, LBitField64 result_buffer_output) { result_buffer_output |= node_constraints; result_buffer_output &= result_buffer_input; } common::Span<int32_t> 
FeatureInteractionConstraint::Query( common::Span<int32_t> feature_list, int32_t nid) { if (!has_constraint_ || nid == 0) { return feature_list; } ClearBuffers(); LBitField64 node_constraints = s_node_constraints_[nid]; CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size()); int constexpr kBlockThreads = 256; const int n_grids = static_cast<int>( common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads)); hipLaunchKernelGGL(( SetInputBufferKernel), dim3(n_grids), dim3(kBlockThreads), 0, 0, feature_list, input_buffer_bits_); hipLaunchKernelGGL(( QueryFeatureListKernel), dim3(n_grids), dim3(kBlockThreads), 0, 0, node_constraints, input_buffer_bits_, output_buffer_bits_); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); LBitField64 local_result_buffer = output_buffer_bits_; thrust::device_ptr<int32_t> const out_end = thrust::copy_if( thrust::device, begin, end, result_buffer_.data(), [=]__device__(int32_t pos) { bool res = local_result_buffer.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); common::Span<int32_t> result = {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; return result; } // Find interaction sets for each feature, then store all features in // those sets in a buffer. __global__ void RestoreFeatureListFromSetsKernel( LBitField64 feature_buffer, int32_t fid, common::Span<int32_t> feature_interactions, common::Span<int32_t> feature_interactions_ptr, // of size n interaction set + 1 common::Span<int32_t> interactions_list, common::Span<int32_t> interactions_list_ptr) { auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x; auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y; // painful mapping: fid -> sets related to it -> features related to sets. auto const beg = interactions_list_ptr[fid]; auto const end = interactions_list_ptr[fid+1]; auto const n_sets = end - beg; if (tid_x < n_sets) { auto const set_id_pos = beg + tid_x; auto const set_id = interactions_list[set_id_pos]; auto const set_beg = feature_interactions_ptr[set_id]; auto const set_end = feature_interactions_ptr[set_id + 1]; auto const feature_pos = set_beg + tid_y; if (feature_pos < set_end) { feature_buffer.Set(feature_interactions[feature_pos]); } } } __global__ void InteractionConstraintSplitKernel(LBitField64 feature, int32_t feature_id, LBitField64 node, LBitField64 left, LBitField64 right) { auto tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid > node.Size()) { return; } // enable constraints from feature node |= feature; // clear the buffer after use if (tid < feature.Size()) { feature.Clear(tid); } // enable constraints from parent left |= node; right |= node; if (tid == feature_id) { // enable the split feature, set all of them at last instead of // setting it for parent to avoid race. 
node.Set(feature_id); left.Set(feature_id); right.Set(feature_id); } } void FeatureInteractionConstraint::Split( int32_t node_id, int32_t feature_id, int32_t left_id, int32_t right_id) { if (!has_constraint_) { return; } CHECK_NE(node_id, left_id) << " Split node: " << node_id << " and its left child: " << left_id << " cannot be the same."; CHECK_NE(node_id, right_id) << " Split node: " << node_id << " and its left child: " << right_id << " cannot be the same."; CHECK_LT(right_id, s_node_constraints_.size()); CHECK_NE(s_node_constraints_.size(), 0); LBitField64 node = s_node_constraints_[node_id]; LBitField64 left = s_node_constraints_[left_id]; LBitField64 right = s_node_constraints_[right_id]; dim3 const block3(16, 64, 1); dim3 const grid3(common::DivRoundUp(n_sets_, 16), common::DivRoundUp(s_fconstraints_.size(), 64)); hipLaunchKernelGGL(( RestoreFeatureListFromSetsKernel), dim3(grid3), dim3(block3), 0, 0, feature_buffer_, feature_id, s_fconstraints_, s_fconstraints_ptr_, s_sets_, s_sets_ptr_); int constexpr kBlockThreads = 256; const int n_grids = static_cast<int>(common::DivRoundUp(node.Size(), kBlockThreads)); hipLaunchKernelGGL(( InteractionConstraintSplitKernel), dim3(n_grids), dim3(kBlockThreads), 0, 0, feature_buffer_, feature_id, node, left, right); } } // namespace xgboost
8be8a31335ee010f7e6cbbf72e1f3ca22bd7eb52.cu
/*! * Copyright 2019 XGBoost contributors */ #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <algorithm> #include <bitset> #include <string> #include <sstream> #include <set> #include "xgboost/logging.h" #include "xgboost/span.h" #include "constraints.cuh" #include "param.h" #include "../common/device_helpers.cuh" namespace xgboost { size_t FeatureInteractionConstraint::Features() const { return d_sets_ptr_.size() - 1; } void FeatureInteractionConstraint::Configure( tree::TrainParam const& param, int32_t const n_features) { has_constraint_ = true; if (param.interaction_constraints.length() == 0) { has_constraint_ = false; return; } // --- Parse interaction constraints std::istringstream iss(param.interaction_constraints); dmlc::JSONReader reader(&iss); // Interaction constraints parsed from string parameter. After // parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}. std::vector<std::vector<int32_t>> h_feature_constraints; try { reader.Read(&h_feature_constraints); } catch (dmlc::Error const& e) { LOG(FATAL) << "Failed to parse feature interaction constraint:\n" << param.interaction_constraints << "\n" << "With error:\n" << e.what(); } n_sets_ = h_feature_constraints.size(); size_t const n_feat_storage = LBitField64::ComputeStorageSize(n_features); if (n_feat_storage == 0 && n_features != 0) { LOG(FATAL) << "Wrong storage size, n_features: " << n_features; } // --- Initialize allowed features attached to nodes. if (param.max_depth == 0 && param.max_leaves == 0) { LOG(FATAL) << "Max leaves and max depth cannot both be unconstrained for gpu_hist."; } int32_t n_nodes {0}; if (param.max_depth != 0) { n_nodes = std::pow(2, param.max_depth + 1); } else { n_nodes = param.max_leaves * 2 - 1; } CHECK_NE(n_nodes, 0); node_constraints_.resize(n_nodes); node_constraints_storage_.resize(n_nodes); for (auto& n : node_constraints_storage_) { n.resize(LBitField64::ComputeStorageSize(n_features)); } for (size_t i = 0; i < node_constraints_storage_.size(); ++i) { auto span = dh::ToSpan(node_constraints_storage_[i]); node_constraints_[i] = LBitField64(span); } s_node_constraints_ = common::Span<LBitField64>(node_constraints_.data(), node_constraints_.size()); // Represent constraints as CSR format, flatten is the value vector, // ptr is row_ptr vector in CSR. std::vector<int32_t> h_feature_constraints_flatten; for (auto const& constraints : h_feature_constraints) { for (int32_t c : constraints) { h_feature_constraints_flatten.emplace_back(c); } } std::vector<int32_t> h_feature_constraints_ptr; size_t n_features_in_constraints = 0; h_feature_constraints_ptr.emplace_back(n_features_in_constraints); for (auto const& v : h_feature_constraints) { n_features_in_constraints += v.size(); h_feature_constraints_ptr.emplace_back(n_features_in_constraints); } // Copy the CSR to device. d_fconstraints_.resize(h_feature_constraints_flatten.size()); thrust::copy(h_feature_constraints_flatten.cbegin(), h_feature_constraints_flatten.cend(), d_fconstraints_.begin()); s_fconstraints_ = dh::ToSpan(d_fconstraints_); d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size()); thrust::copy(h_feature_constraints_ptr.cbegin(), h_feature_constraints_ptr.cend(), d_fconstraints_ptr_.begin()); s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_); // --- Compute interaction sets attached to each feature. // Use a set to eliminate duplicated entries. 
std::vector<std::set<int32_t> > h_features_set(n_features); int32_t cid = 0; for (auto const& constraints : h_feature_constraints) { for (auto const& feat : constraints) { h_features_set.at(feat).insert(cid); } cid++; } // Compute device sets. std::vector<int32_t> h_sets; int32_t ptr = 0; std::vector<int32_t> h_sets_ptr {ptr}; for (auto const& feature : h_features_set) { for (auto constraint_id : feature) { h_sets.emplace_back(constraint_id); } // empty set is well defined here. ptr += feature.size(); h_sets_ptr.emplace_back(ptr); } d_sets_ = h_sets; d_sets_ptr_ = h_sets_ptr; s_sets_ = dh::ToSpan(d_sets_); s_sets_ptr_ = dh::ToSpan(d_sets_ptr_); d_feature_buffer_storage_.resize(LBitField64::ComputeStorageSize(n_features)); feature_buffer_ = dh::ToSpan(d_feature_buffer_storage_); // --- Initialize result buffers. output_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features)); output_buffer_bits_ = LBitField64(dh::ToSpan(output_buffer_bits_storage_)); input_buffer_bits_storage_.resize(LBitField64::ComputeStorageSize(n_features)); input_buffer_bits_ = LBitField64(dh::ToSpan(input_buffer_bits_storage_)); result_buffer_.resize(n_features); s_result_buffer_ = dh::ToSpan(result_buffer_); } FeatureInteractionConstraint::FeatureInteractionConstraint( tree::TrainParam const& param, int32_t const n_features) : has_constraint_{true}, n_sets_{0} { this->Configure(param, n_features); } void FeatureInteractionConstraint::Reset() { for (auto& node : node_constraints_storage_) { thrust::fill(node.begin(), node.end(), 0); } } __global__ void ClearBuffersKernel( LBitField64 result_buffer_output, LBitField64 result_buffer_input) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < result_buffer_output.Size()) { result_buffer_output.Clear(tid); } if (tid < result_buffer_input.Size()) { result_buffer_input.Clear(tid); } } void FeatureInteractionConstraint::ClearBuffers() { CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size()); CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size()); int constexpr kBlockThreads = 256; const int n_grids = static_cast<int>( common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads)); ClearBuffersKernel<<<n_grids, kBlockThreads>>>( output_buffer_bits_, input_buffer_bits_); } common::Span<int32_t> FeatureInteractionConstraint::QueryNode(int32_t node_id) { if (!has_constraint_) { return {}; } CHECK_LT(node_id, s_node_constraints_.size()); ClearBuffers(); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); auto p_result_buffer = result_buffer_.data(); LBitField64 node_constraints = s_node_constraints_[node_id]; thrust::device_ptr<int32_t> const out_end = thrust::copy_if( thrust::device, begin, end, p_result_buffer, [=]__device__(int32_t pos) { bool res = node_constraints.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); return {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; } __global__ void SetInputBufferKernel(common::Span<int32_t> feature_list_input, LBitField64 result_buffer_input) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < feature_list_input.size()) { result_buffer_input.Set(feature_list_input[tid]); } } __global__ void QueryFeatureListKernel(LBitField64 node_constraints, LBitField64 result_buffer_input, LBitField64 result_buffer_output) { result_buffer_output |= node_constraints; result_buffer_output &= result_buffer_input; } common::Span<int32_t> FeatureInteractionConstraint::Query( 
common::Span<int32_t> feature_list, int32_t nid) { if (!has_constraint_ || nid == 0) { return feature_list; } ClearBuffers(); LBitField64 node_constraints = s_node_constraints_[nid]; CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size()); int constexpr kBlockThreads = 256; const int n_grids = static_cast<int>( common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads)); SetInputBufferKernel<<<n_grids, kBlockThreads>>>(feature_list, input_buffer_bits_); QueryFeatureListKernel<<<n_grids, kBlockThreads>>>( node_constraints, input_buffer_bits_, output_buffer_bits_); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); LBitField64 local_result_buffer = output_buffer_bits_; thrust::device_ptr<int32_t> const out_end = thrust::copy_if( thrust::device, begin, end, result_buffer_.data(), [=]__device__(int32_t pos) { bool res = local_result_buffer.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); common::Span<int32_t> result = {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; return result; } // Find interaction sets for each feature, then store all features in // those sets in a buffer. __global__ void RestoreFeatureListFromSetsKernel( LBitField64 feature_buffer, int32_t fid, common::Span<int32_t> feature_interactions, common::Span<int32_t> feature_interactions_ptr, // of size n interaction set + 1 common::Span<int32_t> interactions_list, common::Span<int32_t> interactions_list_ptr) { auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x; auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y; // painful mapping: fid -> sets related to it -> features related to sets. auto const beg = interactions_list_ptr[fid]; auto const end = interactions_list_ptr[fid+1]; auto const n_sets = end - beg; if (tid_x < n_sets) { auto const set_id_pos = beg + tid_x; auto const set_id = interactions_list[set_id_pos]; auto const set_beg = feature_interactions_ptr[set_id]; auto const set_end = feature_interactions_ptr[set_id + 1]; auto const feature_pos = set_beg + tid_y; if (feature_pos < set_end) { feature_buffer.Set(feature_interactions[feature_pos]); } } } __global__ void InteractionConstraintSplitKernel(LBitField64 feature, int32_t feature_id, LBitField64 node, LBitField64 left, LBitField64 right) { auto tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid > node.Size()) { return; } // enable constraints from feature node |= feature; // clear the buffer after use if (tid < feature.Size()) { feature.Clear(tid); } // enable constraints from parent left |= node; right |= node; if (tid == feature_id) { // enable the split feature, set all of them at last instead of // setting it for parent to avoid race. 
node.Set(feature_id); left.Set(feature_id); right.Set(feature_id); } } void FeatureInteractionConstraint::Split( int32_t node_id, int32_t feature_id, int32_t left_id, int32_t right_id) { if (!has_constraint_) { return; } CHECK_NE(node_id, left_id) << " Split node: " << node_id << " and its left child: " << left_id << " cannot be the same."; CHECK_NE(node_id, right_id) << " Split node: " << node_id << " and its left child: " << right_id << " cannot be the same."; CHECK_LT(right_id, s_node_constraints_.size()); CHECK_NE(s_node_constraints_.size(), 0); LBitField64 node = s_node_constraints_[node_id]; LBitField64 left = s_node_constraints_[left_id]; LBitField64 right = s_node_constraints_[right_id]; dim3 const block3(16, 64, 1); dim3 const grid3(common::DivRoundUp(n_sets_, 16), common::DivRoundUp(s_fconstraints_.size(), 64)); RestoreFeatureListFromSetsKernel<<<grid3, block3>>> (feature_buffer_, feature_id, s_fconstraints_, s_fconstraints_ptr_, s_sets_, s_sets_ptr_); int constexpr kBlockThreads = 256; const int n_grids = static_cast<int>(common::DivRoundUp(node.Size(), kBlockThreads)); InteractionConstraintSplitKernel<<<n_grids, kBlockThreads>>> (feature_buffer_, feature_id, node, left, right); } } // namespace xgboost
8c1a7cfbeb8896d50999f55f2dfc65b36d13967f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>

#include <math.h>
#include <iostream>

const int N = 1000000;
const int blocksize = 256;

__global__ void add_two_tab(unsigned int *a, unsigned int *b, unsigned int *c, unsigned int n) {
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid<n) {
        c[tid] = a[tid] + b[tid];
    }
}

int main() {
    thrust::host_vector<unsigned int> a_tab;
    thrust::device_vector<unsigned int> ad_tab;
    thrust::host_vector<unsigned int> b_tab;
    thrust::device_vector<unsigned int> bd_tab;
    thrust::host_vector<unsigned int> c_tab;
    thrust::device_vector<unsigned int> cd_tab;

    for (int i = 1; i <= N; i++) {
        a_tab.push_back(1);
        b_tab.push_back(10);
        c_tab.push_back(0);
    }

    ad_tab = a_tab;
    bd_tab = b_tab;
    cd_tab = c_tab;

    dim3 dimBlock(blocksize);
    dim3 dimGrid(ceil((float)N / (float)blocksize));

    hipLaunchKernelGGL(( add_two_tab) , dim3(dimGrid),dim3(dimBlock) , 0, 0, ad_tab.data().get(), bd_tab.data().get(), cd_tab.data().get(), ad_tab.size());

    c_tab = cd_tab;

    for (int i = 0; i < 10; i++) {
        std::cout << i << " : " << c_tab[i] << "\n";
        std::cout << N-1-i << " : " << c_tab[N-1-i] << "\n";
    }

    return 0;
}
8c1a7cfbeb8896d50999f55f2dfc65b36d13967f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <math.h> #include <iostream> const int N = 1000000; const int blocksize = 256; __global__ void add_two_tab(unsigned int *a, unsigned int *b, unsigned int *c, unsigned int n) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid<n) { c[tid] = a[tid] + b[tid]; } } int main() { thrust::host_vector<unsigned int> a_tab; thrust::device_vector<unsigned int> ad_tab; thrust::host_vector<unsigned int> b_tab; thrust::device_vector<unsigned int> bd_tab; thrust::host_vector<unsigned int> c_tab; thrust::device_vector<unsigned int> cd_tab; for (int i = 1; i <= N; i++) { a_tab.push_back(1); b_tab.push_back(10); c_tab.push_back(0); } ad_tab = a_tab; bd_tab = b_tab; cd_tab = c_tab; dim3 dimBlock(blocksize); dim3 dimGrid(ceil((float)N / (float)blocksize)); add_two_tab <<< dimGrid,dimBlock >>>(ad_tab.data().get(), bd_tab.data().get(), cd_tab.data().get(), ad_tab.size()); c_tab = cd_tab; for (int i = 0; i < 10; i++) { std::cout << i << " : " << c_tab[i] << "\n"; std::cout << N-1-i << " : " << c_tab[N-1-i] << "\n"; } return 0; }
b9bde703f8aec04df961d0cbf03f5e66ceb2fcdc.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transpose.hip"
#include<chrono>
#include<iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            size_t sz = 1;
            float_t *src = NULL;
            hipMalloc(&src, XSIZE*YSIZE);
            float_t *dest = NULL;
            hipMalloc(&dest, XSIZE*YSIZE);
            size_t src_width = XSIZE;
            size_t src_height = YSIZE;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( transpose), dim3(gridBlock),dim3(threadBlock), 0, 0, sz,src,dest,src_width,src_height);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( transpose), dim3(gridBlock),dim3(threadBlock), 0, 0, sz,src,dest,src_width,src_height);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( transpose), dim3(gridBlock),dim3(threadBlock), 0, 0, sz,src,dest,src_width,src_height);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
b9bde703f8aec04df961d0cbf03f5e66ceb2fcdc.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transpose.cu"
#include<chrono>
#include<iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            size_t sz = 1;
            float_t *src = NULL;
            cudaMalloc(&src, XSIZE*YSIZE);
            float_t *dest = NULL;
            cudaMalloc(&dest, XSIZE*YSIZE);
            size_t src_width = XSIZE;
            size_t src_height = YSIZE;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            transpose<<<gridBlock,threadBlock>>>(sz,src,dest,src_width,src_height);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                transpose<<<gridBlock,threadBlock>>>(sz,src,dest,src_width,src_height);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                transpose<<<gridBlock,threadBlock>>>(sz,src,dest,src_width,src_height);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
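One detail worth noting in this benchmark harness: kernel launches are asynchronous, and the timed 1000-iteration loop is not followed by a device synchronization, so the steady_clock difference largely measures launch/enqueue cost rather than kernel execution time. A sketch (not from the file above) of the same measurement done with CUDA events, which time on the device itself; the transpose call and launch configuration stand in for the ones used by the harness:

cudaEvent_t start_evt, stop_evt;
cudaEventCreate(&start_evt);
cudaEventCreate(&stop_evt);

cudaEventRecord(start_evt);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    transpose<<<gridBlock,threadBlock>>>(sz,src,dest,src_width,src_height);
}
cudaEventRecord(stop_evt);
cudaEventSynchronize(stop_evt);            // wait until the last launch has finished

float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, start_evt, stop_evt);

cudaEventDestroy(start_evt);
cudaEventDestroy(stop_evt);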
51497ec9bba1ddaa16c211c590bb1548dd1deddb.hip
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/

#include <claraparabricks/genomeworks/cudamapper/index.hpp>
#include <claraparabricks/genomeworks/utils/cudautils.hpp>

#include "index_gpu.cuh"
#include "minimizer.hpp"

namespace claraparabricks
{

namespace genomeworks
{

namespace cudamapper
{

std::unique_ptr<Index> Index::create_index(DefaultDeviceAllocator allocator,
        const io::FastaParser& parser,
        const read_id_t first_read_id,
        const read_id_t past_the_last_read_id,
        const std::uint64_t kmer_size,
        const std::uint64_t window_size,
        const bool hash_representations,
        const double filtering_parameter,
        const hipStream_t cuda_stream)
{
    CGA_NVTX_RANGE(profiler, "create_index");
    return std::make_unique<IndexGPU<Minimizer>>(allocator, parser, first_read_id, past_the_last_read_id, kmer_size, window_size, hash_representations, filtering_parameter, cuda_stream);
}

std::unique_ptr<IndexHostCopyBase> IndexHostCopyBase::create_cache(const Index& index,
        const read_id_t first_read_id,
        const std::uint64_t kmer_size,
        const std::uint64_t window_size,
        const hipStream_t cuda_stream)
{
    CGA_NVTX_RANGE(profiler, "cache_D2H");
    return std::make_unique<IndexHostCopy>(index, first_read_id, kmer_size, window_size, cuda_stream);
}

} // namespace cudamapper

} // namespace genomeworks

} // namespace claraparabricks
51497ec9bba1ddaa16c211c590bb1548dd1deddb.cu
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*/

#include <claraparabricks/genomeworks/cudamapper/index.hpp>
#include <claraparabricks/genomeworks/utils/cudautils.hpp>

#include "index_gpu.cuh"
#include "minimizer.hpp"

namespace claraparabricks
{

namespace genomeworks
{

namespace cudamapper
{

std::unique_ptr<Index> Index::create_index(DefaultDeviceAllocator allocator,
        const io::FastaParser& parser,
        const read_id_t first_read_id,
        const read_id_t past_the_last_read_id,
        const std::uint64_t kmer_size,
        const std::uint64_t window_size,
        const bool hash_representations,
        const double filtering_parameter,
        const cudaStream_t cuda_stream)
{
    CGA_NVTX_RANGE(profiler, "create_index");
    return std::make_unique<IndexGPU<Minimizer>>(allocator, parser, first_read_id, past_the_last_read_id, kmer_size, window_size, hash_representations, filtering_parameter, cuda_stream);
}

std::unique_ptr<IndexHostCopyBase> IndexHostCopyBase::create_cache(const Index& index,
        const read_id_t first_read_id,
        const std::uint64_t kmer_size,
        const std::uint64_t window_size,
        const cudaStream_t cuda_stream)
{
    CGA_NVTX_RANGE(profiler, "cache_D2H");
    return std::make_unique<IndexHostCopy>(index, first_read_id, kmer_size, window_size, cuda_stream);
}

} // namespace cudamapper

} // namespace genomeworks

} // namespace claraparabricks
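Both factory functions in this pair wrap their bodies in CGA_NVTX_RANGE so they appear as named ranges in Nsight/nvprof timelines. The macro itself is defined in the cudautils.hpp header, which is not part of this pair; the sketch below only illustrates the underlying NVTX push/pop pattern such macros typically expand to, and the function name and body are placeholders of my own:

#include <nvToolsExt.h>

void build_index_profiled()
{
    nvtxRangePushA("create_index");   // open a named range on this thread
    // ... do the work being profiled ...
    nvtxRangePop();                   // close the innermost open range
}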
2254f2164a04b864b1455a6d19675dd085a96b4f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "math.h" #include <hiprand/hiprand_kernel.h> #include <time.h> #include <fstream> #include "transition_matrix.hpp" #include "cardiac_twitch.hpp" #include "ta_utilities.hpp" using namespace std; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // Write array of floats out to text file for plotting void write_out(float * data, unsigned int num_elements) { ofstream outfile; outfile.open("./results.csv"); // for each elem for (unsigned int i = 0; i < num_elements; ++i) { outfile << data[i] << "\n"; } } // Divides floats in an array by "scalar" __global__ void cuda_div(float *in, float scalar, int n) { int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; while(globalIdx < n) { float temp = in[globalIdx] / scalar; in[globalIdx] = temp; globalIdx += blockDim.x * gridDim.x; } } // Kernel to perform cardiac tissue MCMC __global__ void mcmc(const float * transMatrix, float *masterForces, unsigned int iterations, unsigned int reps) { int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; while (globalIdx < reps) { unsigned int base_index = 0; float r = 0.0; float sum_contracted = 0; // All RUs set to initial state of 0 unsigned int RU[NUM_RUS] = {0}; // Variable to remember original RU state of current RU (see j loop) before it was updated unsigned int original_current_state = 0; // Variable to remember original RU state of the left neighbor (see j loop) before it was updated unsigned int original_left_state = 0; // Initialize random number generator, seeding with globalIdx hiprandState_t s; hiprand_init (globalIdx , 0, 0, &s); for (unsigned int i = 0; i < iterations; i++) { original_left_state = 0; // Only update non-edge RUs {1,25} for(int j = 1; j < NUM_RUS - 1; j++) { // Generate a single random number r = hiprand_uniform(&s); original_current_state = RU[j]; // linearization formula for a 5D matrix. ////index = ((((leftNeighbor * dimen2 + rightNeighbor) * dimen3 + currentState) * dimen4 + MutantBinary) * dimen5) base_index = ((((original_left_state * 6 + (RU[j+1])) * 6 + (RU[j]) ) * 2 + 0) * 6); // Offset current RU state by using the cooresponding value of M. At most, 1 of these M's will be nonzero unsigned int M1 = (r < transMatrix[base_index]) * (transMatrix[base_index + 1]); unsigned int M2 = (! ( r < transMatrix[base_index])) * ( r < transMatrix[base_index + 2]) * (transMatrix[base_index + 3]); unsigned int M3 = (! ( r < transMatrix[base_index + 2])) * (r < transMatrix[base_index + 4]) * (transMatrix[base_index + 5]); RU[j] += M1 + M2 + M3; // Get ready for next j iteration original_left_state = original_current_state; } // Count how many of the RU states, excluding edege RUs, are in the contractile state (5) for(int z = 1; z < NUM_RUS - 1; z++) { sum_contracted += (RU[z] == 5); } atomicAdd(masterForces + i, sum_contracted); sum_contracted = 0.0; } globalIdx += blockDim.x * gridDim.x; } } int main() { // These functions allow you to select the least utilized GPU // on your system as well as enforce a time limit on program execution. // Please leave these enabled as a courtesy to your fellow classmates // if you are using a shared computer. 
You may ignore or remove these // functions if you are running on your local machine. TA_Utilities::select_least_utilized_GPU(); int max_time_allowed_in_seconds = 30; TA_Utilities::enforce_time_limit(max_time_allowed_in_seconds); clock_t time1 = clock(); unsigned int blockSize = 512; unsigned int num_blocks = 40; unsigned int iterations = 100000; unsigned int reps = 4096; // Host input vectors (transition matrix and force vector) float * h_TM; float *h_F; // Device input vectors (transition matrix and force vector) float *d_F; float *d_TM; // Sizes of vectors. For any element at index i in the force vector, that element represents // the force fo the cardiac tissue at time i * dt where dt is defined in cardiac_twitch.hpp. size_t sizeF = iterations * sizeof(float); size_t sizeTM = transMatrixSize * sizeof(float); // Init host vectors h_F = (float*) calloc(iterations, sizeof(float)); h_TM = gen_transition_matrix(); // Allocate memory for each vector on GPU gpuErrchk( hipMalloc(&d_F, sizeF) ); gpuErrchk( hipMalloc(&d_TM, sizeTM) ); // Copy host vectors to device gpuErrchk( hipMemcpy( d_TM, h_TM, sizeTM, hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy( d_F, h_F, sizeF, hipMemcpyHostToDevice) ); // Execute the simulation hipLaunchKernelGGL(( mcmc), dim3(num_blocks), dim3(blockSize), 0, 0, d_TM, d_F, iterations, reps); // Average % activation across all repitions float normalization_constant = reps * (NUM_RUS - 2); hipLaunchKernelGGL(( cuda_div), dim3(num_blocks), dim3(blockSize), 0, 0, d_F, normalization_constant, iterations); // Copy array back to host gpuErrchk( hipMemcpy(h_F, d_F, sizeF, hipMemcpyDeviceToHost) ); // Release device memory gpuErrchk( hipFree(d_F) ); gpuErrchk( hipFree(d_TM) ); // Write results out to file for viewing write_out(h_F, iterations); // Release host memory free(h_F); free(h_TM); // Print time printf("Total time elapsed: %ld miliseconds\n", (clock() - time1) / (CLOCKS_PER_SEC / 1000)); };
2254f2164a04b864b1455a6d19675dd085a96b4f.cu
#include <stdio.h> #include <stdlib.h> #include "cuda.h" #include "curand.h" #include "math.h" #include <curand_kernel.h> #include <time.h> #include <fstream> #include "transition_matrix.hpp" #include "cardiac_twitch.hpp" #include "ta_utilities.hpp" using namespace std; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // Write array of floats out to text file for plotting void write_out(float * data, unsigned int num_elements) { ofstream outfile; outfile.open("./results.csv"); // for each elem for (unsigned int i = 0; i < num_elements; ++i) { outfile << data[i] << "\n"; } } // Divides floats in an array by "scalar" __global__ void cuda_div(float *in, float scalar, int n) { int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; while(globalIdx < n) { float temp = in[globalIdx] / scalar; in[globalIdx] = temp; globalIdx += blockDim.x * gridDim.x; } } // Kernel to perform cardiac tissue MCMC __global__ void mcmc(const float * transMatrix, float *masterForces, unsigned int iterations, unsigned int reps) { int globalIdx = blockIdx.x * blockDim.x + threadIdx.x; while (globalIdx < reps) { unsigned int base_index = 0; float r = 0.0; float sum_contracted = 0; // All RUs set to initial state of 0 unsigned int RU[NUM_RUS] = {0}; // Variable to remember original RU state of current RU (see j loop) before it was updated unsigned int original_current_state = 0; // Variable to remember original RU state of the left neighbor (see j loop) before it was updated unsigned int original_left_state = 0; // Initialize random number generator, seeding with globalIdx curandState s; curand_init (globalIdx , 0, 0, &s); for (unsigned int i = 0; i < iterations; i++) { original_left_state = 0; // Only update non-edge RUs {1,25} for(int j = 1; j < NUM_RUS - 1; j++) { // Generate a single random number r = curand_uniform(&s); original_current_state = RU[j]; // linearization formula for a 5D matrix. ////index = ((((leftNeighbor * dimen2 + rightNeighbor) * dimen3 + currentState) * dimen4 + MutantBinary) * dimen5) base_index = ((((original_left_state * 6 + (RU[j+1])) * 6 + (RU[j]) ) * 2 + 0) * 6); // Offset current RU state by using the cooresponding value of M. At most, 1 of these M's will be nonzero unsigned int M1 = (r < transMatrix[base_index]) * (transMatrix[base_index + 1]); unsigned int M2 = (! ( r < transMatrix[base_index])) * ( r < transMatrix[base_index + 2]) * (transMatrix[base_index + 3]); unsigned int M3 = (! ( r < transMatrix[base_index + 2])) * (r < transMatrix[base_index + 4]) * (transMatrix[base_index + 5]); RU[j] += M1 + M2 + M3; // Get ready for next j iteration original_left_state = original_current_state; } // Count how many of the RU states, excluding edege RUs, are in the contractile state (5) for(int z = 1; z < NUM_RUS - 1; z++) { sum_contracted += (RU[z] == 5); } atomicAdd(masterForces + i, sum_contracted); sum_contracted = 0.0; } globalIdx += blockDim.x * gridDim.x; } } int main() { // These functions allow you to select the least utilized GPU // on your system as well as enforce a time limit on program execution. // Please leave these enabled as a courtesy to your fellow classmates // if you are using a shared computer. You may ignore or remove these // functions if you are running on your local machine. 
TA_Utilities::select_least_utilized_GPU(); int max_time_allowed_in_seconds = 30; TA_Utilities::enforce_time_limit(max_time_allowed_in_seconds); clock_t time1 = clock(); unsigned int blockSize = 512; unsigned int num_blocks = 40; unsigned int iterations = 100000; unsigned int reps = 4096; // Host input vectors (transition matrix and force vector) float * h_TM; float *h_F; // Device input vectors (transition matrix and force vector) float *d_F; float *d_TM; // Sizes of vectors. For any element at index i in the force vector, that element represents // the force fo the cardiac tissue at time i * dt where dt is defined in cardiac_twitch.hpp. size_t sizeF = iterations * sizeof(float); size_t sizeTM = transMatrixSize * sizeof(float); // Init host vectors h_F = (float*) calloc(iterations, sizeof(float)); h_TM = gen_transition_matrix(); // Allocate memory for each vector on GPU gpuErrchk( cudaMalloc(&d_F, sizeF) ); gpuErrchk( cudaMalloc(&d_TM, sizeTM) ); // Copy host vectors to device gpuErrchk( cudaMemcpy( d_TM, h_TM, sizeTM, cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy( d_F, h_F, sizeF, cudaMemcpyHostToDevice) ); // Execute the simulation mcmc<<<num_blocks, blockSize>>>(d_TM, d_F, iterations, reps); // Average % activation across all repitions float normalization_constant = reps * (NUM_RUS - 2); cuda_div<<<num_blocks, blockSize>>>(d_F, normalization_constant, iterations); // Copy array back to host gpuErrchk( cudaMemcpy(h_F, d_F, sizeF, cudaMemcpyDeviceToHost) ); // Release device memory gpuErrchk( cudaFree(d_F) ); gpuErrchk( cudaFree(d_TM) ); // Write results out to file for viewing write_out(h_F, iterations); // Release host memory free(h_F); free(h_TM); // Print time printf("Total time elapsed: %ld miliseconds\n", (clock() - time1) / (CLOCKS_PER_SEC / 1000)); };
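In the MCMC pair above each thread calls curand_init with its global index as the seed and subsequence 0. That works, but cuRAND's documented way to obtain statistically independent per-thread streams is a single fixed seed with the thread index as the subsequence. A minimal sketch of that setup; kernel names and the split into an init kernel and a draw kernel are illustrative, not taken from the file:

#include <curand_kernel.h>

__global__ void init_rng(curandState *states, unsigned long long seed, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        curand_init(seed, /*subsequence=*/tid, /*offset=*/0, &states[tid]);
    }
}

__global__ void draw_uniform(curandState *states, float *out, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        curandState local = states[tid];    // work on a register copy of the state
        out[tid] = curand_uniform(&local);  // uniform draw in (0, 1]
        states[tid] = local;                // store the advanced state back
    }
}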
3832d4509dee07cd805d8432804a1a74a3490c59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/roi_pool_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T> __global__ void GPUROIPoolForward( const int nthreads, const T* input_data, const int64_t* input_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, int* roi_batch_id_data, T* output_data, int64_t* argmax_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const int64_t* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; int roi_start_w = round(offset_input_rois[0] * spatial_scale); int roi_start_h = round(offset_input_rois[1] * spatial_scale); int roi_end_w = round(offset_input_rois[2] * spatial_scale); int roi_end_h = round(offset_input_rois[3] * spatial_scale); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); int hstart = static_cast<int>(floor(static_cast<double>(ph) * static_cast<double>(roi_height) / static_cast<double>(pooled_height))); int wstart = static_cast<int>(floor(static_cast<double>(pw) * static_cast<double>(roi_width) / static_cast<double>(pooled_width))); int hend = static_cast<int>(ceil(static_cast<double>(ph + 1) * static_cast<double>(roi_height) / static_cast<double>(pooled_height))); int wend = static_cast<int>(ceil(static_cast<double>(pw + 1) * static_cast<double>(roi_width) / static_cast<double>(pooled_width))); hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); T maxval = is_empty ? 
0 : -std::numeric_limits<T>::max(); int maxidx = -1; const T* offset_input_data = input_data + (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_data_index = h * width + w; if (offset_input_data[input_data_index] > maxval) { maxval = offset_input_data[input_data_index]; maxidx = input_data_index; } } } output_data[i] = maxval; if (argmax_data) { argmax_data[i] = maxidx; } } } template <typename T> __global__ void GPUROIPoolBackward( const int nthreads, const int64_t* input_rois, const T* output_grad, const int64_t* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, int* roi_batch_id_data, T* input_grad) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; int roi_batch_ind = roi_batch_id_data[n]; int input_offset = (roi_batch_ind * channels + c) * height * width; int output_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_output_grad = output_grad + output_offset; T* offset_input_grad = input_grad + input_offset; const int64_t* offset_argmax_data = argmax_data + output_offset; int argmax = offset_argmax_data[ph * pooled_width + pw]; if (argmax != -1) { platform::CudaAtomicAdd( offset_input_grad + argmax, static_cast<T>(offset_output_grad[ph * pooled_width + pw])); } } } template <typename Place, typename T> class GPUROIPoolOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto* argmax = ctx.Output<Tensor>("Argmax"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; auto in_stride = framework::stride(in_dims); int channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; int rois_num = rois->dims()[0]; if (rois_num == 0) return; int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; framework::Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, "The rois_batch_size and imgs batch_size must be the same."); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, "The rois_num from input and lod must be the same."); for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } framework::Tensor roi_batch_id_list_gpu; framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), ctx.device_context(), &roi_batch_id_list_gpu); hipLaunchKernelGGL(( GPUROIPoolForward< T>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), output_size, in->data<T>(), rois->data<int64_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, 
roi_batch_id_list_gpu.data<int>(), out->mutable_data<T>(ctx.GetPlace()), argmax->mutable_data<int64_t>(ctx.GetPlace())); } }; template <typename Place, typename T> class GPUROIPoolGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* argmax = ctx.Input<Tensor>("Argmax"); auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* x_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); int rois_num = rois->dims()[0]; int channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (x_grad) { framework::Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } framework::Tensor roi_batch_id_list_gpu; framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), ctx.device_context(), &roi_batch_id_list_gpu); x_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<Place, T> set_zero; set_zero(ctx.cuda_device_context(), x_grad, static_cast<T>(0)); int output_grad_size = out_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { hipLaunchKernelGGL(( GPUROIPoolBackward< T>), dim3(blocks), dim3(threads), 0, ctx.cuda_device_context().stream(), output_grad_size, rois->data<int64_t>(), out_grad->data<T>(), argmax->data<int64_t>(), rois_num, spatial_scale, channels, height, width, pooled_height, pooled_width, roi_batch_id_list_gpu.data<int>(), x_grad->mutable_data<T>(ctx.GetPlace())); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( roi_pool, ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( roi_pool_grad, ops::GPUROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>);
3832d4509dee07cd805d8432804a1a74a3490c59.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/roi_pool_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T> __global__ void GPUROIPoolForward( const int nthreads, const T* input_data, const int64_t* input_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, int* roi_batch_id_data, T* output_data, int64_t* argmax_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const int64_t* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; int roi_start_w = round(offset_input_rois[0] * spatial_scale); int roi_start_h = round(offset_input_rois[1] * spatial_scale); int roi_end_w = round(offset_input_rois[2] * spatial_scale); int roi_end_h = round(offset_input_rois[3] * spatial_scale); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); int hstart = static_cast<int>(floor(static_cast<double>(ph) * static_cast<double>(roi_height) / static_cast<double>(pooled_height))); int wstart = static_cast<int>(floor(static_cast<double>(pw) * static_cast<double>(roi_width) / static_cast<double>(pooled_width))); int hend = static_cast<int>(ceil(static_cast<double>(ph + 1) * static_cast<double>(roi_height) / static_cast<double>(pooled_height))); int wend = static_cast<int>(ceil(static_cast<double>(pw + 1) * static_cast<double>(roi_width) / static_cast<double>(pooled_width))); hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); T maxval = is_empty ? 
0 : -std::numeric_limits<T>::max(); int maxidx = -1; const T* offset_input_data = input_data + (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_data_index = h * width + w; if (offset_input_data[input_data_index] > maxval) { maxval = offset_input_data[input_data_index]; maxidx = input_data_index; } } } output_data[i] = maxval; if (argmax_data) { argmax_data[i] = maxidx; } } } template <typename T> __global__ void GPUROIPoolBackward( const int nthreads, const int64_t* input_rois, const T* output_grad, const int64_t* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, int* roi_batch_id_data, T* input_grad) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; int roi_batch_ind = roi_batch_id_data[n]; int input_offset = (roi_batch_ind * channels + c) * height * width; int output_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_output_grad = output_grad + output_offset; T* offset_input_grad = input_grad + input_offset; const int64_t* offset_argmax_data = argmax_data + output_offset; int argmax = offset_argmax_data[ph * pooled_width + pw]; if (argmax != -1) { platform::CudaAtomicAdd( offset_input_grad + argmax, static_cast<T>(offset_output_grad[ph * pooled_width + pw])); } } } template <typename Place, typename T> class GPUROIPoolOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto* argmax = ctx.Output<Tensor>("Argmax"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto in_dims = in->dims(); int batch_size = in_dims[0]; auto in_stride = framework::stride(in_dims); int channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; int rois_num = rois->dims()[0]; if (rois_num == 0) return; int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; framework::Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, "The rois_batch_size and imgs batch_size must be the same."); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, "The rois_num from input and lod must be the same."); for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } framework::Tensor roi_batch_id_list_gpu; framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), ctx.device_context(), &roi_batch_id_list_gpu); GPUROIPoolForward< T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( output_size, in->data<T>(), rois->data<int64_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, roi_batch_id_list_gpu.data<int>(), 
out->mutable_data<T>(ctx.GetPlace()), argmax->mutable_data<int64_t>(ctx.GetPlace())); } }; template <typename Place, typename T> class GPUROIPoolGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* argmax = ctx.Input<Tensor>("Argmax"); auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* x_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); int rois_num = rois->dims()[0]; int channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (x_grad) { framework::Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } framework::Tensor roi_batch_id_list_gpu; framework::TensorCopy(roi_batch_id_list, ctx.GetPlace(), ctx.device_context(), &roi_batch_id_list_gpu); x_grad->mutable_data<T>(ctx.GetPlace()); math::SetConstant<Place, T> set_zero; set_zero(ctx.cuda_device_context(), x_grad, static_cast<T>(0)); int output_grad_size = out_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { GPUROIPoolBackward< T><<<blocks, threads, 0, ctx.cuda_device_context().stream()>>>( output_grad_size, rois->data<int64_t>(), out_grad->data<T>(), argmax->data<int64_t>(), rois_num, spatial_scale, channels, height, width, pooled_height, pooled_width, roi_batch_id_list_gpu.data<int>(), x_grad->mutable_data<T>(ctx.GetPlace())); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( roi_pool, ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( roi_pool_grad, ops::GPUROIPoolGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIPoolOpKernel<paddle::platform::CUDADeviceContext, double>);
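Both ROI-pool kernels in this pair use the grid-stride idiom: the flat index starts at blockIdx.x * blockDim.x + threadIdx.x and advances by blockDim.x * gridDim.x, which is what lets NumBlocks() cap the grid at 4096 blocks regardless of problem size. (In the final registration, the double instantiation for roi_pool_grad appears to reuse GPUROIPoolOpKernel rather than GPUROIPoolGradOpKernel, which looks like a copy-paste slip carried over from the upstream source.) A stand-alone sketch of the same loop shape, with illustrative names not taken from the Paddle code:

__global__ void scale_inplace(float *data, float alpha, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = idx; i < n; i += stride) {   // each thread strides across the array in grid-sized hops
        data[i] *= alpha;
    }
}

// Launch with a capped grid, mirroring NumBlocks() above:
//   int threads = 512;
//   int blocks  = std::min((n + threads - 1) / threads, 4096);
//   scale_inplace<<<blocks, threads>>>(d_data, 2.0f, n);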
007a3818b9e0c2c599253bea9d4c70bc7e5c28f7.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include "datadef.h" #include "warp_device.cuh" #include "check_cuda.h" #include "wfloat3.h" __global__ void check_pointers_kernel(unsigned N, unsigned dex0, unsigned dex1, cross_section_data* d_xsdata ){ // declare shared variables __shared__ unsigned n_isotopes; __shared__ unsigned energy_grid_len; __shared__ unsigned total_reaction_channels; __shared__ unsigned* rxn_numbers; __shared__ unsigned* rxn_numbers_total; __shared__ float* energy_grid; __shared__ float* rxn_Q; __shared__ float* xs; __shared__ float* awr; __shared__ float* temp; __shared__ dist_container* dist_scatter; __shared__ dist_container* dist_energy; // have thread 0 of block copy all pointers and static info into shared memory if (threadIdx.x == 0){ n_isotopes = d_xsdata[0].n_isotopes; energy_grid_len = d_xsdata[0].energy_grid_len; total_reaction_channels = d_xsdata[0].total_reaction_channels; rxn_numbers = d_xsdata[0].rxn_numbers; rxn_numbers_total = d_xsdata[0].rxn_numbers_total; energy_grid = d_xsdata[0].energy_grid; rxn_Q = d_xsdata[0].Q; xs = d_xsdata[0].xs; awr = d_xsdata[0].awr; temp = d_xsdata[0].temp; dist_scatter = d_xsdata[0].dist_scatter; dist_energy = d_xsdata[0].dist_energy; } // make sure shared loads happen before anything else __syncthreads(); // return immediately if out of bounds int tid = threadIdx.x+blockIdx.x*blockDim.x; if (tid >= N){return;} printf("INDEX %u -> energy pointer = %p -> lower %p upper %p\n",dex0+tid,dist_energy, dist_energy[ dex0+tid].upper,dist_energy[ dex0+tid].lower); printf("INDEX %u -> scatter pointer = %p -> lower %p upper %p\n",dex0+tid,dist_scatter,dist_scatter[dex0+tid].upper,dist_scatter[dex0+tid].lower); } /** * \brief a * \details b * * @param[in] NUM_THREADS - the number of threads to run per thread block * @param[in] dex0 - starting index * @param[in] dex1 - ending index * @param[in] d_xsdata - device pointer to cross section data pointer array */ void check_pointers(unsigned NUM_THREADS, unsigned dex0, unsigned dex1, cross_section_data* d_xsdata){ int N = dex1-dex0+1; if (N<1){printf("Negative range in check_pointers! dex0 %u dex1 %u -> N = %d\n",dex0,dex1,N);return;} unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS; hipLaunchKernelGGL(( check_pointers_kernel) , dim3(blks), dim3(NUM_THREADS) , 0, 0, N, dex0, dex1, d_xsdata ); check_cuda(hipDeviceSynchronize()); }
007a3818b9e0c2c599253bea9d4c70bc7e5c28f7.cu
#include <cuda.h> #include <stdio.h> #include "datadef.h" #include "warp_device.cuh" #include "check_cuda.h" #include "wfloat3.h" __global__ void check_pointers_kernel(unsigned N, unsigned dex0, unsigned dex1, cross_section_data* d_xsdata ){ // declare shared variables __shared__ unsigned n_isotopes; __shared__ unsigned energy_grid_len; __shared__ unsigned total_reaction_channels; __shared__ unsigned* rxn_numbers; __shared__ unsigned* rxn_numbers_total; __shared__ float* energy_grid; __shared__ float* rxn_Q; __shared__ float* xs; __shared__ float* awr; __shared__ float* temp; __shared__ dist_container* dist_scatter; __shared__ dist_container* dist_energy; // have thread 0 of block copy all pointers and static info into shared memory if (threadIdx.x == 0){ n_isotopes = d_xsdata[0].n_isotopes; energy_grid_len = d_xsdata[0].energy_grid_len; total_reaction_channels = d_xsdata[0].total_reaction_channels; rxn_numbers = d_xsdata[0].rxn_numbers; rxn_numbers_total = d_xsdata[0].rxn_numbers_total; energy_grid = d_xsdata[0].energy_grid; rxn_Q = d_xsdata[0].Q; xs = d_xsdata[0].xs; awr = d_xsdata[0].awr; temp = d_xsdata[0].temp; dist_scatter = d_xsdata[0].dist_scatter; dist_energy = d_xsdata[0].dist_energy; } // make sure shared loads happen before anything else __syncthreads(); // return immediately if out of bounds int tid = threadIdx.x+blockIdx.x*blockDim.x; if (tid >= N){return;} printf("INDEX %u -> energy pointer = %p -> lower %p upper %p\n",dex0+tid,dist_energy, dist_energy[ dex0+tid].upper,dist_energy[ dex0+tid].lower); printf("INDEX %u -> scatter pointer = %p -> lower %p upper %p\n",dex0+tid,dist_scatter,dist_scatter[dex0+tid].upper,dist_scatter[dex0+tid].lower); } /** * \brief a * \details b * * @param[in] NUM_THREADS - the number of threads to run per thread block * @param[in] dex0 - starting index * @param[in] dex1 - ending index * @param[in] d_xsdata - device pointer to cross section data pointer array */ void check_pointers(unsigned NUM_THREADS, unsigned dex0, unsigned dex1, cross_section_data* d_xsdata){ int N = dex1-dex0+1; if (N<1){printf("Negative range in check_pointers! dex0 %u dex1 %u -> N = %d\n",dex0,dex1,N);return;} unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS; check_pointers_kernel <<< blks, NUM_THREADS >>> ( N, dex0, dex1, d_xsdata ); check_cuda(cudaThreadSynchronize()); }
39662bc393ed7e5ba192426fefb182c3aa6c3655.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// impact of prefetching the data - CUDA lab #include <stdio.h> #include <stdlib.h> #include <chrono> using namespace std::chrono; void initWith(float num, float *a, int N) { for(int i = 0; i < N; ++i) { a[i] = num; } } __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *vector, int N) { for(int i = 0; i < N; i++) { if(vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main(int argc, char** argv) { int deviceId; int numberOfSMs; hipGetDevice(&deviceId); hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId); printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs); char* pEnd; const int N = 2<<strtol(argv[1], &pEnd, 10); //2<<24; size_t size = N * sizeof(float); float *a; float *b; float *c; hipMallocManaged(&a, size); hipMallocManaged(&b, size); hipMallocManaged(&c, size); FILE *f; f = fopen(argv[2], "a"); if (strtol(argv[1], &pEnd, 10) == 10) { fprintf(f, "NumElement\t\tInit\t\tDevice\n"); } fprintf(f, "%d\t\t", N); auto start = high_resolution_clock::now(); initWith(3, a, N); initWith(4, b, N); initWith(0, c, N); auto stop = high_resolution_clock::now(); auto duration = duration_cast<microseconds>(stop - start); printf("Init: %d us \n", duration.count()); fprintf(f, "%d\t\t", duration.count()); /* * Add asynchronous prefetching after the data is initialized, * and before launching the kernel, to avoid host to GPU page * faulting. */ hipMemPrefetchAsync(a, size, deviceId); size_t threadsPerBlock; size_t numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; hipError_t addVectorsErr; hipError_t asyncErr; start = high_resolution_clock::now(); hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N); addVectorsErr = hipGetLastError(); if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr)); asyncErr = hipDeviceSynchronize(); if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr)); stop = high_resolution_clock::now(); duration = duration_cast<microseconds>(stop - start); printf("Device: %d us \n", duration.count()); fprintf(f, "%d\n", duration.count()); fclose(f); checkElementsAre(7, c, N); hipFree(a); hipFree(b); hipFree(c); }
39662bc393ed7e5ba192426fefb182c3aa6c3655.cu
/// impact of prefetching the data - CUDA lab #include <stdio.h> #include <stdlib.h> #include <chrono> using namespace std::chrono; void initWith(float num, float *a, int N) { for(int i = 0; i < N; ++i) { a[i] = num; } } __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *vector, int N) { for(int i = 0; i < N; i++) { if(vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main(int argc, char** argv) { int deviceId; int numberOfSMs; cudaGetDevice(&deviceId); cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId); printf("Device ID: %d\tNumber of SMs: %d\n", deviceId, numberOfSMs); char* pEnd; const int N = 2<<strtol(argv[1], &pEnd, 10); //2<<24; size_t size = N * sizeof(float); float *a; float *b; float *c; cudaMallocManaged(&a, size); cudaMallocManaged(&b, size); cudaMallocManaged(&c, size); FILE *f; f = fopen(argv[2], "a"); if (strtol(argv[1], &pEnd, 10) == 10) { fprintf(f, "NumElement\t\tInit\t\tDevice\n"); } fprintf(f, "%d\t\t", N); auto start = high_resolution_clock::now(); initWith(3, a, N); initWith(4, b, N); initWith(0, c, N); auto stop = high_resolution_clock::now(); auto duration = duration_cast<microseconds>(stop - start); printf("Init: %d us \n", duration.count()); fprintf(f, "%d\t\t", duration.count()); /* * Add asynchronous prefetching after the data is initialized, * and before launching the kernel, to avoid host to GPU page * faulting. */ cudaMemPrefetchAsync(a, size, deviceId); size_t threadsPerBlock; size_t numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; cudaError_t addVectorsErr; cudaError_t asyncErr; start = high_resolution_clock::now(); addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N); addVectorsErr = cudaGetLastError(); if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr)); asyncErr = cudaDeviceSynchronize(); if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr)); stop = high_resolution_clock::now(); duration = duration_cast<microseconds>(stop - start); printf("Device: %d us \n", duration.count()); fprintf(f, "%d\n", duration.count()); fclose(f); checkElementsAre(7, c, N); cudaFree(a); cudaFree(b); cudaFree(c); }
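The program's own comment explains that prefetching is there to avoid host-to-GPU page faulting, but only buffer a is actually prefetched, so b and c still fault on first device access, and c faults again on the host when checkElementsAre reads it. A sketch extending the same idea to all three managed buffers, including a prefetch of the result back to the CPU (cudaCpuDeviceId is the built-in host destination id); this is an illustration, not a change to the files above:

cudaMemPrefetchAsync(a, size, deviceId);
cudaMemPrefetchAsync(b, size, deviceId);
cudaMemPrefetchAsync(c, size, deviceId);

addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
cudaDeviceSynchronize();

cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);   // result is verified on the host next
cudaDeviceSynchronize();
checkElementsAre(7, c, N);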
d1ba10ace94886564c5e38fb1ee904257429839c.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> using namespace std; #include <thrust/reduce.h> #include <thrust/sequence.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <fcntl.h> #include <errno.h> #include <math.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> // rand() from matlab #include "RndC_ifc.h" #include "RndCState.h" #include "cs_dbg.h" #include "cs_helper.h" #include "cs_whm_encode_b.h" #include "cs_header.h" #include "cs_block.h" #include "cs_perm_mlseq.h" #include "cs_expand.h" #include "cs_interpolate.h" #include "cs_perm_selection.h" #include "cs_copy_box.h" #include "cs_edge_detect_v2.h" #include "cs_motion_detect_v2.h" #include "cs_motion_report.h" #include "cs_ipcam.h" #include "cs_config.h" #include "cs_dct.h" #define CUDA_DBG #define DBG_CP_DOWN 0x1 #define DBG_BLKING 0x2 #define DBG_WHM 0x4 #define DBG_PERM_R 0x8 #define DBG_PERM_L 0x10 #define DBG_INTER 0x20 #define DBG_SWAP 0x40 #define DBG_EXPAND 0x80 #define DBG_ANALYSIS 0x100 #define DBG_ED 0x200 #define DBG_MOTION 0x400 #define DBG_L1_NORM 0x800 #define DBG_COPY_DONE 0x1000 #define DBG_MT_IDX 0x2000 #define DBG_MT_STEP0 0x4000 #define DBG_MT_STEP1 0x8000 #define DBG_MT_STEP2 0x10000 #define DBG_MT_STEP3 0x20000 #define DBG_MT_STEP4 0x40000 #define DBG_C_2_I 0x80000 // proto void fix_it ( int fin, int fout ) ; int allocate_d_mem() ; int allocate_h_mem() ; int setup_perm_tbls( char *, char *) ; int do_measurement ( int fin, int fout ) ; int make_one_component( int fout, char *hin_a, int in_size, int x, int y, int xbdim, int ybdim, int need_interpolate, struct frame_list *fp ) ; // misc buffer size in elements and size static int blk_size_e, blk_size_i, perm_size_e, perm_size_i ; static int perm_blk_size_e ; // stat static int total_d_mem = 0 ; // output file FILE *md_filep = NULL ; // host buffers static char *ybufp = NULL, *vbufp = NULL, *ubufp = NULL ; static int *outbufp = NULL ; static int nblk_in_x = 1, nblk_in_y = 1 ; // for expand u/v to the same size as y static int do_interpolate = 1 ; // default is yes // device buffers ... *dperm_lp is one of cube_info[i].cube_perm int *dperm_rp = NULL, *din_b1 = NULL, *din_b2 = NULL ; int *dperm_ml_rp = NULL, *dperm_ml_lp = NULL ; // read the ML file // 0:inner, 1:side, 2:corner static struct cube cube_info[ CUBE_INFO_CNT ] ; static struct cube wcube_info[ CUBE_INFO_CNT ] ; // a working copy from cube_info above static double comp_ratio_f = 100.0 ; static int do_shift = 0 ; // for analysis ... static struct cs_xyz *d_cs_xyzp = NULL ; static int *d_host_io = NULL ; // misc static int inner_cube_size = 0 ; int cs_config_check( struct cs_config *csp ) ; // rand struct RndCState rnd_state_1 ; struct RndCState rnd_state_2 ; static int blocks_processed = 0 ; // total number of frame blocks processed ... 
static int first_block = 1 ; static int in_block_to = 0 ; // next "empty" block, for overlap in T domain static int *cudadbgp = NULL ; // 256k entry static struct cs_config csc ; enum { CS_TIMER_TOTAL, CS_TIMER_MEMCPY_DOWN, CS_TIMER_C_TO_I, CS_TIMER_EXPANSION, CS_TIMER_INTER, CS_TIMER_BLOCKING, CS_TIMER_PERMR, CS_TIMER_MEA, CS_TIMER_PERML, CS_TIMER_SWAP, CS_TIMER_MEMCPY_UP, CS_TIMER_ANALYSIS, CS_TIMER_ANALYSIS_EDGE, CS_TIMER_ANALYSIS_MD0, CS_TIMER_ANALYSIS_MD1, CS_TIMER_ANALYSIS_MD2, CS_TIMER_ANALYSIS_MD3, CS_TIMER_ANALYSIS_MD4, CS_TIMER_COUNT } ; static const char *timer_name[] = { "timer total", "memcpy to device", "expand c to i", "expansion", "interpolation", "blocking", "perm R", "measurement", "perm L", "swap", "memcpy to host", "analysis", "analysis edge", "analysis md0", "analysis md1", "analysis md2", "analysis md3", "analysis md4", "the end" } ; void pusage( const char *s ) { printf("Usage: %s -f configfilename.json\n", s ) ; } main( int ac, char *av[] ) { int fin, fout ; char opt ; // char opt, *finname = NULL, *foutname = NULL ; char *configfile = NULL ; setbuf( stdout, NULL ) ; setbuf( stderr, NULL ) ; while ((opt = getopt(ac, av, "f:")) != -1) { printf(" opt %c \n", opt ) ; switch (opt) { case 'f' : configfile = optarg ; break ; } } if ( configfile == NULL ) { pusage( av[0] ) ; return ( 1 ) ; } cs_config_init( &csc ) ; if ( !cs_config ( configfile, &csc )) { pusage( av[0] ) ; return ( 2 ) ; } cs_config_p ( &csc ) ; if ( !cs_config_check( &csc )) { pusage( av[0] ) ; return ( 1 ) ; } comp_ratio_f = (( double ) csc.comp_ratio / 100.0 ) ; fprintf( stderr, "x/y (%d, %d) blk x/y/z ( %d, %d, %d) in %s out %s yonly %d " "swap %d\n", csc.frame_x, csc.frame_y, csc.x_block, csc.y_block, csc.z_block, csc.finname, csc.foutname, csc.y_only, csc.do_swap ) ; fprintf( stderr, "adj x/y ( %d, %d ) expand x/y/z ( %d, %d, %d ) \n", csc.adj_x, csc.adj_y, csc.xadd, csc.yadd, csc.zadd ) ; fprintf( stderr, "weight %d dbg %x\n", csc.weight_scheme, csc.dbg_flag ) ; fprintf( stderr, "cube x/y/z %d %d %d comp %f\n", csc.cubex, csc.cubey, csc.cubez, comp_ratio_f ) ; fprintf( stderr, "edge x/y %d %d\n", csc.edge_x, csc.edge_y ) ; if ( csc.overlap_z ) fprintf( stderr, "overlap %d\n", csc.overlap_z ) ; fprintf( stderr, "perm %d %s\n", csc.do_permutation, csc.permdir ) ; if (strlen(csc.finname)) { fin = open( csc.finname, O_RDONLY ) ; if ( fin == -1 ) { printf("file %s does not exist\n", av[1]) ; exit( 1 ) ; } } fout = open( csc.foutname, O_CREAT | O_TRUNC | O_WRONLY, S_IRWXU ) ; if ( fout == -1 ) { printf("file %s open failed %d\n", csc.foutname, errno ) ; exit( 1 ) ; } dbg_init ( 256 * 1024 * 1024 * sizeof ( 4 )) ; if ( !allocate_d_mem()) { printf("%s: d_mem allocation failed\n", __func__ ) ; exit( 1 ) ; } if (( cudadbgp = dbg_d_malloc_i ( 1024 * 256 )) == NULL ) { exit( 1 ) ; } clear_device_mem_i( cudadbgp, 1024 * 256 ) ; if ( !allocate_h_mem()) { printf("%s: h_mem allocation failed\n", __func__ ) ; exit( 1 ) ; } #if __BYTE_ORDER__ == __BIG_ENDIAN opt = CS_CO_BIGENDIAN ; #else opt = 0 ; #endif if ( strlen( csc.ipcam_string)) { if ( !cs_ipcam_init ( csc.z_block, csc.frame_x, csc.frame_y, csc.ipcam_string, nblk_in_x, nblk_in_y, csc.md_x, csc.md_y, csc.disp_th_x, csc.disp_th_y )) { printf("ipcam_init failed\n") ; exit( 1 ) ; } } if ( opt ) // local machine is big endian { if ( csc.do_swap ) opt = 0 ; // back to little endian } else { if ( csc.do_swap ) opt = CS_CO_BIGENDIAN ; } if ( csc.do_permutation ) opt |= ML_PERM ; if ( csc.do_cube ) opt |= DOUBLE_PERM ; fprintf( stderr, "%s: size of header %d\n", 
__func__, sizeof ( struct cs_header )) ; // + 2 ... 1 is for the center of the edge detection rectangle // the other 1 is for the shift/move to make sense if (( cube_info[0].x <= (( csc.edge_x << 1 ) + 2 )) || ( cube_info[1].x <= (( csc.edge_x << 1 ) + 2 )) || ( cube_info[2].x <= (( csc.edge_x << 1 ) + 2 )) || ( cube_info[0].y <= (( csc.edge_y << 1 ) + 2 )) || ( cube_info[1].y <= (( csc.edge_y << 1 ) + 2 )) || ( cube_info[2].y <= (( csc.edge_y << 1 ) + 2 ))) { fprintf( stderr, "%s: error cube x %d %d %d edge x %d cube y %d %d %d edge y %d\n", __func__, cube_info[0].x, cube_info[1].x, cube_info[2].x, csc.edge_x, cube_info[0].y, cube_info[1].y, cube_info[2].y, csc.edge_y ) ; exit( 23 ) ; } if ( !cs_put_header ( fout, CS_CD_YUV420P, (( csc.y_only )? Y_COMP_ONLY : 0 ) | opt, WALSH_HADAMARD_MATRIX, csc.frame_x, csc.frame_y, csc.x_block, csc.y_block, csc.z_block, cube_info[0].x, cube_info[0].y, cube_info[0].z, cube_info[1].x, cube_info[1].y, cube_info[1].z, cube_info[2].x, cube_info[2].y, cube_info[2].z, csc.overlap_x, csc.overlap_y, csc.overlap_z, csc.xadd, csc.yadd, csc.zadd, csc.adj_x, csc.adj_y, csc.edge_x, csc.edge_y, csc.md_x, csc.md_y, csc.md_z, csc.weight_scheme )) { printf("can't write header\n") ; exit( 1 ) ; } fprintf( stderr, "%s: do_swap %d do_interpolate %d do_permutation %d do_cube %d " "do_comp_ratio %d do_block %d do_analysis %d do_one %d do_not_seek %d \n", __func__, csc.do_swap, csc.do_interpolate, csc.do_permutation, csc.do_cube, csc.do_comp_ratio, csc.do_block, csc.do_analysis, csc.do_one, csc.do_not_seek ) ; if ( md_filep ) ma_report_header ( md_filep, csc.frame_y, csc.frame_x, 0, 1, 2, 1 ) ; if ( !do_measurement( fin, fout )) { printf("do_measurement: failed\n") ; exit( 1 ) ; } close ( fin ) ; close ( fout ) ; if ( md_filep ) fclose ( md_filep ) ; } void do_tst_longlong() { fprintf(stderr, "%s: size of ll %d\n", __func__, sizeof ( long long )) ; h_tst_longlong (( long long *)din_b2, 100 ) ; dbg_p_d_data_ll ("tst long long",( long long *)din_b2, 100 * sizeof ( long long )) ; } // allocate the d mem ... 
and init the perm tables int allocate_d_mem() { int ana_size, ocube_size = 0, cube_size, *hdp = NULL, nmea, nz, i, j, k, xx, yy, zz, x, y, z ; if ( csc.do_cube ) { if (( k = hipMalloc( &d_cs_xyzp, sizeof ( *d_cs_xyzp ) * CUBE_INFO_CNT + sizeof ( int ) * 10 )) != hipSuccess ) { printf("%s: cs_xyzp alloc failed %d\n", __func__, k ) ; return ( 0 ) ; } total_d_mem += sizeof ( *d_cs_xyzp ) * CUBE_INFO_CNT + sizeof ( int ) * 10 ; d_host_io = ( int * )( d_cs_xyzp + CUBE_INFO_CNT ) ; fprintf( stderr, "%s: d_cs_xyzp %p d_host_io %p\n", __func__, d_cs_xyzp, d_host_io ) ; nmea = ( int )(( double )( csc.x_block * csc.y_block * csc.z_block ) * comp_ratio_f ) ; xx = csc.x_block + csc.adj_x ; yy = csc.y_block + csc.adj_y ; zz = csc.z_block + csc.zadd ; ana_size = 0 ; for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { x = csc.cubex ; y = csc.cubey ; z = csc.cubez ; #ifdef CUDA_DBG printf("%s:i %d mea %d -- xx %d %d %d x %d %d %d\n", __func__, i, nmea, xx, yy, zz, x, y, z ) ; #endif // inside is 1, side is 1/2, corner is 1/4 j = ( int )pow((double)2,(double)i) ; k = nmea / j ; printf("%s: i %d nmea %d k %d j %d comp %f\n", __func__, i, nmea, k, j, comp_ratio_f ) ; // the "+ 2" is to make sure at least the x and y has 2x2 block to cmp if ( h_do_find_perm_size ( xx, yy, zz, &x, &y, &z, csc.z_block, k, ( csc.edge_x + csc.md_x ) * 2 + 2, ( csc.edge_y + csc.md_y ) * 2 + 2) == 0 ) { printf( "%s: cube 1 failed \n", __func__ ) ; return ( 0 ) ; } cube_size = x * y * z * sizeof ( int ) ; if (( k = hipMalloc( &cube_info[i].dp, cube_size )) != hipSuccess ) { printf("%s: cube alloc failed %d %d %d \n", __func__, i, cube_size, k ) ; return ( 0 ) ; } total_d_mem += cube_size ; if ( hdp == NULL ) { ocube_size = cube_size ; hdp = ( int * )malloc( cube_size + 10 ) ; if ( hdp == NULL ) { printf("%s: cube host alloc failed %d %d \n", __func__, i, cube_size ) ; return ( 0 ) ; } } cube_info[i].x = x ; cube_info[i].y = y ; cube_info[i].z = z ; cube_info[i].size = x * y * z ; printf("%s: i %d x/y/z %d %d %d\n", __func__, i, x, y, z ); cube_info[i].cube_perm = NULL ; if (( x * y * z * sizeof ( int )) > ocube_size ) { printf("%s: cube size error %d %d %d %d \n", __func__, x, y, z, ocube_size ) ; return ( 0 ) ; } h_do_get_perm_matrix( hdp, xx, yy, zz, x, y, z, &cube_info[i].sink ) ; if (( k = hipMemcpy( cube_info[i].dp, hdp, cube_size, hipMemcpyHostToDevice)) != hipSuccess ) { printf("%s:cube download fail: loop %d %d\n", __func__, i, k ) ; return ( 0 ) ; } x = cube_info[i]. x - ( csc.edge_x << 1 ) - ( csc.md_x << 1 ) ; y = cube_info[i]. 
y - ( csc.edge_y << 1 ) - ( csc.md_y << 1 ) ; if (( x <= 0 ) || ( y <= 0 )) { printf("%s: i %d x/y %d %d cube %d %d edge %d %d motion detect %d %d \n", __func__, i, x, y, cube_info[i].x, cube_info[i].y, csc.edge_x, csc.edge_y, csc.md_x, csc.md_y ) ; return ( 0 ) ; } k = ( x * y * ( cube_info[i].z - csc.md_z + 1 ) + NUM_OF_HVT_INDEX ) * ((( csc.md_x << 1 ) + 1 ) * (( csc.md_y << 1 ) + 1) * ( csc.md_z - 1 ) + 1 ) ; printf("%s: x %d y %d k %d ana_size %d\n", __func__, x, y, k, ana_size ) ; printf("%s: md_x/y/z %d %d %d cube.z %d\n", __func__, csc.md_x, csc.md_y, csc.md_z, cube_info[i].z ) ; if ( ana_size < k ) ana_size = k ; #ifdef CUDA_OBS dbg_p_d_data_i ( "cube tbl", cube_info[i].dp, cube_info[i].size ) ; #endif } // set the config h_set_config( d_cs_xyzp, cube_info ) ; #ifdef CUDA_DBG dbg_p_d_data_i ( "config", ( int *)d_cs_xyzp, 3 * CUBE_INFO_CNT ) ; #endif printf("%s: ana_size %d\n", __func__, ana_size ) ; free ( hdp ) ; } if ( csc.do_reconstruction ) { if ( !h_do_dct_init()) { printf("%s: h_do_dct_init failed \n", __FILE__) ; return ( 0 ) ; } } x = csc.frame_x + ( csc.xadd << 1 ) ; y = csc.frame_y + ( csc.yadd << 1 ) ; nblk_in_x = ( x - csc.overlap_x ) / ( csc.x_block - csc.overlap_x ) ; j = ( x - csc.overlap_x ) % ( csc.x_block - csc.overlap_x ) ; if ( j ) { fprintf( stderr, "%s: x %d x_block %d overlap_x %d nblk_in_x %d j %d\n", __func__, x, csc.x_block, csc.overlap_x, nblk_in_x , j ) ; return ( 0 ) ; } nblk_in_y = ( y - csc.overlap_y ) / ( csc.y_block - csc.overlap_y ) ; j = ( y - csc.overlap_y ) % ( csc.y_block - csc.overlap_y ) ; if ( j ) { fprintf( stderr, "%s: y %d y_block %d overlap_y %d nblk_in_y %d j %d\n", __func__, y, csc.y_block, csc.overlap_y, nblk_in_y, j ) ; return ( 0 ) ; } if (( nblk_in_x < 2 ) || ( nblk_in_y < 2 )) { fprintf( stderr, "%s: not enough blks nblk_x %d nblk_y %d x/y %d %d ox/y %d %d\n", __func__, nblk_in_x, nblk_in_y, x, y, csc.overlap_x, csc.overlap_y ) ; return ( 0 ) ; } nz = csc.z_block + csc.zadd ; blk_size_e = ( csc.x_block + csc.adj_x ) * ( csc.y_block + csc.adj_y ) * nz ; #ifdef CUDA_DBG fprintf( stderr, "%s: blk_size %d nblk_in_x %d nblk_in_y %d nz %d x_b %d" " y_b %d ax %d ay %d\n", __func__, blk_size_e, nblk_in_x, nblk_in_y, nz, csc.x_block, csc.y_block, csc.adj_x, csc.adj_y ) ; #endif if ( csc.do_permutation ) blk_size_e++ ; perm_size_e = max_log2 ( blk_size_e ) ; if ( csc.do_analysis ) ana_size = ( ana_size > perm_size_e ) ? 
ana_size : perm_size_e ; else ana_size = perm_size_e ; blk_size_e = ana_size * nblk_in_x * nblk_in_y ; perm_blk_size_e = perm_size_e * nblk_in_x * nblk_in_y ; #ifdef CUDA_DBG fprintf( stderr, "%s: do_perm %d perm_size %d blk_size %d ana_size %d perm_blk_size %d\n", __func__, csc.do_permutation, perm_size_e, blk_size_e, ana_size, perm_blk_size_e ) ; #endif do_shift = weight_sft( csc.weight_scheme, perm_size_e, csc.x_block, csc.y_block ) ; blk_size_i = sizeof ( int ) * blk_size_e ; perm_size_i = sizeof ( int ) * perm_size_e ; if (( i = hipMalloc( &din_b1, blk_size_i )) != hipSuccess ) { printf("%s: din_b1 failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += blk_size_i ; if (( i = hipMalloc( &din_b2, blk_size_i )) != hipSuccess ) { printf("%s: din_b2 failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += blk_size_i ; if (( i = hipMalloc( &dperm_ml_lp, perm_size_i )) != hipSuccess ) { printf("%s: dperm_ml_lp failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += perm_size_i ; for ( k = 0 ; k < CUBE_INFO_CNT ; k++ ) { if (( i = hipMalloc( &cube_info[k].cube_perm, perm_size_i )) != hipSuccess ) { printf("%s: dperm_cube failed %d %d \n", __func__, k, i ) ; return ( 0 ) ; } total_d_mem += perm_size_i ; } if (( i = hipMalloc( &dperm_ml_rp, perm_size_i )) != hipSuccess ) { printf("%s: dperm_ml_rp failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += perm_size_i ; if (( i = hipMalloc( &dperm_rp, perm_size_i )) != hipSuccess ) { printf("%s: dperm_rp failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += perm_size_i ; printf("%s: DEV GOOD == b1 %p b2 %p bsize %d \n" " pR %p psize %d mllp %p mlrp %p total_d_mem %d\n", __func__, din_b1, din_b2, blk_size_i, dperm_rp, perm_size_i, dperm_ml_lp, dperm_ml_rp, total_d_mem ) ; for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { fprintf( stderr, "%s: dp %p xyz %d %d %d size %d sk %d perm %p\n", __func__, cube_info[i].dp, cube_info[i].x, cube_info[i].y, cube_info[i].z, cube_info[i].size, cube_info[i].sink, cube_info[i].cube_perm ) ; } i = ( int )( log2(( double ) perm_size_e )) ; if ( !permutation_load( i, csc.permdir, dperm_ml_lp, dperm_ml_rp )) { printf("%s: y perm load failed\n", __func__ ) ; return ( 0 ) ; } return ( 1 ) ; } /* allocate_h_mem: allocate the buffers for used in memory. need: frame_x, frame_y, z_block, blk_size_i, y_only */ int allocate_h_mem() { int ysize, uvsize ; // for YUV420 ysize = csc.frame_x * csc.frame_y * csc.z_block ; uvsize = ysize >> 2 ; if ( !strlen(csc.ipcam_string)) { ybufp = ( char * ) malloc ( ysize ) ; } if ( !csc.y_only ) { ubufp = ( char * ) malloc ( uvsize ) ; vbufp = ( char * ) malloc ( uvsize ) ; } else { ubufp = vbufp = NULL ; } if (( !ybufp && ( !strlen(csc.ipcam_string))) || ( !csc.y_only && ( !ubufp || !vbufp ))) { printf("%s: 1 malloc failed \n", __func__ ) ; return ( 0 ) ; } outbufp = ( int * ) malloc ( blk_size_i ) ; if ( outbufp == NULL ) { printf("%s: 2 malloc failed \n", __func__ ) ; return ( 0 ) ; } printf("%s: HOST GOOD ysize %d uv %d ybufp %p\n" " u %p v %p o %p blksize %d\n", __func__, ysize, uvsize, ybufp, ubufp, vbufp, outbufp, blk_size_i ) ; return ( 1 ) ; } int setup_perm_tbls( int *d_bp ) { int i ; unsigned int ran ; RndC_uint32 ran1 ; // assume the folloing // 1. left selection tbls are in cube_info[i].cube_perm // 2. ml seq perm tbl is in perm_ml_lp and perm_ml_rp ; // 3. dperm_rp will be updated ... // right perm first ... 
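	// Sketch for illustration only, fenced with CUDA_OBS like the other
	// debug-only blocks in this file: one plausible reading of
	// h_do_permutation_double() is that it composes two index tables as
	// out[i] = ml[ sel[i] ], i.e. the per-frame random selection is applied
	// first and the fixed ML-sequence table second.  This is an assumption,
	// not the verified semantics; sel[], ml[] and out[] below are made-up
	// 4-entry tables and the block is never compiled in.
#ifdef CUDA_OBS
	{
		int ii, sel[4] = { 2, 0, 3, 1 }, ml[4] = { 1, 3, 0, 2 }, out[4] ;

		for ( ii = 0 ; ii < 4 ; ii++ )
			out[ ii ] = ml[ sel[ ii ]] ;

		printf("%s: composition sketch %d %d %d %d\n", __func__,
			out[0], out[1], out[2], out[3] ) ;
	}
#endif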
// ran = rand() ; randi_RndC( &rnd_state_1, RndC_uint32( perm_size_e ), (size_t)1, &ran1 ) ; ran = ran1 - 1 ; #ifdef CUDA_OBS fprintf( stderr, "%s: right ran %d perm_size %d \n", __func__, ran, perm_size_e ) ; #endif h_do_perm_selection_R ( d_bp, perm_size_e, ran ) ; #ifdef CUDA_OBS dbg_p_d_data_i("right selection", d_bp, perm_size_e) ; #endif h_do_permutation_double ( d_bp, dperm_ml_rp, dperm_rp, perm_size_e ) ; #ifdef CUDA_OBS dbg_p_d_data_i("right merge", dperm_rp, perm_size_e ) ; #endif // now the left randi_RndC( &rnd_state_2, RndC_uint32( perm_size_e ), (unsigned int)1, &ran1 ) ; ran = ran1 - 1 ; #ifdef CUDA_OBS fprintf( stderr, "%s: left ran %d perm_size %d \n", __func__, ran, perm_size_e ) ; #endif for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { h_do_perm_selection_L ( d_bp, perm_size_e, cube_info[i].dp, cube_info[i].size, ran, cube_info[i].sink ) ; h_do_permutation_double ( d_bp, dperm_ml_lp, cube_info[i].cube_perm, perm_size_e ) ; // orig one } return ( 1 ) ; } int get_one_chunk( int fin, char *bp, int size ) { int i ; if (( i = read ( fin, bp, size )) < 0 ) { printf("%s: y read failed errno %d\n", __func__, errno ) ; return ( 0 ) ; } if ( !i ) return ( 2 ) ; if ( i != size ) { printf("%s: read failed i %d\n", __func__, i ) ; return ( 0 ) ; } return ( 1 ) ; } int get_block_input ( int fin ) { int offsety, offsetuv, sizey, sizeuv, i ; int cnt ; offsety = 0 ; offsetuv = 0 ; sizey = csc.frame_x * csc.frame_y ; sizeuv = sizey >> 2 ; #ifdef CUDA_OBS fprintf( stderr, "%s: fin %d x %d y %d zb %d yin %x uin %x vin %x\n", __func__, fin, csc.frame_x, csc.frame_y, csc.z_block, ybufp, ubufp, vbufp ) ; #endif cnt = csc.z_block ; if ( !first_block ) cnt -= csc.overlap_z ; while ( cnt-- ) { offsety = in_block_to * sizey ; i = get_one_chunk( fin, &ybufp[ offsety ], sizey ) ; if ( i != 1 ) return ( i ) ; if ( csc.y_only ) { if ( !csc.do_not_seek ) { if (( i = lseek ( fin, sizeuv << 1, SEEK_CUR )) < 0 ) { printf("%s: failed lseek %d\n", __func__, errno ) ; return ( 0 ) ; } } } else { offsetuv = in_block_to * sizeuv ; i = get_one_chunk( fin, &ubufp[ offsetuv ], sizeuv ) ; if ( i != 1 ) return ( i ) ; i = get_one_chunk( fin, &vbufp[ offsetuv ], sizeuv ) ; if ( i != 1 ) return ( i ) ; } in_block_to++ ; if ( in_block_to == csc.z_block ) in_block_to = 0 ; } #ifdef CUDA_OBS dbg_pdata_c ( "get_block_input: ybuf", ybufp, sizey * z_block ) ; if ( !y_only ) { dbg_pdata_c ( "get_block_input: ubuf", ubufp, sizeuv * z_block ) ; dbg_pdata_c ( "get_block_input: vbuf", vbufp, sizeuv * z_block ) ; } #endif return ( 1 ) ; } int do_measurement ( int fin, int fout ) { int cnt, i, frame_cnt ; double d, ad, aut, ast ; clock_t tut, tst ; struct frame_list *fp ; static RndC_uint32 rand_key1 = 0 ; static RndC_uint32 rand_key2 = 0 ; // d_in_p2 will be used in the fast transform ... // d_in_p has data before blocking ... 
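	// Loop structure, as implemented below:
	// (1) when do_permutation is set, re-seed both RndC streams with the
	//     incrementing rand_key1/rand_key2 and rebuild the selection and
	//     permutation tables via setup_perm_tbls( din_b1 ) ;
	// (2) pull the next z_block frames ( only z_block - overlap_z new ones
	//     after the first block ) from the input file, or take a ready
	//     frame set from the ipcam ring ( ybufp = fp->gbp ) ;
	// (3) refresh wcube_info from cube_info, push it to the device config,
	//     then run make_one_component() on Y and, unless y_only, on U and V ;
	// (4) on end of input ( i == 2 ) print the per-stage timers and return.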
omp_timer_init( CS_TIMER_COUNT ) ; cs_timer_init( 1 ) ; if ( strlen(csc.ipcam_string)) cs_ipcam_start() ; frame_cnt = 0 ; first_block = 1 ; while ( 1 ) { if ( csc.do_permutation ) { init_RndC( &rnd_state_1, rand_key1 ) ; init_RndC( &rnd_state_2, rand_key2 ) ; setup_perm_tbls( din_b1 ) ; rand_key1++ ; rand_key2++ ; // printf("randkey1 %d randkey2 %d\n", rand_key1, rand_key2 ) ; } if ( csc.do_one-- ) { if (strlen( csc.ipcam_string) == 0 ) { i = get_block_input ( fin ) ; fp = NULL ; } else { #ifdef CUDA_DBG printf("do_measurement: OUT ===\n") ; #endif fp = cs_ipcam_get() ; #ifdef CUDA_DBG printf("do_measurement: IN ===\n") ; #endif ybufp = fp->gbp ; i = 1 ; } } else i = 2 ; // for debug only do once // dbg_pdata_c ( "after get block input", ybufp, frame_x * frame_y * zb ) ; if ( !i ) { printf("%s:failed %d\n", __func__, frame_cnt ) ; return ( 0 ) ; } if ( i == 2 ) { printf("\n%s: frame_cnt %d size %d\n", __func__, frame_cnt, frame_cnt * csc.frame_x * csc.frame_y ) ; printf("=== counters below are based on the block cnt\n") ; for ( i = 0 ; i < CS_TIMER_COUNT ; i++ ) { omp_timer_get ( i, &d, &cnt, &ad ) ; printf("%s ::: %f cnt %d average %f ms\n", timer_name[i], d, cnt, ad ) ; } cs_timer_get ( 0, &tst, &tut, &cnt, &ast, &aut ) ; printf("overall st %d ut %d cnt %d ast %f aut %f == total %f ms\n", tst, tut, cnt, ast, aut, ast + aut ) ; return ( 1 ) ; } memcpy( wcube_info, cube_info, sizeof ( cube_info )) ; h_set_config ( d_cs_xyzp, wcube_info ) ; // do y i = make_one_component( fout, ybufp, csc.frame_x * csc.frame_y * csc.z_block, csc.frame_x, csc.frame_y, csc.x_block, csc.y_block, 0, fp ) ; if ( !i ) { printf("%s:failed frame_cnt %d\n", __func__, frame_cnt ) ; return ( 0 ) ; } if ( !csc.y_only ) { // should revisit din_size_u ... due to the expand // do y if ( !make_one_component( fout, ubufp, csc.frame_x * csc.frame_y * csc.z_block >> 2 , csc.frame_x >> 1, csc.frame_y >> 1, csc.x_block >> 1, csc.y_block >> 1, 1, fp )) { printf("%s:failed i %d frame %d\n", __func__, i, frame_cnt ) ; return ( 0 ) ; } if ( !make_one_component( fout, vbufp, csc.frame_x * csc.frame_y * csc.z_block >> 2, csc.frame_x >> 1, csc.frame_y >> 1, csc.x_block >> 1, csc.y_block >> 1, 1, fp )) { printf("%s:failed i %d frame %d\n", __func__, i, frame_cnt ) ; return ( 0 ) ; } } frame_cnt += ( csc.z_block - csc.overlap_z ) ; first_block = 0 ; } } int * the_other_d_buf ( int *p ) { if ( p == din_b1 ) return ( din_b2 ) ; return ( din_b1 ) ; } /* fout : out file descriptor db_1p : add of input buffer on device dout_a : add of output buffer on device hin_a : add of host buffer for input from file ysize : size of input for this comp in byte // real dimension side dout_size : size of output buffer on device in byte // log2 size din_size : size of input buffer on device in byte // log2 size din_a2 : add of input buffer on device before block // NULL if block is not needed x, y : frame dimension xbdim, ybdim : block x/y dimension blk_dst_size : log2 of x/y/z block size d_out_p2: add of output buffer on device for do_perm fp: frame_list pointer ... 
for ipcam */ int make_one_component( int fout, char *hin_a, int in_size, int x, int y, int xbdim, int ybdim, int need_interpolate, struct frame_list *fp ) { int *outp, *d_currp, *d_nextp ; char *d_curr_cp ; int orig, hvt_size, overall_size, i, rec_size, j ; int k, frame_size = x * y ; #ifdef CUDA_DBG fprintf( stderr, "%s: inbuf %p in_size %d x/y %d %d blk x/y %d %d\n" "interpo %d\n", __func__, hin_a, in_size, x, y, xbdim, ybdim, need_interpolate ) ; #endif d_currp = din_b1 ; // clear_device_mem_c( d_currp, din_size ) ; d_nextp = din_b2 ; // clear_device_mem_c( d_nextp, din_size ) ; omp_timer_on( CS_TIMER_TOTAL ) ; cs_timer_on( 0 ) ; // copy the frame data ( could be y, u, v ) from host to device // take care of z_overlap d_curr_cp = ( char * ) d_currp ; omp_timer_on( CS_TIMER_MEMCPY_DOWN ) ; if (( i = hipMemcpy( d_currp, hin_a + in_block_to * frame_size, ( csc.z_block - in_block_to ) * frame_size, hipMemcpyHostToDevice)) != hipSuccess ) { printf("%s:download fail: first %d\n", __func__, i ) ; return ( 0 ) ; } if ( in_block_to ) { if (( i = hipMemcpy( d_curr_cp + ( csc.z_block - in_block_to ) * frame_size, hin_a, in_block_to * frame_size, hipMemcpyHostToDevice)) != hipSuccess ) { printf("%s:download fail: second %d\n", __func__, i ) ; return ( 0 ) ; } } omp_timer_off( CS_TIMER_MEMCPY_DOWN ) ; if ( csc.dbg_flag & DBG_CP_DOWN ) dbg_p_d_data_c_mn ( "after memcpy cp in", ( char * )d_currp, in_size, x, y, x ) ; d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on( CS_TIMER_C_TO_I ) ; h_expand_c_to_i (( char * ) d_currp, d_nextp, in_size ) ; omp_timer_off( CS_TIMER_C_TO_I ) ; d_currp = d_nextp ; if ( csc.dbg_flag & DBG_C_2_I ) dbg_p_d_data_i_mn ( "after c to i", d_currp, in_size, x, y, x ) ; // to bring u/v component to same size as y if ( do_interpolate && need_interpolate ) { exit( 34 ) ; // LDL QQQ d_nextp = the_other_d_buf ( d_currp ) ; // might need to adjust for the interpolation, // like blk_size_i ... which is based on 'y' // and used in h_make_block ... LDL omp_timer_on ( CS_TIMER_INTER ) ; if ( !h_make_interpolate ( d_currp, d_nextp, x, y, csc.z_block, INT_YUV420 )) { printf("%s: make_interpolate failed\n", __func__ ) ; return ( 0 ) ; } omp_timer_off ( CS_TIMER_INTER ) ; d_currp = d_nextp ; if ( csc.dbg_flag & DBG_INTER ) dbg_p_d_data_c ( "after interpolate",( char *)d_currp, in_size ) ; xbdim <<= 1 ; ybdim <<= 1 ; x <<= 1 ; y <<= 1 ; in_size *= 4 ; } // expand the data d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on( CS_TIMER_EXPANSION ) ; h_expand_frame ( d_currp, d_nextp, x, y, csc.xadd, csc.yadd, csc.zadd, csc.z_block ) ; omp_timer_off( CS_TIMER_EXPANSION ) ; d_currp = d_nextp ; frame_size = ( x + ( csc.xadd << 1 )) * ( y + ( csc.yadd << 1 )) ; if ( csc.dbg_flag & DBG_EXPAND ) { i = frame_size * ( csc.z_block + csc.zadd ) ; dbg_p_d_data_i_mn ( "after expand", d_currp, i, x + ( csc.xadd << 1 ), y + ( csc.yadd << 1 ), x + ( csc.xadd << 1 )) ; } // do the blocking ... move data/append 0/weight if ( csc.do_block ) { d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on( CS_TIMER_BLOCKING ) ; set_device_mem_i ( d_nextp, perm_blk_size_e, 0 ) ; // this is needed ... 
// to set the adj_x/adj_y to 0 h_make_block( d_currp, d_nextp, x + ( csc.xadd << 1 ), y + ( csc.yadd << 1 ), frame_size, xbdim, ybdim, csc.z_block, perm_size_e, csc.do_permutation, xbdim - csc.overlap_x, ybdim - csc.overlap_y, nblk_in_x, nblk_in_y, csc.adj_x, csc.adj_y, csc.weight_scheme, do_shift ) ; omp_timer_off( CS_TIMER_BLOCKING ) ; d_currp = d_nextp ; if ( csc.dbg_flag & DBG_BLKING ) dbg_p_d_data_i_mn_skip ( "after blking", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; #ifdef CUDA_OBS dbg_p_d_data_i_mn ( "after blking", do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + adj_x, ybdim + adj_y, xbdim + adj_x ) ; #endif // NOTE: if do_permutation, then take out the first element } // do the R permutation here ... if ( csc.do_permutation ) { d_nextp = the_other_d_buf ( d_currp ) ; #ifdef CUDA_OBS { int *dp, i1 ; dp = outbufp ; for ( i1= 0 ; i1 < perm_size_e ; i1++ ) *dp++ = i1 ; if (( i1 = hipMemcpy( d_currp, outbufp, perm_size_e * sizeof ( int ) , hipMemcpyHostToDevice)) != hipSuccess ) { printf("%s:test cpy fail: %d \n", __func__, i1 ) ; return ( 0 ) ; } dbg_p_d_data_i("R before ", d_currp, perm_size_e) ; } #endif #ifdef CUDA_OBS dbg_p_d_data_i("R before ", d_currp + perm_size_e, perm_size_e) ; #endif omp_timer_on( CS_TIMER_PERMR ) ; // input is din_a ... h_do_permutation_R ( d_currp, d_nextp, dperm_rp, perm_blk_size_e, perm_size_e ) ; omp_timer_off( CS_TIMER_PERMR ) ; d_currp = d_nextp ; #ifdef CUDA_OBS dbg_p_d_data_i("R after ", d_currp + perm_size_e, perm_size_e) ; #endif if ( csc.dbg_flag & DBG_PERM_R ) dbg_p_d_data_i_mn_skip ( "after perm-R", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; } // do the transformation ... if ( csc.dbg_flag & DBG_WHM ) dbg_p_d_data_i_mn_skip ( "before whm", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; omp_timer_on( CS_TIMER_MEA ) ; cs_whm_measurement_b( d_currp, perm_blk_size_e, perm_size_e ) ; omp_timer_off( CS_TIMER_MEA ) ; if ( csc.dbg_flag & DBG_WHM ) dbg_p_d_data_i_mn_skip ( "after whm", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; // do the L permutation here ... if ( csc.do_permutation ) { d_nextp = the_other_d_buf ( d_currp ) ; // input is din_a ... omp_timer_on( CS_TIMER_PERML ) ; h_do_permutation_Lv2 ( d_currp, d_nextp, wcube_info[0].cube_perm, wcube_info[1].cube_perm, wcube_info[2].cube_perm, perm_blk_size_e, perm_size_e, nblk_in_x, nblk_in_y ) ; omp_timer_off( CS_TIMER_PERML ) ; d_currp = d_nextp ; if ( csc.dbg_flag & DBG_PERM_L ) dbg_p_d_data_i_mn_skip ( "after L-perm", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; // dbg_p_d_data_i ( "after l perm", d_currp, perm_blk_size_e ) ; } // copy_vec ... 
the to-size is the "inner block" size d_nextp = the_other_d_buf ( d_currp ) ; inner_cube_size = wcube_info[0].size ; overall_size = inner_cube_size * nblk_in_x * nblk_in_y ; #ifdef CUDA_DBG set_device_mem_i( d_nextp, perm_blk_size_e, 101 ) ; #endif if ( !( k = h_do_copy_vec ( d_currp, d_nextp, overall_size, perm_size_e, inner_cube_size ))) { printf("%s: copy_vec failed %d i %d \n", __func__, k, i ) ; return ( 0 ) ; } d_currp = d_nextp ; if ( csc.dbg_flag & DBG_COPY_DONE ) { dbg_p_d_data_i_mn_v2 ( "copy after", d_currp, overall_size, wcube_info[0].x, wcube_info, nblk_in_x, nblk_in_y ) ; } blocks_processed++ ; // from here and on ... only the L-selected measurements if ( csc.do_analysis ) { // edge detection omp_timer_on ( CS_TIMER_ANALYSIS ) ; d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on ( CS_TIMER_ANALYSIS_EDGE ) ; h_do_edge_detection_v2 ( d_currp, d_nextp, overall_size, d_cs_xyzp, csc.edge_x, csc.edge_y, nblk_in_x, nblk_in_y, cube_info) ; omp_timer_off ( CS_TIMER_ANALYSIS_EDGE ) ; if ( csc.dbg_flag & DBG_ED ) { dbg_p_d_data_i_mn_v2 ( "edge_v2 orig", d_currp, overall_size, wcube_info[0].x, wcube_info, nblk_in_x, nblk_in_y ) ; dbg_p_d_data_i_mn_v2 ( "edge_v2 get", d_nextp, overall_size, wcube_info[0].x, wcube_info, nblk_in_x, nblk_in_y ) ; } d_currp = d_nextp ; d_nextp = the_other_d_buf ( d_currp ) ; if (!(i = h_do_copy_box_v2 ( d_currp, d_nextp, overall_size, csc.edge_x, csc.edge_y, nblk_in_x, nblk_in_y, d_cs_xyzp, cube_info ))) { printf("%s: copy_box failed i %d size %d \n", __func__, i, overall_size ) ; return ( 0 ) ; } d_currp = d_nextp ; d_nextp = the_other_d_buf ( d_currp ) ; for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { wcube_info[i].x -= csc.edge_x * 2 ; wcube_info[i].y -= csc.edge_y * 2 ; wcube_info[i].size = wcube_info[i].x * wcube_info[i].y * wcube_info[i].z ; } h_set_config ( d_cs_xyzp, wcube_info ) ; inner_cube_size = wcube_info[0].size ; overall_size = inner_cube_size * nblk_in_x * nblk_in_y ; if ( csc.dbg_flag & DBG_ED ) { dbg_p_d_data_i_mn_v2 ( "edge_v2 after copy", d_currp, overall_size, wcube_info[0].x, wcube_info, nblk_in_x, nblk_in_y ) ; } // motion detection #ifdef CUDA_DBG set_device_mem_i( d_nextp, blk_size_e, 111 ) ; #endif k = h_do_motion_idx_v2 ( d_nextp, blk_size_e, &orig, nblk_in_x, nblk_in_y, wcube_info, csc.md_x, csc.md_y, csc.md_z, &rec_size ) ; #ifdef CUDA_DBG printf("%s: rec_size %d orig %d nblk_in_x/y %d %d \n", __func__, rec_size, orig, nblk_in_x, nblk_in_y ) ; #endif if ( !k ) { printf("%s: motion failed", __func__ ) ; return( 0 ) ; } if ( csc.dbg_flag & DBG_MT_IDX ) printf("orig idx is %d size %d\n", orig, rec_size ) ; hvt_size = ( csc.md_x * 2 + 1 ) * ( csc.md_y * 2 + 1 ) * ( csc.md_z - 1 ) + 1 ; if ( csc.dbg_flag & DBG_MT_IDX ) dbg_p_d_data_i_mn ( "idx original ", d_nextp, ( rec_size + NUM_OF_HVT_INDEX ) * ( hvt_size * nblk_in_x * nblk_in_y ), rec_size + NUM_OF_HVT_INDEX, hvt_size * nblk_in_x * nblk_in_y, 6 ) ; omp_timer_on ( CS_TIMER_ANALYSIS_MD0 ) ; // step 0 : copy data ... k = h_do_motion_detection_step0_v2 ( d_currp, d_nextp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, csc.md_x * 2, csc.md_y * 2, csc.md_z, d_cs_xyzp, hvt_size, inner_cube_size ) ; omp_timer_off ( CS_TIMER_ANALYSIS_MD0 ) ; if ( !k ) { printf("%s: step0 failed", __func__ ) ; return( 0 ) ; } d_currp = d_nextp ; if ( csc.dbg_flag & DBG_MT_STEP0 ) dbg_p_d_data_i_mn ( "motion 0", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ; // step 1 ... 
yk-y0 for ( k = 0 ; k < CUBE_INFO_CNT ; k++ ) { wcube_info[k].x -= csc.md_x * 2 ; wcube_info[k].y -= csc.md_y * 2 ; wcube_info[k].z -= ( csc.md_z - 1 ) ; wcube_info[k].size = wcube_info[k].x * wcube_info[k].y * wcube_info[k].z ; } h_set_config ( d_cs_xyzp, wcube_info ) ; // d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on ( CS_TIMER_ANALYSIS_MD1 ) ; h_do_l1_norm_step1_v2( d_currp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, orig, hvt_size ) ; omp_timer_off ( CS_TIMER_ANALYSIS_MD1 ) ; if ( csc.dbg_flag & DBG_MT_STEP1 ) dbg_p_d_data_i_mn ( "motion 1", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ; // step 2 -- do the sum omp_timer_on ( CS_TIMER_ANALYSIS_MD2 ) ; k = h_do_l1_norm_step2_v2( d_currp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, wcube_info, d_cs_xyzp, d_host_io ) ; omp_timer_off ( CS_TIMER_ANALYSIS_MD2 ) ; if ( !k ) { printf("%s: step2 failed", __func__ ) ; return( 0 ) ; } if ( csc.dbg_flag & DBG_MT_STEP2 ) dbg_p_d_data_i_mn ( "motion 2", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ; // step 3 -- get 1-|y0-yk|/|y0| omp_timer_on ( CS_TIMER_ANALYSIS_MD3 ) ; k = h_do_l1_norm_step3_v2( d_currp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, orig, hvt_size ) ; omp_timer_off ( CS_TIMER_ANALYSIS_MD3 ) ; printf("%s: step3 done, k %d outbufp %p\n", __func__, k, outbufp ) ; if ( !k ) { printf("%s: step3 failed", __func__ ) ; return( 0 ) ; } if ( csc.dbg_flag & DBG_MT_STEP3 ) dbg_p_d_data_i_mn ( "motion 3", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, 6 ) ; // step 4 -- find the winner in each of the blocks omp_timer_on ( CS_TIMER_ANALYSIS_MD4 ) ; k = h_do_l1_norm_step4_v2( d_currp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, orig, hvt_size, outbufp, ( csc.md_y * 2 + 1 + 1 ) * csc.md_x ) ; // last param is to inidcate the block which is right after the orig // in time domain and without the vertical/horizontal shifting. omp_timer_off ( CS_TIMER_ANALYSIS_MD4 ) ; printf("%s: step4 done k %d\n", __func__, k ) ; if ( !k ) { printf("%s: step4 failed", __func__ ) ; return( 0 ) ; } if ( csc.dbg_flag & DBG_MT_STEP4 ) dbg_p_d_data_i_mn ( "motion 4", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, 8 ) ; printf("%s: step4 done\n", __func__ ) ; if ( csc.dbg_flag & DBG_MT_STEP4 ) { dbg_p_d_data_i_mn ( "motion 4 host", d_currp, ( 1 + NUM_OF_HVT_INDEX ) * nblk_in_x * nblk_in_y * 2, ( 1 + NUM_OF_HVT_INDEX ) * 2, nblk_in_x * nblk_in_y, ( 1 + NUM_OF_HVT_INDEX ) * 2) ; dbg_p_data_i_mn ( "motion 4 host", outbufp, ( 1 + NUM_OF_HVT_INDEX ) * nblk_in_x * nblk_in_y * 2, ( 1 + NUM_OF_HVT_INDEX ) * 2, nblk_in_x * nblk_in_y, ( 1 + NUM_OF_HVT_INDEX ) * 2) ; } #ifdef CUDA_DBG dbg_p_data_i_mn ( "return values", outbufp, ( 1 + NUM_OF_HVT_INDEX ) * nblk_in_x * nblk_in_y * 2, ( 1 + NUM_OF_HVT_INDEX ) * 2, nblk_in_x * nblk_in_y, ( 1 + NUM_OF_HVT_INDEX ) * 2) ; #endif omp_timer_off ( CS_TIMER_ANALYSIS ) ; omp_timer_off( CS_TIMER_TOTAL ) ; cs_timer_off( 0 ) ; if ( md_filep ) ma_report_record ( md_filep, outbufp, blocks_processed, csc.x_block, csc.y_block, csc.z_block, nblk_in_x, nblk_in_y, csc.overlap_x, csc.overlap_y, csc.overlap_z, ( csc.weight_scheme == WEIGHT_LINEAR )? 1 : 0, ( csc.weight_scheme == WEIGHT_LINEAR )? 
2 : 0 ) ; // need to #define if ( strlen(csc.ipcam_string) ) { cs_ipcam_record ( outbufp, fp->outp ) ; cs_ipcam_put ( fp ) ; } } else { // NOTE: LDL need work here ... since the size will be different. if ( csc.do_swap ) { omp_timer_on( CS_TIMER_SWAP ) ; htonl_device_mem_i( d_currp, blk_size_e ) ; omp_timer_off( CS_TIMER_SWAP ) ; if ( csc.dbg_flag & DBG_SWAP ) dbg_p_d_data_i ( "after swap", d_currp, blk_size_e ) ; } outp = outbufp ; j = blk_size_i ; omp_timer_on( CS_TIMER_MEMCPY_UP ) ; if (( i = hipMemcpy(( char * )outp, d_currp, j, hipMemcpyDeviceToHost)) != hipSuccess ) { printf("make_one_component:upload fail: %d\n", i ) ; return ( 0 ) ; } omp_timer_off( CS_TIMER_MEMCPY_UP ) ; omp_timer_off( CS_TIMER_TOTAL ) ; cs_timer_off( 0 ) ; if ( csc.do_permutation ) { outp++ ; // take out 1st entry j -= sizeof ( int ) ; } // LDL ... prepend block header ... if ( write ( fout, outp, j ) != ( j )) { printf("make_one_component: write failed errno %d\n", errno ) ; return ( 0 ) ; } } return ( 1 ) ; } int cs_config_check( struct cs_config *csp ) { int err = 0 ; if (( csp->adj_x < 0 ) || ( csp->adj_y < 0 )) { fprintf( stderr, "error: adj %d %d \n", csp->adj_x, csp->adj_y ) ; err++ ; } else if ( csp->adj_x || csp->adj_y ) csp->do_block++ ; if (( csp->comp_ratio > 100 ) || ( csp->comp_ratio <= 0 )) { fprintf( stderr, "comp_ratio error %d\n", csp->comp_ratio ) ; err++ ; } if ( csp->do_analysis && ( ( csp->md_x <= 0 ) || ( csp->md_y <= 0 ) || ( csp->md_z <= 0 ))) { fprintf( stderr, "negative motion detection size %d %d %d \n", csp->md_x, csp->md_y, csp->md_z ) ; err++ ; } if ( csp->do_cube && ( ( csp->cubex <= 0 ) || ( csp->cubey <= 0 ) || ( csp->cubez <= 0 ))) { fprintf( stderr, "negative cube size %d %d %d \n", csp->cubex, csp->cubey, csp->cubez ) ; err++ ; } if (( csp->xadd < 0 ) || ( csp->xadd < 0 ) || ( csp->xadd < 0 )) { fprintf( stderr, "negative expansion size %d %d %d \n", csp->xadd, csp->yadd, csp->zadd ) ; err++ ; } if ( csp->do_analysis && ( ( csp->edge_x <= 0 ) || ( csp->edge_y <= 0 ))) { fprintf( stderr, "non positive edge size %d %d \n", csp->edge_x, csp->edge_y ) ; err++ ; } if ( csp->do_display && ( ( csp->disp_th_x < 0 ) || ( csp->disp_th_y < 0 ))) { fprintf( stderr, "negative display threshold %d %d \n", csp->disp_th_x, csp->disp_th_y ) ; err++ ; } if (( csp->frame_x <= 0 ) || ( csp->frame_y <= 0 )) { fprintf( stderr, "frame size error %d %d \n", csp->frame_x, csp->frame_y ) ; err++ ; } if ( csp->do_block && ( ( csp->overlap_x < 0 ) || ( csp->overlap_y < 0 ) || ( csp->overlap_z < 0 ))) { fprintf( stderr, "negative overlap size %d %d %d \n", csp->overlap_x, csp->overlap_y, csp->overlap_z ) ; err++ ; } if (( csp->weight_scheme != WEIGHT_LINEAR ) && ( csp->weight_scheme != NO_WEIGHT )) { fprintf( stderr, "weight scheme err %d\n", csp->weight_scheme ) ; err++ ; } if ( csp->do_block && ( ( csp->x_block <= 0 ) || ( csp->y_block <= 0 ) || ( csp->z_block <= 0 ))) { fprintf( stderr, "non positive block size %d %d %d \n", csp->x_block, csp->y_block, csp->z_block ) ; err++ ; } // if ( csp->do_cube && !csp->do_permutation ) { fprintf( stderr, "do_cube and not do_permutation\n") ; err++ ; } if ( csp->ipcam_string && !csp->y_only ) { fprintf( stderr, "ipcam but not y_only\n") ; err++ ; } if ( csp->do_cube && (( csp->cubex > csp->x_block ) || ( csp->cubey > csp->y_block ) || ( csp->cubez > csp->z_block ))) { fprintf( stderr, "Error: cube/block sizes mismatch\n") ; err++ ; } if ( csp->do_cube && !csp->y_only ) { fprintf( stderr, "do_cube and not y_only\n") ; err++ ; } if ( csp->overlap_z >= csp->z_block ) 
{ fprintf( stderr, "Error: overlap_z %d z_block %d\n", csp->overlap_z, csp->z_block ) ; err++ ; } if ( csc.do_permutation && ( !strlen(csc.permdir))) { fprintf( stderr, "Error: do perm with no perm dir\n") ; err++ ; } if (( csp->frame_x < 0 ) || ( csp->frame_y < 0 ) || ( csp->x_block < 0 ) || ( csp->y_block < 0 ) || ( csp->z_block < 0 ) || ( !strlen(csp->finname) && !strlen(csp->ipcam_string)) || !strlen(csp->foutname) || ( strlen(csp->finname) && strlen(csp->ipcam_string))) { fprintf( stderr, "Error: misc \n") ; err++ ; } if( strlen(csc.md_outputfile)) { if (( md_filep = fopen ( csc.md_outputfile, "w+")) == NULL ) { fprintf(stderr, "Error: openfile %s\n", csc.md_outputfile ) ; err++ ; } } return ( !err ) ; }
d1ba10ace94886564c5e38fb1ee904257429839c.cu
#include <iostream> using namespace std; #include <thrust/reduce.h> #include <thrust/sequence.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #include <fcntl.h> #include <errno.h> #include <math.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> // rand() from matlab #include "RndC_ifc.h" #include "RndCState.h" #include "cs_dbg.h" #include "cs_helper.h" #include "cs_whm_encode_b.h" #include "cs_header.h" #include "cs_block.h" #include "cs_perm_mlseq.h" #include "cs_expand.h" #include "cs_interpolate.h" #include "cs_perm_selection.h" #include "cs_copy_box.h" #include "cs_edge_detect_v2.h" #include "cs_motion_detect_v2.h" #include "cs_motion_report.h" #include "cs_ipcam.h" #include "cs_config.h" #include "cs_dct.h" #define CUDA_DBG #define DBG_CP_DOWN 0x1 #define DBG_BLKING 0x2 #define DBG_WHM 0x4 #define DBG_PERM_R 0x8 #define DBG_PERM_L 0x10 #define DBG_INTER 0x20 #define DBG_SWAP 0x40 #define DBG_EXPAND 0x80 #define DBG_ANALYSIS 0x100 #define DBG_ED 0x200 #define DBG_MOTION 0x400 #define DBG_L1_NORM 0x800 #define DBG_COPY_DONE 0x1000 #define DBG_MT_IDX 0x2000 #define DBG_MT_STEP0 0x4000 #define DBG_MT_STEP1 0x8000 #define DBG_MT_STEP2 0x10000 #define DBG_MT_STEP3 0x20000 #define DBG_MT_STEP4 0x40000 #define DBG_C_2_I 0x80000 // proto void fix_it ( int fin, int fout ) ; int allocate_d_mem() ; int allocate_h_mem() ; int setup_perm_tbls( char *, char *) ; int do_measurement ( int fin, int fout ) ; int make_one_component( int fout, char *hin_a, int in_size, int x, int y, int xbdim, int ybdim, int need_interpolate, struct frame_list *fp ) ; // misc buffer size in elements and size static int blk_size_e, blk_size_i, perm_size_e, perm_size_i ; static int perm_blk_size_e ; // stat static int total_d_mem = 0 ; // output file FILE *md_filep = NULL ; // host buffers static char *ybufp = NULL, *vbufp = NULL, *ubufp = NULL ; static int *outbufp = NULL ; static int nblk_in_x = 1, nblk_in_y = 1 ; // for expand u/v to the same size as y static int do_interpolate = 1 ; // default is yes // device buffers ... *dperm_lp is one of cube_info[i].cube_perm int *dperm_rp = NULL, *din_b1 = NULL, *din_b2 = NULL ; int *dperm_ml_rp = NULL, *dperm_ml_lp = NULL ; // read the ML file // 0:inner, 1:side, 2:corner static struct cube cube_info[ CUBE_INFO_CNT ] ; static struct cube wcube_info[ CUBE_INFO_CNT ] ; // a working copy from cube_info above static double comp_ratio_f = 100.0 ; static int do_shift = 0 ; // for analysis ... static struct cs_xyz *d_cs_xyzp = NULL ; static int *d_host_io = NULL ; // misc static int inner_cube_size = 0 ; int cs_config_check( struct cs_config *csp ) ; // rand struct RndCState rnd_state_1 ; struct RndCState rnd_state_2 ; static int blocks_processed = 0 ; // total number of frame blocks processed ... 
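// first_block / in_block_to implement the temporal ( z ) overlap on the host
// side: get_block_input() writes each incoming Y frame at offset
// in_block_to * sizey ( and U/V at in_block_to * sizeuv ) and wraps
// in_block_to modulo z_block, so after the first block only
// z_block - overlap_z new frames are read and the host Y/U/V buffers act as a
// small circular store; make_one_component() then copies the two wrapped
// segments down to the device in temporal order.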
static int first_block = 1 ; static int in_block_to = 0 ; // next "empty" block, for overlap in T domain static int *cudadbgp = NULL ; // 256k entry static struct cs_config csc ; enum { CS_TIMER_TOTAL, CS_TIMER_MEMCPY_DOWN, CS_TIMER_C_TO_I, CS_TIMER_EXPANSION, CS_TIMER_INTER, CS_TIMER_BLOCKING, CS_TIMER_PERMR, CS_TIMER_MEA, CS_TIMER_PERML, CS_TIMER_SWAP, CS_TIMER_MEMCPY_UP, CS_TIMER_ANALYSIS, CS_TIMER_ANALYSIS_EDGE, CS_TIMER_ANALYSIS_MD0, CS_TIMER_ANALYSIS_MD1, CS_TIMER_ANALYSIS_MD2, CS_TIMER_ANALYSIS_MD3, CS_TIMER_ANALYSIS_MD4, CS_TIMER_COUNT } ; static const char *timer_name[] = { "timer total", "memcpy to device", "expand c to i", "expansion", "interpolation", "blocking", "perm R", "measurement", "perm L", "swap", "memcpy to host", "analysis", "analysis edge", "analysis md0", "analysis md1", "analysis md2", "analysis md3", "analysis md4", "the end" } ; void pusage( const char *s ) { printf("Usage: %s -f configfilename.json\n", s ) ; } main( int ac, char *av[] ) { int fin, fout ; char opt ; // char opt, *finname = NULL, *foutname = NULL ; char *configfile = NULL ; setbuf( stdout, NULL ) ; setbuf( stderr, NULL ) ; while ((opt = getopt(ac, av, "f:")) != -1) { printf(" opt %c \n", opt ) ; switch (opt) { case 'f' : configfile = optarg ; break ; } } if ( configfile == NULL ) { pusage( av[0] ) ; return ( 1 ) ; } cs_config_init( &csc ) ; if ( !cs_config ( configfile, &csc )) { pusage( av[0] ) ; return ( 2 ) ; } cs_config_p ( &csc ) ; if ( !cs_config_check( &csc )) { pusage( av[0] ) ; return ( 1 ) ; } comp_ratio_f = (( double ) csc.comp_ratio / 100.0 ) ; fprintf( stderr, "x/y (%d, %d) blk x/y/z ( %d, %d, %d) in %s out %s yonly %d " "swap %d\n", csc.frame_x, csc.frame_y, csc.x_block, csc.y_block, csc.z_block, csc.finname, csc.foutname, csc.y_only, csc.do_swap ) ; fprintf( stderr, "adj x/y ( %d, %d ) expand x/y/z ( %d, %d, %d ) \n", csc.adj_x, csc.adj_y, csc.xadd, csc.yadd, csc.zadd ) ; fprintf( stderr, "weight %d dbg %x\n", csc.weight_scheme, csc.dbg_flag ) ; fprintf( stderr, "cube x/y/z %d %d %d comp %f\n", csc.cubex, csc.cubey, csc.cubez, comp_ratio_f ) ; fprintf( stderr, "edge x/y %d %d\n", csc.edge_x, csc.edge_y ) ; if ( csc.overlap_z ) fprintf( stderr, "overlap %d\n", csc.overlap_z ) ; fprintf( stderr, "perm %d %s\n", csc.do_permutation, csc.permdir ) ; if (strlen(csc.finname)) { fin = open( csc.finname, O_RDONLY ) ; if ( fin == -1 ) { printf("file %s does not exist\n", av[1]) ; exit( 1 ) ; } } fout = open( csc.foutname, O_CREAT | O_TRUNC | O_WRONLY, S_IRWXU ) ; if ( fout == -1 ) { printf("file %s open failed %d\n", csc.foutname, errno ) ; exit( 1 ) ; } dbg_init ( 256 * 1024 * 1024 * sizeof ( 4 )) ; if ( !allocate_d_mem()) { printf("%s: d_mem allocation failed\n", __func__ ) ; exit( 1 ) ; } if (( cudadbgp = dbg_d_malloc_i ( 1024 * 256 )) == NULL ) { exit( 1 ) ; } clear_device_mem_i( cudadbgp, 1024 * 256 ) ; if ( !allocate_h_mem()) { printf("%s: h_mem allocation failed\n", __func__ ) ; exit( 1 ) ; } #if __BYTE_ORDER__ == __BIG_ENDIAN opt = CS_CO_BIGENDIAN ; #else opt = 0 ; #endif if ( strlen( csc.ipcam_string)) { if ( !cs_ipcam_init ( csc.z_block, csc.frame_x, csc.frame_y, csc.ipcam_string, nblk_in_x, nblk_in_y, csc.md_x, csc.md_y, csc.disp_th_x, csc.disp_th_y )) { printf("ipcam_init failed\n") ; exit( 1 ) ; } } if ( opt ) // local machine is big endian { if ( csc.do_swap ) opt = 0 ; // back to little endian } else { if ( csc.do_swap ) opt = CS_CO_BIGENDIAN ; } if ( csc.do_permutation ) opt |= ML_PERM ; if ( csc.do_cube ) opt |= DOUBLE_PERM ; fprintf( stderr, "%s: size of header %d\n", 
__func__, sizeof ( struct cs_header )) ; // + 2 ... 1 is for the center of the edge detection rectangle // the other 1 is for the shift/move to make sense if (( cube_info[0].x <= (( csc.edge_x << 1 ) + 2 )) || ( cube_info[1].x <= (( csc.edge_x << 1 ) + 2 )) || ( cube_info[2].x <= (( csc.edge_x << 1 ) + 2 )) || ( cube_info[0].y <= (( csc.edge_y << 1 ) + 2 )) || ( cube_info[1].y <= (( csc.edge_y << 1 ) + 2 )) || ( cube_info[2].y <= (( csc.edge_y << 1 ) + 2 ))) { fprintf( stderr, "%s: error cube x %d %d %d edge x %d cube y %d %d %d edge y %d\n", __func__, cube_info[0].x, cube_info[1].x, cube_info[2].x, csc.edge_x, cube_info[0].y, cube_info[1].y, cube_info[2].y, csc.edge_y ) ; exit( 23 ) ; } if ( !cs_put_header ( fout, CS_CD_YUV420P, (( csc.y_only )? Y_COMP_ONLY : 0 ) | opt, WALSH_HADAMARD_MATRIX, csc.frame_x, csc.frame_y, csc.x_block, csc.y_block, csc.z_block, cube_info[0].x, cube_info[0].y, cube_info[0].z, cube_info[1].x, cube_info[1].y, cube_info[1].z, cube_info[2].x, cube_info[2].y, cube_info[2].z, csc.overlap_x, csc.overlap_y, csc.overlap_z, csc.xadd, csc.yadd, csc.zadd, csc.adj_x, csc.adj_y, csc.edge_x, csc.edge_y, csc.md_x, csc.md_y, csc.md_z, csc.weight_scheme )) { printf("can't write header\n") ; exit( 1 ) ; } fprintf( stderr, "%s: do_swap %d do_interpolate %d do_permutation %d do_cube %d " "do_comp_ratio %d do_block %d do_analysis %d do_one %d do_not_seek %d \n", __func__, csc.do_swap, csc.do_interpolate, csc.do_permutation, csc.do_cube, csc.do_comp_ratio, csc.do_block, csc.do_analysis, csc.do_one, csc.do_not_seek ) ; if ( md_filep ) ma_report_header ( md_filep, csc.frame_y, csc.frame_x, 0, 1, 2, 1 ) ; if ( !do_measurement( fin, fout )) { printf("do_measurement: failed\n") ; exit( 1 ) ; } close ( fin ) ; close ( fout ) ; if ( md_filep ) fclose ( md_filep ) ; } void do_tst_longlong() { fprintf(stderr, "%s: size of ll %d\n", __func__, sizeof ( long long )) ; h_tst_longlong (( long long *)din_b2, 100 ) ; dbg_p_d_data_ll ("tst long long",( long long *)din_b2, 100 * sizeof ( long long )) ; } // allocate the d mem ... 
and init the perm tables int allocate_d_mem() { int ana_size, ocube_size = 0, cube_size, *hdp = NULL, nmea, nz, i, j, k, xx, yy, zz, x, y, z ; if ( csc.do_cube ) { if (( k = cudaMalloc( &d_cs_xyzp, sizeof ( *d_cs_xyzp ) * CUBE_INFO_CNT + sizeof ( int ) * 10 )) != cudaSuccess ) { printf("%s: cs_xyzp alloc failed %d\n", __func__, k ) ; return ( 0 ) ; } total_d_mem += sizeof ( *d_cs_xyzp ) * CUBE_INFO_CNT + sizeof ( int ) * 10 ; d_host_io = ( int * )( d_cs_xyzp + CUBE_INFO_CNT ) ; fprintf( stderr, "%s: d_cs_xyzp %p d_host_io %p\n", __func__, d_cs_xyzp, d_host_io ) ; nmea = ( int )(( double )( csc.x_block * csc.y_block * csc.z_block ) * comp_ratio_f ) ; xx = csc.x_block + csc.adj_x ; yy = csc.y_block + csc.adj_y ; zz = csc.z_block + csc.zadd ; ana_size = 0 ; for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { x = csc.cubex ; y = csc.cubey ; z = csc.cubez ; #ifdef CUDA_DBG printf("%s:i %d mea %d -- xx %d %d %d x %d %d %d\n", __func__, i, nmea, xx, yy, zz, x, y, z ) ; #endif // inside is 1, side is 1/2, corner is 1/4 j = ( int )pow((double)2,(double)i) ; k = nmea / j ; printf("%s: i %d nmea %d k %d j %d comp %f\n", __func__, i, nmea, k, j, comp_ratio_f ) ; // the "+ 2" is to make sure at least the x and y has 2x2 block to cmp if ( h_do_find_perm_size ( xx, yy, zz, &x, &y, &z, csc.z_block, k, ( csc.edge_x + csc.md_x ) * 2 + 2, ( csc.edge_y + csc.md_y ) * 2 + 2) == 0 ) { printf( "%s: cube 1 failed \n", __func__ ) ; return ( 0 ) ; } cube_size = x * y * z * sizeof ( int ) ; if (( k = cudaMalloc( &cube_info[i].dp, cube_size )) != cudaSuccess ) { printf("%s: cube alloc failed %d %d %d \n", __func__, i, cube_size, k ) ; return ( 0 ) ; } total_d_mem += cube_size ; if ( hdp == NULL ) { ocube_size = cube_size ; hdp = ( int * )malloc( cube_size + 10 ) ; if ( hdp == NULL ) { printf("%s: cube host alloc failed %d %d \n", __func__, i, cube_size ) ; return ( 0 ) ; } } cube_info[i].x = x ; cube_info[i].y = y ; cube_info[i].z = z ; cube_info[i].size = x * y * z ; printf("%s: i %d x/y/z %d %d %d\n", __func__, i, x, y, z ); cube_info[i].cube_perm = NULL ; if (( x * y * z * sizeof ( int )) > ocube_size ) { printf("%s: cube size error %d %d %d %d \n", __func__, x, y, z, ocube_size ) ; return ( 0 ) ; } h_do_get_perm_matrix( hdp, xx, yy, zz, x, y, z, &cube_info[i].sink ) ; if (( k = cudaMemcpy( cube_info[i].dp, hdp, cube_size, cudaMemcpyHostToDevice)) != cudaSuccess ) { printf("%s:cube download fail: loop %d %d\n", __func__, i, k ) ; return ( 0 ) ; } x = cube_info[i]. x - ( csc.edge_x << 1 ) - ( csc.md_x << 1 ) ; y = cube_info[i]. 
y - ( csc.edge_y << 1 ) - ( csc.md_y << 1 ) ; if (( x <= 0 ) || ( y <= 0 )) { printf("%s: i %d x/y %d %d cube %d %d edge %d %d motion detect %d %d \n", __func__, i, x, y, cube_info[i].x, cube_info[i].y, csc.edge_x, csc.edge_y, csc.md_x, csc.md_y ) ; return ( 0 ) ; } k = ( x * y * ( cube_info[i].z - csc.md_z + 1 ) + NUM_OF_HVT_INDEX ) * ((( csc.md_x << 1 ) + 1 ) * (( csc.md_y << 1 ) + 1) * ( csc.md_z - 1 ) + 1 ) ; printf("%s: x %d y %d k %d ana_size %d\n", __func__, x, y, k, ana_size ) ; printf("%s: md_x/y/z %d %d %d cube.z %d\n", __func__, csc.md_x, csc.md_y, csc.md_z, cube_info[i].z ) ; if ( ana_size < k ) ana_size = k ; #ifdef CUDA_OBS dbg_p_d_data_i ( "cube tbl", cube_info[i].dp, cube_info[i].size ) ; #endif } // set the config h_set_config( d_cs_xyzp, cube_info ) ; #ifdef CUDA_DBG dbg_p_d_data_i ( "config", ( int *)d_cs_xyzp, 3 * CUBE_INFO_CNT ) ; #endif printf("%s: ana_size %d\n", __func__, ana_size ) ; free ( hdp ) ; } if ( csc.do_reconstruction ) { if ( !h_do_dct_init()) { printf("%s: h_do_dct_init failed \n", __FILE__) ; return ( 0 ) ; } } x = csc.frame_x + ( csc.xadd << 1 ) ; y = csc.frame_y + ( csc.yadd << 1 ) ; nblk_in_x = ( x - csc.overlap_x ) / ( csc.x_block - csc.overlap_x ) ; j = ( x - csc.overlap_x ) % ( csc.x_block - csc.overlap_x ) ; if ( j ) { fprintf( stderr, "%s: x %d x_block %d overlap_x %d nblk_in_x %d j %d\n", __func__, x, csc.x_block, csc.overlap_x, nblk_in_x , j ) ; return ( 0 ) ; } nblk_in_y = ( y - csc.overlap_y ) / ( csc.y_block - csc.overlap_y ) ; j = ( y - csc.overlap_y ) % ( csc.y_block - csc.overlap_y ) ; if ( j ) { fprintf( stderr, "%s: y %d y_block %d overlap_y %d nblk_in_y %d j %d\n", __func__, y, csc.y_block, csc.overlap_y, nblk_in_y, j ) ; return ( 0 ) ; } if (( nblk_in_x < 2 ) || ( nblk_in_y < 2 )) { fprintf( stderr, "%s: not enough blks nblk_x %d nblk_y %d x/y %d %d ox/y %d %d\n", __func__, nblk_in_x, nblk_in_y, x, y, csc.overlap_x, csc.overlap_y ) ; return ( 0 ) ; } nz = csc.z_block + csc.zadd ; blk_size_e = ( csc.x_block + csc.adj_x ) * ( csc.y_block + csc.adj_y ) * nz ; #ifdef CUDA_DBG fprintf( stderr, "%s: blk_size %d nblk_in_x %d nblk_in_y %d nz %d x_b %d" " y_b %d ax %d ay %d\n", __func__, blk_size_e, nblk_in_x, nblk_in_y, nz, csc.x_block, csc.y_block, csc.adj_x, csc.adj_y ) ; #endif if ( csc.do_permutation ) blk_size_e++ ; perm_size_e = max_log2 ( blk_size_e ) ; if ( csc.do_analysis ) ana_size = ( ana_size > perm_size_e ) ? 
ana_size : perm_size_e ; else ana_size = perm_size_e ; blk_size_e = ana_size * nblk_in_x * nblk_in_y ; perm_blk_size_e = perm_size_e * nblk_in_x * nblk_in_y ; #ifdef CUDA_DBG fprintf( stderr, "%s: do_perm %d perm_size %d blk_size %d ana_size %d perm_blk_size %d\n", __func__, csc.do_permutation, perm_size_e, blk_size_e, ana_size, perm_blk_size_e ) ; #endif do_shift = weight_sft( csc.weight_scheme, perm_size_e, csc.x_block, csc.y_block ) ; blk_size_i = sizeof ( int ) * blk_size_e ; perm_size_i = sizeof ( int ) * perm_size_e ; if (( i = cudaMalloc( &din_b1, blk_size_i )) != cudaSuccess ) { printf("%s: din_b1 failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += blk_size_i ; if (( i = cudaMalloc( &din_b2, blk_size_i )) != cudaSuccess ) { printf("%s: din_b2 failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += blk_size_i ; if (( i = cudaMalloc( &dperm_ml_lp, perm_size_i )) != cudaSuccess ) { printf("%s: dperm_ml_lp failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += perm_size_i ; for ( k = 0 ; k < CUBE_INFO_CNT ; k++ ) { if (( i = cudaMalloc( &cube_info[k].cube_perm, perm_size_i )) != cudaSuccess ) { printf("%s: dperm_cube failed %d %d \n", __func__, k, i ) ; return ( 0 ) ; } total_d_mem += perm_size_i ; } if (( i = cudaMalloc( &dperm_ml_rp, perm_size_i )) != cudaSuccess ) { printf("%s: dperm_ml_rp failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += perm_size_i ; if (( i = cudaMalloc( &dperm_rp, perm_size_i )) != cudaSuccess ) { printf("%s: dperm_rp failed %d \n", __func__, i ) ; return ( 0 ) ; } total_d_mem += perm_size_i ; printf("%s: DEV GOOD == b1 %p b2 %p bsize %d \n" " pR %p psize %d mllp %p mlrp %p total_d_mem %d\n", __func__, din_b1, din_b2, blk_size_i, dperm_rp, perm_size_i, dperm_ml_lp, dperm_ml_rp, total_d_mem ) ; for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { fprintf( stderr, "%s: dp %p xyz %d %d %d size %d sk %d perm %p\n", __func__, cube_info[i].dp, cube_info[i].x, cube_info[i].y, cube_info[i].z, cube_info[i].size, cube_info[i].sink, cube_info[i].cube_perm ) ; } i = ( int )( log2(( double ) perm_size_e )) ; if ( !permutation_load( i, csc.permdir, dperm_ml_lp, dperm_ml_rp )) { printf("%s: y perm load failed\n", __func__ ) ; return ( 0 ) ; } return ( 1 ) ; } /* allocate_h_mem: allocate the buffers for used in memory. need: frame_x, frame_y, z_block, blk_size_i, y_only */ int allocate_h_mem() { int ysize, uvsize ; // for YUV420 ysize = csc.frame_x * csc.frame_y * csc.z_block ; uvsize = ysize >> 2 ; if ( !strlen(csc.ipcam_string)) { ybufp = ( char * ) malloc ( ysize ) ; } if ( !csc.y_only ) { ubufp = ( char * ) malloc ( uvsize ) ; vbufp = ( char * ) malloc ( uvsize ) ; } else { ubufp = vbufp = NULL ; } if (( !ybufp && ( !strlen(csc.ipcam_string))) || ( !csc.y_only && ( !ubufp || !vbufp ))) { printf("%s: 1 malloc failed \n", __func__ ) ; return ( 0 ) ; } outbufp = ( int * ) malloc ( blk_size_i ) ; if ( outbufp == NULL ) { printf("%s: 2 malloc failed \n", __func__ ) ; return ( 0 ) ; } printf("%s: HOST GOOD ysize %d uv %d ybufp %p\n" " u %p v %p o %p blksize %d\n", __func__, ysize, uvsize, ybufp, ubufp, vbufp, outbufp, blk_size_i ) ; return ( 1 ) ; } int setup_perm_tbls( int *d_bp ) { int i ; unsigned int ran ; RndC_uint32 ran1 ; // assume the folloing // 1. left selection tbls are in cube_info[i].cube_perm // 2. ml seq perm tbl is in perm_ml_lp and perm_ml_rp ; // 3. dperm_rp will be updated ... // right perm first ... 
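	// Sketch for illustration only, fenced with CUDA_OBS like the other
	// debug-only blocks in this file: one plausible reading of
	// h_do_permutation_double() is that it composes two index tables as
	// out[i] = ml[ sel[i] ], i.e. the per-frame random selection is applied
	// first and the fixed ML-sequence table second.  This is an assumption,
	// not the verified semantics; sel[], ml[] and out[] below are made-up
	// 4-entry tables and the block is never compiled in.
#ifdef CUDA_OBS
	{
		int ii, sel[4] = { 2, 0, 3, 1 }, ml[4] = { 1, 3, 0, 2 }, out[4] ;

		for ( ii = 0 ; ii < 4 ; ii++ )
			out[ ii ] = ml[ sel[ ii ]] ;

		printf("%s: composition sketch %d %d %d %d\n", __func__,
			out[0], out[1], out[2], out[3] ) ;
	}
#endif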
// ran = rand() ; randi_RndC( &rnd_state_1, RndC_uint32( perm_size_e ), (size_t)1, &ran1 ) ; ran = ran1 - 1 ; #ifdef CUDA_OBS fprintf( stderr, "%s: right ran %d perm_size %d \n", __func__, ran, perm_size_e ) ; #endif h_do_perm_selection_R ( d_bp, perm_size_e, ran ) ; #ifdef CUDA_OBS dbg_p_d_data_i("right selection", d_bp, perm_size_e) ; #endif h_do_permutation_double ( d_bp, dperm_ml_rp, dperm_rp, perm_size_e ) ; #ifdef CUDA_OBS dbg_p_d_data_i("right merge", dperm_rp, perm_size_e ) ; #endif // now the left randi_RndC( &rnd_state_2, RndC_uint32( perm_size_e ), (unsigned int)1, &ran1 ) ; ran = ran1 - 1 ; #ifdef CUDA_OBS fprintf( stderr, "%s: left ran %d perm_size %d \n", __func__, ran, perm_size_e ) ; #endif for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { h_do_perm_selection_L ( d_bp, perm_size_e, cube_info[i].dp, cube_info[i].size, ran, cube_info[i].sink ) ; h_do_permutation_double ( d_bp, dperm_ml_lp, cube_info[i].cube_perm, perm_size_e ) ; // orig one } return ( 1 ) ; } int get_one_chunk( int fin, char *bp, int size ) { int i ; if (( i = read ( fin, bp, size )) < 0 ) { printf("%s: y read failed errno %d\n", __func__, errno ) ; return ( 0 ) ; } if ( !i ) return ( 2 ) ; if ( i != size ) { printf("%s: read failed i %d\n", __func__, i ) ; return ( 0 ) ; } return ( 1 ) ; } int get_block_input ( int fin ) { int offsety, offsetuv, sizey, sizeuv, i ; int cnt ; offsety = 0 ; offsetuv = 0 ; sizey = csc.frame_x * csc.frame_y ; sizeuv = sizey >> 2 ; #ifdef CUDA_OBS fprintf( stderr, "%s: fin %d x %d y %d zb %d yin %x uin %x vin %x\n", __func__, fin, csc.frame_x, csc.frame_y, csc.z_block, ybufp, ubufp, vbufp ) ; #endif cnt = csc.z_block ; if ( !first_block ) cnt -= csc.overlap_z ; while ( cnt-- ) { offsety = in_block_to * sizey ; i = get_one_chunk( fin, &ybufp[ offsety ], sizey ) ; if ( i != 1 ) return ( i ) ; if ( csc.y_only ) { if ( !csc.do_not_seek ) { if (( i = lseek ( fin, sizeuv << 1, SEEK_CUR )) < 0 ) { printf("%s: failed lseek %d\n", __func__, errno ) ; return ( 0 ) ; } } } else { offsetuv = in_block_to * sizeuv ; i = get_one_chunk( fin, &ubufp[ offsetuv ], sizeuv ) ; if ( i != 1 ) return ( i ) ; i = get_one_chunk( fin, &vbufp[ offsetuv ], sizeuv ) ; if ( i != 1 ) return ( i ) ; } in_block_to++ ; if ( in_block_to == csc.z_block ) in_block_to = 0 ; } #ifdef CUDA_OBS dbg_pdata_c ( "get_block_input: ybuf", ybufp, sizey * z_block ) ; if ( !y_only ) { dbg_pdata_c ( "get_block_input: ubuf", ubufp, sizeuv * z_block ) ; dbg_pdata_c ( "get_block_input: vbuf", vbufp, sizeuv * z_block ) ; } #endif return ( 1 ) ; } int do_measurement ( int fin, int fout ) { int cnt, i, frame_cnt ; double d, ad, aut, ast ; clock_t tut, tst ; struct frame_list *fp ; static RndC_uint32 rand_key1 = 0 ; static RndC_uint32 rand_key2 = 0 ; // d_in_p2 will be used in the fast transform ... // d_in_p has data before blocking ... 
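	// Loop structure, as implemented below:
	// (1) when do_permutation is set, re-seed both RndC streams with the
	//     incrementing rand_key1/rand_key2 and rebuild the selection and
	//     permutation tables via setup_perm_tbls( din_b1 ) ;
	// (2) pull the next z_block frames ( only z_block - overlap_z new ones
	//     after the first block ) from the input file, or take a ready
	//     frame set from the ipcam ring ( ybufp = fp->gbp ) ;
	// (3) refresh wcube_info from cube_info, push it to the device config,
	//     then run make_one_component() on Y and, unless y_only, on U and V ;
	// (4) on end of input ( i == 2 ) print the per-stage timers and return.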
omp_timer_init( CS_TIMER_COUNT ) ; cs_timer_init( 1 ) ; if ( strlen(csc.ipcam_string)) cs_ipcam_start() ; frame_cnt = 0 ; first_block = 1 ; while ( 1 ) { if ( csc.do_permutation ) { init_RndC( &rnd_state_1, rand_key1 ) ; init_RndC( &rnd_state_2, rand_key2 ) ; setup_perm_tbls( din_b1 ) ; rand_key1++ ; rand_key2++ ; // printf("randkey1 %d randkey2 %d\n", rand_key1, rand_key2 ) ; } if ( csc.do_one-- ) { if (strlen( csc.ipcam_string) == 0 ) { i = get_block_input ( fin ) ; fp = NULL ; } else { #ifdef CUDA_DBG printf("do_measurement: OUT ===\n") ; #endif fp = cs_ipcam_get() ; #ifdef CUDA_DBG printf("do_measurement: IN ===\n") ; #endif ybufp = fp->gbp ; i = 1 ; } } else i = 2 ; // for debug only do once // dbg_pdata_c ( "after get block input", ybufp, frame_x * frame_y * zb ) ; if ( !i ) { printf("%s:failed %d\n", __func__, frame_cnt ) ; return ( 0 ) ; } if ( i == 2 ) { printf("\n%s: frame_cnt %d size %d\n", __func__, frame_cnt, frame_cnt * csc.frame_x * csc.frame_y ) ; printf("=== counters below are based on the block cnt\n") ; for ( i = 0 ; i < CS_TIMER_COUNT ; i++ ) { omp_timer_get ( i, &d, &cnt, &ad ) ; printf("%s ::: %f cnt %d average %f ms\n", timer_name[i], d, cnt, ad ) ; } cs_timer_get ( 0, &tst, &tut, &cnt, &ast, &aut ) ; printf("overall st %d ut %d cnt %d ast %f aut %f == total %f ms\n", tst, tut, cnt, ast, aut, ast + aut ) ; return ( 1 ) ; } memcpy( wcube_info, cube_info, sizeof ( cube_info )) ; h_set_config ( d_cs_xyzp, wcube_info ) ; // do y i = make_one_component( fout, ybufp, csc.frame_x * csc.frame_y * csc.z_block, csc.frame_x, csc.frame_y, csc.x_block, csc.y_block, 0, fp ) ; if ( !i ) { printf("%s:failed frame_cnt %d\n", __func__, frame_cnt ) ; return ( 0 ) ; } if ( !csc.y_only ) { // should revisit din_size_u ... due to the expand // do y if ( !make_one_component( fout, ubufp, csc.frame_x * csc.frame_y * csc.z_block >> 2 , csc.frame_x >> 1, csc.frame_y >> 1, csc.x_block >> 1, csc.y_block >> 1, 1, fp )) { printf("%s:failed i %d frame %d\n", __func__, i, frame_cnt ) ; return ( 0 ) ; } if ( !make_one_component( fout, vbufp, csc.frame_x * csc.frame_y * csc.z_block >> 2, csc.frame_x >> 1, csc.frame_y >> 1, csc.x_block >> 1, csc.y_block >> 1, 1, fp )) { printf("%s:failed i %d frame %d\n", __func__, i, frame_cnt ) ; return ( 0 ) ; } } frame_cnt += ( csc.z_block - csc.overlap_z ) ; first_block = 0 ; } } int * the_other_d_buf ( int *p ) { if ( p == din_b1 ) return ( din_b2 ) ; return ( din_b1 ) ; } /* fout : out file descriptor db_1p : add of input buffer on device dout_a : add of output buffer on device hin_a : add of host buffer for input from file ysize : size of input for this comp in byte // real dimension side dout_size : size of output buffer on device in byte // log2 size din_size : size of input buffer on device in byte // log2 size din_a2 : add of input buffer on device before block // NULL if block is not needed x, y : frame dimension xbdim, ybdim : block x/y dimension blk_dst_size : log2 of x/y/z block size d_out_p2: add of output buffer on device for do_perm fp: frame_list pointer ... 
for ipcam */ int make_one_component( int fout, char *hin_a, int in_size, int x, int y, int xbdim, int ybdim, int need_interpolate, struct frame_list *fp ) { int *outp, *d_currp, *d_nextp ; char *d_curr_cp ; int orig, hvt_size, overall_size, i, rec_size, j ; int k, frame_size = x * y ; #ifdef CUDA_DBG fprintf( stderr, "%s: inbuf %p in_size %d x/y %d %d blk x/y %d %d\n" "interpo %d\n", __func__, hin_a, in_size, x, y, xbdim, ybdim, need_interpolate ) ; #endif d_currp = din_b1 ; // clear_device_mem_c( d_currp, din_size ) ; d_nextp = din_b2 ; // clear_device_mem_c( d_nextp, din_size ) ; omp_timer_on( CS_TIMER_TOTAL ) ; cs_timer_on( 0 ) ; // copy the frame data ( could be y, u, v ) from host to device // take care of z_overlap d_curr_cp = ( char * ) d_currp ; omp_timer_on( CS_TIMER_MEMCPY_DOWN ) ; if (( i = cudaMemcpy( d_currp, hin_a + in_block_to * frame_size, ( csc.z_block - in_block_to ) * frame_size, cudaMemcpyHostToDevice)) != cudaSuccess ) { printf("%s:download fail: first %d\n", __func__, i ) ; return ( 0 ) ; } if ( in_block_to ) { if (( i = cudaMemcpy( d_curr_cp + ( csc.z_block - in_block_to ) * frame_size, hin_a, in_block_to * frame_size, cudaMemcpyHostToDevice)) != cudaSuccess ) { printf("%s:download fail: second %d\n", __func__, i ) ; return ( 0 ) ; } } omp_timer_off( CS_TIMER_MEMCPY_DOWN ) ; if ( csc.dbg_flag & DBG_CP_DOWN ) dbg_p_d_data_c_mn ( "after memcpy cp in", ( char * )d_currp, in_size, x, y, x ) ; d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on( CS_TIMER_C_TO_I ) ; h_expand_c_to_i (( char * ) d_currp, d_nextp, in_size ) ; omp_timer_off( CS_TIMER_C_TO_I ) ; d_currp = d_nextp ; if ( csc.dbg_flag & DBG_C_2_I ) dbg_p_d_data_i_mn ( "after c to i", d_currp, in_size, x, y, x ) ; // to bring u/v component to same size as y if ( do_interpolate && need_interpolate ) { exit( 34 ) ; // LDL QQQ d_nextp = the_other_d_buf ( d_currp ) ; // might need to adjust for the interpolation, // like blk_size_i ... which is based on 'y' // and used in h_make_block ... LDL omp_timer_on ( CS_TIMER_INTER ) ; if ( !h_make_interpolate ( d_currp, d_nextp, x, y, csc.z_block, INT_YUV420 )) { printf("%s: make_interpolate failed\n", __func__ ) ; return ( 0 ) ; } omp_timer_off ( CS_TIMER_INTER ) ; d_currp = d_nextp ; if ( csc.dbg_flag & DBG_INTER ) dbg_p_d_data_c ( "after interpolate",( char *)d_currp, in_size ) ; xbdim <<= 1 ; ybdim <<= 1 ; x <<= 1 ; y <<= 1 ; in_size *= 4 ; } // expand the data d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on( CS_TIMER_EXPANSION ) ; h_expand_frame ( d_currp, d_nextp, x, y, csc.xadd, csc.yadd, csc.zadd, csc.z_block ) ; omp_timer_off( CS_TIMER_EXPANSION ) ; d_currp = d_nextp ; frame_size = ( x + ( csc.xadd << 1 )) * ( y + ( csc.yadd << 1 )) ; if ( csc.dbg_flag & DBG_EXPAND ) { i = frame_size * ( csc.z_block + csc.zadd ) ; dbg_p_d_data_i_mn ( "after expand", d_currp, i, x + ( csc.xadd << 1 ), y + ( csc.yadd << 1 ), x + ( csc.xadd << 1 )) ; } // do the blocking ... move data/append 0/weight if ( csc.do_block ) { d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on( CS_TIMER_BLOCKING ) ; set_device_mem_i ( d_nextp, perm_blk_size_e, 0 ) ; // this is needed ... 
// to set the adj_x/adj_y to 0 h_make_block( d_currp, d_nextp, x + ( csc.xadd << 1 ), y + ( csc.yadd << 1 ), frame_size, xbdim, ybdim, csc.z_block, perm_size_e, csc.do_permutation, xbdim - csc.overlap_x, ybdim - csc.overlap_y, nblk_in_x, nblk_in_y, csc.adj_x, csc.adj_y, csc.weight_scheme, do_shift ) ; omp_timer_off( CS_TIMER_BLOCKING ) ; d_currp = d_nextp ; if ( csc.dbg_flag & DBG_BLKING ) dbg_p_d_data_i_mn_skip ( "after blking", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; #ifdef CUDA_OBS dbg_p_d_data_i_mn ( "after blking", do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + adj_x, ybdim + adj_y, xbdim + adj_x ) ; #endif // NOTE: if do_permutation, then take out the first element } // do the R permutation here ... if ( csc.do_permutation ) { d_nextp = the_other_d_buf ( d_currp ) ; #ifdef CUDA_OBS { int *dp, i1 ; dp = outbufp ; for ( i1= 0 ; i1 < perm_size_e ; i1++ ) *dp++ = i1 ; if (( i1 = cudaMemcpy( d_currp, outbufp, perm_size_e * sizeof ( int ) , cudaMemcpyHostToDevice)) != cudaSuccess ) { printf("%s:test cpy fail: %d \n", __func__, i1 ) ; return ( 0 ) ; } dbg_p_d_data_i("R before ", d_currp, perm_size_e) ; } #endif #ifdef CUDA_OBS dbg_p_d_data_i("R before ", d_currp + perm_size_e, perm_size_e) ; #endif omp_timer_on( CS_TIMER_PERMR ) ; // input is din_a ... h_do_permutation_R ( d_currp, d_nextp, dperm_rp, perm_blk_size_e, perm_size_e ) ; omp_timer_off( CS_TIMER_PERMR ) ; d_currp = d_nextp ; #ifdef CUDA_OBS dbg_p_d_data_i("R after ", d_currp + perm_size_e, perm_size_e) ; #endif if ( csc.dbg_flag & DBG_PERM_R ) dbg_p_d_data_i_mn_skip ( "after perm-R", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; } // do the transformation ... if ( csc.dbg_flag & DBG_WHM ) dbg_p_d_data_i_mn_skip ( "before whm", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; omp_timer_on( CS_TIMER_MEA ) ; cs_whm_measurement_b( d_currp, perm_blk_size_e, perm_size_e ) ; omp_timer_off( CS_TIMER_MEA ) ; if ( csc.dbg_flag & DBG_WHM ) dbg_p_d_data_i_mn_skip ( "after whm", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; // do the L permutation here ... if ( csc.do_permutation ) { d_nextp = the_other_d_buf ( d_currp ) ; // input is din_a ... omp_timer_on( CS_TIMER_PERML ) ; h_do_permutation_Lv2 ( d_currp, d_nextp, wcube_info[0].cube_perm, wcube_info[1].cube_perm, wcube_info[2].cube_perm, perm_blk_size_e, perm_size_e, nblk_in_x, nblk_in_y ) ; omp_timer_off( CS_TIMER_PERML ) ; d_currp = d_nextp ; if ( csc.dbg_flag & DBG_PERM_L ) dbg_p_d_data_i_mn_skip ( "after L-perm", csc.do_permutation ? d_currp + 1 : d_currp, perm_blk_size_e, xbdim + csc.adj_x, ybdim + csc.adj_y, csc.z_block + csc.zadd, xbdim + csc.adj_x, perm_size_e ) ; // dbg_p_d_data_i ( "after l perm", d_currp, perm_blk_size_e ) ; } // copy_vec ... 
the to-size is the "inner block" size d_nextp = the_other_d_buf ( d_currp ) ; inner_cube_size = wcube_info[0].size ; overall_size = inner_cube_size * nblk_in_x * nblk_in_y ; #ifdef CUDA_DBG set_device_mem_i( d_nextp, perm_blk_size_e, 101 ) ; #endif if ( !( k = h_do_copy_vec ( d_currp, d_nextp, overall_size, perm_size_e, inner_cube_size ))) { printf("%s: copy_vec failed %d i %d \n", __func__, k, i ) ; return ( 0 ) ; } d_currp = d_nextp ; if ( csc.dbg_flag & DBG_COPY_DONE ) { dbg_p_d_data_i_mn_v2 ( "copy after", d_currp, overall_size, wcube_info[0].x, wcube_info, nblk_in_x, nblk_in_y ) ; } blocks_processed++ ; // from here and on ... only the L-selected measurements if ( csc.do_analysis ) { // edge detection omp_timer_on ( CS_TIMER_ANALYSIS ) ; d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on ( CS_TIMER_ANALYSIS_EDGE ) ; h_do_edge_detection_v2 ( d_currp, d_nextp, overall_size, d_cs_xyzp, csc.edge_x, csc.edge_y, nblk_in_x, nblk_in_y, cube_info) ; omp_timer_off ( CS_TIMER_ANALYSIS_EDGE ) ; if ( csc.dbg_flag & DBG_ED ) { dbg_p_d_data_i_mn_v2 ( "edge_v2 orig", d_currp, overall_size, wcube_info[0].x, wcube_info, nblk_in_x, nblk_in_y ) ; dbg_p_d_data_i_mn_v2 ( "edge_v2 get", d_nextp, overall_size, wcube_info[0].x, wcube_info, nblk_in_x, nblk_in_y ) ; } d_currp = d_nextp ; d_nextp = the_other_d_buf ( d_currp ) ; if (!(i = h_do_copy_box_v2 ( d_currp, d_nextp, overall_size, csc.edge_x, csc.edge_y, nblk_in_x, nblk_in_y, d_cs_xyzp, cube_info ))) { printf("%s: copy_box failed i %d size %d \n", __func__, i, overall_size ) ; return ( 0 ) ; } d_currp = d_nextp ; d_nextp = the_other_d_buf ( d_currp ) ; for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { wcube_info[i].x -= csc.edge_x * 2 ; wcube_info[i].y -= csc.edge_y * 2 ; wcube_info[i].size = wcube_info[i].x * wcube_info[i].y * wcube_info[i].z ; } h_set_config ( d_cs_xyzp, wcube_info ) ; inner_cube_size = wcube_info[0].size ; overall_size = inner_cube_size * nblk_in_x * nblk_in_y ; if ( csc.dbg_flag & DBG_ED ) { dbg_p_d_data_i_mn_v2 ( "edge_v2 after copy", d_currp, overall_size, wcube_info[0].x, wcube_info, nblk_in_x, nblk_in_y ) ; } // motion detection #ifdef CUDA_DBG set_device_mem_i( d_nextp, blk_size_e, 111 ) ; #endif k = h_do_motion_idx_v2 ( d_nextp, blk_size_e, &orig, nblk_in_x, nblk_in_y, wcube_info, csc.md_x, csc.md_y, csc.md_z, &rec_size ) ; #ifdef CUDA_DBG printf("%s: rec_size %d orig %d nblk_in_x/y %d %d \n", __func__, rec_size, orig, nblk_in_x, nblk_in_y ) ; #endif if ( !k ) { printf("%s: motion failed", __func__ ) ; return( 0 ) ; } if ( csc.dbg_flag & DBG_MT_IDX ) printf("orig idx is %d size %d\n", orig, rec_size ) ; hvt_size = ( csc.md_x * 2 + 1 ) * ( csc.md_y * 2 + 1 ) * ( csc.md_z - 1 ) + 1 ; if ( csc.dbg_flag & DBG_MT_IDX ) dbg_p_d_data_i_mn ( "idx original ", d_nextp, ( rec_size + NUM_OF_HVT_INDEX ) * ( hvt_size * nblk_in_x * nblk_in_y ), rec_size + NUM_OF_HVT_INDEX, hvt_size * nblk_in_x * nblk_in_y, 6 ) ; omp_timer_on ( CS_TIMER_ANALYSIS_MD0 ) ; // step 0 : copy data ... k = h_do_motion_detection_step0_v2 ( d_currp, d_nextp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, csc.md_x * 2, csc.md_y * 2, csc.md_z, d_cs_xyzp, hvt_size, inner_cube_size ) ; omp_timer_off ( CS_TIMER_ANALYSIS_MD0 ) ; if ( !k ) { printf("%s: step0 failed", __func__ ) ; return( 0 ) ; } d_currp = d_nextp ; if ( csc.dbg_flag & DBG_MT_STEP0 ) dbg_p_d_data_i_mn ( "motion 0", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ; // step 1 ... 
yk-y0 for ( k = 0 ; k < CUBE_INFO_CNT ; k++ ) { wcube_info[k].x -= csc.md_x * 2 ; wcube_info[k].y -= csc.md_y * 2 ; wcube_info[k].z -= ( csc.md_z - 1 ) ; wcube_info[k].size = wcube_info[k].x * wcube_info[k].y * wcube_info[k].z ; } h_set_config ( d_cs_xyzp, wcube_info ) ; // d_nextp = the_other_d_buf ( d_currp ) ; omp_timer_on ( CS_TIMER_ANALYSIS_MD1 ) ; h_do_l1_norm_step1_v2( d_currp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, orig, hvt_size ) ; omp_timer_off ( CS_TIMER_ANALYSIS_MD1 ) ; if ( csc.dbg_flag & DBG_MT_STEP1 ) dbg_p_d_data_i_mn ( "motion 1", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ; // step 2 -- do the sum omp_timer_on ( CS_TIMER_ANALYSIS_MD2 ) ; k = h_do_l1_norm_step2_v2( d_currp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, wcube_info, d_cs_xyzp, d_host_io ) ; omp_timer_off ( CS_TIMER_ANALYSIS_MD2 ) ; if ( !k ) { printf("%s: step2 failed", __func__ ) ; return( 0 ) ; } if ( csc.dbg_flag & DBG_MT_STEP2 ) dbg_p_d_data_i_mn ( "motion 2", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, rec_size + NUM_OF_HVT_INDEX ) ; // step 3 -- get 1-|y0-yk|/|y0| omp_timer_on ( CS_TIMER_ANALYSIS_MD3 ) ; k = h_do_l1_norm_step3_v2( d_currp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, orig, hvt_size ) ; omp_timer_off ( CS_TIMER_ANALYSIS_MD3 ) ; printf("%s: step3 done, k %d outbufp %p\n", __func__, k, outbufp ) ; if ( !k ) { printf("%s: step3 failed", __func__ ) ; return( 0 ) ; } if ( csc.dbg_flag & DBG_MT_STEP3 ) dbg_p_d_data_i_mn ( "motion 3", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, 6 ) ; // step 4 -- find the winner in each of the blocks omp_timer_on ( CS_TIMER_ANALYSIS_MD4 ) ; k = h_do_l1_norm_step4_v2( d_currp, rec_size * hvt_size * nblk_in_x * nblk_in_y, rec_size, orig, hvt_size, outbufp, ( csc.md_y * 2 + 1 + 1 ) * csc.md_x ) ; // last param is to inidcate the block which is right after the orig // in time domain and without the vertical/horizontal shifting. omp_timer_off ( CS_TIMER_ANALYSIS_MD4 ) ; printf("%s: step4 done k %d\n", __func__, k ) ; if ( !k ) { printf("%s: step4 failed", __func__ ) ; return( 0 ) ; } if ( csc.dbg_flag & DBG_MT_STEP4 ) dbg_p_d_data_i_mn ( "motion 4", d_currp, ( rec_size + NUM_OF_HVT_INDEX ) * hvt_size * nblk_in_x * nblk_in_y, rec_size + NUM_OF_HVT_INDEX, hvt_size, 8 ) ; printf("%s: step4 done\n", __func__ ) ; if ( csc.dbg_flag & DBG_MT_STEP4 ) { dbg_p_d_data_i_mn ( "motion 4 host", d_currp, ( 1 + NUM_OF_HVT_INDEX ) * nblk_in_x * nblk_in_y * 2, ( 1 + NUM_OF_HVT_INDEX ) * 2, nblk_in_x * nblk_in_y, ( 1 + NUM_OF_HVT_INDEX ) * 2) ; dbg_p_data_i_mn ( "motion 4 host", outbufp, ( 1 + NUM_OF_HVT_INDEX ) * nblk_in_x * nblk_in_y * 2, ( 1 + NUM_OF_HVT_INDEX ) * 2, nblk_in_x * nblk_in_y, ( 1 + NUM_OF_HVT_INDEX ) * 2) ; } #ifdef CUDA_DBG dbg_p_data_i_mn ( "return values", outbufp, ( 1 + NUM_OF_HVT_INDEX ) * nblk_in_x * nblk_in_y * 2, ( 1 + NUM_OF_HVT_INDEX ) * 2, nblk_in_x * nblk_in_y, ( 1 + NUM_OF_HVT_INDEX ) * 2) ; #endif omp_timer_off ( CS_TIMER_ANALYSIS ) ; omp_timer_off( CS_TIMER_TOTAL ) ; cs_timer_off( 0 ) ; if ( md_filep ) ma_report_record ( md_filep, outbufp, blocks_processed, csc.x_block, csc.y_block, csc.z_block, nblk_in_x, nblk_in_y, csc.overlap_x, csc.overlap_y, csc.overlap_z, ( csc.weight_scheme == WEIGHT_LINEAR )? 1 : 0, ( csc.weight_scheme == WEIGHT_LINEAR )? 
2 : 0 ) ; // need to #define if ( strlen(csc.ipcam_string) ) { cs_ipcam_record ( outbufp, fp->outp ) ; cs_ipcam_put ( fp ) ; } } else { // NOTE: LDL need work here ... since the size will be different. if ( csc.do_swap ) { omp_timer_on( CS_TIMER_SWAP ) ; htonl_device_mem_i( d_currp, blk_size_e ) ; omp_timer_off( CS_TIMER_SWAP ) ; if ( csc.dbg_flag & DBG_SWAP ) dbg_p_d_data_i ( "after swap", d_currp, blk_size_e ) ; } outp = outbufp ; j = blk_size_i ; omp_timer_on( CS_TIMER_MEMCPY_UP ) ; if (( i = cudaMemcpy(( char * )outp, d_currp, j, cudaMemcpyDeviceToHost)) != cudaSuccess ) { printf("make_one_component:upload fail: %d\n", i ) ; return ( 0 ) ; } omp_timer_off( CS_TIMER_MEMCPY_UP ) ; omp_timer_off( CS_TIMER_TOTAL ) ; cs_timer_off( 0 ) ; if ( csc.do_permutation ) { outp++ ; // take out 1st entry j -= sizeof ( int ) ; } // LDL ... prepend block header ... if ( write ( fout, outp, j ) != ( j )) { printf("make_one_component: write failed errno %d\n", errno ) ; return ( 0 ) ; } } return ( 1 ) ; } int cs_config_check( struct cs_config *csp ) { int err = 0 ; if (( csp->adj_x < 0 ) || ( csp->adj_y < 0 )) { fprintf( stderr, "error: adj %d %d \n", csp->adj_x, csp->adj_y ) ; err++ ; } else if ( csp->adj_x || csp->adj_y ) csp->do_block++ ; if (( csp->comp_ratio > 100 ) || ( csp->comp_ratio <= 0 )) { fprintf( stderr, "comp_ratio error %d\n", csp->comp_ratio ) ; err++ ; } if ( csp->do_analysis && ( ( csp->md_x <= 0 ) || ( csp->md_y <= 0 ) || ( csp->md_z <= 0 ))) { fprintf( stderr, "negative motion detection size %d %d %d \n", csp->md_x, csp->md_y, csp->md_z ) ; err++ ; } if ( csp->do_cube && ( ( csp->cubex <= 0 ) || ( csp->cubey <= 0 ) || ( csp->cubez <= 0 ))) { fprintf( stderr, "negative cube size %d %d %d \n", csp->cubex, csp->cubey, csp->cubez ) ; err++ ; } if (( csp->xadd < 0 ) || ( csp->xadd < 0 ) || ( csp->xadd < 0 )) { fprintf( stderr, "negative expansion size %d %d %d \n", csp->xadd, csp->yadd, csp->zadd ) ; err++ ; } if ( csp->do_analysis && ( ( csp->edge_x <= 0 ) || ( csp->edge_y <= 0 ))) { fprintf( stderr, "non positive edge size %d %d \n", csp->edge_x, csp->edge_y ) ; err++ ; } if ( csp->do_display && ( ( csp->disp_th_x < 0 ) || ( csp->disp_th_y < 0 ))) { fprintf( stderr, "negative display threshold %d %d \n", csp->disp_th_x, csp->disp_th_y ) ; err++ ; } if (( csp->frame_x <= 0 ) || ( csp->frame_y <= 0 )) { fprintf( stderr, "frame size error %d %d \n", csp->frame_x, csp->frame_y ) ; err++ ; } if ( csp->do_block && ( ( csp->overlap_x < 0 ) || ( csp->overlap_y < 0 ) || ( csp->overlap_z < 0 ))) { fprintf( stderr, "negative overlap size %d %d %d \n", csp->overlap_x, csp->overlap_y, csp->overlap_z ) ; err++ ; } if (( csp->weight_scheme != WEIGHT_LINEAR ) && ( csp->weight_scheme != NO_WEIGHT )) { fprintf( stderr, "weight scheme err %d\n", csp->weight_scheme ) ; err++ ; } if ( csp->do_block && ( ( csp->x_block <= 0 ) || ( csp->y_block <= 0 ) || ( csp->z_block <= 0 ))) { fprintf( stderr, "non positive block size %d %d %d \n", csp->x_block, csp->y_block, csp->z_block ) ; err++ ; } // if ( csp->do_cube && !csp->do_permutation ) { fprintf( stderr, "do_cube and not do_permutation\n") ; err++ ; } if ( csp->ipcam_string && !csp->y_only ) { fprintf( stderr, "ipcam but not y_only\n") ; err++ ; } if ( csp->do_cube && (( csp->cubex > csp->x_block ) || ( csp->cubey > csp->y_block ) || ( csp->cubez > csp->z_block ))) { fprintf( stderr, "Error: cube/block sizes mismatch\n") ; err++ ; } if ( csp->do_cube && !csp->y_only ) { fprintf( stderr, "do_cube and not y_only\n") ; err++ ; } if ( csp->overlap_z >= 
csp->z_block ) { fprintf( stderr, "Error: overlap_z %d z_block %d\n", csp->overlap_z, csp->z_block ) ; err++ ; } if ( csc.do_permutation && ( !strlen(csc.permdir))) { fprintf( stderr, "Error: do perm with no perm dir\n") ; err++ ; } if (( csp->frame_x < 0 ) || ( csp->frame_y < 0 ) || ( csp->x_block < 0 ) || ( csp->y_block < 0 ) || ( csp->z_block < 0 ) || ( !strlen(csp->finname) && !strlen(csp->ipcam_string)) || !strlen(csp->foutname) || ( strlen(csp->finname) && strlen(csp->ipcam_string))) { fprintf( stderr, "Error: misc \n") ; err++ ; } if( strlen(csc.md_outputfile)) { if (( md_filep = fopen ( csc.md_outputfile, "w+")) == NULL ) { fprintf(stderr, "Error: openfile %s\n", csc.md_outputfile ) ; err++ ; } } return ( !err ) ; }
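The motion-detection section above sizes its search with hvt_size = (md_x*2+1)*(md_y*2+1)*(md_z-1)+1, i.e. the number of candidate shifts scored per block. The short standalone restatement below is only an illustration of that arithmetic, not code from the file above; the helper name hvt_count is made up, and the reading of the trailing "+1" as the unshifted reference block is an assumption.

// Illustration only -- restates the hvt_size formula used in the code above.
// (2*md_x+1) x (2*md_y+1) spatial shifts for each of the (md_z-1) non-reference
// frames in the temporal block; the trailing +1 appears to account for the
// unshifted reference block itself (assumption).
#include <cstdio>

static int hvt_count(int md_x, int md_y, int md_z)
{
    return (md_x * 2 + 1) * (md_y * 2 + 1) * (md_z - 1) + 1;
}

int main()
{
    // e.g. a +/-2 pixel search over a 3-frame block: 5 * 5 * 2 + 1 = 51 candidates
    std::printf("hvt_size = %d\n", hvt_count(2, 2, 3));
    return 0;
}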
f04066475495a3f23975b172340858059970f0ba.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>

// Kernel that executes convolution. Nothing fancy is done. We don't even try to avoid
// block effects here.
__global__ void KernConvolve(float *data, float *kernels, float *dataOut, int signalLength, int kernelWidth, int numPtsPerBlock)
{
    // first fetch the data for this block.
    extern __shared__ float sData[] ;
    float *arrData = (float *) &sData[0] ;
    float *arrKernel = (float *) &sData[numPtsPerBlock+kernelWidth-1] ;

    // copy first the data vector.
    int dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x - kernelWidth/2;
    int numPtsToCopy = numPtsPerBlock + kernelWidth-1 ;
    for (int index = threadIdx.x ; index < numPtsToCopy ; index+= blockDim.x, dataIndexInSignal += blockDim.x)
    {
        if (dataIndexInSignal < 0 || dataIndexInSignal >= signalLength)
            arrData[index] = 0 ;
        else
            arrData[index] = data[dataIndexInSignal] ;
    }
    __syncthreads() ;

    // copy the kernel next.
    int dataIndexInKernel = blockIdx.x * kernelWidth + threadIdx.x ;
    for (int index = threadIdx.x ; index < kernelWidth ; index+= blockDim.x, dataIndexInKernel += blockDim.x)
    {
        arrKernel[index] = kernels[dataIndexInKernel] ;
    }
    __syncthreads() ;

    // perform the convolution and write out the result.
    //output position.
    dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x ;
    for (int index = threadIdx.x ; index < numPtsPerBlock && dataIndexInSignal < signalLength ; index+= blockDim.x, dataIndexInSignal += blockDim.x)
    {
        float val = 0.0 ;
        for (int wtIndex = 0 ; wtIndex < kernelWidth ; wtIndex++)
        {
            val += arrKernel[wtIndex] * arrData[index+wtIndex] ;
        }
        // index of output data point in signal
        int outIndex = blockIdx.x * signalLength + dataIndexInSignal ;
        dataOut[outIndex] = val ;
    }
}

// Kernel that executes convolution. Nothing fancy is done. We don't even try to avoid
// block effects here.
__global__ void KernPartialConvolve(float *dPtrSignal1, float *dPtrSignal2, float *dPtrBlockProducts, int signalLength, int kernelWidth, int numPtsPerBlock)
{
    extern __shared__ float sData[] ;
    float *arrData1 = (float *) &sData[0] ;
    float *arrData2 = (float *) &sData[numPtsPerBlock] ;

    // copy first data vector.
    int dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x ;
    for (int index = threadIdx.x ; index < numPtsPerBlock && dataIndexInSignal < signalLength ; index+= blockDim.x, dataIndexInSignal += blockDim.x)
    {
        arrData1[index] = dPtrSignal1[dataIndexInSignal] ;
    }
    __syncthreads() ;

    // copy second data vector.
    int numPtsPerBlock2 = numPtsPerBlock + kernelWidth - 1 ;
    int signalIndex = blockIdx.x * signalLength ;
    dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x - (kernelWidth-1)/2;
    for (int index = threadIdx.x ; index < numPtsPerBlock2 && dataIndexInSignal < signalLength ; index+= blockDim.x, dataIndexInSignal += blockDim.x)
    {
        if (dataIndexInSignal < 0)
            arrData2[index] = 0 ;
        else
            arrData2[index] = dPtrSignal2[signalIndex+dataIndexInSignal] ;
    }
    __syncthreads() ;

    dataIndexInSignal = blockIdx.y * numPtsPerBlock ;
    int maxIndex = numPtsPerBlock+kernelWidth-1 ;
    if (signalLength + (kernelWidth-1)/2-dataIndexInSignal < maxIndex)
        maxIndex = signalLength + (kernelWidth-1)/2 - dataIndexInSignal ;

    int dataIndexInBlock = blockIdx.x*gridDim.y*kernelWidth + blockIdx.y ;
    for (int shift = threadIdx.x ; shift < kernelWidth ; shift+=blockDim.x)
    {
        float val = 0.0 ;
        for (int index = 0 ; index < numPtsPerBlock ; index++)
        {
            if (index+shift >= maxIndex)
                break ;
            val += arrData1[index]*arrData2[index+shift] ;
        }
        dPtrBlockProducts[dataIndexInBlock + (kernelWidth-1-shift)*gridDim.y] = val ;
    }
}

// Kernel that sums the results from PartialConvolve.
// Each kernel will have dimension of kernelWidth, so kernelWidth sums have to be computed per block.
// Each block will handle one dimension of a kernel sum. So the number of blocks is (kernel dimension) x (# of kernels)
__global__ void KernPartialConvolveSum(float *dPtrBlockProducts, float *dPtrResults, int kernelWidth, int numPiecesPerKernel, int numKernels)
{
    // Results from partial convolve resulted in numPiecesPerKernel partial sums for
    // every dimension of a kernel. Here a block has to sum these together.
    int numPiecesPerThread = numPiecesPerKernel/blockDim.x ;
    if (blockDim.x*numPiecesPerThread < numPiecesPerKernel)
        numPiecesPerThread++ ;

    int startKernelIndex = blockIdx.x * numPiecesPerKernel * kernelWidth ;
    int startDataIndexForBlock = startKernelIndex + blockIdx.y * numPiecesPerKernel ;
    int startDataIndexForThread = numPiecesPerThread*threadIdx.x ;

    extern __shared__ float sData[] ;

    int numToCopy1 = numPiecesPerThread ;
    if (startDataIndexForThread + numToCopy1 > numPiecesPerKernel)
        numToCopy1 = numPiecesPerKernel - startDataIndexForThread ;

    float val = 0 ;
    for (int index = 0 ; index < numToCopy1 ; index++)
    {
        val += dPtrBlockProducts[startDataIndexForBlock+startDataIndexForThread+index] ;
    }
    sData[threadIdx.x] = val ;
    __syncthreads() ;

    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (threadIdx.x < s)
            sData[threadIdx.x] += sData[threadIdx.x + s];
        __syncthreads();
    }

    if (threadIdx.x == 0)
    {
        dPtrResults[blockIdx.y+kernelWidth*blockIdx.x] = sData[0] ;
    }
}

__global__ void KernAddSignals(float *signals, float *sumSignals, int signalLength, int numSignals, int numPtsPerBlock, int numPtsPerThread)
{
    int startIndex = blockIdx.x * numPtsPerBlock + threadIdx.x * numPtsPerThread ;
    for (int ptNum = 0 ; ptNum < numPtsPerThread; ptNum++)
    {
        // index of data point in signal
        int index = startIndex + ptNum ;
        if (index >= signalLength)
            break ;
        float val = 0 ;
        for (int signalNum = 0 ; signalNum < numSignals ; signalNum++)
        {
            val += signals[index+signalNum*signalLength] ;
        }
        sumSignals[index] = val ;
    }
}

// Kernel that executes convolution. Nothing fancy is done. We don't even try to avoid
// block effects here.
__global__ void KernReverseConvolve(float *signals, float *kernels, float *dataOut, int signalLength, int kernelWidth, int numKernels, int numPtsPerBlock, int numPtsPerThread)
{
    // can probably speed this up well by fetching kernels to local memory or put it in constant memory.
    int kernelIndex = blockIdx.x ;
    int signalStartIndex = kernelIndex * signalLength ;
    int startIndex = blockIdx.y * numPtsPerBlock + threadIdx.x * numPtsPerThread ;
    for (int ptNum = 0 ; ptNum < numPtsPerThread; ptNum++)
    {
        // index of data point in signal
        int index = startIndex + ptNum ;
        if (index >= signalLength)
            break ;
        float val = 0 ;
        int startIndexInKernel = kernelIndex*kernelWidth ;
        for (int wtIndex = 0 ; wtIndex < kernelWidth ; wtIndex++)
        {
            if (wtIndex + index >= signalLength + (kernelWidth-1)/2)
                break ;
            if (index+wtIndex < (kernelWidth-1)/2)
                continue ;
            val += kernels[kernelWidth-1-wtIndex+startIndexInKernel] * signals[signalStartIndex + index + wtIndex - (kernelWidth-1)/2] ;
        }
        dataOut[signalStartIndex+index] = val ;
    }
}
f04066475495a3f23975b172340858059970f0ba.cu
#include <cuda.h>

// Kernel that executes convolution. Nothing fancy is done. We don't even try to avoid
// block effects here.
__global__ void KernConvolve(float *data, float *kernels, float *dataOut, int signalLength, int kernelWidth, int numPtsPerBlock)
{
    // first fetch the data for this block.
    extern __shared__ float sData[] ;
    float *arrData = (float *) &sData[0] ;
    float *arrKernel = (float *) &sData[numPtsPerBlock+kernelWidth-1] ;

    // copy first the data vector.
    int dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x - kernelWidth/2;
    int numPtsToCopy = numPtsPerBlock + kernelWidth-1 ;
    for (int index = threadIdx.x ; index < numPtsToCopy ; index+= blockDim.x, dataIndexInSignal += blockDim.x)
    {
        if (dataIndexInSignal < 0 || dataIndexInSignal >= signalLength)
            arrData[index] = 0 ;
        else
            arrData[index] = data[dataIndexInSignal] ;
    }
    __syncthreads() ;

    // copy the kernel next.
    int dataIndexInKernel = blockIdx.x * kernelWidth + threadIdx.x ;
    for (int index = threadIdx.x ; index < kernelWidth ; index+= blockDim.x, dataIndexInKernel += blockDim.x)
    {
        arrKernel[index] = kernels[dataIndexInKernel] ;
    }
    __syncthreads() ;

    // perform the convolution and write out the result.
    //output position.
    dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x ;
    for (int index = threadIdx.x ; index < numPtsPerBlock && dataIndexInSignal < signalLength ; index+= blockDim.x, dataIndexInSignal += blockDim.x)
    {
        float val = 0.0 ;
        for (int wtIndex = 0 ; wtIndex < kernelWidth ; wtIndex++)
        {
            val += arrKernel[wtIndex] * arrData[index+wtIndex] ;
        }
        // index of output data point in signal
        int outIndex = blockIdx.x * signalLength + dataIndexInSignal ;
        dataOut[outIndex] = val ;
    }
}

// Kernel that executes convolution. Nothing fancy is done. We don't even try to avoid
// block effects here.
__global__ void KernPartialConvolve(float *dPtrSignal1, float *dPtrSignal2, float *dPtrBlockProducts, int signalLength, int kernelWidth, int numPtsPerBlock)
{
    extern __shared__ float sData[] ;
    float *arrData1 = (float *) &sData[0] ;
    float *arrData2 = (float *) &sData[numPtsPerBlock] ;

    // copy first data vector.
    int dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x ;
    for (int index = threadIdx.x ; index < numPtsPerBlock && dataIndexInSignal < signalLength ; index+= blockDim.x, dataIndexInSignal += blockDim.x)
    {
        arrData1[index] = dPtrSignal1[dataIndexInSignal] ;
    }
    __syncthreads() ;

    // copy second data vector.
    int numPtsPerBlock2 = numPtsPerBlock + kernelWidth - 1 ;
    int signalIndex = blockIdx.x * signalLength ;
    dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x - (kernelWidth-1)/2;
    for (int index = threadIdx.x ; index < numPtsPerBlock2 && dataIndexInSignal < signalLength ; index+= blockDim.x, dataIndexInSignal += blockDim.x)
    {
        if (dataIndexInSignal < 0)
            arrData2[index] = 0 ;
        else
            arrData2[index] = dPtrSignal2[signalIndex+dataIndexInSignal] ;
    }
    __syncthreads() ;

    dataIndexInSignal = blockIdx.y * numPtsPerBlock ;
    int maxIndex = numPtsPerBlock+kernelWidth-1 ;
    if (signalLength + (kernelWidth-1)/2-dataIndexInSignal < maxIndex)
        maxIndex = signalLength + (kernelWidth-1)/2 - dataIndexInSignal ;

    int dataIndexInBlock = blockIdx.x*gridDim.y*kernelWidth + blockIdx.y ;
    for (int shift = threadIdx.x ; shift < kernelWidth ; shift+=blockDim.x)
    {
        float val = 0.0 ;
        for (int index = 0 ; index < numPtsPerBlock ; index++)
        {
            if (index+shift >= maxIndex)
                break ;
            val += arrData1[index]*arrData2[index+shift] ;
        }
        dPtrBlockProducts[dataIndexInBlock + (kernelWidth-1-shift)*gridDim.y] = val ;
    }
}

// Kernel that sums the results from PartialConvolve.
// Each kernel will have dimension of kernelWidth, so kernelWidth sums have to be computed per block.
// Each block will handle one dimension of a kernel sum. So the number of blocks is (kernel dimension) x (# of kernels)
__global__ void KernPartialConvolveSum(float *dPtrBlockProducts, float *dPtrResults, int kernelWidth, int numPiecesPerKernel, int numKernels)
{
    // Results from partial convolve resulted in numPiecesPerKernel partial sums for
    // every dimension of a kernel. Here a block has to sum these together.
    int numPiecesPerThread = numPiecesPerKernel/blockDim.x ;
    if (blockDim.x*numPiecesPerThread < numPiecesPerKernel)
        numPiecesPerThread++ ;

    int startKernelIndex = blockIdx.x * numPiecesPerKernel * kernelWidth ;
    int startDataIndexForBlock = startKernelIndex + blockIdx.y * numPiecesPerKernel ;
    int startDataIndexForThread = numPiecesPerThread*threadIdx.x ;

    extern __shared__ float sData[] ;

    int numToCopy1 = numPiecesPerThread ;
    if (startDataIndexForThread + numToCopy1 > numPiecesPerKernel)
        numToCopy1 = numPiecesPerKernel - startDataIndexForThread ;

    float val = 0 ;
    for (int index = 0 ; index < numToCopy1 ; index++)
    {
        val += dPtrBlockProducts[startDataIndexForBlock+startDataIndexForThread+index] ;
    }
    sData[threadIdx.x] = val ;
    __syncthreads() ;

    for (unsigned int s=blockDim.x/2; s>0; s>>=1)
    {
        if (threadIdx.x < s)
            sData[threadIdx.x] += sData[threadIdx.x + s];
        __syncthreads();
    }

    if (threadIdx.x == 0)
    {
        dPtrResults[blockIdx.y+kernelWidth*blockIdx.x] = sData[0] ;
    }
}

__global__ void KernAddSignals(float *signals, float *sumSignals, int signalLength, int numSignals, int numPtsPerBlock, int numPtsPerThread)
{
    int startIndex = blockIdx.x * numPtsPerBlock + threadIdx.x * numPtsPerThread ;
    for (int ptNum = 0 ; ptNum < numPtsPerThread; ptNum++)
    {
        // index of data point in signal
        int index = startIndex + ptNum ;
        if (index >= signalLength)
            break ;
        float val = 0 ;
        for (int signalNum = 0 ; signalNum < numSignals ; signalNum++)
        {
            val += signals[index+signalNum*signalLength] ;
        }
        sumSignals[index] = val ;
    }
}

// Kernel that executes convolution. Nothing fancy is done. We don't even try to avoid
// block effects here.
__global__ void KernReverseConvolve(float *signals, float *kernels, float *dataOut, int signalLength, int kernelWidth, int numKernels, int numPtsPerBlock, int numPtsPerThread)
{
    // can probably speed this up well by fetching kernels to local memory or put it in constant memory.
    int kernelIndex = blockIdx.x ;
    int signalStartIndex = kernelIndex * signalLength ;
    int startIndex = blockIdx.y * numPtsPerBlock + threadIdx.x * numPtsPerThread ;
    for (int ptNum = 0 ; ptNum < numPtsPerThread; ptNum++)
    {
        // index of data point in signal
        int index = startIndex + ptNum ;
        if (index >= signalLength)
            break ;
        float val = 0 ;
        int startIndexInKernel = kernelIndex*kernelWidth ;
        for (int wtIndex = 0 ; wtIndex < kernelWidth ; wtIndex++)
        {
            if (wtIndex + index >= signalLength + (kernelWidth-1)/2)
                break ;
            if (index+wtIndex < (kernelWidth-1)/2)
                continue ;
            val += kernels[kernelWidth-1-wtIndex+startIndexInKernel] * signals[signalStartIndex + index + wtIndex - (kernelWidth-1)/2] ;
        }
        dataOut[signalStartIndex+index] = val ;
    }
}
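Neither file in the pair above ships a host-side driver, so the launch geometry has to be read off the kernel indexing: blockIdx.x selects the filter, blockIdx.y selects the signal chunk, and shared memory must hold numPtsPerBlock + kernelWidth - 1 signal samples plus kernelWidth filter taps. The sketch below is an illustration only, not part of the original pair; every host-side name and constant in it (runConvolve, numThreads, the chunk length, and so on) is assumed, and error checking is omitted.

// Minimal host-side launch sketch for KernConvolve (assumes the kernel above is
// visible in the same translation unit). Illustration only.
#include <cuda_runtime.h>

void runConvolve(const float *hSignal, const float *hKernels, float *hOut,
                 int signalLength, int kernelWidth, int numKernels)
{
    const int numThreads = 256 ;       // assumed threads per block
    const int numPtsPerBlock = 1024 ;  // assumed signal chunk handled per block
    const int numChunks = (signalLength + numPtsPerBlock - 1) / numPtsPerBlock ;

    float *dSignal, *dKernels, *dOut ;
    cudaMalloc((void**)&dSignal, signalLength * sizeof(float)) ;
    cudaMalloc((void**)&dKernels, numKernels * kernelWidth * sizeof(float)) ;
    cudaMalloc((void**)&dOut, numKernels * signalLength * sizeof(float)) ;   // one filtered signal per kernel
    cudaMemcpy(dSignal, hSignal, signalLength * sizeof(float), cudaMemcpyHostToDevice) ;
    cudaMemcpy(dKernels, hKernels, numKernels * kernelWidth * sizeof(float), cudaMemcpyHostToDevice) ;

    // grid.x walks the filters, grid.y walks the signal chunks; shared memory holds
    // (numPtsPerBlock + kernelWidth - 1) padded samples plus kernelWidth taps.
    dim3 grid(numKernels, numChunks) ;
    size_t shmem = (numPtsPerBlock + 2 * kernelWidth - 1) * sizeof(float) ;
    KernConvolve<<<grid, numThreads, shmem>>>(dSignal, dKernels, dOut,
                                              signalLength, kernelWidth, numPtsPerBlock) ;

    cudaMemcpy(hOut, dOut, numKernels * signalLength * sizeof(float), cudaMemcpyDeviceToHost) ;
    cudaFree(dSignal) ; cudaFree(dKernels) ; cudaFree(dOut) ;
}

KernReverseConvolve appears to expect the same grid shape (filters in x, signal chunks in y), with each thread additionally covering numPtsPerThread consecutive output points.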
5e74e782bf5b562eb2a70220f810c7f984f2737a.hip
// !!! This is a file automatically generated by hipify!!! /* % Author Information: % Hendrik Dirks % Institute for Computational and Applied Mathematics % University of Muenster, Germany % % Contact: [email protected] % % % Version 1.0 % Date: 2015-06-17 % All Rights Reserved % % Permission to use, copy, modify, and distribute this software and its % documentation for any purpose other than its incorporation into a % commercial product is hereby granted without fee, provided that the % above copyright notice appear in all copies and that both that % copyright notice and this permission notice appear in supporting % documentation, and that the name of the author and University of Muenster not be used in % advertising or publicity pertaining to distribution of the software % without specific, written prior permission. */ //uncomment later and compiler directive //#define __HIPCC__ 1 //#define DO_CUDA_CHECK 1 #define IS_MATLAB 1 #include "mex.h" #include "math.h" #include <omp.h> #include <iostream> #include <stdio.h> #include <sys/types.h> #include <string.h> #include <cstddef> #include <ctime> #include "tools.h" #include "operator/flexLinearOperator.h" #include "operator/flexIdentityOperator.h" #include "operator/flexZeroOperator.h" #include "operator/flexDiagonalOperator.h" #include "operator/flexMatrix.h" #include "operator/flexGradientOperator.h" #include "operator/flexSuperpixelOperator.h" #include "operator/flexConcatOperator.h" #include "flexBox.h" #include "term/flexTerm.h" //prox #include "prox/flexProxDualDataL1.h" #include "prox/flexProxDualDataL2.h" #include "prox/flexProxDualDataKL.h" #include "prox/flexProxDualL1Aniso.h" #include "prox/flexProxDualL1Iso.h" #include "prox/flexProxDualL2.h" #include "prox/flexProxDualLInf.h" #include "prox/flexProxDualL2Inf.h" #include "prox/flexProxDualHuber.h" #include "prox/flexProxDualFrobenius.h" #include "prox/flexProxDualBoxConstraint.h" #include "prox/flexProxDualInnerProduct.h" #include "prox/flexProxDualLabeling.h" typedef float floatingType; flexBox<floatingType>* mainObject = NULL; #ifdef __HIPCC__ using namespace thrust; #include "operator/flexMatrixGPU.h" typedef thrust::device_vector<floatingType> vectorData; #else using namespace std; typedef std::vector<floatingType> vectorData; #endif flexLinearOperator<floatingType>* transformMatlabToFlexOperator(mxArray *pointerA, int verbose, int operatorNumber); void copyToVector(std::vector<floatingType> &vector, const double *input, int numElements); bool checkClassType(mxArray *object, const std::string& className); bool checkSparse(mxArray *object); bool checkProx(mxArray *inputClass,const char* proxName); void copyMatlabToFlexmatrix(const mxArray *input, flexMatrix<floatingType> *output); void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { bool isGPU = false; #ifdef __HIPCC__ isGPU = true; #endif // read firstRun Variable mxArray *firstRunMX = mxGetProperty(prhs[0], 0, "firstRun"); bool firstRun = static_cast<bool>(mxGetScalar(firstRunMX)); if (firstRun) //(re-)init flexBox { if (mainObject != NULL) delete mainObject; mainObject = new flexBox<floatingType>(); mainObject->isMATLAB = true; mexPrintf("FIRST RUN!!!!!\n"); } else { mexPrintf("ALREADY RUN!!!!!\n"); mainObject->setFirstRun(false); } // read params mxArray *params = mxGetProperty(prhs[0],0,"params"); int numMaxIt = mxGetFieldNumber(params, "maxIt"); if (numMaxIt >= 0) { mainObject->maxIterations = (int)mxGetScalar(mxGetFieldByNumber(params, 0, numMaxIt)); } int numVerbose = mxGetFieldNumber(params, 
"verbose"); if (numVerbose >= 0) { mainObject->verbose = (int)mxGetScalar(mxGetFieldByNumber(params, 0, numVerbose)); } int numTol = mxGetFieldNumber(params, "tol"); if (numTol >= 0) { mainObject->tol = (float)mxGetScalar(mxGetFieldByNumber(params, 0, numTol)); } int numCheckError = mxGetFieldNumber(params, "checkError"); if (numCheckError >= 0) { mainObject->checkError = (int)mxGetScalar(mxGetFieldByNumber(params, 0, numCheckError)); } int verbose = mainObject->verbose; if (verbose > 0) { printf("Parameters:\n"); printf("maxIterations: %d\n",mainObject->maxIterations); printf("verbose: %d\n",mainObject->verbose); printf("tol: %f\n", mainObject->tol); printf("checkError: %d\n", mainObject->checkError); } mxArray *x = mxGetProperty(prhs[0], 0, "x"); mxArray* y = mxGetProperty(prhs[0], 0, "y"); int numPrimalVars = (int)mxGetN(x)*(int)mxGetM(x); int numberDualVars; if (firstRun) { mxArray *dims = mxGetProperty(prhs[0], 0, "dims"); for (int i = 0; i < numPrimalVars; ++i) { std::vector<int> _dims; double *input_dims = mxGetPr(mxGetCell(dims, i)); int numberOfElementsVector = 1; for (int j = 0; j < mxGetN(mxGetCell(dims, i)) * mxGetM(mxGetCell(dims, i)); ++j) { _dims.push_back((int)input_dims[j]); numberOfElementsVector *= (int)input_dims[j]; } //add primal variable mainObject->addPrimalVar(_dims); //copy matlab variable to c++ variable std::vector<floatingType> tmpVector(numberOfElementsVector, 0.0); copyToVector(tmpVector, mxGetPr(mxGetCell(x, i)), numberOfElementsVector); mainObject->setPrimal(i, tmpVector); } // copy primal terms mxArray *duals = mxGetProperty(prhs[0], 0, "duals"); mxArray *dcp = mxGetProperty(prhs[0], 0, "DcP"); //numbers of primal variables corresponding to dual terms mxArray *dcd = mxGetProperty(prhs[0], 0, "DcD"); //numbers of dual variables corresponding to dual terms int numDualTerms = (int)mxGetN(duals) * (int)mxGetM(duals); for (int i = 0; i < numDualTerms; ++i) { mxArray* classPointer = mxGetCell(duals, i); const char* class_name = mxGetClassName(mxGetCell(duals, i)); //weight float alpha = (float)mxGetScalar(mxGetProperty(mxGetCell(duals, i), 0, "factor")); if (verbose > 1) { mexPrintf("Dual term %i is of type %s with alpha = %f\n", i, mxGetClassName(mxGetCell(duals, i)), alpha); } double *input_correspondingPrimals = mxGetPr(mxGetCell(dcp, i)); std::vector<int> _correspondingPrimals; for (int j = 0; j < mxGetN(mxGetCell(dcp, i)) * mxGetM(mxGetCell(dcp, i)); ++j) { //decrease number by 1 because C++ internal counter starts at 0 _correspondingPrimals.push_back((int)input_correspondingPrimals[j] - 1); if (verbose > 1) { printf("Dual term #%d corresponds to primal var #%d\n", i, (int)input_correspondingPrimals[j] - 1); } } //create list of operators mxArray *matlabOperatorList = mxGetProperty(mxGetCell(duals, i), 0, "operator"); int numberOfOperators = (int)mxGetN(matlabOperatorList) * (int)mxGetM(matlabOperatorList); std::vector<flexLinearOperator<floatingType>*> operatorList; for (int k = 0; k < numberOfOperators; ++k) { int correspondingNumberPrimalVar = k%_correspondingPrimals.size(); mxArray *pointerA = mxGetCell(matlabOperatorList, k); operatorList.push_back(transformMatlabToFlexOperator(pointerA, verbose, k)); } flexProx<floatingType>* myProx; if (checkProx(classPointer, "L1IsoProxDual")) { myProx = new flexProxDualL1Iso<floatingType>(); } else if (checkProx(classPointer, "L1AnisoProxDual")) { myProx = new flexProxDualL1Aniso<floatingType>(); } else if (checkProx(classPointer, "L2proxDual")) { myProx = new flexProxDualL2<floatingType>(); } else if 
(checkProx(classPointer, "L2InfProxDual")) { myProx = new flexProxDualL2Inf<floatingType>(); } else if (checkProx(classPointer, "LInfProxDual")) { myProx = new flexProxDualLInf<floatingType>(); } else if (checkProx(classPointer, "HuberProxDual")) { float huberEpsilon = (float)mxGetScalar(mxGetProperty(mxGetCell(duals, i), 0, "epsi")); myProx = new flexProxDualHuber<floatingType>(huberEpsilon); } else if (checkProx(classPointer, "FrobeniusProxDual")) { myProx = new flexProxDualFrobenius<floatingType>(); } //data else if (checkProx(classPointer, "L2DataProxDual")) { myProx = new flexProxDualDataL2<floatingType>(); } else if (checkProx(classPointer, "L1DataProxDual")) { myProx = new flexProxDualDataL1<floatingType>(); } else if (checkProx(classPointer, "KLDataProxDual")) { myProx = new flexProxDualDataKL<floatingType>(); } else if (checkProx(classPointer, "constraintBoxDualized")) { float minVal = (float)mxGetScalar(mxGetProperty(mxGetCell(duals, i), 0, "minVal")); float maxVal = (float)mxGetScalar(mxGetProperty(mxGetCell(duals, i), 0, "maxVal")); myProx = new flexProxDualBoxConstraint<floatingType>(minVal, maxVal); } else if (checkProx(classPointer, "innerProductProxDual")) { myProx = new flexProxDualInnerProduct<floatingType>(); } else if (checkProx(classPointer, "labelingProxDual")) { myProx = new flexProxDualLabeling<floatingType>(); } else { mexPrintf("Prox not found"); mexErrMsgTxt("Aborting"); } mxArray* fListInput = mxGetProperty(mxGetCell(duals, i), 0, "f"); int sizeFList = (int)mxGetN(fListInput) * (int)mxGetM(fListInput); std::vector<std::vector<floatingType>> fList; fList.resize(sizeFList); for (int k = 0; k < sizeFList; ++k) { mxArray* fListInputElement = mxGetCell(fListInput, k); //copy elements from matlab to fList vector copyToVector(fList[k], mxGetPr(fListInputElement), (int)mxGetN(fListInputElement) * (int)mxGetM(fListInputElement)); } mainObject->addTerm(new flexTerm<floatingType>(myProx, alpha, (int)_correspondingPrimals.size(), operatorList, fList), _correspondingPrimals); } // copy content for dual vars from MATLAB numberDualVars = mainObject->getNumDualVars(); for (int i = 0; i < numberDualVars; ++i) { mxArray* yElement = mxGetCell(y, i); int numberOfElementsVector = (int)mxGetN(yElement) * (int)mxGetM(yElement); //copy matlab variable to c++ variable std::vector<floatingType> tmpVector(numberOfElementsVector, 0.0); copyToVector(tmpVector, mxGetPr(yElement), numberOfElementsVector); mainObject->setDual(i, tmpVector); } //clean up mxDestroyArray(dims); mxDestroyArray(duals); mxDestroyArray(dcd); mxDestroyArray(dcp); } else numberDualVars = mainObject->getNumDualVars(); //more cleanup mxDestroyArray(params); mxDestroyArray(x); mxDestroyArray(y); mainObject->runAlgorithm(); //send content of primal vars //retrieve results from FlexBox for (int i = 0; i < numPrimalVars; ++i) { std::vector<floatingType> flexResult = mainObject->getPrimal(i); size_t *resultSize = new size_t[2]; resultSize[0] = flexResult.size(); resultSize[1] = 1; plhs[i] = mxCreateNumericArray(2, resultSize, mxDOUBLE_CLASS, mxREAL); double *ptrResult = mxGetPr(plhs[i]); for (int j = 0; j < resultSize[0]; ++j) { ptrResult[j] = flexResult[j]; } delete[] resultSize; } //send content of dual vars //retrieve results from FlexBox for (int i = 0; i < numberDualVars; ++i) { std::vector<floatingType> flexResult = mainObject->getDual(i); size_t *resultSize = new size_t[2]; resultSize[0] = flexResult.size(); resultSize[1] = 1; plhs[numPrimalVars + i] = mxCreateNumericArray(2, resultSize, mxDOUBLE_CLASS, mxREAL); double 
*ptrResult = mxGetPr(plhs[numPrimalVars+i]); for (int j = 0; j < resultSize[0]; ++j) { ptrResult[j] = flexResult[j]; } delete[] resultSize; } } flexLinearOperator<floatingType>* transformMatlabToFlexOperator(mxArray *pointerA, int verbose, int operatorNumber) { flexLinearOperator<floatingType>*A; bool isGPU = false; #ifdef __HIPCC__ isGPU = true; #endif bool isMinus = false; if (mxGetProperty(pointerA, 0, "isMinus") != NULL) //matrix does not have this property { isMinus = mxGetScalar(mxGetProperty(pointerA, 0, "isMinus")) > 0; } if (verbose > 1) { printf("isMinus is set to %d\n", isMinus); } if (checkClassType(pointerA, std::string("gradientOperator"))) { if (verbose > 1) { printf("Operator %d is type <gradientOperator>\n", operatorNumber); } char *gradientTypeString = mxArrayToString(mxGetProperty(pointerA, 0, "type")); int gradientDirection = static_cast<int>(mxGetScalar(mxGetProperty(pointerA, 0, "gradDirection"))) - 1; //substract one! gradientType gradT = gradientType::forward; if (strcmp(gradientTypeString, "backward") == 0) { gradT = backward; } else if (strcmp(gradientTypeString, "central") == 0) { gradT = central; } auto inputDimensionMatlab = mxGetProperty(pointerA, 0, "inputDimension"); double *inputDimensionMatlabPtr = mxGetPr(inputDimensionMatlab); std::vector<int> tmpDiagonal(mxGetM(inputDimensionMatlab) * mxGetN(inputDimensionMatlab), 0); for (int l = 0; l < mxGetM(inputDimensionMatlab) * mxGetN(inputDimensionMatlab); ++l) { tmpDiagonal[l] = static_cast<int>(inputDimensionMatlabPtr[l]); } A = new flexGradientOperator<floatingType>(tmpDiagonal, gradientDirection, gradT, isMinus); } else if (checkClassType(pointerA, std::string("identityOperator"))) { if (verbose > 1) { printf("Operator %d is type <identityOperator>\n", operatorNumber); } int nPx = static_cast<int>(mxGetScalar(mxGetProperty(pointerA, 0, "nPx"))); A = new flexIdentityOperator<floatingType>(nPx, nPx, isMinus); } else if (checkClassType(pointerA, std::string("zeroOperator"))) { if (verbose > 1) { printf("Operator %d is type <zeroOperator>\n", operatorNumber); } int nPx = static_cast<int>(mxGetScalar(mxGetProperty(pointerA, 0, "nPx"))); A = new flexZeroOperator<floatingType>(nPx, nPx, isMinus); } else if (checkClassType(pointerA, std::string("diagonalOperator"))) { if (verbose > 1) { printf("Operator %d is type <diagonalOperator>\n", operatorNumber); } //copy diagonal vector auto diagElements = mxGetProperty(pointerA, 0, "diagonalElements"); double *tmpDiagonalVector = mxGetPr(diagElements); std::vector<floatingType> tmpDiagonal(mxGetM(diagElements) * mxGetN(diagElements), static_cast<floatingType>(0)); for (int l = 0; l < mxGetM(diagElements) * mxGetN(diagElements); ++l) { tmpDiagonal[l] = static_cast<floatingType>(tmpDiagonalVector[l]); } A = new flexDiagonalOperator<floatingType>(tmpDiagonal, isMinus); } else if (checkClassType(pointerA, std::string("concatOperator"))) { if (verbose > 1) { printf("Operator %d is type <concatOperator>\n", operatorNumber); } std::string operationMatlab = mxArrayToString(mxGetProperty(pointerA, 0, "operation")); mySign operation; if (operationMatlab.compare("composition") == 0) { operation = COMPOSE; } else if (operationMatlab.compare("addition") == 0) { operation = PLUS; } else if (operationMatlab.compare("difference") == 0) { operation = MINUS; } else { mexErrMsgTxt("Unknown operation for concatOperator\n"); } auto operator1 = transformMatlabToFlexOperator(mxGetProperty(pointerA, 0, "A"), verbose, operatorNumber); auto operator2 = 
transformMatlabToFlexOperator(mxGetProperty(pointerA, 0, "B"), verbose, operatorNumber); A = new flexConcatOperator<floatingType>(operator1, operator2, operation, isMinus); } else if (checkClassType(pointerA, std::string("superpixelOperator")) && isGPU == false) { if (verbose > 1) { printf("Operator %d is type <superpixelOperator>\n", operatorNumber); } float factor = (float)mxGetScalar(mxGetProperty(pointerA, 0, "factor"));// factor that f is being upsized //dimension of data f auto targetDimensionStruct = mxGetProperty(pointerA, 0, "targetDimension"); double *targetDimensionInput = mxGetPr(targetDimensionStruct); int targetDimensionSize = (int)(mxGetN(targetDimensionStruct) * mxGetM(targetDimensionStruct)); std::vector<int> targetDimension(targetDimensionSize, 0); for (int l = 0; l < targetDimensionSize; ++l) { targetDimension[l] = (int)targetDimensionInput[l]; } A = new flexSuperpixelOperator<floatingType>(targetDimension, factor, isMinus); } else if (checkSparse(pointerA) || (checkClassType(pointerA, std::string("superpixelOperator")) && isGPU == true)) { if (verbose > 1) { printf("Operator %d is type <matrix>\n", operatorNumber); } //check if super pixel operator if (checkClassType(pointerA, std::string("superpixelOperator"))) { pointerA = mxGetProperty(pointerA, 0, "matrix"); } #ifdef __HIPCC__ mwIndex *ir, *jc; jc = mxGetJc(pointerA); ir = mxGetIr(pointerA); double* pr = mxGetPr(pointerA); //matlab stores in compressed column format int numCols = mxGetN(pointerA); int* colList = new int[numCols + 1]; for (int l = 0; l <= numCols; ++l) { colList[l] = jc[l]; } int nnz = colList[numCols]; int* rowList = new int[nnz]; float* valList = new float[nnz]; for (int l = 0; l < nnz; ++l) { rowList[l] = ir[l]; valList[l] = pr[l]; } A = new flexMatrixGPU<floatingType>((int)mxGetM(pointerA), (int)mxGetN(pointerA), rowList, colList, valList, false, isMinus); delete[] colList; delete[] rowList; delete[] valList; #else auto Atmp = new flexMatrix<floatingType>(static_cast<int>(mxGetM(pointerA)), static_cast<int>(mxGetN(pointerA)), isMinus); copyMatlabToFlexmatrix(pointerA, Atmp); A = Atmp; #endif } else { mexErrMsgTxt("Operator type not supported!\n"); } return A; } void copyToVector(std::vector<floatingType> &vector,const double *input, int numElements) { //resize target vector vector.resize(numElements); for (int j = 0; j < numElements; ++j) { vector[j] = (floatingType)input[j]; } } bool checkClassType(mxArray *object, const std::string& className) { mxArray *output[1], *input[2]; input[0] = object; input[1] = mxCreateString(className.c_str()); mexCallMATLAB(1, output, 2, input, "isa"); if (mxGetScalar(output[0]) > 0) { return true; } else { return false; } } bool checkSparse(mxArray *object) { mxArray *output[1], *input[1]; input[0] = object; mexCallMATLAB(1, output, 1, input, "issparse"); if (mxGetScalar(output[0]) > 0) { return true; } else { return false; } } bool checkProx(mxArray *inputClass, const char* proxName) { mxArray *output[1], *input[1]; input[0] = inputClass; mexCallMATLAB(1, output, 1, input, "superclasses"); for (int j = 0; j < mxGetN(output[0]) * mxGetM(output[0]); ++j) { const char* class_name = mxArrayToString(mxGetCell(output[0],j)); if (strcmp(class_name, proxName) == 0) { return true; } } return false; } void copyMatlabToFlexmatrix(const mxArray *input, flexMatrix<floatingType> *output) { double *pr; mwIndex *ir, *jc; mwSize col, total = 0; mwIndex starting_row_index, stopping_row_index, current_row_index; mwSize n; std::vector<int> indexI(0, 0); std::vector<int> indexJ(0, 
0); std::vector<floatingType> indexVal(0, 0.0f); pr = mxGetPr(input); ir = mxGetIr(input); jc = mxGetJc(input); n = mxGetN(input); for (col = 0; col<n; col++) { starting_row_index = jc[col]; stopping_row_index = jc[col + 1]; if (starting_row_index == stopping_row_index) continue; else { for (current_row_index = starting_row_index; current_row_index < stopping_row_index; current_row_index++) { indexI.push_back(static_cast<int>(ir[current_row_index])); indexJ.push_back(static_cast<int>(col)); indexVal.push_back(static_cast<floatingType>(pr[total])); total++; } } } output->blockInsert(indexI, indexJ, indexVal); }
5e74e782bf5b562eb2a70220f810c7f984f2737a.cu
/* % Author Information: % Hendrik Dirks % Institute for Computational and Applied Mathematics % University of Muenster, Germany % % Contact: [email protected] % % % Version 1.0 % Date: 2015-06-17 % All Rights Reserved % % Permission to use, copy, modify, and distribute this software and its % documentation for any purpose other than its incorporation into a % commercial product is hereby granted without fee, provided that the % above copyright notice appear in all copies and that both that % copyright notice and this permission notice appear in supporting % documentation, and that the name of the author and University of Muenster not be used in % advertising or publicity pertaining to distribution of the software % without specific, written prior permission. */ //uncomment later and compiler directive //#define __CUDACC__ 1 //#define DO_CUDA_CHECK 1 #define IS_MATLAB 1 #include "mex.h" #include "math.h" #include <omp.h> #include <iostream> #include <stdio.h> #include <sys/types.h> #include <string.h> #include <cstddef> #include <ctime> #include "tools.h" #include "operator/flexLinearOperator.h" #include "operator/flexIdentityOperator.h" #include "operator/flexZeroOperator.h" #include "operator/flexDiagonalOperator.h" #include "operator/flexMatrix.h" #include "operator/flexGradientOperator.h" #include "operator/flexSuperpixelOperator.h" #include "operator/flexConcatOperator.h" #include "flexBox.h" #include "term/flexTerm.h" //prox #include "prox/flexProxDualDataL1.h" #include "prox/flexProxDualDataL2.h" #include "prox/flexProxDualDataKL.h" #include "prox/flexProxDualL1Aniso.h" #include "prox/flexProxDualL1Iso.h" #include "prox/flexProxDualL2.h" #include "prox/flexProxDualLInf.h" #include "prox/flexProxDualL2Inf.h" #include "prox/flexProxDualHuber.h" #include "prox/flexProxDualFrobenius.h" #include "prox/flexProxDualBoxConstraint.h" #include "prox/flexProxDualInnerProduct.h" #include "prox/flexProxDualLabeling.h" typedef float floatingType; flexBox<floatingType>* mainObject = NULL; #ifdef __CUDACC__ using namespace thrust; #include "operator/flexMatrixGPU.h" typedef thrust::device_vector<floatingType> vectorData; #else using namespace std; typedef std::vector<floatingType> vectorData; #endif flexLinearOperator<floatingType>* transformMatlabToFlexOperator(mxArray *pointerA, int verbose, int operatorNumber); void copyToVector(std::vector<floatingType> &vector, const double *input, int numElements); bool checkClassType(mxArray *object, const std::string& className); bool checkSparse(mxArray *object); bool checkProx(mxArray *inputClass,const char* proxName); void copyMatlabToFlexmatrix(const mxArray *input, flexMatrix<floatingType> *output); void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { bool isGPU = false; #ifdef __CUDACC__ isGPU = true; #endif // read firstRun Variable mxArray *firstRunMX = mxGetProperty(prhs[0], 0, "firstRun"); bool firstRun = static_cast<bool>(mxGetScalar(firstRunMX)); if (firstRun) //(re-)init flexBox { if (mainObject != NULL) delete mainObject; mainObject = new flexBox<floatingType>(); mainObject->isMATLAB = true; mexPrintf("FIRST RUN!!!!!\n"); } else { mexPrintf("ALREADY RUN!!!!!\n"); mainObject->setFirstRun(false); } // read params mxArray *params = mxGetProperty(prhs[0],0,"params"); int numMaxIt = mxGetFieldNumber(params, "maxIt"); if (numMaxIt >= 0) { mainObject->maxIterations = (int)mxGetScalar(mxGetFieldByNumber(params, 0, numMaxIt)); } int numVerbose = mxGetFieldNumber(params, "verbose"); if (numVerbose >= 0) { mainObject->verbose = 
(int)mxGetScalar(mxGetFieldByNumber(params, 0, numVerbose)); } int numTol = mxGetFieldNumber(params, "tol"); if (numTol >= 0) { mainObject->tol = (float)mxGetScalar(mxGetFieldByNumber(params, 0, numTol)); } int numCheckError = mxGetFieldNumber(params, "checkError"); if (numCheckError >= 0) { mainObject->checkError = (int)mxGetScalar(mxGetFieldByNumber(params, 0, numCheckError)); } int verbose = mainObject->verbose; if (verbose > 0) { printf("Parameters:\n"); printf("maxIterations: %d\n",mainObject->maxIterations); printf("verbose: %d\n",mainObject->verbose); printf("tol: %f\n", mainObject->tol); printf("checkError: %d\n", mainObject->checkError); } mxArray *x = mxGetProperty(prhs[0], 0, "x"); mxArray* y = mxGetProperty(prhs[0], 0, "y"); int numPrimalVars = (int)mxGetN(x)*(int)mxGetM(x); int numberDualVars; if (firstRun) { mxArray *dims = mxGetProperty(prhs[0], 0, "dims"); for (int i = 0; i < numPrimalVars; ++i) { std::vector<int> _dims; double *input_dims = mxGetPr(mxGetCell(dims, i)); int numberOfElementsVector = 1; for (int j = 0; j < mxGetN(mxGetCell(dims, i)) * mxGetM(mxGetCell(dims, i)); ++j) { _dims.push_back((int)input_dims[j]); numberOfElementsVector *= (int)input_dims[j]; } //add primal variable mainObject->addPrimalVar(_dims); //copy matlab variable to c++ variable std::vector<floatingType> tmpVector(numberOfElementsVector, 0.0); copyToVector(tmpVector, mxGetPr(mxGetCell(x, i)), numberOfElementsVector); mainObject->setPrimal(i, tmpVector); } // copy primal terms mxArray *duals = mxGetProperty(prhs[0], 0, "duals"); mxArray *dcp = mxGetProperty(prhs[0], 0, "DcP"); //numbers of primal variables corresponding to dual terms mxArray *dcd = mxGetProperty(prhs[0], 0, "DcD"); //numbers of dual variables corresponding to dual terms int numDualTerms = (int)mxGetN(duals) * (int)mxGetM(duals); for (int i = 0; i < numDualTerms; ++i) { mxArray* classPointer = mxGetCell(duals, i); const char* class_name = mxGetClassName(mxGetCell(duals, i)); //weight float alpha = (float)mxGetScalar(mxGetProperty(mxGetCell(duals, i), 0, "factor")); if (verbose > 1) { mexPrintf("Dual term %i is of type %s with alpha = %f\n", i, mxGetClassName(mxGetCell(duals, i)), alpha); } double *input_correspondingPrimals = mxGetPr(mxGetCell(dcp, i)); std::vector<int> _correspondingPrimals; for (int j = 0; j < mxGetN(mxGetCell(dcp, i)) * mxGetM(mxGetCell(dcp, i)); ++j) { //decrease number by 1 because C++ internal counter starts at 0 _correspondingPrimals.push_back((int)input_correspondingPrimals[j] - 1); if (verbose > 1) { printf("Dual term #%d corresponds to primal var #%d\n", i, (int)input_correspondingPrimals[j] - 1); } } //create list of operators mxArray *matlabOperatorList = mxGetProperty(mxGetCell(duals, i), 0, "operator"); int numberOfOperators = (int)mxGetN(matlabOperatorList) * (int)mxGetM(matlabOperatorList); std::vector<flexLinearOperator<floatingType>*> operatorList; for (int k = 0; k < numberOfOperators; ++k) { int correspondingNumberPrimalVar = k%_correspondingPrimals.size(); mxArray *pointerA = mxGetCell(matlabOperatorList, k); operatorList.push_back(transformMatlabToFlexOperator(pointerA, verbose, k)); } flexProx<floatingType>* myProx; if (checkProx(classPointer, "L1IsoProxDual")) { myProx = new flexProxDualL1Iso<floatingType>(); } else if (checkProx(classPointer, "L1AnisoProxDual")) { myProx = new flexProxDualL1Aniso<floatingType>(); } else if (checkProx(classPointer, "L2proxDual")) { myProx = new flexProxDualL2<floatingType>(); } else if (checkProx(classPointer, "L2InfProxDual")) { myProx = new 
flexProxDualL2Inf<floatingType>(); } else if (checkProx(classPointer, "LInfProxDual")) { myProx = new flexProxDualLInf<floatingType>(); } else if (checkProx(classPointer, "HuberProxDual")) { float huberEpsilon = (float)mxGetScalar(mxGetProperty(mxGetCell(duals, i), 0, "epsi")); myProx = new flexProxDualHuber<floatingType>(huberEpsilon); } else if (checkProx(classPointer, "FrobeniusProxDual")) { myProx = new flexProxDualFrobenius<floatingType>(); } //data else if (checkProx(classPointer, "L2DataProxDual")) { myProx = new flexProxDualDataL2<floatingType>(); } else if (checkProx(classPointer, "L1DataProxDual")) { myProx = new flexProxDualDataL1<floatingType>(); } else if (checkProx(classPointer, "KLDataProxDual")) { myProx = new flexProxDualDataKL<floatingType>(); } else if (checkProx(classPointer, "constraintBoxDualized")) { float minVal = (float)mxGetScalar(mxGetProperty(mxGetCell(duals, i), 0, "minVal")); float maxVal = (float)mxGetScalar(mxGetProperty(mxGetCell(duals, i), 0, "maxVal")); myProx = new flexProxDualBoxConstraint<floatingType>(minVal, maxVal); } else if (checkProx(classPointer, "innerProductProxDual")) { myProx = new flexProxDualInnerProduct<floatingType>(); } else if (checkProx(classPointer, "labelingProxDual")) { myProx = new flexProxDualLabeling<floatingType>(); } else { mexPrintf("Prox not found"); mexErrMsgTxt("Aborting"); } mxArray* fListInput = mxGetProperty(mxGetCell(duals, i), 0, "f"); int sizeFList = (int)mxGetN(fListInput) * (int)mxGetM(fListInput); std::vector<std::vector<floatingType>> fList; fList.resize(sizeFList); for (int k = 0; k < sizeFList; ++k) { mxArray* fListInputElement = mxGetCell(fListInput, k); //copy elements from matlab to fList vector copyToVector(fList[k], mxGetPr(fListInputElement), (int)mxGetN(fListInputElement) * (int)mxGetM(fListInputElement)); } mainObject->addTerm(new flexTerm<floatingType>(myProx, alpha, (int)_correspondingPrimals.size(), operatorList, fList), _correspondingPrimals); } // copy content for dual vars from MATLAB numberDualVars = mainObject->getNumDualVars(); for (int i = 0; i < numberDualVars; ++i) { mxArray* yElement = mxGetCell(y, i); int numberOfElementsVector = (int)mxGetN(yElement) * (int)mxGetM(yElement); //copy matlab variable to c++ variable std::vector<floatingType> tmpVector(numberOfElementsVector, 0.0); copyToVector(tmpVector, mxGetPr(yElement), numberOfElementsVector); mainObject->setDual(i, tmpVector); } //clean up mxDestroyArray(dims); mxDestroyArray(duals); mxDestroyArray(dcd); mxDestroyArray(dcp); } else numberDualVars = mainObject->getNumDualVars(); //more cleanup mxDestroyArray(params); mxDestroyArray(x); mxDestroyArray(y); mainObject->runAlgorithm(); //send content of primal vars //retrieve results from FlexBox for (int i = 0; i < numPrimalVars; ++i) { std::vector<floatingType> flexResult = mainObject->getPrimal(i); size_t *resultSize = new size_t[2]; resultSize[0] = flexResult.size(); resultSize[1] = 1; plhs[i] = mxCreateNumericArray(2, resultSize, mxDOUBLE_CLASS, mxREAL); double *ptrResult = mxGetPr(plhs[i]); for (int j = 0; j < resultSize[0]; ++j) { ptrResult[j] = flexResult[j]; } delete[] resultSize; } //send content of dual vars //retrieve results from FlexBox for (int i = 0; i < numberDualVars; ++i) { std::vector<floatingType> flexResult = mainObject->getDual(i); size_t *resultSize = new size_t[2]; resultSize[0] = flexResult.size(); resultSize[1] = 1; plhs[numPrimalVars + i] = mxCreateNumericArray(2, resultSize, mxDOUBLE_CLASS, mxREAL); double *ptrResult = mxGetPr(plhs[numPrimalVars+i]); for (int j = 
0; j < resultSize[0]; ++j) { ptrResult[j] = flexResult[j]; } delete[] resultSize; } } flexLinearOperator<floatingType>* transformMatlabToFlexOperator(mxArray *pointerA, int verbose, int operatorNumber) { flexLinearOperator<floatingType>*A; bool isGPU = false; #ifdef __CUDACC__ isGPU = true; #endif bool isMinus = false; if (mxGetProperty(pointerA, 0, "isMinus") != NULL) //matrix does not have this property { isMinus = mxGetScalar(mxGetProperty(pointerA, 0, "isMinus")) > 0; } if (verbose > 1) { printf("isMinus is set to %d\n", isMinus); } if (checkClassType(pointerA, std::string("gradientOperator"))) { if (verbose > 1) { printf("Operator %d is type <gradientOperator>\n", operatorNumber); } char *gradientTypeString = mxArrayToString(mxGetProperty(pointerA, 0, "type")); int gradientDirection = static_cast<int>(mxGetScalar(mxGetProperty(pointerA, 0, "gradDirection"))) - 1; //substract one! gradientType gradT = gradientType::forward; if (strcmp(gradientTypeString, "backward") == 0) { gradT = backward; } else if (strcmp(gradientTypeString, "central") == 0) { gradT = central; } auto inputDimensionMatlab = mxGetProperty(pointerA, 0, "inputDimension"); double *inputDimensionMatlabPtr = mxGetPr(inputDimensionMatlab); std::vector<int> tmpDiagonal(mxGetM(inputDimensionMatlab) * mxGetN(inputDimensionMatlab), 0); for (int l = 0; l < mxGetM(inputDimensionMatlab) * mxGetN(inputDimensionMatlab); ++l) { tmpDiagonal[l] = static_cast<int>(inputDimensionMatlabPtr[l]); } A = new flexGradientOperator<floatingType>(tmpDiagonal, gradientDirection, gradT, isMinus); } else if (checkClassType(pointerA, std::string("identityOperator"))) { if (verbose > 1) { printf("Operator %d is type <identityOperator>\n", operatorNumber); } int nPx = static_cast<int>(mxGetScalar(mxGetProperty(pointerA, 0, "nPx"))); A = new flexIdentityOperator<floatingType>(nPx, nPx, isMinus); } else if (checkClassType(pointerA, std::string("zeroOperator"))) { if (verbose > 1) { printf("Operator %d is type <zeroOperator>\n", operatorNumber); } int nPx = static_cast<int>(mxGetScalar(mxGetProperty(pointerA, 0, "nPx"))); A = new flexZeroOperator<floatingType>(nPx, nPx, isMinus); } else if (checkClassType(pointerA, std::string("diagonalOperator"))) { if (verbose > 1) { printf("Operator %d is type <diagonalOperator>\n", operatorNumber); } //copy diagonal vector auto diagElements = mxGetProperty(pointerA, 0, "diagonalElements"); double *tmpDiagonalVector = mxGetPr(diagElements); std::vector<floatingType> tmpDiagonal(mxGetM(diagElements) * mxGetN(diagElements), static_cast<floatingType>(0)); for (int l = 0; l < mxGetM(diagElements) * mxGetN(diagElements); ++l) { tmpDiagonal[l] = static_cast<floatingType>(tmpDiagonalVector[l]); } A = new flexDiagonalOperator<floatingType>(tmpDiagonal, isMinus); } else if (checkClassType(pointerA, std::string("concatOperator"))) { if (verbose > 1) { printf("Operator %d is type <concatOperator>\n", operatorNumber); } std::string operationMatlab = mxArrayToString(mxGetProperty(pointerA, 0, "operation")); mySign operation; if (operationMatlab.compare("composition") == 0) { operation = COMPOSE; } else if (operationMatlab.compare("addition") == 0) { operation = PLUS; } else if (operationMatlab.compare("difference") == 0) { operation = MINUS; } else { mexErrMsgTxt("Unknown operation for concatOperator\n"); } auto operator1 = transformMatlabToFlexOperator(mxGetProperty(pointerA, 0, "A"), verbose, operatorNumber); auto operator2 = transformMatlabToFlexOperator(mxGetProperty(pointerA, 0, "B"), verbose, operatorNumber); A = new 
flexConcatOperator<floatingType>(operator1, operator2, operation, isMinus); } else if (checkClassType(pointerA, std::string("superpixelOperator")) && isGPU == false) { if (verbose > 1) { printf("Operator %d is type <superpixelOperator>\n", operatorNumber); } float factor = (float)mxGetScalar(mxGetProperty(pointerA, 0, "factor"));// factor that f is being upsized //dimension of data f auto targetDimensionStruct = mxGetProperty(pointerA, 0, "targetDimension"); double *targetDimensionInput = mxGetPr(targetDimensionStruct); int targetDimensionSize = (int)(mxGetN(targetDimensionStruct) * mxGetM(targetDimensionStruct)); std::vector<int> targetDimension(targetDimensionSize, 0); for (int l = 0; l < targetDimensionSize; ++l) { targetDimension[l] = (int)targetDimensionInput[l]; } A = new flexSuperpixelOperator<floatingType>(targetDimension, factor, isMinus); } else if (checkSparse(pointerA) || (checkClassType(pointerA, std::string("superpixelOperator")) && isGPU == true)) { if (verbose > 1) { printf("Operator %d is type <matrix>\n", operatorNumber); } //check if super pixel operator if (checkClassType(pointerA, std::string("superpixelOperator"))) { pointerA = mxGetProperty(pointerA, 0, "matrix"); } #ifdef __CUDACC__ mwIndex *ir, *jc; jc = mxGetJc(pointerA); ir = mxGetIr(pointerA); double* pr = mxGetPr(pointerA); //matlab stores in compressed column format int numCols = mxGetN(pointerA); int* colList = new int[numCols + 1]; for (int l = 0; l <= numCols; ++l) { colList[l] = jc[l]; } int nnz = colList[numCols]; int* rowList = new int[nnz]; float* valList = new float[nnz]; for (int l = 0; l < nnz; ++l) { rowList[l] = ir[l]; valList[l] = pr[l]; } A = new flexMatrixGPU<floatingType>((int)mxGetM(pointerA), (int)mxGetN(pointerA), rowList, colList, valList, false, isMinus); delete[] colList; delete[] rowList; delete[] valList; #else auto Atmp = new flexMatrix<floatingType>(static_cast<int>(mxGetM(pointerA)), static_cast<int>(mxGetN(pointerA)), isMinus); copyMatlabToFlexmatrix(pointerA, Atmp); A = Atmp; #endif } else { mexErrMsgTxt("Operator type not supported!\n"); } return A; } void copyToVector(std::vector<floatingType> &vector,const double *input, int numElements) { //resize target vector vector.resize(numElements); for (int j = 0; j < numElements; ++j) { vector[j] = (floatingType)input[j]; } } bool checkClassType(mxArray *object, const std::string& className) { mxArray *output[1], *input[2]; input[0] = object; input[1] = mxCreateString(className.c_str()); mexCallMATLAB(1, output, 2, input, "isa"); if (mxGetScalar(output[0]) > 0) { return true; } else { return false; } } bool checkSparse(mxArray *object) { mxArray *output[1], *input[1]; input[0] = object; mexCallMATLAB(1, output, 1, input, "issparse"); if (mxGetScalar(output[0]) > 0) { return true; } else { return false; } } bool checkProx(mxArray *inputClass, const char* proxName) { mxArray *output[1], *input[1]; input[0] = inputClass; mexCallMATLAB(1, output, 1, input, "superclasses"); for (int j = 0; j < mxGetN(output[0]) * mxGetM(output[0]); ++j) { const char* class_name = mxArrayToString(mxGetCell(output[0],j)); if (strcmp(class_name, proxName) == 0) { return true; } } return false; } void copyMatlabToFlexmatrix(const mxArray *input, flexMatrix<floatingType> *output) { double *pr; mwIndex *ir, *jc; mwSize col, total = 0; mwIndex starting_row_index, stopping_row_index, current_row_index; mwSize n; std::vector<int> indexI(0, 0); std::vector<int> indexJ(0, 0); std::vector<floatingType> indexVal(0, 0.0f); pr = mxGetPr(input); ir = mxGetIr(input); jc = 
mxGetJc(input); n = mxGetN(input); for (col = 0; col<n; col++) { starting_row_index = jc[col]; stopping_row_index = jc[col + 1]; if (starting_row_index == stopping_row_index) continue; else { for (current_row_index = starting_row_index; current_row_index < stopping_row_index; current_row_index++) { indexI.push_back(static_cast<int>(ir[current_row_index])); indexJ.push_back(static_cast<int>(col)); indexVal.push_back(static_cast<floatingType>(pr[total])); total++; } } } output->blockInsert(indexI, indexJ, indexVal); }
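// Note: copyMatlabToFlexmatrix above walks MATLAB's compressed-sparse-column arrays
// (jc = column pointers, ir = row indices, pr = values) and converts them into COO
// triplets (indexI, indexJ, indexVal) so the whole matrix can be handed to
// blockInsert() in a single call.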
62b9d8d605352f2def290bd4e030102dfbfd1514.hip
// !!! This is a file automatically generated by hipify!!! #include "operators.h" #include <thrust/iterator/permutation_iterator.h> #include <thrust/set_operations.h> struct is_even { __host__ __device__ bool operator()(const int &x) { return (x % 2) == 0; } }; using namespace mgpu; using namespace std; using namespace thrust::placeholders; size_t int_size = sizeof(int_type); size_t float_size = sizeof(float_type); queue<string> namevars; queue<string> typevars; queue<int> sizevars; queue<int> cols; queue<unsigned int> j_col_count; unsigned int sel_count = 0; unsigned int join_cnt = 0; unsigned int distinct_cnt = 0; unsigned int join_col_cnt = 0; unsigned int join_tab_cnt = 0; unsigned int tab_cnt = 0; queue<string> op_join; queue<char> join_type; queue<char> join_eq_type; unsigned int partition_count; map<string,unsigned int> stat; map<unsigned int, unsigned int> join_and_cnt; map<string, map<string, bool> > used_vars; bool save_dict = 0; ContextPtr context; thrust::device_vector<unsigned char> scratch; map<string, string> filter_var; thrust::device_vector<int> ranj; unsigned long long int currtime; void check_used_vars() { for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) { auto s = (*it).second; auto vars(op_value); while(!vars.empty()) { if(s.count(vars.front()) != 0) { used_vars[(*it).first][vars.front()] = 1; }; vars.pop(); } }; } void emit_name(const char *name) { op_type.push("NAME"); op_value.push(name); } void emit_limit(const int val) { op_nums.push(val); } void emit_string(const char *str) { // remove the float_type quotes if(str[0] == '"') { string sss(str,1, strlen(str)-2); op_value.push(sss); } else { string sss(str); op_value.push(sss); }; op_type.push("STRING"); } void emit_string_grp(const char *str, const char *str_grp) { emit_string(str); grp_val = str_grp; }; void emit_fieldname(const char* name1, const char* name2) { string s1(name1); string s2(name2); op_type.push("FIELD"); op_value.push(s1 + "." + s2); }; void emit_number(const int_type val) { op_type.push("NUMBER"); op_nums.push(val); op_nums_precision.push(0); } void emit_float(const float_type val) { op_type.push("FLOAT"); op_nums_f.push(val); } void emit_decimal(const char* str) { op_type.push("NUMBER"); string s1(str); unsigned int precision; auto pos = s1.find("."); if(pos == std::string::npos) precision = 0; else { precision = (s1.length() - pos) -1; s1.erase(pos,1); }; op_nums.push(stoi(s1)); op_nums_precision.push(precision); cout << "Decimal " << stoi(s1) << " " << precision << endl; } void emit_mul() { op_type.push("MUL"); } void emit_add() { op_type.push("ADD"); } void emit_div() { op_type.push("DIV"); } unsigned int misses = 0; void emit_and() { op_type.push("AND"); join_col_cnt++; } void emit_eq() { op_type.push("JOIN"); join_eq_type.push('E'); if(misses == 0) { join_and_cnt[tab_cnt] = join_col_cnt; misses = join_col_cnt; join_col_cnt = 0; tab_cnt++; } else { misses--; } } void emit_neq() { op_type.push("JOIN"); join_eq_type.push('N'); if(misses == 0) { join_and_cnt[tab_cnt] = join_col_cnt; misses = join_col_cnt; join_col_cnt = 0; tab_cnt++; } else { misses--; } } void emit_distinct() { op_type.push("DISTINCT"); distinct_cnt++; } void emit_year() { op_type.push("YEAR"); } void emit_month() { op_type.push("MONTH"); } void emit_day() { op_type.push("DAY"); } void emit_or() { op_type.push("OR"); } void emit_minus() { op_type.push("MINUS"); } void emit_cmp(int val) { op_type.push("CMP"); op_nums.push(val); } void emit(const char *s, ...) 
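// Intentionally a no-op: the generic emit() hook does nothing. The specific emit_*
// callbacks below are invoked by the SQL parser and only stage their operands on the
// op_type / op_value / op_nums queues; actual execution is deferred until the
// corresponding emit_select / emit_join / emit_order statement is processed.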
{ } void emit_var(const char *s, const int c, const char *f, const char* ref, const char* ref_name) { namevars.push(s); typevars.push(f); sizevars.push(0); cols.push(c); } void emit_var_asc(const char *s) { op_type.push(s); op_value.push("ASC"); } void emit_var_desc(const char *s) { op_type.push(s); op_value.push("DESC"); } void emit_sort(const char *s, const int p) { op_sort.push(s); partition_count = p; } void emit_presort(const char *s) { op_presort.push(s); } void emit_varchar(const char *s, const int c, const char *f, const int d, const char *ref, const char* ref_name) { namevars.push(s); typevars.push(f); sizevars.push(d); cols.push(c); } void emit_vardecimal(const char *s, const int c, const char *f, const int scale, const int precision) { namevars.push(s); typevars.push(f); sizevars.push(precision); cols.push(c); } void emit_sel_name(const char *s) { op_type.push("emit sel_name"); op_value.push(s); sel_count++; } void emit_count() { op_type.push("COUNT"); } void emit_sum() { op_type.push("SUM"); } void emit_average() { op_type.push("AVG"); } void emit_min() { op_type.push("MIN"); } void emit_max() { op_type.push("MAX"); } void emit_join_tab(const char *s, const char tp) { op_join.push(s); join_tab_cnt++; join_type.push(tp); }; void order_inplace_host(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str) { unsigned int* permutation = new unsigned int[a->mRecCount]; thrust::sequence(permutation, permutation + a->mRecCount); char* temp = new char[a->mRecCount*max_char(a)]; stack<string> exe_type1(exe_type), exe_value; while(!exe_type1.empty()) { exe_value.push("ASC"); exe_type1.pop(); }; // sort on host for(;!exe_type.empty(); exe_type.pop(),exe_value.pop()) { if (a->type[exe_type.top()] != 1) update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp); else update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp); }; for (auto it=field_names.begin(); it!=field_names.end(); ++it) { if (a->type[*it] != 1) { thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_int[*it].data(), (int_type*)temp); thrust::copy((int_type*)temp, (int_type*)temp + a->mRecCount, a->h_columns_int[*it].data()); } else { thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_float[*it].data(), (float_type*)temp); thrust::copy((float_type*)temp, (float_type*)temp + a->mRecCount, a->h_columns_float[*it].data()); } }; delete [] temp; delete [] permutation; } void order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str) { if(scratch.size() < a->mRecCount*4) scratch.resize(a->mRecCount*4); thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data())); thrust::sequence(permutation, permutation+a->mRecCount,0,1); unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation); if(a->grp.size() < a->mRecCount*8) a->grp.resize(a->mRecCount*8); unsigned int bits; for(; !exe_type.empty(); exe_type.pop()) { if(cpy_bits.empty()) bits = 0; else bits = cpy_bits[exe_type.top()]; if (a->type[exe_type.top()] != 1) { update_permutation(a->d_columns_int[exe_type.top()], raw_ptr, a->mRecCount, "ASC", (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits); } else update_permutation(a->d_columns_float[exe_type.top()], raw_ptr, a->mRecCount,"ASC", (float_type*)thrust::raw_pointer_cast(a->grp.data()), bits); }; for (auto it=field_names.begin(); it!=field_names.end(); ++it) { 
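// Apply the permutation computed above to every requested column; cpy_bits supplies
// the packed bit width for columns that are still in compressed form (bits stays 0
// when no compression metadata is available).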
if(cpy_bits.empty()) bits = 0; else bits = cpy_bits[*it]; if (a->type[*it] != 1) { apply_permutation(a->d_columns_int[*it], raw_ptr, a->mRecCount, (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits); } else { apply_permutation(a->d_columns_float[*it], raw_ptr, a->mRecCount, (float_type*)thrust::raw_pointer_cast(a->grp.data()), bits); }; }; } bool check_star_join(const string j1) { auto op_vals(op_value); for(auto i=0; i < sel_count; i++) { op_vals.pop(); op_vals.pop(); }; if(join_tab_cnt > 0) { while(op_vals.size()) { if (std::find(varNames[j1]->columnNames.begin(), varNames[j1]->columnNames.end(), op_vals.front()) != varNames[j1]->columnNames.end()) { op_vals.pop(); op_vals.pop(); } else { return 0; }; }; if(join_tab_cnt == 1) { if(!check_bitmap_file_exist(varNames[j1], varNames[op_join.front()])) { return 0; }; }; return 1; } else return 0; } void star_join(const char *s, const string j1) { map<string,bool> already_copied; queue<string> op_left; CudaSet* left = varNames.find(j1)->second; queue<string> op_sel; queue<string> op_sel_as; for(auto i=0; i < sel_count; i++) { if(std::find(left->columnNames.begin(), left->columnNames.end(), op_value.front()) != left->columnNames.end()) op_left.push(op_value.front()); op_sel.push(op_value.front()); op_value.pop(); op_sel_as.push(op_value.front()); op_value.pop(); }; auto op_sel_s(op_sel), op_sel_s_as(op_sel_as), op_g(op_value); CudaSet* c = new CudaSet(op_sel_s, op_sel_s_as); string f1, f2; map<string, string> key_map; map<string, char> sort_map; map<string, string> r_map; for(auto i = 0; i < join_tab_cnt; i++) { f1 = op_g.front(); op_g.pop(); f2 = op_g.front(); op_g.pop(); r_map[f1] = f2; queue<string> op_jj(op_join); for(auto z = 0; z < (join_tab_cnt-1) - i; z++) op_jj.pop(); size_t rcount; queue<string> op_vd(op_g), op_alt(op_sel); unsigned int jc = join_col_cnt; while(jc) { jc--; op_vd.pop(); op_alt.push(op_vd.front()); op_vd.pop(); }; key_map[op_jj.front()] = f1; CudaSet* right = varNames.find(op_jj.front())->second; if(!check_bitmaps_exist(left, right)) { cout << "Required bitmap on table " << op_jj.front() << " doesn't exists" << endl; exit(0); }; queue<string> second; while(!op_alt.empty()) { if(f2.compare(op_alt.front()) != 0 && std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) { second.push(op_alt.front()); //cout << "col " << op_alt.front() << " " << op_jj.front() << endl; op_left.push(f1); }; op_alt.pop(); }; if(!second.empty()) { right->filtered = 0; right->mRecCount = right->maxRecs; load_queue(second, right, "", rcount, 0, right->segCount, 0,0); // put all used columns into GPU }; }; queue<string> idx; set<string> already_loaded; bool right_cpy = 0; for (unsigned int i = 0; i < left->segCount; i++) { std::clock_t start2 = std::clock(); if(verbose) cout << "segment " << i << " " << getFreeMem() << endl; idx = left->fil_value; already_loaded.clear(); while(!idx.empty()) { //load the index if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) { //extract table name and colname from index name already_loaded.insert(idx.front()); size_t pos1 = idx.front().find_first_of(".", 0); size_t pos2 = idx.front().find_first_of(".", pos1+1); CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second; char a; //cout << "loading index " << idx.front() << endl; a = left->loadIndex(idx.front(), i); sort_map[idx.front().substr(pos1+1, pos2-pos1-1)] = a; }; idx.pop(); }; left->filtered = 0; size_t cnt_c = 0; 
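// Evaluate the WHERE clause for this fact-table segment on the GPU: load the columns
// referenced by the filter, run filter() to obtain a boolean mask, count the hits,
// and compact the surviving row numbers into left->prm_d with copy_if.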
allocColumns(left, left->fil_value); copyColumns(left, left->fil_value, i, cnt_c); bool* res = filter(left->fil_type, left->fil_value, left->fil_nums, left->fil_nums_f, left->fil_nums_precision, left, i); thrust::device_ptr<bool> star((bool*)res); size_t cnt = thrust::count(star, star + (unsigned int)left->mRecCount, 1); cout << "join res " << cnt << " out of " << left->mRecCount << endl; thrust::host_vector<unsigned int> prm_vh(cnt); thrust::device_vector<unsigned int> prm_v(cnt); thrust::host_vector<unsigned int> prm_tmp(cnt); thrust::device_vector<unsigned int> prm_tmp_d(cnt); //std::cout<< "seg filter " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; if(cnt) { //gather //start1 = std::clock(); left->prm_d.resize(cnt); thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)left->mRecCount-1), star, left->prm_d.begin(), thrust::identity<bool>()); thrust::device_free(star); prm_vh = left->prm_d; size_t offset = c->mRecCount; c->resize_join(cnt); queue<string> op_sel1(op_sel_s); void* temp; CUDA_SAFE_CALL(hipMalloc((void **) &temp, cnt*max_char(c))); hipMemset(temp,0,cnt*max_char(c)); CudaSet *t; unsigned int cnt1, bits; int_type lower_val; thrust::device_vector<unsigned int> output(cnt); //std::cout<< "seg start " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; while(!op_sel1.empty()) { if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end()) { //cout << "Left " << op_sel1.front() << endl; if(left->filtered) t = varNames[left->source_name]; else t = left; if(left->type[op_sel1.front()] <= 1) { if(ssd && !interactive) { //start1 = std::clock(); lower_val = t->readSsdSegmentsFromFile(i, op_sel1.front(), offset, prm_vh, c); //std::cout<< "SSD L SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; } else { t->readSegmentsFromFile(i, op_sel1.front()); void* h; if(!interactive) { if(left->type[op_sel1.front()] == 0) h = t->h_columns_int[op_sel1.front()].data(); else h = t->h_columns_float[op_sel1.front()].data(); } else { string ff = t->load_file_name + "." + op_sel1.front()+ "." 
+ to_string(i); h = buffers[ff]; }; cnt1 = ((unsigned int*)h)[0];//bytes lower_val = ((int_type*)(((unsigned int*)h)+1))[0]; bits = ((unsigned int*)((char*)h + cnt1))[8]; //cout << cnt1 << " " << lower_val << " " << bits << " " << left->type[op_sel1.front()] << endl; if(bits == 8) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), ptr + offset); }; } else if(bits == 16) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), ptr + offset); }; } else if(bits == 32) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), ptr + offset); } } else if(bits == 64) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), ptr + offset); }; }; }; //cout << "lower_val bits " << lower_val << " " << bits << endl; if(left->type[op_sel1.front()] != 1) thrust::transform( c->h_columns_int[op_sel1.front()].begin() + offset, c->h_columns_int[op_sel1.front()].begin() + offset + cnt, thrust::make_constant_iterator(lower_val), c->h_columns_int[op_sel1.front()].begin() + offset, thrust::plus<int_type>()); else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::transform(ptr + offset, ptr + offset + cnt, thrust::make_constant_iterator(lower_val), ptr + offset, thrust::plus<int_type>()); thrust::transform(ptr + offset, ptr + offset + cnt, c->h_columns_float[op_sel1.front()].begin() + offset, long_to_float()); }; } else { //gather string. There are no strings in fact tables. }; } else { for(auto it = key_map.begin(); it != key_map.end(); it++) { CudaSet* r = varNames.find(it->first)->second; if(std::find(r->columnNames.begin(), r->columnNames.end(), op_sel1.front()) != r->columnNames.end()) { if(i == 0) { if(data_dict[varNames[it->first]->load_file_name][op_sel1.front()].col_type == 2) { //cout << "SET " << op_sel1.front() << " to " << varNames[it->first]->load_file_name + "." + op_sel1.front() << endl; c->string_map[op_sel1.front()] = varNames[it->first]->load_file_name + "." + op_sel1.front(); }; } if(left->filtered) t = varNames[left->source_name]; else t = left; if(ssd && !interactive) { //start1 = std::clock(); lower_val = t->readSsdSegmentsFromFileR(i, key_map[it->first], prm_vh, prm_tmp); //std::cout<< "SSD R SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; } else { t->readSegmentsFromFile(i, key_map[it->first]); void* h; if(!interactive) { h = t->h_columns_int[key_map[it->first]].data(); } else { string ff = t->load_file_name + "." 
+ key_map[it->first] + "." + to_string(i); h = buffers[ff]; }; cnt1 = ((unsigned int*)h)[0]; lower_val = ((int_type*)(((unsigned int*)h)+1))[0]; bits = ((unsigned int*)((char*)h + cnt1))[8]; //cout << cnt1 << " " << lower_val << " " << bits << endl; if(bits == 8) { thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), prm_tmp.begin()); } else if(bits == 16) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), prm_tmp.begin()); } else if(bits == 32) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), prm_tmp.begin()); } else if(bits == 64) { thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), prm_tmp.begin()); }; }; if(lower_val != 1) thrust::transform(prm_tmp.begin(), prm_tmp.end(), thrust::make_constant_iterator(lower_val-1), prm_tmp.begin(), thrust::plus<unsigned int>()); if(sort_map[r->source_name] == '1') { // sorted consecutive starting with 1 dimension keys prm_tmp_d = prm_tmp; //cout << "PATH 1 " << endl; } else { //cout << "PATH 2 " << r->source_name << endl; output = prm_tmp; if(r->d_columns_int[r_map[key_map[it->first]]].size() == 0) { r->d_columns_int[r_map[key_map[it->first]]].resize(r->maxRecs); }; if(right_cpy == 0) { r->CopyColumnToGpu(r_map[key_map[it->first]]); }; thrust::lower_bound(r->d_columns_int[r_map[key_map[it->first]]].begin(), r->d_columns_int[r_map[key_map[it->first]]].end(), output.begin(), output.end(), prm_tmp_d.begin()); }; if(r->type[op_sel1.front()] != 1) { thrust::device_ptr<int_type> d_tmp((int_type*)temp); thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_int[op_sel1.front()].begin() + offset); } else { thrust::device_ptr<float_type> d_tmp((float_type*)temp); thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_float[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_float[op_sel1.front()].begin() + offset); }; break; }; }; }; op_sel1.pop(); //std::cout<< ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; }; hipFree(temp); right_cpy = 1; }; //std::cout<< "SEG " << i << " " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; //unload the segment indexes : idx = left->fil_value; already_loaded.clear(); while(!idx.empty()) { if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) { //extract table name and colname from index name already_loaded.insert(idx.front()); size_t pos1 = idx.front().find_first_of(".", 0); size_t pos2 = idx.front().find_first_of(".", pos1+1); CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second; string f1 = idx.front() + "." 
+ to_string(i); auto it = index_buffers.find(f1); if(it != index_buffers.end()) { hipHostFree(index_buffers[f1]); index_buffers.erase(it); }; }; idx.pop(); }; }; //if(verbose) // std::cout<< "star join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; while(!op_join.empty()) { varNames[op_join.front()]->deAllocOnDevice(); op_join.pop(); }; varNames[s] = c; c->maxRecs = c->mRecCount; if(verbose) cout << endl << "join count " << c->mRecCount << endl; }; void emit_join(const char *s, const char *j1, const int grp, const int start_seg, const int end_seg) { //cout << "emit_join " << s << " " << join_tab_cnt << " " << op_join.front() << endl; statement_count++; if (scan_state == 0) { if (stat.find(j1) == stat.end() && data_dict.count(j1) == 0) { process_error(2, "Join : couldn't find variable " + string(j1) ); }; if (stat.find(op_join.front()) == stat.end() && data_dict.count(op_join.front()) == 0) { process_error(2, "Join : couldn't find variable " + op_join.front() ); }; stat[s] = statement_count; stat[j1] = statement_count; if(filter_var.find(j1) != filter_var.end()) { stat[filter_var[j1]] = statement_count; }; check_used_vars(); while(!op_join.empty()) { stat[op_join.front()] = statement_count; if(filter_var.find(op_join.front()) != filter_var.end()) { stat[filter_var[op_join.front()]] = statement_count; }; op_join.pop(); }; return; }; queue<string> op_m(op_value); if(check_star_join(j1)) { if(verbose) cout << "executing star join !! " << endl; star_join(s, j1); } else { if(join_tab_cnt > 1) { string tab_name; for(unsigned int i = 1; i <= join_tab_cnt; i++) { if(i == join_tab_cnt) tab_name = s; else tab_name = s + to_string(i); string j, j2; if(i == 1) { j2 = op_join.front(); op_join.pop(); j = op_join.front(); op_join.pop(); } else { if(!op_join.empty()) { j = op_join.front(); op_join.pop(); } else j = j1; j2 = s + to_string(i-1); }; emit_multijoin(tab_name, j, j2, i, s, start_seg, end_seg); op_value = op_m; }; } else { emit_multijoin(s, j1, op_join.front(), 1, s, start_seg, end_seg); op_join.pop(); }; }; queue<string> op_sel; queue<string> op_sel_as; for(int i=0; i < sel_count; i++) { op_sel.push(op_m.front()); op_m.pop(); op_sel_as.push(op_m.front()); op_m.pop(); }; while(!op_sel_as.empty()) { //cout << "alias " << op_sel.front() << " : " << op_sel_as.front() << endl; if(op_sel.front() != op_sel_as.front()) { if(varNames[s]->type[op_sel.front()] == 0) { varNames[s]->h_columns_int[op_sel_as.front()] = varNames[s]->h_columns_int[op_sel.front()]; varNames[s]->h_columns_int.erase(op_sel.front()); varNames[s]->d_columns_int[op_sel_as.front()] = varNames[s]->d_columns_int[op_sel.front()]; varNames[s]->d_columns_int.erase(op_sel.front()); varNames[s]->type[op_sel_as.front()] = 0; varNames[s]->type.erase(op_sel.front()); } else if(varNames[s]->type[op_sel.front()] == 1) { varNames[s]->h_columns_float[op_sel_as.front()] = varNames[s]->h_columns_float[op_sel.front()]; varNames[s]->h_columns_float.erase(op_sel.front()); varNames[s]->d_columns_float[op_sel_as.front()] = varNames[s]->d_columns_float[op_sel.front()]; varNames[s]->d_columns_float.erase(op_sel.front()); varNames[s]->type[op_sel_as.front()] = 1; varNames[s]->type.erase(op_sel.front()); varNames[s]->decimal.erase(op_sel.front()); } else { varNames[s]->h_columns_char[op_sel_as.front()] = varNames[s]->h_columns_char[op_sel.front()]; varNames[s]->h_columns_char.erase(op_sel.front()); varNames[s]->d_columns_char[op_sel_as.front()] = varNames[s]->d_columns_char[op_sel.front()]; 
varNames[s]->d_columns_char.erase(op_sel.front()); varNames[s]->type[op_sel_as.front()] = 2; varNames[s]->type.erase(op_sel.front()); varNames[s]->char_size[op_sel_as.front()] = varNames[s]->char_size[op_sel.front()]; varNames[s]->char_size.erase(op_sel.front()); }; varNames[s]->decimal[op_sel_as.front()] = varNames[s]->decimal[op_sel.front()]; auto it = std::find(varNames[s]->columnNames.begin(), varNames[s]->columnNames.end(), op_sel.front()); *it = op_sel_as.front(); }; op_sel_as.pop(); op_sel.pop(); }; clean_queues(); if(stat[s] == statement_count) { varNames[s]->free(); varNames.erase(s); }; if(op_join.size()) { if(stat[op_join.front()] == statement_count && op_join.front().compare(j1) != 0) { varNames[op_join.front()]->free(); varNames.erase(op_join.front()); }; }; } template<typename T, typename P> void p_gather(thrust::host_vector<int>& h_tmp, T* h, P* dest) { for(int i = 0; i < h_tmp.size(); i++) { dest[i] = h[h_tmp[i]]; }; }; void emit_multijoin(const string s, const string j1, const string j2, const unsigned int tab, const char* res_name, const int start_segment, const int end_segment) { if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) { clean_queues(); if(varNames.find(j1) == varNames.end()) cout << "Couldn't find j1 " << j1 << endl; if(varNames.find(j2) == varNames.end()) cout << "Couldn't find j2 " << j2 << " here " << endl; return; }; CudaSet* left = varNames.find(j1)->second; CudaSet* right = varNames.find(j2)->second; queue<string> op_sel; queue<string> op_sel_as; for(int i=0; i < sel_count; i++) { op_sel.push(op_value.front()); op_value.pop(); op_sel_as.push(op_value.front()); op_value.pop(); }; queue<string> op_sel_s(op_sel); queue<string> op_sel_s_as(op_sel_as); queue<string> op_g(op_value); if(tab > 0) { for(unsigned int z = 0; z < join_tab_cnt - tab; z++) { for(unsigned int j = 0; j < join_and_cnt[z]*2 + 2; j++) { op_sel_s.push(op_g.front()); op_sel_s_as.push(op_g.front()); op_g.pop(); }; }; }; string f1 = op_g.front(); op_g.pop(); string f2 = op_g.front(); op_g.pop(); if (verbose) cout << "JOIN " << s << " " << f1 << " " << f2 << " " << getFreeMem() << " " << phase_copy << endl; std::clock_t start1 = std::clock(); CudaSet* c = new CudaSet(right, left, op_sel_s, op_sel_s_as); if ((left->mRecCount == 0 && !left->filtered) || (right->mRecCount == 0 && !right->filtered)) { c = new CudaSet(left, right, op_sel_s, op_sel_s_as); varNames[res_name] = c; clean_queues(); return; }; if(join_tab_cnt > 1 && tab < join_tab_cnt) c->tmp_table = 1; else c->tmp_table = 0; string colname1, colname2; string tmpstr; if (std::find(left->columnNames.begin(), left->columnNames.end(), f1) != left->columnNames.end()) { colname1 = f1; if (std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) { colname2 = f2; } else { process_error(2, "Couldn't find column " + f2 ); }; } else if (std::find(right->columnNames.begin(), right->columnNames.end(), f1) != right->columnNames.end()) { colname2 = f1; tmpstr = f1; f1 = f2; if (std::find(left->columnNames.begin(), left->columnNames.end(), f2) != left->columnNames.end()) { colname1 = f2; f2 = tmpstr; } else { process_error(2, "Couldn't find column " +f2 ); }; } else { process_error(2, "Couldn't find column " + f1); }; if (!((left->type[colname1] == 0 && right->type[colname2] == 0) || (left->type[colname1] == 2 && right->type[colname2] == 2) || (left->type[colname1] == 1 && right->type[colname2] == 1 && left->decimal[colname1] && right->decimal[colname2]))) { process_error(2, "Joins on 
floats are not supported "); }; //bool decimal_join = 0; //if (left->type[colname1] == 1 && right->type[colname2] == 1) // decimal_join = 1; queue<string> op_vd(op_g); queue<string> op_g1(op_g); queue<string> op_alt(op_sel); unsigned int jc = join_and_cnt[join_tab_cnt - tab]; while(jc) { jc--; op_vd.pop(); op_alt.push(op_vd.front()); op_vd.pop(); }; size_t rcount = 0, cnt_r; queue<string> cc; if (left->type[colname1] == 2) { left->d_columns_int[colname1] = thrust::device_vector<int_type>(); } else { cc.push(f1); allocColumns(left, cc); }; left->hostRecCount = left->mRecCount; size_t cnt_l, res_count, tot_count = 0, offset = 0, k = 0; queue<string> lc(cc); thrust::device_vector<unsigned int> v_l(left->maxRecs); MGPU_MEM(int) aIndicesDevice, bIndicesDevice, intersectionDevice; stack<string> exe_type; set<string> field_names; exe_type.push(f2); for(unsigned int i = 0; i < right->columnNames.size(); i++) { if (std::find(c->columnNames.begin(), c->columnNames.end(), right->columnNames[i]) != c->columnNames.end() || right->columnNames[i] == f2 || join_and_cnt[join_tab_cnt - tab]) { field_names.insert(right->columnNames[i]); }; }; thrust::device_vector<int> p_tmp; unsigned int start_part = 0; bool prejoin = 0; while(start_part < right->segCount) { right->deAllocOnDevice(); std::clock_t start12 = std::clock(); if(right->not_compressed || (!right->filtered && getFreeMem() < right->columnNames.size()*right->hostRecCount*8*2)) { cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, start_part+1); start_part = start_part+1; } else { cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, right->segCount); start_part = right->segCount; for(unsigned int i=0; i < right->columnNames.size(); i++) { if (right->type[right->columnNames[i]] != 1) { right->d_columns_int[right->columnNames[i]].shrink_to_fit(); } else right->d_columns_float[right->columnNames[i]].shrink_to_fit(); }; }; right->mRecCount = cnt_r; bool order = 1; if(!right->presorted_fields.empty() && right->presorted_fields.front() == f2) { order = 0; //cout << "No need to sort " << endl; if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size()) right->sort_check = '1'; else { right->sort_check = '0'; }; }; if(order) { if(thrust::is_sorted(right->d_columns_int[f2].begin(), right->d_columns_int[f2].end())) { if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size()) { right->sort_check = '1'; } else { right->sort_check = '0'; }; } else { //cout << "sorting " << endl; size_t tot_size = right->mRecCount*8*right->columnNames.size(); if (getFreeMem() > tot_size*1.5) { order_inplace(right, exe_type, field_names, 0); } else { //for(unsigned int i = 0; i < right->columnNames.size(); i++) { for (auto it=field_names.begin(); it!=field_names.end(); ++it) { //cout << "sorting " << *it << endl; if(right->type[*it] != 1) { if(right->h_columns_int[*it].size() < right->mRecCount) right->h_columns_int[*it].resize(right->mRecCount); thrust::copy(right->d_columns_int[*it].begin(), right->d_columns_int[*it].begin() + right->mRecCount, right->h_columns_int[*it].begin()); } else { if(right->type[*it] == 1) { if(right->h_columns_float[*it].size() < right->mRecCount) right->h_columns_float[*it].resize(right->mRecCount); }; thrust::copy(right->d_columns_float[*it].begin(), right->d_columns_float[*it].begin() + right->mRecCount, right->h_columns_float[*it].begin()); }; }; order_inplace_host(right, 
exe_type, field_names, 0); for (auto it=field_names.begin(); it!=field_names.end(); ++it) { if(right->type[*it] != 1) thrust::copy(right->h_columns_int[*it].begin(), right->h_columns_int[*it].begin() + right->mRecCount, right->d_columns_int[*it].begin()); else thrust::copy(right->h_columns_float[*it].begin(), right->h_columns_float[*it].begin() + right->mRecCount, right->d_columns_float[*it].begin()); }; }; }; }; //std::cout<< "join right load time " << ( ( std::clock() - start12 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n'; int e_segment; if(end_segment == -1) { e_segment = left->segCount; } else e_segment = end_segment; for (unsigned int i = start_segment; i < e_segment; i++) { if(verbose) //cout << "segment " << i << '\xd'; cout << "segment " << i << endl; cnt_l = 0; /*left->readSegmentsFromFile(i, colname1); void* h; h = left->h_columns_int[colname1].data(); auto cnt1 = ((unsigned int*)h)[0]; auto lower_val = ((int_type*)(((unsigned int*)h)+1))[0]; auto bits = ((unsigned int*)((char*)h + cnt1))[8]; cout << "Partition " << cnt1 << " " << lower_val << " " << bits << endl; std::clock_t start15 = std::clock(); if(bits == 8) { thrust::stable_partition((char*)((unsigned int*)h + 6), (char*)((unsigned int*)h + 6) + cnt1, is_even()); } else if(bits == 16) { thrust::stable_partition((unsigned short int*)((unsigned int*)h + 6), (unsigned short int*)((unsigned int*)h + 6) + cnt1/2, is_even()); } else if(bits == 32) { thrust::stable_partition(((unsigned int*)h + 6), ((unsigned int*)h + 6) + cnt1/4, is_even()); } else if(bits == 64) { thrust::stable_partition((int_type*)((unsigned int*)h + 6), (int_type*)((unsigned int*)h + 6) + cnt1/8, is_even()); }; auto new_cnt = hipDeviceSynchronize(); std::cout<< "partition time " << ( ( std::clock() - start15 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n'; exit(0); */ copyColumns(left, lc, i, cnt_l); cnt_l = left->mRecCount; auto join_eq_type1(join_eq_type); if (cnt_l) { // sort the left index column, save the permutation vector, it might be needed later thrust::device_ptr<int_type> d_col((int_type*)thrust::raw_pointer_cast(left->d_columns_int[colname1].data())); thrust::sequence(v_l.begin(), v_l.begin() + cnt_l,0,1); bool do_sort = 1; if(!left->sorted_fields.empty()) { if(left->sorted_fields.front() == f1) { do_sort = 0; }; } else if(!left->presorted_fields.empty()) { if(left->presorted_fields.front() == f1) { do_sort = 0; }; }; if(do_sort) { thrust::sort_by_key(d_col, d_col + cnt_l, v_l.begin()); } else if(verbose) cout << "No need of sorting " << endl; if(prejoin) { res_count = SetOpKeys<MgpuSetOpIntersection, true>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &intersectionDevice, *context, false); if(!res_count) continue; }; if(verbose) cout << "join " << cnt_l << ":" << cnt_r << " " << join_type.front() << endl; /*if(cnt_r > 10) { for(int z = 0; z < 10 ; z++) cout << " R " << right->d_columns_int[colname2][(cnt_r-1)-z] << endl; for(int z = 0; z < 10 ; z++) cout << " L " << left->d_columns_int[colname1][(cnt_l-1)-z] << endl; }; */ if (left->d_columns_int[colname1][0] > right->d_columns_int[colname2][cnt_r-1] || left->d_columns_int[colname1][cnt_l-1] < right->d_columns_int[colname2][0]) { if(verbose) cout << endl << "skipping after copying " << endl; continue; }; //else // cout << "JOINING " << left->d_columns_int[colname1][0] << ":" << left->d_columns_int[colname1][cnt_l-1] << " AND " << right->d_columns_int[colname2][0] << ":" << 
right->d_columns_int[colname2][cnt_r-1] << endl; //cout << "joining " << left->d_columns_int[colname1][0] << " : " << left->d_columns_int[colname1][cnt_l-1] << " and " << right->d_columns_int[colname2][0] << " : " << right->d_columns_int[colname2][cnt_r-1] << endl; char join_kind = join_type.front(); std::clock_t start11 = std::clock(); if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') res_count = RelationalJoin<MgpuJoinKindInner>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &aIndicesDevice, &bIndicesDevice, mgpu::less<int_type>(), *context); else if(join_kind == 'L') res_count = RelationalJoin<MgpuJoinKindLeft>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &aIndicesDevice, &bIndicesDevice, mgpu::less<int_type>(), *context); else if(join_kind == 'R') res_count = RelationalJoin<MgpuJoinKindRight>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &aIndicesDevice, &bIndicesDevice, mgpu::less<int_type>(), *context); else if(join_kind == 'O') res_count = RelationalJoin<MgpuJoinKindOuter>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &aIndicesDevice, &bIndicesDevice, mgpu::less<int_type>(), *context); if(verbose) std::cout<< "join time " << ( ( std::clock() - start11 ) / (double)CLOCKS_PER_SEC ) << '\n'; if(verbose) cout << "RES " << res_count << endl; if(res_count == 0) prejoin = 1; int* r1 = aIndicesDevice->get(); thrust::device_ptr<int> d_res1((int*)r1); int* r2 = bIndicesDevice->get(); thrust::device_ptr<int> d_res2((int*)r2); if(res_count) { p_tmp.resize(res_count); thrust::sequence(p_tmp.begin(), p_tmp.end(),-1); thrust::gather_if(d_res1, d_res1+res_count, d_res1, v_l.begin(), p_tmp.begin(), _1 >= 0); }; // check if the join is a multicolumn join unsigned int mul_cnt = join_and_cnt[join_tab_cnt - tab]; while(mul_cnt) { mul_cnt--; queue<string> mult(op_g); string f3 = mult.front(); mult.pop(); string f4 = mult.front(); mult.pop(); //cout << "ADDITIONAL COL JOIN " << f3 << " " << f4 << " " << join_eq_type.front() << endl; queue<string> rc; rc.push(f3); allocColumns(left, rc); size_t offset = 0; copyColumns(left, rc, i, offset, 0, 0); rc.pop(); if (res_count) { thrust::device_ptr<bool> d_add = thrust::device_malloc<bool>(res_count); if(right->d_columns_int[f4].size() == 0) load_queue(rc, right, f4, rcount, 0, right->segCount, 0, 0); if (left->type[f3] == 1 && right->type[f4] == 1) { thrust::transform(make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.begin()), make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.end()), make_permutation_iterator(right->d_columns_float[f4].begin(), d_res2), d_add, float_equal_to()); } else { if(join_eq_type1.front() != 'N') thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()), make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()), make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2), d_add, thrust::equal_to<int_type>()); else { thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()), make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()), 
make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2), d_add, thrust::not_equal_to<int_type>()); }; }; if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') { // result count changes only in case of an inner join unsigned int new_cnt = thrust::count(d_add, d_add+res_count, 1); thrust::stable_partition(d_res2, d_res2 + res_count, d_add, thrust::identity<unsigned int>()); thrust::stable_partition(p_tmp.begin(), p_tmp.end(), d_add, thrust::identity<unsigned int>()); res_count = new_cnt; } else { //otherwise we consider it a valid left join result with non-nulls on the left side and nulls on the right side thrust::transform(d_res2, d_res2 + res_count, d_add , d_res2, set_minus()); }; thrust::device_free(d_add); }; if(!join_eq_type1.empty()) join_eq_type1.pop(); }; while(!join_eq_type1.empty()) join_eq_type1.pop(); //cout << "MUL res_count " << res_count << endl; if(join_kind == '1') { //LEFT SEMI thrust::sort(p_tmp.begin(), p_tmp.begin() + res_count); auto new_end = thrust::unique(p_tmp.begin(), p_tmp.begin() + res_count); res_count = new_end - p_tmp.begin(); } else if(join_kind == '2'){ // RIGHT SEMI thrust::sort(d_res2, d_res2 + res_count); auto new_end = thrust::unique(d_res2, d_res2 + res_count); res_count = new_end - d_res2; auto old_sz = ranj.size(); ranj.resize(ranj.size() + res_count); thrust::copy(d_res2, d_res2 + res_count, ranj.begin() + old_sz); thrust::sort(ranj.begin(), ranj.end()); auto ra_cnt = thrust::unique(ranj.begin(), ranj.end()); ranj.resize(ra_cnt-ranj.begin()); } else if(join_kind == '3'){ // ANTI JOIN LEFT thrust::counting_iterator<int> iter(0); thrust::device_vector<int> rr(cnt_l); auto new_end = thrust::set_difference(iter, iter+cnt_l, p_tmp.begin(), p_tmp.begin() + res_count, rr.begin()); res_count = new_end - rr.begin(); thrust::copy(rr.begin(), new_end, p_tmp.begin()); } else if(join_kind == '4'){ // ANTI JOIN RIGHT thrust::sort(d_res2, d_res2 + res_count); auto new_end = thrust::unique(d_res2, d_res2 + res_count); auto cnt = new_end - d_res2; thrust::device_vector<int> seq(cnt + ranj.size()); //auto new_end = thrust::set_difference(seq.begin(), seq.end(), d_res2, d_res2 + res_count, rr.begin()); auto new_end1 = thrust::set_union(d_res2, d_res2 + cnt, ranj.begin(), ranj.end(), seq.begin()); auto s_cnt = new_end1 - seq.begin(); thrust::sort(seq.begin(), seq.begin() + s_cnt); auto end_seq = thrust::unique(seq.begin(), seq.begin() + s_cnt); auto u_cnt = end_seq - seq.begin(); ranj.resize(u_cnt); thrust::copy(seq.begin(), seq.begin() + u_cnt, ranj.begin()); thrust::sort(ranj.begin(), ranj.end()); auto ra_cnt = thrust::unique(ranj.begin(), ranj.end()); ranj.resize(ra_cnt-ranj.begin()); } tot_count = tot_count + res_count; //cout << "tot " << tot_count << endl; //std::clock_t start12 = std::clock(); if(res_count && join_kind != '4' && join_kind != '2') { offset = c->mRecCount; queue<string> op_sel1(op_sel_s); c->resize_join(res_count); if(scratch.size() < res_count*int_size) scratch.resize(res_count*int_size); thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0); std::map<string,bool> processed; while(!op_sel1.empty()) { if (processed.find(op_sel1.front()) != processed.end()) { op_sel1.pop(); continue; } else processed[op_sel1.front()] = 1; while(!cc.empty()) cc.pop(); cc.push(op_sel1.front()); if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end() && join_kind != '2') { allocColumns(left, cc); copyColumns(left, cc, i, k, 0, 0); //gather 
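// Materialize the join result for this segment: left-side columns are gathered through
// p_tmp (join indices mapped back through the sort permutation v_l), right-side columns
// through d_res2, using the shared scratch buffer as a staging area before the copy into
// the host columns of c at the current offset.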
if(left->type[op_sel1.front()] != 1 ) { thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset); } else { thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_float[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset); }; if(op_sel1.front() != colname1) left->deAllocColumnOnDevice(op_sel1.front()); //}; } else if(std::find(right->columnNames.begin(), right->columnNames.end(), op_sel1.front()) != right->columnNames.end()) { //gather if(right->type[op_sel1.front()] != 1) { thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(d_res2, d_res2 + res_count, right->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset); } else { thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(d_res2, d_res2 + res_count, right->d_columns_float[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset); } } else { }; op_sel1.pop(); }; }; }; }; if(join_type.front() == '4') { thrust::device_vector<int> st(cnt_r); thrust::sequence(st.begin(), st.end(),0,1); thrust::device_vector<int> r(cnt_r); auto new_end = thrust::set_difference(st.begin(), st.end(), ranj.begin(), ranj.end(), r.begin()); ranj.resize(0); res_count = new_end - r.begin(); tot_count = res_count; queue<string> op_sel1(op_sel_s); c->resize_join(res_count); if(scratch.size() < res_count*int_size) scratch.resize(res_count*int_size); thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0); std::map<string,bool> processed; while(!op_sel1.empty()) { if (processed.find(op_sel1.front()) != processed.end()) { op_sel1.pop(); continue; } else processed[op_sel1.front()] = 1; while(!cc.empty()) cc.pop(); cc.push(op_sel1.front()); thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(r.begin(), r.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin()); op_sel1.pop(); }; } else if(join_type.front() == '2') { res_count = ranj.size(); tot_count = res_count; queue<string> op_sel1(op_sel_s); c->resize_join(res_count); if(scratch.size() < res_count*int_size) scratch.resize(res_count*int_size); thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0); std::map<string,bool> processed; while(!op_sel1.empty()) { if (processed.find(op_sel1.front()) != processed.end()) { op_sel1.pop(); continue; } else processed[op_sel1.front()] = 1; while(!cc.empty()) cc.pop(); cc.push(op_sel1.front()); thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(ranj.begin(), ranj.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin()); op_sel1.pop(); }; ranj.resize(0); }; }; left->deAllocOnDevice(); right->deAllocOnDevice(); c->deAllocOnDevice(); varNames[s] = c; c->mRecCount = tot_count; c->hostRecCount = tot_count; c->name = s; 
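// Post-join bookkeeping: device copies of both inputs and of the result are released,
// and the result's segmentation is derived from free GPU memory. A rough sketch of the
// sizing rule used below (all columns treated as 8-byte values):
//   tot_size = tot_count * 8 * c->columnNames.size();
//   segCount = (tot_size < getFreeMem()) ? 1 : tot_size / getFreeMem() + 1;
//   maxRecs  = hostRecCount - (hostRecCount / segCount) * (segCount - 1);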
if(verbose) cout << "tot res " << tot_count << " " << getFreeMem() << endl; if(right->tmp_table == 1) { right->free(); varNames.erase(j2); } else { if(stat[j2] == statement_count) { right->free(); varNames.erase(j2); }; }; if(stat[j1] == statement_count) { left->free(); varNames.erase(j1); }; join_type.pop(); if(!join_eq_type.empty()) join_eq_type.pop(); size_t tot_size = tot_count*8*c->columnNames.size(); if (getFreeMem() > tot_size) { c->maxRecs = tot_count; c->segCount = 1; } else { c->segCount = ((tot_size)/getFreeMem() + 1); c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1); }; if(verbose) std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; } void order_on_host(CudaSet *a, CudaSet* b, queue<string> names, stack<string> exe_type, stack<string> exe_value) { unsigned int tot = 0; if(!a->not_compressed) { //compressed allocColumns(a, names); unsigned int c = 0; size_t cnt = 0; for(unsigned int i = 0; i < a->segCount; i++) { copyColumns(a, names, (a->segCount - i) - 1, cnt); //uses segment 1 on a host to copy data from a file to gpu if (a->mRecCount) { a->CopyToHost((c - tot) - a->mRecCount, a->mRecCount); tot = tot + a->mRecCount; }; }; } else tot = a->mRecCount; b->resize(tot); //resize host arrays a->mRecCount = tot; unsigned int* permutation = new unsigned int[a->mRecCount]; thrust::sequence(permutation, permutation + a->mRecCount); size_t maxSize = a->mRecCount; char* temp; temp = new char[maxSize*max_char(a)]; // sort on host for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) { if (a->type[exe_type.top()] == 0) update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp); else if (a->type[exe_type.top()] == 1) update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp); else { update_char_permutation(a, exe_type.top(), permutation, exe_value.top(), temp, 1); }; }; for (unsigned int i = 0; i < a->mColumnCount; i++) { if (a->type[a->columnNames[i]] != 1) { apply_permutation_host(a->h_columns_int[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_int[a->columnNames[i]].data()); } else apply_permutation_host(a->h_columns_float[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_float[a->columnNames[i]].data()); }; delete [] temp; delete [] permutation; } void emit_order(const char *s, const char *f, const int e, const int ll) { if(ll == 0) statement_count++; if (scan_state == 0 && ll == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Order : couldn't find variable " + string(f)); }; stat[s] = statement_count; stat[f] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; return; }; if (scan_state == 0) { check_used_vars(); return; }; if(varNames.find(f) == varNames.end() ) { clean_queues(); return; }; CudaSet* a = varNames.find(f)->second; stack<string> exe_type, exe_value; if(verbose) cout << "ORDER: " << s << " " << f << endl; for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) { if ((op_type.front()).compare("NAME") == 0) { exe_type.push(op_value.front()); exe_value.push("ASC"); } else { exe_type.push(op_type.front()); exe_value.push(op_value.front()); }; if(std::find(a->columnNames.begin(), a->columnNames.end(), exe_type.top()) == a->columnNames.end()) { process_error(2, "Couldn't find name " + exe_type.top()); }; }; 
stack<string> tp(exe_type); queue<string> op_vx; while (!tp.empty()) { op_vx.push(tp.top()); tp.pop(); }; queue<string> names; for (unsigned int i = 0; i < a->columnNames.size() ; i++ ) names.push(a->columnNames[i]); CudaSet *b = a->copyDeviceStruct(); //lets find out if our data set fits into a GPU size_t mem_available = getFreeMem(); size_t rec_size = 0; for(unsigned int i = 0; i < a->mColumnCount; i++) { if(a->type[a->columnNames[i]] == 0) rec_size = rec_size + int_size; else if(a->type[a->columnNames[i]] == 1) rec_size = rec_size + float_size; else rec_size = rec_size + a->char_size[a->columnNames[i]]; }; bool fits; if (rec_size*a->mRecCount > (mem_available/2)) // doesn't fit into a GPU fits = 0; else fits = 1; if(!fits) { order_on_host(a, b, names, exe_type, exe_value); } else { // initialize permutation to [0, 1, 2, ... ,N-1] size_t rcount; if(a->filtered) { CudaSet *t = varNames[a->source_name]; a->mRecCount = t->mRecCount; a->hostRecCount = a->mRecCount; }; a->mRecCount = load_queue(names, a, op_vx.front(), rcount, 0, a->segCount); if(scratch.size() < a->mRecCount) scratch.resize(a->mRecCount*4); thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data())); thrust::sequence(permutation, permutation+(a->mRecCount)); unsigned int* perm_ptr = thrust::raw_pointer_cast(permutation); void* temp; CUDA_SAFE_CALL(hipMalloc((void **) &temp, a->mRecCount*max_char(a))); if(a->filtered) varNames[a->source_name]->hostRecCount = varNames[a->source_name]->mRecCount; else a->hostRecCount = a->mRecCount;; if(a->filtered) varNames[a->source_name]->mRecCount = varNames[a->source_name]->hostRecCount; else a->mRecCount = a->hostRecCount; for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) { if (a->type[exe_type.top()] == 0 && a->string_map.find(exe_type.top()) == a->string_map.end()) update_permutation(a->d_columns_int[exe_type.top()], perm_ptr, a->mRecCount, exe_value.top(), (int_type*)temp, 64); else if (a->type[exe_type.top()] == 1) update_permutation(a->d_columns_float[exe_type.top()], perm_ptr, a->mRecCount,exe_value.top(), (float_type*)temp, 64); else { //get strings to device update_char_permutation(a, exe_type.top(), perm_ptr, exe_value.top(), temp, 0); }; }; b->resize(a->mRecCount); //resize host arrays b->mRecCount = a->mRecCount; for (unsigned int i = 0; i < a->mColumnCount; i++) { if (a->type[a->columnNames[i]] != 1) { apply_permutation(a->d_columns_int[a->columnNames[i]], perm_ptr, a->mRecCount, (int_type*)temp, 64); } else apply_permutation(a->d_columns_float[a->columnNames[i]], perm_ptr, a->mRecCount, (float_type*)temp, 64); }; for(unsigned int i = 0; i < a->mColumnCount; i++) { if(a->type[a->columnNames[i]] != 1) { thrust::copy(a->d_columns_int[a->columnNames[i]].begin(), a->d_columns_int[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_int[a->columnNames[i]].begin()); } else thrust::copy(a->d_columns_float[a->columnNames[i]].begin(), a->d_columns_float[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_float[a->columnNames[i]].begin()); }; b->deAllocOnDevice(); a->deAllocOnDevice(); hipFree(temp); }; varNames[s] = b; b->segCount = 1; b->not_compressed = 1; b->string_map = a->string_map; if(stat[f] == statement_count && !a->keep) { a->free(); varNames.erase(f); }; } void emit_select(const char *s, const char *f, const int grp_cnt) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Select : couldn't find variable " + string(f) ); }; stat[s] = 
statement_count; stat[f] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; check_used_vars(); clean_queues(); return; }; if(varNames.find(f) == varNames.end()) { clean_queues(); cout << "Couldn't find1 " << f << endl; process_error(2, "Couldn't find(1) " + string(f) ); return; }; queue<string> op_v1(op_value); while(op_v1.size() > grp_cnt) op_v1.pop(); stack<string> op_v2; queue<string> op_v3; for(int i=0; i < grp_cnt; ++i) { op_v2.push(op_v1.front()); op_v3.push(op_v1.front()); op_v1.pop(); }; CudaSet *a; if(varNames.find(f) != varNames.end()) a = varNames.find(f)->second; else { process_error(2, "Couldn't find " + string(f) ); }; if(a->mRecCount == 0 && !a->filtered) { CudaSet *c; c = new CudaSet(0,1); varNames[s] = c; c->name = s; clean_queues(); if(verbose) cout << "SELECT " << s << " count : 0, Mem " << getFreeMem() << endl; return; }; if(verbose) cout << "SELECT " << s << " " << f << " " << getFreeMem() << endl; std::clock_t start1 = std::clock(); // here we need to determine the column count and composition queue<string> op_v(op_value); queue<string> op_vx; set<string> field_names; map<string,string> aliases; string tt; while(!op_v.empty()) { if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) { tt = op_v.front(); op_v.pop(); if(!op_v.empty()) { if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end()) { if(aliases.count(tt) == 0) { aliases[tt] = op_v.front(); }; } else { while(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end() && !op_v.empty()) { op_v.pop(); }; }; }; }; if(!op_v.empty()) op_v.pop(); }; op_v = op_value; while(!op_v.empty()) { if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) { field_names.insert(op_v.front()); }; op_v.pop(); }; for (auto it=field_names.begin(); it!=field_names.end(); ++it) { op_vx.push(*it); }; // find out how many columns a new set will have queue<string> op_t(op_type); int_type col_count = 0; for(int i=0; !op_t.empty(); ++i, op_t.pop()) if((op_t.front()).compare("emit sel_name") == 0) col_count++; CudaSet *b, *c; if(a->segCount <= 1) setSegments(a, op_vx); allocColumns(a, op_vx); unsigned int cycle_count; if(a->filtered) cycle_count = varNames[a->source_name]->segCount; else cycle_count = a->segCount; size_t ol_count = a->mRecCount, cnt; a->hostRecCount = a->mRecCount; b = new CudaSet(0, col_count); b->name = "tmp b in select"; bool c_set = 0; //size_t tmp_size = a->mRecCount; //if(a->segCount > 1) // tmp_size = a->maxRecs; vector<thrust::device_vector<int_type> > distinct_val; //keeps array of DISTINCT values for every key vector<thrust::device_vector<int_type> > distinct_hash; //keeps array of DISTINCT values for every key vector<thrust::device_vector<int_type> > distinct_tmp; /* for(unsigned int i = 0; i < distinct_cnt; i++) { distinct_tmp.push_back(thrust::device_vector<int_type>(tmp_size)); distinct_val.push_back(thrust::device_vector<int_type>()); distinct_hash.push_back(thrust::device_vector<int_type>()); }; */ bool one_liner; if (grp_cnt != 0) phase_copy = 1; for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE if(verbose) cout << "segment " << i << " select mem " << getFreeMem() << endl; std::clock_t start3 = std::clock(); cnt = 0; copyColumns(a, op_vx, i, cnt); if(a->mRecCount) { if (grp_cnt != 0) { bool srt = 0; stack<string> op_vv(op_v2); while(!op_vv.empty()) { if(!min_max_eq[op_vv.top()]) srt = 1; 
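// srt is raised when any GROUP BY key is not flagged in min_max_eq, i.e. the key may
// still take more than one value in this segment, so the segment has to be sorted
// in place before GroupBy(); otherwise the whole segment collapses into a single group.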
op_vv.pop(); }; if(srt) { order_inplace(a, op_v2, field_names, 1); a->GroupBy(op_v2); } else { if(a->grp.size() < a->mRecCount) a->grp.resize(a->mRecCount); thrust::fill(a->grp.begin(),a->grp.begin()+a->mRecCount,0); a->grp[a->mRecCount-1] = 1; a->grp_count = 1; }; } else a->grp_count = 0; copyFinalize(a, op_vx,0); select(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a,b, distinct_tmp, one_liner); if(i == 0) std::reverse(b->columnNames.begin(), b->columnNames.end()); if (!c_set && b->mRecCount > 0) { c = new CudaSet(0, col_count); create_c(c,b); c_set = 1; c->name = s; }; if (grp_cnt && cycle_count > 1 && b->mRecCount > 0) { add(c,b,op_v3, aliases, distinct_tmp, distinct_val, distinct_hash, a); } else { //copy b to c unsigned int c_offset = c->mRecCount; c->resize(b->mRecCount); for(unsigned int j=0; j < b->columnNames.size(); j++) { if (b->type[b->columnNames[j]] == 0) { thrust::copy(b->d_columns_int[b->columnNames[j]].begin(), b->d_columns_int[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_int[b->columnNames[j]].begin() + c_offset); } else if (b->type[b->columnNames[j]] == 1) { thrust::copy(b->d_columns_float[b->columnNames[j]].begin(), b->d_columns_float[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_float[b->columnNames[j]].begin() + c_offset); }; }; }; //std::cout<< "add time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << '\n'; }; std::cout<< "cycle sel time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n'; }; phase_copy = 0; a->mRecCount = ol_count; a->mRecCount = a->hostRecCount; a->deAllocOnDevice(); b->deAllocOnDevice(); a->grp.resize(0); a->grp.shrink_to_fit(); for(auto i = 0; i < alloced_mem.size(); i++) { hipFree(alloced_mem[i]); alloced_mem.pop_back(); }; if(!c_set) { CudaSet *c; c = new CudaSet(0,1); varNames[s] = c; c->name = s; clean_queues(); return; }; if (grp_cnt) { count_avg(c, distinct_hash); } else { if(one_liner) { count_simple(c); }; }; c->maxRecs = c->mRecCount; c->hostRecCount = c->mRecCount; c->string_map = b->string_map; c->name = s; c->keep = 1; if(verbose) cout << "select res " << c->mRecCount << endl; size_t tot_size = c->maxRecs*8*c->columnNames.size(); if (getFreeMem() < tot_size*3) { c->segCount = ((tot_size*3)/getFreeMem() + 1); c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1); }; clean_queues(); varNames[s] = c; b->free(); varNames[s]->keep = 1; if(stat[s] == statement_count) { varNames[s]->free(); varNames.erase(s); }; if(stat[f] == statement_count && a->keep == 0) { a->free(); varNames.erase(f); }; if(verbose) std::cout<< "select time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n'; } void emit_insert(const char *f, const char* s) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Insert : couldn't find variable " + string(f)); }; if (stat.find(s) == stat.end() && data_dict.count(s) == 0) { process_error(2, "Insert : couldn't find variable " + string(s) ); }; check_used_vars(); stat[f] = statement_count; stat[s] = statement_count; clean_queues(); return; }; if(varNames.find(f) == varNames.end() || varNames.find(s) == varNames.end()) { clean_queues(); return; }; if(verbose) cout << "INSERT " << f << " " << s << endl; insert_records(f,s); clean_queues(); }; void emit_delete(const char *f) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Delete : couldn't find variable " + 
string(f)); }; stat[f] = statement_count; check_used_vars(); clean_queues(); return; }; if(varNames.find(f) == varNames.end()) { clean_queues(); return; }; delete_records(f); cout << "DELETE " << f << endl; clean_queues(); } void emit_case() { op_case = 1; if (scan_state == 1) cout << "emit case " << endl; //extract releveant values and pass to modified filter // get a bool vector back /* while(!op_type.empty()) { cout << "CASE type " << op_type.front() << endl; op_type.pop(); } */ } void emit_create_index(const char *index_name, const char *table, const char *column) { if (scan_state != 0) { FILE *f; string s1(table); string s3 = s1 + ".key"; f = fopen(s3.c_str(), "w"); fputs(column,f); fclose(f); }; } void emit_create_interval(const char *interval_name, const char *table, const char *lcolumn, const char *rcolumn) { if (scan_state != 0) { FILE *f; string s1(table); string s3 = s1 + ".interval"; f = fopen(s3.c_str(), "w"); fputs(lcolumn,f); fputc('|',f); fputs(rcolumn,f); fclose(f); }; } void emit_create_bitmap_index(const char *index_name, const char *ltable, const char *rtable, const char *rcolumn, const char *lid, const char *rid) { statement_count++; if (scan_state == 0) { emit_name(rcolumn); emit_sel_name(rcolumn); emit_name(lid); emit_name(rid); check_used_vars(); stat[rtable] = std::numeric_limits<unsigned int>::max(); stat[ltable] = std::numeric_limits<unsigned int>::max(); } else { cout << ltable << " " << rtable << " " << rid << " " << lid << endl; emit_name(rcolumn); emit_sel_name(rcolumn); emit_name(lid); emit_name(rid); check_used_vars(); if(varNames.find(ltable) == varNames.end()) cout << "Couldn't find " << ltable << endl; if(varNames.find(rtable) == varNames.end()) cout << "Couldn't find " << rtable << endl; CudaSet* left = varNames.find(ltable)->second; CudaSet* right = varNames.find(rtable)->second; queue<string> op_vx; op_vx.push(rcolumn);op_vx.push(rid); allocColumns(right, op_vx); right->CopyColumnToGpu(rid, 0, 0); right->CopyColumnToGpu(rcolumn, 0, 0); op_vx.pop();op_vx.pop(); op_vx.push(lid); allocColumns(left, op_vx); for(int i = 0; i < left->segCount; i++) { left->CopyColumnToGpu(lid, i, 0); thrust::device_vector<unsigned int> output(left->mRecCount); thrust::lower_bound(right->d_columns_int[rid].begin(), right->d_columns_int[rid].begin() + right->mRecCount, left->d_columns_int[lid].begin(), left->d_columns_int[lid].begin() + left->mRecCount, output.begin()); string str = std::string(ltable) + std::string(".") + std::string(rtable) + std::string(".") + std::string(rcolumn) + std::string(".") + to_string(i); thrust::device_vector<int_type> res(left->mRecCount); thrust::host_vector<int_type> res_h(left->mRecCount); if(right->type[rcolumn] == 0) { thrust::gather(output.begin(), output.begin() + left->mRecCount, right->d_columns_int[rcolumn].begin() , res.begin()); thrust::copy(res.begin(), res.begin() + left->mRecCount, res_h.begin()); compress_int(str, res_h); } else if(right->type[rcolumn] == 1) { } else { //strings string f1 = right->load_file_name + "." + rcolumn + ".0.hash"; //need to change it in case if there are dimensions tables larger than 1 segment ? 
FILE* f = fopen(f1.c_str(), "rb" ); unsigned int cnt; fread(&cnt, 4, 1, f); if(res_h.size() < cnt) res_h.resize(cnt); if(res.size() < cnt) res.resize(cnt); fread(res_h.data(), cnt*8, 1, f); res = res_h; fclose(f); thrust::device_vector<int_type> output1(left->mRecCount); thrust::gather(output.begin(), output.begin() + left->mRecCount , res.begin(), output1.begin()); thrust::copy(output1.begin(), output1.begin() + left->mRecCount, res_h.begin()); compress_int(str, res_h); }; }; }; } void emit_display(const char *f, const char* sep) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Filter : couldn't find variable " + string(f) ); }; stat[f] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; clean_queues(); return; }; if(varNames.find(f) == varNames.end()) { clean_queues(); return; }; CudaSet* a = varNames.find(f)->second; int limit = 0; if(!op_nums.empty()) { limit = op_nums.front(); op_nums.pop(); }; a->Display(limit, 0, 1); clean_queues(); if(stat[f] == statement_count && a->keep == 0) { a->free(); varNames.erase(f); }; } void emit_filter(char *s, char *f) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(1, "Filter : couldn't find variable " + string(f)); }; stat[s] = statement_count; stat[f] = statement_count; filter_var[s] = f; // check possible use of other variables in filters queue<string> op(op_value); while(!op.empty()) { size_t pos1 = op.front().find_first_of(".", 0); if(pos1 != string::npos) { stat[op.front().substr(0,pos1)] = statement_count; }; op.pop(); }; check_used_vars(); clean_queues(); return; }; CudaSet *a, *b; a = varNames.find(f)->second; a->name = f; if(a->mRecCount == 0 && !a->filtered) { b = new CudaSet(0,1); } else { if(verbose) cout << "INLINE FILTER " << f << endl; b = a->copyDeviceStruct(); b->name = s; b->sorted_fields = a->sorted_fields; b->presorted_fields = a->presorted_fields; //save the stack b->fil_s = s; b->fil_f = f; b->fil_type = op_type; b->fil_value = op_value; b->fil_nums = op_nums; b->fil_nums_f = op_nums_f; b->fil_nums_precision = op_nums_precision; b->filtered = 1; b->tmp_table = a->tmp_table; b->string_map = a->string_map; if(a->filtered) { b->source_name = a->source_name; b->fil_f = a->fil_f; while(!a->fil_value.empty()) { b->fil_value.push(a->fil_value.front()); a->fil_value.pop(); }; while(!a->fil_type.empty()) { b->fil_type.push(a->fil_type.front()); a->fil_type.pop(); }; b->fil_type.push("AND"); while(!a->fil_nums.empty()) { b->fil_nums.push(a->fil_nums.front()); a->fil_nums.pop(); }; while(!a->fil_nums_precision.empty()) { b->fil_nums_precision.push(a->fil_nums_precision.front()); a->fil_nums_precision.pop(); }; while(!a->fil_nums_f.empty()) { b->fil_nums_f.push(a->fil_nums_f.front()); a->fil_nums_f.pop(); }; a->filtered = 0; varNames.erase(f); } else b->source_name = f; b->maxRecs = a->maxRecs; b->prm_d.resize(a->maxRecs); }; b->hostRecCount = a->hostRecCount; clean_queues(); if (varNames.count(s) > 0) varNames[s]->free(); varNames[s] = b; if(stat[s] == statement_count) { b->free(); varNames.erase(s); }; } void emit_store(const char *s, const char *f, const char* sep) { statement_count++; if (scan_state == 0) { if (stat.find(s) == stat.end() && data_dict.count(s) == 0) { process_error(2, "Store : couldn't find variable " + string(s) ); }; stat[s] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; 
clean_queues(); return; }; if(varNames.find(s) == varNames.end()) return; CudaSet* a = varNames.find(s)->second; if(verbose) cout << "STORE: " << s << " " << f << " " << sep << endl; int limit = 0; if(!op_nums.empty()) { limit = op_nums.front(); op_nums.pop(); }; a->Store(f,sep, limit, 0, 0); if(stat[s] == statement_count && a->keep == 0) { a->free(); varNames.erase(s); }; }; void emit_store_binary(const char *s, const char *f, const bool append) { statement_count++; if (scan_state == 0) { if (stat.find(s) == stat.end() && data_dict.count(s) == 0) { process_error(2, "Store : couldn't find variable " + string(s)); }; stat[s] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; clean_queues(); return; }; cout << "Append " << append << endl; if(varNames.find(s) == varNames.end()) return; CudaSet* a = varNames.find(s)->second; if(stat[f] == statement_count) a->deAllocOnDevice(); printf("STORE: %s %s \n", s, f); int limit = 0; if(!op_nums.empty()) { limit = op_nums.front(); op_nums.pop(); }; total_count = 0; total_segments = 0; a->maxRecs = 0; if(fact_file_loaded) { a->Store(f,"", limit, 1, append); } else { FILE* file_p; if(a->text_source) { file_p = fopen(a->load_file_name.c_str(), "rb"); if (!file_p) { process_error(2, "Could not open file " + a->load_file_name ); }; }; thrust::device_vector<char> d_readbuff; thrust::device_vector<char*> dest(a->mColumnCount); thrust::device_vector<unsigned int> ind(a->mColumnCount); thrust::device_vector<unsigned int> dest_len(a->mColumnCount); while(!fact_file_loaded) { if(verbose) cout << "LOADING " << a->load_file_name << " mem: " << getFreeMem() << endl; if(a->text_source) fact_file_loaded = a->LoadBigFile(file_p, d_readbuff, dest, ind, dest_len); if(a->maxRecs < a->mRecCount) a->maxRecs = a->mRecCount; a->Store(f,"", limit, 1, append); }; }; a->writeSortHeader(f); if(stat[f] == statement_count && !a->keep) { a->free(); varNames.erase(s); }; }; void emit_load_binary(const char *s, const char *f, const int d) { statement_count++; if (scan_state == 0) { stat[s] = statement_count; return; }; if(verbose) printf("BINARY LOAD: %s %s \n", s, f); //std::clock_t start1 = std::clock(); CudaSet *a; unsigned int segCount, maxRecs; string f1(f); f1 += "." 
+ namevars.front() + ".header"; FILE* ff = fopen(f1.c_str(), "rb"); if(!ff) { process_error(2, "Couldn't open file " + f1); }; size_t totRecs; fread((char *)&totRecs, 8, 1, ff); fread((char *)&segCount, 4, 1, ff); fread((char *)&maxRecs, 4, 1, ff); fclose(ff); if(verbose) cout << "Reading " << totRecs << " records" << endl; a = new CudaSet(namevars, typevars, sizevars, cols, totRecs, f, maxRecs); a->segCount = segCount; a->keep = true; a->name = s; varNames[s] = a; if(stat[s] == statement_count ) { a->free(); varNames.erase(s); }; //std::cout<< "load time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n'; } void emit_load(const char *s, const char *f, const int d, const char* sep) { statement_count++; if (scan_state == 0) { stat[s] = statement_count; return; }; printf("LOAD: %s %s %d %s \n", s, f, d, sep); CudaSet *a; a = new CudaSet(namevars, typevars, sizevars, cols, process_count); a->keep = true; a->not_compressed = 1; a->load_file_name = f; a->separator = sep; varNames[s] = a; fact_file_loaded = 0; if(stat[s] == statement_count) { a->free(); varNames.erase(s); }; } void emit_show_tables() { if (scan_state == 1) { for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) { cout << (*it).first << endl; }; }; return; } void emit_drop_table(const char* table_name) { if (scan_state == 1) { map<string, map<string, col_data> >::iterator iter; if((iter = data_dict.find(table_name)) != data_dict.end()) { auto s = (*iter).second; for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) { int seg = 0; string f_name = (*iter).first + "." + (*it).first + "." + to_string(seg); while(!remove(f_name.c_str())) { seg++; f_name = (*iter).first + "." + (*it).first + "." + to_string(seg); }; f_name = (*iter).first + "." + (*it).first + ".header"; remove(f_name.c_str()); }; }; string s_name = (*iter).first + ".presort"; remove(s_name.c_str()); s_name = (*iter).first + ".sort"; remove(s_name.c_str()); if(data_dict.find(table_name) != data_dict.end()) { data_dict.erase(table_name); }; save_dict = 1; }; return; } void emit_describe_table(const char* table_name) { if (scan_state == 1) { map<string, map<string, col_data> >::iterator iter; if((iter = data_dict.find(table_name)) != data_dict.end()) { auto s = (*iter).second; for (auto it=s.begin() ; it != s.end(); ++it ) { if ((*it).second.col_type == 0) { if((*it).second.col_length) { if((*it).second.col_length != UINT_MAX) cout << (*it).first << " decimal with precision of " << (*it).second.col_length << endl; else cout << (*it).first << " timestamp" << endl; } else cout << (*it).first << " integer" << endl; } else if ((*it).second.col_type == 1) { cout << (*it).first << " float" << endl; } else if ((*it).second.col_type == 3) { cout << (*it).first << " decimal" << endl; } else { cout << (*it).first << " char(" << (*it).second.col_length << ")" << endl; }; }; }; }; return; } void yyerror(char *s, ...) 
{
    extern int yylineno;
    extern char *yytext;
    fprintf(stderr, "%d: error: ", yylineno);
    cout << yytext << endl;
    error_cb(1, s);
}

void clean_queues()
{
    while(!op_type.empty()) op_type.pop();
    while(!op_value.empty()) op_value.pop();
    while(!op_join.empty()) op_join.pop();
    while(!op_nums.empty()) op_nums.pop();
    while(!op_nums_f.empty()) op_nums_f.pop();
    while(!op_nums_precision.empty()) op_nums_precision.pop();
    while(!j_col_count.empty()) j_col_count.pop();
    while(!namevars.empty()) namevars.pop();
    while(!typevars.empty()) typevars.pop();
    while(!sizevars.empty()) sizevars.pop();
    while(!cols.empty()) cols.pop();
    while(!op_sort.empty()) op_sort.pop();
    while(!op_presort.empty()) op_presort.pop();
    while(!join_type.empty()) join_type.pop();
    while(!join_eq_type.empty()) join_eq_type.pop();
    op_case = 0;
    sel_count = 0;
    join_cnt = 0;
    join_col_cnt = 0;
    distinct_cnt = 0;
    join_tab_cnt = 0;
    tab_cnt = 0;
    join_and_cnt.clear();
}

void load_vars()
{
    if(used_vars.size() == 0) {
        //cout << "Error, no valid column names have been found " << endl;
        //exit(0);
    }
    else {
        for (auto it=used_vars.begin(); it != used_vars.end(); ++it ) {
            while(!namevars.empty()) namevars.pop();
            while(!typevars.empty()) typevars.pop();
            while(!sizevars.empty()) sizevars.pop();
            while(!cols.empty()) cols.pop();
            if(stat.count((*it).first) != 0) {
                auto c = (*it).second;
                for (auto sit=c.begin() ; sit != c.end(); ++sit ) {
                    //cout << "name " << (*sit).first << " " << data_dict[(*it).first][(*sit).first].col_length << endl;
                    namevars.push((*sit).first);
                    if(data_dict[(*it).first][(*sit).first].col_type == 0) {
                        if(data_dict[(*it).first][(*sit).first].col_length == 0) {
                            typevars.push("int");
                        }
                        else {
                            if(data_dict[(*it).first][(*sit).first].col_length == UINT_MAX)
                                typevars.push("timestamp");
                            else
                                typevars.push("decimal");
                        }
                    }
                    else if(data_dict[(*it).first][(*sit).first].col_type == 1)
                        typevars.push("float");
                    else
                        typevars.push("char");
                    sizevars.push(data_dict[(*it).first][(*sit).first].col_length);
                    cols.push(0);
                };
                emit_load_binary((*it).first.c_str(), (*it).first.c_str(), 0);
            };
        };
    };
}

void process_error(int severity, string err)
{
    switch (severity) {
    case 1:
        err = "(Warning) " + err;
        break;
    case 2:
        err = "(Fatal) " + err;
        break;
    default:
        err = "(Aborting) " + err;
        break;
    }
    error_cb(severity, err.c_str()); // send the error to the c based callback
}

void alenkaInit(char ** av)
{
    process_count = 1000000000;
    verbose = 0;
    scan_state = 1;
    statement_count = 0;
    clean_queues();
    context = CreateCudaDevice(0, nullptr, true);
}

void alenkaClose()
{
    statement_count = 0;
    if(alloced_sz) {
        hipFree(alloced_tmp);
        alloced_sz = 0;
    };
}
62b9d8d605352f2def290bd4e030102dfbfd1514.cu
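// Host-side operator layer for the engine's SQL-style front end: parser callbacks (emit_*),
// GPU execution of join/sort/select via Thrust and ModernGPU, and device-memory bookkeeping.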
#include "operators.h" #include <thrust/iterator/permutation_iterator.h> #include <thrust/set_operations.h> struct is_even { __host__ __device__ bool operator()(const int &x) { return (x % 2) == 0; } }; using namespace mgpu; using namespace std; using namespace thrust::placeholders; size_t int_size = sizeof(int_type); size_t float_size = sizeof(float_type); queue<string> namevars; queue<string> typevars; queue<int> sizevars; queue<int> cols; queue<unsigned int> j_col_count; unsigned int sel_count = 0; unsigned int join_cnt = 0; unsigned int distinct_cnt = 0; unsigned int join_col_cnt = 0; unsigned int join_tab_cnt = 0; unsigned int tab_cnt = 0; queue<string> op_join; queue<char> join_type; queue<char> join_eq_type; unsigned int partition_count; map<string,unsigned int> stat; map<unsigned int, unsigned int> join_and_cnt; map<string, map<string, bool> > used_vars; bool save_dict = 0; ContextPtr context; thrust::device_vector<unsigned char> scratch; map<string, string> filter_var; thrust::device_vector<int> ranj; unsigned long long int currtime; void check_used_vars() { for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) { auto s = (*it).second; auto vars(op_value); while(!vars.empty()) { if(s.count(vars.front()) != 0) { used_vars[(*it).first][vars.front()] = 1; }; vars.pop(); } }; } void emit_name(const char *name) { op_type.push("NAME"); op_value.push(name); } void emit_limit(const int val) { op_nums.push(val); } void emit_string(const char *str) { // remove the float_type quotes if(str[0] == '"') { string sss(str,1, strlen(str)-2); op_value.push(sss); } else { string sss(str); op_value.push(sss); }; op_type.push("STRING"); } void emit_string_grp(const char *str, const char *str_grp) { emit_string(str); grp_val = str_grp; }; void emit_fieldname(const char* name1, const char* name2) { string s1(name1); string s2(name2); op_type.push("FIELD"); op_value.push(s1 + "." + s2); }; void emit_number(const int_type val) { op_type.push("NUMBER"); op_nums.push(val); op_nums_precision.push(0); } void emit_float(const float_type val) { op_type.push("FLOAT"); op_nums_f.push(val); } void emit_decimal(const char* str) { op_type.push("NUMBER"); string s1(str); unsigned int precision; auto pos = s1.find("."); if(pos == std::string::npos) precision = 0; else { precision = (s1.length() - pos) -1; s1.erase(pos,1); }; op_nums.push(stoi(s1)); op_nums_precision.push(precision); cout << "Decimal " << stoi(s1) << " " << precision << endl; } void emit_mul() { op_type.push("MUL"); } void emit_add() { op_type.push("ADD"); } void emit_div() { op_type.push("DIV"); } unsigned int misses = 0; void emit_and() { op_type.push("AND"); join_col_cnt++; } void emit_eq() { op_type.push("JOIN"); join_eq_type.push('E'); if(misses == 0) { join_and_cnt[tab_cnt] = join_col_cnt; misses = join_col_cnt; join_col_cnt = 0; tab_cnt++; } else { misses--; } } void emit_neq() { op_type.push("JOIN"); join_eq_type.push('N'); if(misses == 0) { join_and_cnt[tab_cnt] = join_col_cnt; misses = join_col_cnt; join_col_cnt = 0; tab_cnt++; } else { misses--; } } void emit_distinct() { op_type.push("DISTINCT"); distinct_cnt++; } void emit_year() { op_type.push("YEAR"); } void emit_month() { op_type.push("MONTH"); } void emit_day() { op_type.push("DAY"); } void emit_or() { op_type.push("OR"); } void emit_minus() { op_type.push("MINUS"); } void emit_cmp(int val) { op_type.push("CMP"); op_nums.push(val); } void emit(const char *s, ...) 
{ } void emit_var(const char *s, const int c, const char *f, const char* ref, const char* ref_name) { namevars.push(s); typevars.push(f); sizevars.push(0); cols.push(c); } void emit_var_asc(const char *s) { op_type.push(s); op_value.push("ASC"); } void emit_var_desc(const char *s) { op_type.push(s); op_value.push("DESC"); } void emit_sort(const char *s, const int p) { op_sort.push(s); partition_count = p; } void emit_presort(const char *s) { op_presort.push(s); } void emit_varchar(const char *s, const int c, const char *f, const int d, const char *ref, const char* ref_name) { namevars.push(s); typevars.push(f); sizevars.push(d); cols.push(c); } void emit_vardecimal(const char *s, const int c, const char *f, const int scale, const int precision) { namevars.push(s); typevars.push(f); sizevars.push(precision); cols.push(c); } void emit_sel_name(const char *s) { op_type.push("emit sel_name"); op_value.push(s); sel_count++; } void emit_count() { op_type.push("COUNT"); } void emit_sum() { op_type.push("SUM"); } void emit_average() { op_type.push("AVG"); } void emit_min() { op_type.push("MIN"); } void emit_max() { op_type.push("MAX"); } void emit_join_tab(const char *s, const char tp) { op_join.push(s); join_tab_cnt++; join_type.push(tp); }; void order_inplace_host(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str) { unsigned int* permutation = new unsigned int[a->mRecCount]; thrust::sequence(permutation, permutation + a->mRecCount); char* temp = new char[a->mRecCount*max_char(a)]; stack<string> exe_type1(exe_type), exe_value; while(!exe_type1.empty()) { exe_value.push("ASC"); exe_type1.pop(); }; // sort on host for(;!exe_type.empty(); exe_type.pop(),exe_value.pop()) { if (a->type[exe_type.top()] != 1) update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp); else update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp); }; for (auto it=field_names.begin(); it!=field_names.end(); ++it) { if (a->type[*it] != 1) { thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_int[*it].data(), (int_type*)temp); thrust::copy((int_type*)temp, (int_type*)temp + a->mRecCount, a->h_columns_int[*it].data()); } else { thrust::gather(permutation, permutation + a->mRecCount, a->h_columns_float[*it].data(), (float_type*)temp); thrust::copy((float_type*)temp, (float_type*)temp + a->mRecCount, a->h_columns_float[*it].data()); } }; delete [] temp; delete [] permutation; } void order_inplace(CudaSet* a, stack<string> exe_type, set<string> field_names, bool update_str) { if(scratch.size() < a->mRecCount*4) scratch.resize(a->mRecCount*4); thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data())); thrust::sequence(permutation, permutation+a->mRecCount,0,1); unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation); if(a->grp.size() < a->mRecCount*8) a->grp.resize(a->mRecCount*8); unsigned int bits; for(; !exe_type.empty(); exe_type.pop()) { if(cpy_bits.empty()) bits = 0; else bits = cpy_bits[exe_type.top()]; if (a->type[exe_type.top()] != 1) { update_permutation(a->d_columns_int[exe_type.top()], raw_ptr, a->mRecCount, "ASC", (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits); } else update_permutation(a->d_columns_float[exe_type.top()], raw_ptr, a->mRecCount,"ASC", (float_type*)thrust::raw_pointer_cast(a->grp.data()), bits); }; for (auto it=field_names.begin(); it!=field_names.end(); ++it) { 
if(cpy_bits.empty()) bits = 0; else bits = cpy_bits[*it]; if (a->type[*it] != 1) { apply_permutation(a->d_columns_int[*it], raw_ptr, a->mRecCount, (int_type*)thrust::raw_pointer_cast(a->grp.data()), bits); } else { apply_permutation(a->d_columns_float[*it], raw_ptr, a->mRecCount, (float_type*)thrust::raw_pointer_cast(a->grp.data()), bits); }; }; } bool check_star_join(const string j1) { auto op_vals(op_value); for(auto i=0; i < sel_count; i++) { op_vals.pop(); op_vals.pop(); }; if(join_tab_cnt > 0) { while(op_vals.size()) { if (std::find(varNames[j1]->columnNames.begin(), varNames[j1]->columnNames.end(), op_vals.front()) != varNames[j1]->columnNames.end()) { op_vals.pop(); op_vals.pop(); } else { return 0; }; }; if(join_tab_cnt == 1) { if(!check_bitmap_file_exist(varNames[j1], varNames[op_join.front()])) { return 0; }; }; return 1; } else return 0; } void star_join(const char *s, const string j1) { map<string,bool> already_copied; queue<string> op_left; CudaSet* left = varNames.find(j1)->second; queue<string> op_sel; queue<string> op_sel_as; for(auto i=0; i < sel_count; i++) { if(std::find(left->columnNames.begin(), left->columnNames.end(), op_value.front()) != left->columnNames.end()) op_left.push(op_value.front()); op_sel.push(op_value.front()); op_value.pop(); op_sel_as.push(op_value.front()); op_value.pop(); }; auto op_sel_s(op_sel), op_sel_s_as(op_sel_as), op_g(op_value); CudaSet* c = new CudaSet(op_sel_s, op_sel_s_as); string f1, f2; map<string, string> key_map; map<string, char> sort_map; map<string, string> r_map; for(auto i = 0; i < join_tab_cnt; i++) { f1 = op_g.front(); op_g.pop(); f2 = op_g.front(); op_g.pop(); r_map[f1] = f2; queue<string> op_jj(op_join); for(auto z = 0; z < (join_tab_cnt-1) - i; z++) op_jj.pop(); size_t rcount; queue<string> op_vd(op_g), op_alt(op_sel); unsigned int jc = join_col_cnt; while(jc) { jc--; op_vd.pop(); op_alt.push(op_vd.front()); op_vd.pop(); }; key_map[op_jj.front()] = f1; CudaSet* right = varNames.find(op_jj.front())->second; if(!check_bitmaps_exist(left, right)) { cout << "Required bitmap on table " << op_jj.front() << " doesn't exists" << endl; exit(0); }; queue<string> second; while(!op_alt.empty()) { if(f2.compare(op_alt.front()) != 0 && std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) { second.push(op_alt.front()); //cout << "col " << op_alt.front() << " " << op_jj.front() << endl; op_left.push(f1); }; op_alt.pop(); }; if(!second.empty()) { right->filtered = 0; right->mRecCount = right->maxRecs; load_queue(second, right, "", rcount, 0, right->segCount, 0,0); // put all used columns into GPU }; }; queue<string> idx; set<string> already_loaded; bool right_cpy = 0; for (unsigned int i = 0; i < left->segCount; i++) { std::clock_t start2 = std::clock(); if(verbose) cout << "segment " << i << " " << getFreeMem() << endl; idx = left->fil_value; already_loaded.clear(); while(!idx.empty()) { //load the index if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) { //extract table name and colname from index name already_loaded.insert(idx.front()); size_t pos1 = idx.front().find_first_of(".", 0); size_t pos2 = idx.front().find_first_of(".", pos1+1); CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second; char a; //cout << "loading index " << idx.front() << endl; a = left->loadIndex(idx.front(), i); sort_map[idx.front().substr(pos1+1, pos2-pos1-1)] = a; }; idx.pop(); }; left->filtered = 0; size_t cnt_c = 0; 
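// Apply the fact table's stored filter predicates to this segment on the GPU, then gather the indices of the qualifying rows into prm_d.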
allocColumns(left, left->fil_value); copyColumns(left, left->fil_value, i, cnt_c); bool* res = filter(left->fil_type, left->fil_value, left->fil_nums, left->fil_nums_f, left->fil_nums_precision, left, i); thrust::device_ptr<bool> star((bool*)res); size_t cnt = thrust::count(star, star + (unsigned int)left->mRecCount, 1); cout << "join res " << cnt << " out of " << left->mRecCount << endl; thrust::host_vector<unsigned int> prm_vh(cnt); thrust::device_vector<unsigned int> prm_v(cnt); thrust::host_vector<unsigned int> prm_tmp(cnt); thrust::device_vector<unsigned int> prm_tmp_d(cnt); //std::cout<< "seg filter " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; if(cnt) { //gather //start1 = std::clock(); left->prm_d.resize(cnt); thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)left->mRecCount-1), star, left->prm_d.begin(), thrust::identity<bool>()); thrust::device_free(star); prm_vh = left->prm_d; size_t offset = c->mRecCount; c->resize_join(cnt); queue<string> op_sel1(op_sel_s); void* temp; CUDA_SAFE_CALL(cudaMalloc((void **) &temp, cnt*max_char(c))); cudaMemset(temp,0,cnt*max_char(c)); CudaSet *t; unsigned int cnt1, bits; int_type lower_val; thrust::device_vector<unsigned int> output(cnt); //std::cout<< "seg start " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; while(!op_sel1.empty()) { if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end()) { //cout << "Left " << op_sel1.front() << endl; if(left->filtered) t = varNames[left->source_name]; else t = left; if(left->type[op_sel1.front()] <= 1) { if(ssd && !interactive) { //start1 = std::clock(); lower_val = t->readSsdSegmentsFromFile(i, op_sel1.front(), offset, prm_vh, c); //std::cout<< "SSD L SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; } else { t->readSegmentsFromFile(i, op_sel1.front()); void* h; if(!interactive) { if(left->type[op_sel1.front()] == 0) h = t->h_columns_int[op_sel1.front()].data(); else h = t->h_columns_float[op_sel1.front()].data(); } else { string ff = t->load_file_name + "." + op_sel1.front()+ "." 
+ to_string(i); h = buffers[ff]; }; cnt1 = ((unsigned int*)h)[0];//bytes lower_val = ((int_type*)(((unsigned int*)h)+1))[0]; bits = ((unsigned int*)((char*)h + cnt1))[8]; //cout << cnt1 << " " << lower_val << " " << bits << " " << left->type[op_sel1.front()] << endl; if(bits == 8) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), ptr + offset); }; } else if(bits == 16) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), ptr + offset); }; } else if(bits == 32) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), ptr + offset); } } else if(bits == 64) { if(left->type[op_sel1.front()] == 0) { thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), c->h_columns_int[op_sel1.front()].begin() + offset); } else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), ptr + offset); }; }; }; //cout << "lower_val bits " << lower_val << " " << bits << endl; if(left->type[op_sel1.front()] != 1) thrust::transform( c->h_columns_int[op_sel1.front()].begin() + offset, c->h_columns_int[op_sel1.front()].begin() + offset + cnt, thrust::make_constant_iterator(lower_val), c->h_columns_int[op_sel1.front()].begin() + offset, thrust::plus<int_type>()); else { int_type* ptr = (int_type*)c->h_columns_float[op_sel1.front()].data(); thrust::transform(ptr + offset, ptr + offset + cnt, thrust::make_constant_iterator(lower_val), ptr + offset, thrust::plus<int_type>()); thrust::transform(ptr + offset, ptr + offset + cnt, c->h_columns_float[op_sel1.front()].begin() + offset, long_to_float()); }; } else { //gather string. There are no strings in fact tables. }; } else { for(auto it = key_map.begin(); it != key_map.end(); it++) { CudaSet* r = varNames.find(it->first)->second; if(std::find(r->columnNames.begin(), r->columnNames.end(), op_sel1.front()) != r->columnNames.end()) { if(i == 0) { if(data_dict[varNames[it->first]->load_file_name][op_sel1.front()].col_type == 2) { //cout << "SET " << op_sel1.front() << " to " << varNames[it->first]->load_file_name + "." + op_sel1.front() << endl; c->string_map[op_sel1.front()] = varNames[it->first]->load_file_name + "." + op_sel1.front(); }; } if(left->filtered) t = varNames[left->source_name]; else t = left; if(ssd && !interactive) { //start1 = std::clock(); lower_val = t->readSsdSegmentsFromFileR(i, key_map[it->first], prm_vh, prm_tmp); //std::cout<< "SSD R SEEK READ " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; } else { t->readSegmentsFromFile(i, key_map[it->first]); void* h; if(!interactive) { h = t->h_columns_int[key_map[it->first]].data(); } else { string ff = t->load_file_name + "." 
+ key_map[it->first] + "." + to_string(i); h = buffers[ff]; }; cnt1 = ((unsigned int*)h)[0]; lower_val = ((int_type*)(((unsigned int*)h)+1))[0]; bits = ((unsigned int*)((char*)h + cnt1))[8]; //cout << cnt1 << " " << lower_val << " " << bits << endl; if(bits == 8) { thrust::gather(prm_vh.begin(), prm_vh.end(), (char*)((unsigned int*)h + 6), prm_tmp.begin()); } else if(bits == 16) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned short int*)((unsigned int*)h + 6), prm_tmp.begin()); } else if(bits == 32) { thrust::gather(prm_vh.begin(), prm_vh.end(), (unsigned int*)((unsigned int*)h + 6), prm_tmp.begin()); } else if(bits == 64) { thrust::gather(prm_vh.begin(), prm_vh.end(), (int_type*)((unsigned int*)h + 6), prm_tmp.begin()); }; }; if(lower_val != 1) thrust::transform(prm_tmp.begin(), prm_tmp.end(), thrust::make_constant_iterator(lower_val-1), prm_tmp.begin(), thrust::plus<unsigned int>()); if(sort_map[r->source_name] == '1') { // sorted consecutive starting with 1 dimension keys prm_tmp_d = prm_tmp; //cout << "PATH 1 " << endl; } else { //cout << "PATH 2 " << r->source_name << endl; output = prm_tmp; if(r->d_columns_int[r_map[key_map[it->first]]].size() == 0) { r->d_columns_int[r_map[key_map[it->first]]].resize(r->maxRecs); }; if(right_cpy == 0) { r->CopyColumnToGpu(r_map[key_map[it->first]]); }; thrust::lower_bound(r->d_columns_int[r_map[key_map[it->first]]].begin(), r->d_columns_int[r_map[key_map[it->first]]].end(), output.begin(), output.end(), prm_tmp_d.begin()); }; if(r->type[op_sel1.front()] != 1) { thrust::device_ptr<int_type> d_tmp((int_type*)temp); thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_int[op_sel1.front()].begin() + offset); } else { thrust::device_ptr<float_type> d_tmp((float_type*)temp); thrust::gather(prm_tmp_d.begin(), prm_tmp_d.end(), r->d_columns_float[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + cnt, c->h_columns_float[op_sel1.front()].begin() + offset); }; break; }; }; }; op_sel1.pop(); //std::cout<< ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; }; cudaFree(temp); right_cpy = 1; }; //std::cout<< "SEG " << i << " " << ( ( std::clock() - start2 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; //unload the segment indexes : idx = left->fil_value; already_loaded.clear(); while(!idx.empty()) { if(idx.front().find(".") != string::npos && (already_loaded.find(idx.front()) == already_loaded.end())) { //extract table name and colname from index name already_loaded.insert(idx.front()); size_t pos1 = idx.front().find_first_of(".", 0); size_t pos2 = idx.front().find_first_of(".", pos1+1); CudaSet* r = varNames.find(idx.front().substr(pos1+1, pos2-pos1-1))->second; string f1 = idx.front() + "." 
+ to_string(i); auto it = index_buffers.find(f1); if(it != index_buffers.end()) { cudaFreeHost(index_buffers[f1]); index_buffers.erase(it); }; }; idx.pop(); }; }; //if(verbose) // std::cout<< "star join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; while(!op_join.empty()) { varNames[op_join.front()]->deAllocOnDevice(); op_join.pop(); }; varNames[s] = c; c->maxRecs = c->mRecCount; if(verbose) cout << endl << "join count " << c->mRecCount << endl; }; void emit_join(const char *s, const char *j1, const int grp, const int start_seg, const int end_seg) { //cout << "emit_join " << s << " " << join_tab_cnt << " " << op_join.front() << endl; statement_count++; if (scan_state == 0) { if (stat.find(j1) == stat.end() && data_dict.count(j1) == 0) { process_error(2, "Join : couldn't find variable " + string(j1) ); }; if (stat.find(op_join.front()) == stat.end() && data_dict.count(op_join.front()) == 0) { process_error(2, "Join : couldn't find variable " + op_join.front() ); }; stat[s] = statement_count; stat[j1] = statement_count; if(filter_var.find(j1) != filter_var.end()) { stat[filter_var[j1]] = statement_count; }; check_used_vars(); while(!op_join.empty()) { stat[op_join.front()] = statement_count; if(filter_var.find(op_join.front()) != filter_var.end()) { stat[filter_var[op_join.front()]] = statement_count; }; op_join.pop(); }; return; }; queue<string> op_m(op_value); if(check_star_join(j1)) { if(verbose) cout << "executing star join !! " << endl; star_join(s, j1); } else { if(join_tab_cnt > 1) { string tab_name; for(unsigned int i = 1; i <= join_tab_cnt; i++) { if(i == join_tab_cnt) tab_name = s; else tab_name = s + to_string(i); string j, j2; if(i == 1) { j2 = op_join.front(); op_join.pop(); j = op_join.front(); op_join.pop(); } else { if(!op_join.empty()) { j = op_join.front(); op_join.pop(); } else j = j1; j2 = s + to_string(i-1); }; emit_multijoin(tab_name, j, j2, i, s, start_seg, end_seg); op_value = op_m; }; } else { emit_multijoin(s, j1, op_join.front(), 1, s, start_seg, end_seg); op_join.pop(); }; }; queue<string> op_sel; queue<string> op_sel_as; for(int i=0; i < sel_count; i++) { op_sel.push(op_m.front()); op_m.pop(); op_sel_as.push(op_m.front()); op_m.pop(); }; while(!op_sel_as.empty()) { //cout << "alias " << op_sel.front() << " : " << op_sel_as.front() << endl; if(op_sel.front() != op_sel_as.front()) { if(varNames[s]->type[op_sel.front()] == 0) { varNames[s]->h_columns_int[op_sel_as.front()] = varNames[s]->h_columns_int[op_sel.front()]; varNames[s]->h_columns_int.erase(op_sel.front()); varNames[s]->d_columns_int[op_sel_as.front()] = varNames[s]->d_columns_int[op_sel.front()]; varNames[s]->d_columns_int.erase(op_sel.front()); varNames[s]->type[op_sel_as.front()] = 0; varNames[s]->type.erase(op_sel.front()); } else if(varNames[s]->type[op_sel.front()] == 1) { varNames[s]->h_columns_float[op_sel_as.front()] = varNames[s]->h_columns_float[op_sel.front()]; varNames[s]->h_columns_float.erase(op_sel.front()); varNames[s]->d_columns_float[op_sel_as.front()] = varNames[s]->d_columns_float[op_sel.front()]; varNames[s]->d_columns_float.erase(op_sel.front()); varNames[s]->type[op_sel_as.front()] = 1; varNames[s]->type.erase(op_sel.front()); varNames[s]->decimal.erase(op_sel.front()); } else { varNames[s]->h_columns_char[op_sel_as.front()] = varNames[s]->h_columns_char[op_sel.front()]; varNames[s]->h_columns_char.erase(op_sel.front()); varNames[s]->d_columns_char[op_sel_as.front()] = varNames[s]->d_columns_char[op_sel.front()]; 
varNames[s]->d_columns_char.erase(op_sel.front()); varNames[s]->type[op_sel_as.front()] = 2; varNames[s]->type.erase(op_sel.front()); varNames[s]->char_size[op_sel_as.front()] = varNames[s]->char_size[op_sel.front()]; varNames[s]->char_size.erase(op_sel.front()); }; varNames[s]->decimal[op_sel_as.front()] = varNames[s]->decimal[op_sel.front()]; auto it = std::find(varNames[s]->columnNames.begin(), varNames[s]->columnNames.end(), op_sel.front()); *it = op_sel_as.front(); }; op_sel_as.pop(); op_sel.pop(); }; clean_queues(); if(stat[s] == statement_count) { varNames[s]->free(); varNames.erase(s); }; if(op_join.size()) { if(stat[op_join.front()] == statement_count && op_join.front().compare(j1) != 0) { varNames[op_join.front()]->free(); varNames.erase(op_join.front()); }; }; } template<typename T, typename P> void p_gather(thrust::host_vector<int>& h_tmp, T* h, P* dest) { for(int i = 0; i < h_tmp.size(); i++) { dest[i] = h[h_tmp[i]]; }; }; void emit_multijoin(const string s, const string j1, const string j2, const unsigned int tab, const char* res_name, const int start_segment, const int end_segment) { if(varNames.find(j1) == varNames.end() || varNames.find(j2) == varNames.end()) { clean_queues(); if(varNames.find(j1) == varNames.end()) cout << "Couldn't find j1 " << j1 << endl; if(varNames.find(j2) == varNames.end()) cout << "Couldn't find j2 " << j2 << " here " << endl; return; }; CudaSet* left = varNames.find(j1)->second; CudaSet* right = varNames.find(j2)->second; queue<string> op_sel; queue<string> op_sel_as; for(int i=0; i < sel_count; i++) { op_sel.push(op_value.front()); op_value.pop(); op_sel_as.push(op_value.front()); op_value.pop(); }; queue<string> op_sel_s(op_sel); queue<string> op_sel_s_as(op_sel_as); queue<string> op_g(op_value); if(tab > 0) { for(unsigned int z = 0; z < join_tab_cnt - tab; z++) { for(unsigned int j = 0; j < join_and_cnt[z]*2 + 2; j++) { op_sel_s.push(op_g.front()); op_sel_s_as.push(op_g.front()); op_g.pop(); }; }; }; string f1 = op_g.front(); op_g.pop(); string f2 = op_g.front(); op_g.pop(); if (verbose) cout << "JOIN " << s << " " << f1 << " " << f2 << " " << getFreeMem() << " " << phase_copy << endl; std::clock_t start1 = std::clock(); CudaSet* c = new CudaSet(right, left, op_sel_s, op_sel_s_as); if ((left->mRecCount == 0 && !left->filtered) || (right->mRecCount == 0 && !right->filtered)) { c = new CudaSet(left, right, op_sel_s, op_sel_s_as); varNames[res_name] = c; clean_queues(); return; }; if(join_tab_cnt > 1 && tab < join_tab_cnt) c->tmp_table = 1; else c->tmp_table = 0; string colname1, colname2; string tmpstr; if (std::find(left->columnNames.begin(), left->columnNames.end(), f1) != left->columnNames.end()) { colname1 = f1; if (std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) { colname2 = f2; } else { process_error(2, "Couldn't find column " + f2 ); }; } else if (std::find(right->columnNames.begin(), right->columnNames.end(), f1) != right->columnNames.end()) { colname2 = f1; tmpstr = f1; f1 = f2; if (std::find(left->columnNames.begin(), left->columnNames.end(), f2) != left->columnNames.end()) { colname1 = f2; f2 = tmpstr; } else { process_error(2, "Couldn't find column " +f2 ); }; } else { process_error(2, "Couldn't find column " + f1); }; if (!((left->type[colname1] == 0 && right->type[colname2] == 0) || (left->type[colname1] == 2 && right->type[colname2] == 2) || (left->type[colname1] == 1 && right->type[colname2] == 1 && left->decimal[colname1] && right->decimal[colname2]))) { process_error(2, "Joins on 
floats are not supported "); }; //bool decimal_join = 0; //if (left->type[colname1] == 1 && right->type[colname2] == 1) // decimal_join = 1; queue<string> op_vd(op_g); queue<string> op_g1(op_g); queue<string> op_alt(op_sel); unsigned int jc = join_and_cnt[join_tab_cnt - tab]; while(jc) { jc--; op_vd.pop(); op_alt.push(op_vd.front()); op_vd.pop(); }; size_t rcount = 0, cnt_r; queue<string> cc; if (left->type[colname1] == 2) { left->d_columns_int[colname1] = thrust::device_vector<int_type>(); } else { cc.push(f1); allocColumns(left, cc); }; left->hostRecCount = left->mRecCount; size_t cnt_l, res_count, tot_count = 0, offset = 0, k = 0; queue<string> lc(cc); thrust::device_vector<unsigned int> v_l(left->maxRecs); MGPU_MEM(int) aIndicesDevice, bIndicesDevice, intersectionDevice; stack<string> exe_type; set<string> field_names; exe_type.push(f2); for(unsigned int i = 0; i < right->columnNames.size(); i++) { if (std::find(c->columnNames.begin(), c->columnNames.end(), right->columnNames[i]) != c->columnNames.end() || right->columnNames[i] == f2 || join_and_cnt[join_tab_cnt - tab]) { field_names.insert(right->columnNames[i]); }; }; thrust::device_vector<int> p_tmp; unsigned int start_part = 0; bool prejoin = 0; while(start_part < right->segCount) { right->deAllocOnDevice(); std::clock_t start12 = std::clock(); if(right->not_compressed || (!right->filtered && getFreeMem() < right->columnNames.size()*right->hostRecCount*8*2)) { cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, start_part+1); start_part = start_part+1; } else { cnt_r = load_right(right, f2, op_g1, op_alt, rcount, start_part, right->segCount); start_part = right->segCount; for(unsigned int i=0; i < right->columnNames.size(); i++) { if (right->type[right->columnNames[i]] != 1) { right->d_columns_int[right->columnNames[i]].shrink_to_fit(); } else right->d_columns_float[right->columnNames[i]].shrink_to_fit(); }; }; right->mRecCount = cnt_r; bool order = 1; if(!right->presorted_fields.empty() && right->presorted_fields.front() == f2) { order = 0; //cout << "No need to sort " << endl; if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size()) right->sort_check = '1'; else { right->sort_check = '0'; }; }; if(order) { if(thrust::is_sorted(right->d_columns_int[f2].begin(), right->d_columns_int[f2].end())) { if (right->d_columns_int[f2][0] == 1 && right->d_columns_int[f2][right->d_columns_int[f2].size()-1] == right->d_columns_int[f2].size()) { right->sort_check = '1'; } else { right->sort_check = '0'; }; } else { //cout << "sorting " << endl; size_t tot_size = right->mRecCount*8*right->columnNames.size(); if (getFreeMem() > tot_size*1.5) { order_inplace(right, exe_type, field_names, 0); } else { //for(unsigned int i = 0; i < right->columnNames.size(); i++) { for (auto it=field_names.begin(); it!=field_names.end(); ++it) { //cout << "sorting " << *it << endl; if(right->type[*it] != 1) { if(right->h_columns_int[*it].size() < right->mRecCount) right->h_columns_int[*it].resize(right->mRecCount); thrust::copy(right->d_columns_int[*it].begin(), right->d_columns_int[*it].begin() + right->mRecCount, right->h_columns_int[*it].begin()); } else { if(right->type[*it] == 1) { if(right->h_columns_float[*it].size() < right->mRecCount) right->h_columns_float[*it].resize(right->mRecCount); }; thrust::copy(right->d_columns_float[*it].begin(), right->d_columns_float[*it].begin() + right->mRecCount, right->h_columns_float[*it].begin()); }; }; order_inplace_host(right, 
exe_type, field_names, 0); for (auto it=field_names.begin(); it!=field_names.end(); ++it) { if(right->type[*it] != 1) thrust::copy(right->h_columns_int[*it].begin(), right->h_columns_int[*it].begin() + right->mRecCount, right->d_columns_int[*it].begin()); else thrust::copy(right->h_columns_float[*it].begin(), right->h_columns_float[*it].begin() + right->mRecCount, right->d_columns_float[*it].begin()); }; }; }; }; //std::cout<< "join right load time " << ( ( std::clock() - start12 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n'; int e_segment; if(end_segment == -1) { e_segment = left->segCount; } else e_segment = end_segment; for (unsigned int i = start_segment; i < e_segment; i++) { if(verbose) //cout << "segment " << i << '\xd'; cout << "segment " << i << endl; cnt_l = 0; /*left->readSegmentsFromFile(i, colname1); void* h; h = left->h_columns_int[colname1].data(); auto cnt1 = ((unsigned int*)h)[0]; auto lower_val = ((int_type*)(((unsigned int*)h)+1))[0]; auto bits = ((unsigned int*)((char*)h + cnt1))[8]; cout << "Partition " << cnt1 << " " << lower_val << " " << bits << endl; std::clock_t start15 = std::clock(); if(bits == 8) { thrust::stable_partition((char*)((unsigned int*)h + 6), (char*)((unsigned int*)h + 6) + cnt1, is_even()); } else if(bits == 16) { thrust::stable_partition((unsigned short int*)((unsigned int*)h + 6), (unsigned short int*)((unsigned int*)h + 6) + cnt1/2, is_even()); } else if(bits == 32) { thrust::stable_partition(((unsigned int*)h + 6), ((unsigned int*)h + 6) + cnt1/4, is_even()); } else if(bits == 64) { thrust::stable_partition((int_type*)((unsigned int*)h + 6), (int_type*)((unsigned int*)h + 6) + cnt1/8, is_even()); }; auto new_cnt = cudaDeviceSynchronize(); std::cout<< "partition time " << ( ( std::clock() - start15 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n'; exit(0); */ copyColumns(left, lc, i, cnt_l); cnt_l = left->mRecCount; auto join_eq_type1(join_eq_type); if (cnt_l) { // sort the left index column, save the permutation vector, it might be needed later thrust::device_ptr<int_type> d_col((int_type*)thrust::raw_pointer_cast(left->d_columns_int[colname1].data())); thrust::sequence(v_l.begin(), v_l.begin() + cnt_l,0,1); bool do_sort = 1; if(!left->sorted_fields.empty()) { if(left->sorted_fields.front() == f1) { do_sort = 0; }; } else if(!left->presorted_fields.empty()) { if(left->presorted_fields.front() == f1) { do_sort = 0; }; }; if(do_sort) { thrust::sort_by_key(d_col, d_col + cnt_l, v_l.begin()); } else if(verbose) cout << "No need of sorting " << endl; if(prejoin) { res_count = SetOpKeys<MgpuSetOpIntersection, true>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &intersectionDevice, *context, false); if(!res_count) continue; }; if(verbose) cout << "join " << cnt_l << ":" << cnt_r << " " << join_type.front() << endl; /*if(cnt_r > 10) { for(int z = 0; z < 10 ; z++) cout << " R " << right->d_columns_int[colname2][(cnt_r-1)-z] << endl; for(int z = 0; z < 10 ; z++) cout << " L " << left->d_columns_int[colname1][(cnt_l-1)-z] << endl; }; */ if (left->d_columns_int[colname1][0] > right->d_columns_int[colname2][cnt_r-1] || left->d_columns_int[colname1][cnt_l-1] < right->d_columns_int[colname2][0]) { if(verbose) cout << endl << "skipping after copying " << endl; continue; }; //else // cout << "JOINING " << left->d_columns_int[colname1][0] << ":" << left->d_columns_int[colname1][cnt_l-1] << " AND " << right->d_columns_int[colname2][0] << ":" << 
right->d_columns_int[colname2][cnt_r-1] << endl; //cout << "joining " << left->d_columns_int[colname1][0] << " : " << left->d_columns_int[colname1][cnt_l-1] << " and " << right->d_columns_int[colname2][0] << " : " << right->d_columns_int[colname2][cnt_r-1] << endl; char join_kind = join_type.front(); std::clock_t start11 = std::clock(); if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') res_count = RelationalJoin<MgpuJoinKindInner>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &aIndicesDevice, &bIndicesDevice, mgpu::less<int_type>(), *context); else if(join_kind == 'L') res_count = RelationalJoin<MgpuJoinKindLeft>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &aIndicesDevice, &bIndicesDevice, mgpu::less<int_type>(), *context); else if(join_kind == 'R') res_count = RelationalJoin<MgpuJoinKindRight>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &aIndicesDevice, &bIndicesDevice, mgpu::less<int_type>(), *context); else if(join_kind == 'O') res_count = RelationalJoin<MgpuJoinKindOuter>(thrust::raw_pointer_cast(left->d_columns_int[colname1].data()), cnt_l, thrust::raw_pointer_cast(right->d_columns_int[colname2].data()), cnt_r, &aIndicesDevice, &bIndicesDevice, mgpu::less<int_type>(), *context); if(verbose) std::cout<< "join time " << ( ( std::clock() - start11 ) / (double)CLOCKS_PER_SEC ) << '\n'; if(verbose) cout << "RES " << res_count << endl; if(res_count == 0) prejoin = 1; int* r1 = aIndicesDevice->get(); thrust::device_ptr<int> d_res1((int*)r1); int* r2 = bIndicesDevice->get(); thrust::device_ptr<int> d_res2((int*)r2); if(res_count) { p_tmp.resize(res_count); thrust::sequence(p_tmp.begin(), p_tmp.end(),-1); thrust::gather_if(d_res1, d_res1+res_count, d_res1, v_l.begin(), p_tmp.begin(), _1 >= 0); }; // check if the join is a multicolumn join unsigned int mul_cnt = join_and_cnt[join_tab_cnt - tab]; while(mul_cnt) { mul_cnt--; queue<string> mult(op_g); string f3 = mult.front(); mult.pop(); string f4 = mult.front(); mult.pop(); //cout << "ADDITIONAL COL JOIN " << f3 << " " << f4 << " " << join_eq_type.front() << endl; queue<string> rc; rc.push(f3); allocColumns(left, rc); size_t offset = 0; copyColumns(left, rc, i, offset, 0, 0); rc.pop(); if (res_count) { thrust::device_ptr<bool> d_add = thrust::device_malloc<bool>(res_count); if(right->d_columns_int[f4].size() == 0) load_queue(rc, right, f4, rcount, 0, right->segCount, 0, 0); if (left->type[f3] == 1 && right->type[f4] == 1) { thrust::transform(make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.begin()), make_permutation_iterator(left->d_columns_float[f3].begin(), p_tmp.end()), make_permutation_iterator(right->d_columns_float[f4].begin(), d_res2), d_add, float_equal_to()); } else { if(join_eq_type1.front() != 'N') thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()), make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()), make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2), d_add, thrust::equal_to<int_type>()); else { thrust::transform(make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.begin()), make_permutation_iterator(left->d_columns_int[f3].begin(), p_tmp.end()), 
make_permutation_iterator(right->d_columns_int[f4].begin(), d_res2), d_add, thrust::not_equal_to<int_type>()); }; }; if (join_kind == 'I' || join_kind == '1' || join_kind == '2' || join_kind == '3' || join_kind == '4') { // result count changes only in case of an inner join unsigned int new_cnt = thrust::count(d_add, d_add+res_count, 1); thrust::stable_partition(d_res2, d_res2 + res_count, d_add, thrust::identity<unsigned int>()); thrust::stable_partition(p_tmp.begin(), p_tmp.end(), d_add, thrust::identity<unsigned int>()); res_count = new_cnt; } else { //otherwise we consider it a valid left join result with non-nulls on the left side and nulls on the right side thrust::transform(d_res2, d_res2 + res_count, d_add , d_res2, set_minus()); }; thrust::device_free(d_add); }; if(!join_eq_type1.empty()) join_eq_type1.pop(); }; while(!join_eq_type1.empty()) join_eq_type1.pop(); //cout << "MUL res_count " << res_count << endl; if(join_kind == '1') { //LEFT SEMI thrust::sort(p_tmp.begin(), p_tmp.begin() + res_count); auto new_end = thrust::unique(p_tmp.begin(), p_tmp.begin() + res_count); res_count = new_end - p_tmp.begin(); } else if(join_kind == '2'){ // RIGHT SEMI thrust::sort(d_res2, d_res2 + res_count); auto new_end = thrust::unique(d_res2, d_res2 + res_count); res_count = new_end - d_res2; auto old_sz = ranj.size(); ranj.resize(ranj.size() + res_count); thrust::copy(d_res2, d_res2 + res_count, ranj.begin() + old_sz); thrust::sort(ranj.begin(), ranj.end()); auto ra_cnt = thrust::unique(ranj.begin(), ranj.end()); ranj.resize(ra_cnt-ranj.begin()); } else if(join_kind == '3'){ // ANTI JOIN LEFT thrust::counting_iterator<int> iter(0); thrust::device_vector<int> rr(cnt_l); auto new_end = thrust::set_difference(iter, iter+cnt_l, p_tmp.begin(), p_tmp.begin() + res_count, rr.begin()); res_count = new_end - rr.begin(); thrust::copy(rr.begin(), new_end, p_tmp.begin()); } else if(join_kind == '4'){ // ANTI JOIN RIGHT thrust::sort(d_res2, d_res2 + res_count); auto new_end = thrust::unique(d_res2, d_res2 + res_count); auto cnt = new_end - d_res2; thrust::device_vector<int> seq(cnt + ranj.size()); //auto new_end = thrust::set_difference(seq.begin(), seq.end(), d_res2, d_res2 + res_count, rr.begin()); auto new_end1 = thrust::set_union(d_res2, d_res2 + cnt, ranj.begin(), ranj.end(), seq.begin()); auto s_cnt = new_end1 - seq.begin(); thrust::sort(seq.begin(), seq.begin() + s_cnt); auto end_seq = thrust::unique(seq.begin(), seq.begin() + s_cnt); auto u_cnt = end_seq - seq.begin(); ranj.resize(u_cnt); thrust::copy(seq.begin(), seq.begin() + u_cnt, ranj.begin()); thrust::sort(ranj.begin(), ranj.end()); auto ra_cnt = thrust::unique(ranj.begin(), ranj.end()); ranj.resize(ra_cnt-ranj.begin()); } tot_count = tot_count + res_count; //cout << "tot " << tot_count << endl; //std::clock_t start12 = std::clock(); if(res_count && join_kind != '4' && join_kind != '2') { offset = c->mRecCount; queue<string> op_sel1(op_sel_s); c->resize_join(res_count); if(scratch.size() < res_count*int_size) scratch.resize(res_count*int_size); thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0); std::map<string,bool> processed; while(!op_sel1.empty()) { if (processed.find(op_sel1.front()) != processed.end()) { op_sel1.pop(); continue; } else processed[op_sel1.front()] = 1; while(!cc.empty()) cc.pop(); cc.push(op_sel1.front()); if(std::find(left->columnNames.begin(), left->columnNames.end(), op_sel1.front()) != left->columnNames.end() && join_kind != '2') { allocColumns(left, cc); copyColumns(left, cc, i, k, 0, 0); //gather 
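// Gather this left-side column through p_tmp (the join result's left row indices) and copy it into the output set at the current offset.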
if(left->type[op_sel1.front()] != 1 ) { thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset); } else { thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(p_tmp.begin(), p_tmp.begin() + res_count, left->d_columns_float[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset); }; if(op_sel1.front() != colname1) left->deAllocColumnOnDevice(op_sel1.front()); //}; } else if(std::find(right->columnNames.begin(), right->columnNames.end(), op_sel1.front()) != right->columnNames.end()) { //gather if(right->type[op_sel1.front()] != 1) { thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(d_res2, d_res2 + res_count, right->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin() + offset); } else { thrust::device_ptr<float_type> d_tmp((float_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(d_res2, d_res2 + res_count, right->d_columns_float[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_float[op_sel1.front()].begin() + offset); } } else { }; op_sel1.pop(); }; }; }; }; if(join_type.front() == '4') { thrust::device_vector<int> st(cnt_r); thrust::sequence(st.begin(), st.end(),0,1); thrust::device_vector<int> r(cnt_r); auto new_end = thrust::set_difference(st.begin(), st.end(), ranj.begin(), ranj.end(), r.begin()); ranj.resize(0); res_count = new_end - r.begin(); tot_count = res_count; queue<string> op_sel1(op_sel_s); c->resize_join(res_count); if(scratch.size() < res_count*int_size) scratch.resize(res_count*int_size); thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0); std::map<string,bool> processed; while(!op_sel1.empty()) { if (processed.find(op_sel1.front()) != processed.end()) { op_sel1.pop(); continue; } else processed[op_sel1.front()] = 1; while(!cc.empty()) cc.pop(); cc.push(op_sel1.front()); thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(r.begin(), r.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin()); op_sel1.pop(); }; } else if(join_type.front() == '2') { res_count = ranj.size(); tot_count = res_count; queue<string> op_sel1(op_sel_s); c->resize_join(res_count); if(scratch.size() < res_count*int_size) scratch.resize(res_count*int_size); thrust::fill(scratch.begin(), scratch.begin() + res_count*int_size, 0); std::map<string,bool> processed; while(!op_sel1.empty()) { if (processed.find(op_sel1.front()) != processed.end()) { op_sel1.pop(); continue; } else processed[op_sel1.front()] = 1; while(!cc.empty()) cc.pop(); cc.push(op_sel1.front()); thrust::device_ptr<int_type> d_tmp((int_type*)thrust::raw_pointer_cast(scratch.data())); thrust::gather(ranj.begin(), ranj.end(), right->d_columns_int[op_sel1.front()].begin(), d_tmp); thrust::copy(d_tmp, d_tmp + res_count, c->h_columns_int[op_sel1.front()].begin()); op_sel1.pop(); }; ranj.resize(0); }; }; left->deAllocOnDevice(); right->deAllocOnDevice(); c->deAllocOnDevice(); varNames[s] = c; c->mRecCount = tot_count; c->hostRecCount = tot_count; c->name = s; 
if(verbose) cout << "tot res " << tot_count << " " << getFreeMem() << endl; if(right->tmp_table == 1) { right->free(); varNames.erase(j2); } else { if(stat[j2] == statement_count) { right->free(); varNames.erase(j2); }; }; if(stat[j1] == statement_count) { left->free(); varNames.erase(j1); }; join_type.pop(); if(!join_eq_type.empty()) join_eq_type.pop(); size_t tot_size = tot_count*8*c->columnNames.size(); if (getFreeMem() > tot_size) { c->maxRecs = tot_count; c->segCount = 1; } else { c->segCount = ((tot_size)/getFreeMem() + 1); c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1); }; if(verbose) std::cout<< "join time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; } void order_on_host(CudaSet *a, CudaSet* b, queue<string> names, stack<string> exe_type, stack<string> exe_value) { unsigned int tot = 0; if(!a->not_compressed) { //compressed allocColumns(a, names); unsigned int c = 0; size_t cnt = 0; for(unsigned int i = 0; i < a->segCount; i++) { copyColumns(a, names, (a->segCount - i) - 1, cnt); //uses segment 1 on a host to copy data from a file to gpu if (a->mRecCount) { a->CopyToHost((c - tot) - a->mRecCount, a->mRecCount); tot = tot + a->mRecCount; }; }; } else tot = a->mRecCount; b->resize(tot); //resize host arrays a->mRecCount = tot; unsigned int* permutation = new unsigned int[a->mRecCount]; thrust::sequence(permutation, permutation + a->mRecCount); size_t maxSize = a->mRecCount; char* temp; temp = new char[maxSize*max_char(a)]; // sort on host for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) { if (a->type[exe_type.top()] == 0) update_permutation_host(a->h_columns_int[exe_type.top()].data(), permutation, a->mRecCount, exe_value.top(), (int_type*)temp); else if (a->type[exe_type.top()] == 1) update_permutation_host(a->h_columns_float[exe_type.top()].data(), permutation, a->mRecCount,exe_value.top(), (float_type*)temp); else { update_char_permutation(a, exe_type.top(), permutation, exe_value.top(), temp, 1); }; }; for (unsigned int i = 0; i < a->mColumnCount; i++) { if (a->type[a->columnNames[i]] != 1) { apply_permutation_host(a->h_columns_int[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_int[a->columnNames[i]].data()); } else apply_permutation_host(a->h_columns_float[a->columnNames[i]].data(), permutation, a->mRecCount, b->h_columns_float[a->columnNames[i]].data()); }; delete [] temp; delete [] permutation; } void emit_order(const char *s, const char *f, const int e, const int ll) { if(ll == 0) statement_count++; if (scan_state == 0 && ll == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Order : couldn't find variable " + string(f)); }; stat[s] = statement_count; stat[f] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; return; }; if (scan_state == 0) { check_used_vars(); return; }; if(varNames.find(f) == varNames.end() ) { clean_queues(); return; }; CudaSet* a = varNames.find(f)->second; stack<string> exe_type, exe_value; if(verbose) cout << "ORDER: " << s << " " << f << endl; for(int i=0; !op_type.empty(); ++i, op_type.pop(),op_value.pop()) { if ((op_type.front()).compare("NAME") == 0) { exe_type.push(op_value.front()); exe_value.push("ASC"); } else { exe_type.push(op_type.front()); exe_value.push(op_value.front()); }; if(std::find(a->columnNames.begin(), a->columnNames.end(), exe_type.top()) == a->columnNames.end()) { process_error(2, "Couldn't find name " + exe_type.top()); }; }; 
stack<string> tp(exe_type); queue<string> op_vx; while (!tp.empty()) { op_vx.push(tp.top()); tp.pop(); }; queue<string> names; for (unsigned int i = 0; i < a->columnNames.size() ; i++ ) names.push(a->columnNames[i]); CudaSet *b = a->copyDeviceStruct(); //lets find out if our data set fits into a GPU size_t mem_available = getFreeMem(); size_t rec_size = 0; for(unsigned int i = 0; i < a->mColumnCount; i++) { if(a->type[a->columnNames[i]] == 0) rec_size = rec_size + int_size; else if(a->type[a->columnNames[i]] == 1) rec_size = rec_size + float_size; else rec_size = rec_size + a->char_size[a->columnNames[i]]; }; bool fits; if (rec_size*a->mRecCount > (mem_available/2)) // doesn't fit into a GPU fits = 0; else fits = 1; if(!fits) { order_on_host(a, b, names, exe_type, exe_value); } else { // initialize permutation to [0, 1, 2, ... ,N-1] size_t rcount; if(a->filtered) { CudaSet *t = varNames[a->source_name]; a->mRecCount = t->mRecCount; a->hostRecCount = a->mRecCount; }; a->mRecCount = load_queue(names, a, op_vx.front(), rcount, 0, a->segCount); if(scratch.size() < a->mRecCount) scratch.resize(a->mRecCount*4); thrust::device_ptr<unsigned int> permutation((unsigned int*)thrust::raw_pointer_cast(scratch.data())); thrust::sequence(permutation, permutation+(a->mRecCount)); unsigned int* perm_ptr = thrust::raw_pointer_cast(permutation); void* temp; CUDA_SAFE_CALL(cudaMalloc((void **) &temp, a->mRecCount*max_char(a))); if(a->filtered) varNames[a->source_name]->hostRecCount = varNames[a->source_name]->mRecCount; else a->hostRecCount = a->mRecCount;; if(a->filtered) varNames[a->source_name]->mRecCount = varNames[a->source_name]->hostRecCount; else a->mRecCount = a->hostRecCount; for(int i=0; !exe_type.empty(); ++i, exe_type.pop(),exe_value.pop()) { if (a->type[exe_type.top()] == 0 && a->string_map.find(exe_type.top()) == a->string_map.end()) update_permutation(a->d_columns_int[exe_type.top()], perm_ptr, a->mRecCount, exe_value.top(), (int_type*)temp, 64); else if (a->type[exe_type.top()] == 1) update_permutation(a->d_columns_float[exe_type.top()], perm_ptr, a->mRecCount,exe_value.top(), (float_type*)temp, 64); else { //get strings to device update_char_permutation(a, exe_type.top(), perm_ptr, exe_value.top(), temp, 0); }; }; b->resize(a->mRecCount); //resize host arrays b->mRecCount = a->mRecCount; for (unsigned int i = 0; i < a->mColumnCount; i++) { if (a->type[a->columnNames[i]] != 1) { apply_permutation(a->d_columns_int[a->columnNames[i]], perm_ptr, a->mRecCount, (int_type*)temp, 64); } else apply_permutation(a->d_columns_float[a->columnNames[i]], perm_ptr, a->mRecCount, (float_type*)temp, 64); }; for(unsigned int i = 0; i < a->mColumnCount; i++) { if(a->type[a->columnNames[i]] != 1) { thrust::copy(a->d_columns_int[a->columnNames[i]].begin(), a->d_columns_int[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_int[a->columnNames[i]].begin()); } else thrust::copy(a->d_columns_float[a->columnNames[i]].begin(), a->d_columns_float[a->columnNames[i]].begin() + a->mRecCount, b->h_columns_float[a->columnNames[i]].begin()); }; b->deAllocOnDevice(); a->deAllocOnDevice(); cudaFree(temp); }; varNames[s] = b; b->segCount = 1; b->not_compressed = 1; b->string_map = a->string_map; if(stat[f] == statement_count && !a->keep) { a->free(); varNames.erase(f); }; } void emit_select(const char *s, const char *f, const int grp_cnt) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Select : couldn't find variable " + string(f) ); }; stat[s] = 
statement_count; stat[f] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; check_used_vars(); clean_queues(); return; }; if(varNames.find(f) == varNames.end()) { clean_queues(); cout << "Couldn't find1 " << f << endl; process_error(2, "Couldn't find(1) " + string(f) ); return; }; queue<string> op_v1(op_value); while(op_v1.size() > grp_cnt) op_v1.pop(); stack<string> op_v2; queue<string> op_v3; for(int i=0; i < grp_cnt; ++i) { op_v2.push(op_v1.front()); op_v3.push(op_v1.front()); op_v1.pop(); }; CudaSet *a; if(varNames.find(f) != varNames.end()) a = varNames.find(f)->second; else { process_error(2, "Couldn't find " + string(f) ); }; if(a->mRecCount == 0 && !a->filtered) { CudaSet *c; c = new CudaSet(0,1); varNames[s] = c; c->name = s; clean_queues(); if(verbose) cout << "SELECT " << s << " count : 0, Mem " << getFreeMem() << endl; return; }; if(verbose) cout << "SELECT " << s << " " << f << " " << getFreeMem() << endl; std::clock_t start1 = std::clock(); // here we need to determine the column count and composition queue<string> op_v(op_value); queue<string> op_vx; set<string> field_names; map<string,string> aliases; string tt; while(!op_v.empty()) { if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) { tt = op_v.front(); op_v.pop(); if(!op_v.empty()) { if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end()) { if(aliases.count(tt) == 0) { aliases[tt] = op_v.front(); }; } else { while(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) == a->columnNames.end() && !op_v.empty()) { op_v.pop(); }; }; }; }; if(!op_v.empty()) op_v.pop(); }; op_v = op_value; while(!op_v.empty()) { if(std::find(a->columnNames.begin(), a->columnNames.end(), op_v.front()) != a->columnNames.end()) { field_names.insert(op_v.front()); }; op_v.pop(); }; for (auto it=field_names.begin(); it!=field_names.end(); ++it) { op_vx.push(*it); }; // find out how many columns a new set will have queue<string> op_t(op_type); int_type col_count = 0; for(int i=0; !op_t.empty(); ++i, op_t.pop()) if((op_t.front()).compare("emit sel_name") == 0) col_count++; CudaSet *b, *c; if(a->segCount <= 1) setSegments(a, op_vx); allocColumns(a, op_vx); unsigned int cycle_count; if(a->filtered) cycle_count = varNames[a->source_name]->segCount; else cycle_count = a->segCount; size_t ol_count = a->mRecCount, cnt; a->hostRecCount = a->mRecCount; b = new CudaSet(0, col_count); b->name = "tmp b in select"; bool c_set = 0; //size_t tmp_size = a->mRecCount; //if(a->segCount > 1) // tmp_size = a->maxRecs; vector<thrust::device_vector<int_type> > distinct_val; //keeps array of DISTINCT values for every key vector<thrust::device_vector<int_type> > distinct_hash; //keeps array of DISTINCT values for every key vector<thrust::device_vector<int_type> > distinct_tmp; /* for(unsigned int i = 0; i < distinct_cnt; i++) { distinct_tmp.push_back(thrust::device_vector<int_type>(tmp_size)); distinct_val.push_back(thrust::device_vector<int_type>()); distinct_hash.push_back(thrust::device_vector<int_type>()); }; */ bool one_liner; if (grp_cnt != 0) phase_copy = 1; for(unsigned int i = 0; i < cycle_count; i++) { // MAIN CYCLE if(verbose) cout << "segment " << i << " select mem " << getFreeMem() << endl; std::clock_t start3 = std::clock(); cnt = 0; copyColumns(a, op_vx, i, cnt); if(a->mRecCount) { if (grp_cnt != 0) { bool srt = 0; stack<string> op_vv(op_v2); while(!op_vv.empty()) { if(!min_max_eq[op_vv.top()]) srt = 1; 
op_vv.pop(); }; if(srt) { order_inplace(a, op_v2, field_names, 1); a->GroupBy(op_v2); } else { if(a->grp.size() < a->mRecCount) a->grp.resize(a->mRecCount); thrust::fill(a->grp.begin(),a->grp.begin()+a->mRecCount,0); a->grp[a->mRecCount-1] = 1; a->grp_count = 1; }; } else a->grp_count = 0; copyFinalize(a, op_vx,0); select(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a,b, distinct_tmp, one_liner); if(i == 0) std::reverse(b->columnNames.begin(), b->columnNames.end()); if (!c_set && b->mRecCount > 0) { c = new CudaSet(0, col_count); create_c(c,b); c_set = 1; c->name = s; }; if (grp_cnt && cycle_count > 1 && b->mRecCount > 0) { add(c,b,op_v3, aliases, distinct_tmp, distinct_val, distinct_hash, a); } else { //copy b to c unsigned int c_offset = c->mRecCount; c->resize(b->mRecCount); for(unsigned int j=0; j < b->columnNames.size(); j++) { if (b->type[b->columnNames[j]] == 0) { thrust::copy(b->d_columns_int[b->columnNames[j]].begin(), b->d_columns_int[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_int[b->columnNames[j]].begin() + c_offset); } else if (b->type[b->columnNames[j]] == 1) { thrust::copy(b->d_columns_float[b->columnNames[j]].begin(), b->d_columns_float[b->columnNames[j]].begin() + b->mRecCount, c->h_columns_float[b->columnNames[j]].begin() + c_offset); }; }; }; //std::cout<< "add time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << '\n'; }; std::cout<< "cycle sel time " << ( ( std::clock() - start3 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n'; }; phase_copy = 0; a->mRecCount = ol_count; a->mRecCount = a->hostRecCount; a->deAllocOnDevice(); b->deAllocOnDevice(); a->grp.resize(0); a->grp.shrink_to_fit(); for(auto i = 0; i < alloced_mem.size(); i++) { cudaFree(alloced_mem[i]); alloced_mem.pop_back(); }; if(!c_set) { CudaSet *c; c = new CudaSet(0,1); varNames[s] = c; c->name = s; clean_queues(); return; }; if (grp_cnt) { count_avg(c, distinct_hash); } else { if(one_liner) { count_simple(c); }; }; c->maxRecs = c->mRecCount; c->hostRecCount = c->mRecCount; c->string_map = b->string_map; c->name = s; c->keep = 1; if(verbose) cout << "select res " << c->mRecCount << endl; size_t tot_size = c->maxRecs*8*c->columnNames.size(); if (getFreeMem() < tot_size*3) { c->segCount = ((tot_size*3)/getFreeMem() + 1); c->maxRecs = c->hostRecCount - (c->hostRecCount/c->segCount)*(c->segCount-1); }; clean_queues(); varNames[s] = c; b->free(); varNames[s]->keep = 1; if(stat[s] == statement_count) { varNames[s]->free(); varNames.erase(s); }; if(stat[f] == statement_count && a->keep == 0) { a->free(); varNames.erase(f); }; if(verbose) std::cout<< "select time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n'; } void emit_insert(const char *f, const char* s) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Insert : couldn't find variable " + string(f)); }; if (stat.find(s) == stat.end() && data_dict.count(s) == 0) { process_error(2, "Insert : couldn't find variable " + string(s) ); }; check_used_vars(); stat[f] = statement_count; stat[s] = statement_count; clean_queues(); return; }; if(varNames.find(f) == varNames.end() || varNames.find(s) == varNames.end()) { clean_queues(); return; }; if(verbose) cout << "INSERT " << f << " " << s << endl; insert_records(f,s); clean_queues(); }; void emit_delete(const char *f) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Delete : couldn't find variable " + 
string(f)); }; stat[f] = statement_count; check_used_vars(); clean_queues(); return; }; if(varNames.find(f) == varNames.end()) { clean_queues(); return; }; delete_records(f); cout << "DELETE " << f << endl; clean_queues(); } void emit_case() { op_case = 1; if (scan_state == 1) cout << "emit case " << endl; //extract releveant values and pass to modified filter // get a bool vector back /* while(!op_type.empty()) { cout << "CASE type " << op_type.front() << endl; op_type.pop(); } */ } void emit_create_index(const char *index_name, const char *table, const char *column) { if (scan_state != 0) { FILE *f; string s1(table); string s3 = s1 + ".key"; f = fopen(s3.c_str(), "w"); fputs(column,f); fclose(f); }; } void emit_create_interval(const char *interval_name, const char *table, const char *lcolumn, const char *rcolumn) { if (scan_state != 0) { FILE *f; string s1(table); string s3 = s1 + ".interval"; f = fopen(s3.c_str(), "w"); fputs(lcolumn,f); fputc('|',f); fputs(rcolumn,f); fclose(f); }; } void emit_create_bitmap_index(const char *index_name, const char *ltable, const char *rtable, const char *rcolumn, const char *lid, const char *rid) { statement_count++; if (scan_state == 0) { emit_name(rcolumn); emit_sel_name(rcolumn); emit_name(lid); emit_name(rid); check_used_vars(); stat[rtable] = std::numeric_limits<unsigned int>::max(); stat[ltable] = std::numeric_limits<unsigned int>::max(); } else { cout << ltable << " " << rtable << " " << rid << " " << lid << endl; emit_name(rcolumn); emit_sel_name(rcolumn); emit_name(lid); emit_name(rid); check_used_vars(); if(varNames.find(ltable) == varNames.end()) cout << "Couldn't find " << ltable << endl; if(varNames.find(rtable) == varNames.end()) cout << "Couldn't find " << rtable << endl; CudaSet* left = varNames.find(ltable)->second; CudaSet* right = varNames.find(rtable)->second; queue<string> op_vx; op_vx.push(rcolumn);op_vx.push(rid); allocColumns(right, op_vx); right->CopyColumnToGpu(rid, 0, 0); right->CopyColumnToGpu(rcolumn, 0, 0); op_vx.pop();op_vx.pop(); op_vx.push(lid); allocColumns(left, op_vx); for(int i = 0; i < left->segCount; i++) { left->CopyColumnToGpu(lid, i, 0); thrust::device_vector<unsigned int> output(left->mRecCount); thrust::lower_bound(right->d_columns_int[rid].begin(), right->d_columns_int[rid].begin() + right->mRecCount, left->d_columns_int[lid].begin(), left->d_columns_int[lid].begin() + left->mRecCount, output.begin()); string str = std::string(ltable) + std::string(".") + std::string(rtable) + std::string(".") + std::string(rcolumn) + std::string(".") + to_string(i); thrust::device_vector<int_type> res(left->mRecCount); thrust::host_vector<int_type> res_h(left->mRecCount); if(right->type[rcolumn] == 0) { thrust::gather(output.begin(), output.begin() + left->mRecCount, right->d_columns_int[rcolumn].begin() , res.begin()); thrust::copy(res.begin(), res.begin() + left->mRecCount, res_h.begin()); compress_int(str, res_h); } else if(right->type[rcolumn] == 1) { } else { //strings string f1 = right->load_file_name + "." + rcolumn + ".0.hash"; //need to change it in case if there are dimensions tables larger than 1 segment ? 
FILE* f = fopen(f1.c_str(), "rb" ); unsigned int cnt; fread(&cnt, 4, 1, f); if(res_h.size() < cnt) res_h.resize(cnt); if(res.size() < cnt) res.resize(cnt); fread(res_h.data(), cnt*8, 1, f); res = res_h; fclose(f); thrust::device_vector<int_type> output1(left->mRecCount); thrust::gather(output.begin(), output.begin() + left->mRecCount , res.begin(), output1.begin()); thrust::copy(output1.begin(), output1.begin() + left->mRecCount, res_h.begin()); compress_int(str, res_h); }; }; }; } void emit_display(const char *f, const char* sep) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(2, "Filter : couldn't find variable " + string(f) ); }; stat[f] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; clean_queues(); return; }; if(varNames.find(f) == varNames.end()) { clean_queues(); return; }; CudaSet* a = varNames.find(f)->second; int limit = 0; if(!op_nums.empty()) { limit = op_nums.front(); op_nums.pop(); }; a->Display(limit, 0, 1); clean_queues(); if(stat[f] == statement_count && a->keep == 0) { a->free(); varNames.erase(f); }; } void emit_filter(char *s, char *f) { statement_count++; if (scan_state == 0) { if (stat.find(f) == stat.end() && data_dict.count(f) == 0) { process_error(1, "Filter : couldn't find variable " + string(f)); }; stat[s] = statement_count; stat[f] = statement_count; filter_var[s] = f; // check possible use of other variables in filters queue<string> op(op_value); while(!op.empty()) { size_t pos1 = op.front().find_first_of(".", 0); if(pos1 != string::npos) { stat[op.front().substr(0,pos1)] = statement_count; }; op.pop(); }; check_used_vars(); clean_queues(); return; }; CudaSet *a, *b; a = varNames.find(f)->second; a->name = f; if(a->mRecCount == 0 && !a->filtered) { b = new CudaSet(0,1); } else { if(verbose) cout << "INLINE FILTER " << f << endl; b = a->copyDeviceStruct(); b->name = s; b->sorted_fields = a->sorted_fields; b->presorted_fields = a->presorted_fields; //save the stack b->fil_s = s; b->fil_f = f; b->fil_type = op_type; b->fil_value = op_value; b->fil_nums = op_nums; b->fil_nums_f = op_nums_f; b->fil_nums_precision = op_nums_precision; b->filtered = 1; b->tmp_table = a->tmp_table; b->string_map = a->string_map; if(a->filtered) { b->source_name = a->source_name; b->fil_f = a->fil_f; while(!a->fil_value.empty()) { b->fil_value.push(a->fil_value.front()); a->fil_value.pop(); }; while(!a->fil_type.empty()) { b->fil_type.push(a->fil_type.front()); a->fil_type.pop(); }; b->fil_type.push("AND"); while(!a->fil_nums.empty()) { b->fil_nums.push(a->fil_nums.front()); a->fil_nums.pop(); }; while(!a->fil_nums_precision.empty()) { b->fil_nums_precision.push(a->fil_nums_precision.front()); a->fil_nums_precision.pop(); }; while(!a->fil_nums_f.empty()) { b->fil_nums_f.push(a->fil_nums_f.front()); a->fil_nums_f.pop(); }; a->filtered = 0; varNames.erase(f); } else b->source_name = f; b->maxRecs = a->maxRecs; b->prm_d.resize(a->maxRecs); }; b->hostRecCount = a->hostRecCount; clean_queues(); if (varNames.count(s) > 0) varNames[s]->free(); varNames[s] = b; if(stat[s] == statement_count) { b->free(); varNames.erase(s); }; } void emit_store(const char *s, const char *f, const char* sep) { statement_count++; if (scan_state == 0) { if (stat.find(s) == stat.end() && data_dict.count(s) == 0) { process_error(2, "Store : couldn't find variable " + string(s) ); }; stat[s] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; 
clean_queues(); return; }; if(varNames.find(s) == varNames.end()) return; CudaSet* a = varNames.find(s)->second; if(verbose) cout << "STORE: " << s << " " << f << " " << sep << endl; int limit = 0; if(!op_nums.empty()) { limit = op_nums.front(); op_nums.pop(); }; a->Store(f,sep, limit, 0, 0); if(stat[s] == statement_count && a->keep == 0) { a->free(); varNames.erase(s); }; }; void emit_store_binary(const char *s, const char *f, const bool append) { statement_count++; if (scan_state == 0) { if (stat.find(s) == stat.end() && data_dict.count(s) == 0) { process_error(2, "Store : couldn't find variable " + string(s)); }; stat[s] = statement_count; if(filter_var.find(f) != filter_var.end()) stat[filter_var[f]] = statement_count; clean_queues(); return; }; cout << "Append " << append << endl; if(varNames.find(s) == varNames.end()) return; CudaSet* a = varNames.find(s)->second; if(stat[f] == statement_count) a->deAllocOnDevice(); printf("STORE: %s %s \n", s, f); int limit = 0; if(!op_nums.empty()) { limit = op_nums.front(); op_nums.pop(); }; total_count = 0; total_segments = 0; a->maxRecs = 0; if(fact_file_loaded) { a->Store(f,"", limit, 1, append); } else { FILE* file_p; if(a->text_source) { file_p = fopen(a->load_file_name.c_str(), "rb"); if (!file_p) { process_error(2, "Could not open file " + a->load_file_name ); }; }; thrust::device_vector<char> d_readbuff; thrust::device_vector<char*> dest(a->mColumnCount); thrust::device_vector<unsigned int> ind(a->mColumnCount); thrust::device_vector<unsigned int> dest_len(a->mColumnCount); while(!fact_file_loaded) { if(verbose) cout << "LOADING " << a->load_file_name << " mem: " << getFreeMem() << endl; if(a->text_source) fact_file_loaded = a->LoadBigFile(file_p, d_readbuff, dest, ind, dest_len); if(a->maxRecs < a->mRecCount) a->maxRecs = a->mRecCount; a->Store(f,"", limit, 1, append); }; }; a->writeSortHeader(f); if(stat[f] == statement_count && !a->keep) { a->free(); varNames.erase(s); }; }; void emit_load_binary(const char *s, const char *f, const int d) { statement_count++; if (scan_state == 0) { stat[s] = statement_count; return; }; if(verbose) printf("BINARY LOAD: %s %s \n", s, f); //std::clock_t start1 = std::clock(); CudaSet *a; unsigned int segCount, maxRecs; string f1(f); f1 += "." 
+ namevars.front() + ".header"; FILE* ff = fopen(f1.c_str(), "rb"); if(!ff) { process_error(2, "Couldn't open file " + f1); }; size_t totRecs; fread((char *)&totRecs, 8, 1, ff); fread((char *)&segCount, 4, 1, ff); fread((char *)&maxRecs, 4, 1, ff); fclose(ff); if(verbose) cout << "Reading " << totRecs << " records" << endl; a = new CudaSet(namevars, typevars, sizevars, cols, totRecs, f, maxRecs); a->segCount = segCount; a->keep = true; a->name = s; varNames[s] = a; if(stat[s] == statement_count ) { a->free(); varNames.erase(s); }; //std::cout<< "load time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << '\n'; } void emit_load(const char *s, const char *f, const int d, const char* sep) { statement_count++; if (scan_state == 0) { stat[s] = statement_count; return; }; printf("LOAD: %s %s %d %s \n", s, f, d, sep); CudaSet *a; a = new CudaSet(namevars, typevars, sizevars, cols, process_count); a->keep = true; a->not_compressed = 1; a->load_file_name = f; a->separator = sep; varNames[s] = a; fact_file_loaded = 0; if(stat[s] == statement_count) { a->free(); varNames.erase(s); }; } void emit_show_tables() { if (scan_state == 1) { for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) { cout << (*it).first << endl; }; }; return; } void emit_drop_table(const char* table_name) { if (scan_state == 1) { map<string, map<string, col_data> >::iterator iter; if((iter = data_dict.find(table_name)) != data_dict.end()) { auto s = (*iter).second; for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) { int seg = 0; string f_name = (*iter).first + "." + (*it).first + "." + to_string(seg); while(!remove(f_name.c_str())) { seg++; f_name = (*iter).first + "." + (*it).first + "." + to_string(seg); }; f_name = (*iter).first + "." + (*it).first + ".header"; remove(f_name.c_str()); }; }; string s_name = (*iter).first + ".presort"; remove(s_name.c_str()); s_name = (*iter).first + ".sort"; remove(s_name.c_str()); if(data_dict.find(table_name) != data_dict.end()) { data_dict.erase(table_name); }; save_dict = 1; }; return; } void emit_describe_table(const char* table_name) { if (scan_state == 1) { map<string, map<string, col_data> >::iterator iter; if((iter = data_dict.find(table_name)) != data_dict.end()) { auto s = (*iter).second; for (auto it=s.begin() ; it != s.end(); ++it ) { if ((*it).second.col_type == 0) { if((*it).second.col_length) { if((*it).second.col_length != UINT_MAX) cout << (*it).first << " decimal with precision of " << (*it).second.col_length << endl; else cout << (*it).first << " timestamp" << endl; } else cout << (*it).first << " integer" << endl; } else if ((*it).second.col_type == 1) { cout << (*it).first << " float" << endl; } else if ((*it).second.col_type == 3) { cout << (*it).first << " decimal" << endl; } else { cout << (*it).first << " char(" << (*it).second.col_length << ")" << endl; }; }; }; }; return; } void yyerror(char *s, ...) 
{ extern int yylineno; extern char *yytext; fprintf(stderr, "%d: error: ", yylineno); cout << yytext << endl; error_cb(1, s); } void clean_queues() { while(!op_type.empty()) op_type.pop(); while(!op_value.empty()) op_value.pop(); while(!op_join.empty()) op_join.pop(); while(!op_nums.empty()) op_nums.pop(); while(!op_nums_f.empty()) op_nums_f.pop(); while(!op_nums_precision.empty()) op_nums_precision.pop(); while(!j_col_count.empty()) j_col_count.pop(); while(!namevars.empty()) namevars.pop(); while(!typevars.empty()) typevars.pop(); while(!sizevars.empty()) sizevars.pop(); while(!cols.empty()) cols.pop(); while(!op_sort.empty()) op_sort.pop(); while(!op_presort.empty()) op_presort.pop(); while(!join_type.empty()) join_type.pop(); while(!join_eq_type.empty()) join_eq_type.pop(); op_case = 0; sel_count = 0; join_cnt = 0; join_col_cnt = 0; distinct_cnt = 0; join_tab_cnt = 0; tab_cnt = 0; join_and_cnt.clear(); } void load_vars() { if(used_vars.size() == 0) { //cout << "Error, no valid column names have been found " << endl; //exit(0); } else { for (auto it=used_vars.begin(); it != used_vars.end(); ++it ) { while(!namevars.empty()) namevars.pop(); while(!typevars.empty()) typevars.pop(); while(!sizevars.empty()) sizevars.pop(); while(!cols.empty()) cols.pop(); if(stat.count((*it).first) != 0) { auto c = (*it).second; for (auto sit=c.begin() ; sit != c.end(); ++sit ) { //cout << "name " << (*sit).first << " " << data_dict[(*it).first][(*sit).first].col_length << endl; namevars.push((*sit).first); if(data_dict[(*it).first][(*sit).first].col_type == 0) { if(data_dict[(*it).first][(*sit).first].col_length == 0) { typevars.push("int"); } else { if(data_dict[(*it).first][(*sit).first].col_length == UINT_MAX) typevars.push("timestamp"); else typevars.push("decimal"); } } else if(data_dict[(*it).first][(*sit).first].col_type == 1) typevars.push("float"); else typevars.push("char"); sizevars.push(data_dict[(*it).first][(*sit).first].col_length); cols.push(0); }; emit_load_binary((*it).first.c_str(), (*it).first.c_str(), 0); }; }; }; } void process_error(int severity, string err) { switch (severity) { case 1: err = "(Warning) " + err; break; case 2: err = "(Fatal) " + err; break; default: err = "(Aborting) " + err; break; } error_cb(severity, err.c_str()); // send the error to the c based callback } void alenkaInit(char ** av) { process_count = 1000000000; verbose = 0; scan_state = 1; statement_count = 0; clean_queues(); context = CreateCudaDevice(0, nullptr, true); } void alenkaClose() { statement_count = 0; if(alloced_sz) { cudaFree(alloced_tmp); alloced_sz = 0; }; }
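The ORDER BY path above (emit_order / order_on_host) sorts by building a permutation vector, refining it once per sort key with update_permutation, and then gathering every column through it with apply_permutation. A minimal single-key sketch of that idea in plain Thrust follows; it is not the engine's own helpers, and the function name and column types are hypothetical.

// Hypothetical, simplified sketch of the permutation-sort idea used by emit_order:
// build an identity permutation, sort it by one key column, then gather every
// column of the row set through it. The real engine repeats the "update" step
// once per sort key and reuses scratch buffers.
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/gather.h>

void sort_rows_by_one_key(thrust::device_vector<long long>& key_col,
                          thrust::device_vector<double>&    payload_col)
{
    size_t n = key_col.size();
    thrust::device_vector<unsigned int> perm(n);
    thrust::sequence(perm.begin(), perm.end());                    // identity permutation [0..n-1]

    thrust::device_vector<long long> key_copy = key_col;           // sort a copy, drag the permutation along
    thrust::sort_by_key(key_copy.begin(), key_copy.end(), perm.begin());

    thrust::device_vector<long long> key_out(n);                   // apply the permutation to every column
    thrust::device_vector<double>    payload_out(n);
    thrust::gather(perm.begin(), perm.end(), key_col.begin(),     key_out.begin());
    thrust::gather(perm.begin(), perm.end(), payload_col.begin(), payload_out.begin());
    key_col.swap(key_out);
    payload_col.swap(payload_out);
}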
c80542217e5dec2aa536b8e80cc81ef392dc8bb0.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

__global__ void reduction_neighbored_pairs(int * arr, int * temp, int l)
{
    int tid = threadIdx.x;
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    int * i_data = arr + blockDim.x * blockIdx.x;

    if(gid>l)
        return;

    for(int offset=1;offset<=blockDim.x/2;offset*=2)
    {
        int index = 2*offset*tid;
        if(index < blockDim.x)
        {
            i_data[index]+=i_data[index+offset];
        }
        __syncthreads();
    }

    if(tid==0)
    {
        temp[blockIdx.x]=arr[gid];
    }
}

int cpu_summer(int * arr, int l)
{
    int s=0;
    for(int i=0;i<l;i++)
    {
        s+=arr[i];
    }
    return s;
}

int main()
{
    int shape=1<<27;
    int size=shape*sizeof(int);
    int block_size=128;
    dim3 block(block_size);
    dim3 grid(shape/block.x);

    int * arr;
    arr=(int *)malloc(size);
    int temp_size=sizeof(int)*grid.x;
    int * tarr;
    tarr=(int *)malloc(temp_size);

    int * d_arr, * d_temp;
    hipMalloc((void**)&d_arr, size);
    hipMalloc((void**)&d_temp, temp_size);
    hipMemset(d_temp, 0, temp_size);

    for(int i=0; i< shape; i++)
    {
        arr[i]=(int)(rand() & 0x0f);
    }

    clock_t ct1,ct2,gt1,gt2;

    ct1=clock();
    int cpu=cpu_summer(arr, shape);
    ct2=clock();

    gt1=clock();
    hipMemcpy(d_arr, arr, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( reduction_neighbored_pairs), dim3(grid), dim3(block), 0, 0, d_arr, d_temp, shape);
    hipDeviceSynchronize();
    hipMemcpy(tarr, d_temp, temp_size, hipMemcpyDeviceToHost);
    int gpu=0;
    for(int i=0;i<grid.x;i++)
    {
        gpu+=tarr[i];
    }
    gt2=clock();

    printf(cpu==gpu?"CPU and GPU values Match\n":"CPU and GPU values do not match\n");
    printf("GPU time : %lf sec\n",(double)((gt2-gt1)/(double)CLOCKS_PER_SEC));
    printf("CPU time : %lf sec\n",(double)((ct2-ct1)/(double)CLOCKS_PER_SEC));

    hipFree(d_arr);
    hipFree(d_temp);
    free(arr);
    free(tarr);

    hipDeviceReset();
    return 0;
}
c80542217e5dec2aa536b8e80cc81ef392dc8bb0.cu
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

__global__ void reduction_neighbored_pairs(int * arr, int * temp, int l)
{
    int tid = threadIdx.x;
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    int * i_data = arr + blockDim.x * blockIdx.x;

    if(gid>l)
        return;

    for(int offset=1;offset<=blockDim.x/2;offset*=2)
    {
        int index = 2*offset*tid;
        if(index < blockDim.x)
        {
            i_data[index]+=i_data[index+offset];
        }
        __syncthreads();
    }

    if(tid==0)
    {
        temp[blockIdx.x]=arr[gid];
    }
}

int cpu_summer(int * arr, int l)
{
    int s=0;
    for(int i=0;i<l;i++)
    {
        s+=arr[i];
    }
    return s;
}

int main()
{
    int shape=1<<27;
    int size=shape*sizeof(int);
    int block_size=128;
    dim3 block(block_size);
    dim3 grid(shape/block.x);

    int * arr;
    arr=(int *)malloc(size);
    int temp_size=sizeof(int)*grid.x;
    int * tarr;
    tarr=(int *)malloc(temp_size);

    int * d_arr, * d_temp;
    cudaMalloc((void**)&d_arr, size);
    cudaMalloc((void**)&d_temp, temp_size);
    cudaMemset(d_temp, 0, temp_size);

    for(int i=0; i< shape; i++)
    {
        arr[i]=(int)(rand() & 0x0f);
    }

    clock_t ct1,ct2,gt1,gt2;

    ct1=clock();
    int cpu=cpu_summer(arr, shape);
    ct2=clock();

    gt1=clock();
    cudaMemcpy(d_arr, arr, size, cudaMemcpyHostToDevice);
    reduction_neighbored_pairs<<<grid, block>>>(d_arr, d_temp, shape);
    cudaDeviceSynchronize();
    cudaMemcpy(tarr, d_temp, temp_size, cudaMemcpyDeviceToHost);
    int gpu=0;
    for(int i=0;i<grid.x;i++)
    {
        gpu+=tarr[i];
    }
    gt2=clock();

    printf(cpu==gpu?"CPU and GPU values Match\n":"CPU and GPU values do not match\n");
    printf("GPU time : %lf sec\n",(double)((gt2-gt1)/(double)CLOCKS_PER_SEC));
    printf("CPU time : %lf sec\n",(double)((ct2-ct1)/(double)CLOCKS_PER_SEC));

    cudaFree(d_arr);
    cudaFree(d_temp);
    free(arr);
    free(tarr);

    cudaDeviceReset();
    return 0;
}
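The pair above reduces each block with neighbored pairs directly in global memory and finishes the sum on the host. Note that the guard if(gid>l) lets a thread with gid == l through; that is harmless here only because the grid covers exactly shape threads, so no such thread exists, but >= is the conventional check. Below is a hedged alternative sketch, not part of either file above, that performs the same per-block reduction in shared memory so the input array is left intact; the kernel name is hypothetical and blockDim.x is assumed to be a power of two.

// Alternative sketch (assumption: power-of-two block size). CUDA form shown;
// the HIP counterpart would only swap the launch for hipLaunchKernelGGL.
__global__ void reduction_shared(const int *arr, int *partial, int n)
{
    extern __shared__ int smem[];                    // dynamic shared memory, one int per thread
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;

    smem[tid] = (gid < n) ? arr[gid] : 0;            // strict bounds check; input stays untouched
    __syncthreads();

    for (int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if (tid < offset)
            smem[tid] += smem[tid + offset];
        __syncthreads();
    }

    if (tid == 0)
        partial[blockIdx.x] = smem[0];               // one partial sum per block, summed on the host
}
// Launch: reduction_shared<<<grid, block, block.x * sizeof(int)>>>(d_arr, d_temp, shape);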
83f96529f3ccb9b9a04c08f6ddf7ca6a522ff379.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pm.h" // ----------------------------------------------------------------------------------------------------------------------------# // ----------------------------------------------------------------------------------------------------------------------------# /* @section */ static __forceinline__ __device__ __host__ vec3 operator+(vec3 v, f32 s){ return {v.x0+s, v.x1+s, v.x2+s}; } // Scalar addition! static __forceinline__ __device__ __host__ vec3 operator-(vec3 v, f32 s){ return {v.x0-s, v.x1-s, v.x2-s}; } // Scalar subtraction! static __forceinline__ __device__ __host__ vec3 operator*(f32 s, vec3 v){ return {s*v.x0, s*v.x1, s*v.x2}; } // Scalar multiplication! static __forceinline__ __device__ __host__ vec3 operator/(vec3 v, f32 s){ return {v.x0/s, v.x1/s, v.x2/s}; } // Scalar division! static __forceinline__ __device__ __host__ vec3 operator+(vec3 v0, vec3 v1){ return {v0.x0+v1.x0, v0.x1+v1.x1, v0.x2+v1.x2}; } // Vector elementwise addition! static __forceinline__ __device__ __host__ vec3 operator-(vec3 v0, vec3 v1){ return {v0.x0-v1.x0, v0.x1-v1.x1, v0.x2-v1.x2}; } // Vector elementwise subtraction! static __forceinline__ __device__ __host__ vec3 operator*(vec3 v0, vec3 v1){ return {v0.x0*v1.x0, v0.x1*v1.x1, v0.x2*v1.x2}; } // Vector elementwise multiplication! static __forceinline__ __device__ __host__ vec3 operator/(vec3 v0, vec3 v1){ return {v0.x0/v1.x0, v0.x1/v1.x1, v0.x2/v1.x2}; } // Vector elementwise division! static __forceinline__ __device__ __host__ f32 dot( vec3 v0, vec3 v1){ return v0.x0*v1.x0 + v0.x1*v1.x1 + v0.x2*v1.x2; } // Quite important for triangle intersection and a bit for the path tracer! static __forceinline__ __device__ __host__ vec3 cross(vec3 v0, vec3 v1){ // Homology of R3: 0 --> 1 --> 2 --> 0 --> 1 --> 2 --> 0 --> ... return {v0.x1*v1.x2 - v0.x2*v1.x1, // 0 --> 1 --> 2 v0.x2*v1.x0 - v0.x0*v1.x2, // 1 --> 2 --> 0 v0.x0*v1.x1 - v0.x1*v1.x0}; // 2 --> 0 --> 1 } static __forceinline__ __device__ __host__ vec3 normalize(vec3 v){ f32 s = rsqrtf(dot(v,v)); return {s*v.x0, s*v.x1, s*v.x2}; } // ----------------------------------------------------------------------------------------------------------------------------# static __forceinline__ __device__ __host__ quat operator*(f32 s, quat q){ return {s*q.x0, s*q.x1, s*q.x2, s*q.x3}; } // Scalar multiplication! static __forceinline__ __device__ __host__ quat operator+(quat v0, quat v1){ return {v0.x0+v1.x0, v0.x1+v1.x1, v0.x2+v1.x2, v0.x3+v1.x3}; } // Vector addition! static __forceinline__ __device__ __host__ quat operator*(quat q0, quat q1){ return {q0.x0*q1.x0 - q0.x1*q1.x1 - q0.x2*q1.x2 - q0.x3*q1.x3, q0.x0*q1.x1 + q0.x1*q1.x0 + q0.x2*q1.x3 - q0.x3*q1.x2, q0.x0*q1.x2 - q0.x1*q1.x3 + q0.x2*q1.x0 + q0.x3*q1.x1, q0.x0*q1.x3 + q0.x1*q1.x2 - q0.x2*q1.x1 + q0.x3*q1.x0}; } static __forceinline__ __device__ __host__ quat conj( quat q){ return {q.x0, -q.x1, -q.x2, -q.x3}; } // The quaternion inverse of a quaternion `q` is just `conj(q) / quad(q)`, just like for complex numbers! static __forceinline__ __device__ __host__ f32 dot( quat q0, quat q1){ return q0.x0*q1.x0 + q0.x1*q1.x1 + q0.x2*q1.x2 + q0.x3*q1.x3; } // Quite important for triangle intersection and a bit for the path tracer! 
static __forceinline__ __device__ __host__ quat normalize(quat q){ return rsqrtf(dot(q,q)) * q; } static __forceinline__ __device__ __host__ quat versor(f32 angle, vec3 dir){ vec3 v = __sinf(.5*angle)*normalize(dir); return {__cosf(.5*angle), v.x0,v.x1,v.x2}; // If @dir isn't a `direction vector` (ie. a unit vector), then the rotation speed is not constant, methinks! } static __forceinline__ __device__ __host__ vec3 qrotl(vec3 v, quat versor){ // WARN! @versor must be a unit-quaternion! quat p_rot = versor * (quat){0,v.x0,v.x1,v.x2} * conj(versor); // Left-conjugation by @versor! The quaternion-inverse of a unit-quaternion is its quaternion-conjugate! return {p_rot.x1, p_rot.x2, p_rot.x3}; } static __forceinline__ __device__ __host__ vec3 qrotr(vec3 v, quat versor){ // WARN! @versor must be a unit-quaternion! quat p_rot = conj(versor) * (quat){0,v.x0,v.x1,v.x2} * versor; // Right-conjugation by @versor! The quaternion-inverse of a unit-quaternion is its quaternion-conjugate! return {p_rot.x1, p_rot.x2, p_rot.x3}; } // ----------------------------------------------------------------------------------------------------------------------------# __forceinline__ __device__ vec3 clamp01(vec3 v){ return {__saturatef(v.x0), __saturatef(v.x1), __saturatef(v.x2)}; } __forceinline__ __device__ f32 fminf(f32 a, f32 b, f32 c){ return fminf(fminf(a,b),c); } __forceinline__ __device__ f32 fmaxf(f32 a, f32 b, f32 c){ return fmaxf(fmaxf(a,b),c); } __forceinline__ __device__ f32 fminf(vec3 a){ return fminf(a.x0,a.x1,a.x2); } __forceinline__ __device__ f32 fmaxf(vec3 a){ return fmaxf(a.x0,a.x1,a.x2); } __forceinline__ __device__ vec3 fminf(vec3 a0, vec3 a1){ return {fminf(a0.x0,a1.x0), fminf(a0.x1,a1.x1), fminf(a0.x2,a1.x2)}; } __forceinline__ __device__ vec3 fmaxf(vec3 a0, vec3 a1){ return {fmaxf(a0.x0,a1.x0), fmaxf(a0.x1,a1.x1), fmaxf(a0.x2,a1.x2)}; } __forceinline__ __device__ vec3 fminf(vec3 a0, vec3 a1, vec3 a2){ return fminf(a2,fminf(a1,a0)); } __forceinline__ __device__ vec3 fmaxf(vec3 a0, vec3 a1, vec3 a2){ return fmaxf(a2,fmaxf(a1,a0)); } // ----------------------------------------------------------------------------------------------------------------------------# __forceinline__ __device__ f32 rgb_gamma_decode(f32 channel){ return __powf(channel, 2.2/1); } __forceinline__ __device__ f32 rgb_gamma_encode(f32 channel){ return __powf(channel, 1/2.2); } __forceinline__ __device__ f32 rgb_u8_to_f32( u32 channel){ return rgb_gamma_decode(channel/255.); } // Read "from disk" "to memory", map from nonlinear color space (for monitor displaying) to linear color space (for computations)! __forceinline__ __device__ u32 rgb_f32_to_u8( f32 channel){ return 255.*rgb_gamma_encode(channel) + .5; } // Write "from memory" "to disk", map from linear color space (for computations) to nonlinear color space (for monitor displaying)! 
__forceinline__ __device__ vec3 bgr8u_to_rgb32f(u32 bgr8u){ return {rgb_u8_to_f32((bgr8u>>0x10)&0xff), rgb_u8_to_f32((bgr8u>>0x08)&0xff), rgb_u8_to_f32((bgr8u>>0x00)&0xff)}; } __forceinline__ __device__ vec3 rgb8u_to_rgb32f(u32 bgr8u){ return {rgb_u8_to_f32((bgr8u>>0x00)&0xff), rgb_u8_to_f32((bgr8u>>0x08)&0xff), rgb_u8_to_f32((bgr8u>>0x10)&0xff)}; } __forceinline__ __device__ u32 rgb32f_to_rgbu8(vec3 rgbf32){ return (rgb_f32_to_u8(rgbf32.x0)<<0x00) | (rgb_f32_to_u8(rgbf32.x1)<<0x08) | (rgb_f32_to_u8(rgbf32.x2)<<0x10); } __forceinline__ __device__ f32 rand_f32(u32* seed0, u32* seed1){ // RNG from github.com/gz/rust-raytracer *seed0 = 36969*(*seed0&0xffff) + (*seed0>>0x10); *seed1 = 18489*(*seed1&0xffff) + (*seed1>>0x10); u32 val_u32 = 0x40000000 | (((*seed0<<0x10) + *seed1) & 0x007fffff); return .5f * (*(f32*)&val_u32) - 1.f; } // ----------------------------------------------------------------------------------------------------------------------------# // ----------------------------------------------------------------------------------------------------------------------------# /* @section path tracer! Based on Sam Lapere's path tracer: */ // ----------------------------------------------------------------------------------------------------------------------------# static __forceinline__ __device__ f32 pt_aabb_intersect(aabb_t self, ray_t ray){ // Intersect this primitive with a ray! Return the distance, or 0 if there's no hit! vec3 t_min = (self.min - ray.pos) / ray.dir; vec3 t_max = (self.max - ray.pos) / ray.dir; vec3 real_min = fminf(t_min, t_max); vec3 real_max = fmaxf(t_min, t_max); f32 maxmin = fmaxf(real_min); f32 minmax = fminf(real_max); if(minmax>=maxmin) return maxmin; else return 0.f; } static __forceinline__ __device__ vec3 pt_aabb_normal(aabb_t self, vec3 p){ vec3 normal; if( fabsf(self.min.x0-p.x0)<PM_EPSILON) normal={-1, 0, 0}; // Only 6 possible normals, one possibility for each face of the box! else if(fabsf(self.max.x0-p.x0)<PM_EPSILON) normal={ 1, 0, 0}; else if(fabsf(self.min.x1-p.x1)<PM_EPSILON) normal={ 0,-1, 0}; else if(fabsf(self.max.x1-p.x1)<PM_EPSILON) normal={ 0, 1, 0}; else if(fabsf(self.min.x2-p.x2)<PM_EPSILON) normal={ 0, 0,-1}; else normal={ 0, 0, 1}; return normal; // A normal MUST be a direction vector, ie. a unit vector! But notice each vector above IS already normalized! =D } // ---------------------------------------------------------------- static __forceinline__ __device__ f32 pt_triangle_intersect(triangle_t self, ray_t ray){ // Intersect this geometric primitive with a ray! Return the distance, or 0 if there's no hit! vec3 op = ray.pos - self.vert0; vec3 pvec = cross(ray.dir,self.edge02); f32 det = __fdividef(1.f, dot(self.edge01,pvec)); // `det<0` means FRONT! f32 u = det * dot(op,pvec); if(u<0.f || u >1.f) return 0.f; // No intersection! Early exit DOES help! vec3 qvec = cross(op,self.edge01); f32 v = det * dot(ray.dir,qvec); if(v<0.f || u+v>1.f) return 0.f; // No intersection! return det * dot(self.edge02,qvec); // `det<0` means FRONT! } static __forceinline__ __device__ vec3 pt_triangle_normal(triangle_t self, vec3 x){ // A triangle has to curvature, so it's normal vector field is a CONSTANT vector field: it's value is the same across all points on the surface! return normalize(cross(self.edge01,self.edge02)); // The cross product of two triangle edges yields a vector orthogonal to the triangle plane! A normal MUST be a direction vector, ie. a unit vector! 
} // ---------------------------------------------------------------- static __forceinline__ __device__ f32 pt_light_intersect(light_t self, ray_t ray){ // Intersect this geometric primitive with a ray! Return the distance, or 0 if there's no hit! return pt_triangle_intersect({self.vert0, self.edge01, self.edge02}, ray); } static __forceinline__ __device__ vec3 pt_light_normal(light_t self, vec3 x){ // A triangle has to curvature, so it's normal vector field is a CONSTANT vector field: it's value is the same across all points on the surface! return normalize(cross(self.edge01,self.edge02)); // The cross product of two triangle edges yields a vector orthogonal to the triangle plane! A normal MUST be a direction vector, ie. a unit vector! } // ---------------------------------------------------------------- static __forceinline__ __device__ f32 pt_cylinder_intersect(cylinder_t self, ray_t ray){ // Intersect this geometric primitive with a ray! Return the distance, or 0 if there's no hit! if(self.height==0.f) return 0.f; // This allows us to have "trivial" primitives in the mesh and not break the path tracer! vec3 op = ray.pos - self.pos; f32 op_dir = dot(op,self.dir); vec3 op_dir_vec = op_dir*self.dir; vec3 x = ray.dir - dot(ray.dir,self.dir)*self.dir; vec3 y = op - op_dir_vec; f32 A = dot(x,x); f32 B = 2.f*dot(x,y); f32 C = dot(y,y) - self.radius*self.radius; f32 dscr = B*B - 4*A*C; // Discriminant of the quadratic equation! if(dscr<0.f) return 0.f; else{ f32 top; vec3 bot_base = self.pos; vec3 top_base = self.pos + self.height*self.dir; top = -B-sqrtf(dscr); if(top>0.f){ f32 t=top/(2.f*A); vec3 q=ray.pos+t*ray.dir; if(dot(self.dir, q-bot_base)>0.f && dot(self.dir, q-top_base)<0.f) return t; else return 0.f; } // No 'FAST intrinsic for sqrt? top = -B+sqrtf(dscr); if(top>0.f){ f32 t=top/(2.f*A); vec3 q=ray.pos+t*ray.dir; if(dot(self.dir, q-bot_base)>0.f && dot(self.dir, q-top_base)<0.f) return t; else return 0.f; } // No 'FAST intrinsic for sqrt? } return 0.f; } static __forceinline__ __device__ vec3 pt_cylinder_normal(cylinder_t self, vec3 x){ // Return the normal to the cylinder at a point @x vec3 a = x - self.pos; vec3 b = self.dir; vec3 r = a - dot(a,b)*b; // a - dot(a,b)/dot(b,b)*b; // Vector rejection of `a` on `b`, but optimized since `b` is a unit vector! return normalize(r); } // ---------------------------------------------------------------- static __forceinline__ __device__ f32 pt_sphere_intersect(sphere_t self, ray_t ray){ if(self.radius==0.f) return 0.f; vec3 op = self.pos - ray.pos; f32 b = dot(op, ray.dir); // `b` term in the sphere's quadratic equation f32 disc = b*b - dot(op,op) + self.radius*self.radius; // The disc in the sphere's quadratic equation if(disc<0.f) return 0.f; // If the discriminant is negative, then there's only complex roots! disc = __fsqrt_rn(disc); // If discriminant non-negative, then check for roots using negative and positive discriminant! f32 t; t = b-disc; if(t>0.f) return t; // Pick closest point in front of ray origin? 
t = b+disc; if(t>0.f) return t; return 0.f; } static __forceinline__ __device__ vec3 pt_sphere_normal(sphere_t self, vec3 x){ return normalize(x - self.pos); } // ---------------------------------------------------------------- static __forceinline__ __device__ hit_t pt_scene_intersect(ray_t ray, scene_t scene){ hit_t hit = {gtype:GTYPE_UNKNOWN, id:0xffffffff, t:1e38}; // ---------------------------------------------------------------- // Record the position of the closest intersection point in RAY COORDINATES (which are 1-dimensional, so you need a single number), and also the ID of the object in question for(int i=0; i<scene.nlights; ++i){ f32 t = pt_light_intersect(scene.lights[i], ray); if(t<PM_EPSILON || t>hit.t) continue; hit.t = t; hit.id = i; hit.gtype = GTYPE_LIGHT; } #if 0 for(int i=0; i<scene.ntriangles; ++i){ f32 t = pt_triangle_intersect(scene.triangles[i], ray); if(t<PM_EPSILON || t>hit.t) continue; hit.t = t; hit.id = i; hit.gtype = GTYPE_TRIANGLE; } #endif #if 0 for(i32 i=0; i<scene.ncylinders; ++i){ f32 t = pt_cylinder_intersect(scene.cylinders[i], ray); if(t<PM_EPSILON || t>hit.t) continue; hit.t = t; hit.id = i; hit.gtype = GTYPE_CYLINDER; } #endif #if 0 for(i32 i=0; i<scene.nspheres; ++i){ f32 t = pt_sphere_intersect(scene.spheres[i], ray); if(t<PM_EPSILON || t>hit.t) continue; hit.t = t; hit.id = i; hit.gtype = GTYPE_SPHERE; } #endif // ---------------------------------------------------------------- #if 1 // Mesh intersection with a BVH! 5x-10x faster than without a BVH! { bvh_node_t* node_stack[BVH_STACK_NELEMS]; // Use static allocation! Use stack to traverse BVH to save space, cost is O(height) i32 stack_idx = BVH_STACK_NELEMS; node_stack[--stack_idx] = scene.mesh0.tree_nodes; // The root is the LAST element of the stack, ie. the element at position BVH_STACK_NELEMS! while(stack_idx != BVH_STACK_NELEMS){ // Stack-based recursion! bvh_node_t* node = node_stack[stack_idx++]; if(!pt_aabb_intersect(node->node_aabb, ray)) continue; // In BVH-based path tracing, MOST of the BVH intersection-traversal is ray/AABB! To intersect a NON-LEAF (every level other than the last), we use ray/AABB intersection! To intersect a LEAF (only the last level), we use ray/triangle intersection! if(!node->is_leaf){ // We'll only call the primitive-intersection routine at the LEAVES of the BVH! node_stack[--stack_idx] = node->right; node_stack[--stack_idx] = node->left; // if(stack_idx<0){ printf("The BVH stack not big enough! Increase BVH_STACK_NELEMS!\n"); return hit; } continue; } // The spheres don't need an AABB! The non-leaf nodes don't need a sphere or a sphere index! u32 id = node->primitive_idx; // sphere_t primitive = node->primitive; // Storing the sphere full data is just a bit slower than storing just the sphere index! sphere_t sphere = ((sphere_t*)scene.mesh0.data)[id]; f32 t = pt_sphere_intersect(sphere, ray); if(t<PM_EPSILON || t>hit.t) continue; // Ray/sphere intersection only happens at the leaves! hit.t = t; hit.id = id; // NOW we need the sphere struct to hold the sphere index, since we're NOT traversing the sphere array in linear order! hit.gtype = scene.mesh0.gtype; } } #endif // Mesh intersection with a BVH! // ---------------------------------------------------------------- return hit; } // ---------------------------------------------------------------- static __forceinline__ __device__ vec3 pt_normal_out(vec3 normal, vec3 ray_dir){ return dot(normal,ray_dir)<0 ? normal : -1*normal; // "Outwards" normal, to create a "bounce"! 
} static __forceinline__ __device__ vec3 pt_hemisphere_randdir(vec3 normal, uint* seed_x, uint* seed_y){ // Sample a random direction on the dome/hemisphere around the hitpoint base on the normal at that point! // Compute local orthonormal basis uvw at hitpoint, to compute the (random) ray direction. 1st vector is the normal, 2nd vector is orthogonal to 1st, 3rd vector is orthogonal to first others vec3 basis_w = normal; vec3 axis = fabs(basis_w.x0)<.1f ? (vec3){1,0,0} : (vec3){0,1,0}; vec3 basis_u = normalize(cross(axis, basis_w)); // We shouldn't need to normalize this, but, if we don't, then we introduce artifacts! vec3 basis_v = cross(basis_w, basis_u); // Right-handed uvw-basis! The homology is: u -> v -> w -> u -> ... // All our geometric primitives (just triangles) are diffuse, which reflect light uniformly in all directions! Generate random direction in hemisphere above hitpoint (see "Realistic Ray Tracing", P. Shirley) f32 rand_tau = rand_f32(seed_x,seed_y) * M_TAU; // Get random number on unit circle for azimuth f32 rand_one = rand_f32(seed_x,seed_y); // Get random number for elevation f32 rand_sqrt = sqrtf(rand_one); // No FAST intrinsic for sqrt? f32 cos_tau, sin_tau; __sincosf(rand_tau, &sin_tau,&cos_tau); return cos_tau*rand_sqrt*basis_u + sin_tau*rand_sqrt*basis_v + sqrtf(1.f-rand_one)*basis_w; // Random ray direction on the hemisphere/dome around a point! Cosine-weighted importance sampling, favours ray directions closer to normal direction! } // ---------------------------------------------------------------- static __device__ vec3 pt_radiance_path_integral(ray_t ray, fb_t fb, scene_t scene, uint* seed_x,uint* seed_y){ // i32 nlights,light_t* lights, i32 ntriangles,triangle_t* triangles, i32 ncylinders,cylinder_t* cylinders, i32 nspheres,sphere_t* spheres vec3 rgb = {0,0,0}; vec3 fade = {1,1,1}; // 0) Scene intersection! for(int bounce=0; bounce<fb.nbounces; ++bounce){ hit_t hit = pt_scene_intersect(ray, scene); if(hit.t==1e38f) return {0,0,0}; // No intersection/hit! Return black! vec3 hit_pos = ray.pos + hit.t*ray.dir; // @hit_pos is the hit position in WORLD COORDINATES! @hit.t is the hit position in RAY COORDINATES! // ---------------------------------------------------------------- vec3 obj_normal, obj_rgb, obj_emi; switch(hit.gtype){ case GTYPE_LIGHT:{ light_t light = scene.lights[hit.id]; obj_normal = pt_light_normal(light, hit_pos); obj_rgb = {0,0,0}; obj_emi = light.emission; }break; #if 0 case GTYPE_TRIANGLE:{ triangle_t triangle = scene.triangles[hit.id]; obj_normal = pt_triangle_normal(triangle, hit_pos); obj_rgb = rgb8u_to_rgb32f(triangle.albedo); obj_emi = {0,0,0}; }break; #endif #if 0 case GTYPE_CYLINDER:{ cylinder_t cylinder = scene.cylinders[hit.id]; obj_normal = pt_cylinder_normal(cylinder, hit_pos); obj_rgb = rgb8u_to_rgb32f(cylinder.albedo); obj_emi = {0,0,0}; }break; #endif #if 1 case GTYPE_SPHERE:{ sphere_t sphere = ((sphere_t*)scene.mesh0.data)[hit.id]; obj_normal = pt_sphere_normal(sphere, hit_pos); obj_rgb = bgr8u_to_rgb32f(sphere.albedo); obj_emi = {0,0,0}; }break; #endif } // ---------------------------------------------------------------- vec3 obj_normal_out = pt_normal_out(obj_normal, ray.dir); // "Outwards" normal, to create a "bounce"! vec3 bounce_dir = pt_hemisphere_randdir(obj_normal, seed_x,seed_y); // 1) Light transport! rgb = rgb + fade*obj_emi; // Add emission of current object to accumulated color (first term in rendering equation sum) fade = dot(obj_normal_out, bounce_dir) * obj_rgb * fade; // Integrate/sum/accumulate the fade! 
Weigh light/color energy using cosine of angle between normal and incident light! // 2) Ray/path bouncing! ray.pos = hit_pos + 0.0001f*obj_normal_out; // Launch a new raw starting by "bouncing" it from the object! Offset ray position slightly to prevent self intersection ray.dir = bounce_dir; } return rgb; } // ----------------------------------------------------------------------------------------------------------------------------# // ----------------------------------------------------------------------------------------------------------------------------# static __forceinline__ __device__ aabb_t aabb3d_sphere(sphere_t sphere){ return {.min=sphere.pos - sphere.radius, .max=sphere.pos + sphere.radius}; } static __forceinline__ __device__ u64 gpu_zorder3D(vec3 x, aabb_t mesh_aabb){ // Compute the 1D position of a 3D position @x in a 1D Z-order curve living in 3D space, given a particular (global) AABB! NOTE: The AABB must be GLOBAL for the whole mesh that the 3D position belongs to! x.x0 = (x.x0 - mesh_aabb.min.x0) / (mesh_aabb.max.x0 - mesh_aabb.min.x0); // Map @x in @mesh_aabb to the 3D interval [0 .. 1]^3 x.x1 = (x.x1 - mesh_aabb.min.x1) / (mesh_aabb.max.x1 - mesh_aabb.min.x1); // Map @x in @mesh_aabb to the 3D interval [0 .. 1]^3 x.x2 = (x.x2 - mesh_aabb.min.x2) / (mesh_aabb.max.x2 - mesh_aabb.min.x2); // Map @x in @mesh_aabb to the 3D interval [0 .. 1]^3 u64 morton_a = (u64)x.x0 * (1ull<<BVH_MORTON_PRECISION); // Map @x in @mesh_aabb to the 3D interval [0 .. 2**BVH_MORTON_PRECISION]^3, meaning each coordinate can be represented using BVH_MORTON_PRECISION bits! (Although I think we lose the highest in the mesh AABB?) u64 morton_b = (u64)x.x1 * (1ull<<BVH_MORTON_PRECISION); // Map @x in @mesh_aabb to the 3D interval [0 .. 2**BVH_MORTON_PRECISION]^3, meaning each coordinate can be represented using BVH_MORTON_PRECISION bits! (Although I think we lose the highest in the mesh AABB?) u64 morton_c = (u64)x.x2 * (1ull<<BVH_MORTON_PRECISION); // Map @x in @mesh_aabb to the 3D interval [0 .. 2**BVH_MORTON_PRECISION]^3, meaning each coordinate can be represented using BVH_MORTON_PRECISION bits! (Although I think we lose the highest in the mesh AABB?) u64 morton_code = 0x0000000000000000ull; for(int i=0; i<BVH_MORTON_PRECISION; ++i){ // Combine into 63 bits morton code! morton_code |= (((((morton_a >> (BVH_MORTON_PRECISION-1-i))) & 1) << ((BVH_MORTON_PRECISION-i)*3 - 1)) | ((((morton_b >> (BVH_MORTON_PRECISION-1-i))) & 1) << ((BVH_MORTON_PRECISION-i)*3 - 2)) | ((((morton_c >> (BVH_MORTON_PRECISION-1-i))) & 1) << ((BVH_MORTON_PRECISION-i)*3 - 3))); } return morton_code; } // ----------------------------------------------------------------------------------------------------------------------------# extern "C" __global__ void ker_light_shader(fb_t fb, scene_t scene){ // Mesh of lights! f32 p = 1e1f; f32 x = 1e4f; f32 z = 1e2f; scene.lights[0] = {{-p,-p,+z}, { 0, 0,-x}, { 0,+x, 0}, {1.4,1.4,1.8}}; // Left face! scene.lights[1] = {{+p,-p,+z}, { 0,+x, 0}, { 0, 0,-x}, {1.4,1.4,1.8}}; // Right face! scene.lights[2] = {{-p,-p,+z}, {+x, 0, 0}, { 0, 0,-x}, {1.4,1.4,1.8}}; // Bottom face! scene.lights[3] = {{-p,+p,+z}, { 0, 0,-x}, {+x, 0, 0}, {1.4,1.4,1.8}}; // Top face! scene.lights[4] = {{-p,-p,-p}, {+x, 0, 0}, { 0,+x, 0}, rgb8u_to_rgb32f(0x080808)}; // Back face! scene.lights[5] = {{-p,-p,+z}, {+x, 0, 0}, { 0,+x, 0}, {1.4,1.4,1.8}}; // Front face! 
} // ----------------------------------------------------------------------------------------------------------------------------# extern "C" __global__ void ker_mesh0_shader(fb_t fb, scene_t scene){ i32 thr_lidx = blockIdx.x*blockDim.x + threadIdx.x; if(thr_lidx>=scene.mesh0.nelems) return; sphere_t* spheres = (sphere_t*)scene.mesh0.data; // ---------------------------------------------------------------- quat rot_yz = versor(scene.rot.x0, {1,0,0}); quat rot_zx = versor(scene.rot.x1, {0,1,0}); quat rot_xy = versor(scene.rot.x2, {0,0,1}); quat rot = rot_yz*rot_zx*rot_xy; spheres[thr_lidx].pos = qrotl(spheres[thr_lidx].pos, rot) + scene.mov; // ---------------------------------------------------------------- aabb_t mesh_aabb = {.min={-1,-1,-1}, .max={1,1,1}}; // Global AABB for ALL the triangles in this mesh! sphere_t sphere = spheres[thr_lidx]; scene.mesh0.aabbs[ thr_lidx] = aabb3d_sphere(sphere); scene.mesh0.mortons[thr_lidx] = gpu_zorder3D(sphere.pos, mesh_aabb); // TODO! Everything is zero! scene.mesh0.idxs[ thr_lidx] = thr_lidx; // printf("%d\n", scene.mesh0.idxs[thr_lidx]); } // ----------------------------------------------------------------------------------------------------------------------------# // ----------------------------------------------------------------------------------------------------------------------------# /* @section */ extern "C" __global__ void ker_pixel_shader(fb_t fb, scene_t scene){ i32 thr_lvl1_2idx_x = blockIdx.x*blockDim.x + threadIdx.x; i32 thr_lvl1_2idx_y = blockIdx.y*blockDim.y + threadIdx.y; if(fb.tile_pos_c+fb.tile_dim_w<=fb.tile_pos_c+thr_lvl1_2idx_x || fb.tile_pos_r+fb.tile_dim_h<=fb.tile_pos_r+thr_lvl1_2idx_y) return; u32 seed_x = thr_lvl1_2idx_x + fb.seed; u32 seed_y = thr_lvl1_2idx_y + fb.seed; // TODO! Maybe we should hoist this out of the kernel, since the camera computations are the same for all threads and all GPUs! vec3 cam_pos = fb.cam_pos + fb.cam_mov; quat cam_rot_yz = versor(fb.cam_rot.x0, {1,0,0}); quat cam_rot_zx = versor(fb.cam_rot.x1, {0,1,0}); quat cam_rot_xy = versor(fb.cam_rot.x2, {0,0,1}); quat cam_rot = cam_rot_yz*cam_rot_zx*cam_rot_xy; vec3 cam_dir = qrotl(fb.cam_dir, cam_rot); vec3 cam_dir_x = qrotl(.5f*PM_CAM_FOV * (vec3){(f32)fb.img_dim_w/fb.img_dim_h, 0, 0}, cam_rot); // Cam ray is directed at the lower-left corner of the screen! vec3 cam_dir_y = .5f*PM_CAM_FOV * normalize(cross(cam_dir, -1*cam_dir_x)); // Cam ray is directed at the lower-left corner of the screen! // ---------------------------------------------------------------- vec3 px_rgb = {0,0,0}; // Final pixel color! Init to zero for each pixel! for(int sample=0; sample<fb.nsamples; ++sample){ // Samples per pixel! Camera rays are pushed forward to start in interior f32 cam_dx = (thr_lvl1_2idx_x + rand_f32(&seed_x,&seed_y)) / fb.img_dim_w - .5f; f32 cam_dy = (thr_lvl1_2idx_y + rand_f32(&seed_x,&seed_y)) / fb.img_dim_h - .5f + (f32)fb.tile_pos_r/fb.img_dim_h; vec3 px_dir = cam_dir + cam_dx*cam_dir_x + cam_dy*cam_dir_y; vec3 px_pos = cam_pos; ray_t px_ray = {px_pos, normalize(px_dir)}; px_rgb = px_rgb + 1.f/fb.nsamples * pt_radiance_path_integral(px_ray, fb, scene, &seed_x,&seed_y); } // ---------------------------------------------------------------- u32 tile_lidx = thr_lvl1_2idx_y*fb.img_dim_w + thr_lvl1_2idx_x; fb.tile_accum[tile_lidx] = fb.tile_accum[tile_lidx] + px_rgb; vec3 rgb = fb.tile_accum[tile_lidx] / (fb.frame+1); fb.tile_data[tile_lidx] = rgb32f_to_rgbu8(clamp01(rgb)); }
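A likely cause of the "TODO! Everything is zero!" note in ker_mesh0_shader above: in gpu_zorder3D the cast binds before the multiplication, so (u64)x.x0 is already 0 for any normalized coordinate below 1, and the subsequent multiply keeps the whole Morton code at zero. A hedged sketch of one possible fix follows; the helper name is hypothetical, while u64, f32 and BVH_MORTON_PRECISION are taken from the file above. The bit-interleaving loop itself can stay as it is.

// Hypothetical helper: scale the normalized coordinate FIRST, then truncate,
// and clamp t == 1.0 back into the BVH_MORTON_PRECISION-bit range.
static __forceinline__ __device__ u64 scale_to_morton_axis(f32 t){  // t assumed in [0,1]
  u64 scaled  = (u64)(t * (f32)(1ull<<BVH_MORTON_PRECISION));
  u64 max_val = (1ull<<BVH_MORTON_PRECISION) - 1;
  return scaled>max_val ? max_val : scaled;
}
// In gpu_zorder3D, the three axis values would then read:
//   u64 morton_a = scale_to_morton_axis(x.x0);
//   u64 morton_b = scale_to_morton_axis(x.x1);
//   u64 morton_c = scale_to_morton_axis(x.x2);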
83f96529f3ccb9b9a04c08f6ddf7ca6a522ff379.cu
#include "pm.h" // ----------------------------------------------------------------------------------------------------------------------------# // ----------------------------------------------------------------------------------------------------------------------------# /* @section */ static __forceinline__ __device__ __host__ vec3 operator+(vec3 v, f32 s){ return {v.x0+s, v.x1+s, v.x2+s}; } // Scalar addition! static __forceinline__ __device__ __host__ vec3 operator-(vec3 v, f32 s){ return {v.x0-s, v.x1-s, v.x2-s}; } // Scalar subtraction! static __forceinline__ __device__ __host__ vec3 operator*(f32 s, vec3 v){ return {s*v.x0, s*v.x1, s*v.x2}; } // Scalar multiplication! static __forceinline__ __device__ __host__ vec3 operator/(vec3 v, f32 s){ return {v.x0/s, v.x1/s, v.x2/s}; } // Scalar division! static __forceinline__ __device__ __host__ vec3 operator+(vec3 v0, vec3 v1){ return {v0.x0+v1.x0, v0.x1+v1.x1, v0.x2+v1.x2}; } // Vector elementwise addition! static __forceinline__ __device__ __host__ vec3 operator-(vec3 v0, vec3 v1){ return {v0.x0-v1.x0, v0.x1-v1.x1, v0.x2-v1.x2}; } // Vector elementwise subtraction! static __forceinline__ __device__ __host__ vec3 operator*(vec3 v0, vec3 v1){ return {v0.x0*v1.x0, v0.x1*v1.x1, v0.x2*v1.x2}; } // Vector elementwise multiplication! static __forceinline__ __device__ __host__ vec3 operator/(vec3 v0, vec3 v1){ return {v0.x0/v1.x0, v0.x1/v1.x1, v0.x2/v1.x2}; } // Vector elementwise division! static __forceinline__ __device__ __host__ f32 dot( vec3 v0, vec3 v1){ return v0.x0*v1.x0 + v0.x1*v1.x1 + v0.x2*v1.x2; } // Quite important for triangle intersection and a bit for the path tracer! static __forceinline__ __device__ __host__ vec3 cross(vec3 v0, vec3 v1){ // Homology of R3: 0 --> 1 --> 2 --> 0 --> 1 --> 2 --> 0 --> ... return {v0.x1*v1.x2 - v0.x2*v1.x1, // 0 --> 1 --> 2 v0.x2*v1.x0 - v0.x0*v1.x2, // 1 --> 2 --> 0 v0.x0*v1.x1 - v0.x1*v1.x0}; // 2 --> 0 --> 1 } static __forceinline__ __device__ __host__ vec3 normalize(vec3 v){ f32 s = rsqrtf(dot(v,v)); return {s*v.x0, s*v.x1, s*v.x2}; } // ----------------------------------------------------------------------------------------------------------------------------# static __forceinline__ __device__ __host__ quat operator*(f32 s, quat q){ return {s*q.x0, s*q.x1, s*q.x2, s*q.x3}; } // Scalar multiplication! static __forceinline__ __device__ __host__ quat operator+(quat v0, quat v1){ return {v0.x0+v1.x0, v0.x1+v1.x1, v0.x2+v1.x2, v0.x3+v1.x3}; } // Vector addition! static __forceinline__ __device__ __host__ quat operator*(quat q0, quat q1){ return {q0.x0*q1.x0 - q0.x1*q1.x1 - q0.x2*q1.x2 - q0.x3*q1.x3, q0.x0*q1.x1 + q0.x1*q1.x0 + q0.x2*q1.x3 - q0.x3*q1.x2, q0.x0*q1.x2 - q0.x1*q1.x3 + q0.x2*q1.x0 + q0.x3*q1.x1, q0.x0*q1.x3 + q0.x1*q1.x2 - q0.x2*q1.x1 + q0.x3*q1.x0}; } static __forceinline__ __device__ __host__ quat conj( quat q){ return {q.x0, -q.x1, -q.x2, -q.x3}; } // The quaternion inverse of a quaternion `q` is just `conj(q) / quad(q)`, just like for complex numbers! static __forceinline__ __device__ __host__ f32 dot( quat q0, quat q1){ return q0.x0*q1.x0 + q0.x1*q1.x1 + q0.x2*q1.x2 + q0.x3*q1.x3; } // Quite important for triangle intersection and a bit for the path tracer! static __forceinline__ __device__ __host__ quat normalize(quat q){ return rsqrtf(dot(q,q)) * q; } static __forceinline__ __device__ __host__ quat versor(f32 angle, vec3 dir){ vec3 v = __sinf(.5*angle)*normalize(dir); return {__cosf(.5*angle), v.x0,v.x1,v.x2}; // If @dir isn't a `direction vector` (ie. 
a unit vector), then the rotation speed is not constant, methinks! } static __forceinline__ __device__ __host__ vec3 qrotl(vec3 v, quat versor){ // WARN! @versor must be a unit-quaternion! quat p_rot = versor * (quat){0,v.x0,v.x1,v.x2} * conj(versor); // Left-conjugation by @versor! The quaternion-inverse of a unit-quaternion is its quaternion-conjugate! return {p_rot.x1, p_rot.x2, p_rot.x3}; } static __forceinline__ __device__ __host__ vec3 qrotr(vec3 v, quat versor){ // WARN! @versor must be a unit-quaternion! quat p_rot = conj(versor) * (quat){0,v.x0,v.x1,v.x2} * versor; // Right-conjugation by @versor! The quaternion-inverse of a unit-quaternion is its quaternion-conjugate! return {p_rot.x1, p_rot.x2, p_rot.x3}; } // ----------------------------------------------------------------------------------------------------------------------------# __forceinline__ __device__ vec3 clamp01(vec3 v){ return {__saturatef(v.x0), __saturatef(v.x1), __saturatef(v.x2)}; } __forceinline__ __device__ f32 fminf(f32 a, f32 b, f32 c){ return fminf(fminf(a,b),c); } __forceinline__ __device__ f32 fmaxf(f32 a, f32 b, f32 c){ return fmaxf(fmaxf(a,b),c); } __forceinline__ __device__ f32 fminf(vec3 a){ return fminf(a.x0,a.x1,a.x2); } __forceinline__ __device__ f32 fmaxf(vec3 a){ return fmaxf(a.x0,a.x1,a.x2); } __forceinline__ __device__ vec3 fminf(vec3 a0, vec3 a1){ return {fminf(a0.x0,a1.x0), fminf(a0.x1,a1.x1), fminf(a0.x2,a1.x2)}; } __forceinline__ __device__ vec3 fmaxf(vec3 a0, vec3 a1){ return {fmaxf(a0.x0,a1.x0), fmaxf(a0.x1,a1.x1), fmaxf(a0.x2,a1.x2)}; } __forceinline__ __device__ vec3 fminf(vec3 a0, vec3 a1, vec3 a2){ return fminf(a2,fminf(a1,a0)); } __forceinline__ __device__ vec3 fmaxf(vec3 a0, vec3 a1, vec3 a2){ return fmaxf(a2,fmaxf(a1,a0)); } // ----------------------------------------------------------------------------------------------------------------------------# __forceinline__ __device__ f32 rgb_gamma_decode(f32 channel){ return __powf(channel, 2.2/1); } __forceinline__ __device__ f32 rgb_gamma_encode(f32 channel){ return __powf(channel, 1/2.2); } __forceinline__ __device__ f32 rgb_u8_to_f32( u32 channel){ return rgb_gamma_decode(channel/255.); } // Read "from disk" "to memory", map from nonlinear color space (for monitor displaying) to linear color space (for computations)! __forceinline__ __device__ u32 rgb_f32_to_u8( f32 channel){ return 255.*rgb_gamma_encode(channel) + .5; } // Write "from memory" "to disk", map from linear color space (for computations) to nonlinear color space (for monitor displaying)! 
__forceinline__ __device__ vec3 bgr8u_to_rgb32f(u32 bgr8u){ return {rgb_u8_to_f32((bgr8u>>0x10)&0xff), rgb_u8_to_f32((bgr8u>>0x08)&0xff), rgb_u8_to_f32((bgr8u>>0x00)&0xff)}; } __forceinline__ __device__ vec3 rgb8u_to_rgb32f(u32 bgr8u){ return {rgb_u8_to_f32((bgr8u>>0x00)&0xff), rgb_u8_to_f32((bgr8u>>0x08)&0xff), rgb_u8_to_f32((bgr8u>>0x10)&0xff)}; } __forceinline__ __device__ u32 rgb32f_to_rgbu8(vec3 rgbf32){ return (rgb_f32_to_u8(rgbf32.x0)<<0x00) | (rgb_f32_to_u8(rgbf32.x1)<<0x08) | (rgb_f32_to_u8(rgbf32.x2)<<0x10); } __forceinline__ __device__ f32 rand_f32(u32* seed0, u32* seed1){ // RNG from github.com/gz/rust-raytracer *seed0 = 36969*(*seed0&0xffff) + (*seed0>>0x10); *seed1 = 18489*(*seed1&0xffff) + (*seed1>>0x10); u32 val_u32 = 0x40000000 | (((*seed0<<0x10) + *seed1) & 0x007fffff); return .5f * (*(f32*)&val_u32) - 1.f; } // ----------------------------------------------------------------------------------------------------------------------------# // ----------------------------------------------------------------------------------------------------------------------------# /* @section path tracer! Based on Sam Lapere's path tracer: */ // ----------------------------------------------------------------------------------------------------------------------------# static __forceinline__ __device__ f32 pt_aabb_intersect(aabb_t self, ray_t ray){ // Intersect this primitive with a ray! Return the distance, or 0 if there's no hit! vec3 t_min = (self.min - ray.pos) / ray.dir; vec3 t_max = (self.max - ray.pos) / ray.dir; vec3 real_min = fminf(t_min, t_max); vec3 real_max = fmaxf(t_min, t_max); f32 maxmin = fmaxf(real_min); f32 minmax = fminf(real_max); if(minmax>=maxmin) return maxmin; else return 0.f; } static __forceinline__ __device__ vec3 pt_aabb_normal(aabb_t self, vec3 p){ vec3 normal; if( fabsf(self.min.x0-p.x0)<PM_EPSILON) normal={-1, 0, 0}; // Only 6 possible normals, one possibility for each face of the box! else if(fabsf(self.max.x0-p.x0)<PM_EPSILON) normal={ 1, 0, 0}; else if(fabsf(self.min.x1-p.x1)<PM_EPSILON) normal={ 0,-1, 0}; else if(fabsf(self.max.x1-p.x1)<PM_EPSILON) normal={ 0, 1, 0}; else if(fabsf(self.min.x2-p.x2)<PM_EPSILON) normal={ 0, 0,-1}; else normal={ 0, 0, 1}; return normal; // A normal MUST be a direction vector, ie. a unit vector! But notice each vector above IS already normalized! =D } // ---------------------------------------------------------------- static __forceinline__ __device__ f32 pt_triangle_intersect(triangle_t self, ray_t ray){ // Intersect this geometric primitive with a ray! Return the distance, or 0 if there's no hit! vec3 op = ray.pos - self.vert0; vec3 pvec = cross(ray.dir,self.edge02); f32 det = __fdividef(1.f, dot(self.edge01,pvec)); // `det<0` means FRONT! f32 u = det * dot(op,pvec); if(u<0.f || u >1.f) return 0.f; // No intersection! Early exit DOES help! vec3 qvec = cross(op,self.edge01); f32 v = det * dot(ray.dir,qvec); if(v<0.f || u+v>1.f) return 0.f; // No intersection! return det * dot(self.edge02,qvec); // `det<0` means FRONT! } static __forceinline__ __device__ vec3 pt_triangle_normal(triangle_t self, vec3 x){ // A triangle has to curvature, so it's normal vector field is a CONSTANT vector field: it's value is the same across all points on the surface! return normalize(cross(self.edge01,self.edge02)); // The cross product of two triangle edges yields a vector orthogonal to the triangle plane! A normal MUST be a direction vector, ie. a unit vector! 
} // ---------------------------------------------------------------- static __forceinline__ __device__ f32 pt_light_intersect(light_t self, ray_t ray){ // Intersect this geometric primitive with a ray! Return the distance, or 0 if there's no hit! return pt_triangle_intersect({self.vert0, self.edge01, self.edge02}, ray); } static __forceinline__ __device__ vec3 pt_light_normal(light_t self, vec3 x){ // A triangle has to curvature, so it's normal vector field is a CONSTANT vector field: it's value is the same across all points on the surface! return normalize(cross(self.edge01,self.edge02)); // The cross product of two triangle edges yields a vector orthogonal to the triangle plane! A normal MUST be a direction vector, ie. a unit vector! } // ---------------------------------------------------------------- static __forceinline__ __device__ f32 pt_cylinder_intersect(cylinder_t self, ray_t ray){ // Intersect this geometric primitive with a ray! Return the distance, or 0 if there's no hit! if(self.height==0.f) return 0.f; // This allows us to have "trivial" primitives in the mesh and not break the path tracer! vec3 op = ray.pos - self.pos; f32 op_dir = dot(op,self.dir); vec3 op_dir_vec = op_dir*self.dir; vec3 x = ray.dir - dot(ray.dir,self.dir)*self.dir; vec3 y = op - op_dir_vec; f32 A = dot(x,x); f32 B = 2.f*dot(x,y); f32 C = dot(y,y) - self.radius*self.radius; f32 dscr = B*B - 4*A*C; // Discriminant of the quadratic equation! if(dscr<0.f) return 0.f; else{ f32 top; vec3 bot_base = self.pos; vec3 top_base = self.pos + self.height*self.dir; top = -B-sqrtf(dscr); if(top>0.f){ f32 t=top/(2.f*A); vec3 q=ray.pos+t*ray.dir; if(dot(self.dir, q-bot_base)>0.f && dot(self.dir, q-top_base)<0.f) return t; else return 0.f; } // No 'FAST intrinsic for sqrt? top = -B+sqrtf(dscr); if(top>0.f){ f32 t=top/(2.f*A); vec3 q=ray.pos+t*ray.dir; if(dot(self.dir, q-bot_base)>0.f && dot(self.dir, q-top_base)<0.f) return t; else return 0.f; } // No 'FAST intrinsic for sqrt? } return 0.f; } static __forceinline__ __device__ vec3 pt_cylinder_normal(cylinder_t self, vec3 x){ // Return the normal to the cylinder at a point @x vec3 a = x - self.pos; vec3 b = self.dir; vec3 r = a - dot(a,b)*b; // a - dot(a,b)/dot(b,b)*b; // Vector rejection of `a` on `b`, but optimized since `b` is a unit vector! return normalize(r); } // ---------------------------------------------------------------- static __forceinline__ __device__ f32 pt_sphere_intersect(sphere_t self, ray_t ray){ if(self.radius==0.f) return 0.f; vec3 op = self.pos - ray.pos; f32 b = dot(op, ray.dir); // `b` term in the sphere's quadratic equation f32 disc = b*b - dot(op,op) + self.radius*self.radius; // The disc in the sphere's quadratic equation if(disc<0.f) return 0.f; // If the discriminant is negative, then there's only complex roots! disc = __fsqrt_rn(disc); // If discriminant non-negative, then check for roots using negative and positive discriminant! f32 t; t = b-disc; if(t>0.f) return t; // Pick closest point in front of ray origin? 
t = b+disc; if(t>0.f) return t; return 0.f; } static __forceinline__ __device__ vec3 pt_sphere_normal(sphere_t self, vec3 x){ return normalize(x - self.pos); } // ---------------------------------------------------------------- static __forceinline__ __device__ hit_t pt_scene_intersect(ray_t ray, scene_t scene){ hit_t hit = {gtype:GTYPE_UNKNOWN, id:0xffffffff, t:1e38}; // ---------------------------------------------------------------- // Record the position of the closest intersection point in RAY COORDINATES (which are 1-dimensional, so you need a single number), and also the ID of the object in question for(int i=0; i<scene.nlights; ++i){ f32 t = pt_light_intersect(scene.lights[i], ray); if(t<PM_EPSILON || t>hit.t) continue; hit.t = t; hit.id = i; hit.gtype = GTYPE_LIGHT; } #if 0 for(int i=0; i<scene.ntriangles; ++i){ f32 t = pt_triangle_intersect(scene.triangles[i], ray); if(t<PM_EPSILON || t>hit.t) continue; hit.t = t; hit.id = i; hit.gtype = GTYPE_TRIANGLE; } #endif #if 0 for(i32 i=0; i<scene.ncylinders; ++i){ f32 t = pt_cylinder_intersect(scene.cylinders[i], ray); if(t<PM_EPSILON || t>hit.t) continue; hit.t = t; hit.id = i; hit.gtype = GTYPE_CYLINDER; } #endif #if 0 for(i32 i=0; i<scene.nspheres; ++i){ f32 t = pt_sphere_intersect(scene.spheres[i], ray); if(t<PM_EPSILON || t>hit.t) continue; hit.t = t; hit.id = i; hit.gtype = GTYPE_SPHERE; } #endif // ---------------------------------------------------------------- #if 1 // Mesh intersection with a BVH! 5x-10x faster than without a BVH! { bvh_node_t* node_stack[BVH_STACK_NELEMS]; // Use static allocation! Use stack to traverse BVH to save space, cost is O(height) i32 stack_idx = BVH_STACK_NELEMS; node_stack[--stack_idx] = scene.mesh0.tree_nodes; // The root is the LAST element of the stack, ie. the element at position BVH_STACK_NELEMS! while(stack_idx != BVH_STACK_NELEMS){ // Stack-based recursion! bvh_node_t* node = node_stack[stack_idx++]; if(!pt_aabb_intersect(node->node_aabb, ray)) continue; // In BVH-based path tracing, MOST of the BVH intersection-traversal is ray/AABB! To intersect a NON-LEAF (every level other than the last), we use ray/AABB intersection! To intersect a LEAF (only the last level), we use ray/triangle intersection! if(!node->is_leaf){ // We'll only call the primitive-intersection routine at the LEAVES of the BVH! node_stack[--stack_idx] = node->right; node_stack[--stack_idx] = node->left; // if(stack_idx<0){ printf("The BVH stack not big enough! Increase BVH_STACK_NELEMS!\n"); return hit; } continue; } // The spheres don't need an AABB! The non-leaf nodes don't need a sphere or a sphere index! u32 id = node->primitive_idx; // sphere_t primitive = node->primitive; // Storing the sphere full data is just a bit slower than storing just the sphere index! sphere_t sphere = ((sphere_t*)scene.mesh0.data)[id]; f32 t = pt_sphere_intersect(sphere, ray); if(t<PM_EPSILON || t>hit.t) continue; // Ray/sphere intersection only happens at the leaves! hit.t = t; hit.id = id; // NOW we need the sphere struct to hold the sphere index, since we're NOT traversing the sphere array in linear order! hit.gtype = scene.mesh0.gtype; } } #endif // Mesh intersection with a BVH! // ---------------------------------------------------------------- return hit; } // ---------------------------------------------------------------- static __forceinline__ __device__ vec3 pt_normal_out(vec3 normal, vec3 ray_dir){ return dot(normal,ray_dir)<0 ? normal : -1*normal; // "Outwards" normal, to create a "bounce"! 
} static __forceinline__ __device__ vec3 pt_hemisphere_randdir(vec3 normal, uint* seed_x, uint* seed_y){ // Sample a random direction on the dome/hemisphere around the hitpoint base on the normal at that point! // Compute local orthonormal basis uvw at hitpoint, to compute the (random) ray direction. 1st vector is the normal, 2nd vector is orthogonal to 1st, 3rd vector is orthogonal to first others vec3 basis_w = normal; vec3 axis = fabs(basis_w.x0)<.1f ? (vec3){1,0,0} : (vec3){0,1,0}; vec3 basis_u = normalize(cross(axis, basis_w)); // We shouldn't need to normalize this, but, if we don't, then we introduce artifacts! vec3 basis_v = cross(basis_w, basis_u); // Right-handed uvw-basis! The homology is: u -> v -> w -> u -> ... // All our geometric primitives (just triangles) are diffuse, which reflect light uniformly in all directions! Generate random direction in hemisphere above hitpoint (see "Realistic Ray Tracing", P. Shirley) f32 rand_tau = rand_f32(seed_x,seed_y) * M_TAU; // Get random number on unit circle for azimuth f32 rand_one = rand_f32(seed_x,seed_y); // Get random number for elevation f32 rand_sqrt = sqrtf(rand_one); // No FAST intrinsic for sqrt? f32 cos_tau, sin_tau; __sincosf(rand_tau, &sin_tau,&cos_tau); return cos_tau*rand_sqrt*basis_u + sin_tau*rand_sqrt*basis_v + sqrtf(1.f-rand_one)*basis_w; // Random ray direction on the hemisphere/dome around a point! Cosine-weighted importance sampling, favours ray directions closer to normal direction! } // ---------------------------------------------------------------- static __device__ vec3 pt_radiance_path_integral(ray_t ray, fb_t fb, scene_t scene, uint* seed_x,uint* seed_y){ // i32 nlights,light_t* lights, i32 ntriangles,triangle_t* triangles, i32 ncylinders,cylinder_t* cylinders, i32 nspheres,sphere_t* spheres vec3 rgb = {0,0,0}; vec3 fade = {1,1,1}; // 0) Scene intersection! for(int bounce=0; bounce<fb.nbounces; ++bounce){ hit_t hit = pt_scene_intersect(ray, scene); if(hit.t==1e38f) return {0,0,0}; // No intersection/hit! Return black! vec3 hit_pos = ray.pos + hit.t*ray.dir; // @hit_pos is the hit position in WORLD COORDINATES! @hit.t is the hit position in RAY COORDINATES! // ---------------------------------------------------------------- vec3 obj_normal, obj_rgb, obj_emi; switch(hit.gtype){ case GTYPE_LIGHT:{ light_t light = scene.lights[hit.id]; obj_normal = pt_light_normal(light, hit_pos); obj_rgb = {0,0,0}; obj_emi = light.emission; }break; #if 0 case GTYPE_TRIANGLE:{ triangle_t triangle = scene.triangles[hit.id]; obj_normal = pt_triangle_normal(triangle, hit_pos); obj_rgb = rgb8u_to_rgb32f(triangle.albedo); obj_emi = {0,0,0}; }break; #endif #if 0 case GTYPE_CYLINDER:{ cylinder_t cylinder = scene.cylinders[hit.id]; obj_normal = pt_cylinder_normal(cylinder, hit_pos); obj_rgb = rgb8u_to_rgb32f(cylinder.albedo); obj_emi = {0,0,0}; }break; #endif #if 1 case GTYPE_SPHERE:{ sphere_t sphere = ((sphere_t*)scene.mesh0.data)[hit.id]; obj_normal = pt_sphere_normal(sphere, hit_pos); obj_rgb = bgr8u_to_rgb32f(sphere.albedo); obj_emi = {0,0,0}; }break; #endif } // ---------------------------------------------------------------- vec3 obj_normal_out = pt_normal_out(obj_normal, ray.dir); // "Outwards" normal, to create a "bounce"! vec3 bounce_dir = pt_hemisphere_randdir(obj_normal, seed_x,seed_y); // 1) Light transport! rgb = rgb + fade*obj_emi; // Add emission of current object to accumulated color (first term in rendering equation sum) fade = dot(obj_normal_out, bounce_dir) * obj_rgb * fade; // Integrate/sum/accumulate the fade! 
Weigh light/color energy using cosine of angle between normal and incident light! // 2) Ray/path bouncing! ray.pos = hit_pos + 0.0001f*obj_normal_out; // Launch a new raw starting by "bouncing" it from the object! Offset ray position slightly to prevent self intersection ray.dir = bounce_dir; } return rgb; } // ----------------------------------------------------------------------------------------------------------------------------# // ----------------------------------------------------------------------------------------------------------------------------# static __forceinline__ __device__ aabb_t aabb3d_sphere(sphere_t sphere){ return {.min=sphere.pos - sphere.radius, .max=sphere.pos + sphere.radius}; } static __forceinline__ __device__ u64 gpu_zorder3D(vec3 x, aabb_t mesh_aabb){ // Compute the 1D position of a 3D position @x in a 1D Z-order curve living in 3D space, given a particular (global) AABB! NOTE: The AABB must be GLOBAL for the whole mesh that the 3D position belongs to! x.x0 = (x.x0 - mesh_aabb.min.x0) / (mesh_aabb.max.x0 - mesh_aabb.min.x0); // Map @x in @mesh_aabb to the 3D interval [0 .. 1]^3 x.x1 = (x.x1 - mesh_aabb.min.x1) / (mesh_aabb.max.x1 - mesh_aabb.min.x1); // Map @x in @mesh_aabb to the 3D interval [0 .. 1]^3 x.x2 = (x.x2 - mesh_aabb.min.x2) / (mesh_aabb.max.x2 - mesh_aabb.min.x2); // Map @x in @mesh_aabb to the 3D interval [0 .. 1]^3 u64 morton_a = (u64)x.x0 * (1ull<<BVH_MORTON_PRECISION); // Map @x in @mesh_aabb to the 3D interval [0 .. 2**BVH_MORTON_PRECISION]^3, meaning each coordinate can be represented using BVH_MORTON_PRECISION bits! (Although I think we lose the highest in the mesh AABB?) u64 morton_b = (u64)x.x1 * (1ull<<BVH_MORTON_PRECISION); // Map @x in @mesh_aabb to the 3D interval [0 .. 2**BVH_MORTON_PRECISION]^3, meaning each coordinate can be represented using BVH_MORTON_PRECISION bits! (Although I think we lose the highest in the mesh AABB?) u64 morton_c = (u64)x.x2 * (1ull<<BVH_MORTON_PRECISION); // Map @x in @mesh_aabb to the 3D interval [0 .. 2**BVH_MORTON_PRECISION]^3, meaning each coordinate can be represented using BVH_MORTON_PRECISION bits! (Although I think we lose the highest in the mesh AABB?) u64 morton_code = 0x0000000000000000ull; for(int i=0; i<BVH_MORTON_PRECISION; ++i){ // Combine into 63 bits morton code! morton_code |= (((((morton_a >> (BVH_MORTON_PRECISION-1-i))) & 1) << ((BVH_MORTON_PRECISION-i)*3 - 1)) | ((((morton_b >> (BVH_MORTON_PRECISION-1-i))) & 1) << ((BVH_MORTON_PRECISION-i)*3 - 2)) | ((((morton_c >> (BVH_MORTON_PRECISION-1-i))) & 1) << ((BVH_MORTON_PRECISION-i)*3 - 3))); } return morton_code; } // ----------------------------------------------------------------------------------------------------------------------------# extern "C" __global__ void ker_light_shader(fb_t fb, scene_t scene){ // Mesh of lights! f32 p = 1e1f; f32 x = 1e4f; f32 z = 1e2f; scene.lights[0] = {{-p,-p,+z}, { 0, 0,-x}, { 0,+x, 0}, {1.4,1.4,1.8}}; // Left face! scene.lights[1] = {{+p,-p,+z}, { 0,+x, 0}, { 0, 0,-x}, {1.4,1.4,1.8}}; // Right face! scene.lights[2] = {{-p,-p,+z}, {+x, 0, 0}, { 0, 0,-x}, {1.4,1.4,1.8}}; // Bottom face! scene.lights[3] = {{-p,+p,+z}, { 0, 0,-x}, {+x, 0, 0}, {1.4,1.4,1.8}}; // Top face! scene.lights[4] = {{-p,-p,-p}, {+x, 0, 0}, { 0,+x, 0}, rgb8u_to_rgb32f(0x080808)}; // Back face! scene.lights[5] = {{-p,-p,+z}, {+x, 0, 0}, { 0,+x, 0}, {1.4,1.4,1.8}}; // Front face! 
} // ----------------------------------------------------------------------------------------------------------------------------# extern "C" __global__ void ker_mesh0_shader(fb_t fb, scene_t scene){ i32 thr_lidx = blockIdx.x*blockDim.x + threadIdx.x; if(thr_lidx>=scene.mesh0.nelems) return; sphere_t* spheres = (sphere_t*)scene.mesh0.data; // ---------------------------------------------------------------- quat rot_yz = versor(scene.rot.x0, {1,0,0}); quat rot_zx = versor(scene.rot.x1, {0,1,0}); quat rot_xy = versor(scene.rot.x2, {0,0,1}); quat rot = rot_yz*rot_zx*rot_xy; spheres[thr_lidx].pos = qrotl(spheres[thr_lidx].pos, rot) + scene.mov; // ---------------------------------------------------------------- aabb_t mesh_aabb = {.min={-1,-1,-1}, .max={1,1,1}}; // Global AABB for ALL the triangles in this mesh! sphere_t sphere = spheres[thr_lidx]; scene.mesh0.aabbs[ thr_lidx] = aabb3d_sphere(sphere); scene.mesh0.mortons[thr_lidx] = gpu_zorder3D(sphere.pos, mesh_aabb); // TODO! Everything is zero! scene.mesh0.idxs[ thr_lidx] = thr_lidx; // printf("%d\n", scene.mesh0.idxs[thr_lidx]); } // ----------------------------------------------------------------------------------------------------------------------------# // ----------------------------------------------------------------------------------------------------------------------------# /* @section */ extern "C" __global__ void ker_pixel_shader(fb_t fb, scene_t scene){ i32 thr_lvl1_2idx_x = blockIdx.x*blockDim.x + threadIdx.x; i32 thr_lvl1_2idx_y = blockIdx.y*blockDim.y + threadIdx.y; if(fb.tile_pos_c+fb.tile_dim_w<=fb.tile_pos_c+thr_lvl1_2idx_x || fb.tile_pos_r+fb.tile_dim_h<=fb.tile_pos_r+thr_lvl1_2idx_y) return; u32 seed_x = thr_lvl1_2idx_x + fb.seed; u32 seed_y = thr_lvl1_2idx_y + fb.seed; // TODO! Maybe we should hoist this out of the kernel, since the camera computations are the same for all threads and all GPUs! vec3 cam_pos = fb.cam_pos + fb.cam_mov; quat cam_rot_yz = versor(fb.cam_rot.x0, {1,0,0}); quat cam_rot_zx = versor(fb.cam_rot.x1, {0,1,0}); quat cam_rot_xy = versor(fb.cam_rot.x2, {0,0,1}); quat cam_rot = cam_rot_yz*cam_rot_zx*cam_rot_xy; vec3 cam_dir = qrotl(fb.cam_dir, cam_rot); vec3 cam_dir_x = qrotl(.5f*PM_CAM_FOV * (vec3){(f32)fb.img_dim_w/fb.img_dim_h, 0, 0}, cam_rot); // Cam ray is directed at the lower-left corner of the screen! vec3 cam_dir_y = .5f*PM_CAM_FOV * normalize(cross(cam_dir, -1*cam_dir_x)); // Cam ray is directed at the lower-left corner of the screen! // ---------------------------------------------------------------- vec3 px_rgb = {0,0,0}; // Final pixel color! Init to zero for each pixel! for(int sample=0; sample<fb.nsamples; ++sample){ // Samples per pixel! Camera rays are pushed forward to start in interior f32 cam_dx = (thr_lvl1_2idx_x + rand_f32(&seed_x,&seed_y)) / fb.img_dim_w - .5f; f32 cam_dy = (thr_lvl1_2idx_y + rand_f32(&seed_x,&seed_y)) / fb.img_dim_h - .5f + (f32)fb.tile_pos_r/fb.img_dim_h; vec3 px_dir = cam_dir + cam_dx*cam_dir_x + cam_dy*cam_dir_y; vec3 px_pos = cam_pos; ray_t px_ray = {px_pos, normalize(px_dir)}; px_rgb = px_rgb + 1.f/fb.nsamples * pt_radiance_path_integral(px_ray, fb, scene, &seed_x,&seed_y); } // ---------------------------------------------------------------- u32 tile_lidx = thr_lvl1_2idx_y*fb.img_dim_w + thr_lvl1_2idx_x; fb.tile_accum[tile_lidx] = fb.tile_accum[tile_lidx] + px_rgb; vec3 rgb = fb.tile_accum[tile_lidx] / (fb.frame+1); fb.tile_data[tile_lidx] = rgb32f_to_rgbu8(clamp01(rgb)); }
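// [Illustrative sketch, not part of the original file pair above.] A possible explanation for the
// "TODO! Everything is zero!" note next to scene.mesh0.mortons in ker_mesh0_shader: in
// gpu_zorder3D the cast binds tighter than the multiply, so (u64)x.x0 * (1ull<<BVH_MORTON_PRECISION)
// truncates the normalized coordinate (which lies in [0,1]) to 0 for anything strictly inside the
// AABB, and every Morton code collapses to 0. Below is a minimal sketch of the quantization step
// with the cast applied after scaling; quantize01 is a made-up helper name, and
// BVH_MORTON_PRECISION is assumed to be the constant from pm.h (with 3*BVH_MORTON_PRECISION <= 63).
static __forceinline__ __device__ u64 quantize01(f32 v){
  u64 q     = (u64)(v * (f32)(1ull<<BVH_MORTON_PRECISION));  // Cast the PRODUCT, not the coordinate!
  u64 q_max = (1ull<<BVH_MORTON_PRECISION) - 1;
  return q<q_max ? q : q_max;                                // Clamp the v==1.0 edge case back into range
}
// In gpu_zorder3D this would replace the three morton_* lines:
//   u64 morton_a = quantize01(x.x0);  u64 morton_b = quantize01(x.x1);  u64 morton_c = quantize01(x.x2);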
548dbe02500fa8a3719cd0c13d0744b5ea09c1c0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <fcntl.h> #include <stdint.h> #include <stdlib.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "../gpu_usermap_abi.h" #include <ringbuf.cu.h> extern "C" { #include <rdma/ringbuf.h> } #define DEV_PATH "/dev/gpu_usermap" #define MAP_LEN 16384 #define unlikely(x) __builtin_expect(!!(x), 0) #define ASSERT_CUDA(val) \ if(unlikely((val))) {fprintf(stderr, __FILE__, __LINE__, "ERROR: errno = %3d : %s\n", static_cast<int>(val), hipGetErrorString(val)); exit(1);} void fill_mem(char* addr) { for (int i = 0; i < MAP_LEN; i++) { addr[i] = 'a' + (i % 26); } } bool check_mem(char *addr) { for (int i = 0; i < MAP_LEN; i++) { if (addr[i] != 'a' + (i % 26)) { fprintf(stderr, "ERROR: %d th character differs (%c instead of %c)\n", i, addr[i], 'a' + (i % 26)); return false; } } return true; } template<int N> __global__ void gpu_perf(g_ringbuf::ringbuf* rb) { unsigned long t1, t2; t1 = clock64(); for (int i = 0; i < N; i++) { g_ringbuf::ringbuf_produce(rb, 1); } t2 = clock64(); printf("clock per update: %ld\n", (t2-t1)/N); printf("dev size: %ld\n", sizeof(*rb)); } int ringbuf_gpudirect(ringbuf_t* phostptr, g_ringbuf::ringbuf** pdevptr) { int ret; gpu_usermap_req req; void *devptr; struct cuda_dev* dev = gpu_init(BEST_GPU); int fd = open(DEV_PATH, O_RDWR); if (fd < 0) { perror("open"); exit(1); } ASSERT_CUDA(hipMalloc(&devptr, MAP_LEN)); req.magic = GUSERMAP_MAGIC; req.gpu_addr = (uintptr_t)devptr; req.len = MAP_LEN; ret = write(fd, &req, sizeof(req)); if (ret != sizeof(req)) { perror("write"); fprintf(stderr, "ERROR: write ret: %d expected %d\n", ret, sizeof(req)); exit(1); } ringbuf_t addr = (ringbuf_t)mmap(NULL, MAP_LEN, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0); if (((void*)addr) == MAP_FAILED) { perror("mmap failed"); exit(1); } addr->_size = MAP_LEN; fprintf(stderr, "ringbuf size: %ld\n", addr->_size); addr->_buf = (uint8_t*)gpu_malloc(dev, MAP_LEN); if (!addr->_buf) { fprintf(stderr, "gpu_malloc failed to allocate\n"); exit(1); } ringbuf_reset(addr); *phostptr = addr; *pdevptr = (g_ringbuf::ringbuf*)devptr; return 0; } int ringbuf_zerocopy(ringbuf_t* phostptr, g_ringbuf::ringbuf** pdevptr) { struct cuda_dev* dev = gpu_init(BEST_GPU); *phostptr = ringbuf_gpu_new(dev, MAP_LEN, (devptr*)pdevptr); return 0; } void usage() { fprintf(stderr, "Usage: ./test_gpu_usermap_ringbuf [case]\n"); fprintf(stderr, " case 0 -- gpudirect\n"); fprintf(stderr, " case 1 -- zerocopy\n"); } int main(int argc, char** argv) { ringbuf_t hostptr; g_ringbuf::ringbuf *devptr; int ret; if (argc < 2) { usage(); exit(1); } if (argv[1][0] == '0') { ret = ringbuf_gpudirect(&hostptr, &devptr); } else if (argv[1][0] == '1') { ret = ringbuf_zerocopy(&hostptr, &devptr); } else { usage(); exit(1); } if (ret) { fprintf(stderr, "hostptr allocation failed ret: %d\n", ret); exit(1); } #define NR_ITEMS 1000000 hostptr->_size = MAP_LEN; hipLaunchKernelGGL(( gpu_perf<NR_ITEMS>), dim3(1), dim3(1), 0, 0, devptr); timeval tv1, tv2, tv3; gettimeofday(&tv1, NULL); size_t sum = 0; size_t to_consume; while (sum < NR_ITEMS) { to_consume = ringbuf_bytes_used(hostptr); if (to_consume > 0) { ringbuf_consume(hostptr, to_consume); } sum += to_consume; } ASSERT_CUDA(hipDeviceSynchronize()); gettimeofday(&tv2, NULL); //munmap((void*)hostptr, MAP_LEN); // int idx_second_non_zero = 0, idx_last; // for (int i = 0; i < NR_ITEMS; i++) { // if (arr[i] != 0 && idx_second_non_zero == 0) 
// idx_second_non_zero = i+1; // if (arr[i] >= (NR_ITEMS - 1)) { // idx_last = i; // break; // } // } // printf("idx difference: %d avg interval: %d\n", // (idx_last - idx_second_non_zero), // NR_ITEMS / (idx_last - idx_second_non_zero)); timersub(&tv2, &tv1, &tv3); double t_ms = (tv3.tv_sec*1000.0 + tv3.tv_usec/1000.0); printf("total time: %.2f ms, for each pingpong: %2f us \n", t_ms, t_ms*1000/(NR_ITEMS-1)); return 0; }
548dbe02500fa8a3719cd0c13d0744b5ea09c1c0.cu
#include <stdio.h> #include <fcntl.h> #include <stdint.h> #include <stdlib.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/mman.h> #include <sys/time.h> #include <cuda_runtime.h> #include "../gpu_usermap_abi.h" #include <ringbuf.cu.h> extern "C" { #include <rdma/ringbuf.h> } #define DEV_PATH "/dev/gpu_usermap" #define MAP_LEN 16384 #define unlikely(x) __builtin_expect(!!(x), 0) #define ASSERT_CUDA(val) \ if(unlikely((val))) {fprintf(stderr, __FILE__, __LINE__, "ERROR: errno = %3d : %s\n", static_cast<int>(val), cudaGetErrorString(val)); exit(1);} void fill_mem(char* addr) { for (int i = 0; i < MAP_LEN; i++) { addr[i] = 'a' + (i % 26); } } bool check_mem(char *addr) { for (int i = 0; i < MAP_LEN; i++) { if (addr[i] != 'a' + (i % 26)) { fprintf(stderr, "ERROR: %d th character differs (%c instead of %c)\n", i, addr[i], 'a' + (i % 26)); return false; } } return true; } template<int N> __global__ void gpu_perf(g_ringbuf::ringbuf* rb) { unsigned long t1, t2; t1 = clock64(); for (int i = 0; i < N; i++) { g_ringbuf::ringbuf_produce(rb, 1); } t2 = clock64(); printf("clock per update: %ld\n", (t2-t1)/N); printf("dev size: %ld\n", sizeof(*rb)); } int ringbuf_gpudirect(ringbuf_t* phostptr, g_ringbuf::ringbuf** pdevptr) { int ret; gpu_usermap_req req; void *devptr; struct cuda_dev* dev = gpu_init(BEST_GPU); int fd = open(DEV_PATH, O_RDWR); if (fd < 0) { perror("open"); exit(1); } ASSERT_CUDA(cudaMalloc(&devptr, MAP_LEN)); req.magic = GUSERMAP_MAGIC; req.gpu_addr = (uintptr_t)devptr; req.len = MAP_LEN; ret = write(fd, &req, sizeof(req)); if (ret != sizeof(req)) { perror("write"); fprintf(stderr, "ERROR: write ret: %d expected %d\n", ret, sizeof(req)); exit(1); } ringbuf_t addr = (ringbuf_t)mmap(NULL, MAP_LEN, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0); if (((void*)addr) == MAP_FAILED) { perror("mmap failed"); exit(1); } addr->_size = MAP_LEN; fprintf(stderr, "ringbuf size: %ld\n", addr->_size); addr->_buf = (uint8_t*)gpu_malloc(dev, MAP_LEN); if (!addr->_buf) { fprintf(stderr, "gpu_malloc failed to allocate\n"); exit(1); } ringbuf_reset(addr); *phostptr = addr; *pdevptr = (g_ringbuf::ringbuf*)devptr; return 0; } int ringbuf_zerocopy(ringbuf_t* phostptr, g_ringbuf::ringbuf** pdevptr) { struct cuda_dev* dev = gpu_init(BEST_GPU); *phostptr = ringbuf_gpu_new(dev, MAP_LEN, (devptr*)pdevptr); return 0; } void usage() { fprintf(stderr, "Usage: ./test_gpu_usermap_ringbuf [case]\n"); fprintf(stderr, " case 0 -- gpudirect\n"); fprintf(stderr, " case 1 -- zerocopy\n"); } int main(int argc, char** argv) { ringbuf_t hostptr; g_ringbuf::ringbuf *devptr; int ret; if (argc < 2) { usage(); exit(1); } if (argv[1][0] == '0') { ret = ringbuf_gpudirect(&hostptr, &devptr); } else if (argv[1][0] == '1') { ret = ringbuf_zerocopy(&hostptr, &devptr); } else { usage(); exit(1); } if (ret) { fprintf(stderr, "hostptr allocation failed ret: %d\n", ret); exit(1); } #define NR_ITEMS 1000000 hostptr->_size = MAP_LEN; gpu_perf<NR_ITEMS><<<1, 1>>>(devptr); timeval tv1, tv2, tv3; gettimeofday(&tv1, NULL); size_t sum = 0; size_t to_consume; while (sum < NR_ITEMS) { to_consume = ringbuf_bytes_used(hostptr); if (to_consume > 0) { ringbuf_consume(hostptr, to_consume); } sum += to_consume; } ASSERT_CUDA(cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); //munmap((void*)hostptr, MAP_LEN); // int idx_second_non_zero = 0, idx_last; // for (int i = 0; i < NR_ITEMS; i++) { // if (arr[i] != 0 && idx_second_non_zero == 0) // idx_second_non_zero = i+1; // if (arr[i] >= (NR_ITEMS - 1)) { // idx_last = i; // break; // } 
// } // printf("idx difference: %d avg interval: %d\n", // (idx_last - idx_second_non_zero), // NR_ITEMS / (idx_last - idx_second_non_zero)); timersub(&tv2, &tv1, &tv3); double t_ms = (tv3.tv_sec*1000.0 + tv3.tv_usec/1000.0); printf("total time: %.2f ms, for each pingpong: %2f us \n", t_ms, t_ms*1000/(NR_ITEMS-1)); return 0; }
94a66e3884a77f1b86706c5ad108da9da0f71c25.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>  // for rand()

__managed__ int sum=0;

__global__ void Array_sum(int *a,int *n)
{
    int tid=threadIdx.x;
    if(tid<*n)
        atomicAdd(&sum,a[tid]);
}

int main()
{
    int n=10,i;
    //printf("Enter N:");
    //scanf("%d",&n);
    int a[n];
    int *cuda_a,*cuda_n;
    for(i=0;i<n;i++)
    {
        a[i]=rand()%100;
        printf("%d ",a[i]);
    }
    printf("\n");
    hipMalloc((void**)&cuda_a,n*sizeof(int));
    hipMalloc((void**)&cuda_n,sizeof(int));
    hipMemcpy(cuda_a,a,n*sizeof(int),hipMemcpyHostToDevice);
    hipMemcpy(cuda_n,&n,sizeof(int),hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( Array_sum) , dim3(1),dim3(n), 0, 0, cuda_a,cuda_n);
    hipDeviceSynchronize();  // Kernel launches are asynchronous; wait before reading the managed variable `sum` on the host.
    printf("Sum:%d\n",sum);
    hipFree(cuda_a);
    hipFree(cuda_n);
    return 0;
}
94a66e3884a77f1b86706c5ad108da9da0f71c25.cu
#include<stdio.h>
#include<stdlib.h>  // for rand()

__managed__ int sum=0;

__global__ void Array_sum(int *a,int *n)
{
    int tid=threadIdx.x;
    if(tid<*n)
        atomicAdd(&sum,a[tid]);
}

int main()
{
    int n=10,i;
    //printf("Enter N:");
    //scanf("%d",&n);
    int a[n];
    int *cuda_a,*cuda_n;
    for(i=0;i<n;i++)
    {
        a[i]=rand()%100;
        printf("%d ",a[i]);
    }
    printf("\n");
    cudaMalloc((void**)&cuda_a,n*sizeof(int));
    cudaMalloc((void**)&cuda_n,sizeof(int));
    cudaMemcpy(cuda_a,a,n*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_n,&n,sizeof(int),cudaMemcpyHostToDevice);
    Array_sum <<<1,n>>>(cuda_a,cuda_n);
    cudaDeviceSynchronize();  // Kernel launches are asynchronous; wait before reading the managed variable `sum` on the host.
    printf("Sum:%d\n",sum);
    cudaFree(cuda_a);
    cudaFree(cuda_n);
    return 0;
}
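// [Illustrative alternative, not part of the original file pair above.] The kernel above funnels
// every element through one global atomicAdd on the managed variable `sum`. A common alternative is
// a block-level shared-memory reduction that performs a single global write; this is a minimal
// sketch under the assumption of one block whose size is a power of two and >= *n. The kernel name
// Array_sum_shared and the output pointer `out` are made up for illustration.
__global__ void Array_sum_shared(const int *a, const int *n, int *out)
{
    extern __shared__ int cache[];            // sized to blockDim.x ints at launch
    int tid = threadIdx.x;
    cache[tid] = (tid < *n) ? a[tid] : 0;     // pad the tail of the block with zeros
    __syncthreads();
    for (int stride = blockDim.x/2; stride > 0; stride >>= 1) {  // tree reduction in shared memory
        if (tid < stride) cache[tid] += cache[tid + stride];
        __syncthreads();
    }
    if (tid == 0) *out = cache[0];            // one global store instead of n atomics
}
// Possible launch for n=10: Array_sum_shared<<<1, 16, 16*sizeof(int)>>>(cuda_a, cuda_n, cuda_out);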
eb85e07b8906e440aff49792f6b0d398a1718715.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "hip/hip_runtime.h" #include "def.h" inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class { 0x30, 192 }, // Kepler Generation (SM 3.0) GK10x class { 0x32, 192 }, // Kepler Generation (SM 3.2) GK10x class { 0x35, 192 }, // Kepler Generation (SM 3.5) GK11x class { 0x37, 192 }, // Kepler Generation (SM 3.7) GK21x class { 0x50, 128 }, // Maxwell Generation (SM 5.0) GM10x class { 0x52, 128 }, // Maxwell Generation (SM 5.2) GM20x class { -1, -1 } }; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one to run properly printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores); return nGpuArchCoresPerSM[index - 1].Cores; } void md_device_init(int argc, char *argv[]) { int deviceCount; hipError_t err = hipGetDeviceCount(&deviceCount); if (err != hipSuccess) { fprintf(stderr, "error: hipGetDeviceCount failed.\n"); exit(EXIT_FAILURE); } if (deviceCount == 0) { fprintf(stderr, "error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } if (deviceCount == 1) { // just one device or an emulated device present, no choice int dev=0; hipDeviceProp_t deviceProp; hipError_t err = hipGetDeviceProperties(&deviceProp, dev); if (err != hipSuccess) { fprintf(stderr, "error: hipGetDeviceProperties failed.\n"); exit(EXIT_FAILURE); } if (deviceProp.major < 1) { fprintf(stderr, "error: device does not support CUDA.\n"); exit(EXIT_FAILURE); } hipSetDevice(dev); md_device_report(); } else { // several devices present, so make list of usable devices // and have one choosen among the currently available ones std::vector<int> usable_devices; for (int dev=0; dev<deviceCount; dev++) { hipDeviceProp_t deviceProp; hipError_t err = hipGetDeviceProperties(&deviceProp, dev); if (err != hipSuccess) { fprintf(stderr, "error: hipGetDeviceProperties failed.\n"); exit(EXIT_FAILURE); } if ((deviceProp.major >= 1) && (deviceProp.multiProcessorCount >= 2) && (deviceProp.computeMode != hipComputeModeProhibited)) { usable_devices.push_back(dev); } } if (usable_devices.size() == 0) { fprintf(stderr, "error: no usable devices supporting CUDA.\n"); exit(EXIT_FAILURE); } //hipError_t err = hipSetValidDevices(&usable_devices[0], usable_devices.size()); //if (err != hipSuccess ) { // fprintf(stderr, "error: hipSetValidDevices failed.\n"); // exit(EXIT_FAILURE); // } // trigger device initialization by a non-device management function call hipSetDevice(0); hipDeviceSynchronize(); md_device_report(); } } void md_device_report() { int dev; hipGetDevice(&dev); hipDeviceProp_t prop; hipDeviceProp_t devProp; hipError_t err = hipGetDeviceProperties(&prop, dev); if (err != hipSuccess) { fprintf(stderr, "error: hipGetDeviceProperties failed.\n"); exit(EXIT_FAILURE); } fprintf(stdout, "CUDA: Using device %d: %s\n", dev, prop.name); printf( " --- General Information for device %d ---\n", 0 ); printf( "Name: %s\n", prop.name ); printf( "Compute 
capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( "Kernel execition timeout : " ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( " --- Memory Information for device %d ---\n", 0 ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); return; /* devProp = prop; printf("#################################################\n"); printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %ld\n", devProp.totalGlobalMem); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Total shared memory per block: %ld\n", devProp.sharedMemPerBlock); printf("Total registers per block: %ld\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Texture alignment: %u\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? 
"Yes" : "No")); */ printf("################################################################\n"); int driverVersion = 0, runtimeVersion = 0; //hipSetDevice(dev); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); // Console log hipDriverGetVersion(&driverVersion); hipRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion / 1000, (driverVersion % 100) / 10, runtimeVersion / 1000, (runtimeVersion % 100) / 10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); char msg[256]; sprintf(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem / 1048576.0f, (unsigned long long) deviceProp.totalGlobalMem); printf("%s", msg); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf(" GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 5000 // This is supported in CUDA 5.0 (runtime API device properties) printf(" Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f); printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize) { printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } #else // This only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API) int memoryClock; getCudaAttribute<int>(&memoryClock, hipDeviceAttributeMemoryClockRate, dev); printf(" Memory Clock rate: %.0f Mhz\n", memoryClock * 1e-3f); int memBusWidth; getCudaAttribute<int>(&memBusWidth, hipDeviceAttributeMemoryBusWidth, dev); printf(" Memory Bus Width: %d-bit\n", memBusWidth); int L2CacheSize; getCudaAttribute<int>(&L2CacheSize, hipDeviceAttributeL2CacheSize, dev); if (L2CacheSize) { printf(" L2 Cache Size: %d bytes\n", L2CacheSize); } #endif printf(" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]); printf(" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]); printf(" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: 
%lu bytes\n", deviceProp.memPitch); printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment); printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount); printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No"); printf(" Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No"); printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No"); printf(" Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No"); printf(" Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled"); #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)"); #endif printf(" Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No"); printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID); const char *sComputeMode[] = { "Default (multiple host threads can use ::hipSetDevice() with device simultaneously)", "Exclusive (only one host thread in one process is able to use ::hipSetDevice() with this device)", "Prohibited (no host thread can use ::hipSetDevice() with this device)", "Exclusive Process (many threads in one process is able to use ::hipSetDevice() with this device)", "Unknown", NULL }; printf(" Compute Mode:\n"); printf(" < %s >\n", sComputeMode[deviceProp.computeMode]); } void resetDevce(){ hipDeviceReset(); } void cudaCopyToDevice(void *d, void *h, int size){ hipMemcpy(d, h, size, hipMemcpyHostToDevice); } class VecNormSqr{ public: __host__ __device__ real operator()(const VectorR &a){ return a.x*a.x + a.y*a.y; } }; void squareVec(dvector<VectorR> &v, dvector<real> &vv){ thrust::transform(v.begin(), v.end(), vv.begin(), VecNormSqr()); } real findMax(dvector<real> &v){ return thrust::reduce(v.begin(), v.end(), (real)(0.0), thrust::maximum<real>()); } void multVec(dvector<real> &vv, dvector<real> &m ){ //std::transform(a.begin(), a.end(), b.begin(), r.begin(), std::multiplies<float>()); thrust::transform(vv.begin(), vv.end(), m.begin(), vv.begin(), thrust::multiplies<real>()); } real __host__ reduce(dvector<real> &vv){ return thrust::reduce(vv.begin(), vv.end()); } /* template<typename T> void clearDevVector(dvector<T> &v){ v.clear(); thrust::device_vector<T>().swap(v); } template void clearDevVector(dvector<int> v); */ void clearDevVector(dvector<int> &v){ v.clear(); thrust::device_vector<int>().swap(v); } void clearDevVector(dvector < real > &v){ v.clear(); thrust::device_vector<real>().swap(v); } void clearDevVector(dvector < VectorR > &v){ v.clear(); thrust::device_vector<VectorR>().swap(v); } void clearDevVector(dvector < tetraIndexes > &v){ v.clear(); thrust::device_vector<tetraIndexes>().swap(v); } void clearDevVector(dvector < mat3x3 > &v){ v.clear(); thrust::device_vector<mat3x3>().swap(v); }
eb85e07b8906e440aff49792f6b0d398a1718715.cu
#include <vector> #include "cuda_runtime.h" #include "def.h" inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class { 0x30, 192 }, // Kepler Generation (SM 3.0) GK10x class { 0x32, 192 }, // Kepler Generation (SM 3.2) GK10x class { 0x35, 192 }, // Kepler Generation (SM 3.5) GK11x class { 0x37, 192 }, // Kepler Generation (SM 3.7) GK21x class { 0x50, 128 }, // Maxwell Generation (SM 5.0) GM10x class { 0x52, 128 }, // Maxwell Generation (SM 5.2) GM20x class { -1, -1 } }; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one to run properly printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores); return nGpuArchCoresPerSM[index - 1].Cores; } void md_device_init(int argc, char *argv[]) { int deviceCount; cudaError err = cudaGetDeviceCount(&deviceCount); if (err != cudaSuccess) { fprintf(stderr, "error: cudaGetDeviceCount failed.\n"); exit(EXIT_FAILURE); } if (deviceCount == 0) { fprintf(stderr, "error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } if (deviceCount == 1) { // just one device or an emulated device present, no choice int dev=0; cudaDeviceProp deviceProp; cudaError err = cudaGetDeviceProperties(&deviceProp, dev); if (err != cudaSuccess) { fprintf(stderr, "error: cudaGetDeviceProperties failed.\n"); exit(EXIT_FAILURE); } if (deviceProp.major < 1) { fprintf(stderr, "error: device does not support CUDA.\n"); exit(EXIT_FAILURE); } cudaSetDevice(dev); md_device_report(); } else { // several devices present, so make list of usable devices // and have one choosen among the currently available ones std::vector<int> usable_devices; for (int dev=0; dev<deviceCount; dev++) { cudaDeviceProp deviceProp; cudaError err = cudaGetDeviceProperties(&deviceProp, dev); if (err != cudaSuccess) { fprintf(stderr, "error: cudaGetDeviceProperties failed.\n"); exit(EXIT_FAILURE); } if ((deviceProp.major >= 1) && (deviceProp.multiProcessorCount >= 2) && (deviceProp.computeMode != cudaComputeModeProhibited)) { usable_devices.push_back(dev); } } if (usable_devices.size() == 0) { fprintf(stderr, "error: no usable devices supporting CUDA.\n"); exit(EXIT_FAILURE); } //cudaError err = cudaSetValidDevices(&usable_devices[0], usable_devices.size()); //if (err != cudaSuccess ) { // fprintf(stderr, "error: cudaSetValidDevices failed.\n"); // exit(EXIT_FAILURE); // } // trigger device initialization by a non-device management function call cudaSetDevice(0); cudaThreadSynchronize(); md_device_report(); } } void md_device_report() { int dev; cudaGetDevice(&dev); cudaDeviceProp prop; cudaDeviceProp devProp; cudaError err = cudaGetDeviceProperties(&prop, dev); if (err != cudaSuccess) { fprintf(stderr, "error: cudaGetDeviceProperties failed.\n"); exit(EXIT_FAILURE); } fprintf(stdout, "CUDA: Using device %d: %s\n", dev, prop.name); printf( " --- General Information for device %d ---\n", 0 ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( 
"Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( "Kernel execition timeout : " ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( " --- Memory Information for device %d ---\n", 0 ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); return; /* devProp = prop; printf("#################################################\n"); printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %ld\n", devProp.totalGlobalMem); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Total shared memory per block: %ld\n", devProp.sharedMemPerBlock); printf("Total registers per block: %ld\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %u\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %u\n", devProp.totalConstMem); printf("Texture alignment: %u\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? 
"Yes" : "No")); */ printf("################################################################\n"); int driverVersion = 0, runtimeVersion = 0; //cudaSetDevice(dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); // Console log cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion / 1000, (driverVersion % 100) / 10, runtimeVersion / 1000, (runtimeVersion % 100) / 10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); char msg[256]; sprintf(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem / 1048576.0f, (unsigned long long) deviceProp.totalGlobalMem); printf("%s", msg); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf(" GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 5000 // This is supported in CUDA 5.0 (runtime API device properties) printf(" Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f); printf(" Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize) { printf(" L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } #else // This only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API) int memoryClock; getCudaAttribute<int>(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev); printf(" Memory Clock rate: %.0f Mhz\n", memoryClock * 1e-3f); int memBusWidth; getCudaAttribute<int>(&memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev); printf(" Memory Bus Width: %d-bit\n", memBusWidth); int L2CacheSize; getCudaAttribute<int>(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev); if (L2CacheSize) { printf(" L2 Cache Size: %d bytes\n", L2CacheSize); } #endif printf(" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]); printf(" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]); printf(" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" 
Maximum memory pitch: %lu bytes\n", deviceProp.memPitch); printf(" Texture alignment: %lu bytes\n", deviceProp.textureAlignment); printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount); printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No"); printf(" Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No"); printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No"); printf(" Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No"); printf(" Device has ECC support: %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled"); #if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64) printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)"); #endif printf(" Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No"); printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID); const char *sComputeMode[] = { "Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)", "Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)", "Prohibited (no host thread can use ::cudaSetDevice() with this device)", "Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)", "Unknown", NULL }; printf(" Compute Mode:\n"); printf(" < %s >\n", sComputeMode[deviceProp.computeMode]); } void resetDevce(){ cudaDeviceReset(); } void cudaCopyToDevice(void *d, void *h, int size){ cudaMemcpy(d, h, size, cudaMemcpyHostToDevice); } class VecNormSqr{ public: __host__ __device__ real operator()(const VectorR &a){ return a.x*a.x + a.y*a.y; } }; void squareVec(dvector<VectorR> &v, dvector<real> &vv){ thrust::transform(v.begin(), v.end(), vv.begin(), VecNormSqr()); } real findMax(dvector<real> &v){ return thrust::reduce(v.begin(), v.end(), (real)(0.0), thrust::maximum<real>()); } void multVec(dvector<real> &vv, dvector<real> &m ){ //std::transform(a.begin(), a.end(), b.begin(), r.begin(), std::multiplies<float>()); thrust::transform(vv.begin(), vv.end(), m.begin(), vv.begin(), thrust::multiplies<real>()); } real __host__ reduce(dvector<real> &vv){ return thrust::reduce(vv.begin(), vv.end()); } /* template<typename T> void clearDevVector(dvector<T> &v){ v.clear(); thrust::device_vector<T>().swap(v); } template void clearDevVector(dvector<int> v); */ void clearDevVector(dvector<int> &v){ v.clear(); thrust::device_vector<int>().swap(v); } void clearDevVector(dvector < real > &v){ v.clear(); thrust::device_vector<real>().swap(v); } void clearDevVector(dvector < VectorR > &v){ v.clear(); thrust::device_vector<VectorR>().swap(v); } void clearDevVector(dvector < tetraIndexes > &v){ v.clear(); thrust::device_vector<tetraIndexes>().swap(v); } void clearDevVector(dvector < mat3x3 > &v){ v.clear(); thrust::device_vector<mat3x3>().swap(v); }
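The helpers at the end of the file above (squareVec, findMax, multVec, reduce, clearDevVector) wrap a handful of standard Thrust patterns: an elementwise transform driven by a functor, max- and sum-reductions, and the clear-and-swap idiom for releasing device_vector storage. Below is a minimal self-contained sketch of those patterns, with Vec2/NormSqr standing in for the project's own VectorR/VecNormSqr types; it is an illustration of the pattern, not code taken from the original file.

// Illustrative sketch only (not part of the original source): the Thrust
// patterns used by squareVec(), findMax(), reduce() and clearDevVector(),
// with Vec2/NormSqr standing in for the project's VectorR/VecNormSqr.
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <cstdio>

struct Vec2 { float x, y; };

struct NormSqr {
    __host__ __device__ float operator()(const Vec2 &a) const {
        return a.x * a.x + a.y * a.y;   // squared 2-norm, as in VecNormSqr
    }
};

int main() {
    thrust::device_vector<Vec2>  v(1024, Vec2{3.0f, 4.0f});
    thrust::device_vector<float> vv(v.size());

    // squareVec: elementwise squared norm via a unary transform
    thrust::transform(v.begin(), v.end(), vv.begin(), NormSqr());

    // findMax: reduction with an explicit identity and a max operator
    float vmax = thrust::reduce(vv.begin(), vv.end(), 0.0f, thrust::maximum<float>());

    // reduce: plain sum reduction
    float vsum = thrust::reduce(vv.begin(), vv.end());

    printf("max = %f, sum = %f\n", vmax, vsum);   // 25, 25 * 1024

    // clearDevVector: release device storage immediately via the swap idiom
    thrust::device_vector<float>().swap(vv);
    return 0;
}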
7de1175af177cf75725537820110ae51ba377b6e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <stdio.h> #include <vector> #include <algorithm> #include <iterator> #include <utility> #include <math.h> #include <omp.h> #include <hip/hip_runtime.h> #include "util.h" #include "kernel.h" #include <bits/stdc++.h> using namespace std; long n_rows, n_cols, nnz; int tile_sizeX = 256; int tile_sizeY = 99999999; int k=100; int BLOCKSIZE=512; inline hipError_t checkCuda(hipError_t result, int s){ if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error in line : %s - %d\n", hipGetErrorString(result), s); assert(result == hipSuccess); } return result; } void sddmm_GPU(int * d_row_ptr, int * d_row_ind, int *d_col_ind, float * d_val_ind, float * d_W, float *d_H, int *d_tiled_ind, int *d_lastIdx, long new_nnz){ int n_tile = n_cols/tile_sizeX + 1; int n_tile_r = n_rows/tile_sizeY + 1; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipStream_t stream[n_tile]; for (int i = 0; i < n_tile; i++){ hipStreamCreate(&(stream[i])); } float mili =0, copyTime = 0 ; dim3 block(BLOCKSIZE,1,1), grid(1,1,1); //grid.x = (new_nnz + BLOCKSIZE - 1) / BLOCKSIZE; grid.x = (n_cols/tile_sizeX+1); // grid.x = (32 * n_rows + BLOCKSIZE - 1) / BLOCKSIZE; checkCuda(hipEventRecord(start), __LINE__); for (int t_st = 0; t_st < k ; t_st +=32){ for (int r_t = 0; r_t < n_tile_r; ++r_t){ int r_st = r_t * tile_sizeY; hipLaunchKernelGGL(( comp_kernel_COO), dim3(grid), dim3(block), 0, stream[0], d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_tiled_ind, d_lastIdx+n_tile*r_t, t_st, r_st); } } checkCuda(hipEventRecord(stop), __LINE__); hipEventSynchronize(stop); //hipDeviceSynchronize(); checkCuda(hipEventElapsedTime(&mili, start, stop), __LINE__); hipDeviceSynchronize(); cout << "GPU time " << mili << "ms"<< endl; } void sddmm_CPU_CSR(int * row_ptr, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){ // reduction(+:rmse) long tot =0 ; #pragma omp parallel for reduction(+:tot) for (int r = 0; r < n_rows; ++r){ tot += row_ptr[r+1] - row_ptr[r]; float sm =0 ; for (int ind = row_ptr[r]; ind < row_ptr[r+1]; ++ind){ int row = r; int col = col_ind[ind]; int nnz = row_ptr[r+1]-row_ptr[r]; float val = val_ind[ind]; sm=0; for (int t = 0; t < k; ++t){ sm += W[row * k + t] * H[col * k + t]; // cout <<W[row * k + t] <<" "<<H[col * k + t]<< endl; } p_ind[ind] = sm * val_ind[ind]; // cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl; } } cout << "CPU tot " << tot << endl; for (int r = 500000; r < 500005; ++r) // for (int ind = row_ptr[r]; ind < row_ptr[r+1]; ++ind) cout << "row " << r << " " <<" "<< p_ind[r]<< endl; } void sddmm_CPU_COO(int * row_ind, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){ // reduction(+:rmse) double start_time = omp_get_wtime(); omp_set_dynamic(0); omp_set_num_threads(28); #pragma omp parallel for //reduction(+:tot) for (int ind = 0; ind < nnz; ind++){ float sm =0 ; int row = row_ind[ind]; int col = col_ind[ind]; for (int t = 0; t < k; ++t) // sm = H[col];// * k + t]; sm += W[row * k + t] * H[col * k + t]; // p_ind[ind] = sm;// * val_ind[ind]; p_ind[ind] = sm; // cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl; // } } double CPU_time = omp_get_wtime() - start_time; //correctness check printf("\nomp time CPU : %.4f \n\n", CPU_time*1000); } void init(int *rows, int *cols, float* vals){ int n_bin=10; int *count = new 
int[n_bin]; int *row_ptr = new int[n_rows+1]; float *p_ind = new float[nnz]; float *W = new float[n_rows*k]; float *W_t = new float[n_rows*k]; float *H = new float[n_cols*k]; float *H_t = new float[n_cols*k]; int n_tile_c = n_cols/tile_sizeX + 1; int n_tile_r = n_rows/tile_sizeY + 1; int *lastIdx_tile = new int[n_tile_c*n_tile_r+1]; int *row_holder = new int[n_rows]; float *d_val, *d_W, *d_H, *d_W_t; int *d_row_ptr, *d_col_ind, *d_row_ind, *d_tiled_ind, *d_lastIdx; int n_tileX = n_cols/tile_sizeX+1; int n_tileY = n_rows/tile_sizeY+1; long new_nnz =0 ; initial(W, n_rows, k); initial(H, n_cols, k); make_HTasH(H, H_t, n_cols, k); make_HTasH(W, W_t, n_rows, k); cout << "n_tiles in x: " <<n_tile_c << " n_tiles in y: "<< n_tile_r<< endl; int *new_rows = new int[nnz]; int *new_cols = new int[nnz ]; float *new_vals = new float[nnz ]; int *tiled_ind = new int [nnz ]; // int *new_rows = new int[nnz]; // int *new_cols = new int[nnz]; // float *new_vals = new float[nnz]; //converting col sorted matrix to row sorted //unsorted_make_CSR(rows, cols, vals, nnz, n_rows, n_cols, row_ptr); //assuming sorted make_CSR(rows, cols, vals, nnz, n_rows, row_ptr, row_holder); //comp_bin(n_bin, count, n_rows, row_ptr, nnz); rewrite_matrix_1D(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, tile_sizeX, tile_sizeY, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz, row_holder); // write_mat(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, // tile_sizeX, tile_sizeY, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz, row_holder); // rewrite_col_sorted_matrix(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, // tile_sizeX, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz); double t0 = seconds(); // sddmm_CPU_CSR(row_ptr, cols, vals, W, H, p_ind); sddmm_CPU_COO(rows, cols, vals, W, H, p_ind); //***********Starting GPU**************** checkCuda(hipMalloc((void**)&d_W, k*n_rows*sizeof(float)),0); checkCuda(hipMalloc((void**)&d_H, k*n_cols*sizeof(float)),1); // checkCuda(hipMalloc((void**)&d_row_ptr, (n_rows+1)*sizeof(int)),2); checkCuda(hipMalloc((void**)&d_row_ind, new_nnz*sizeof(int)),4); //checkCuda(hipMalloc((void**)&d_col_ind, new_nnz*sizeof(int)),4); checkCuda(hipMalloc((void**)&d_val, new_nnz*sizeof(float)),4); checkCuda(hipMalloc((void**)&d_lastIdx, (n_tile_c*n_tile_r+1)*sizeof(float)),4); // checkCuda(hipMalloc((void**)&d_tiled_ind, nnz*sizeof(int)),4); // checkCuda(hipMemcpy(d_row_ptr, &(row_ptr[0]), (n_rows+1)*sizeof(int), hipMemcpyHostToDevice),4); checkCuda(hipMemcpy(d_row_ind, &(new_rows[0]), new_nnz*sizeof(int), hipMemcpyHostToDevice),4); //checkCuda(hipMemcpy(d_col_ind, &(new_cols[0]), new_nnz*sizeof(int), hipMemcpyHostToDevice),4);; checkCuda(hipMemcpy(d_val, &(new_vals[0]), new_nnz*sizeof(float), hipMemcpyHostToDevice),4);; checkCuda(hipMemcpy(d_lastIdx, &(lastIdx_tile[0]), (n_tile_c*n_tile_r+1)*sizeof(float), hipMemcpyHostToDevice),4);; hipMemset(d_val, 0, nnz*sizeof(float)); // checkCuda(hipMemcpy(d_tiled_ind, &(tiled_ind[0]), nnz*sizeof(int), hipMemcpyHostToDevice),4);; hipMemcpy(d_W, &(W[0]), n_rows * k *sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(d_W, &(W_t[0]), n_rows * k *sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_H, &(H[0]), n_cols * k *sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(d_H, &(H_t[0]), n_cols * k *sizeof(float), hipMemcpyHostToDevice); sddmm_GPU(d_row_ptr, d_row_ind, d_col_ind, d_val, d_W, d_H, d_tiled_ind, d_lastIdx, new_nnz ); //******** correctness check float GPU_tot = 0, CPU_tot =0, CPU_tot_orig 
=0 ; float *p_ind_temp = new float[new_nnz]; checkCuda(hipMemcpy(&(p_ind_temp[0]), d_val, new_nnz*sizeof(float), hipMemcpyDeviceToHost),4);; for (int i = 0; i < nnz; ++i){ CPU_tot += p_ind[tiled_ind[i]]; CPU_tot_orig += p_ind[i]; // cout << "p_ind " << p_ind[tiled_ind[i]] << " " << p_ind[i] << " new,old ind: "<<tiled_ind[i] <<" "<<i<< endl; } for (int i = 511; i < 511+2; ++i) cout << "gp idx " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl; for (int i = nnz-1; i > nnz-3; --i) cout << "gp idx " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl; long diff_tot = 0; for (int i = 0; i < new_nnz; ++i){ if(abs(p_ind_temp[i]-p_ind[tiled_ind[i]]) > .00001){ diff_tot ++; if(diff_tot < 5) printf("CPU GPU diff %d: %f %f %f \n", i, p_ind_temp[i], p_ind[tiled_ind[i]],p_ind_temp[i]-p_ind[tiled_ind[i]] ); } } cout << "diff values in CPU and GPU: " << diff_tot << endl; //freeing device allocation hipFree( d_row_ptr ); hipFree( d_row_ind); hipFree( d_col_ind); hipFree( d_val); hipFree( d_W ); hipFree( d_H ); delete(rows); delete(cols); delete(vals); } int main(int argc, char* argv[]){ ifstream fp(argv[1]); k = atoi(argv[2]); tile_sizeX = atoi(argv[3]); string str; getline(fp,str); while(!isdigit(str[0])){ getline(fp,str); } istringstream is(str); is >> n_rows; is >> n_cols; is >> nnz; //fp >> n_rows >> n_cols >> nnz; long orig_nnz=nnz, rid=0,cid=0; float vid=0; int *rows = new int[nnz]; int *cols = new int[nnz]; float *vals = new float[nnz]; long idx=0; for (long o_idx = 0; o_idx < orig_nnz; ++o_idx) { fp >> rid >> cid >> vid; rows[idx]=rid-1; cols[idx]=cid-1; vals[idx]=vid; idx++; } cout << "Rows: "<<n_rows << " Cols: "<<n_cols <<" Nnz: "<< nnz << " tile-size: " << tile_sizeX<< " k: "<<k << endl; nnz=idx; init(rows, cols, vals); }
7de1175af177cf75725537820110ae51ba377b6e.cu
#include <iostream> #include <fstream> #include <stdio.h> #include <vector> #include <algorithm> #include <iterator> #include <utility> #include <math.h> #include <omp.h> #include <cuda.h> #include "util.h" #include "kernel.h" #include <bits/stdc++.h> using namespace std; long n_rows, n_cols, nnz; int tile_sizeX = 256; int tile_sizeY = 99999999; int k=100; int BLOCKSIZE=512; inline cudaError_t checkCuda(cudaError_t result, int s){ if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error in line : %s - %d\n", cudaGetErrorString(result), s); assert(result == cudaSuccess); } return result; } void sddmm_GPU(int * d_row_ptr, int * d_row_ind, int *d_col_ind, float * d_val_ind, float * d_W, float *d_H, int *d_tiled_ind, int *d_lastIdx, long new_nnz){ int n_tile = n_cols/tile_sizeX + 1; int n_tile_r = n_rows/tile_sizeY + 1; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaStream_t stream[n_tile]; for (int i = 0; i < n_tile; i++){ cudaStreamCreate(&(stream[i])); } float mili =0, copyTime = 0 ; dim3 block(BLOCKSIZE,1,1), grid(1,1,1); //grid.x = (new_nnz + BLOCKSIZE - 1) / BLOCKSIZE; grid.x = (n_cols/tile_sizeX+1); // grid.x = (32 * n_rows + BLOCKSIZE - 1) / BLOCKSIZE; checkCuda(cudaEventRecord(start), __LINE__); for (int t_st = 0; t_st < k ; t_st +=32){ for (int r_t = 0; r_t < n_tile_r; ++r_t){ int r_st = r_t * tile_sizeY; comp_kernel_COO<<<grid, block, 0, stream[0]>>>(d_row_ind, d_col_ind, d_val_ind, d_W, d_H, new_nnz, n_rows, n_cols, k, d_tiled_ind, d_lastIdx+n_tile*r_t, t_st, r_st); } } checkCuda(cudaEventRecord(stop), __LINE__); cudaEventSynchronize(stop); //cudaDeviceSynchronize(); checkCuda(cudaEventElapsedTime(&mili, start, stop), __LINE__); cudaDeviceSynchronize(); cout << "GPU time " << mili << "ms"<< endl; } void sddmm_CPU_CSR(int * row_ptr, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){ // reduction(+:rmse) long tot =0 ; #pragma omp parallel for reduction(+:tot) for (int r = 0; r < n_rows; ++r){ tot += row_ptr[r+1] - row_ptr[r]; float sm =0 ; for (int ind = row_ptr[r]; ind < row_ptr[r+1]; ++ind){ int row = r; int col = col_ind[ind]; int nnz = row_ptr[r+1]-row_ptr[r]; float val = val_ind[ind]; sm=0; for (int t = 0; t < k; ++t){ sm += W[row * k + t] * H[col * k + t]; // cout <<W[row * k + t] <<" "<<H[col * k + t]<< endl; } p_ind[ind] = sm * val_ind[ind]; // cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl; } } cout << "CPU tot " << tot << endl; for (int r = 500000; r < 500005; ++r) // for (int ind = row_ptr[r]; ind < row_ptr[r+1]; ++ind) cout << "row " << r << " " <<" "<< p_ind[r]<< endl; } void sddmm_CPU_COO(int * row_ind, int *col_ind, float * val_ind, float * W, float *H, float * p_ind){ // reduction(+:rmse) double start_time = omp_get_wtime(); omp_set_dynamic(0); omp_set_num_threads(28); #pragma omp parallel for //reduction(+:tot) for (int ind = 0; ind < nnz; ind++){ float sm =0 ; int row = row_ind[ind]; int col = col_ind[ind]; for (int t = 0; t < k; ++t) // sm = H[col];// * k + t]; sm += W[row * k + t] * H[col * k + t]; // p_ind[ind] = sm;// * val_ind[ind]; p_ind[ind] = sm; // cout << "ind " << row<<" "<<col << ":: " <<" "<< p_ind[ind] << " = " << sm <<" * "<< val_ind[ind]<< endl; // } } double CPU_time = omp_get_wtime() - start_time; //correctness check printf("\nomp time CPU : %.4f \n\n", CPU_time*1000); } void init(int *rows, int *cols, float* vals){ int n_bin=10; int *count = new int[n_bin]; int *row_ptr = new int[n_rows+1]; float *p_ind = new float[nnz]; float *W = new 
float[n_rows*k]; float *W_t = new float[n_rows*k]; float *H = new float[n_cols*k]; float *H_t = new float[n_cols*k]; int n_tile_c = n_cols/tile_sizeX + 1; int n_tile_r = n_rows/tile_sizeY + 1; int *lastIdx_tile = new int[n_tile_c*n_tile_r+1]; int *row_holder = new int[n_rows]; float *d_val, *d_W, *d_H, *d_W_t; int *d_row_ptr, *d_col_ind, *d_row_ind, *d_tiled_ind, *d_lastIdx; int n_tileX = n_cols/tile_sizeX+1; int n_tileY = n_rows/tile_sizeY+1; long new_nnz =0 ; initial(W, n_rows, k); initial(H, n_cols, k); make_HTasH(H, H_t, n_cols, k); make_HTasH(W, W_t, n_rows, k); cout << "n_tiles in x: " <<n_tile_c << " n_tiles in y: "<< n_tile_r<< endl; int *new_rows = new int[nnz]; int *new_cols = new int[nnz ]; float *new_vals = new float[nnz ]; int *tiled_ind = new int [nnz ]; // int *new_rows = new int[nnz]; // int *new_cols = new int[nnz]; // float *new_vals = new float[nnz]; //converting col sorted matrix to row sorted //unsorted_make_CSR(rows, cols, vals, nnz, n_rows, n_cols, row_ptr); //assuming sorted make_CSR(rows, cols, vals, nnz, n_rows, row_ptr, row_holder); //comp_bin(n_bin, count, n_rows, row_ptr, nnz); rewrite_matrix_1D(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, tile_sizeX, tile_sizeY, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz, row_holder); // write_mat(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, // tile_sizeX, tile_sizeY, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz, row_holder); // rewrite_col_sorted_matrix(row_ptr, rows, cols, vals, new_rows, new_cols, new_vals, nnz, n_rows, n_cols, // tile_sizeX, tiled_ind, lastIdx_tile, BLOCKSIZE, new_nnz); double t0 = seconds(); // sddmm_CPU_CSR(row_ptr, cols, vals, W, H, p_ind); sddmm_CPU_COO(rows, cols, vals, W, H, p_ind); //***********Starting GPU**************** checkCuda(cudaMalloc((void**)&d_W, k*n_rows*sizeof(float)),0); checkCuda(cudaMalloc((void**)&d_H, k*n_cols*sizeof(float)),1); // checkCuda(cudaMalloc((void**)&d_row_ptr, (n_rows+1)*sizeof(int)),2); checkCuda(cudaMalloc((void**)&d_row_ind, new_nnz*sizeof(int)),4); //checkCuda(cudaMalloc((void**)&d_col_ind, new_nnz*sizeof(int)),4); checkCuda(cudaMalloc((void**)&d_val, new_nnz*sizeof(float)),4); checkCuda(cudaMalloc((void**)&d_lastIdx, (n_tile_c*n_tile_r+1)*sizeof(float)),4); // checkCuda(cudaMalloc((void**)&d_tiled_ind, nnz*sizeof(int)),4); // checkCuda(cudaMemcpy(d_row_ptr, &(row_ptr[0]), (n_rows+1)*sizeof(int), cudaMemcpyHostToDevice),4); checkCuda(cudaMemcpy(d_row_ind, &(new_rows[0]), new_nnz*sizeof(int), cudaMemcpyHostToDevice),4); //checkCuda(cudaMemcpy(d_col_ind, &(new_cols[0]), new_nnz*sizeof(int), cudaMemcpyHostToDevice),4);; checkCuda(cudaMemcpy(d_val, &(new_vals[0]), new_nnz*sizeof(float), cudaMemcpyHostToDevice),4);; checkCuda(cudaMemcpy(d_lastIdx, &(lastIdx_tile[0]), (n_tile_c*n_tile_r+1)*sizeof(float), cudaMemcpyHostToDevice),4);; cudaMemset(d_val, 0, nnz*sizeof(float)); // checkCuda(cudaMemcpy(d_tiled_ind, &(tiled_ind[0]), nnz*sizeof(int), cudaMemcpyHostToDevice),4);; cudaMemcpy(d_W, &(W[0]), n_rows * k *sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(d_W, &(W_t[0]), n_rows * k *sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_H, &(H[0]), n_cols * k *sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(d_H, &(H_t[0]), n_cols * k *sizeof(float), cudaMemcpyHostToDevice); sddmm_GPU(d_row_ptr, d_row_ind, d_col_ind, d_val, d_W, d_H, d_tiled_ind, d_lastIdx, new_nnz ); //******** correctness check float GPU_tot = 0, CPU_tot =0, CPU_tot_orig =0 ; float *p_ind_temp = new float[new_nnz]; 
checkCuda(cudaMemcpy(&(p_ind_temp[0]), d_val, new_nnz*sizeof(float), cudaMemcpyDeviceToHost),4);; for (int i = 0; i < nnz; ++i){ CPU_tot += p_ind[tiled_ind[i]]; CPU_tot_orig += p_ind[i]; // cout << "p_ind " << p_ind[tiled_ind[i]] << " " << p_ind[i] << " new,old ind: "<<tiled_ind[i] <<" "<<i<< endl; } for (int i = 511; i < 511+2; ++i) cout << "gp idx " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl; for (int i = nnz-1; i > nnz-3; --i) cout << "gp idx " << i << " " <<" GPU "<< p_ind_temp[i] << " CPU "<< p_ind[tiled_ind[i]]<<endl; long diff_tot = 0; for (int i = 0; i < new_nnz; ++i){ if(abs(p_ind_temp[i]-p_ind[tiled_ind[i]]) > .00001){ diff_tot ++; if(diff_tot < 5) printf("CPU GPU diff %d: %f %f %f \n", i, p_ind_temp[i], p_ind[tiled_ind[i]],p_ind_temp[i]-p_ind[tiled_ind[i]] ); } } cout << "diff values in CPU and GPU: " << diff_tot << endl; //freeing device allocation cudaFree( d_row_ptr ); cudaFree( d_row_ind); cudaFree( d_col_ind); cudaFree( d_val); cudaFree( d_W ); cudaFree( d_H ); delete(rows); delete(cols); delete(vals); } int main(int argc, char* argv[]){ ifstream fp(argv[1]); k = atoi(argv[2]); tile_sizeX = atoi(argv[3]); string str; getline(fp,str); while(!isdigit(str[0])){ getline(fp,str); } istringstream is(str); is >> n_rows; is >> n_cols; is >> nnz; //fp >> n_rows >> n_cols >> nnz; long orig_nnz=nnz, rid=0,cid=0; float vid=0; int *rows = new int[nnz]; int *cols = new int[nnz]; float *vals = new float[nnz]; long idx=0; for (long o_idx = 0; o_idx < orig_nnz; ++o_idx) { fp >> rid >> cid >> vid; rows[idx]=rid-1; cols[idx]=cid-1; vals[idx]=vid; idx++; } cout << "Rows: "<<n_rows << " Cols: "<<n_cols <<" Nnz: "<< nnz << " tile-size: " << tile_sizeX<< " k: "<<k << endl; nnz=idx; init(rows, cols, vals); }
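Both versions of this file implement sampled dense-dense matrix multiplication (SDDMM): for every stored nonzero (r, c), the output is val[r][c] times the dot product of row r of W with row c of H (k terms each), with W and H stored row-major with k columns. Note that the file's own CPU reference sddmm_CPU_COO stores only the dot product (the val factor is commented out), and that is what the GPU result is compared against. The sketch below is a minimal host-side reference for the full operation under the same layout; it is illustrative and not taken from the original file.

#include <vector>
#include <cstdio>

// Reference SDDMM in COO form:
//   p[ind] = val[ind] * dot(W[row[ind], :], H[col[ind], :])
// with W (n_rows x k) and H (n_cols x k) stored row-major as in the file above.
static void sddmm_coo_ref(const std::vector<int> &row, const std::vector<int> &col,
                          const std::vector<float> &val,
                          const std::vector<float> &W, const std::vector<float> &H,
                          int k, std::vector<float> &p) {
    for (size_t ind = 0; ind < row.size(); ++ind) {
        float dot = 0.0f;
        for (int t = 0; t < k; ++t)
            dot += W[row[ind] * k + t] * H[col[ind] * k + t];
        p[ind] = val[ind] * dot;
    }
}

int main() {
    // Two nonzeros of a 2x2 sparsity pattern, k = 2 latent factors.
    std::vector<int>   row = {0, 1}, col = {1, 0};
    std::vector<float> val = {1.0f, 2.0f};
    std::vector<float> W = {1, 2, 3, 4};   // rows: {1,2}, {3,4}
    std::vector<float> H = {5, 6, 7, 8};   // rows: {5,6}, {7,8}
    std::vector<float> p(2);
    sddmm_coo_ref(row, col, val, W, H, 2, p);
    printf("%f %f\n", p[0], p[1]);         // 1*(1*7+2*8) = 23, 2*(3*5+4*6) = 78
    return 0;
}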
106dcb02de6a1184ae0abd21bf6ba2475142fad0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "_bcnn_forward_maxpool_layer_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; int in_h = 1; int in_w = 1; int in_c = 1; int stride = 2; int size = XSIZE*YSIZE; float *input = NULL; hipMalloc(&input, XSIZE*YSIZE); float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int *indexes = NULL; hipMalloc(&indexes, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( _bcnn_forward_maxpool_layer_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,in_h,in_w,in_c,stride,size,input,output,indexes); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( _bcnn_forward_maxpool_layer_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,in_h,in_w,in_c,stride,size,input,output,indexes); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( _bcnn_forward_maxpool_layer_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,in_h,in_w,in_c,stride,size,input,output,indexes); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
106dcb02de6a1184ae0abd21bf6ba2475142fad0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "_bcnn_forward_maxpool_layer_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; int in_h = 1; int in_w = 1; int in_c = 1; int stride = 2; int size = XSIZE*YSIZE; float *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int *indexes = NULL; cudaMalloc(&indexes, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); _bcnn_forward_maxpool_layer_kernel<<<gridBlock,threadBlock>>>(n,in_h,in_w,in_c,stride,size,input,output,indexes); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { _bcnn_forward_maxpool_layer_kernel<<<gridBlock,threadBlock>>>(n,in_h,in_w,in_c,stride,size,input,output,indexes); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { _bcnn_forward_maxpool_layer_kernel<<<gridBlock,threadBlock>>>(n,in_h,in_w,in_c,stride,size,input,output,indexes); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
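In both versions of this benchmark the timed loop issues 1000 asynchronous kernel launches and then reads steady_clock without synchronizing first, so the reported microseconds can largely reflect launch/queueing overhead rather than kernel execution time. Below is a minimal sketch of the same chrono-based timing with explicit synchronization fences around the timed region; dummy_kernel is a placeholder, not a kernel from the original file.

#include <cuda_runtime.h>
#include <chrono>
#include <cstdio>

// Placeholder kernel; stands in for _bcnn_forward_maxpool_layer_kernel above.
__global__ void dummy_kernel(float *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    float *x = NULL;
    cudaMalloc(&x, n * sizeof(float));
    dim3 threadBlock(256), gridBlock((n + 255) / 256);

    dummy_kernel<<<gridBlock, threadBlock>>>(x, n);   // warm-up launch
    cudaDeviceSynchronize();                          // drain the queue before timing

    auto start = std::chrono::steady_clock::now();
    for (int it = 0; it < 1000; ++it)
        dummy_kernel<<<gridBlock, threadBlock>>>(x, n);
    cudaDeviceSynchronize();                          // wait for all timed launches
    auto end = std::chrono::steady_clock::now();

    double usecs = std::chrono::duration<double, std::micro>(end - start).count();
    printf("[%f us total, %f us avg per launch]\n", usecs, usecs / 1000.0);

    cudaFree(x);
    return 0;
}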
f833226aeb13fb8b370ba8bafcd5232add3eb468.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This file is part of an experimental software implementation of * vertex-localized graph motif search for GPUs utilizing the constrained * multilinear sieving framework. * * The source code is subject to the following license. * * The MIT License (MIT) * * Copyright (c) 2017 P. Kaski, S. Thejaswi * Copyright (c) 2014 A. Bjrklund, P. Kaski, . Kowalik, J. Lauri * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * */ #include<stdio.h> #include<stdlib.h> #include<assert.h> #include<time.h> #include<sys/utsname.h> #include<string.h> #include<stdarg.h> #include<assert.h> #include<ctype.h> #include<sys/time.h> #include<cuda.h> /**************************************************** Configuration & types. */ #define THREADS_IN_WARP 32 typedef long int index_t; // default to 64-bit indexing #include"gf.h" #include"ffprng.h" #define MAX_K 32 #define MAX_SHADES 32 #define BUILD_PARALLEL // do a parallel CPU build #ifdef BUILD_PARALLEL #define MAX_THREADS 128 #include<omp.h> #endif typedef unsigned int shade_map_t; index_t have_devices = 0; index_t devices = 0; /******************************************************************** Flags. */ index_t flag_bin_input = 0; // default to ASCII input /************************************************************ Common macros. */ /* Linked list navigation macros. */ #define pnlinknext(to,el) { (el)->next = (to)->next; (el)->prev = (to); (to)->next->prev = (el); (to)->next = (el); } #define pnlinkprev(to,el) { (el)->prev = (to)->prev; (el)->next = (to); (to)->prev->next = (el); (to)->prev = (el); } #define pnunlink(el) { (el)->next->prev = (el)->prev; (el)->prev->next = (el)->next; } #define pnrelink(el) { (el)->next->prev = (el); (el)->prev->next = (el); } /********************************************************** Error reporting. */ #define ERROR(...) error(__FILE__,__LINE__,__func__,__VA_ARGS__); static void error(const char *fn, int line, const char *func, const char *format, ...) { va_list args; va_start(args, format); fprintf(stderr, "ERROR [file = %s, line = %d]\n" "%s: ", fn, line, func); vfprintf(stderr, format, args); fprintf(stderr, "\n"); va_end(args); abort(); } /******************************************************** Get the host name. 
*/ #define MAX_HOSTNAME 256 const char *sysdep_hostname(void) { static char hn[MAX_HOSTNAME]; struct utsname undata; uname(&undata); strcpy(hn, undata.nodename); return hn; } /******************************************************** Available threads. */ index_t num_threads(void) { #ifdef BUILD_PARALLEL return omp_get_max_threads(); #else return 1; #endif } /********************************************* Memory allocation & tracking. */ #define MALLOC(x) malloc_wrapper(x) #define FREE(x) free_wrapper(x) index_t malloc_balance = 0; struct malloc_track_struct { void *p; size_t size; struct malloc_track_struct *prev; struct malloc_track_struct *next; }; typedef struct malloc_track_struct malloc_track_t; malloc_track_t malloc_track_root; size_t malloc_total = 0; #define MEMTRACK_STACK_CAPACITY 256 size_t memtrack_stack[MEMTRACK_STACK_CAPACITY]; index_t memtrack_stack_top = -1; void *malloc_wrapper(size_t size) { if(malloc_balance == 0) { malloc_track_root.prev = &malloc_track_root; malloc_track_root.next = &malloc_track_root; } void *p = malloc(size); if(p == NULL) ERROR("malloc fails"); malloc_balance++; malloc_track_t *t = (malloc_track_t *) malloc(sizeof(malloc_track_t)); t->p = p; t->size = size; pnlinkprev(&malloc_track_root, t); malloc_total += size; for(index_t i = 0; i <= memtrack_stack_top; i++) if(memtrack_stack[i] < malloc_total) memtrack_stack[i] = malloc_total; return p; } void free_wrapper(void *p) { malloc_track_t *t = malloc_track_root.next; for(; t != &malloc_track_root; t = t->next) { if(t->p == p) break; } if(t == &malloc_track_root) ERROR("FREE issued on a non-tracked pointer %p", p); malloc_total -= t->size; pnunlink(t); free(t); free(p); malloc_balance--; } index_t *alloc_idxtab(index_t n) { index_t *t = (index_t *) MALLOC(sizeof(index_t)*n); return t; } void push_memtrack(void) { assert(memtrack_stack_top + 1 < MEMTRACK_STACK_CAPACITY); memtrack_stack[++memtrack_stack_top] = malloc_total; } size_t pop_memtrack(void) { assert(memtrack_stack_top >= 0); return memtrack_stack[memtrack_stack_top--]; } size_t current_mem(void) { return malloc_total; } double inGiB(size_t s) { return (double) s / (1 << 30); } void print_current_mem(void) { fprintf(stdout, "{curr: %.2lfGiB}", inGiB(current_mem())); fflush(stdout); } void print_pop_memtrack(void) { fprintf(stdout, "{peak: %.2lfGiB}", inGiB(pop_memtrack())); fflush(stdout); } /******************************************************* Timing subroutines. */ #define TIME_STACK_CAPACITY 256 double start_stack[TIME_STACK_CAPACITY]; index_t start_stack_top = -1; void push_time(void) { assert(start_stack_top + 1 < TIME_STACK_CAPACITY); #ifdef BUILD_PARALLEL start_stack[++start_stack_top] = omp_get_wtime(); #else start_stack[++start_stack_top] = (double) clock()/CLOCKS_PER_SEC; #endif } double pop_time(void) { #ifdef BUILD_PARALLEL double wstop = omp_get_wtime(); #else double wstop = (double) clock()/CLOCKS_PER_SEC; #endif assert(start_stack_top >= 0); double wstart = start_stack[start_stack_top--]; return (double) (1000.0*(wstop-wstart)); } /****************************************************************** Sorting. 
*/ void shellsort(index_t n, index_t *a) { index_t h = 1; index_t i; for(i = n/3; h < i; h = 3*h+1) ; do { for(i = h; i < n; i++) { index_t v = a[i]; index_t j = i; do { index_t t = a[j-h]; if(t <= v) break; a[j] = t; j -= h; } while(j >= h); a[j] = v; } h /= 3; } while(h > 0); } #define LEFT(x) (x<<1) #define RIGHT(x) ((x<<1)+1) #define PARENT(x) (x>>1) void heapsort_indext(index_t n, index_t *a) { /* Shift index origin from 0 to 1 for convenience. */ a--; /* Build heap */ for(index_t i = 2; i <= n; i++) { index_t x = i; while(x > 1) { index_t y = PARENT(x); if(a[x] <= a[y]) { /* heap property ok */ break; } /* Exchange a[x] and a[y] to enforce heap property */ index_t t = a[x]; a[x] = a[y]; a[y] = t; x = y; } } /* Repeat delete max and insert */ for(index_t i = n; i > 1; i--) { index_t t = a[i]; /* Delete max */ a[i] = a[1]; /* Insert t */ index_t x = 1; index_t y, z; while((y = LEFT(x)) < i) { z = RIGHT(x); if(z < i && a[y] < a[z]) { index_t s = z; z = y; y = s; } /* Invariant: a[y] >= a[z] */ if(t >= a[y]) { /* ok to insert here without violating heap property */ break; } /* Move a[y] up the heap */ a[x] = a[y]; x = y; } /* Insert here */ a[x] = t; } } /****************************************************** Bitmap manipulation. */ void bitset(index_t *map, index_t j, index_t value) { assert((value & (~1UL)) == 0); map[j/64] = (map[j/64] & ~(1UL << (j%64))) | ((value&1) << (j%64)); } index_t bitget(index_t *map, index_t j) { return (map[j/64]>>(j%64))&1UL; } /************************************************** Random numbers and such. */ index_t irand(void) { return (((index_t) rand())<<31)^((index_t) rand()); } /**************************************************** (Parallel) prefix sum. */ index_t prefixsum(index_t n, index_t *a, index_t k) { #ifdef BUILD_PARALLEL index_t s[MAX_THREADS]; index_t nt = num_threads(); assert(nt < MAX_THREADS); index_t length = n; index_t block_size = length/nt; #pragma omp parallel for for(index_t t = 0; t < nt; t++) { index_t start = t*block_size; index_t stop = (t == nt-1) ? length-1 : (start+block_size-1); index_t tsum = (stop-start+1)*k; for(index_t u = start; u <= stop; u++) tsum += a[u]; s[t] = tsum; } index_t run = 0; for(index_t t = 1; t <= nt; t++) { index_t v = s[t-1]; s[t-1] = run; run += v; } s[nt] = run; #pragma omp parallel for for(index_t t = 0; t < nt; t++) { index_t start = t*block_size; index_t stop = (t == nt-1) ? length-1 : (start+block_size-1); index_t trun = s[t]; for(index_t u = start; u <= stop; u++) { index_t tv = a[u]; a[u] = trun; trun += tv + k; } assert(trun == s[t+1]); } #else index_t run = 0; for(index_t u = 0; u < n; u++) { index_t tv = a[u]; a[u] = run; run += tv + k; } #endif return run; } /*********************** Search for an interval of values in a sorted array. 
*/ inline index_t get_interval(index_t n, index_t *a, index_t lo_val, index_t hi_val, index_t *iv_start, index_t *iv_end) { assert(n >= 0); if(n == 0) { *iv_start = 0; return 0; } assert(lo_val <= hi_val); // find first element in interval (if any) with binary search index_t lo = 0; index_t hi = n-1; // at or above lo, and at or below hi (if any) while(lo < hi) { index_t mid = (lo+hi)/2; // lo <= mid < hi index_t v = a[mid]; if(hi_val < v) { hi = mid-1; // at or below hi (if any) } else { if(v < lo_val) lo = mid+1; // at or above lo (if any), lo <= hi else hi = mid; // at or below hi (exists) } // 0 <= lo <= n-1 } if(a[lo] < lo_val || a[lo] > hi_val) { // array contains no values in interval if(a[lo] < lo_val) { lo++; assert(lo == n || a[lo+1] > hi_val); } else { assert(lo == 0 || a[lo-1] < lo_val); } *iv_start = lo; *iv_end = hi; return 0; } assert(lo_val <= a[lo] && a[lo] <= hi_val); *iv_start = lo; // find interval end (last index in interval) with binary search lo = 0; hi = n-1; // last index (if any) is at or above lo, and at or below hi while(lo < hi) { index_t mid = (lo+hi+1)/2; // lo < mid <= hi index_t v = a[mid]; if(hi_val < v) { hi = mid-1; // at or below hi, lo <= hi } else { if(v < lo_val) lo = mid+1; // at or above lo else lo = mid; // at or above lo, lo <= hi } } assert(lo == hi); *iv_end = lo; // lo == hi return 1+*iv_end-*iv_start; // return cut size } /********************************** Initialize an array with random scalars. */ void randinits_scalar(scalar_t *a, index_t s, ffprng_scalar_t seed) { ffprng_t base; FFPRNG_INIT(base, seed); index_t nt = num_threads(); index_t block_size = s/nt; #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t t = 0; t < nt; t++) { ffprng_t gen; index_t start = t*block_size; index_t stop = (t == nt-1) ? s-1 : (start+block_size-1); FFPRNG_FWD(gen, start, base); for(index_t i = start; i <= stop; i++) { ffprng_scalar_t rnd; FFPRNG_RAND(rnd, gen); scalar_t rs = (scalar_t) rnd; a[i] = rs; } } } /********************************************************************* CUDA. */ /************************ CUDA error wrapper (adapted from CUDA By Example). */ #define CUDA_WRAP(err) (error_wrap(err,__FILE__,__LINE__)) static void error_wrap(hipError_t err, const char *fn, int line) { if(err != hipSuccess) { fprintf(stderr, "error [%s, line %d]: %s\n", fn, line, hipGetErrorString(err)); fflush(stderr); exit(EXIT_FAILURE); } } /******************************************** Parallel line-sum for the GPU. */ /* * The following kernel adapted [in particular, sans the commentary!] * from Mark Harris, "Optimizing Parallel Reduction in CUDA", NVIDIA * * http://developer.download.nvidia.com/assets/cuda/files/reduction.pdf * */ template <index_t block_size> __device__ void device_line_psum_finish(volatile line_t *s, index_t a) { // Remarks: // // 1) // Observe the volatile decl above to instruct the compiler // *not* to reorder the share mem transactions below // // 2) // What is below takes simultaneously place for a = 0,1,...,31 // __in parallel__, all data now in s[0],s[1],...,s[63] // if(block_size >= 64) LINE_ADD(s[a],s[a],s[a + 32]); // ... now in s[0],s[1],...,s[31] if(block_size >= 32) LINE_ADD(s[a],s[a],s[a + 16]); // ... now in s[0],s[1],...,s[15] if(block_size >= 16) LINE_ADD(s[a],s[a],s[a + 8]); // ... now in s[0],s[1],...,s[7] if(block_size >= 8) LINE_ADD(s[a],s[a],s[a + 4]); // ... now in s[0],s[1],s[2],s[3] if(block_size >= 4) LINE_ADD(s[a],s[a],s[a + 2]); // ... now in s[0],s[1] if(block_size >= 2) LINE_ADD(s[a],s[a],s[a + 1]); // ... 
now in s[0] } template <index_t block_size> __global__ void device_line_psum_block(index_t dg, index_t q, index_t seg, line_array_t *d_in, line_array_t *d_out) { // Many a thread hereby commence their labours in this block ... index_t a = threadIdx.x; // my index inside my block index_t span = 2*block_size; // one block spans *twice* the data index_t major = (index_t) blockIdx.x+blockIdx.y*gridDim.x; index_t i = major*span + a; // accumulate from here ... index_t i_end = i + q; // ... to here (exclusive) index_t stride = span*dg; // ... with a stride that isolates // us from whatever the // __other__ blocks are doing, // asynchronously extern __shared__ line_t s[]; // cells for me and my mates // (in my block); my cell is s[a], // I shall write to no other cells // (except at init) // Start my work, my brave mates working in parallel with me ... line_t sum; LINE_SET_ZERO(sum); while(i < i_end) { line_t t1, t2; LINE_LOAD(t1, d_in, seg, i); LINE_LOAD(t2, d_in, seg, i + block_size); // span twice the data LINE_ADD(t1, t1, t2); LINE_ADD(sum, sum, t1); i += stride; // ... stride past all the other blocks } LINE_MOV(s[a], sum); LINE_SET_ZERO(s[a+block_size]); // small inputs may refer here, so zero it __syncthreads(); // sync with my mates // All data now in s[0],s[1],...,s[min(511,block_size)] if(block_size >= 512) { if(a < 256) { LINE_ADD(s[a],s[a],s[a + 256]); } __syncthreads(); } // All data now in s[0],s[1],...,s[min(255,block_size)] if(block_size >= 256) { if(a < 128) { LINE_ADD(s[a],s[a],s[a + 128]); } __syncthreads(); } // All data now in s[0],s[1],...,s[min(127,block_size)] if(block_size >= 128) { if(a < 64) { LINE_ADD(s[a],s[a],s[a + 64]); } __syncthreads(); } // All data now in s[0],s[1],...,s[min(63,block_size)] if(a < 32) { // Most of my mates are done, but I remain in the wrap-up detail ... device_line_psum_finish<block_size>(s, a); } if(a == 0) { // Ha! I get to output all the efforts due to me and my mates ... LINE_STORE(d_out, seg, major, s[0]); } } __global__ void device_lastp_line(index_t p, index_t seg, line_array_t *d_in, scalar_t *d_sum_out, index_t zero_acc) { index_t v = blockDim.x*((index_t) blockIdx.x+blockIdx.y*gridDim.x)+threadIdx.x; if(v < p) { line_t l; LINE_LOAD(l, d_in, seg, v); scalar_t t; LINE_SUM(t, l); if(zero_acc == 0) { d_sum_out[v] ^= t; } else { d_sum_out[v] = t; } } } void driver_line_psum(index_t p, index_t l, index_t seg, line_array_t *d_s0, line_array_t *d_s1, scalar_t *d_sum, index_t zero_acc) { index_t n = l; // number of lines to sum up index_t pref_threads = 512; // preferred threads per block // (must be a power of 2) while(n > 1) { index_t dg, db; size_t sm; if(n >= 2*pref_threads) { db = pref_threads; dg = n/(2*db); // one block spans _twice_ the data } else { db = n/2; // one block spans _twice_ the data dg = 1; } sm = sizeof(line_t)*db*2; // enough share mem to span twice the threads index_t pdg = p*dg; // Create a 2D grid to satisfy GPU hardware index_t pdgx = pdg >= (1 << 16) ? (1 << 15) : pdg; index_t pdgy = pdg >= (1 << 16) ? 
pdg / (1 << 15) : 1; dim3 pdg2(pdgx,pdgy); dim3 db2(db,1); switch(db) { case 1024: hipLaunchKernelGGL(( device_line_psum_block<1024>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 512: hipLaunchKernelGGL(( device_line_psum_block<512>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 256: hipLaunchKernelGGL(( device_line_psum_block<256>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 128: hipLaunchKernelGGL(( device_line_psum_block<128>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 64: hipLaunchKernelGGL(( device_line_psum_block< 64>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 32: hipLaunchKernelGGL(( device_line_psum_block< 32>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 16: hipLaunchKernelGGL(( device_line_psum_block< 16>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 8: hipLaunchKernelGGL(( device_line_psum_block< 8>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 4: hipLaunchKernelGGL(( device_line_psum_block< 4>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 2: hipLaunchKernelGGL(( device_line_psum_block< 2>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; case 1: hipLaunchKernelGGL(( device_line_psum_block< 1>), dim3(pdg2),dim3(db2),sm, 0, dg,n,seg,d_s0,d_s1); break; default: assert(0); break; } hipDeviceSynchronize(); CUDA_WRAP(hipGetLastError()); n = dg; /* Restore invariant. */ line_array_t *save = d_s0; d_s0 = d_s1; d_s1 = save; } /* Invariant: Input (= Output) is in d_s0. */ /* Sum up the last lines to a scalar. */ index_t dbl = 1; index_t dgl = 1; while(dbl < p) dbl *= 2; if(dbl > pref_threads) { dgl = dbl / pref_threads; dbl = pref_threads; } index_t dglx = dgl >= (1 << 16) ? (1 << 15) : dgl; index_t dgly = dgl >= (1 << 16) ? dgl / (1 << 15) : 1; dim3 dgl2(dglx,dgly); dim3 dbl2(dbl,1); hipLaunchKernelGGL(( device_lastp_line), dim3(dgl2),dim3(dbl2), 0, 0, p, seg, d_s0, d_sum, zero_acc); hipDeviceSynchronize(); CUDA_WRAP(hipGetLastError()); } /************************** Init shade variables for sieve (host-side init). */ void init_shades(index_t n, index_t n0, index_t k, index_t num_shades, shade_map_t *h_s, ffprng_scalar_t seed, scalar_t *h_z) { assert(num_shades <= MAX_SHADES); scalar_t wdj[k*k]; ffprng_t base; FFPRNG_INIT(base, seed); for(index_t i = 0; i < k; i++) { for(index_t j = 0; j < k; j++) { ffprng_scalar_t rnd; FFPRNG_RAND(rnd, base); wdj[i*k+j] = (scalar_t) rnd; } } index_t nt = num_threads(); index_t block_size = n/nt; #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t t = 0; t < nt; t++) { ffprng_t gen; index_t start = t*block_size; index_t stop = (t == nt-1) ? n-1 : (start+block_size-1); FFPRNG_FWD(gen, k*start, base); for(index_t i = start; i <= stop; i++) { if(i < n0) { scalar_t vi[k]; shade_map_t shades_u = h_s[i]; for(index_t j = 0; j < k; j++) { ffprng_scalar_t rnd; FFPRNG_RAND(rnd, gen); scalar_t rs = (scalar_t) rnd; rs = rs & (-((scalar_t)((shades_u >> j)&(j < num_shades)))); vi[j] = rs; } for(index_t j = 0; j < k; j++) { scalar_t uj = 0; for(index_t d = 0; d < k; d++) { scalar_t ln = 0; REF_SCALAR_MUL(ln, wdj[j*k+d], vi[d]); // SMUL [host]: n0*k*k REF_SCALAR_ADD(uj, uj, ln); } h_z[i*k+j] = uj; // SW [host]: n0*k } } else { for(index_t j = 0; j < k; j++) h_z[i*k+j] = 0; // SW [host]: (n-n0)*k } } } // total SW: n*k // total SMUL: n0*k*k } /****************************************************************** Sieving. 
*/ #define LINE_IDX(n, gl, l, u, a) ((((l)-1)*(n)*(gl))+((u)*(gl))+(a)) __global__ void device_constrained_sieve_pre(index_t n, index_t k, index_t gl, index_t seg, index_t pfx, scalar_t *d_z, line_array_t *d_s) { index_t job = blockDim.x*(blockIdx.x+blockIdx.y*gridDim.x)+threadIdx.x; index_t u = job/gl; index_t a = job%gl; index_t aa = pfx + a*SCALARS_IN_LINE; line_t ln; LINE_SET_ZERO(ln); for(index_t j = 0; j < SCALARS_IN_LINE; j++) { index_t aaj = aa+j; scalar_t xuaaj; SCALAR_SET_ZERO(xuaaj); for(index_t l = 0; l < k; l++) { scalar_t z_ul = d_z[u*k+l]; // SR [warp, cached]: n*k z_ul = z_ul & (-(((aaj) >> l)&1)); SCALAR_ADD(xuaaj, xuaaj, z_ul); } LINE_STORE_SCALAR(ln, j, xuaaj); } index_t l1ua = LINE_IDX(n, gl, 1, u, a); LINE_STORE(d_s, seg, l1ua, ln); // LW: n*gl // total SR: n*k // total LW: n*gl } void driver_constrained_sieve_pre(index_t n, index_t k, index_t gl, index_t seg, index_t pfx, index_t dg, index_t db, scalar_t *d_z, line_array_t *d_s) { // Create a 2D grid to satisfy GPU hardware index_t dgx = dg >= (1 << 16) ? (1 << 15) : dg; index_t dgy = dg >= (1 << 16) ? dg / (1 << 15) : 1; dim3 dg2(dgx,dgy); dim3 db2(db,1); hipLaunchKernelGGL(( device_constrained_sieve_pre), dim3(dg2),dim3(db2), 0, 0, n,k,gl,seg,pfx,d_z,d_s); hipDeviceSynchronize(); CUDA_WRAP(hipGetLastError()); } /********************************** Generating function for k-arborescences. */ #ifndef VERTICES_PER_GENF_THREAD #define VERTICES_PER_GENF_THREAD 1 // must be a power of 2 #endif __global__ void device_karb_genf_round(index_t n, index_t l, index_t k, index_t gl, index_t b, index_t seg, index_t *d_pos, index_t *d_adj, scalar_t *d_y, line_array_t *d_s #ifdef GF_LOG_EXP_LOOKUP , scalar_t *d_lookup_log, scalar_t *d_lookup_exp #endif ) { index_t job = blockDim.x*(blockIdx.x+blockIdx.y*gridDim.x)+threadIdx.x; index_t a = job % gl; index_t u_start = (job / gl) * VERTICES_PER_GENF_THREAD; index_t u_end = u_start + VERTICES_PER_GENF_THREAD - 1; #pragma unroll 1 for(index_t u = u_start; u <= u_end; u++) { index_t p = d_pos[u]; // IR [warp]: (k-1)*n index_t deg = d_adj[p]; // IR [warp]: (k-1)*n line_t p_lu; LINE_SET_ZERO(p_lu); #pragma unroll 1 for(index_t j = 1; j <= deg; j++) { index_t v = d_adj[p+j]; // IR [warp, cached]: (k-1)*m line_t p_luv; LINE_SET_ZERO(p_luv); #pragma unroll 1 for(index_t l1 = 1; l1 < l; l1++) { // \sum_{l=2}^k \sum_{l1=1}^{l-1} 1 // = \sum_{l=2}^k (l-1) // = k(k-1)/2 index_t l2 = l-l1; index_t l1u = LINE_IDX(n, gl, l1, u, a); line_t p_l1u; LINE_LOAD(p_l1u, d_s, seg, l1u); // LR: m*gl*k(k-1)/2 index_t l2v = LINE_IDX(n, gl, l2, v, a); line_t p_l2v; LINE_LOAD(p_l2v, d_s, seg, l2v); // LR: m*gl*k(k-1)/2 line_t p_l1u_l2v; LINE_MUL(p_l1u_l2v, p_l1u, p_l2v); // LMUL: m*gl*k(k-1)/2 LINE_ADD(p_luv, p_luv, p_l1u_l2v); } scalar_t y_luv = d_y[(l-1)*b+p+j]; // SR [warp, cached]: (k-1)*m line_t res; LINE_MUL_SCALAR(res, p_luv, y_luv); // LMUL: m*gl*(k-1) LINE_ADD(p_lu, p_lu, res); } index_t lu = LINE_IDX(n, gl, l, u, a); LINE_STORE(d_s, seg, lu, p_lu); // LW: n*gl*(k-1) } // total IR: 2*(k-1)*n+(k-1)*m // total SR: (k-1)*m // total LR+LW: m*gl*k(k-1) + n*gl*(k-1) // total LMUL: m*gl*k(k-1)/2 + m*gl*(k-1) } line_array_t *driver_karb_genf(index_t n, index_t k, index_t gl, index_t b, index_t seg, index_t dg, index_t db, index_t *d_pos, index_t *d_adj, scalar_t *d_y, line_array_t *d_s #ifdef GF_LOG_EXP_LOOKUP , scalar_t *d_lookup_log, scalar_t *d_lookup_exp #endif ) { // Create a 2D grid to satisfy GPU hardware index_t dgx = dg >= (1 << 16) ? (1 << 15) : dg; index_t dgy = dg >= (1 << 16) ? 
dg / (1 << 15) : 1; dim3 dg2(dgx,dgy); dim3 db2(db,1); assert(k >= 1); if(k >= 2) { for(index_t l = 2; l <= k; l++) { hipLaunchKernelGGL(( device_karb_genf_round), dim3(dg2),dim3(db2), 0, 0, n, l, k, gl, b, seg, d_pos, d_adj, d_y, d_s #ifdef GF_LOG_EXP_LOOKUP , d_lookup_log, d_lookup_exp #endif ); hipDeviceSynchronize(); CUDA_WRAP(hipGetLastError()); } } return d_s; } /********************************************** Stub to warm up GPU devices. */ void lightup_stub(void) { fprintf(stdout, "lightup: "); push_time(); index_t n = 1024; index_t seed = 123456789; double time0; /* Figure out how many devices we have. */ int d_cnt; hipGetDeviceCount(&d_cnt); index_t d_use = d_cnt; if(have_devices) { if(devices > d_cnt) ERROR("only %d CUDA devices available, request for %ld devices", d_cnt, devices); d_use = devices; } fprintf(stdout, "{dev:%ld:%d}", d_use, d_cnt); fflush(stdout); index_t d_num[d_use]; for(index_t d = 0; d < d_use; d++) d_num[d] = d; /* Allocate space in host memory. */ scalar_t *h_x = (scalar_t *) MALLOC(n*sizeof(scalar_t)); randinits_scalar(h_x, n, seed); scalar_t *d_x[d_use]; /* Now light up the hardware. */ push_time(); fprintf(stdout, " {malloc:"); /* Allocate space in device memory and copy input. */ for(int d = 0; d < d_use; d++) { push_time(); hipSetDevice(d_num[d]); double time1 = pop_time(); push_time(); /* Set up space in device memory. */ CUDA_WRAP(hipMalloc(&d_x[d], n*sizeof(scalar_t))); double time2 = pop_time(); push_time(); /* Upload input to device. */ CUDA_WRAP(hipMemcpy(d_x[d], h_x, n*sizeof(scalar_t), hipMemcpyHostToDevice)); double time3 = pop_time(); fprintf(stdout, " [%d %.2lfms %.2lfms %.2lfms]", d, time1, time2, time3); } time0 = pop_time(); fprintf(stdout, " %.2lfms}", time0); /* Free working space in host memory. */ FREE(h_x); push_time(); fprintf(stdout, " {free:"); /* Free device memory. */ for(int d = 0; d < d_use; d++) { push_time(); hipSetDevice(d_num[d]); double time1 = pop_time(); push_time(); CUDA_WRAP(hipFree(d_x[d])); double time2 = pop_time(); fprintf(stdout, " [%d %.2lfms %.2lfms]", d, time1, time2); } time0 = pop_time(); fprintf(stdout, " %.2lfms}", time0); time0 = pop_time(); fprintf(stdout, " [%.2lfms]\n", time0); fflush(stdout); } /******************************************************* The k-motif oracle. */ #if defined(GPU_M2090) #define PEAK_ALLOC_FOR_DEVICE (3*((size_t) 1 << 30)) // 3 GiB peak line array allocation for the M2090 #elif defined(GPU_K40) || defined(GPU_K80) #define PEAK_ALLOC_FOR_DEVICE (3*((size_t) 1 << 30)) // 3 GiB peak line array allocation for the K40 #elif defined(GPU_P100) #define PEAK_ALLOC_FOR_DEVICE (16*((size_t) 1 << 30)) // 16 GiB peak line array allocation for the P100 #else #error "choose one of GPU_M2090 or GPU_K40 or GPU_K80 or GPU_P100" #endif index_t oracle(index_t n0, index_t k, index_t *h_pos, index_t *h_adj, index_t num_shades, shade_map_t *h_s, index_t seed, scalar_t *master_vsum) { assert(k < 31); assert(n0 > 0); index_t m0 = h_pos[n0-1]+h_adj[h_pos[n0-1]]+1-n0; index_t b0 = n0+m0; index_t n = 1; while(n < n0) n = n*2; index_t m = m0; index_t b = n+m; /* Invariant: n must be a power of two. */ /* Figure out how many devices we have. */ int d_cnt; hipGetDeviceCount(&d_cnt); index_t d_use = d_cnt; if(have_devices) { if(devices > d_cnt) ERROR("only %d CUDA devices available, request for %ld devices", d_cnt, devices); d_use = devices; } index_t d_num[d_use]; for(index_t d = 0; d < d_use; d++) d_num[d] = d; /* Allocate work to devices. 
*/ index_t sum_size = 1 << k; assert(SCALARS_IN_LINE <= sum_size); index_t g = sum_size; // g scalars of work g /= d_use; assert(g*d_use == sum_size); while(LINE_ARRAY_SIZE((size_t) k*n*g) > PEAK_ALLOC_FOR_DEVICE) g /= 2; assert(g >= SCALARS_IN_LINE); index_t outer = sum_size / g; // number of iterations for outer loop index_t gl = g / SCALARS_IN_LINE; // gl scalar-lines of work index_t num_processors = 16; // should be a power of 2 index_t max_block = 32; // should be a power of 2 index_t work = n*gl; index_t work_per_processor = work / num_processors; index_t dg, db; if(work_per_processor < THREADS_IN_WARP) { dg = work / THREADS_IN_WARP; db = THREADS_IN_WARP; } else { db = work / num_processors; if(db > max_block) db = max_block; dg = work / db; } assert(dg >= 1); // must have enough work assert(db >= THREADS_IN_WARP); /* Invariant: n*gl == work == dg*db */ assert(dg % VERTICES_PER_GENF_THREAD == 0); /* Light up all devices and avoid cold start. */ lightup_stub(); /* Start timing. */ float time; hipEvent_t start, stop; hipSetDevice(d_num[0]); CUDA_WRAP(hipEventCreate(&start)); CUDA_WRAP(hipEventCreate(&stop)); CUDA_WRAP(hipEventRecord(start, 0)); /* Allocate working space in host memory. */ scalar_t *h_vsum = (scalar_t *) MALLOC(n*sizeof(scalar_t)); scalar_t *h_y = (scalar_t *) MALLOC(b*k*sizeof(scalar_t)); scalar_t *h_z = (scalar_t *) MALLOC(n*k*sizeof(scalar_t)); index_t *h_pospad = (index_t *) MALLOC((n-n0)*sizeof(index_t)); index_t *h_adjpad = (index_t *) MALLOC((b-b0)*sizeof(index_t)); /* Init & set up padding. */ #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < n; i++) h_vsum[i] = 0; init_shades(n, n0, k, num_shades, h_s, seed, h_z); randinits_scalar(h_y, b*k, seed); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < n-n0; i++) h_pospad[i] = b0+i; #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < b-b0; i++) h_adjpad[i] = 0; index_t *d_pos[d_use]; index_t *d_adj[d_use]; scalar_t *d_y[d_use]; scalar_t *d_z[d_use]; scalar_t *d_sum_out[d_use]; line_array_t *d_s[d_use]; scalar_t *d_vsum[d_use]; #ifdef GF_LOG_EXP_LOOKUP scalar_t *d_lookup_log[d_use]; scalar_t *d_lookup_exp[d_use]; gf_precompute_exp_log(); #endif index_t seg = LINE_SEGMENT_SIZE(k*n*g); /* Prepare input for each available device. */ #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(int d = 0; d < d_use; d++) { hipSetDevice(d_num[d]); /* Set up inputs and scratch space in device memory. */ CUDA_WRAP(hipMalloc(&d_pos[d], n*sizeof(index_t))); CUDA_WRAP(hipMalloc(&d_adj[d], b*sizeof(index_t))); CUDA_WRAP(hipMalloc(&d_y[d], b*k*sizeof(scalar_t))); CUDA_WRAP(hipMalloc(&d_z[d], n*k*sizeof(scalar_t))); CUDA_WRAP(hipMalloc(&d_sum_out[d], sizeof(scalar_t))); CUDA_WRAP(hipMalloc(&d_vsum[d], n*sizeof(scalar_t))); CUDA_WRAP(hipMalloc(&d_s[d], LINE_ARRAY_SIZE(k*n*g))); /* Upload input to device. 
*/ CUDA_WRAP(hipMemcpy(d_pos[d], h_pos, n0*sizeof(index_t), hipMemcpyHostToDevice)); CUDA_WRAP(hipMemcpy(d_adj[d], h_adj, b0*sizeof(index_t), hipMemcpyHostToDevice)); CUDA_WRAP(hipMemcpy(d_y[d], h_y, b*k*sizeof(scalar_t), hipMemcpyHostToDevice)); CUDA_WRAP(hipMemcpy(d_z[d], h_z, n*k*sizeof(scalar_t), hipMemcpyHostToDevice)); CUDA_WRAP(hipMemcpy(d_pos[d] + n0, h_pospad, (n-n0)*sizeof(index_t), hipMemcpyHostToDevice)); CUDA_WRAP(hipMemcpy(d_adj[d] + b0, h_adjpad, (b-b0)*sizeof(index_t), hipMemcpyHostToDevice)); #ifdef GF_LOG_EXP_LOOKUP CUDA_WRAP(hipMalloc(&d_lookup_log[d], GF_LOG_LOOKUP_SIZE)); CUDA_WRAP(hipMalloc(&d_lookup_exp[d], GF_EXP_LOOKUP_SIZE)); CUDA_WRAP(hipMemcpy(d_lookup_log[d], h_lookup_log, GF_LOG_LOOKUP_SIZE, hipMemcpyHostToDevice)); CUDA_WRAP(hipMemcpy(d_lookup_exp[d], h_lookup_exp, GF_EXP_LOOKUP_SIZE, hipMemcpyHostToDevice)); #endif } /* Free working space in host memory. */ FREE(h_y); FREE(h_z); FREE(h_pospad); FREE(h_adjpad); scalar_t master_sum; SCALAR_SET_ZERO(master_sum); /* Now run the work, in parallel on each device. * Use CPU-side multithreading for parallel launch. */ #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(int d = 0; d < d_use; d++) { hipSetDevice(d_num[d]); for(index_t out = d; out < outer; out = (out + d_use)) { driver_constrained_sieve_pre(n, k, gl, seg, g*out, dg, db, d_z[d], d_s[d]); line_array_t *d_g = driver_karb_genf(n, k, gl, b, seg, dg/VERTICES_PER_GENF_THREAD, db, d_pos[d], d_adj[d], d_y[d], d_s[d] #ifdef GF_LOG_EXP_LOOKUP , d_lookup_log[d], d_lookup_exp[d] #endif ); driver_line_psum(n, gl, seg, d_g + (k-1)*n*gl, d_g, d_vsum[d], out == d ? 1 : 0); } } for(int d = 0; d < d_use; d++) { hipSetDevice(d_num[d]); CUDA_WRAP(hipMemcpy(h_vsum, d_vsum[d], n*sizeof(scalar_t), hipMemcpyDeviceToHost)); if(d == 0) { #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < n0; i++) master_vsum[i] = h_vsum[i]; } else { #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < n0; i++) master_vsum[i] ^= h_vsum[i]; } } for(int i = 0; i < n0; i++) REF_SCALAR_ADD(master_sum, master_sum, master_vsum[i]); /* Stop timing. */ hipSetDevice(d_num[0]); CUDA_WRAP(hipEventRecord(stop, 0)); CUDA_WRAP(hipEventSynchronize(stop)); CUDA_WRAP(hipEventElapsedTime(&time, start, stop)); /* All done, now print out some statistics. 
*/ // total IR: 2*(k-1)*n+(k-1)*m (genf) // total SW: n*k (host init) // total SR: n*k (pre) // total SR: (k-1)*m (genf) // total LW: n*gl (pre) // total LR+LW: m*gl*k(k-1) + n*gl*(k-1) (genf) // total LR: n*gl (sum) // total SMUL: n0*k*k (host init) // total LMUL: m*gl*k(k-1)/2 + m*gl*(k-1) (genf) double line_rw_inner = (double) m*gl*k*(k-1) + n*gl*(k-1) + 2*n*gl; double line_mul_inner = (double) m*gl*k*(k-1)/2 + m*gl*(k-1); double line_rw_total = line_rw_inner*outer; double line_mul_total = line_mul_inner*outer; double bytes_rw_total = EFFECTIVE_BYTES_IN_LINE*line_rw_total; double scalar_mul_total = line_mul_total*SCALARS_IN_LINE; double rw_rate = bytes_rw_total / (time/1000.0); double mul_rate = scalar_mul_total / time; double total_instr_in_mul = LINE_MUL_INSTR*scalar_mul_total/SCALARS_IN_LINE; double instr_in_mul_rate = total_instr_in_mul / time; fprintf(stdout, "oracle: {dev:%ld:%ld} " SCALAR_FORMAT_STRING " %10.2fms [%6.3lfGiB %7.2lfGiB/s %7.2lfGHz %7.2fGHz] %ld %d", d_use, d_cnt, (scalar_t) master_sum, time, inGiB(LINE_ARRAY_SIZE(k*n*g)+ n*sizeof(index_t)+ b*sizeof(index_t)+ b*k*sizeof(scalar_t)+ n*k*sizeof(scalar_t)+ n*sizeof(scalar_t)+ sizeof(scalar_t)), rw_rate/((double)(1<<30)), mul_rate/((double)1e6), instr_in_mul_rate/((double) 1e6), gl, master_sum != 0); fflush(stdout); /* Free host memory. */ FREE(h_vsum); /* Free device memory. */ #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(int d = 0; d < d_use; d++) { hipSetDevice(d_num[d]); CUDA_WRAP(hipFree(d_pos[d])); CUDA_WRAP(hipFree(d_adj[d])); CUDA_WRAP(hipFree(d_y[d])); CUDA_WRAP(hipFree(d_z[d])); CUDA_WRAP(hipFree(d_s[d])); CUDA_WRAP(hipFree(d_vsum[d])); CUDA_WRAP(hipFree(d_sum_out[d])); #ifdef PRECOMPUTE_GF_2_8 CUDA_WRAP(hipFree(d_lookup_log[d])); CUDA_WRAP(hipFree(d_lookup_exp[d])); #endif } return master_sum != 0; } /***************************************************************** End CUDA. */ /************************************************ Rudimentary graph builder. 
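 *
 * The builder keeps a flat 0-based edge list whose capacity doubles on
 * demand, plus a per-vertex color array initialized to -1. An
 * illustrative use (a triangle with every vertex colored 0):
 *
 *   graph_t *g = graph_alloc(3);
 *   graph_add_edge(g, 0, 1);
 *   graph_add_edge(g, 1, 2);
 *   graph_add_edge(g, 2, 0);
 *   for(index_t u = 0; u < 3; u++)
 *       graph_set_color(g, u, 0);
 *   ...
 *   graph_free(g);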
*/ typedef struct { index_t num_vertices; index_t num_edges; index_t edge_capacity; index_t *edges; index_t *colors; } graph_t; static index_t *enlarge(index_t m, index_t m_was, index_t *was) { assert(m >= 0 && m_was >= 0); index_t *a = (index_t *) MALLOC(sizeof(index_t)*m); index_t i; if(was != (void *) 0) { for(i = 0; i < m_was; i++) { a[i] = was[i]; } FREE(was); } return a; } graph_t *graph_alloc(index_t n) { assert(n >= 0); index_t i; graph_t *g = (graph_t *) MALLOC(sizeof(graph_t)); g->num_vertices = n; g->num_edges = 0; g->edge_capacity = 100; g->edges = enlarge(2*g->edge_capacity, 0, (index_t *) 0); g->colors = (index_t *) MALLOC(sizeof(index_t)*n); for(i = 0; i < n; i++) g->colors[i] = -1; return g; } void graph_free(graph_t *g) { FREE(g->edges); FREE(g->colors); FREE(g); } void graph_add_edge(graph_t *g, index_t u, index_t v) { assert(u >= 0 && v >= 0 && u < g->num_vertices && v < g->num_vertices); if(g->num_edges == g->edge_capacity) { g->edges = enlarge(4*g->edge_capacity, 2*g->edge_capacity, g->edges); g->edge_capacity *= 2; } assert(g->num_edges < g->edge_capacity); index_t *e = g->edges + 2*g->num_edges; g->num_edges++; e[0] = u; e[1] = v; } index_t *graph_edgebuf(graph_t *g, index_t cap) { g->edges = enlarge(2*g->edge_capacity+2*cap, 2*g->edge_capacity, g->edges); index_t *e = g->edges + 2*g->num_edges; g->edge_capacity += cap; g->num_edges += cap; return e; } void graph_set_color(graph_t *g, index_t u, index_t c) { assert(u >= 0 && u < g->num_vertices && c >= 0); g->colors[u] = c; } /************************************ Basic motif query processing routines. */ struct motifq_struct { index_t is_stub; index_t n; index_t k; index_t *pos; index_t *adj; index_t nl; index_t *l; index_t ns; shade_map_t *shade; scalar_t *vsum; }; typedef struct motifq_struct motifq_t; void adjsort(index_t n, index_t *pos, index_t *adj) { #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) { index_t pu = pos[u]; index_t deg = adj[pu]; heapsort_indext(deg, adj + pu + 1); } } void motifq_free(motifq_t *q) { if(!q->is_stub) { FREE(q->pos); FREE(q->adj); FREE(q->l); FREE(q->shade); FREE(q->vsum); } FREE(q); } index_t motifq_execute(motifq_t *q) { if(q->is_stub) return 0; return oracle(q->n, q->k, q->pos, q->adj, q->ns, q->shade, irand(), q->vsum); } /************** Project a query by cutting out a given interval of vertices. 
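 *
 * motifq_cut removes the vertex interval [lo_v,hi_v] from a query whose
 * adjacency lists are sorted. If any forced vertex in q->l lies inside
 * the interval, the result is a stub query (a trivial NO). Otherwise the
 * surviving vertices above hi_v are shifted down by gap = hi_v-lo_v+1;
 * e.g. with n = 8 and the cut [3:5], vertices 0,1,2 keep their indices
 * and vertices 6,7 become 3,4. Adjacency lists are filtered with
 * get_interval, which is why they must be kept sorted.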
*/ index_t get_poscut(index_t n, index_t *pos, index_t *adj, index_t lo_v, index_t hi_v, index_t *poscut) { // Note: assumes the adjacency lists are sorted assert(lo_v <= hi_v); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < lo_v; u++) { index_t pu = pos[u]; index_t deg = adj[pu]; index_t cs, ce; index_t l = get_interval(deg, adj + pu + 1, lo_v, hi_v, &cs, &ce); poscut[u] = deg - l; } #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = hi_v+1; u < n; u++) { index_t pu = pos[u]; index_t deg = adj[pu]; index_t cs, ce; index_t l = get_interval(deg, adj + pu + 1, lo_v, hi_v, &cs, &ce); poscut[u-hi_v-1+lo_v] = deg - l; } index_t ncut = n - (hi_v-lo_v+1); index_t run = prefixsum(ncut, poscut, 1); return run; } motifq_t *motifq_cut(motifq_t *q, index_t lo_v, index_t hi_v) { // Note: assumes the adjacency lists are sorted index_t n = q->n; index_t *pos = q->pos; index_t *adj = q->adj; assert(0 <= lo_v && lo_v <= hi_v && hi_v < n); // Fast-forward a stub NO when the interval // [lo_v,hi_v] contains an element in q->l for(index_t i = 0; i < q->nl; i++) { if(q->l[i] >= lo_v && q->l[i] <= hi_v) { motifq_t *qs = (motifq_t *) MALLOC(sizeof(motifq_t)); qs->is_stub = 1; return qs; } } index_t ncut = n - (hi_v-lo_v+1); index_t *poscut = alloc_idxtab(ncut); index_t bcut = get_poscut(n, pos, adj, lo_v, hi_v, poscut); index_t *adjcut = alloc_idxtab(bcut); index_t gap = hi_v-lo_v+1; #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t v = 0; v < ncut; v++) { index_t u = v; if(u >= lo_v) u += gap; index_t pu = pos[u]; index_t degu = adj[pu]; index_t cs, ce; index_t l = get_interval(degu, adj + pu + 1, lo_v, hi_v, &cs, &ce); index_t pv = poscut[v]; index_t degv = degu - l; adjcut[pv] = degv; // could parallelize this too for(index_t i = 0; i < cs; i++) adjcut[pv + 1 + i] = adj[pu + 1 + i]; // could parallelize this too for(index_t i = cs; i < degv; i++) adjcut[pv + 1 + i] = adj[pu + 1 + i + l] - gap; } motifq_t *qq = (motifq_t *) MALLOC(sizeof(motifq_t)); qq->is_stub = 0; qq->n = ncut; qq->k = q->k; qq->pos = poscut; qq->adj = adjcut; qq->nl = q->nl; qq->l = (index_t *) MALLOC(sizeof(index_t)*qq->nl); for(index_t i = 0; i < qq->nl; i++) { index_t u = q->l[i]; assert(u < lo_v || u > hi_v); if(u > hi_v) u -= gap; qq->l[i] = u; } qq->ns = q->ns; qq->shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*ncut); for(index_t v = 0; v < ncut; v++) { index_t u = v; if(u >= lo_v) u += gap; qq->shade[v] = q->shade[u]; } qq->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*qq->n); return qq; } /***************** Project a query with given projection & embedding arrays. 
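 *
 * Here proj maps an original vertex to its index in the projected query,
 * or to PROJ_UNDEF if the vertex is dropped, and embed is the inverse
 * map on the surviving vertices (embed[proj[u]] == u). Besides filtering
 * the adjacency lists through proj, motifq_project reserves one unique
 * shade for every forced vertex in l: a free shade bit present in that
 * vertex's shade map is chosen, the reserved bits are cleared from all
 * other vertices, and each forced vertex keeps exactly its own bit.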
*/ #define PROJ_UNDEF 0xFFFFFFFFFFFFFFFFUL index_t get_posproj(index_t n, index_t *pos, index_t *adj, index_t nproj, index_t *proj, index_t *embed, index_t *posproj) { #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t v = 0; v < nproj; v++) { index_t u = embed[v]; index_t pu = pos[u]; index_t deg = adj[pu]; index_t degproj = 0; for(index_t i = 0; i < deg; i++) { index_t w = proj[adj[pu + 1 + i]]; if(w != PROJ_UNDEF) degproj++; } posproj[v] = degproj; } index_t run = prefixsum(nproj, posproj, 1); return run; } motifq_t *motifq_project(motifq_t *q, index_t nproj, index_t *proj, index_t *embed, index_t nl, index_t *l) { index_t n = q->n; index_t *pos = q->pos; index_t *adj = q->adj; index_t *posproj = alloc_idxtab(nproj); index_t bproj = get_posproj(n, pos, adj, nproj, proj, embed, posproj); index_t *adjproj = alloc_idxtab(bproj); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t v = 0; v < nproj; v++) { index_t pv = posproj[v]; index_t u = embed[v]; index_t pu = pos[u]; index_t deg = adj[pu]; index_t degproj = 0; for(index_t i = 0; i < deg; i++) { index_t w = proj[adj[pu + 1 + i]]; if(w != PROJ_UNDEF) adjproj[pv + 1 + degproj++] = w; } adjproj[pv] = degproj; } motifq_t *qq = (motifq_t *) MALLOC(sizeof(motifq_t)); qq->is_stub = 0; qq->n = nproj; qq->k = q->k; qq->pos = posproj; qq->adj = adjproj; // Now project the l array assert(q->nl == 0); // l array comes from lister qq->nl = nl; qq->l = (index_t *) MALLOC(sizeof(index_t)*nl); for(index_t i = 0; i < nl; i++) { index_t u = proj[l[i]]; assert(u != PROJ_UNDEF); // query is a trivial NO ! qq->l[i] = u; } // Next set up the projected shades qq->ns = q->ns; qq->shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*nproj); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) { index_t v = proj[u]; if(v != PROJ_UNDEF) qq->shade[v] = q->shade[u]; } // Reserve a unique shade to every vertex in l // while keeping the remaining shades available // Reserve shades first ... index_t *l_shade = (index_t *) MALLOC(sizeof(index_t)*nl); shade_map_t reserved_shades = 0; for(index_t i = 0; i < nl; i++) { index_t v = qq->l[i]; index_t j = 0; for(; j < qq->ns; j++) if(((qq->shade[v] >> j)&1) == 1 && ((reserved_shades >> j)&1) == 0) break; assert(j < qq->ns); reserved_shades |= 1UL << j; l_shade[i] = j; } // ... then clear all reserved shades in one pass #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t v = 0; v < nproj; v++) qq->shade[v] &= ~reserved_shades; // ... and finally set reserved shades for(index_t i = 0; i < nl; i++) { index_t v = qq->l[i]; qq->shade[v] = 1UL << l_shade[i]; } FREE(l_shade); qq->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*qq->n); return qq; } /*************************************************** The interval extractor. 
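 *
 * The extractor turns a YES answer from the oracle into an explicit
 * match. It maintains a queue of disjoint vertex intervals whose union
 * is known to contain a match; while more than k vertices remain, it
 * halves a non-singleton interval, uses motifq_cut to remove one half,
 * and asks the oracle whether a match survives. Halves that can be
 * discarded are recorded in an embed list so that indices of the
 * projected instances can be mapped back to original vertices
 * (ivext_embed). The process stops when k singleton intervals remain,
 * and these give the reported match.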
*/ struct ivlist_struct { index_t start; index_t end; struct ivlist_struct *prev; struct ivlist_struct *next; }; typedef struct ivlist_struct ivlist_t; typedef struct ivext_struct { index_t n; index_t k; ivlist_t *queue; ivlist_t *active_queue_head; ivlist_t *spare_queue_head; ivlist_t *embed_list; } ivext_t; void ivext_enqueue_spare(ivext_t *e, ivlist_t *iv) { pnlinknext(e->spare_queue_head,iv); } void ivext_enqueue_active(ivext_t *e, ivlist_t *iv) { pnlinkprev(e->active_queue_head,iv); } ivlist_t *ivext_dequeue_first_nonsingleton(ivext_t *e) { ivlist_t *iv = e->active_queue_head->next; for(; iv != e->active_queue_head; iv = iv->next) if(iv->end - iv->start + 1 > 1) break; assert(iv != e->active_queue_head); pnunlink(iv); return iv; } ivlist_t *ivext_get_spare(ivext_t *e) { assert(e->spare_queue_head->next != e->spare_queue_head); ivlist_t *iv = e->spare_queue_head->next; pnunlink(iv); return iv; } void ivext_reset(ivext_t *e) { e->active_queue_head = e->queue + 0; e->spare_queue_head = e->queue + 1; e->active_queue_head->next = e->active_queue_head; e->active_queue_head->prev = e->active_queue_head; e->spare_queue_head->prev = e->spare_queue_head; e->spare_queue_head->next = e->spare_queue_head; e->embed_list = (ivlist_t *) 0; for(index_t i = 0; i < e->k + 2; i++) ivext_enqueue_spare(e, e->queue + 2 + i); // rot-safe ivlist_t *iv = ivext_get_spare(e); iv->start = 0; iv->end = e->n-1; ivext_enqueue_active(e, iv); } ivext_t *ivext_alloc(index_t n, index_t k) { ivext_t *e = (ivext_t *) MALLOC(sizeof(ivext_t)); e->n = n; e->k = k; e->queue = (ivlist_t *) MALLOC(sizeof(ivlist_t)*(k+4)); // rot-safe ivext_reset(e); return e; } void ivext_free(ivext_t *e) { ivlist_t *el = e->embed_list; while(el != (ivlist_t *) 0) { ivlist_t *temp = el; el = el->next; FREE(temp); } FREE(e->queue); FREE(e); } void ivext_project(ivext_t *e, ivlist_t *iv) { for(ivlist_t *z = e->active_queue_head->next; z != e->active_queue_head; z = z->next) { assert(z->end < iv->start || z->start > iv->end); if(z->start > iv->end) { z->start -= iv->end-iv->start+1; z->end -= iv->end-iv->start+1; } } ivlist_t *em = (ivlist_t *) MALLOC(sizeof(ivlist_t)); em->start = iv->start; em->end = iv->end; em->next = e->embed_list; e->embed_list = em; } index_t ivext_embed(ivext_t *e, index_t u) { ivlist_t *el = e->embed_list; while(el != (ivlist_t *) 0) { if(u >= el->start) u += el->end - el->start + 1; el = el->next; } return u; } ivlist_t *ivext_halve(ivext_t *e, ivlist_t *iv) { assert(iv->end - iv->start + 1 >= 2); index_t mid = (iv->start + iv->end)/2; // mid < iv->end ivlist_t *h = ivext_get_spare(e); h->start = iv->start; h->end = mid; iv->start = mid+1; return h; } index_t ivext_queue_size(ivext_t *e) { index_t s = 0; for(ivlist_t *iv = e->active_queue_head->next; iv != e->active_queue_head; iv = iv->next) s += iv->end-iv->start+1; return s; } index_t ivext_num_active_intervals(ivext_t *e) { index_t s = 0; for(ivlist_t *iv = e->active_queue_head->next; iv != e->active_queue_head; iv = iv->next) s++; return s; } void ivext_queue_print(FILE *out, ivext_t *e, index_t rot) { index_t j = 0; char x[16384]; char y[16384]; y[0] = '\0'; sprintf(x, "%c%12ld [", rot == 0 ? ' ' : 'R', ivext_queue_size(e)); strcat(y, x); for(ivlist_t *iv = e->active_queue_head->next; iv != e->active_queue_head; iv = iv->next) { assert(iv->start <= iv->end); if(iv->start < iv->end) sprintf(x, "%s[%ld:%ld]", j++ == 0 ? "" : ",", ivext_embed(e, iv->start), ivext_embed(e, iv->end)); else sprintf(x, "%s[%ld]", j++ == 0 ? 
"[" : ",", ivext_embed(e, iv->start)); strcat(y, x); } strcat(y, "] "); fprintf(out, "%-120s", y); fflush(out); } index_t extract_match(index_t is_root, motifq_t *query, index_t *match) { // Assumes adjancency lists of query are sorted. fprintf(stdout, "extract: %ld %ld %ld\n", query->n, query->k, query->nl); push_time(); assert(query->k <= query->n); ivext_t *e = ivext_alloc(query->n, query->k); ivext_queue_print(stdout, e, 0); if(!motifq_execute(query)) { fprintf(stdout, " -- false\n"); ivext_free(e); if(!is_root) motifq_free(query); double time = pop_time(); fprintf(stdout, "extract done [%.2lf ms]\n", time); return 0; } fprintf(stdout, " -- true\n"); while(ivext_queue_size(e) > e->k) { ivlist_t *iv = ivext_dequeue_first_nonsingleton(e); ivlist_t *h = ivext_halve(e, iv); ivext_enqueue_active(e, iv); motifq_t *qq = motifq_cut(query, h->start, h->end); ivext_queue_print(stdout, e, 0); if(motifq_execute(qq)) { fprintf(stdout, " -- true\n"); if(!is_root) motifq_free(query); query = qq; is_root = 0; ivext_project(e, h); ivext_enqueue_spare(e, h); } else { fprintf(stdout, " -- false\n"); motifq_free(qq); pnunlink(iv); ivext_enqueue_active(e, h); qq = motifq_cut(query, iv->start, iv->end); ivext_queue_print(stdout, e, 0); if(motifq_execute(qq)) { fprintf(stdout, " -- true\n"); if(!is_root) motifq_free(query); query = qq; is_root = 0; ivext_project(e, iv); ivext_enqueue_spare(e, iv); } else { fprintf(stdout, " -- false\n"); motifq_free(qq); ivext_enqueue_active(e, iv); while(ivext_num_active_intervals(e) > e->k) { // Rotate queue until outlier is out ... ivlist_t *iv = e->active_queue_head->next; pnunlink(iv); qq = motifq_cut(query, iv->start, iv->end); ivext_queue_print(stdout, e, 1); if(motifq_execute(qq)) { fprintf(stdout, " -- true\n"); if(!is_root) motifq_free(query); query = qq; is_root = 0; ivext_project(e, iv); ivext_enqueue_spare(e, iv); } else { fprintf(stdout, " -- false\n"); motifq_free(qq); ivext_enqueue_active(e, iv); } } } } } for(index_t i = 0; i < query->k; i++) match[i] = ivext_embed(e, i); ivext_free(e); if(!is_root) motifq_free(query); double time = pop_time(); fprintf(stdout, "extract done [%.2lf ms]\n", time); return 1; } /*************************************************************** The lister. */ #define M_QUERY 0 #define M_OPEN 1 #define M_CLOSE 2 #define M_REWIND_U 3 #define M_REWIND_L 4 index_t command_mnemonic(index_t command) { return command >> 60; } index_t command_index(index_t command) { return command & (~(0xFFUL<<60)); } index_t to_command_idx(index_t mnemonic, index_t idx) { assert(idx < (1UL << 60)); return (mnemonic << 60)|idx; } index_t to_command(index_t mnemonic) { return to_command_idx(mnemonic, 0UL); } typedef struct { index_t n; // number of elements in universe index_t k; // size of the sets to be listed index_t *u; // upper bound as a bitmap index_t u_size; // size of upper bound index_t *l; // lower bound index_t l_size; // size of lower bound index_t *stack; // a stack for maintaining state index_t stack_capacity; // ... 
the capacity of the stack index_t top; // index of stack top motifq_t *root; // the root query } lister_t; void lister_push(lister_t *t, index_t word) { assert(t->top + 1 < t->stack_capacity); t->stack[++t->top] = word; } index_t lister_pop(lister_t *t) { return t->stack[t->top--]; } index_t lister_have_work(lister_t *t) { return t->top >= 0; } index_t lister_in_l(lister_t *t, index_t j) { for(index_t i = 0; i < t->l_size; i++) if(t->l[i] == j) return 1; return 0; } void lister_push_l(lister_t *t, index_t j) { assert(!lister_in_l(t, j) && t->l_size < t->k); t->l[t->l_size++] = j; } void lister_pop_l(lister_t *t) { assert(t->l_size > 0); t->l_size--; } void lister_reset(lister_t *t) { t->l_size = 0; t->top = -1; lister_push(t, to_command(M_QUERY)); for(index_t i = 0; i < t->n; i++) bitset(t->u, i, 1); t->u_size = t->n; } lister_t *lister_alloc(index_t n, index_t k, motifq_t *root) { assert(n >= 1 && n < (1UL << 60) && k >= 1 && k <= n); lister_t *t = (lister_t *) MALLOC(sizeof(lister_t)); t->n = n; t->k = k; t->u = alloc_idxtab((n+63)/64); t->l = alloc_idxtab(k); t->stack_capacity = n + k*(k+1+2*k) + 1; t->stack = alloc_idxtab(t->stack_capacity); lister_reset(t); t->root = root; if(t->root != (motifq_t *) 0) { assert(t->root->n == t->n); assert(t->root->k == t->k); assert(t->root->nl == 0); } return t; } void lister_free(lister_t *t) { if(t->root != (motifq_t *) 0) motifq_free(t->root); FREE(t->u); FREE(t->l); FREE(t->stack); FREE(t); } void lister_get_proj_embed(lister_t *t, index_t **proj_out, index_t **embed_out) { index_t n = t->n; index_t usize = t->u_size; index_t *embed = (index_t *) MALLOC(sizeof(index_t)*usize); index_t *proj = (index_t *) MALLOC(sizeof(index_t)*n); // could parallelize this (needs parallel prefix sum) index_t run = 0; for(index_t i = 0; i < n; i++) { if(bitget(t->u, i)) { proj[i] = run; embed[run] = i; run++; } else { proj[i] = PROJ_UNDEF; } } assert(run == usize); *proj_out = proj; *embed_out = embed; } void lister_query_setup(lister_t *t, motifq_t **q_out, index_t **embed_out) { index_t *proj; index_t *embed; // set up the projection with u and l lister_get_proj_embed(t, &proj, &embed); motifq_t *qq = motifq_project(t->root, t->u_size, proj, embed, t->l_size, t->l); FREE(proj); *q_out = qq; *embed_out = embed; } index_t lister_extract(lister_t *t, index_t *s) { // assumes t->u contains all elements of t->l // (otherwise query is trivial no) assert(t->root != (motifq_t *) 0); if(t->u_size == t->n) { // rush the root query without setting up a copy return extract_match(1, t->root, s); } else { // a first order of business is to set up the query // based on the current t->l and t->u; this includes // also setting up the embedding back to the root, // in case we are lucky and actually discover a match motifq_t *qq; // will be released by extractor index_t *embed; lister_query_setup(t, &qq, &embed); // now execute the interval extractor ... index_t got_match = extract_match(0, qq, s); // ... 
and embed the match (if any) if(got_match) { for(index_t i = 0; i < t->k; i++) s[i] = embed[s[i]]; } FREE(embed); return got_match; } } index_t lister_run(lister_t *t, index_t *s) { while(lister_have_work(t)) { index_t cmd = lister_pop(t); index_t mnem = command_mnemonic(cmd); index_t idx = command_index(cmd); switch(mnem) { case M_QUERY: if(t->k <= t->u_size && lister_extract(t, s)) { // we have discovered a match, which we need to // put on the stack to continue work when the user // requests this for(index_t i = 0; i < t->k; i++) lister_push(t, s[i]); lister_push(t, to_command_idx(M_OPEN, t->k-1)); // now report our discovery to user return 1; } break; case M_OPEN: { index_t *x = t->stack + t->top - t->k + 1; index_t k = 0; for(; k < idx; k++) if(!lister_in_l(t, x[k])) break; if(k == idx) { // opening on last element of x not in l // so we can dispense with x as long as we remember to // insert x[idx] back to u when rewinding for(index_t j = 0; j < t->k; j++) lister_pop(t); // axe x from stack if(!lister_in_l(t, x[idx])) { bitset(t->u, x[idx], 0); // remove x[idx] from u t->u_size--; lister_push(t, to_command_idx(M_REWIND_U, x[idx])); lister_push(t, to_command(M_QUERY)); } } else { // have still other elements of x that we need to // open on, so must keep x in stack // -- // invariant that controls stack size: // each open increases l by at least one lister_push(t, to_command_idx(M_CLOSE, idx)); if(!lister_in_l(t, x[idx])) { bitset(t->u, x[idx], 0); // remove x[idx] from u t->u_size--; lister_push(t, to_command_idx(M_REWIND_U, x[idx])); // force x[0],x[1],...,x[idx-1] to l index_t j = 0; for(; j < idx; j++) { if(!lister_in_l(t, x[j])) { if(t->l_size >= t->k) break; lister_push_l(t, x[j]); lister_push(t, to_command_idx(M_REWIND_L, x[j])); } } if(j == idx) lister_push(t, to_command(M_QUERY)); } } } break; case M_CLOSE: assert(idx > 0); lister_push(t, to_command_idx(M_OPEN, idx-1)); break; case M_REWIND_U: bitset(t->u, idx, 1); t->u_size++; break; case M_REWIND_L: lister_pop_l(t); break; } } lister_push(t, to_command(M_QUERY)); return 0; } /******************************************************* Root query builder. */ motifq_t *root_build(graph_t *g, index_t k, index_t *kk) { push_memtrack(); index_t n = g->num_vertices; index_t m = 2*g->num_edges; index_t *pos = alloc_idxtab(n); index_t *adj = alloc_idxtab(n+m); index_t ns = k; shade_map_t *shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n); motifq_t *root = (motifq_t *) MALLOC(sizeof(motifq_t)); root->is_stub = 0; root->n = g->num_vertices; root->k = k; root->pos = pos; root->adj = adj; root->nl = 0; root->l = (index_t *) MALLOC(sizeof(index_t)*root->nl); root->ns = ns; root->shade = shade; root->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*root->n); push_time(); fprintf(stdout, "root build ... "); fflush(stdout); push_time(); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) pos[u] = 0; double time = pop_time(); fprintf(stdout, "[zero: %.2lf ms] ", time); fflush(stdout); push_time(); index_t *e = g->edges; #ifdef BUILD_PARALLEL // Parallel occurrence count // -- each thread is responsible for a group of bins, // all threads scan the entire list of edges index_t nt = num_threads(); index_t block_size = n/nt; #pragma omp parallel for for(index_t t = 0; t < nt; t++) { index_t start = t*block_size; index_t stop = (t == nt-1) ? 
n-1 : (start+block_size-1); for(index_t j = 0; j < m; j++) { index_t u = e[j]; if(start <= u && u <= stop) pos[u]++; // I am responsible for u, record adjacency to u } } #else for(index_t j = 0; j < m; j++) pos[e[j]]++; #endif index_t run = prefixsum(n, pos, 1); assert(run == n+m); time = pop_time(); fprintf(stdout, "[pos: %.2lf ms] ", time); fflush(stdout); push_time(); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) adj[pos[u]] = 0; e = g->edges; #ifdef BUILD_PARALLEL // Parallel aggregation to bins // -- each thread is responsible for a group of bins, // all threads scan the entire list of edges nt = num_threads(); block_size = n/nt; #pragma omp parallel for for(index_t t = 0; t < nt; t++) { index_t start = t*block_size; index_t stop = (t == nt-1) ? n-1 : (start+block_size-1); for(index_t j = 0; j < m; j+=2) { index_t u0 = e[j+0]; index_t u1 = e[j+1]; if(start <= u0 && u0 <= stop) { // I am responsible for u0, record adjacency to u1 index_t pu0 = pos[u0]; adj[pu0 + 1 + adj[pu0]++] = u1; } if(start <= u1 && u1 <= stop) { // I am responsible for u1, record adjacency to u0 index_t pu1 = pos[u1]; adj[pu1 + 1 + adj[pu1]++] = u0; } } } #else for(index_t j = 0; j < m; j+=2) { index_t u0 = e[j+0]; index_t u1 = e[j+1]; index_t p0 = pos[u0]; index_t p1 = pos[u1]; adj[p1 + 1 + adj[p1]++] = u0; adj[p0 + 1 + adj[p0]++] = u1; } #endif time = pop_time(); fprintf(stdout, "[adj: %.2lf ms] ", time); fflush(stdout); push_time(); adjsort(n, pos, adj); time = pop_time(); fprintf(stdout, "[adjsort: %.2lf ms] ", time); fflush(stdout); push_time(); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) { shade_map_t s = 0; for(index_t j = 0; j < k; j++) if(g->colors[u] == kk[j]) s |= 1UL << j; shade[u] = s; } time = pop_time(); fprintf(stdout, "[shade: %.2lf ms] ", time); fflush(stdout); time = pop_time(); fprintf(stdout, "done. [%.2lf ms] ", time); print_pop_memtrack(); fprintf(stdout, " "); print_current_mem(); fprintf(stdout, "\n"); fflush(stdout); return root; } /***************************************************** Input reader (ASCII). 
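 *
 * The ASCII instance is read from stdin. A '#' starts a comment that
 * runs to the end of the line. The recognized lines are, with 1-based
 * vertex and color indices:
 *
 *   p motif <n> <m>           parameters, must precede everything else
 *   e <i> <j>                 an undirected edge {i,j}
 *   n <i> <d>                 vertex i gets color d
 *   k <k> <c_1> ... <c_k>     the motif, a multiset of k colors
 *   t unique <v_1> ... <v_k>  self-test: the unique match is v_1,...,v_k
 *   t count <c>               self-test: the number of matches is c
 *
 * An illustrative instance (a path on 3 vertices with colors 1,2,1 and a
 * motif asking for two adjacent vertices colored 1 and 2):
 *
 *   p motif 3 2
 *   e 1 2
 *   e 2 3
 *   n 1 1
 *   n 2 2
 *   n 3 1
 *   k 2 1 2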
*/ void skipws(FILE *in) { int c; do { c = fgetc(in); if(c == '#') { do { c = fgetc(in); } while(c != EOF && c != '\n'); } } while(c != EOF && isspace(c)); if(c != EOF) ungetc(c, in); } #define CMD_NOP 0 #define CMD_TEST_UNIQUE 1 #define CMD_TEST_COUNT 2 #define CMD_RUN_ORACLE 3 #define CMD_LIST_FIRST 4 #define CMD_LIST_ALL 5 const char *cmd_legend[] = { "no operation", "test unique", "test count", "run oracle", "list first", "list all" }; void reader_ascii(FILE *in, graph_t **g_out, index_t *k_out, index_t **kk_out, index_t *cmd_out, index_t **cmd_args_out) { push_time(); push_memtrack(); index_t n = 0; index_t m = 0; graph_t *g = (graph_t *) 0; index_t i, j, d, k; index_t *kk = (index_t *) 0; index_t cmd = CMD_NOP; index_t *cmd_args = (index_t *) 0; skipws(in); while(!feof(in)) { skipws(in); int c = fgetc(in); switch(c) { case 'p': if(g != (graph_t *) 0) ERROR("duplicate parameter line"); skipws(in); if(fscanf(in, "motif %ld %ld", &n, &m) != 2) ERROR("invalid parameter line"); if(n <= 0 || m < 0) ERROR("invalid input parameters (n = %ld, m = %ld)", n, m); g = graph_alloc(n); break; case 'e': if(g == (graph_t *) 0) ERROR("parameter line must be given before edges"); skipws(in); if(fscanf(in, "%ld %ld", &i, &j) != 2) ERROR("invalid edge line"); if(i < 1 || i > n || j < 1 || j > n) ERROR("invalid edge (i = %ld, j = %ld with n = %ld)", i, j, n); graph_add_edge(g, i-1, j-1); break; case 'n': if(g == (graph_t *) 0) ERROR("parameter line must be given before vertex colors"); skipws(in); if(fscanf(in, "%ld %ld", &i, &d) != 2) ERROR("invalid color line"); if(i < 1 || i > n || d < 1) ERROR("invalid color line (i = %ld, d = %ld with n = %ld)", i, d, n); graph_set_color(g, i-1, d-1); break; case 'k': if(g == (graph_t *) 0) ERROR("parameter line must be given before motif"); skipws(in); if(fscanf(in, "%ld", &k) != 1) ERROR("invalid motif line"); if(k < 1 || k > n) ERROR("invalid motif line (k = %ld with n = %d)", k, n); kk = alloc_idxtab(k); for(index_t u = 0; u < k; u++) { skipws(in); if(fscanf(in, "%ld", &i) != 1) ERROR("error parsing motif line"); if(i < 1) ERROR("invalid color on motif line (i = %ld)", i); kk[u] = i-1; } break; case 't': if(g == (graph_t *) 0 || kk == (index_t *) 0) ERROR("parameter and motif lines must be given before test"); skipws(in); { char cmdstr[128]; if(fscanf(in, "%100s", cmdstr) != 1) ERROR("invalid test command"); if(!strcmp(cmdstr, "unique")) { cmd_args = alloc_idxtab(k); for(index_t u = 0; u < k; u++) { skipws(in); if(fscanf(in, "%ld", &i) != 1) ERROR("error parsing test line"); if(i < 1 || i > n) ERROR("invalid test line entry (i = %ld)", i); cmd_args[u] = i-1; } heapsort_indext(k, cmd_args); for(index_t u = 1; u < k; u++) if(cmd_args[u-1] >= cmd_args[u]) ERROR("test line contains duplicate entries"); cmd = CMD_TEST_UNIQUE; } else { if(!strcmp(cmdstr, "count")) { cmd_args = alloc_idxtab(1); skipws(in); if(fscanf(in, "%ld", &i) != 1) ERROR("error parsing test line"); if(i < 0) ERROR("count on test line cannot be negative"); cmd = CMD_TEST_COUNT; cmd_args[0] = i; } else { ERROR("unrecognized test command \"%s\"", cmdstr); } } } break; case EOF: break; default: ERROR("parse error"); } } if(g == (graph_t *) 0) ERROR("no graph given in input"); if(kk == (index_t *) 0) ERROR("no motif given in input"); for(index_t i = 0; i < n; i++) { if(g->colors[i] == -1) ERROR("no color assigned to vertex i = %ld", i); } double time = pop_time(); fprintf(stdout, "input: n = %ld, m = %ld, k = %ld [%.2lf ms] ", g->num_vertices, g->num_edges, k, time); print_pop_memtrack(); 
fprintf(stdout, " "); print_current_mem(); fprintf(stdout, "\n"); *g_out = g; *k_out = k; *kk_out = kk; *cmd_out = cmd; *cmd_args_out = cmd_args; } /**************************************************** Input reader (binary). */ #define BIN_MAGIC 0x1234567890ABCDEFUL void reader_bin(FILE *in, graph_t **g_out, index_t *k_out, index_t **kk_out, index_t *cmd_out, index_t **cmd_args_out) { push_time(); push_memtrack(); index_t magic = 0; index_t n = 0; index_t m = 0; graph_t *g = (graph_t *) 0; index_t k = 0; index_t has_target = 0; index_t *kk = (index_t *) 0; index_t cmd = CMD_NOP; index_t *cmd_args = (index_t *) 0; if(fread(&magic, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); if(magic != BIN_MAGIC) ERROR("error reading input"); if(fread(&n, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); if(fread(&m, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); assert(n >= 0 && m >= 0 && m%2 == 0); g = graph_alloc(n); index_t *e = graph_edgebuf(g, m/2); if(fread(e, sizeof(index_t), m, in) != m) ERROR("error reading input"); if(fread(g->colors, sizeof(index_t), n, in) != n) ERROR("error reading input"); if(fread(&has_target, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); assert(has_target == 0 || has_target == 1); if(has_target) { if(fread(&k, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); assert(k >= 0); kk = alloc_idxtab(k); if(fread(kk, sizeof(index_t), k, in) != k) ERROR("error reading input"); if(fread(&cmd, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); switch(cmd) { case CMD_NOP: break; case CMD_TEST_UNIQUE: cmd_args = alloc_idxtab(k); if(fread(cmd_args, sizeof(index_t), k, in) != k) ERROR("error reading input"); shellsort(k, cmd_args); break; case CMD_TEST_COUNT: cmd_args = alloc_idxtab(1); if(fread(cmd_args, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); break; default: ERROR("invalid command in binary input stream"); break; } } double time = pop_time(); fprintf(stdout, "input: n = %ld, m = %ld, k = %ld [%.2lf ms] ", g->num_vertices, g->num_edges, k, time); print_pop_memtrack(); fprintf(stdout, " "); print_current_mem(); fprintf(stdout, "\n"); *g_out = g; *k_out = k; *kk_out = kk; *cmd_out = cmd; *cmd_args_out = cmd_args; } /****************************************************** Program entry point. 
*/ int main(int argc, char **argv) { push_time(); push_memtrack(); index_t arg_cmd = CMD_NOP; index_t have_seed = 0; index_t seed = 123456789; for(index_t f = 1; f < argc; f++) { if(argv[f][0] == '-') { if(!strcmp(argv[f], "-bin")) { flag_bin_input = 1; } if(!strcmp(argv[f], "-ascii")) { flag_bin_input = 0; } if(!strcmp(argv[f], "-oracle")) { arg_cmd = CMD_RUN_ORACLE; } if(!strcmp(argv[f], "-first")) { arg_cmd = CMD_LIST_FIRST; } if(!strcmp(argv[f], "-all")) { arg_cmd = CMD_LIST_ALL; } if(!strcmp(argv[f], "-seed")) { if(f == argc - 1) ERROR("random seed missing from command line"); seed = atol(argv[++f]); have_seed = 1; } if(!strcmp(argv[f], "-devices")) { if(f == argc - 1) ERROR("number of devices missing from command line"); devices = atol(argv[++f]); have_devices = 1; } } } fprintf(stdout, "invoked as:"); for(index_t f = 0; f < argc; f++) fprintf(stdout, " %s", argv[f]); fprintf(stdout, "\n"); if(have_seed == 0) { fprintf(stdout, "no random seed given, defaulting to %ld\n", seed); } fprintf(stdout, "random seed = %ld\n", seed); srand(seed); graph_t *g; index_t k; index_t *kk; index_t input_cmd; index_t *cmd_args; if(flag_bin_input) { reader_bin(stdin, &g, &k, &kk, &input_cmd, &cmd_args); } else { reader_ascii(stdin, &g, &k, &kk, &input_cmd, &cmd_args); } index_t cmd = input_cmd; // by default execute command in input stream if(arg_cmd != CMD_NOP) cmd = arg_cmd; // override command in input stream motifq_t *root = root_build(g, k, kk); graph_free(g); FREE(kk); fprintf(stdout, "command: %s\n", cmd_legend[cmd]); fflush(stdout); push_time(); switch(cmd) { case CMD_NOP: motifq_free(root); break; case CMD_TEST_UNIQUE: { index_t n = root->n; index_t k = root->k; lister_t *t = lister_alloc(n, k, root); index_t *get = alloc_idxtab(k); index_t ct = 0; while(lister_run(t, get)) { assert(ct == 0); fprintf(stdout, "found %ld: ", ct); for(index_t i = 0; i < k; i++) fprintf(stdout, "%ld%s", get[i], i == k-1 ? "\n" : " "); for(index_t l = 0; l < k; l++) assert(get[l] == cmd_args[l]); ct++; } assert(ct == 1); FREE(get); lister_free(t); } break; case CMD_LIST_FIRST: case CMD_LIST_ALL: case CMD_TEST_COUNT: { index_t n = root->n; index_t k = root->k; lister_t *t = lister_alloc(n, k, root); index_t *get = alloc_idxtab(k); index_t ct = 0; while(lister_run(t, get)) { fprintf(stdout, "found %ld: ", ct); for(index_t i = 0; i < k; i++) fprintf(stdout, "%ld%s", get[i], i == k-1 ? "\n" : " "); ct++; if(cmd == CMD_LIST_FIRST) break; } if(cmd == CMD_TEST_COUNT) { fprintf(stdout, "count = %ld, target = %ld\n", ct, cmd_args[0]); assert(ct == cmd_args[0]); } FREE(get); lister_free(t); } break; case CMD_RUN_ORACLE: if(motifq_execute(root)) { index_t support_size = 0; assert(!root->is_stub); scalar_t *master_vsum = root->vsum; for(index_t i = 0; i < root->n; i++) { if(master_vsum[i] != 0) { support_size++; } } fprintf(stdout, " -- true [%ld]\n", support_size); } else { fprintf(stdout, " -- false [0]\n"); } motifq_free(root); break; default: assert(0); break; } double time = pop_time(); fprintf(stdout, "command done [%.2lf ms]\n", time); if(input_cmd != CMD_NOP) FREE(cmd_args); time = pop_time(); fprintf(stdout, "grand total [%.2lf ms] ", time); print_pop_memtrack(); fprintf(stdout, "\n"); fprintf(stdout, "host: %s\n", sysdep_hostname()); fprintf(stdout, "build: %s\n", LINE_TYPE); fprintf(stdout, "compiler: gcc %d.%d.%d\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__); fflush(stdout); assert(malloc_balance == 0); assert(memtrack_stack_top < 0); return 0; }
f833226aeb13fb8b370ba8bafcd5232add3eb468.cu
/* * This file is part of an experimental software implementation of * vertex-localized graph motif search for GPUs utilizing the constrained * multilinear sieving framework. * * The source code is subject to the following license. * * The MIT License (MIT) * * Copyright (c) 2017 P. Kaski, S. Thejaswi * Copyright (c) 2014 A. Björklund, P. Kaski, Ł. Kowalik, J. Lauri * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * */ #include<stdio.h> #include<stdlib.h> #include<assert.h> #include<time.h> #include<sys/utsname.h> #include<string.h> #include<stdarg.h> #include<assert.h> #include<ctype.h> #include<sys/time.h> #include<cuda.h> /**************************************************** Configuration & types. */ #define THREADS_IN_WARP 32 typedef long int index_t; // default to 64-bit indexing #include"gf.h" #include"ffprng.h" #define MAX_K 32 #define MAX_SHADES 32 #define BUILD_PARALLEL // do a parallel CPU build #ifdef BUILD_PARALLEL #define MAX_THREADS 128 #include<omp.h> #endif typedef unsigned int shade_map_t; index_t have_devices = 0; index_t devices = 0; /******************************************************************** Flags. */ index_t flag_bin_input = 0; // default to ASCII input /************************************************************ Common macros. */ /* Linked list navigation macros. */ #define pnlinknext(to,el) { (el)->next = (to)->next; (el)->prev = (to); (to)->next->prev = (el); (to)->next = (el); } #define pnlinkprev(to,el) { (el)->prev = (to)->prev; (el)->next = (to); (to)->prev->next = (el); (to)->prev = (el); } #define pnunlink(el) { (el)->next->prev = (el)->prev; (el)->prev->next = (el)->next; } #define pnrelink(el) { (el)->next->prev = (el); (el)->prev->next = (el); } /********************************************************** Error reporting. */ #define ERROR(...) error(__FILE__,__LINE__,__func__,__VA_ARGS__); static void error(const char *fn, int line, const char *func, const char *format, ...) { va_list args; va_start(args, format); fprintf(stderr, "ERROR [file = %s, line = %d]\n" "%s: ", fn, line, func); vfprintf(stderr, format, args); fprintf(stderr, "\n"); va_end(args); abort(); } /******************************************************** Get the host name. */ #define MAX_HOSTNAME 256 const char *sysdep_hostname(void) { static char hn[MAX_HOSTNAME]; struct utsname undata; uname(&undata); strcpy(hn, undata.nodename); return hn; } /******************************************************** Available threads. 
*/ index_t num_threads(void) { #ifdef BUILD_PARALLEL return omp_get_max_threads(); #else return 1; #endif } /********************************************* Memory allocation & tracking. */ #define MALLOC(x) malloc_wrapper(x) #define FREE(x) free_wrapper(x) index_t malloc_balance = 0; struct malloc_track_struct { void *p; size_t size; struct malloc_track_struct *prev; struct malloc_track_struct *next; }; typedef struct malloc_track_struct malloc_track_t; malloc_track_t malloc_track_root; size_t malloc_total = 0; #define MEMTRACK_STACK_CAPACITY 256 size_t memtrack_stack[MEMTRACK_STACK_CAPACITY]; index_t memtrack_stack_top = -1; void *malloc_wrapper(size_t size) { if(malloc_balance == 0) { malloc_track_root.prev = &malloc_track_root; malloc_track_root.next = &malloc_track_root; } void *p = malloc(size); if(p == NULL) ERROR("malloc fails"); malloc_balance++; malloc_track_t *t = (malloc_track_t *) malloc(sizeof(malloc_track_t)); t->p = p; t->size = size; pnlinkprev(&malloc_track_root, t); malloc_total += size; for(index_t i = 0; i <= memtrack_stack_top; i++) if(memtrack_stack[i] < malloc_total) memtrack_stack[i] = malloc_total; return p; } void free_wrapper(void *p) { malloc_track_t *t = malloc_track_root.next; for(; t != &malloc_track_root; t = t->next) { if(t->p == p) break; } if(t == &malloc_track_root) ERROR("FREE issued on a non-tracked pointer %p", p); malloc_total -= t->size; pnunlink(t); free(t); free(p); malloc_balance--; } index_t *alloc_idxtab(index_t n) { index_t *t = (index_t *) MALLOC(sizeof(index_t)*n); return t; } void push_memtrack(void) { assert(memtrack_stack_top + 1 < MEMTRACK_STACK_CAPACITY); memtrack_stack[++memtrack_stack_top] = malloc_total; } size_t pop_memtrack(void) { assert(memtrack_stack_top >= 0); return memtrack_stack[memtrack_stack_top--]; } size_t current_mem(void) { return malloc_total; } double inGiB(size_t s) { return (double) s / (1 << 30); } void print_current_mem(void) { fprintf(stdout, "{curr: %.2lfGiB}", inGiB(current_mem())); fflush(stdout); } void print_pop_memtrack(void) { fprintf(stdout, "{peak: %.2lfGiB}", inGiB(pop_memtrack())); fflush(stdout); } /******************************************************* Timing subroutines. */ #define TIME_STACK_CAPACITY 256 double start_stack[TIME_STACK_CAPACITY]; index_t start_stack_top = -1; void push_time(void) { assert(start_stack_top + 1 < TIME_STACK_CAPACITY); #ifdef BUILD_PARALLEL start_stack[++start_stack_top] = omp_get_wtime(); #else start_stack[++start_stack_top] = (double) clock()/CLOCKS_PER_SEC; #endif } double pop_time(void) { #ifdef BUILD_PARALLEL double wstop = omp_get_wtime(); #else double wstop = (double) clock()/CLOCKS_PER_SEC; #endif assert(start_stack_top >= 0); double wstart = start_stack[start_stack_top--]; return (double) (1000.0*(wstop-wstart)); } /****************************************************************** Sorting. */ void shellsort(index_t n, index_t *a) { index_t h = 1; index_t i; for(i = n/3; h < i; h = 3*h+1) ; do { for(i = h; i < n; i++) { index_t v = a[i]; index_t j = i; do { index_t t = a[j-h]; if(t <= v) break; a[j] = t; j -= h; } while(j >= h); a[j] = v; } h /= 3; } while(h > 0); } #define LEFT(x) (x<<1) #define RIGHT(x) ((x<<1)+1) #define PARENT(x) (x>>1) void heapsort_indext(index_t n, index_t *a) { /* Shift index origin from 0 to 1 for convenience. 
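 *
 * (The shift makes the heap 1-based, so the LEFT, RIGHT and PARENT
 * macros above reduce to plain shifts: the children of node x sit at
 * 2x and 2x+1 and its parent at x/2.)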
*/ a--; /* Build heap */ for(index_t i = 2; i <= n; i++) { index_t x = i; while(x > 1) { index_t y = PARENT(x); if(a[x] <= a[y]) { /* heap property ok */ break; } /* Exchange a[x] and a[y] to enforce heap property */ index_t t = a[x]; a[x] = a[y]; a[y] = t; x = y; } } /* Repeat delete max and insert */ for(index_t i = n; i > 1; i--) { index_t t = a[i]; /* Delete max */ a[i] = a[1]; /* Insert t */ index_t x = 1; index_t y, z; while((y = LEFT(x)) < i) { z = RIGHT(x); if(z < i && a[y] < a[z]) { index_t s = z; z = y; y = s; } /* Invariant: a[y] >= a[z] */ if(t >= a[y]) { /* ok to insert here without violating heap property */ break; } /* Move a[y] up the heap */ a[x] = a[y]; x = y; } /* Insert here */ a[x] = t; } } /****************************************************** Bitmap manipulation. */ void bitset(index_t *map, index_t j, index_t value) { assert((value & (~1UL)) == 0); map[j/64] = (map[j/64] & ~(1UL << (j%64))) | ((value&1) << (j%64)); } index_t bitget(index_t *map, index_t j) { return (map[j/64]>>(j%64))&1UL; } /************************************************** Random numbers and such. */ index_t irand(void) { return (((index_t) rand())<<31)^((index_t) rand()); } /**************************************************** (Parallel) prefix sum. */ index_t prefixsum(index_t n, index_t *a, index_t k) { #ifdef BUILD_PARALLEL index_t s[MAX_THREADS]; index_t nt = num_threads(); assert(nt < MAX_THREADS); index_t length = n; index_t block_size = length/nt; #pragma omp parallel for for(index_t t = 0; t < nt; t++) { index_t start = t*block_size; index_t stop = (t == nt-1) ? length-1 : (start+block_size-1); index_t tsum = (stop-start+1)*k; for(index_t u = start; u <= stop; u++) tsum += a[u]; s[t] = tsum; } index_t run = 0; for(index_t t = 1; t <= nt; t++) { index_t v = s[t-1]; s[t-1] = run; run += v; } s[nt] = run; #pragma omp parallel for for(index_t t = 0; t < nt; t++) { index_t start = t*block_size; index_t stop = (t == nt-1) ? length-1 : (start+block_size-1); index_t trun = s[t]; for(index_t u = start; u <= stop; u++) { index_t tv = a[u]; a[u] = trun; trun += tv + k; } assert(trun == s[t+1]); } #else index_t run = 0; for(index_t u = 0; u < n; u++) { index_t tv = a[u]; a[u] = run; run += tv + k; } #endif return run; } /*********************** Search for an interval of values in a sorted array. 
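 *
 * get_interval assumes a[0..n-1] is sorted. It returns the number of
 * entries with lo_val <= a[i] <= hi_val and stores the first and last
 * such index in iv_start and iv_end; when no entry falls in the range
 * it returns 0. For example, with a = {1,3,3,7,9}, lo_val = 3 and
 * hi_val = 7 it sets iv_start = 1, iv_end = 3 and returns 3.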
*/ inline index_t get_interval(index_t n, index_t *a, index_t lo_val, index_t hi_val, index_t *iv_start, index_t *iv_end) { assert(n >= 0); if(n == 0) { *iv_start = 0; return 0; } assert(lo_val <= hi_val); // find first element in interval (if any) with binary search index_t lo = 0; index_t hi = n-1; // at or above lo, and at or below hi (if any) while(lo < hi) { index_t mid = (lo+hi)/2; // lo <= mid < hi index_t v = a[mid]; if(hi_val < v) { hi = mid-1; // at or below hi (if any) } else { if(v < lo_val) lo = mid+1; // at or above lo (if any), lo <= hi else hi = mid; // at or below hi (exists) } // 0 <= lo <= n-1 } if(a[lo] < lo_val || a[lo] > hi_val) { // array contains no values in interval if(a[lo] < lo_val) { lo++; assert(lo == n || a[lo+1] > hi_val); } else { assert(lo == 0 || a[lo-1] < lo_val); } *iv_start = lo; *iv_end = hi; return 0; } assert(lo_val <= a[lo] && a[lo] <= hi_val); *iv_start = lo; // find interval end (last index in interval) with binary search lo = 0; hi = n-1; // last index (if any) is at or above lo, and at or below hi while(lo < hi) { index_t mid = (lo+hi+1)/2; // lo < mid <= hi index_t v = a[mid]; if(hi_val < v) { hi = mid-1; // at or below hi, lo <= hi } else { if(v < lo_val) lo = mid+1; // at or above lo else lo = mid; // at or above lo, lo <= hi } } assert(lo == hi); *iv_end = lo; // lo == hi return 1+*iv_end-*iv_start; // return cut size } /********************************** Initialize an array with random scalars. */ void randinits_scalar(scalar_t *a, index_t s, ffprng_scalar_t seed) { ffprng_t base; FFPRNG_INIT(base, seed); index_t nt = num_threads(); index_t block_size = s/nt; #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t t = 0; t < nt; t++) { ffprng_t gen; index_t start = t*block_size; index_t stop = (t == nt-1) ? s-1 : (start+block_size-1); FFPRNG_FWD(gen, start, base); for(index_t i = start; i <= stop; i++) { ffprng_scalar_t rnd; FFPRNG_RAND(rnd, gen); scalar_t rs = (scalar_t) rnd; a[i] = rs; } } } /********************************************************************* CUDA. */ /************************ CUDA error wrapper (adapted from CUDA By Example). */ #define CUDA_WRAP(err) (error_wrap(err,__FILE__,__LINE__)) static void error_wrap(cudaError_t err, const char *fn, int line) { if(err != cudaSuccess) { fprintf(stderr, "error [%s, line %d]: %s\n", fn, line, cudaGetErrorString(err)); fflush(stderr); exit(EXIT_FAILURE); } } /******************************************** Parallel line-sum for the GPU. */ /* * The following kernel adapted [in particular, sans the commentary!] * from Mark Harris, "Optimizing Parallel Reduction in CUDA", NVIDIA * * http://developer.download.nvidia.com/assets/cuda/files/reduction.pdf * */ template <index_t block_size> __device__ void device_line_psum_finish(volatile line_t *s, index_t a) { // Remarks: // // 1) // Observe the volatile decl above to instruct the compiler // *not* to reorder the share mem transactions below // // 2) // What is below takes simultaneously place for a = 0,1,...,31 // __in parallel__, all data now in s[0],s[1],...,s[63] // if(block_size >= 64) LINE_ADD(s[a],s[a],s[a + 32]); // ... now in s[0],s[1],...,s[31] if(block_size >= 32) LINE_ADD(s[a],s[a],s[a + 16]); // ... now in s[0],s[1],...,s[15] if(block_size >= 16) LINE_ADD(s[a],s[a],s[a + 8]); // ... now in s[0],s[1],...,s[7] if(block_size >= 8) LINE_ADD(s[a],s[a],s[a + 4]); // ... now in s[0],s[1],s[2],s[3] if(block_size >= 4) LINE_ADD(s[a],s[a],s[a + 2]); // ... 
now in s[0],s[1] if(block_size >= 2) LINE_ADD(s[a],s[a],s[a + 1]); // ... now in s[0] } template <index_t block_size> __global__ void device_line_psum_block(index_t dg, index_t q, index_t seg, line_array_t *d_in, line_array_t *d_out) { // Many a thread hereby commence their labours in this block ... index_t a = threadIdx.x; // my index inside my block index_t span = 2*block_size; // one block spans *twice* the data index_t major = (index_t) blockIdx.x+blockIdx.y*gridDim.x; index_t i = major*span + a; // accumulate from here ... index_t i_end = i + q; // ... to here (exclusive) index_t stride = span*dg; // ... with a stride that isolates // us from whatever the // __other__ blocks are doing, // asynchronously extern __shared__ line_t s[]; // cells for me and my mates // (in my block); my cell is s[a], // I shall write to no other cells // (except at init) // Start my work, my brave mates working in parallel with me ... line_t sum; LINE_SET_ZERO(sum); while(i < i_end) { line_t t1, t2; LINE_LOAD(t1, d_in, seg, i); LINE_LOAD(t2, d_in, seg, i + block_size); // span twice the data LINE_ADD(t1, t1, t2); LINE_ADD(sum, sum, t1); i += stride; // ... stride past all the other blocks } LINE_MOV(s[a], sum); LINE_SET_ZERO(s[a+block_size]); // small inputs may refer here, so zero it __syncthreads(); // sync with my mates // All data now in s[0],s[1],...,s[min(511,block_size)] if(block_size >= 512) { if(a < 256) { LINE_ADD(s[a],s[a],s[a + 256]); } __syncthreads(); } // All data now in s[0],s[1],...,s[min(255,block_size)] if(block_size >= 256) { if(a < 128) { LINE_ADD(s[a],s[a],s[a + 128]); } __syncthreads(); } // All data now in s[0],s[1],...,s[min(127,block_size)] if(block_size >= 128) { if(a < 64) { LINE_ADD(s[a],s[a],s[a + 64]); } __syncthreads(); } // All data now in s[0],s[1],...,s[min(63,block_size)] if(a < 32) { // Most of my mates are done, but I remain in the wrap-up detail ... device_line_psum_finish<block_size>(s, a); } if(a == 0) { // Ha! I get to output all the efforts due to me and my mates ... LINE_STORE(d_out, seg, major, s[0]); } } __global__ void device_lastp_line(index_t p, index_t seg, line_array_t *d_in, scalar_t *d_sum_out, index_t zero_acc) { index_t v = blockDim.x*((index_t) blockIdx.x+blockIdx.y*gridDim.x)+threadIdx.x; if(v < p) { line_t l; LINE_LOAD(l, d_in, seg, v); scalar_t t; LINE_SUM(t, l); if(zero_acc == 0) { d_sum_out[v] ^= t; } else { d_sum_out[v] = t; } } } void driver_line_psum(index_t p, index_t l, index_t seg, line_array_t *d_s0, line_array_t *d_s1, scalar_t *d_sum, index_t zero_acc) { index_t n = l; // number of lines to sum up index_t pref_threads = 512; // preferred threads per block // (must be a power of 2) while(n > 1) { index_t dg, db; size_t sm; if(n >= 2*pref_threads) { db = pref_threads; dg = n/(2*db); // one block spans _twice_ the data } else { db = n/2; // one block spans _twice_ the data dg = 1; } sm = sizeof(line_t)*db*2; // enough share mem to span twice the threads index_t pdg = p*dg; // Create a 2D grid to satisfy GPU hardware index_t pdgx = pdg >= (1 << 16) ? (1 << 15) : pdg; index_t pdgy = pdg >= (1 << 16) ? 
pdg / (1 << 15) : 1; dim3 pdg2(pdgx,pdgy); dim3 db2(db,1); switch(db) { case 1024: device_line_psum_block<1024><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 512: device_line_psum_block<512><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 256: device_line_psum_block<256><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 128: device_line_psum_block<128><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 64: device_line_psum_block< 64><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 32: device_line_psum_block< 32><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 16: device_line_psum_block< 16><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 8: device_line_psum_block< 8><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 4: device_line_psum_block< 4><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 2: device_line_psum_block< 2><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; case 1: device_line_psum_block< 1><<<pdg2,db2,sm>>>(dg,n,seg,d_s0,d_s1); break; default: assert(0); break; } cudaDeviceSynchronize(); CUDA_WRAP(cudaGetLastError()); n = dg; /* Restore invariant. */ line_array_t *save = d_s0; d_s0 = d_s1; d_s1 = save; } /* Invariant: Input (= Output) is in d_s0. */ /* Sum up the last lines to a scalar. */ index_t dbl = 1; index_t dgl = 1; while(dbl < p) dbl *= 2; if(dbl > pref_threads) { dgl = dbl / pref_threads; dbl = pref_threads; } index_t dglx = dgl >= (1 << 16) ? (1 << 15) : dgl; index_t dgly = dgl >= (1 << 16) ? dgl / (1 << 15) : 1; dim3 dgl2(dglx,dgly); dim3 dbl2(dbl,1); device_lastp_line<<<dgl2,dbl2>>>(p, seg, d_s0, d_sum, zero_acc); cudaDeviceSynchronize(); CUDA_WRAP(cudaGetLastError()); } /************************** Init shade variables for sieve (host-side init). */ void init_shades(index_t n, index_t n0, index_t k, index_t num_shades, shade_map_t *h_s, ffprng_scalar_t seed, scalar_t *h_z) { assert(num_shades <= MAX_SHADES); scalar_t wdj[k*k]; ffprng_t base; FFPRNG_INIT(base, seed); for(index_t i = 0; i < k; i++) { for(index_t j = 0; j < k; j++) { ffprng_scalar_t rnd; FFPRNG_RAND(rnd, base); wdj[i*k+j] = (scalar_t) rnd; } } index_t nt = num_threads(); index_t block_size = n/nt; #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t t = 0; t < nt; t++) { ffprng_t gen; index_t start = t*block_size; index_t stop = (t == nt-1) ? n-1 : (start+block_size-1); FFPRNG_FWD(gen, k*start, base); for(index_t i = start; i <= stop; i++) { if(i < n0) { scalar_t vi[k]; shade_map_t shades_u = h_s[i]; for(index_t j = 0; j < k; j++) { ffprng_scalar_t rnd; FFPRNG_RAND(rnd, gen); scalar_t rs = (scalar_t) rnd; rs = rs & (-((scalar_t)((shades_u >> j)&(j < num_shades)))); vi[j] = rs; } for(index_t j = 0; j < k; j++) { scalar_t uj = 0; for(index_t d = 0; d < k; d++) { scalar_t ln = 0; REF_SCALAR_MUL(ln, wdj[j*k+d], vi[d]); // SMUL [host]: n0*k*k REF_SCALAR_ADD(uj, uj, ln); } h_z[i*k+j] = uj; // SW [host]: n0*k } } else { for(index_t j = 0; j < k; j++) h_z[i*k+j] = 0; // SW [host]: (n-n0)*k } } } // total SW: n*k // total SMUL: n0*k*k } /****************************************************************** Sieving. 
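 *
 * The sieve works on an array of scalar lines indexed by LINE_IDX below:
 * the line of degree l for vertex u and line offset a lives at
 * (l-1)*n*gl + u*gl + a, so d_s holds k levels of n*gl lines. The
 * pre-kernel fills level 1: slot j of line a of vertex u corresponds to
 * the subset index aaj = pfx + a*SCALARS_IN_LINE + j, and the stored
 * scalar accumulates (via SCALAR_ADD) exactly those of the k shade
 * values d_z[u*k+0],...,d_z[u*k+k-1] whose bit is set in aaj; the
 * branch-free mask z_ul & (-(((aaj) >> l)&1)) keeps z_ul when bit l of
 * aaj is set and zeroes it otherwise.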
*/ #define LINE_IDX(n, gl, l, u, a) ((((l)-1)*(n)*(gl))+((u)*(gl))+(a)) __global__ void device_constrained_sieve_pre(index_t n, index_t k, index_t gl, index_t seg, index_t pfx, scalar_t *d_z, line_array_t *d_s) { index_t job = blockDim.x*(blockIdx.x+blockIdx.y*gridDim.x)+threadIdx.x; index_t u = job/gl; index_t a = job%gl; index_t aa = pfx + a*SCALARS_IN_LINE; line_t ln; LINE_SET_ZERO(ln); for(index_t j = 0; j < SCALARS_IN_LINE; j++) { index_t aaj = aa+j; scalar_t xuaaj; SCALAR_SET_ZERO(xuaaj); for(index_t l = 0; l < k; l++) { scalar_t z_ul = d_z[u*k+l]; // SR [warp, cached]: n*k z_ul = z_ul & (-(((aaj) >> l)&1)); SCALAR_ADD(xuaaj, xuaaj, z_ul); } LINE_STORE_SCALAR(ln, j, xuaaj); } index_t l1ua = LINE_IDX(n, gl, 1, u, a); LINE_STORE(d_s, seg, l1ua, ln); // LW: n*gl // total SR: n*k // total LW: n*gl } void driver_constrained_sieve_pre(index_t n, index_t k, index_t gl, index_t seg, index_t pfx, index_t dg, index_t db, scalar_t *d_z, line_array_t *d_s) { // Create a 2D grid to satisfy GPU hardware index_t dgx = dg >= (1 << 16) ? (1 << 15) : dg; index_t dgy = dg >= (1 << 16) ? dg / (1 << 15) : 1; dim3 dg2(dgx,dgy); dim3 db2(db,1); device_constrained_sieve_pre<<<dg2,db2>>>(n,k,gl,seg,pfx,d_z,d_s); cudaDeviceSynchronize(); CUDA_WRAP(cudaGetLastError()); } /********************************** Generating function for k-arborescences. */ #ifndef VERTICES_PER_GENF_THREAD #define VERTICES_PER_GENF_THREAD 1 // must be a power of 2 #endif __global__ void device_karb_genf_round(index_t n, index_t l, index_t k, index_t gl, index_t b, index_t seg, index_t *d_pos, index_t *d_adj, scalar_t *d_y, line_array_t *d_s #ifdef GF_LOG_EXP_LOOKUP , scalar_t *d_lookup_log, scalar_t *d_lookup_exp #endif ) { index_t job = blockDim.x*(blockIdx.x+blockIdx.y*gridDim.x)+threadIdx.x; index_t a = job % gl; index_t u_start = (job / gl) * VERTICES_PER_GENF_THREAD; index_t u_end = u_start + VERTICES_PER_GENF_THREAD - 1; #pragma unroll 1 for(index_t u = u_start; u <= u_end; u++) { index_t p = d_pos[u]; // IR [warp]: (k-1)*n index_t deg = d_adj[p]; // IR [warp]: (k-1)*n line_t p_lu; LINE_SET_ZERO(p_lu); #pragma unroll 1 for(index_t j = 1; j <= deg; j++) { index_t v = d_adj[p+j]; // IR [warp, cached]: (k-1)*m line_t p_luv; LINE_SET_ZERO(p_luv); #pragma unroll 1 for(index_t l1 = 1; l1 < l; l1++) { // \sum_{l=2}^k \sum_{l1=1}^{l-1} 1 // = \sum_{l=2}^k (l-1) // = k(k-1)/2 index_t l2 = l-l1; index_t l1u = LINE_IDX(n, gl, l1, u, a); line_t p_l1u; LINE_LOAD(p_l1u, d_s, seg, l1u); // LR: m*gl*k(k-1)/2 index_t l2v = LINE_IDX(n, gl, l2, v, a); line_t p_l2v; LINE_LOAD(p_l2v, d_s, seg, l2v); // LR: m*gl*k(k-1)/2 line_t p_l1u_l2v; LINE_MUL(p_l1u_l2v, p_l1u, p_l2v); // LMUL: m*gl*k(k-1)/2 LINE_ADD(p_luv, p_luv, p_l1u_l2v); } scalar_t y_luv = d_y[(l-1)*b+p+j]; // SR [warp, cached]: (k-1)*m line_t res; LINE_MUL_SCALAR(res, p_luv, y_luv); // LMUL: m*gl*(k-1) LINE_ADD(p_lu, p_lu, res); } index_t lu = LINE_IDX(n, gl, l, u, a); LINE_STORE(d_s, seg, lu, p_lu); // LW: n*gl*(k-1) } // total IR: 2*(k-1)*n+(k-1)*m // total SR: (k-1)*m // total LR+LW: m*gl*k(k-1) + n*gl*(k-1) // total LMUL: m*gl*k(k-1)/2 + m*gl*(k-1) } line_array_t *driver_karb_genf(index_t n, index_t k, index_t gl, index_t b, index_t seg, index_t dg, index_t db, index_t *d_pos, index_t *d_adj, scalar_t *d_y, line_array_t *d_s #ifdef GF_LOG_EXP_LOOKUP , scalar_t *d_lookup_log, scalar_t *d_lookup_exp #endif ) { // Create a 2D grid to satisfy GPU hardware index_t dgx = dg >= (1 << 16) ? (1 << 15) : dg; index_t dgy = dg >= (1 << 16) ? 
dg / (1 << 15) : 1; dim3 dg2(dgx,dgy); dim3 db2(db,1); assert(k >= 1); if(k >= 2) { for(index_t l = 2; l <= k; l++) { device_karb_genf_round<<<dg2,db2>>>(n, l, k, gl, b, seg, d_pos, d_adj, d_y, d_s #ifdef GF_LOG_EXP_LOOKUP , d_lookup_log, d_lookup_exp #endif ); cudaDeviceSynchronize(); CUDA_WRAP(cudaGetLastError()); } } return d_s; } /********************************************** Stub to warm up GPU devices. */ void lightup_stub(void) { fprintf(stdout, "lightup: "); push_time(); index_t n = 1024; index_t seed = 123456789; double time0; /* Figure out how many devices we have. */ int d_cnt; cudaGetDeviceCount(&d_cnt); index_t d_use = d_cnt; if(have_devices) { if(devices > d_cnt) ERROR("only %d CUDA devices available, request for %ld devices", d_cnt, devices); d_use = devices; } fprintf(stdout, "{dev:%ld:%d}", d_use, d_cnt); fflush(stdout); index_t d_num[d_use]; for(index_t d = 0; d < d_use; d++) d_num[d] = d; /* Allocate space in host memory. */ scalar_t *h_x = (scalar_t *) MALLOC(n*sizeof(scalar_t)); randinits_scalar(h_x, n, seed); scalar_t *d_x[d_use]; /* Now light up the hardware. */ push_time(); fprintf(stdout, " {malloc:"); /* Allocate space in device memory and copy input. */ for(int d = 0; d < d_use; d++) { push_time(); cudaSetDevice(d_num[d]); double time1 = pop_time(); push_time(); /* Set up space in device memory. */ CUDA_WRAP(cudaMalloc(&d_x[d], n*sizeof(scalar_t))); double time2 = pop_time(); push_time(); /* Upload input to device. */ CUDA_WRAP(cudaMemcpy(d_x[d], h_x, n*sizeof(scalar_t), cudaMemcpyHostToDevice)); double time3 = pop_time(); fprintf(stdout, " [%d %.2lfms %.2lfms %.2lfms]", d, time1, time2, time3); } time0 = pop_time(); fprintf(stdout, " %.2lfms}", time0); /* Free working space in host memory. */ FREE(h_x); push_time(); fprintf(stdout, " {free:"); /* Free device memory. */ for(int d = 0; d < d_use; d++) { push_time(); cudaSetDevice(d_num[d]); double time1 = pop_time(); push_time(); CUDA_WRAP(cudaFree(d_x[d])); double time2 = pop_time(); fprintf(stdout, " [%d %.2lfms %.2lfms]", d, time1, time2); } time0 = pop_time(); fprintf(stdout, " %.2lfms}", time0); time0 = pop_time(); fprintf(stdout, " [%.2lfms]\n", time0); fflush(stdout); } /******************************************************* The k-motif oracle. */ #if defined(GPU_M2090) #define PEAK_ALLOC_FOR_DEVICE (3*((size_t) 1 << 30)) // 3 GiB peak line array allocation for the M2090 #elif defined(GPU_K40) || defined(GPU_K80) #define PEAK_ALLOC_FOR_DEVICE (3*((size_t) 1 << 30)) // 3 GiB peak line array allocation for the K40 #elif defined(GPU_P100) #define PEAK_ALLOC_FOR_DEVICE (16*((size_t) 1 << 30)) // 16 GiB peak line array allocation for the P100 #else #error "choose one of GPU_M2090 or GPU_K40 or GPU_K80 or GPU_P100" #endif index_t oracle(index_t n0, index_t k, index_t *h_pos, index_t *h_adj, index_t num_shades, shade_map_t *h_s, index_t seed, scalar_t *master_vsum) { assert(k < 31); assert(n0 > 0); index_t m0 = h_pos[n0-1]+h_adj[h_pos[n0-1]]+1-n0; index_t b0 = n0+m0; index_t n = 1; while(n < n0) n = n*2; index_t m = m0; index_t b = n+m; /* Invariant: n must be a power of two. */ /* Figure out how many devices we have. */ int d_cnt; cudaGetDeviceCount(&d_cnt); index_t d_use = d_cnt; if(have_devices) { if(devices > d_cnt) ERROR("only %d CUDA devices available, request for %ld devices", d_cnt, devices); d_use = devices; } index_t d_num[d_use]; for(index_t d = 0; d < d_use; d++) d_num[d] = d; /* Allocate work to devices. 
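 *
 * The 2^k subset sums are split across the d_use devices: work proceeds
 * in outer = sum_size/g rounds of g scalars (gl = g/SCALARS_IN_LINE
 * lines) each, assigned to the devices round-robin, and g is halved
 * until the scratch array of k*n*g scalars fits the
 * PEAK_ALLOC_FOR_DEVICE budget. The launch geometry is then chosen so
 * that n*gl == dg*db with a block size between THREADS_IN_WARP and
 * max_block.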
*/ index_t sum_size = 1 << k; assert(SCALARS_IN_LINE <= sum_size); index_t g = sum_size; // g scalars of work g /= d_use; assert(g*d_use == sum_size); while(LINE_ARRAY_SIZE((size_t) k*n*g) > PEAK_ALLOC_FOR_DEVICE) g /= 2; assert(g >= SCALARS_IN_LINE); index_t outer = sum_size / g; // number of iterations for outer loop index_t gl = g / SCALARS_IN_LINE; // gl scalar-lines of work index_t num_processors = 16; // should be a power of 2 index_t max_block = 32; // should be a power of 2 index_t work = n*gl; index_t work_per_processor = work / num_processors; index_t dg, db; if(work_per_processor < THREADS_IN_WARP) { dg = work / THREADS_IN_WARP; db = THREADS_IN_WARP; } else { db = work / num_processors; if(db > max_block) db = max_block; dg = work / db; } assert(dg >= 1); // must have enough work assert(db >= THREADS_IN_WARP); /* Invariant: n*gl == work == dg*db */ assert(dg % VERTICES_PER_GENF_THREAD == 0); /* Light up all devices and avoid cold start. */ lightup_stub(); /* Start timing. */ float time; cudaEvent_t start, stop; cudaSetDevice(d_num[0]); CUDA_WRAP(cudaEventCreate(&start)); CUDA_WRAP(cudaEventCreate(&stop)); CUDA_WRAP(cudaEventRecord(start, 0)); /* Allocate working space in host memory. */ scalar_t *h_vsum = (scalar_t *) MALLOC(n*sizeof(scalar_t)); scalar_t *h_y = (scalar_t *) MALLOC(b*k*sizeof(scalar_t)); scalar_t *h_z = (scalar_t *) MALLOC(n*k*sizeof(scalar_t)); index_t *h_pospad = (index_t *) MALLOC((n-n0)*sizeof(index_t)); index_t *h_adjpad = (index_t *) MALLOC((b-b0)*sizeof(index_t)); /* Init & set up padding. */ #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < n; i++) h_vsum[i] = 0; init_shades(n, n0, k, num_shades, h_s, seed, h_z); randinits_scalar(h_y, b*k, seed); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < n-n0; i++) h_pospad[i] = b0+i; #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < b-b0; i++) h_adjpad[i] = 0; index_t *d_pos[d_use]; index_t *d_adj[d_use]; scalar_t *d_y[d_use]; scalar_t *d_z[d_use]; scalar_t *d_sum_out[d_use]; line_array_t *d_s[d_use]; scalar_t *d_vsum[d_use]; #ifdef GF_LOG_EXP_LOOKUP scalar_t *d_lookup_log[d_use]; scalar_t *d_lookup_exp[d_use]; gf_precompute_exp_log(); #endif index_t seg = LINE_SEGMENT_SIZE(k*n*g); /* Prepare input for each available device. */ #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(int d = 0; d < d_use; d++) { cudaSetDevice(d_num[d]); /* Set up inputs and scratch space in device memory. */ CUDA_WRAP(cudaMalloc(&d_pos[d], n*sizeof(index_t))); CUDA_WRAP(cudaMalloc(&d_adj[d], b*sizeof(index_t))); CUDA_WRAP(cudaMalloc(&d_y[d], b*k*sizeof(scalar_t))); CUDA_WRAP(cudaMalloc(&d_z[d], n*k*sizeof(scalar_t))); CUDA_WRAP(cudaMalloc(&d_sum_out[d], sizeof(scalar_t))); CUDA_WRAP(cudaMalloc(&d_vsum[d], n*sizeof(scalar_t))); CUDA_WRAP(cudaMalloc(&d_s[d], LINE_ARRAY_SIZE(k*n*g))); /* Upload input to device. 
*/ CUDA_WRAP(cudaMemcpy(d_pos[d], h_pos, n0*sizeof(index_t), cudaMemcpyHostToDevice)); CUDA_WRAP(cudaMemcpy(d_adj[d], h_adj, b0*sizeof(index_t), cudaMemcpyHostToDevice)); CUDA_WRAP(cudaMemcpy(d_y[d], h_y, b*k*sizeof(scalar_t), cudaMemcpyHostToDevice)); CUDA_WRAP(cudaMemcpy(d_z[d], h_z, n*k*sizeof(scalar_t), cudaMemcpyHostToDevice)); CUDA_WRAP(cudaMemcpy(d_pos[d] + n0, h_pospad, (n-n0)*sizeof(index_t), cudaMemcpyHostToDevice)); CUDA_WRAP(cudaMemcpy(d_adj[d] + b0, h_adjpad, (b-b0)*sizeof(index_t), cudaMemcpyHostToDevice)); #ifdef GF_LOG_EXP_LOOKUP CUDA_WRAP(cudaMalloc(&d_lookup_log[d], GF_LOG_LOOKUP_SIZE)); CUDA_WRAP(cudaMalloc(&d_lookup_exp[d], GF_EXP_LOOKUP_SIZE)); CUDA_WRAP(cudaMemcpy(d_lookup_log[d], h_lookup_log, GF_LOG_LOOKUP_SIZE, cudaMemcpyHostToDevice)); CUDA_WRAP(cudaMemcpy(d_lookup_exp[d], h_lookup_exp, GF_EXP_LOOKUP_SIZE, cudaMemcpyHostToDevice)); #endif } /* Free working space in host memory. */ FREE(h_y); FREE(h_z); FREE(h_pospad); FREE(h_adjpad); scalar_t master_sum; SCALAR_SET_ZERO(master_sum); /* Now run the work, in parallel on each device. * Use CPU-side multithreading for parallel launch. */ #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(int d = 0; d < d_use; d++) { cudaSetDevice(d_num[d]); for(index_t out = d; out < outer; out = (out + d_use)) { driver_constrained_sieve_pre(n, k, gl, seg, g*out, dg, db, d_z[d], d_s[d]); line_array_t *d_g = driver_karb_genf(n, k, gl, b, seg, dg/VERTICES_PER_GENF_THREAD, db, d_pos[d], d_adj[d], d_y[d], d_s[d] #ifdef GF_LOG_EXP_LOOKUP , d_lookup_log[d], d_lookup_exp[d] #endif ); driver_line_psum(n, gl, seg, d_g + (k-1)*n*gl, d_g, d_vsum[d], out == d ? 1 : 0); } } for(int d = 0; d < d_use; d++) { cudaSetDevice(d_num[d]); CUDA_WRAP(cudaMemcpy(h_vsum, d_vsum[d], n*sizeof(scalar_t), cudaMemcpyDeviceToHost)); if(d == 0) { #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < n0; i++) master_vsum[i] = h_vsum[i]; } else { #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t i = 0; i < n0; i++) master_vsum[i] ^= h_vsum[i]; } } for(int i = 0; i < n0; i++) REF_SCALAR_ADD(master_sum, master_sum, master_vsum[i]); /* Stop timing. */ cudaSetDevice(d_num[0]); CUDA_WRAP(cudaEventRecord(stop, 0)); CUDA_WRAP(cudaEventSynchronize(stop)); CUDA_WRAP(cudaEventElapsedTime(&time, start, stop)); /* All done, now print out some statistics. 
*/ // total IR: 2*(k-1)*n+(k-1)*m (genf) // total SW: n*k (host init) // total SR: n*k (pre) // total SR: (k-1)*m (genf) // total LW: n*gl (pre) // total LR+LW: m*gl*k(k-1) + n*gl*(k-1) (genf) // total LR: n*gl (sum) // total SMUL: n0*k*k (host init) // total LMUL: m*gl*k(k-1)/2 + m*gl*(k-1) (genf) double line_rw_inner = (double) m*gl*k*(k-1) + n*gl*(k-1) + 2*n*gl; double line_mul_inner = (double) m*gl*k*(k-1)/2 + m*gl*(k-1); double line_rw_total = line_rw_inner*outer; double line_mul_total = line_mul_inner*outer; double bytes_rw_total = EFFECTIVE_BYTES_IN_LINE*line_rw_total; double scalar_mul_total = line_mul_total*SCALARS_IN_LINE; double rw_rate = bytes_rw_total / (time/1000.0); double mul_rate = scalar_mul_total / time; double total_instr_in_mul = LINE_MUL_INSTR*scalar_mul_total/SCALARS_IN_LINE; double instr_in_mul_rate = total_instr_in_mul / time; fprintf(stdout, "oracle: {dev:%ld:%ld} " SCALAR_FORMAT_STRING " %10.2fms [%6.3lfGiB %7.2lfGiB/s %7.2lfGHz %7.2fGHz] %ld %d", d_use, d_cnt, (scalar_t) master_sum, time, inGiB(LINE_ARRAY_SIZE(k*n*g)+ n*sizeof(index_t)+ b*sizeof(index_t)+ b*k*sizeof(scalar_t)+ n*k*sizeof(scalar_t)+ n*sizeof(scalar_t)+ sizeof(scalar_t)), rw_rate/((double)(1<<30)), mul_rate/((double)1e6), instr_in_mul_rate/((double) 1e6), gl, master_sum != 0); fflush(stdout); /* Free host memory. */ FREE(h_vsum); /* Free device memory. */ #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(int d = 0; d < d_use; d++) { cudaSetDevice(d_num[d]); CUDA_WRAP(cudaFree(d_pos[d])); CUDA_WRAP(cudaFree(d_adj[d])); CUDA_WRAP(cudaFree(d_y[d])); CUDA_WRAP(cudaFree(d_z[d])); CUDA_WRAP(cudaFree(d_s[d])); CUDA_WRAP(cudaFree(d_vsum[d])); CUDA_WRAP(cudaFree(d_sum_out[d])); #ifdef PRECOMPUTE_GF_2_8 CUDA_WRAP(cudaFree(d_lookup_log[d])); CUDA_WRAP(cudaFree(d_lookup_exp[d])); #endif } return master_sum != 0; } /***************************************************************** End CUDA. */ /************************************************ Rudimentary graph builder. 
*/ typedef struct { index_t num_vertices; index_t num_edges; index_t edge_capacity; index_t *edges; index_t *colors; } graph_t; static index_t *enlarge(index_t m, index_t m_was, index_t *was) { assert(m >= 0 && m_was >= 0); index_t *a = (index_t *) MALLOC(sizeof(index_t)*m); index_t i; if(was != (void *) 0) { for(i = 0; i < m_was; i++) { a[i] = was[i]; } FREE(was); } return a; } graph_t *graph_alloc(index_t n) { assert(n >= 0); index_t i; graph_t *g = (graph_t *) MALLOC(sizeof(graph_t)); g->num_vertices = n; g->num_edges = 0; g->edge_capacity = 100; g->edges = enlarge(2*g->edge_capacity, 0, (index_t *) 0); g->colors = (index_t *) MALLOC(sizeof(index_t)*n); for(i = 0; i < n; i++) g->colors[i] = -1; return g; } void graph_free(graph_t *g) { FREE(g->edges); FREE(g->colors); FREE(g); } void graph_add_edge(graph_t *g, index_t u, index_t v) { assert(u >= 0 && v >= 0 && u < g->num_vertices && v < g->num_vertices); if(g->num_edges == g->edge_capacity) { g->edges = enlarge(4*g->edge_capacity, 2*g->edge_capacity, g->edges); g->edge_capacity *= 2; } assert(g->num_edges < g->edge_capacity); index_t *e = g->edges + 2*g->num_edges; g->num_edges++; e[0] = u; e[1] = v; } index_t *graph_edgebuf(graph_t *g, index_t cap) { g->edges = enlarge(2*g->edge_capacity+2*cap, 2*g->edge_capacity, g->edges); index_t *e = g->edges + 2*g->num_edges; g->edge_capacity += cap; g->num_edges += cap; return e; } void graph_set_color(graph_t *g, index_t u, index_t c) { assert(u >= 0 && u < g->num_vertices && c >= 0); g->colors[u] = c; } /************************************ Basic motif query processing routines. */ struct motifq_struct { index_t is_stub; index_t n; index_t k; index_t *pos; index_t *adj; index_t nl; index_t *l; index_t ns; shade_map_t *shade; scalar_t *vsum; }; typedef struct motifq_struct motifq_t; void adjsort(index_t n, index_t *pos, index_t *adj) { #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) { index_t pu = pos[u]; index_t deg = adj[pu]; heapsort_indext(deg, adj + pu + 1); } } void motifq_free(motifq_t *q) { if(!q->is_stub) { FREE(q->pos); FREE(q->adj); FREE(q->l); FREE(q->shade); FREE(q->vsum); } FREE(q); } index_t motifq_execute(motifq_t *q) { if(q->is_stub) return 0; return oracle(q->n, q->k, q->pos, q->adj, q->ns, q->shade, irand(), q->vsum); } /************** Project a query by cutting out a given interval of vertices. 
*/ index_t get_poscut(index_t n, index_t *pos, index_t *adj, index_t lo_v, index_t hi_v, index_t *poscut) { // Note: assumes the adjacency lists are sorted assert(lo_v <= hi_v); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < lo_v; u++) { index_t pu = pos[u]; index_t deg = adj[pu]; index_t cs, ce; index_t l = get_interval(deg, adj + pu + 1, lo_v, hi_v, &cs, &ce); poscut[u] = deg - l; } #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = hi_v+1; u < n; u++) { index_t pu = pos[u]; index_t deg = adj[pu]; index_t cs, ce; index_t l = get_interval(deg, adj + pu + 1, lo_v, hi_v, &cs, &ce); poscut[u-hi_v-1+lo_v] = deg - l; } index_t ncut = n - (hi_v-lo_v+1); index_t run = prefixsum(ncut, poscut, 1); return run; } motifq_t *motifq_cut(motifq_t *q, index_t lo_v, index_t hi_v) { // Note: assumes the adjacency lists are sorted index_t n = q->n; index_t *pos = q->pos; index_t *adj = q->adj; assert(0 <= lo_v && lo_v <= hi_v && hi_v < n); // Fast-forward a stub NO when the interval // [lo_v,hi_v] contains an element in q->l for(index_t i = 0; i < q->nl; i++) { if(q->l[i] >= lo_v && q->l[i] <= hi_v) { motifq_t *qs = (motifq_t *) MALLOC(sizeof(motifq_t)); qs->is_stub = 1; return qs; } } index_t ncut = n - (hi_v-lo_v+1); index_t *poscut = alloc_idxtab(ncut); index_t bcut = get_poscut(n, pos, adj, lo_v, hi_v, poscut); index_t *adjcut = alloc_idxtab(bcut); index_t gap = hi_v-lo_v+1; #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t v = 0; v < ncut; v++) { index_t u = v; if(u >= lo_v) u += gap; index_t pu = pos[u]; index_t degu = adj[pu]; index_t cs, ce; index_t l = get_interval(degu, adj + pu + 1, lo_v, hi_v, &cs, &ce); index_t pv = poscut[v]; index_t degv = degu - l; adjcut[pv] = degv; // could parallelize this too for(index_t i = 0; i < cs; i++) adjcut[pv + 1 + i] = adj[pu + 1 + i]; // could parallelize this too for(index_t i = cs; i < degv; i++) adjcut[pv + 1 + i] = adj[pu + 1 + i + l] - gap; } motifq_t *qq = (motifq_t *) MALLOC(sizeof(motifq_t)); qq->is_stub = 0; qq->n = ncut; qq->k = q->k; qq->pos = poscut; qq->adj = adjcut; qq->nl = q->nl; qq->l = (index_t *) MALLOC(sizeof(index_t)*qq->nl); for(index_t i = 0; i < qq->nl; i++) { index_t u = q->l[i]; assert(u < lo_v || u > hi_v); if(u > hi_v) u -= gap; qq->l[i] = u; } qq->ns = q->ns; qq->shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*ncut); for(index_t v = 0; v < ncut; v++) { index_t u = v; if(u >= lo_v) u += gap; qq->shade[v] = q->shade[u]; } qq->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*qq->n); return qq; } /***************** Project a query with given projection & embedding arrays. 
*/ #define PROJ_UNDEF 0xFFFFFFFFFFFFFFFFUL index_t get_posproj(index_t n, index_t *pos, index_t *adj, index_t nproj, index_t *proj, index_t *embed, index_t *posproj) { #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t v = 0; v < nproj; v++) { index_t u = embed[v]; index_t pu = pos[u]; index_t deg = adj[pu]; index_t degproj = 0; for(index_t i = 0; i < deg; i++) { index_t w = proj[adj[pu + 1 + i]]; if(w != PROJ_UNDEF) degproj++; } posproj[v] = degproj; } index_t run = prefixsum(nproj, posproj, 1); return run; } motifq_t *motifq_project(motifq_t *q, index_t nproj, index_t *proj, index_t *embed, index_t nl, index_t *l) { index_t n = q->n; index_t *pos = q->pos; index_t *adj = q->adj; index_t *posproj = alloc_idxtab(nproj); index_t bproj = get_posproj(n, pos, adj, nproj, proj, embed, posproj); index_t *adjproj = alloc_idxtab(bproj); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t v = 0; v < nproj; v++) { index_t pv = posproj[v]; index_t u = embed[v]; index_t pu = pos[u]; index_t deg = adj[pu]; index_t degproj = 0; for(index_t i = 0; i < deg; i++) { index_t w = proj[adj[pu + 1 + i]]; if(w != PROJ_UNDEF) adjproj[pv + 1 + degproj++] = w; } adjproj[pv] = degproj; } motifq_t *qq = (motifq_t *) MALLOC(sizeof(motifq_t)); qq->is_stub = 0; qq->n = nproj; qq->k = q->k; qq->pos = posproj; qq->adj = adjproj; // Now project the l array assert(q->nl == 0); // l array comes from lister qq->nl = nl; qq->l = (index_t *) MALLOC(sizeof(index_t)*nl); for(index_t i = 0; i < nl; i++) { index_t u = proj[l[i]]; assert(u != PROJ_UNDEF); // query is a trivial NO ! qq->l[i] = u; } // Next set up the projected shades qq->ns = q->ns; qq->shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*nproj); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) { index_t v = proj[u]; if(v != PROJ_UNDEF) qq->shade[v] = q->shade[u]; } // Reserve a unique shade to every vertex in l // while keeping the remaining shades available // Reserve shades first ... index_t *l_shade = (index_t *) MALLOC(sizeof(index_t)*nl); shade_map_t reserved_shades = 0; for(index_t i = 0; i < nl; i++) { index_t v = qq->l[i]; index_t j = 0; for(; j < qq->ns; j++) if(((qq->shade[v] >> j)&1) == 1 && ((reserved_shades >> j)&1) == 0) break; assert(j < qq->ns); reserved_shades |= 1UL << j; l_shade[i] = j; } // ... then clear all reserved shades in one pass #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t v = 0; v < nproj; v++) qq->shade[v] &= ~reserved_shades; // ... and finally set reserved shades for(index_t i = 0; i < nl; i++) { index_t v = qq->l[i]; qq->shade[v] = 1UL << l_shade[i]; } FREE(l_shade); qq->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*qq->n); return qq; } /*************************************************** The interval extractor. 
*/ struct ivlist_struct { index_t start; index_t end; struct ivlist_struct *prev; struct ivlist_struct *next; }; typedef struct ivlist_struct ivlist_t; typedef struct ivext_struct { index_t n; index_t k; ivlist_t *queue; ivlist_t *active_queue_head; ivlist_t *spare_queue_head; ivlist_t *embed_list; } ivext_t; void ivext_enqueue_spare(ivext_t *e, ivlist_t *iv) { pnlinknext(e->spare_queue_head,iv); } void ivext_enqueue_active(ivext_t *e, ivlist_t *iv) { pnlinkprev(e->active_queue_head,iv); } ivlist_t *ivext_dequeue_first_nonsingleton(ivext_t *e) { ivlist_t *iv = e->active_queue_head->next; for(; iv != e->active_queue_head; iv = iv->next) if(iv->end - iv->start + 1 > 1) break; assert(iv != e->active_queue_head); pnunlink(iv); return iv; } ivlist_t *ivext_get_spare(ivext_t *e) { assert(e->spare_queue_head->next != e->spare_queue_head); ivlist_t *iv = e->spare_queue_head->next; pnunlink(iv); return iv; } void ivext_reset(ivext_t *e) { e->active_queue_head = e->queue + 0; e->spare_queue_head = e->queue + 1; e->active_queue_head->next = e->active_queue_head; e->active_queue_head->prev = e->active_queue_head; e->spare_queue_head->prev = e->spare_queue_head; e->spare_queue_head->next = e->spare_queue_head; e->embed_list = (ivlist_t *) 0; for(index_t i = 0; i < e->k + 2; i++) ivext_enqueue_spare(e, e->queue + 2 + i); // rot-safe ivlist_t *iv = ivext_get_spare(e); iv->start = 0; iv->end = e->n-1; ivext_enqueue_active(e, iv); } ivext_t *ivext_alloc(index_t n, index_t k) { ivext_t *e = (ivext_t *) MALLOC(sizeof(ivext_t)); e->n = n; e->k = k; e->queue = (ivlist_t *) MALLOC(sizeof(ivlist_t)*(k+4)); // rot-safe ivext_reset(e); return e; } void ivext_free(ivext_t *e) { ivlist_t *el = e->embed_list; while(el != (ivlist_t *) 0) { ivlist_t *temp = el; el = el->next; FREE(temp); } FREE(e->queue); FREE(e); } void ivext_project(ivext_t *e, ivlist_t *iv) { for(ivlist_t *z = e->active_queue_head->next; z != e->active_queue_head; z = z->next) { assert(z->end < iv->start || z->start > iv->end); if(z->start > iv->end) { z->start -= iv->end-iv->start+1; z->end -= iv->end-iv->start+1; } } ivlist_t *em = (ivlist_t *) MALLOC(sizeof(ivlist_t)); em->start = iv->start; em->end = iv->end; em->next = e->embed_list; e->embed_list = em; } index_t ivext_embed(ivext_t *e, index_t u) { ivlist_t *el = e->embed_list; while(el != (ivlist_t *) 0) { if(u >= el->start) u += el->end - el->start + 1; el = el->next; } return u; } ivlist_t *ivext_halve(ivext_t *e, ivlist_t *iv) { assert(iv->end - iv->start + 1 >= 2); index_t mid = (iv->start + iv->end)/2; // mid < iv->end ivlist_t *h = ivext_get_spare(e); h->start = iv->start; h->end = mid; iv->start = mid+1; return h; } index_t ivext_queue_size(ivext_t *e) { index_t s = 0; for(ivlist_t *iv = e->active_queue_head->next; iv != e->active_queue_head; iv = iv->next) s += iv->end-iv->start+1; return s; } index_t ivext_num_active_intervals(ivext_t *e) { index_t s = 0; for(ivlist_t *iv = e->active_queue_head->next; iv != e->active_queue_head; iv = iv->next) s++; return s; } void ivext_queue_print(FILE *out, ivext_t *e, index_t rot) { index_t j = 0; char x[16384]; char y[16384]; y[0] = '\0'; sprintf(x, "%c%12ld [", rot == 0 ? ' ' : 'R', ivext_queue_size(e)); strcat(y, x); for(ivlist_t *iv = e->active_queue_head->next; iv != e->active_queue_head; iv = iv->next) { assert(iv->start <= iv->end); if(iv->start < iv->end) sprintf(x, "%s[%ld:%ld]", j++ == 0 ? "" : ",", ivext_embed(e, iv->start), ivext_embed(e, iv->end)); else sprintf(x, "%s[%ld]", j++ == 0 ? 
"[" : ",", ivext_embed(e, iv->start)); strcat(y, x); } strcat(y, "] "); fprintf(out, "%-120s", y); fflush(out); } index_t extract_match(index_t is_root, motifq_t *query, index_t *match) { // Assumes adjancency lists of query are sorted. fprintf(stdout, "extract: %ld %ld %ld\n", query->n, query->k, query->nl); push_time(); assert(query->k <= query->n); ivext_t *e = ivext_alloc(query->n, query->k); ivext_queue_print(stdout, e, 0); if(!motifq_execute(query)) { fprintf(stdout, " -- false\n"); ivext_free(e); if(!is_root) motifq_free(query); double time = pop_time(); fprintf(stdout, "extract done [%.2lf ms]\n", time); return 0; } fprintf(stdout, " -- true\n"); while(ivext_queue_size(e) > e->k) { ivlist_t *iv = ivext_dequeue_first_nonsingleton(e); ivlist_t *h = ivext_halve(e, iv); ivext_enqueue_active(e, iv); motifq_t *qq = motifq_cut(query, h->start, h->end); ivext_queue_print(stdout, e, 0); if(motifq_execute(qq)) { fprintf(stdout, " -- true\n"); if(!is_root) motifq_free(query); query = qq; is_root = 0; ivext_project(e, h); ivext_enqueue_spare(e, h); } else { fprintf(stdout, " -- false\n"); motifq_free(qq); pnunlink(iv); ivext_enqueue_active(e, h); qq = motifq_cut(query, iv->start, iv->end); ivext_queue_print(stdout, e, 0); if(motifq_execute(qq)) { fprintf(stdout, " -- true\n"); if(!is_root) motifq_free(query); query = qq; is_root = 0; ivext_project(e, iv); ivext_enqueue_spare(e, iv); } else { fprintf(stdout, " -- false\n"); motifq_free(qq); ivext_enqueue_active(e, iv); while(ivext_num_active_intervals(e) > e->k) { // Rotate queue until outlier is out ... ivlist_t *iv = e->active_queue_head->next; pnunlink(iv); qq = motifq_cut(query, iv->start, iv->end); ivext_queue_print(stdout, e, 1); if(motifq_execute(qq)) { fprintf(stdout, " -- true\n"); if(!is_root) motifq_free(query); query = qq; is_root = 0; ivext_project(e, iv); ivext_enqueue_spare(e, iv); } else { fprintf(stdout, " -- false\n"); motifq_free(qq); ivext_enqueue_active(e, iv); } } } } } for(index_t i = 0; i < query->k; i++) match[i] = ivext_embed(e, i); ivext_free(e); if(!is_root) motifq_free(query); double time = pop_time(); fprintf(stdout, "extract done [%.2lf ms]\n", time); return 1; } /*************************************************************** The lister. */ #define M_QUERY 0 #define M_OPEN 1 #define M_CLOSE 2 #define M_REWIND_U 3 #define M_REWIND_L 4 index_t command_mnemonic(index_t command) { return command >> 60; } index_t command_index(index_t command) { return command & (~(0xFFUL<<60)); } index_t to_command_idx(index_t mnemonic, index_t idx) { assert(idx < (1UL << 60)); return (mnemonic << 60)|idx; } index_t to_command(index_t mnemonic) { return to_command_idx(mnemonic, 0UL); } typedef struct { index_t n; // number of elements in universe index_t k; // size of the sets to be listed index_t *u; // upper bound as a bitmap index_t u_size; // size of upper bound index_t *l; // lower bound index_t l_size; // size of lower bound index_t *stack; // a stack for maintaining state index_t stack_capacity; // ... 
the capacity of the stack index_t top; // index of stack top motifq_t *root; // the root query } lister_t; void lister_push(lister_t *t, index_t word) { assert(t->top + 1 < t->stack_capacity); t->stack[++t->top] = word; } index_t lister_pop(lister_t *t) { return t->stack[t->top--]; } index_t lister_have_work(lister_t *t) { return t->top >= 0; } index_t lister_in_l(lister_t *t, index_t j) { for(index_t i = 0; i < t->l_size; i++) if(t->l[i] == j) return 1; return 0; } void lister_push_l(lister_t *t, index_t j) { assert(!lister_in_l(t, j) && t->l_size < t->k); t->l[t->l_size++] = j; } void lister_pop_l(lister_t *t) { assert(t->l_size > 0); t->l_size--; } void lister_reset(lister_t *t) { t->l_size = 0; t->top = -1; lister_push(t, to_command(M_QUERY)); for(index_t i = 0; i < t->n; i++) bitset(t->u, i, 1); t->u_size = t->n; } lister_t *lister_alloc(index_t n, index_t k, motifq_t *root) { assert(n >= 1 && n < (1UL << 60) && k >= 1 && k <= n); lister_t *t = (lister_t *) MALLOC(sizeof(lister_t)); t->n = n; t->k = k; t->u = alloc_idxtab((n+63)/64); t->l = alloc_idxtab(k); t->stack_capacity = n + k*(k+1+2*k) + 1; t->stack = alloc_idxtab(t->stack_capacity); lister_reset(t); t->root = root; if(t->root != (motifq_t *) 0) { assert(t->root->n == t->n); assert(t->root->k == t->k); assert(t->root->nl == 0); } return t; } void lister_free(lister_t *t) { if(t->root != (motifq_t *) 0) motifq_free(t->root); FREE(t->u); FREE(t->l); FREE(t->stack); FREE(t); } void lister_get_proj_embed(lister_t *t, index_t **proj_out, index_t **embed_out) { index_t n = t->n; index_t usize = t->u_size; index_t *embed = (index_t *) MALLOC(sizeof(index_t)*usize); index_t *proj = (index_t *) MALLOC(sizeof(index_t)*n); // could parallelize this (needs parallel prefix sum) index_t run = 0; for(index_t i = 0; i < n; i++) { if(bitget(t->u, i)) { proj[i] = run; embed[run] = i; run++; } else { proj[i] = PROJ_UNDEF; } } assert(run == usize); *proj_out = proj; *embed_out = embed; } void lister_query_setup(lister_t *t, motifq_t **q_out, index_t **embed_out) { index_t *proj; index_t *embed; // set up the projection with u and l lister_get_proj_embed(t, &proj, &embed); motifq_t *qq = motifq_project(t->root, t->u_size, proj, embed, t->l_size, t->l); FREE(proj); *q_out = qq; *embed_out = embed; } index_t lister_extract(lister_t *t, index_t *s) { // assumes t->u contains all elements of t->l // (otherwise query is trivial no) assert(t->root != (motifq_t *) 0); if(t->u_size == t->n) { // rush the root query without setting up a copy return extract_match(1, t->root, s); } else { // a first order of business is to set up the query // based on the current t->l and t->u; this includes // also setting up the embedding back to the root, // in case we are lucky and actually discover a match motifq_t *qq; // will be released by extractor index_t *embed; lister_query_setup(t, &qq, &embed); // now execute the interval extractor ... index_t got_match = extract_match(0, qq, s); // ... 
and embed the match (if any) if(got_match) { for(index_t i = 0; i < t->k; i++) s[i] = embed[s[i]]; } FREE(embed); return got_match; } } index_t lister_run(lister_t *t, index_t *s) { while(lister_have_work(t)) { index_t cmd = lister_pop(t); index_t mnem = command_mnemonic(cmd); index_t idx = command_index(cmd); switch(mnem) { case M_QUERY: if(t->k <= t->u_size && lister_extract(t, s)) { // we have discovered a match, which we need to // put on the stack to continue work when the user // requests this for(index_t i = 0; i < t->k; i++) lister_push(t, s[i]); lister_push(t, to_command_idx(M_OPEN, t->k-1)); // now report our discovery to user return 1; } break; case M_OPEN: { index_t *x = t->stack + t->top - t->k + 1; index_t k = 0; for(; k < idx; k++) if(!lister_in_l(t, x[k])) break; if(k == idx) { // opening on last element of x not in l // so we can dispense with x as long as we remember to // insert x[idx] back to u when rewinding for(index_t j = 0; j < t->k; j++) lister_pop(t); // axe x from stack if(!lister_in_l(t, x[idx])) { bitset(t->u, x[idx], 0); // remove x[idx] from u t->u_size--; lister_push(t, to_command_idx(M_REWIND_U, x[idx])); lister_push(t, to_command(M_QUERY)); } } else { // have still other elements of x that we need to // open on, so must keep x in stack // -- // invariant that controls stack size: // each open increases l by at least one lister_push(t, to_command_idx(M_CLOSE, idx)); if(!lister_in_l(t, x[idx])) { bitset(t->u, x[idx], 0); // remove x[idx] from u t->u_size--; lister_push(t, to_command_idx(M_REWIND_U, x[idx])); // force x[0],x[1],...,x[idx-1] to l index_t j = 0; for(; j < idx; j++) { if(!lister_in_l(t, x[j])) { if(t->l_size >= t->k) break; lister_push_l(t, x[j]); lister_push(t, to_command_idx(M_REWIND_L, x[j])); } } if(j == idx) lister_push(t, to_command(M_QUERY)); } } } break; case M_CLOSE: assert(idx > 0); lister_push(t, to_command_idx(M_OPEN, idx-1)); break; case M_REWIND_U: bitset(t->u, idx, 1); t->u_size++; break; case M_REWIND_L: lister_pop_l(t); break; } } lister_push(t, to_command(M_QUERY)); return 0; } /******************************************************* Root query builder. */ motifq_t *root_build(graph_t *g, index_t k, index_t *kk) { push_memtrack(); index_t n = g->num_vertices; index_t m = 2*g->num_edges; index_t *pos = alloc_idxtab(n); index_t *adj = alloc_idxtab(n+m); index_t ns = k; shade_map_t *shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n); motifq_t *root = (motifq_t *) MALLOC(sizeof(motifq_t)); root->is_stub = 0; root->n = g->num_vertices; root->k = k; root->pos = pos; root->adj = adj; root->nl = 0; root->l = (index_t *) MALLOC(sizeof(index_t)*root->nl); root->ns = ns; root->shade = shade; root->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*root->n); push_time(); fprintf(stdout, "root build ... "); fflush(stdout); push_time(); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) pos[u] = 0; double time = pop_time(); fprintf(stdout, "[zero: %.2lf ms] ", time); fflush(stdout); push_time(); index_t *e = g->edges; #ifdef BUILD_PARALLEL // Parallel occurrence count // -- each thread is responsible for a group of bins, // all threads scan the entire list of edges index_t nt = num_threads(); index_t block_size = n/nt; #pragma omp parallel for for(index_t t = 0; t < nt; t++) { index_t start = t*block_size; index_t stop = (t == nt-1) ? 
n-1 : (start+block_size-1); for(index_t j = 0; j < m; j++) { index_t u = e[j]; if(start <= u && u <= stop) pos[u]++; // I am responsible for u, record adjacency to u } } #else for(index_t j = 0; j < m; j++) pos[e[j]]++; #endif index_t run = prefixsum(n, pos, 1); assert(run == n+m); time = pop_time(); fprintf(stdout, "[pos: %.2lf ms] ", time); fflush(stdout); push_time(); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) adj[pos[u]] = 0; e = g->edges; #ifdef BUILD_PARALLEL // Parallel aggregation to bins // -- each thread is responsible for a group of bins, // all threads scan the entire list of edges nt = num_threads(); block_size = n/nt; #pragma omp parallel for for(index_t t = 0; t < nt; t++) { index_t start = t*block_size; index_t stop = (t == nt-1) ? n-1 : (start+block_size-1); for(index_t j = 0; j < m; j+=2) { index_t u0 = e[j+0]; index_t u1 = e[j+1]; if(start <= u0 && u0 <= stop) { // I am responsible for u0, record adjacency to u1 index_t pu0 = pos[u0]; adj[pu0 + 1 + adj[pu0]++] = u1; } if(start <= u1 && u1 <= stop) { // I am responsible for u1, record adjacency to u0 index_t pu1 = pos[u1]; adj[pu1 + 1 + adj[pu1]++] = u0; } } } #else for(index_t j = 0; j < m; j+=2) { index_t u0 = e[j+0]; index_t u1 = e[j+1]; index_t p0 = pos[u0]; index_t p1 = pos[u1]; adj[p1 + 1 + adj[p1]++] = u0; adj[p0 + 1 + adj[p0]++] = u1; } #endif time = pop_time(); fprintf(stdout, "[adj: %.2lf ms] ", time); fflush(stdout); push_time(); adjsort(n, pos, adj); time = pop_time(); fprintf(stdout, "[adjsort: %.2lf ms] ", time); fflush(stdout); push_time(); #ifdef BUILD_PARALLEL #pragma omp parallel for #endif for(index_t u = 0; u < n; u++) { shade_map_t s = 0; for(index_t j = 0; j < k; j++) if(g->colors[u] == kk[j]) s |= 1UL << j; shade[u] = s; } time = pop_time(); fprintf(stdout, "[shade: %.2lf ms] ", time); fflush(stdout); time = pop_time(); fprintf(stdout, "done. [%.2lf ms] ", time); print_pop_memtrack(); fprintf(stdout, " "); print_current_mem(); fprintf(stdout, "\n"); fflush(stdout); return root; } /***************************************************** Input reader (ASCII). 
*/ void skipws(FILE *in) { int c; do { c = fgetc(in); if(c == '#') { do { c = fgetc(in); } while(c != EOF && c != '\n'); } } while(c != EOF && isspace(c)); if(c != EOF) ungetc(c, in); } #define CMD_NOP 0 #define CMD_TEST_UNIQUE 1 #define CMD_TEST_COUNT 2 #define CMD_RUN_ORACLE 3 #define CMD_LIST_FIRST 4 #define CMD_LIST_ALL 5 const char *cmd_legend[] = { "no operation", "test unique", "test count", "run oracle", "list first", "list all" }; void reader_ascii(FILE *in, graph_t **g_out, index_t *k_out, index_t **kk_out, index_t *cmd_out, index_t **cmd_args_out) { push_time(); push_memtrack(); index_t n = 0; index_t m = 0; graph_t *g = (graph_t *) 0; index_t i, j, d, k; index_t *kk = (index_t *) 0; index_t cmd = CMD_NOP; index_t *cmd_args = (index_t *) 0; skipws(in); while(!feof(in)) { skipws(in); int c = fgetc(in); switch(c) { case 'p': if(g != (graph_t *) 0) ERROR("duplicate parameter line"); skipws(in); if(fscanf(in, "motif %ld %ld", &n, &m) != 2) ERROR("invalid parameter line"); if(n <= 0 || m < 0) ERROR("invalid input parameters (n = %ld, m = %ld)", n, m); g = graph_alloc(n); break; case 'e': if(g == (graph_t *) 0) ERROR("parameter line must be given before edges"); skipws(in); if(fscanf(in, "%ld %ld", &i, &j) != 2) ERROR("invalid edge line"); if(i < 1 || i > n || j < 1 || j > n) ERROR("invalid edge (i = %ld, j = %ld with n = %ld)", i, j, n); graph_add_edge(g, i-1, j-1); break; case 'n': if(g == (graph_t *) 0) ERROR("parameter line must be given before vertex colors"); skipws(in); if(fscanf(in, "%ld %ld", &i, &d) != 2) ERROR("invalid color line"); if(i < 1 || i > n || d < 1) ERROR("invalid color line (i = %ld, d = %ld with n = %ld)", i, d, n); graph_set_color(g, i-1, d-1); break; case 'k': if(g == (graph_t *) 0) ERROR("parameter line must be given before motif"); skipws(in); if(fscanf(in, "%ld", &k) != 1) ERROR("invalid motif line"); if(k < 1 || k > n) ERROR("invalid motif line (k = %ld with n = %d)", k, n); kk = alloc_idxtab(k); for(index_t u = 0; u < k; u++) { skipws(in); if(fscanf(in, "%ld", &i) != 1) ERROR("error parsing motif line"); if(i < 1) ERROR("invalid color on motif line (i = %ld)", i); kk[u] = i-1; } break; case 't': if(g == (graph_t *) 0 || kk == (index_t *) 0) ERROR("parameter and motif lines must be given before test"); skipws(in); { char cmdstr[128]; if(fscanf(in, "%100s", cmdstr) != 1) ERROR("invalid test command"); if(!strcmp(cmdstr, "unique")) { cmd_args = alloc_idxtab(k); for(index_t u = 0; u < k; u++) { skipws(in); if(fscanf(in, "%ld", &i) != 1) ERROR("error parsing test line"); if(i < 1 || i > n) ERROR("invalid test line entry (i = %ld)", i); cmd_args[u] = i-1; } heapsort_indext(k, cmd_args); for(index_t u = 1; u < k; u++) if(cmd_args[u-1] >= cmd_args[u]) ERROR("test line contains duplicate entries"); cmd = CMD_TEST_UNIQUE; } else { if(!strcmp(cmdstr, "count")) { cmd_args = alloc_idxtab(1); skipws(in); if(fscanf(in, "%ld", &i) != 1) ERROR("error parsing test line"); if(i < 0) ERROR("count on test line cannot be negative"); cmd = CMD_TEST_COUNT; cmd_args[0] = i; } else { ERROR("unrecognized test command \"%s\"", cmdstr); } } } break; case EOF: break; default: ERROR("parse error"); } } if(g == (graph_t *) 0) ERROR("no graph given in input"); if(kk == (index_t *) 0) ERROR("no motif given in input"); for(index_t i = 0; i < n; i++) { if(g->colors[i] == -1) ERROR("no color assigned to vertex i = %ld", i); } double time = pop_time(); fprintf(stdout, "input: n = %ld, m = %ld, k = %ld [%.2lf ms] ", g->num_vertices, g->num_edges, k, time); print_pop_memtrack(); 
fprintf(stdout, " "); print_current_mem(); fprintf(stdout, "\n"); *g_out = g; *k_out = k; *kk_out = kk; *cmd_out = cmd; *cmd_args_out = cmd_args; } /**************************************************** Input reader (binary). */ #define BIN_MAGIC 0x1234567890ABCDEFUL void reader_bin(FILE *in, graph_t **g_out, index_t *k_out, index_t **kk_out, index_t *cmd_out, index_t **cmd_args_out) { push_time(); push_memtrack(); index_t magic = 0; index_t n = 0; index_t m = 0; graph_t *g = (graph_t *) 0; index_t k = 0; index_t has_target = 0; index_t *kk = (index_t *) 0; index_t cmd = CMD_NOP; index_t *cmd_args = (index_t *) 0; if(fread(&magic, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); if(magic != BIN_MAGIC) ERROR("error reading input"); if(fread(&n, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); if(fread(&m, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); assert(n >= 0 && m >= 0 && m%2 == 0); g = graph_alloc(n); index_t *e = graph_edgebuf(g, m/2); if(fread(e, sizeof(index_t), m, in) != m) ERROR("error reading input"); if(fread(g->colors, sizeof(index_t), n, in) != n) ERROR("error reading input"); if(fread(&has_target, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); assert(has_target == 0 || has_target == 1); if(has_target) { if(fread(&k, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); assert(k >= 0); kk = alloc_idxtab(k); if(fread(kk, sizeof(index_t), k, in) != k) ERROR("error reading input"); if(fread(&cmd, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); switch(cmd) { case CMD_NOP: break; case CMD_TEST_UNIQUE: cmd_args = alloc_idxtab(k); if(fread(cmd_args, sizeof(index_t), k, in) != k) ERROR("error reading input"); shellsort(k, cmd_args); break; case CMD_TEST_COUNT: cmd_args = alloc_idxtab(1); if(fread(cmd_args, sizeof(index_t), 1UL, in) != 1UL) ERROR("error reading input"); break; default: ERROR("invalid command in binary input stream"); break; } } double time = pop_time(); fprintf(stdout, "input: n = %ld, m = %ld, k = %ld [%.2lf ms] ", g->num_vertices, g->num_edges, k, time); print_pop_memtrack(); fprintf(stdout, " "); print_current_mem(); fprintf(stdout, "\n"); *g_out = g; *k_out = k; *kk_out = kk; *cmd_out = cmd; *cmd_args_out = cmd_args; } /****************************************************** Program entry point. 
*/ int main(int argc, char **argv) { push_time(); push_memtrack(); index_t arg_cmd = CMD_NOP; index_t have_seed = 0; index_t seed = 123456789; for(index_t f = 1; f < argc; f++) { if(argv[f][0] == '-') { if(!strcmp(argv[f], "-bin")) { flag_bin_input = 1; } if(!strcmp(argv[f], "-ascii")) { flag_bin_input = 0; } if(!strcmp(argv[f], "-oracle")) { arg_cmd = CMD_RUN_ORACLE; } if(!strcmp(argv[f], "-first")) { arg_cmd = CMD_LIST_FIRST; } if(!strcmp(argv[f], "-all")) { arg_cmd = CMD_LIST_ALL; } if(!strcmp(argv[f], "-seed")) { if(f == argc - 1) ERROR("random seed missing from command line"); seed = atol(argv[++f]); have_seed = 1; } if(!strcmp(argv[f], "-devices")) { if(f == argc - 1) ERROR("number of devices missing from command line"); devices = atol(argv[++f]); have_devices = 1; } } } fprintf(stdout, "invoked as:"); for(index_t f = 0; f < argc; f++) fprintf(stdout, " %s", argv[f]); fprintf(stdout, "\n"); if(have_seed == 0) { fprintf(stdout, "no random seed given, defaulting to %ld\n", seed); } fprintf(stdout, "random seed = %ld\n", seed); srand(seed); graph_t *g; index_t k; index_t *kk; index_t input_cmd; index_t *cmd_args; if(flag_bin_input) { reader_bin(stdin, &g, &k, &kk, &input_cmd, &cmd_args); } else { reader_ascii(stdin, &g, &k, &kk, &input_cmd, &cmd_args); } index_t cmd = input_cmd; // by default execute command in input stream if(arg_cmd != CMD_NOP) cmd = arg_cmd; // override command in input stream motifq_t *root = root_build(g, k, kk); graph_free(g); FREE(kk); fprintf(stdout, "command: %s\n", cmd_legend[cmd]); fflush(stdout); push_time(); switch(cmd) { case CMD_NOP: motifq_free(root); break; case CMD_TEST_UNIQUE: { index_t n = root->n; index_t k = root->k; lister_t *t = lister_alloc(n, k, root); index_t *get = alloc_idxtab(k); index_t ct = 0; while(lister_run(t, get)) { assert(ct == 0); fprintf(stdout, "found %ld: ", ct); for(index_t i = 0; i < k; i++) fprintf(stdout, "%ld%s", get[i], i == k-1 ? "\n" : " "); for(index_t l = 0; l < k; l++) assert(get[l] == cmd_args[l]); ct++; } assert(ct == 1); FREE(get); lister_free(t); } break; case CMD_LIST_FIRST: case CMD_LIST_ALL: case CMD_TEST_COUNT: { index_t n = root->n; index_t k = root->k; lister_t *t = lister_alloc(n, k, root); index_t *get = alloc_idxtab(k); index_t ct = 0; while(lister_run(t, get)) { fprintf(stdout, "found %ld: ", ct); for(index_t i = 0; i < k; i++) fprintf(stdout, "%ld%s", get[i], i == k-1 ? "\n" : " "); ct++; if(cmd == CMD_LIST_FIRST) break; } if(cmd == CMD_TEST_COUNT) { fprintf(stdout, "count = %ld, target = %ld\n", ct, cmd_args[0]); assert(ct == cmd_args[0]); } FREE(get); lister_free(t); } break; case CMD_RUN_ORACLE: if(motifq_execute(root)) { index_t support_size = 0; assert(!root->is_stub); scalar_t *master_vsum = root->vsum; for(index_t i = 0; i < root->n; i++) { if(master_vsum[i] != 0) { support_size++; } } fprintf(stdout, " -- true [%ld]\n", support_size); } else { fprintf(stdout, " -- false [0]\n"); } motifq_free(root); break; default: assert(0); break; } double time = pop_time(); fprintf(stdout, "command done [%.2lf ms]\n", time); if(input_cmd != CMD_NOP) FREE(cmd_args); time = pop_time(); fprintf(stdout, "grand total [%.2lf ms] ", time); print_pop_memtrack(); fprintf(stdout, "\n"); fprintf(stdout, "host: %s\n", sysdep_hostname()); fprintf(stdout, "build: %s\n", LINE_TYPE); fprintf(stdout, "compiler: gcc %d.%d.%d\n", __GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__); fflush(stdout); assert(malloc_balance == 0); assert(memtrack_stack_top < 0); return 0; }
10e2312463b3c6954a2c8a0ceb35e461c7f311fd.hip
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/ForeachFunctors.cuh>

namespace at { namespace native {

template<template<class> class Op>
std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    std::vector<at::Tensor> vec_res;
    for (const auto& t: input) {
        vec_res.emplace_back(at::native::empty_like(t));
    }

    tensor_lists.emplace_back(input.vec());
    tensor_lists.emplace_back(tensors1.vec());
    tensor_lists.emplace_back(tensors2.vec());
    tensor_lists.emplace_back(std::move(vec_res));

    AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() {
        multi_tensor_apply<4>(tensor_lists, PointwiseOpFunctor<scalar_t, Op>(), scalar.to<scalar_t>());
    });

    return tensor_lists[3];
}

template<template<class> class Op>
void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    tensor_lists.emplace_back(input.vec());
    tensor_lists.emplace_back(tensors1.vec());
    tensor_lists.emplace_back(tensors2.vec());

    AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() {
        multi_tensor_apply<3>(tensor_lists, PointwiseOpFunctor_<scalar_t, Op>(), scalar.to<scalar_t>());
    });
}

#define FOREACH_UNARY_OP(NAME, OP) \
std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) { \
    TORCH_CHECK(input.size() > 0, "Tensor list must have at least one tensor."); \
    TORCH_CHECK(input.size() == tensors1.size(), "Tensor lists must be of the same length."); \
    TORCH_CHECK(tensors1.size() == tensors2.size(), "Tensor lists must be of the same length."); \
 \
    if (!can_use_fast_route(input, scalar) || \
        !can_use_fast_route(tensors1, tensors2) || \
        !can_use_fast_route(input, tensors1)) { \
        return at::native::foreach_tensor_##NAME##_slow(input, tensors1, tensors2, scalar); \
    } \
 \
    return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalar); \
} \
 \
void foreach_tensor_##NAME##_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) { \
    TORCH_CHECK(input.size() > 0, "Tensor list must have at least one tensor."); \
    TORCH_CHECK(input.size() == tensors1.size(), "Tensor lists must be of the same length."); \
    TORCH_CHECK(tensors1.size() == tensors2.size(), "Tensor lists must be of the same length."); \
 \
    if (!can_use_fast_route(input, scalar) || \
        !can_use_fast_route(tensors1, tensors2) || \
        !can_use_fast_route(input, tensors1)) { \
        return at::native::foreach_tensor_##NAME##_slow_(input, tensors1, tensors2, scalar); \
    } \
 \
    foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalar); \
}

FOREACH_UNARY_OP(addcmul, std::multiplies);
FOREACH_UNARY_OP(addcdiv, std::divides);

}} // namespace at::native
10e2312463b3c6954a2c8a0ceb35e461c7f311fd.cu
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>

namespace at { namespace native {

template<template<class> class Op>
std::vector<Tensor> foreach_pointwise_op(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    std::vector<at::Tensor> vec_res;
    for (const auto& t: input) {
        vec_res.emplace_back(at::native::empty_like(t));
    }

    tensor_lists.emplace_back(input.vec());
    tensor_lists.emplace_back(tensors1.vec());
    tensor_lists.emplace_back(tensors2.vec());
    tensor_lists.emplace_back(std::move(vec_res));

    AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op_cuda", [&]() {
        multi_tensor_apply<4>(tensor_lists, PointwiseOpFunctor<scalar_t, Op>(), scalar.to<scalar_t>());
    });

    return tensor_lists[3];
}

template<template<class> class Op>
void foreach_pointwise_op_(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) {
    std::vector<std::vector<at::Tensor>> tensor_lists;
    tensor_lists.emplace_back(input.vec());
    tensor_lists.emplace_back(tensors1.vec());
    tensor_lists.emplace_back(tensors2.vec());

    AT_DISPATCH_ALL_TYPES_AND(kHalf, input[0].scalar_type(), "foreach_pointwise_op__cuda", [&]() {
        multi_tensor_apply<3>(tensor_lists, PointwiseOpFunctor_<scalar_t, Op>(), scalar.to<scalar_t>());
    });
}

#define FOREACH_UNARY_OP(NAME, OP) \
std::vector<Tensor> foreach_tensor_##NAME##_cuda(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) { \
    TORCH_CHECK(input.size() > 0, "Tensor list must have at least one tensor."); \
    TORCH_CHECK(input.size() == tensors1.size(), "Tensor lists must be of the same length."); \
    TORCH_CHECK(tensors1.size() == tensors2.size(), "Tensor lists must be of the same length."); \
 \
    if (!can_use_fast_route(input, scalar) || \
        !can_use_fast_route(tensors1, tensors2) || \
        !can_use_fast_route(input, tensors1)) { \
        return at::native::foreach_tensor_##NAME##_slow(input, tensors1, tensors2, scalar); \
    } \
 \
    return foreach_pointwise_op<OP>(input, tensors1, tensors2, scalar); \
} \
 \
void foreach_tensor_##NAME##_cuda_(TensorList input, TensorList tensors1, TensorList tensors2, Scalar scalar) { \
    TORCH_CHECK(input.size() > 0, "Tensor list must have at least one tensor."); \
    TORCH_CHECK(input.size() == tensors1.size(), "Tensor lists must be of the same length."); \
    TORCH_CHECK(tensors1.size() == tensors2.size(), "Tensor lists must be of the same length."); \
 \
    if (!can_use_fast_route(input, scalar) || \
        !can_use_fast_route(tensors1, tensors2) || \
        !can_use_fast_route(input, tensors1)) { \
        return at::native::foreach_tensor_##NAME##_slow_(input, tensors1, tensors2, scalar); \
    } \
 \
    foreach_pointwise_op_<OP>(input, tensors1, tensors2, scalar); \
}

FOREACH_UNARY_OP(addcmul, std::multiplies);
FOREACH_UNARY_OP(addcdiv, std::divides);

}} // namespace at::native
f1ae7e5d61de0de7c803a98bdbd40c65fbdddcce.hip
// !!! This is a file automatically generated by hipify!!!
//=========================================
// Kernel code for computing Wilson loops on the device
//=========================================

#include <builtin_types.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "intT.h"
#include <math.h>

extern "C"
void EVMean(double *EV, float4 *Wsscal_h, float4 *Wsferm_h, int n, int *WL, double T, int fermion);

#if profile == fixflux
#define THREADS_PER_BLOCK 128
#define MY_KERNEL_MAX_THREADS THREADS_PER_BLOCK
#define MY_KERNEL_MIN_BLOCKS 4
#else
#define THREADS_PER_BLOCK 256
#if __CUDA_ARCH__ >= 200
#define MY_KERNEL_MAX_THREADS (2 * THREADS_PER_BLOCK)
#define MY_KERNEL_MIN_BLOCKS 3
#else
#define MY_KERNEL_MAX_THREADS (2 * THREADS_PER_BLOCK)
#define MY_KERNEL_MIN_BLOCKS 2
#endif
#endif

extern "C"
__device__ float expint(const float x)
//Evaluates the exponential integral Ei(x)=-E1(-x) assuming x<-1.
//This algorithm is an abbreviated version of Numerical Recipes expint().
//See Chapter 6 on Special Functions. We assume (x > 1.0) and include only
//the relevant code. This bit of code is Lentz's algorithm (section 5.2 of NR).
{
  const int MAXIT = 400;
  const float EPS = 1.0e-6;
  const float BIG = 1.0e10;
  int i;
  float a, b, c, d, del, h, ans;

  b = -x + 1.0f;
  c = BIG;
  d = 1.0f/b;
  h = d;
  for (i = 1; i <= MAXIT; i++) {
    a = -(float)i*(float)i;
    b += 2.0f;
    d = 1.0f/(a*d+b); //Denominators cannot be zero
    c = b + a/c;
    del = c*d;
    h *= del;
    if (fabsf(del-1.0f) <= EPS) {
      ans = -h*__expf(x);
      return ans;
    }
  }
  return 0.0f;
}

extern "C"
__host__ __device__ float interp(float rho2, float *rho2sp, float4 *coefs)
//interpolation function for periodic spline profile
{
  int j;
  int upperi = Nspline-1, loweri=0;
  float rho2diff = 0.0f;
  float flambda;

#ifndef __CUDA_ARCH__
#warning __CUDA_ARCH__ Undefined!
  //printf("Printf error\n");
#else
#warning __CUDA_ARCH__ defined!
#endif

  //Discover which interval to look in using a binary search
  if(rho2 < rho2sp[Nspline-1] && rho2 > rho2sp[0])
  // if(0)
  {
    while(upperi-loweri > 1) {
      if(rho2 >= rho2sp[(upperi+loweri)/2])
        loweri=(upperi+loweri)/2;
      else
        upperi = (upperi+loweri)/2;
    }
    //interpolate using the jth interval
    j = loweri;
    rho2diff = rho2-rho2sp[j];
    //rho2diff=0.0;
    flambda = coefs[j].x+rho2diff*(coefs[j].y+rho2diff*(coefs[j].z+rho2diff*coefs[j].w));
  }
  else {
    flambda=0.0f;
  }
  //*flambda= coefs[j].x+rho2diff*(coefs[j].y+rho2diff*(coefs[j].z+rho2diff*coefs[j].w));
  //*fprime = coefs[j].y+rho2diff*(2.0f*coefs[j].z + rho2diff*(3.0f*coefs[j].w));
  //*flambda=1.0f-exp(-1.0f*rho2);
  //*fprime=1.0f*exp(-1.0f*rho2);
  return flambda;
}

extern "C"
__host__ double EV (double T, void * p, int* WLlist)
{
  //Function for calling the Kernel, then computing the
  //Expectation value from the results of each worldline
  struct Wparams params = *(struct Wparams *) p;
  double EV;
  const int groupsize = 128;
  double rtT = sqrt((double)T);
  hipError_t errorcode;

  // call to integrate the function func
  if( verbosity >= 5) printf("call to CUDA device\n");

  hipLaunchKernelGGL(( ExpectValue), dim3(params.nBlocks), dim3(params.nThreads), 0, 0,
      params.Wsscal_d, params.Wsferm_d, params.worldlines,params.xcm,
      (float)params.F, (float)params.l2, (float)rtT, params.Nl, params.Nppl,
      params.flcoefs, params.rho2sp, params.fermion);

  errorcode = hipGetLastError();
  if ( errorcode>0)
    printf("cuda getLastError EV(): %s\n", hipGetErrorString(errorcode));

  if (verbosity >= 6) printf("return from CUDA\n");

  //Copy device memory back to host
  errorcode = hipMemcpy(params.Wsscal_h, params.Wsscal_d, params.Nl*sizeof(params.Wsscal_h[0]), hipMemcpyDeviceToHost);
  if(errorcode > 0)
    printf("cuda memcpy scal Error EV(): %s\n", hipGetErrorString(errorcode));
  if(params.fermion == 1) {
    errorcode = hipMemcpy(params.Wsferm_h, params.Wsferm_d, params.Nl*sizeof(params.Wsferm_h[0]), hipMemcpyDeviceToHost);
    if(errorcode > 0)
      printf("cuda memcpy ferm Error EV(): %s\n",hipGetErrorString(errorcode));
  }

  //Compute the expectation value from the Wilson Loop data
  EV = 0.0;
  EVMean(&EV, params.Wsscal_h, params.Wsferm_h, groupsize,WLlist, T, params.fermion);
  //printf("EV=%f\n",EV);
  //result=exp(-m2*T)/(T*T*T)*(EV-1.0+1.0/6.0*TB*TB);
  //*SEout=(double) exp(-m2*T)/(T*T*T)*SE;
  //printf("%f %f \n",T,result);

  return EV;
}

extern "C"
__device__ float bump(const float x)
//Device version of the bump function
{
  //the 0.999 makes no numerical difference compared to 1.0
  //but seems to prevent some unpredictable, unspecified CUDA errors
  if(x*x < 0.999f)
    return __expf( -1.0f/(1.0f-x*x) );
  else
    return 0.0f;
}

extern "C"
__device__ float phi(const float x)
//Computes the \Phi function which is defined in the thesis
{
  const float onemx2 = 1.0f-x*x;

  //the 0.999 makes no numerical difference compared to 1.0
  //but seems to prevent some unpredictable, unspecified CUDA errors
  if( x < 0.999f ) {
    return 1.0f - ( 0.5f/q2 )*( onemx2*__expf(-1.0f/onemx2) + expint(-1.0f/onemx2) );
  }
  else
    return 1.0f;
}

extern "C"
__device__ float chi(const float x, const float n, const float lambda)
//Computes the \Chi function which is defined in the thesis
{
  float ans;
  const float onemx2 = 1.0f-x*x;
  const float x2 = x*x;
  const float x4 = x2*x2;

  if(x <= -0.999f)
    ans = 0.0f;
  else if(x2 < 0.999f) {
    ans = 0.5f*(-onemx2*__expf(-1.0f/onemx2) - expint(-1.0f/onemx2));
    ans += 2.0f*n*tubedist/lambda*
      0.444f/( 1.0f + __expf(-(3.31f*x + 5.25f*x*x2*sin(x)*cos( -0.907f*x2 - 1.29f*x4*x4 ))/cos(x)) );
    //an older, less precise approximation of the integral
    //ans += 2.0f*n*tubedist/lambda*
    //  (0.218f+0.393f*x*coshf(0.806f*x4-0.696f*x2+0.0902f)/coshf(0.825f*x2-0.0234f*x+0.375f));
  }
  else //for x >= 1.0f
    ans = 2.0f*n*tubedist*q1/lambda;

  return ans;
}

extern "C"
__device__ float flperi(const float rho2, const float lambda2)
//f_lambda for periodic flux tube profile
{
  float expr, exprp1, exprap1, rho, lambda, flraf, f;
  int N = 2;
  int i;

  rho = sqrtf(rho2);
  lambda = sqrtf(lambda2);
  flraf = floorf(rho/tubedist);
  f = 0.0f;

  if((int)flraf-N > 0) {
    f=0.5f*(flraf-(float)N-1.0f)*(flraf-(float)N);
  }
  for(i = (int)flraf-N; i <= (int)flraf+N; i++) {
    if(i == 0) {
      expr = __expf(-rho/lambda);
      exprp1 = 1.0f+expr;
      f = -rho/tubedist*(expr/exprp1) - lambda/tubedist*log(exprp1);
    }
    else if(i > 0) {
      exprap1 = 1.0f+__expf(-(rho-(float)i*tubedist)/lambda);
      f = f+(float)i/exprap1;
    }
  }

  return tubedist/(lambda*log(2.0f))*f + 1.0f;
}

extern "C"
__device__ float ffixflux(const float rho2, const float lambda2)
//f_lambda for the fixflux profile
{
  const float lam = sqrtf(lambda2);
  const float lmlmin = (lam-lmin)/(tubedist-lmin);
  const float aml = (tubedist-lam)/(tubedist-lmin);
  const float n = floorf((sqrt(rho2)+tubedist/2.0f)/tubedist);
  float ans;

  if(rho2 <= tubedist*tubedist/4.0f) {
    ans = (1.0f-0.75f*lmlmin)*phi(2.0f*sqrt(rho2/lambda2)) + 3.0f*rho2/(tubedist*tubedist)*lmlmin;
  }
  else {
    ans = 1.0f+0.75f*(4.0f*rho2/(tubedist*tubedist)-1.0f)*lmlmin + 3.0f*n*(n-1.0f)*aml;
    ans += 3.0f*lam/(q1*tubedist)*aml*chi(2.0f*(sqrt(rho2)-n*tubedist)/lam, n, lam);
    //ans = 1.01f;
  }

  return ans;
}

extern "C"
__device__ float flambda(float rho2, float lambda2, float4 *flcoefs, float *rho2sp)
//flambda(rho^2,lambda^2) defines the magnetic vector potential in cylindrical coordinates
{
  float f;

  switch(profile){
  case step:
    if(rho2 < lambda2)
      f = rho2/lambda2;
    else
      f = 1.0f;
    break;
  case smooth:
    f = rho2/(lambda2+rho2);
    break;
  case quadratic:
    if(rho2 < lambda2)
      f = rho2/lambda2*(2.0f-rho2/lambda2);
    else
      f = 1.0f;
    break;
  case gaussian:
    f = 1.0f-exp(-rho2/lambda2);
    break;
  case periodic:
    f = flperi(rho2, lambda2);
    break;
  case spline:
    f = interp(rho2, rho2sp, flcoefs);
    break;
  case fixflux:
    f = ffixflux(rho2, lambda2);
    break;
  }

  return f;
  //return rho2/lambda2;
}

extern "C"
__device__ float fplperi(float rho2, float lambda2)
//f'_lambda for periodic profile
{
  float expr,exprp1,rho,lambda,flraf,f;
  float expral, expralp1;
  int N = 2;
  int i;

  rho = sqrtf(rho2);
  lambda = sqrtf(lambda2);
  flraf = floorf(rho/tubedist);
  f=0.0f;

  for(i = (int)flraf-N; i <= (int)flraf+N; i++) {
    if(i == 0) {
      expr = __expf(-rho/lambda);
      exprp1 = 1.0f+expr;
      f = expr/(tubedist*exprp1*exprp1);
    }
    else if(i > 0) {
      expral = __expf(-(rho-(float)i*tubedist)/lambda);
      expralp1 = 1.0f+expral;
      f = f +((float)i/rho)*expral/(expralp1*expralp1);
    }
  }

  return tubedist/(2.0f*lambda2*log(2.0f))*f;
}

extern "C"
__device__ float fpfixflux(const float rho2, const float lambda2)
//f'_lambda(rho^2) for the fixflux field profile
{
  const float lam = sqrtf(lambda2);
  const float lmlmin = (lam-lmin)/(tubedist-lmin);
  const float rho = sqrtf(rho2);
  const float n = floorf((rho+tubedist/2.0f)/tubedist);
  const float a2 = tubedist*tubedist;
  const float aml = (tubedist - lam)/(tubedist - lmin);
  float ans;

  if(rho <= tubedist/2.0f) {
    ans = 2.0f/(lambda2*q2)*(1.0f-0.75f*lmlmin)*bump(2.0f*rho/lam);
    ans += 3.0f/a2*lmlmin;
  }
  else {
    ans = 3.0f/a2*lmlmin + 6.0f/(q1*lam*tubedist)*aml*bump(2.0f*(rho-n*tubedist)/lam);
    //ans = 3.0f/(tubedist*tubedist)*lmlmin + 6.0f/(q1*lam*tubedist)*
    //  (tubedist-lam)/(tubedist-lmin)*bump(2.0f*(rho-n*tubedist)/lam);
  }
  //ans = 1.0f;

  return ans;
}

extern "C"
__device__ float fplambda(const float rho2, const float lambda2, float4 *flcoefs, float *rho2sp)
//fplambda(rho^2,lambda^2) defines the magnetic vector potential derivative
//  wrt rho^2 in cylindrical coordinates
{
  float f, fjunk;

  f=1.0f;
  switch(profile){
  case step:
    if(rho2 < lambda2)
      f = 1.0f/lambda2;
    else
      f = 0.0f;
    break;
  case smooth:
    f = lambda2/((lambda2+rho2)*(lambda2+rho2));
    break;
  case quadratic:
    if(rho2<lambda2)
      f = 2.0f/lambda2*(1.0f-rho2/lambda2);
    else
      f = 0.0f;
    break;
  case gaussian:
    f = 1.0f/lambda2*exp(-1.0f*rho2/lambda2);
    break;
  case periodic:
    f = fplperi(rho2, lambda2);
    break;
  case spline:
    fjunk = __sinf(sqrtf(rho2)*pi);
    if(fjunk>100.0f)
      f = 0.0f;
    else {
      f = __expf(-1.0f*fjunk*fjunk/lambda2)/lambda2;
      //f=1.0f/lambda2*exp(-1.0f*rho2/lambda2);
    }
    break;
  case fixflux:
    f = fpfixflux(rho2, lambda2);
    break;
  }

  return f;
  //return 1.0f/lambda2;
  //return lambda2/((lambda2+rho2)*(lambda2+rho2));
}

extern "C"
__device__ void Idt(float *scalI, float *fermI, float4 Ai, const float l2, float4 *flcoefs, float *rho2sp, int fermion)
//Computes the integral over t from 0 to 1 in the scalar and fermion Wilson loop factors
{
  int i;
  const int n = 50;                 //number of points in point-to-point proper time integral
  float t, rhoi2;                   //proper time and rho squared
  const float h = 1.0f/((float) n); //distance between points in integral
  float4 xiscal, xiferm;            //scalar and fermi integrands

  if (Ai.x<1.0e-8) Ai.x = 1.0e-8;
  if (Ai.y<1.0e-8) Ai.y = 1.0e-8;
  if (Ai.z<1.0e-8) Ai.z = 1.0e-8;

  float Aip1 = Ai.x+2.0f*Ai.y+Ai.z; //rho^2 for the final point
  if(Aip1<1.0e-8) Aip1 = 1.0e-8;
  //if(profile == periodic && Aip1 > 10.0f*tubedist) Aip1 = 1.0e-8;

  //Begin the Simpson's method algorithm
  xiscal.x = flambda(Ai.x,l2,flcoefs,rho2sp)/Ai.x + flambda(Aip1,l2,flcoefs,rho2sp)/Aip1;
  xiscal.y = 0.0f;
  xiscal.z = 0.0f;
  if (fermion == 1) {
    xiferm.x = fplambda(Ai.x, l2, flcoefs, rho2sp) + fplambda(Aip1, l2, flcoefs, rho2sp);
    xiferm.y = 0.0f;
    xiferm.z = 0.0f;
  }
  for(i = 1; i < n; i++) {
    t = (float)i*h;
    //rho2 at the point
    rhoi2 = Ai.x + 2.0f*Ai.y*t + Ai.z*t*t;
    if(rhoi2 < 1.0e-10) rhoi2 = 1.0e-10;
    //if(profile == periodic && rhoi2 > 10.0f*tubedist) rhoi2 = 1.0e-8;
    if(i%2==0) {
      xiscal.z += flambda(rhoi2, l2, flcoefs, rho2sp)/rhoi2;
      if(fermion == 1)
        xiferm.z += fplambda(rhoi2, l2, flcoefs, rho2sp);
    }
    else {
      xiscal.y += flambda(rhoi2, l2, flcoefs, rho2sp)/rhoi2;
      if(fermion == 1)
        xiferm.y += fplambda(rhoi2, l2, flcoefs, rho2sp);
    }
  }

  *scalI = (xiscal.x + 2.0f*xiscal.z + 4.0f*xiscal.y)*h/3.0f;
  if(fermion == 1)
    *fermI = (xiferm.x + 2.0f*xiferm.z + 4.0f*xiferm.y)*h/3.0f;
  //*fermI=1.0f/l2;
}

extern "C"
__device__ void getzp1(float4 *zip1, float4 *worldlines, float rtT, float4 xcm, int i, int inx, int Nppl)
//Function for determining the next point on the
//worldline loop for each of the sub loops
{
  int inxp1;

  //get the next worldline index for the N/2 group
  if(i%2 == 1){
    if(i == Nppl-1) {
      inxp1 = inx*Nppl+1;
    }
    else {
      inxp1 = inx*Nppl+i+2;
    }
  }
  //get the next worldline index for the first N/4 group
  else if(i%4 == 0){
    if(i == Nppl-4) {
      inxp1 = inx*Nppl;
    }
    else {
      inxp1 = inx*Nppl+i+4;
    }
  }
  //get the next worldline index for the second N/4 group
  else if((i-2)%2 == 0){
    if(i == Nppl-2) {
      inxp1 = inx*Nppl+2;
    }
    else {
      inxp1 = inx*Nppl+i+4;
    }
  }

  //compute the next point
  zip1->x = xcm.x + rtT*worldlines[inxp1].x;
  zip1->y = xcm.y + rtT*worldlines[inxp1].y;
  zip1->z = xcm.z + rtT*worldlines[inxp1].z;
}

extern "C"
__device__ void WilsonLoop(float4 *worldlines, float4 *Wsscal, float4 *Wsferm, float4 xcm, int inx, float F, float l2,
                           float rtT, int Nppl, float4 *flcoefs, float *rho2sp, int fermion)
//Returns the Wilson loop value
{
  int i;
  //const float e = 1.0;
  float4 WLstemp, WLftemp;
  float4 zi, zip1;
  float4 Ai;
  float xyyx;
  float scalI, fermI;

  //Compute the scalar contribution
  WLstemp.x = 0.0f;
  WLstemp.y = 0.0f;
  WLstemp.z = 0.0f;
  WLftemp.x = 0.0f;
  WLftemp.y = 0.0f;
  WLftemp.z = 0.0f;
  for(i = 0; i < Nppl; i++){
    //Compute the scaled, shifted coordinate
    zi.x = xcm.x + rtT*worldlines[inx*Nppl+i].x;
    zi.y = xcm.y + rtT*worldlines[inx*Nppl+i].y;
    getzp1(&zip1, worldlines, rtT, xcm, i, inx, Nppl);

    //Ai Bi and Ci coefficients for the rho2 polynomial
    Ai.x = zi.x*zi.x + zi.y*zi.y;
    Ai.y = zi.x*(zip1.x-zi.x)+zi.y*(zip1.y-zi.y);
    Ai.z = (zip1.x-zi.x)*(zip1.x-zi.x) + (zip1.y-zi.y)*(zip1.y-zi.y);

    Idt(&scalI, &fermI, Ai, l2, flcoefs, rho2sp, fermion);
    //scalI=1.0f/l2;

    //Compute the contribution to the N/2 integral
    xyyx = (zi.x*zip1.y-zi.y*zip1.x);
    if(i%2 == 1){
      WLstemp.x += xyyx*scalI;
      WLftemp.x += fermI;
    }
    //Compute the contribution to the first N/4 integral
    else if(i%4 == 0){
      WLstemp.z += xyyx*scalI;
      WLftemp.z += fermI;
    }
    //Compute the contribution to the second N/4 integral
    else if((i-2)%2 == 0){
      WLstemp.y += xyyx*scalI;
      WLftemp.y += fermI;
    }
  }

  Wsscal[inx].x = F*WLstemp.x;
  Wsscal[inx].y = F*WLstemp.y;
  Wsscal[inx].z = F*WLstemp.z;
  if( fermion == 1) {
    Wsferm[inx].x = 2.0f*F*WLftemp.x*rtT*rtT/(Nppl/2.0f);
    Wsferm[inx].y = 2.0f*F*WLftemp.y*rtT*rtT/(Nppl/4.0f);
    Wsferm[inx].z = 2.0f*F*WLftemp.z*rtT*rtT/(Nppl/4.0f);
  }
  //Wsferm[inx].x=2.0f*F/l2*rtT*rtT;
  //Wsferm[inx].y=2.0f*F/l2*rtT*rtT;
  //Wsferm[inx].z=2.0f*F/l2*rtT*rtT;
  //Wsferm[inx].x=1.0f;
  //Wsferm[inx].y=1.0f;
  //Wsferm[inx].z=1.0f;
}

__global__ void
__launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS)
ExpectValue(float4 *Wsscal, float4 *Wsferm, float4 *worldlines, float4 xcm, float F, float l2, float rtT,
            int Nl, int Nppl, float4 *flcoefs, float *rho2sp, int fermion)
//Each thread computes the Wilson loop value for a single
//worldline
{
  int inx = blockIdx.x * blockDim.x + threadIdx.x;

  WilsonLoop(worldlines, Wsscal, Wsferm, xcm, inx, F, l2, rtT, Nppl, flcoefs, rho2sp, fermion);
}
f1ae7e5d61de0de7c803a98bdbd40c65fbdddcce.cu
//========================================= // Kernel code for computing Wilson loops on the device //========================================= #include <builtin_types.h> #include <cuda.h> #include <stdio.h> #include "intT.h" #include <math.h> extern "C" void EVMean(double *EV, float4 *Wsscal_h, float4 *Wsferm_h, int n, int *WL, double T, int fermion); #if profile == fixflux #define THREADS_PER_BLOCK 128 #define MY_KERNEL_MAX_THREADS THREADS_PER_BLOCK #define MY_KERNEL_MIN_BLOCKS 4 #else #define THREADS_PER_BLOCK 256 #if __CUDA_ARCH__ >= 200 #define MY_KERNEL_MAX_THREADS (2 * THREADS_PER_BLOCK) #define MY_KERNEL_MIN_BLOCKS 3 #else #define MY_KERNEL_MAX_THREADS (2 * THREADS_PER_BLOCK) #define MY_KERNEL_MIN_BLOCKS 2 #endif #endif extern "C" __device__ float expint(const float x) //Evaluates the exponential integral Ei(x)=-E1(-x) assuming x<-1. //This algorithm is an abbreviated version of Numerical Recipes expint(). //See Chapter 6 on Special Functions. We assume (x > 1.0) and include only //the relevant code. This bit of code is Lentz's algorithm (section 5.2 of NR). { const int MAXIT = 400; const float EPS = 1.0e-6; const float BIG = 1.0e10; int i; float a, b, c, d, del, h, ans; b = -x + 1.0f; c = BIG; d = 1.0f/b; h = d; for (i = 1; i <= MAXIT; i++) { a = -(float)i*(float)i; b += 2.0f; d = 1.0f/(a*d+b); //Denominators cannot be zero c = b + a/c; del = c*d; h *= del; if (fabsf(del-1.0f) <= EPS) { ans = -h*__expf(x); return ans; } } return 0.0f; } extern "C" __host__ __device__ float interp(float rho2, float *rho2sp, float4 *coefs) //interpolation function for periodic spline profile { int j; int upperi = Nspline-1, loweri=0; float rho2diff = 0.0f; float flambda; #ifndef __CUDA_ARCH__ #warning __CUDA_ARCH__ Undefined! //printf("Printf error\n"); #else #warning __CUDA_ARCH__ defined! 
#endif //Discover which interval to look in using a binary search if(rho2 < rho2sp[Nspline-1] && rho2 > rho2sp[0]) // if(0) { while(upperi-loweri > 1) { if(rho2 >= rho2sp[(upperi+loweri)/2]) loweri=(upperi+loweri)/2; else upperi = (upperi+loweri)/2; } //interpolate using the jth interval j = loweri; rho2diff = rho2-rho2sp[j]; //rho2diff=0.0; flambda = coefs[j].x+rho2diff*(coefs[j].y+rho2diff*(coefs[j].z+rho2diff*coefs[j].w)); } else { flambda=0.0f; } //*flambda= coefs[j].x+rho2diff*(coefs[j].y+rho2diff*(coefs[j].z+rho2diff*coefs[j].w)); //*fprime = coefs[j].y+rho2diff*(2.0f*coefs[j].z + rho2diff*(3.0f*coefs[j].w)); //*flambda=1.0f-exp(-1.0f*rho2); //*fprime=1.0f*exp(-1.0f*rho2); return flambda; } extern "C" __host__ double EV (double T, void * p, int* WLlist) { //Function for calling the Kernel, then computing the //Expectation value from the results of each worldline struct Wparams params = *(struct Wparams *) p; double EV; const int groupsize = 128; double rtT = sqrt((double)T); cudaError_t errorcode; // call to integrate the function func if( verbosity >= 5) printf("call to CUDA device\n"); ExpectValue<<<params.nBlocks, params.nThreads>>>(params.Wsscal_d, params.Wsferm_d, params.worldlines,params.xcm, (float)params.F, (float)params.l2, (float)rtT, params.Nl, params.Nppl, params.flcoefs, params.rho2sp, params.fermion); errorcode = cudaGetLastError(); if ( errorcode>0) printf("cuda getLastError EV(): %s\n", cudaGetErrorString(errorcode)); if (verbosity >= 6) printf("return from CUDA\n"); //Copy device memory back to host errorcode = cudaMemcpy(params.Wsscal_h, params.Wsscal_d, params.Nl*sizeof(params.Wsscal_h[0]), cudaMemcpyDeviceToHost); if(errorcode > 0) printf("cuda memcpy scal Error EV(): %s\n", cudaGetErrorString(errorcode)); if(params.fermion == 1) { errorcode = cudaMemcpy(params.Wsferm_h, params.Wsferm_d, params.Nl*sizeof(params.Wsferm_h[0]), cudaMemcpyDeviceToHost); if(errorcode > 0) printf("cuda memcpy ferm Error EV(): %s\n",cudaGetErrorString(errorcode)); } //Compute the expectation value from the Wilson Loop data EV = 0.0; EVMean(&EV, params.Wsscal_h, params.Wsferm_h, groupsize,WLlist, T, params.fermion); //printf("EV=%f\n",EV); //result=exp(-m2*T)/(T*T*T)*(EV-1.0+1.0/6.0*TB*TB); //*SEout=(double) exp(-m2*T)/(T*T*T)*SE; //printf("%f %f \n",T,result); return EV; } extern "C" __device__ float bump(const float x) //Device version of the bump function { //the 0.999 makes no numerical difference compared to 1.0 //but seems to prevent some unpredictable, unspecified CUDA errors if(x*x < 0.999f) return __expf( -1.0f/(1.0f-x*x) ); else return 0.0f; } extern "C" __device__ float phi(const float x) //Computes the \Phi function which is defined in the thesis { const float onemx2 = 1.0f-x*x; //the 0.999 makes no numerical difference compared to 1.0 //but seems to prevent some unpredictable, unspecified CUDA errors if( x < 0.999f ) { return 1.0f - ( 0.5f/q2 )*( onemx2*__expf(-1.0f/onemx2) + expint(-1.0f/onemx2) ); } else return 1.0f; } extern "C" __device__ float chi(const float x, const float n, const float lambda) //Computes the \Chi function which is defined in the thesis { float ans; const float onemx2 = 1.0f-x*x; const float x2 = x*x; const float x4 = x2*x2; if(x <= -0.999f) ans = 0.0f; else if(x2 < 0.999f) { ans = 0.5f*(-onemx2*__expf(-1.0f/onemx2) - expint(-1.0f/onemx2)); ans += 2.0f*n*tubedist/lambda* 0.444f/( 1.0f + __expf(-(3.31f*x + 5.25f*x*x2*sin(x)*cos( -0.907f*x2 - 1.29f*x4*x4 ))/cos(x)) ); //an older, less precise approximation of the integral //ans += 2.0f*n*tubedist/lambda* 
// (0.218f+0.393f*x*coshf(0.806f*x4-0.696f*x2+0.0902f)/coshf(0.825f*x2-0.0234f*x+0.375f)); } else //for x >= 1.0f ans = 2.0f*n*tubedist*q1/lambda; return ans; } extern "C" __device__ float flperi(const float rho2, const float lambda2) //f_lambda for periodic flux tube profile { float expr, exprp1, exprap1, rho, lambda, flraf, f; int N = 2; int i; rho = sqrtf(rho2); lambda = sqrtf(lambda2); flraf = floorf(rho/tubedist); f = 0.0f; if((int)flraf-N > 0) { f=0.5f*(flraf-(float)N-1.0f)*(flraf-(float)N); } for(i = (int)flraf-N; i <= (int)flraf+N; i++) { if(i == 0) { expr = __expf(-rho/lambda); exprp1 = 1.0f+expr; f = -rho/tubedist*(expr/exprp1) - lambda/tubedist*log(exprp1); } else if(i > 0) { exprap1 = 1.0f+__expf(-(rho-(float)i*tubedist)/lambda); f = f+(float)i/exprap1; } } return tubedist/(lambda*log(2.0f))*f + 1.0f; } extern "C" __device__ float ffixflux(const float rho2, const float lambda2) //f_lambda for the fixflux profile { const float lam = sqrtf(lambda2); const float lmlmin = (lam-lmin)/(tubedist-lmin); const float aml = (tubedist-lam)/(tubedist-lmin); const float n = floorf((sqrt(rho2)+tubedist/2.0f)/tubedist); float ans; if(rho2 <= tubedist*tubedist/4.0f) { ans = (1.0f-0.75f*lmlmin)*phi(2.0f*sqrt(rho2/lambda2)) + 3.0f*rho2/(tubedist*tubedist)*lmlmin; } else { ans = 1.0f+0.75f*(4.0f*rho2/(tubedist*tubedist)-1.0f)*lmlmin + 3.0f*n*(n-1.0f)*aml; ans += 3.0f*lam/(q1*tubedist)*aml*chi(2.0f*(sqrt(rho2)-n*tubedist)/lam, n, lam); //ans = 1.01f; } return ans; } extern "C" __device__ float flambda(float rho2, float lambda2, float4 *flcoefs, float *rho2sp) //flambda(rho^2,lambda^2) defines the magnetic vector potential in cylindrical coordinates { float f; switch(profile){ case step: if(rho2 < lambda2) f = rho2/lambda2; else f = 1.0f; break; case smooth: f = rho2/(lambda2+rho2); break; case quadratic: if(rho2 < lambda2) f = rho2/lambda2*(2.0f-rho2/lambda2); else f = 1.0f; break; case gaussian: f = 1.0f-exp(-rho2/lambda2); break; case periodic: f = flperi(rho2, lambda2); break; case spline: f = interp(rho2, rho2sp, flcoefs); break; case fixflux: f = ffixflux(rho2, lambda2); break; } return f; //return rho2/lambda2; } extern "C" __device__ float fplperi(float rho2, float lambda2) //f'_lambda for periodic profile { float expr,exprp1,rho,lambda,flraf,f; float expral, expralp1; int N = 2; int i; rho = sqrtf(rho2); lambda = sqrtf(lambda2); flraf = floorf(rho/tubedist); f=0.0f; for(i = (int)flraf-N; i <= (int)flraf+N; i++) { if(i == 0) { expr = __expf(-rho/lambda); exprp1 = 1.0f+expr; f = expr/(tubedist*exprp1*exprp1); } else if(i > 0) { expral = __expf(-(rho-(float)i*tubedist)/lambda); expralp1 = 1.0f+expral; f = f +((float)i/rho)*expral/(expralp1*expralp1); } } return tubedist/(2.0f*lambda2*log(2.0f))*f; } extern "C" __device__ float fpfixflux(const float rho2, const float lambda2) //f'_lambda(rho^2) for the fixflux field profile { const float lam = sqrtf(lambda2); const float lmlmin = (lam-lmin)/(tubedist-lmin); const float rho = sqrtf(rho2); const float n = floorf((rho+tubedist/2.0f)/tubedist); const float a2 = tubedist*tubedist; const float aml = (tubedist - lam)/(tubedist - lmin); float ans; if(rho <= tubedist/2.0f) { ans = 2.0f/(lambda2*q2)*(1.0f-0.75f*lmlmin)*bump(2.0f*rho/lam); ans += 3.0f/a2*lmlmin; } else { ans = 3.0f/a2*lmlmin + 6.0f/(q1*lam*tubedist)*aml*bump(2.0f*(rho-n*tubedist)/lam); //ans = 3.0f/(tubedist*tubedist)*lmlmin + 6.0f/(q1*lam*tubedist)* // (tubedist-lam)/(tubedist-lmin)*bump(2.0f*(rho-n*tubedist)/lam); } //ans = 1.0f; return ans; } extern "C" __device__ float fplambda(const 
float rho2, const float lambda2, float4 *flcoefs, float *rho2sp) //fplambda(rho^2,lambda^2) defines the magnetic vector potential derivative // wrt rho^2 in cylindrical coordinates { float f, fjunk; f=1.0f; switch(profile){ case step: if(rho2 < lambda2) f = 1.0f/lambda2; else f = 0.0f; break; case smooth: f = lambda2/((lambda2+rho2)*(lambda2+rho2)); break; case quadratic: if(rho2<lambda2) f = 2.0f/lambda2*(1.0f-rho2/lambda2); else f = 0.0f; break; case gaussian: f = 1.0f/lambda2*exp(-1.0f*rho2/lambda2); break; case periodic: f = fplperi(rho2, lambda2); break; case spline: fjunk = __sinf(sqrtf(rho2)*pi); if(fjunk>100.0f) f = 0.0f; else { f = __expf(-1.0f*fjunk*fjunk/lambda2)/lambda2; //f=1.0f/lambda2*exp(-1.0f*rho2/lambda2); } break; case fixflux: f = fpfixflux(rho2, lambda2); break; } return f; //return 1.0f/lambda2; //return lambda2/((lambda2+rho2)*(lambda2+rho2)); } extern "C" __device__ void Idt(float *scalI, float *fermI, float4 Ai, const float l2, float4 *flcoefs, float *rho2sp, int fermion) //Computes the integral over t from 0 to 1 in the scalar and fermion Wilson loop factors { int i; const int n = 50; //number of points in point-to-point proper time integral float t, rhoi2; //proper time and rho squared const float h = 1.0f/((float) n); //distance between points in integral float4 xiscal, xiferm; //scalar and fermi integrands if (Ai.x<1.0e-8) Ai.x = 1.0e-8; if (Ai.y<1.0e-8) Ai.y = 1.0e-8; if (Ai.z<1.0e-8) Ai.z = 1.0e-8; float Aip1 = Ai.x+2.0f*Ai.y+Ai.z; //rho^2 for the final point if(Aip1<1.0e-8) Aip1 = 1.0e-8; //if(profile == periodic && Aip1 > 10.0f*tubedist) Aip1 = 1.0e-8; //Begin the Simpson's method algorithm xiscal.x = flambda(Ai.x,l2,flcoefs,rho2sp)/Ai.x + flambda(Aip1,l2,flcoefs,rho2sp)/Aip1; xiscal.y = 0.0f; xiscal.z = 0.0f; if (fermion == 1) { xiferm.x = fplambda(Ai.x, l2, flcoefs, rho2sp) + fplambda(Aip1, l2, flcoefs, rho2sp); xiferm.y = 0.0f; xiferm.z = 0.0f; } for(i = 1; i < n; i++) { t = (float)i*h; //rho2 at the point rhoi2 = Ai.x + 2.0f*Ai.y*t + Ai.z*t*t; if(rhoi2 < 1.0e-10) rhoi2 = 1.0e-10; //if(profile == periodic && rhoi2 > 10.0f*tubedist) rhoi2 = 1.0e-8; if(i%2==0) { xiscal.z += flambda(rhoi2, l2, flcoefs, rho2sp)/rhoi2; if(fermion == 1) xiferm.z += fplambda(rhoi2, l2, flcoefs, rho2sp); } else { xiscal.y += flambda(rhoi2, l2, flcoefs, rho2sp)/rhoi2; if(fermion == 1) xiferm.y += fplambda(rhoi2, l2, flcoefs, rho2sp); } } *scalI = (xiscal.x + 2.0f*xiscal.z + 4.0f*xiscal.y)*h/3.0f; if(fermion == 1) *fermI = (xiferm.x + 2.0f*xiferm.z + 4.0f*xiferm.y)*h/3.0f; //*fermI=1.0f/l2; } extern "C" __device__ void getzp1(float4 *zip1, float4 *worldlines, float rtT, float4 xcm, int i, int inx, int Nppl) //Function for determining the next point on the //worldline loop for each of the sub loops { int inxp1; //get the next worldline index for the N/2 group if(i%2 == 1){ if(i == Nppl-1) { inxp1 = inx*Nppl+1; } else { inxp1 = inx*Nppl+i+2; } } //get the next worldline index for the first N/4 group else if(i%4 == 0){ if(i == Nppl-4) { inxp1 = inx*Nppl; } else { inxp1 = inx*Nppl+i+4; } } //get the next worldline index for the second N/4 group else if((i-2)%2 == 0){ if(i == Nppl-2) { inxp1 = inx*Nppl+2; } else { inxp1 = inx*Nppl+i+4; } } //compute the next point zip1->x = xcm.x + rtT*worldlines[inxp1].x; zip1->y = xcm.y + rtT*worldlines[inxp1].y; zip1->z = xcm.z + rtT*worldlines[inxp1].z; } extern "C" __device__ void WilsonLoop(float4 *worldlines, float4 *Wsscal, float4 *Wsferm, float4 xcm, int inx, float F, float l2, float rtT, int Nppl, float4 *flcoefs, float *rho2sp, int fermion) 
//Returns the Wilson loop value { int i; //const float e = 1.0; float4 WLstemp, WLftemp; float4 zi, zip1; float4 Ai; float xyyx; float scalI, fermI; //Compute the scalar contribution WLstemp.x = 0.0f; WLstemp.y = 0.0f; WLstemp.z = 0.0f; WLftemp.x = 0.0f; WLftemp.y = 0.0f; WLftemp.z = 0.0f; for(i = 0; i < Nppl; i++){ //Compute the scaled, shifted coordinate zi.x = xcm.x + rtT*worldlines[inx*Nppl+i].x; zi.y = xcm.y + rtT*worldlines[inx*Nppl+i].y; getzp1(&zip1, worldlines, rtT, xcm, i, inx, Nppl); //Ai Bi and Ci coefficients for the rho2 polynomial Ai.x = zi.x*zi.x + zi.y*zi.y; Ai.y = zi.x*(zip1.x-zi.x)+zi.y*(zip1.y-zi.y); Ai.z = (zip1.x-zi.x)*(zip1.x-zi.x) + (zip1.y-zi.y)*(zip1.y-zi.y); Idt(&scalI, &fermI, Ai, l2, flcoefs, rho2sp, fermion); //scalI=1.0f/l2; //Compute the contribution to the N/2 integral xyyx = (zi.x*zip1.y-zi.y*zip1.x); if(i%2 == 1){ WLstemp.x += xyyx*scalI; WLftemp.x += fermI; } //Compute the contribution to the first N/4 integral else if(i%4 == 0){ WLstemp.z += xyyx*scalI; WLftemp.z += fermI; } //Compute the contribution to the second N/4 integral else if((i-2)%2 == 0){ WLstemp.y += xyyx*scalI; WLftemp.y += fermI; } } Wsscal[inx].x = F*WLstemp.x; Wsscal[inx].y = F*WLstemp.y; Wsscal[inx].z = F*WLstemp.z; if( fermion == 1) { Wsferm[inx].x = 2.0f*F*WLftemp.x*rtT*rtT/(Nppl/2.0f); Wsferm[inx].y = 2.0f*F*WLftemp.y*rtT*rtT/(Nppl/4.0f); Wsferm[inx].z = 2.0f*F*WLftemp.z*rtT*rtT/(Nppl/4.0f); } //Wsferm[inx].x=2.0f*F/l2*rtT*rtT; //Wsferm[inx].y=2.0f*F/l2*rtT*rtT; //Wsferm[inx].z=2.0f*F/l2*rtT*rtT; //Wsferm[inx].x=1.0f; //Wsferm[inx].y=1.0f; //Wsferm[inx].z=1.0f; } __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ExpectValue(float4 *Wsscal, float4 *Wsferm, float4 *worldlines, float4 xcm, float F, float l2, float rtT, int Nl, int Nppl, float4 *flcoefs, float *rho2sp, int fermion) //Each thread computes the Wilson loop value for a single //worldline { int inx = blockIdx.x * blockDim.x + threadIdx.x; WilsonLoop(worldlines, Wsscal, Wsferm, xcm, inx, F, l2, rtT, Nppl, flcoefs, rho2sp, fermion); }
e5fc6a44535596d92e466ead52c820daeed0d9e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "lite/core/op_registry.h" #include "lite/kernels/cuda/leaky_relu_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { template <typename T> __global__ void LeakyReluKernel(const int num, const T alpha, const T* input, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < num) { #if __CUDA_ARCH__ >= 350 output[index] = __ldg(input + index) >= 0 ? __ldg(input + index) : __ldg(input + index) * alpha; #else output[index] = input[index] >= 0 ? input[index] : input[index] * alpha; #endif } } void LeakyReluCompute::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); int num = static_cast<int>(param.X->numel()); float alpha = param.Leaky_relu_alpha; auto input = param.X->data<float>(); auto output = param.Out->mutable_data<float>(TARGET(kCUDA)); int threads = 1024; int blocks = (num + threads - 1) / threads; hipLaunchKernelGGL(( LeakyReluKernel), dim3(blocks), dim3(threads), 0, stream, num, alpha, input, output); hipError_t error = hipGetLastError(); if (error != hipSuccess) LOG(INFO) << hipGetErrorString(error); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(leaky_relu, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::LeakyReluCompute, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .SetVersion("1.5.0") .Finalize();
e5fc6a44535596d92e466ead52c820daeed0d9e4.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "lite/core/op_registry.h" #include "lite/kernels/cuda/leaky_relu_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { template <typename T> __global__ void LeakyReluKernel(const int num, const T alpha, const T* input, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < num) { #if __CUDA_ARCH__ >= 350 output[index] = __ldg(input + index) >= 0 ? __ldg(input + index) : __ldg(input + index) * alpha; #else output[index] = input[index] >= 0 ? input[index] : input[index] * alpha; #endif } } void LeakyReluCompute::Run() { auto& param = this->Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); int num = static_cast<int>(param.X->numel()); float alpha = param.Leaky_relu_alpha; auto input = param.X->data<float>(); auto output = param.Out->mutable_data<float>(TARGET(kCUDA)); int threads = 1024; int blocks = (num + threads - 1) / threads; LeakyReluKernel<<<blocks, threads, 0, stream>>>(num, alpha, input, output); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) LOG(INFO) << cudaGetErrorString(error); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(leaky_relu, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::LeakyReluCompute, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))}) .SetVersion("1.5.0") .Finalize();
5f70a40be76d5693e6fe3ade754b5339f19e5677.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/sparse/sparse_pool_kernel.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_meta.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/funcs/pooling.h" #include "paddle/phi/kernels/funcs/sparse/convolution.h" #include "paddle/phi/kernels/sparse/gpu/convolution.cu.h" namespace phi { namespace sparse { template <typename T, typename IntT = int> __global__ void MaxPoolCudaKernel(const T* in_features_ptr, const IntT* rulebook_ptr, const int n, const int rulebook_len, const int channels, T* out_features_ptr) { phi::funcs::MaxPool<T> max_pool_functor; CUDA_KERNEL_LOOP_TYPE(i, n * channels, int64_t) { int real_i = i / channels; int channel_i = i - real_i * channels; IntT in_i = rulebook_ptr[real_i]; IntT out_i = rulebook_ptr[real_i + rulebook_len]; max_pool_functor.compute(in_features_ptr[in_i * channels + channel_i], &out_features_ptr[out_i * channels + channel_i]); } } /** * x: (N, D, H, W, C) * kernel: (D, H, W, C, OC) * out: (N, D, H, W, OC) **/ template <typename T, typename IntT = int> void MaxPoolGPUKernel(const GPUContext& dev_ctx, const SparseCooTensor& x, const std::vector<int>& kernel_sizes, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, SparseCooTensor* out, DenseTensor* rulebook) { const auto& x_dims = x.dims(); int kernel_size = kernel_sizes[0] * kernel_sizes[1] * kernel_sizes[2]; const std::vector<int>& real_kernel_sizes = phi::funcs::sparse::PoolResetKernel(kernel_sizes, x_dims[4], x_dims[4]); DDim out_dims = {1, 1, 1, 1, 1}; phi::funcs::sparse::GetOutShape( x_dims, real_kernel_sizes, paddings, dilations, strides, &out_dims); const int in_channels = real_kernel_sizes[3]; std::vector<int> offsets(kernel_size + 1), counter(kernel_size); DenseTensorMeta counter_meta( DataType::INT32, {kernel_size}, DataLayout::NCHW); DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta)); DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta)); DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW); DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta)); DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta)); // 1. product rulebook int rulebook_len = ProductRuleBook<T, GPUContext, IntT>(dev_ctx, x, real_kernel_sizes, paddings, dilations, strides, out_dims, false, rulebook, &counter_per_kernel, &offsets_per_kernel, &out_index, &unique_value, out, &counter, &offsets); const IntT* rulebook_ptr = rulebook->data<IntT>(); T* out_features_ptr = out->mutable_non_zero_elements()->data<T>(); const T* in_features_ptr = x.non_zero_elements().data<T>(); // 2. 
max pool
#ifdef PADDLE_WITH_HIP
  thrust::fill(thrust::hip::par.on(dev_ctx.stream()),
#else
  thrust::fill(thrust::cuda::par.on(dev_ctx.stream()),
#endif
               out_features_ptr,
               out_features_ptr + out->non_zero_elements().numel(),
               static_cast<T>(0));
  // TODO(zhangkaihuo) Replacing multiple calls with one kernel may be faster
  for (int i = 0; i < kernel_size; i++) {
    if (counter[i] <= 0) {
      continue;
    }
    auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
        dev_ctx, counter[i] * in_channels, 1);
    hipLaunchKernelGGL(( MaxPoolCudaKernel<T, IntT>),
                       dim3(config.block_per_grid.x),
                       dim3(config.thread_per_block.x),
                       0,
                       dev_ctx.stream(),
                       in_features_ptr,
                       rulebook_ptr + offsets[i] + rulebook_len,
                       counter[i],
                       rulebook_len,
                       in_channels,
                       out_features_ptr);
  }
}

template <typename T, typename Context>
void MaxPoolKernel(const Context& dev_ctx,
                   const SparseCooTensor& x,
                   const std::vector<int>& kernel_sizes,
                   const std::vector<int>& paddings,
                   const std::vector<int>& dilations,
                   const std::vector<int>& strides,
                   SparseCooTensor* out,
                   DenseTensor* rulebook) {
  PD_VISIT_INTEGRAL_TYPES(
      x.non_zero_indices().dtype(), "MaxPoolGPUKernel", ([&] {
        MaxPoolGPUKernel<T, data_t>(dev_ctx,
                                    x,
                                    kernel_sizes,
                                    paddings,
                                    dilations,
                                    strides,
                                    out,
                                    rulebook);
      }));
}

}  // namespace sparse
}  // namespace phi

PD_REGISTER_KERNEL(sparse_maxpool,
                   GPU,
                   ALL_LAYOUT,
                   phi::sparse::MaxPoolKernel,
                   float,
                   double,
                   phi::dtype::float16) {
  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}
5f70a40be76d5693e6fe3ade754b5339f19e5677.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/sparse/sparse_pool_kernel.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_meta.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/funcs/pooling.h" #include "paddle/phi/kernels/funcs/sparse/convolution.h" #include "paddle/phi/kernels/sparse/gpu/convolution.cu.h" namespace phi { namespace sparse { template <typename T, typename IntT = int> __global__ void MaxPoolCudaKernel(const T* in_features_ptr, const IntT* rulebook_ptr, const int n, const int rulebook_len, const int channels, T* out_features_ptr) { phi::funcs::MaxPool<T> max_pool_functor; CUDA_KERNEL_LOOP_TYPE(i, n * channels, int64_t) { int real_i = i / channels; int channel_i = i - real_i * channels; IntT in_i = rulebook_ptr[real_i]; IntT out_i = rulebook_ptr[real_i + rulebook_len]; max_pool_functor.compute(in_features_ptr[in_i * channels + channel_i], &out_features_ptr[out_i * channels + channel_i]); } } /** * x: (N, D, H, W, C) * kernel: (D, H, W, C, OC) * out: (N, D, H, W, OC) **/ template <typename T, typename IntT = int> void MaxPoolGPUKernel(const GPUContext& dev_ctx, const SparseCooTensor& x, const std::vector<int>& kernel_sizes, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, SparseCooTensor* out, DenseTensor* rulebook) { const auto& x_dims = x.dims(); int kernel_size = kernel_sizes[0] * kernel_sizes[1] * kernel_sizes[2]; const std::vector<int>& real_kernel_sizes = phi::funcs::sparse::PoolResetKernel(kernel_sizes, x_dims[4], x_dims[4]); DDim out_dims = {1, 1, 1, 1, 1}; phi::funcs::sparse::GetOutShape( x_dims, real_kernel_sizes, paddings, dilations, strides, &out_dims); const int in_channels = real_kernel_sizes[3]; std::vector<int> offsets(kernel_size + 1), counter(kernel_size); DenseTensorMeta counter_meta( DataType::INT32, {kernel_size}, DataLayout::NCHW); DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta)); DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta)); DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW); DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta)); DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta)); // 1. product rulebook int rulebook_len = ProductRuleBook<T, GPUContext, IntT>(dev_ctx, x, real_kernel_sizes, paddings, dilations, strides, out_dims, false, rulebook, &counter_per_kernel, &offsets_per_kernel, &out_index, &unique_value, out, &counter, &offsets); const IntT* rulebook_ptr = rulebook->data<IntT>(); T* out_features_ptr = out->mutable_non_zero_elements()->data<T>(); const T* in_features_ptr = x.non_zero_elements().data<T>(); // 2. 
max pool #ifdef PADDLE_WITH_HIP thrust::fill(thrust::hip::par.on(dev_ctx.stream()), #else thrust::fill(thrust::cuda::par.on(dev_ctx.stream()), #endif out_features_ptr, out_features_ptr + out->non_zero_elements().numel(), static_cast<T>(0)); // TODO(zhangkaihuo) Replacing multiple calls with one kernel may be faster for (int i = 0; i < kernel_size; i++) { if (counter[i] <= 0) { continue; } auto config = phi::backends::gpu::GetGpuLaunchConfig1D( dev_ctx, counter[i] * in_channels, 1); MaxPoolCudaKernel<T, IntT><<<config.block_per_grid.x, config.thread_per_block.x, 0, dev_ctx.stream()>>>( in_features_ptr, rulebook_ptr + offsets[i] + rulebook_len, counter[i], rulebook_len, in_channels, out_features_ptr); } } template <typename T, typename Context> void MaxPoolKernel(const Context& dev_ctx, const SparseCooTensor& x, const std::vector<int>& kernel_sizes, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, SparseCooTensor* out, DenseTensor* rulebook) { PD_VISIT_INTEGRAL_TYPES( x.non_zero_indices().dtype(), "MaxPoolGPUKernel", ([&] { MaxPoolGPUKernel<T, data_t>(dev_ctx, x, kernel_sizes, paddings, dilations, strides, out, rulebook); })); } } // namespace sparse } // namespace phi PD_REGISTER_KERNEL(sparse_maxpool, GPU, ALL_LAYOUT, phi::sparse::MaxPoolKernel, float, double, phi::dtype::float16) { kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO); }
92e7316452add1f94988cb7fa41e1ca82b2be955.hip
// !!! This is a file automatically generated by hipify!!! //##########################################################// // Name: Kirtan Mali // // Roll no: 18AG10016 // // Question 1: 2D Convolution Matrix // //##########################################################// #include <stdio.h> #include <stdlib.h> // Cuda Libraries #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // Macro for error checking and debugging #define CHECK(call) { \ const hipError_t error = call; \ if (error != hipSuccess) { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \ exit(1); \ } \ } typedef long long int lli; #define MAX_VAL 100 // Important parameters #define KERNEL_SIZE 3 #define KERNEL_HALF (KERNEL_SIZE >> 1) #define BLOCK_SIZE 32 #define TILE_SIZE (BLOCK_SIZE - KERNEL_SIZE + 1) // Function prototypes void printMat(float *matrix, lli n); void convolution_2D_HOST(float *matrix, float *output, int n, int kernelH, int kernelW); float *createMat(lli n, int isempty, int seed); // Convolution Kernel __global__ void convolution_2D_DEVICE(float *matrix, float *output, int n) { __shared__ float tile[BLOCK_SIZE][BLOCK_SIZE]; // get thread indices int tx = threadIdx.x; int ty = threadIdx.y; // get the output indices int row_o = ty + blockIdx.y * TILE_SIZE; int col_o = tx + blockIdx.x * TILE_SIZE; // shift to obtain input indices int row_i = row_o - KERNEL_HALF; int col_i = col_o - KERNEL_HALF; // Load tile elements if(row_i >= 0 && row_i < n && col_i >= 0 && col_i < n) tile[ty][tx] = matrix[row_i*n + col_i]; else tile[ty][tx] = 0.0f; __syncthreads(); if(tx < TILE_SIZE && ty < TILE_SIZE){ float pValue = 0.0f; for(int y=0; y<KERNEL_SIZE; y++) for(int x=0; x<KERNEL_SIZE; x++) pValue += tile[y+ty][x+tx] / 9.0; if(row_o < n && col_o < n) { output[row_o*n + col_o] = pValue; } } } int main(int argc, char **argv) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; int isprint = 1; if (argc > 1) { printf("\n\nDisabling Printing ...\n\n"); isprint = 0; } lli t; scanf("%lld", &t); while (t--) { srand(t); lli n; scanf("%lld", &n); size_t size = sizeof(float) * n * n; float *h_matrix = createMat(n, 0, t); float *h_output = createMat(n, 1, t); float *d_matrix = NULL; float *d_output = NULL; CHECK(hipMalloc((void **)&d_matrix, size)); CHECK(hipMalloc((void **)&d_output, size)); CHECK(hipMemcpy(d_matrix, h_matrix, size, hipMemcpyHostToDevice)); dim3 blockSize, gridSize; blockSize.x = BLOCK_SIZE, blockSize.y = BLOCK_SIZE, blockSize.z = 1; gridSize.x = ceil((float)n/TILE_SIZE), gridSize.y = ceil((float)n/TILE_SIZE), gridSize.z = 1; hipLaunchKernelGGL(( convolution_2D_DEVICE), dim3(gridSize), dim3(blockSize), 0, 0, d_matrix, d_output, n); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch convolution_2D_DEVICE kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // convolution_2D_HOST(h_matrix, h_output, n, 3, 3); CHECK(hipMemcpy(h_output, d_output, size, hipMemcpyDeviceToHost)); if (isprint == 1) { printf("\n\n***** Original Matrix *****\n\n"); printMat(h_matrix, n); printf("\n\n***** Convolved Matrix Output *****\n\n"); printMat(h_output, n); } } return 0; } // Utility Functions float *createMat(lli n, int isempty, int seed) { srand(seed+1); size_t size = sizeof(float) * n * n; float *matrix = (float *)malloc(size); for (int i=0; i<n*n; i++) { if (isempty == 1) matrix[i] = 0.0f; else matrix[i] = (float)rand()/((float)RAND_MAX/MAX_VAL); } return matrix; } void 
printMat(float *matrix, lli n) { for (lli i=0; i<n*n; i++) { printf("%0.2f ", matrix[i]); if (i % n == n-1) printf("\n"); } } void convolution_2D_HOST(float *matrix, float *output, int n, int kernelH, int kernelW) { for (lli i=0; i<n; i++) { for (lli j=0; j<n; j++) { lli startx = i - (kernelH/2); lli starty = j - (kernelW/2); float newval = 0.0; for (lli a=0; a<kernelH; a++) { for (lli b=0; b<kernelW; b++) { if (startx + a >= 0 && startx + a < n && starty + b >= 0 && starty + b < n) { newval += matrix[(startx+a)*n + (starty+b)] / (float)(kernelH*kernelW); } } } output[i*n + j] = newval; } } }
92e7316452add1f94988cb7fa41e1ca82b2be955.cu
//##########################################################// // Name: Kirtan Mali // // Roll no: 18AG10016 // // Question 1: 2D Convolution Matrix // //##########################################################// #include <stdio.h> #include <stdlib.h> // Cuda Libraries #include <cuda.h> #include <cuda_runtime.h> // Macro for error checking and debugging #define CHECK(call) { \ const cudaError_t error = call; \ if (error != cudaSuccess) { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \ exit(1); \ } \ } typedef long long int lli; #define MAX_VAL 100 // Important parameters #define KERNEL_SIZE 3 #define KERNEL_HALF (KERNEL_SIZE >> 1) #define BLOCK_SIZE 32 #define TILE_SIZE (BLOCK_SIZE - KERNEL_SIZE + 1) // Function prototypes void printMat(float *matrix, lli n); void convolution_2D_HOST(float *matrix, float *output, int n, int kernelH, int kernelW); float *createMat(lli n, int isempty, int seed); // Convolution Kernel __global__ void convolution_2D_DEVICE(float *matrix, float *output, int n) { __shared__ float tile[BLOCK_SIZE][BLOCK_SIZE]; // get thread indices int tx = threadIdx.x; int ty = threadIdx.y; // get the output indices int row_o = ty + blockIdx.y * TILE_SIZE; int col_o = tx + blockIdx.x * TILE_SIZE; // shift to obtain input indices int row_i = row_o - KERNEL_HALF; int col_i = col_o - KERNEL_HALF; // Load tile elements if(row_i >= 0 && row_i < n && col_i >= 0 && col_i < n) tile[ty][tx] = matrix[row_i*n + col_i]; else tile[ty][tx] = 0.0f; __syncthreads(); if(tx < TILE_SIZE && ty < TILE_SIZE){ float pValue = 0.0f; for(int y=0; y<KERNEL_SIZE; y++) for(int x=0; x<KERNEL_SIZE; x++) pValue += tile[y+ty][x+tx] / 9.0; if(row_o < n && col_o < n) { output[row_o*n + col_o] = pValue; } } } int main(int argc, char **argv) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; int isprint = 1; if (argc > 1) { printf("\n\nDisabling Printing ...\n\n"); isprint = 0; } lli t; scanf("%lld", &t); while (t--) { srand(t); lli n; scanf("%lld", &n); size_t size = sizeof(float) * n * n; float *h_matrix = createMat(n, 0, t); float *h_output = createMat(n, 1, t); float *d_matrix = NULL; float *d_output = NULL; CHECK(cudaMalloc((void **)&d_matrix, size)); CHECK(cudaMalloc((void **)&d_output, size)); CHECK(cudaMemcpy(d_matrix, h_matrix, size, cudaMemcpyHostToDevice)); dim3 blockSize, gridSize; blockSize.x = BLOCK_SIZE, blockSize.y = BLOCK_SIZE, blockSize.z = 1; gridSize.x = ceil((float)n/TILE_SIZE), gridSize.y = ceil((float)n/TILE_SIZE), gridSize.z = 1; convolution_2D_DEVICE<<<gridSize, blockSize>>>(d_matrix, d_output, n); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch convolution_2D_DEVICE kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // convolution_2D_HOST(h_matrix, h_output, n, 3, 3); CHECK(cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost)); if (isprint == 1) { printf("\n\n***** Original Matrix *****\n\n"); printMat(h_matrix, n); printf("\n\n***** Convolved Matrix Output *****\n\n"); printMat(h_output, n); } } return 0; } // Utility Functions float *createMat(lli n, int isempty, int seed) { srand(seed+1); size_t size = sizeof(float) * n * n; float *matrix = (float *)malloc(size); for (int i=0; i<n*n; i++) { if (isempty == 1) matrix[i] = 0.0f; else matrix[i] = (float)rand()/((float)RAND_MAX/MAX_VAL); } return matrix; } void printMat(float *matrix, lli n) { for (lli i=0; i<n*n; i++) { printf("%0.2f ", matrix[i]); if (i % n == 
n-1) printf("\n"); } } void convolution_2D_HOST(float *matrix, float *output, int n, int kernelH, int kernelW) { for (lli i=0; i<n; i++) { for (lli j=0; j<n; j++) { lli startx = i - (kernelH/2); lli starty = j - (kernelW/2); float newval = 0.0; for (lli a=0; a<kernelH; a++) { for (lli b=0; b<kernelW; b++) { if (startx + a >= 0 && startx + a < n && starty + b >= 0 && starty + b < n) { newval += matrix[(startx+a)*n + (starty+b)] / (float)(kernelH*kernelW); } } } output[i*n + j] = newval; } } }
93593f2e3e51c9e7a85c7b85c33e4915f1804fdf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <MarchingCube_Kernel.cuh> #include <MarchingCube.cuh> #include <UniformGrid.cuh> #include <AtomicDoubleAdd.cuh> #include <UniformGridKernel.cuh> #include <stdio.h> /**************************************************************************************************************/ /**************************************************************************************************************/ extern "C" { /**************************************************************************************************************/ /**************************************************************************************************************/ __global__ void createGrid_MarchingCube_Kernel(double4* grid, double3 center, double l, double w, double d, float sizeCell, uint nbPosX, uint nbPosY, uint nbPosZ, int* m_nbIndex) { uint indexX = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; uint indexY = __mul24(blockIdx.y,blockDim.y) + threadIdx.y; uint indexZ = __mul24(blockIdx.z,blockDim.z) + threadIdx.z; if(indexX<nbPosX && indexY<nbPosY && indexZ<nbPosZ) { uint indexCell = indexX + indexY*nbPosX + indexZ*nbPosX*nbPosY; double3 pos = make_double3(center.x-(l/2)+(sizeCell/2)+indexX*sizeCell, center.y-(w/2)+(sizeCell/2)+indexY*sizeCell,center.z-(d/2)+(sizeCell/2)+indexZ*sizeCell); grid[indexCell].x = pos.x; grid[indexCell].y = pos.y; grid[indexCell].z = pos.z; grid[indexCell].w = 0; m_nbIndex[indexCell] = 0; //printf("pos:%f %f %f\n",grid[indexCell].x,grid[indexCell].y,grid[indexCell].z); } } /**************************************************************************************************************/ /**************************************************************************************************************/ __global__ void polygonize_MarchingCube_Kernel(double4* grid, uint nbCellsX, uint nbCellsY, uint nbCellsZ, double isoLevel, int* nbIndex, double3* vertexs, double3* normales, int* indexs) { int indexX = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; int indexY = __mul24(blockIdx.y,blockDim.y) + threadIdx.y; int indexZ = __mul24(blockIdx.z,blockDim.z) + threadIdx.z; if(indexX>=0 && indexY>=0 && indexZ>=0 && indexX<(nbCellsX-1) && indexY<(nbCellsY-1) && indexZ<(nbCellsZ-1)) { uint i0 = indexX + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY; uint i3 = indexX + indexY*nbCellsX + (indexZ+1)*nbCellsX*nbCellsY; uint i4 = indexX + (indexY+1)*nbCellsX + indexZ*nbCellsX*nbCellsY; uint i7 = indexX + (indexY+1)*nbCellsX + (indexZ+1)*nbCellsX*nbCellsY; uint i1 = (indexX+1) + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY; uint i2 = (indexX+1) + indexY*nbCellsX + (indexZ+1)*nbCellsX*nbCellsY; uint i5 = (indexX+1) + (indexY+1)*nbCellsX + indexZ*nbCellsX*nbCellsY; uint i6 = (indexX+1) + (indexY+1)*nbCellsX + (indexZ+1)*nbCellsX*nbCellsY; double3 P0 = make_double3(grid[i0].x,grid[i0].y,grid[i0].z); double3 P1 = make_double3(grid[i1].x,grid[i1].y,grid[i1].z); double3 P2 = make_double3(grid[i2].x,grid[i2].y,grid[i2].z); double3 P3 = make_double3(grid[i3].x,grid[i3].y,grid[i3].z); double3 P4 = make_double3(grid[i4].x,grid[i4].y,grid[i4].z); double3 P5 = make_double3(grid[i5].x,grid[i5].y,grid[i5].z); double3 P6 = make_double3(grid[i6].x,grid[i6].y,grid[i6].z); double3 P7 = make_double3(grid[i7].x,grid[i7].y,grid[i7].z); double val0 = grid[i0].w; double val1 = grid[i1].w; double val2 = grid[i2].w; double val3 = grid[i3].w; double val4 = grid[i4].w; double val5 = grid[i5].w; double val6 = grid[i6].w; double val7 = grid[i7].w; uint CubeIndex = 0; 
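// Build the 8-bit cube index: bit k is set when corner k of the cell lies at or below isoLevel, selecting the triangulation case from EdgeTable/TriTable.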
if (grid[i0].w <= isoLevel) CubeIndex |= 1; if (grid[i1].w <= isoLevel) CubeIndex |= 2; if (grid[i2].w <= isoLevel) CubeIndex |= 4; if (grid[i3].w <= isoLevel) CubeIndex |= 8; if (grid[i4].w <= isoLevel) CubeIndex |= 16; if (grid[i5].w <= isoLevel) CubeIndex |= 32; if (grid[i6].w <= isoLevel) CubeIndex |= 64; if (grid[i7].w <= isoLevel) CubeIndex |= 128; //if(CubeIndex!=0)printf("CubeIndex:%d\n",CubeIndex); uint size = nbCellsX + nbCellsY*nbCellsX + nbCellsZ*nbCellsX*nbCellsY; uint indexCell = indexX + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY; if (EdgeTable[CubeIndex] != 0){ polygoniseCell_MarchingCube(P0,P1,P2,P3,P4,P5,P6,P7, val0, val1, val2, val3, val4, val5, val6, val7, CubeIndex, isoLevel, indexCell, size, nbIndex, vertexs, normales, indexs); } //if(nbIndex[indexCell]!=0) printf("nbIndex:%d\n",nbIndex[indexCell]); } } /**************************************************************************************************************/ /**************************************************************************************************************/ __device__ void polygoniseCell_MarchingCube(double3 pG0, double3 pG1, double3 pG2, double3 pG3, double3 pG4, double3 pG5, double3 pG6, double3 pG7, double val0, double val1, double val2, double val3, double val4, double val5, double val6, double val7, int CubeIndex, double isoLevel, uint indexCell, uint size, int* nbIndex, double3* vertexs, double3* normales, int* indexs) { double3 P[12]; if (EdgeTable[CubeIndex] & 1) P[0] = vertexInterpolate_MarchingCube(pG0,pG1,val0,val1,isoLevel); if (EdgeTable[CubeIndex] & 2) P[1] = vertexInterpolate_MarchingCube(pG1,pG2,val1,val2,isoLevel); if (EdgeTable[CubeIndex] & 4) P[2] = vertexInterpolate_MarchingCube(pG2,pG3,val2,val3,isoLevel); if (EdgeTable[CubeIndex] & 8) P[3] = vertexInterpolate_MarchingCube(pG3,pG0,val3,val0,isoLevel); if (EdgeTable[CubeIndex] & 16) P[4] = vertexInterpolate_MarchingCube(pG4,pG5,val4,val5,isoLevel); if (EdgeTable[CubeIndex] & 32) P[5] = vertexInterpolate_MarchingCube(pG5,pG6,val5,val6,isoLevel); if (EdgeTable[CubeIndex] & 64) P[6] = vertexInterpolate_MarchingCube(pG6,pG7,val6,val7,isoLevel); if (EdgeTable[CubeIndex] & 128) P[7] = vertexInterpolate_MarchingCube(pG7,pG4,val7,val4,isoLevel); if (EdgeTable[CubeIndex] & 256) P[8] = vertexInterpolate_MarchingCube(pG0,pG4,val0,val4,isoLevel); if (EdgeTable[CubeIndex] & 512) P[9] = vertexInterpolate_MarchingCube(pG1,pG5,val1,val5,isoLevel); if (EdgeTable[CubeIndex] & 1024) P[10] = vertexInterpolate_MarchingCube(pG2,pG6,val2,val6,isoLevel); if (EdgeTable[CubeIndex] & 2048) P[11] = vertexInterpolate_MarchingCube(pG3,pG7,val3,val7,isoLevel); uint nbT = 0; for (int i=0;TriTable[CubeIndex][i]!=-1;i+=3) { for(int j=0;j<3;j++) { uint nbInd = nbIndex[indexCell]; nbIndex[indexCell] = nbIndex[indexCell] + 1; indexs[indexCell*nbIndexMax_perCell + nbInd] = nbInd; vertexs[indexCell*nbVertexMax_perCell + nbInd] = P[TriTable[CubeIndex][i+j]]; } nbT++; } } /**************************************************************************************************************/ /**************************************************************************************************************/ __device__ double3 vertexInterpolate_MarchingCube(double3 P1, double3 P2, double ValP1,double ValP2, double isoLevel) { if (fabs(isoLevel-ValP1) < 0.000001) return(P1); if (fabs(isoLevel-ValP2) < 0.000001) return(P2); if (fabs(ValP1-ValP2) < 0.000001) return(P1); double mu = (isoLevel - ValP1) / (ValP2 - ValP1); double3 P; P.x = P1.x + mu * (P2.x - P1.x); P.y = P1.y + mu * (P2.y - 
P1.y); P.z = P1.z + mu * (P2.z - P1.z); return(P); } /**************************************************************************************************************/ /**************************************************************************************************************/ }
93593f2e3e51c9e7a85c7b85c33e4915f1804fdf.cu
#include <MarchingCube_Kernel.cuh> #include <MarchingCube.cuh> #include <UniformGrid.cuh> #include <AtomicDoubleAdd.cuh> #include <UniformGridKernel.cuh> #include <stdio.h> /**************************************************************************************************************/ /**************************************************************************************************************/ extern "C" { /**************************************************************************************************************/ /**************************************************************************************************************/ __global__ void createGrid_MarchingCube_Kernel(double4* grid, double3 center, double l, double w, double d, float sizeCell, uint nbPosX, uint nbPosY, uint nbPosZ, int* m_nbIndex) { uint indexX = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; uint indexY = __mul24(blockIdx.y,blockDim.y) + threadIdx.y; uint indexZ = __mul24(blockIdx.z,blockDim.z) + threadIdx.z; if(indexX<nbPosX && indexY<nbPosY && indexZ<nbPosZ) { uint indexCell = indexX + indexY*nbPosX + indexZ*nbPosX*nbPosY; double3 pos = make_double3(center.x-(l/2)+(sizeCell/2)+indexX*sizeCell, center.y-(w/2)+(sizeCell/2)+indexY*sizeCell,center.z-(d/2)+(sizeCell/2)+indexZ*sizeCell); grid[indexCell].x = pos.x; grid[indexCell].y = pos.y; grid[indexCell].z = pos.z; grid[indexCell].w = 0; m_nbIndex[indexCell] = 0; //printf("pos:%f %f %f\n",grid[indexCell].x,grid[indexCell].y,grid[indexCell].z); } } /**************************************************************************************************************/ /**************************************************************************************************************/ __global__ void polygonize_MarchingCube_Kernel(double4* grid, uint nbCellsX, uint nbCellsY, uint nbCellsZ, double isoLevel, int* nbIndex, double3* vertexs, double3* normales, int* indexs) { int indexX = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; int indexY = __mul24(blockIdx.y,blockDim.y) + threadIdx.y; int indexZ = __mul24(blockIdx.z,blockDim.z) + threadIdx.z; if(indexX>=0 && indexY>=0 && indexZ>=0 && indexX<(nbCellsX-1) && indexY<(nbCellsY-1) && indexZ<(nbCellsZ-1)) { uint i0 = indexX + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY; uint i3 = indexX + indexY*nbCellsX + (indexZ+1)*nbCellsX*nbCellsY; uint i4 = indexX + (indexY+1)*nbCellsX + indexZ*nbCellsX*nbCellsY; uint i7 = indexX + (indexY+1)*nbCellsX + (indexZ+1)*nbCellsX*nbCellsY; uint i1 = (indexX+1) + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY; uint i2 = (indexX+1) + indexY*nbCellsX + (indexZ+1)*nbCellsX*nbCellsY; uint i5 = (indexX+1) + (indexY+1)*nbCellsX + indexZ*nbCellsX*nbCellsY; uint i6 = (indexX+1) + (indexY+1)*nbCellsX + (indexZ+1)*nbCellsX*nbCellsY; double3 P0 = make_double3(grid[i0].x,grid[i0].y,grid[i0].z); double3 P1 = make_double3(grid[i1].x,grid[i1].y,grid[i1].z); double3 P2 = make_double3(grid[i2].x,grid[i2].y,grid[i2].z); double3 P3 = make_double3(grid[i3].x,grid[i3].y,grid[i3].z); double3 P4 = make_double3(grid[i4].x,grid[i4].y,grid[i4].z); double3 P5 = make_double3(grid[i5].x,grid[i5].y,grid[i5].z); double3 P6 = make_double3(grid[i6].x,grid[i6].y,grid[i6].z); double3 P7 = make_double3(grid[i7].x,grid[i7].y,grid[i7].z); double val0 = grid[i0].w; double val1 = grid[i1].w; double val2 = grid[i2].w; double val3 = grid[i3].w; double val4 = grid[i4].w; double val5 = grid[i5].w; double val6 = grid[i6].w; double val7 = grid[i7].w; uint CubeIndex = 0; if (grid[i0].w <= isoLevel) CubeIndex |= 1; if (grid[i1].w <= isoLevel) CubeIndex |= 2; 
if (grid[i2].w <= isoLevel) CubeIndex |= 4; if (grid[i3].w <= isoLevel) CubeIndex |= 8; if (grid[i4].w <= isoLevel) CubeIndex |= 16; if (grid[i5].w <= isoLevel) CubeIndex |= 32; if (grid[i6].w <= isoLevel) CubeIndex |= 64; if (grid[i7].w <= isoLevel) CubeIndex |= 128; //if(CubeIndex!=0)printf("CubeIndex:%d\n",CubeIndex); uint size = nbCellsX + nbCellsY*nbCellsX + nbCellsZ*nbCellsX*nbCellsY; uint indexCell = indexX + indexY*nbCellsX + indexZ*nbCellsX*nbCellsY; if (EdgeTable[CubeIndex] != 0){ polygoniseCell_MarchingCube(P0,P1,P2,P3,P4,P5,P6,P7, val0, val1, val2, val3, val4, val5, val6, val7, CubeIndex, isoLevel, indexCell, size, nbIndex, vertexs, normales, indexs); } //if(nbIndex[indexCell]!=0) printf("nbIndex:%d\n",nbIndex[indexCell]); } } /**************************************************************************************************************/ /**************************************************************************************************************/ __device__ void polygoniseCell_MarchingCube(double3 pG0, double3 pG1, double3 pG2, double3 pG3, double3 pG4, double3 pG5, double3 pG6, double3 pG7, double val0, double val1, double val2, double val3, double val4, double val5, double val6, double val7, int CubeIndex, double isoLevel, uint indexCell, uint size, int* nbIndex, double3* vertexs, double3* normales, int* indexs) { double3 P[12]; if (EdgeTable[CubeIndex] & 1) P[0] = vertexInterpolate_MarchingCube(pG0,pG1,val0,val1,isoLevel); if (EdgeTable[CubeIndex] & 2) P[1] = vertexInterpolate_MarchingCube(pG1,pG2,val1,val2,isoLevel); if (EdgeTable[CubeIndex] & 4) P[2] = vertexInterpolate_MarchingCube(pG2,pG3,val2,val3,isoLevel); if (EdgeTable[CubeIndex] & 8) P[3] = vertexInterpolate_MarchingCube(pG3,pG0,val3,val0,isoLevel); if (EdgeTable[CubeIndex] & 16) P[4] = vertexInterpolate_MarchingCube(pG4,pG5,val4,val5,isoLevel); if (EdgeTable[CubeIndex] & 32) P[5] = vertexInterpolate_MarchingCube(pG5,pG6,val5,val6,isoLevel); if (EdgeTable[CubeIndex] & 64) P[6] = vertexInterpolate_MarchingCube(pG6,pG7,val6,val7,isoLevel); if (EdgeTable[CubeIndex] & 128) P[7] = vertexInterpolate_MarchingCube(pG7,pG4,val7,val4,isoLevel); if (EdgeTable[CubeIndex] & 256) P[8] = vertexInterpolate_MarchingCube(pG0,pG4,val0,val4,isoLevel); if (EdgeTable[CubeIndex] & 512) P[9] = vertexInterpolate_MarchingCube(pG1,pG5,val1,val5,isoLevel); if (EdgeTable[CubeIndex] & 1024) P[10] = vertexInterpolate_MarchingCube(pG2,pG6,val2,val6,isoLevel); if (EdgeTable[CubeIndex] & 2048) P[11] = vertexInterpolate_MarchingCube(pG3,pG7,val3,val7,isoLevel); uint nbT = 0; for (int i=0;TriTable[CubeIndex][i]!=-1;i+=3) { for(int j=0;j<3;j++) { uint nbInd = nbIndex[indexCell]; nbIndex[indexCell] = nbIndex[indexCell] + 1; indexs[indexCell*nbIndexMax_perCell + nbInd] = nbInd; vertexs[indexCell*nbVertexMax_perCell + nbInd] = P[TriTable[CubeIndex][i+j]]; } nbT++; } } /**************************************************************************************************************/ /**************************************************************************************************************/ __device__ double3 vertexInterpolate_MarchingCube(double3 P1, double3 P2, double ValP1,double ValP2, double isoLevel) { if (fabs(isoLevel-ValP1) < 0.000001) return(P1); if (fabs(isoLevel-ValP2) < 0.000001) return(P2); if (fabs(ValP1-ValP2) < 0.000001) return(P1); double mu = (isoLevel - ValP1) / (ValP2 - ValP1); double3 P; P.x = P1.x + mu * (P2.x - P1.x); P.y = P1.y + mu * (P2.y - P1.y); P.z = P1.z + mu * (P2.z - P1.z); return(P); } 
/**************************************************************************************************************/ /**************************************************************************************************************/ }
aba5fbb2676bd501f89b59a7329caca152952097.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu" #else #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_gatherKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 4, "Index tensor must have same dimensions as input tensor"); THLongStorage *indexSize = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, tensor, indexSize), 4, "Index tensor must have the same size as output tensor."); THLongStorage_free(indexSize); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
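// RUN expands to a launch of the templated gather kernel; DIMS = -1 selects the generic indexing path for dimensionalities not specialized at compile time.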
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); THCudaCheck(hipGetLastError()); break; case 2: RUN(unsigned int, 2, real); THCudaCheck(hipGetLastError()); break; case 3: RUN(unsigned int, 3, real); THCudaCheck(hipGetLastError()); break; default: RUN(unsigned int, -1, real); THCudaCheck(hipGetLastError()); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); THCudaCheck(hipGetLastError()); } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize 
for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ hipLaunchKernelGGL(( THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCudaLongTensor_size(state, index, d), 4, "Index tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(hipGetLastError()); } #undef RUN #endif
aba5fbb2676bd501f89b59a7329caca152952097.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu" #else #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_gatherKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(gather)(THCState* state, THCTensor *tensor, THCTensor *src, int dim, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 4, "Index tensor must have same dimensions as input tensor"); THLongStorage *indexSize = THCudaLongTensor_newSizeOf(state, index); THArgCheck(THCTensor_(isSize)(state, tensor, indexSize), 4, "Index tensor must have the same size as output tensor."); THLongStorage_free(indexSize); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3, "Index dimension is out of bounds"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCTensor_(size)(state, src, d), 2, "Input tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); THCudaCheck(cudaGetLastError()); break; case 2: RUN(unsigned int, 2, real); THCudaCheck(cudaGetLastError()); break; case 3: RUN(unsigned int, 3, real); THCudaCheck(cudaGetLastError()); break; default: RUN(unsigned int, -1, real); THCudaCheck(cudaGetLastError()); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); THCudaCheck(cudaGetLastError()); } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of 
dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements); void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "Index tensor must have same dimensions as input tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4, "Input tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { int64_t indexSizeD = THCudaLongTensor_size(state, index, d); if (d != dim) { THArgCheck(indexSizeD <= THCTensor_(size)(state, tensor, d), 3, "Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s", dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str); } THArgCheck(indexSizeD <= THCTensor_(size)(state, src, d), 3, "Index tensor must not have larger size than input tensor, but got index %s input %s", THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str); } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (totalElements > 0) { if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<real, unsigned int> srcInfo = getTensorInfo<real, THCTensor, unsigned int>(state, src); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. 
switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<real, uint64_t> srcInfo = getTensorInfo<real, THCTensor, uint64_t>(state, src); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real) } } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #define RUN(TYPE, DIMS, REAL) \ THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS> \ <<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \ tensorInfo, indexInfo, value, dim, (TYPE)totalElements); void THCTensor_(scatterFill)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, real value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2, "Index dimension is out of bounds"); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3, "Index tensor must have same dimensions as output tensor"); for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) { if (d != dim) { THArgCheck(THCTensor_(size)(state, tensor, d) == THCudaLongTensor_size(state, index, d), 4, "Index tensor must have same size as output tensor apart from the specified dimension"); } } THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS, 1, CUTORCH_DIM_WARNING); const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index); const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING); THCTensor* oldTensor = NULL; if (THCTensor_maybeOverlappingIndices(state, tensor)) { oldTensor = tensor; tensor = THCTensor_(newContiguous)(state, tensor); } if (THCTensor_canUse32BitIndexMath(state, tensor) && THCTensor_canUse32BitIndexMath(state, index)) { TensorInfo<real, unsigned int> tensorInfo = getTensorInfo<real, THCTensor, unsigned int>(state, tensor); TensorInfo<int64_t, unsigned int> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index); // Specialize for a small number of dimensions. switch (indexInfo.dims) { case 1: RUN(unsigned int, 1, real); break; case 2: RUN(unsigned int, 2, real); break; case 3: RUN(unsigned int, 3, real); break; default: RUN(unsigned int, -1, real); break; } } else { TensorInfo<real, uint64_t> tensorInfo = getTensorInfo<real, THCTensor, uint64_t>(state, tensor); TensorInfo<int64_t, uint64_t> indexInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index); RUN(uint64_t, -1, real); } if (oldTensor) { THCTensor_copyIgnoringOverlaps<real>(state, oldTensor, tensor); THCTensor_(free)(state, tensor); tensor = oldTensor; } THCudaCheck(cudaGetLastError()); } #undef RUN #endif
1203ab9a7034363ba6e636613519df75f3ca4c1f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019, NVIDIA Corporation. All rights reserved. // // This work is made available under the Nvidia Source Code License-NC. // To view a copy of this license, visit // https://nvlabs.github.io/stylegan2/license.html // Cheng-Bin Jin re-implementation. #include <cstdio> void checkCudaError(hipError_t err) { if (err != hipSuccess) { printf("%s: %s\n", hipGetErrorName(err), hipGetErrorString(err)); exit(1); } } __global__ void cudaKernel(void) { printf("GPU says hello.\n"); } int main(void) { printf("CPU says hello.\n"); checkCudaError(hipLaunchKernel((void*)cudaKernel, 1, 1, NULL, 0, NULL)); checkCudaError(hipDeviceSynchronize()); return 0; }
1203ab9a7034363ba6e636613519df75f3ca4c1f.cu
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved. // // This work is made available under the Nvidia Source Code License-NC. // To view a copy of this license, visit // https://nvlabs.github.io/stylegan2/license.html // Cheng-Bin Jin re-implementation. #include <cstdio> void checkCudaError(cudaError_t err) { if (err != cudaSuccess) { printf("%s: %s\n", cudaGetErrorName(err), cudaGetErrorString(err)); exit(1); } } __global__ void cudaKernel(void) { printf("GPU says hello.\n"); } int main(void) { printf("CPU says hello.\n"); checkCudaError(cudaLaunchKernel((void*)cudaKernel, 1, 1, NULL, 0, NULL)); checkCudaError(cudaDeviceSynchronize()); return 0; }
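Aside, not part of the record above: the explicit cudaLaunchKernel call in this pair is interchangeable with the more common triple-chevron launch plus an error query; a minimal sketch reusing the kernel and helper defined in the file above.
// Equivalent launch of the zero-argument kernel above using <<<>>> syntax (illustration only):
cudaKernel<<<1, 1>>>();                      // enqueue 1 block of 1 thread
checkCudaError(cudaGetLastError());          // catches launch-configuration errors
checkCudaError(cudaDeviceSynchronize());     // catches runtime errors raised by the kernel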
343e191dd8654e7034e8bb10ad6ba788982d2e36.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kTanhDeriv.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; hipMalloc(&a, XSIZE*YSIZE); float *b = NULL; hipMalloc(&b, XSIZE*YSIZE); float *dest = NULL; hipMalloc(&dest, XSIZE*YSIZE); unsigned int numEls = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kTanhDeriv), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,dest,numEls); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kTanhDeriv), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,dest,numEls); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kTanhDeriv), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,dest,numEls); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
343e191dd8654e7034e8bb10ad6ba788982d2e36.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kTanhDeriv.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); float *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); float *dest = NULL; cudaMalloc(&dest, XSIZE*YSIZE); unsigned int numEls = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kTanhDeriv<<<gridBlock,threadBlock>>>(a,b,dest,numEls); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kTanhDeriv<<<gridBlock,threadBlock>>>(a,b,dest,numEls); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kTanhDeriv<<<gridBlock,threadBlock>>>(a,b,dest,numEls); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
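Aside, not part of the record above: because no synchronization follows the timed loop, the measurement can end before the 1000 launches are guaranteed to have completed, so it largely reflects launch overhead. A sketch of a variant that includes kernel completion, reusing the benchmark's own variables and chrono aliases (illustration only):
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    kTanhDeriv<<<gridBlock,threadBlock>>>(a,b,dest,numEls);
}
cudaDeviceSynchronize();   // wait until all queued launches have finished
auto usecs = duration_cast<duration<float, microseconds::period> >(steady_clock::now() - start);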
564e9af7b6e3362b1d908d8c86a12645c8137473.hip
// !!! This is a file automatically generated by hipify!!! #include <malloc.h> #include <stdio.h> #include <math.h> #include <assert.h> #include <string.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "le_core.h" #define s_ind_vx(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 0 * (blockDim.x + 4) * (blockDim.y + 4))) #define s_ind_vy(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 1 * (blockDim.x + 4) * (blockDim.y + 4))) #define s_ind_sxx(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 2 * (blockDim.x + 4) * (blockDim.y + 4))) #define s_ind_sxy(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 3 * (blockDim.x + 4) * (blockDim.y + 4))) #define s_ind_syy(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 4 * (blockDim.x + 4) * (blockDim.y + 4))) #ifdef DEBUG #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #else #define gpuErrchk(ans) { (ans); } #endif #define vnorm(v) (sqrt(v.x * v.x + v.y * v.y)) #ifdef USE_DOUBLE #define TVD2_EPS 1e-6 __device__ inline real le_min(real a, real b) { return a > b ? b : a; } __device__ inline real le_max(real a, real b) { return a > b ? a : b; } __device__ inline real le_max3(real a, real b, real c) { return le_max(a, le_max(b, c)); } #define limiter_minmod(r) (le_max(0.0, le_min(1.0, (r)))) #define limiter_cir(r) (0.0) #define limiter_superbee(r) (le_max3(0.0, le_min(1.0, 2.0 * r), le_min(2.0, r))) #else #define TVD2_EPS 1e-6f __device__ inline real le_min(real a, real b) { return a > b ? b : a; } __device__ inline real le_max(real a, real b) { return a > b ? a : b; } __device__ inline real le_max3(real a, real b, real c) { return le_max(a, le_max(b, c)); } #define limiter_minmod(r) (le_max(0.0f, le_min(1.0f, (r)))) #define limiter_cir(r) (0.0f) #define limiter_superbee(r) (le_max3(0.0f, le_min(1.0f, 2.0f * r), le_min(2.0f, r))) #endif #define limiter limiter_superbee __device__ inline real tvd2(const real c, const real u_2, const real u_1, const real u, const real u1) { const real eps = TVD2_EPS; real r1 = u - u_1; r1 += eps; real r2 = u1 - u; r2 += eps; const real r = r1 / r2; real r_1 = u_1 - u_2; r_1 += eps; r_1 /= r1; real f12 = r2 * limiter(r); real f_12 = r1 * limiter(r_1); const real k = 0.5 * (1 - c); return c * ((f_12 - f12) * k - r1); } void le_set_ball(le_task *t, const le_vec2 c, const real r, const real s) { int i, j; for (i = 0; i < t->n.x; i++) { for (j = 0; j < t->n.y; j++) { le_vec2 x = {t->h.x * i, t->h.y * j}; le_vec2 d = {x.x - c.x, x.y - c.y}; if (vnorm(d) < r) { /* Set pressure disturbance */ int_t nx = t->n.x; int_t ny = t->n.y; real* grid = t->grid; ind_sxx(i, j) = s; ind_syy(i, j) = s; } } } } /* * Write float to file and reverse byte order. 
*/ void write_float(FILE* f, const float v) { union { float f; unsigned char b[4]; } dat1, dat2; dat1.f = v; dat2.b[0] = dat1.b[3]; dat2.b[1] = dat1.b[2]; dat2.b[2] = dat1.b[1]; dat2.b[3] = dat1.b[0]; fwrite(dat2.b, sizeof(unsigned char), 4, f); } void le_init_task(le_task *task, const real dt, const le_vec2 h, const le_material mat, const le_point2 n) { task->dt = dt; task->h = h; task->mat = mat; task->n = n; task->grid = (real*) malloc(sizeof(real) * NODE_SIZE * n.x * n.y); memset(task->grid, 0, sizeof(real) * NODE_SIZE * n.x * n.y); } void le_free_task(le_task* task) { free(task->grid); } int le_save_task(le_task *t, const char *file) { int i, j; FILE *fp = fopen(file, "w"); if (fp == NULL) { perror("Failed to open file"); return 1; } fprintf(fp, "# vtk DataFile Version 3.0\n"); fprintf(fp, "Created by le_save_task\n"); fprintf(fp, "BINARY\n"); fprintf(fp, "DATASET STRUCTURED_POINTS\n"); fprintf(fp, "DIMENSIONS %d %d 1\n", t->n.x, t->n.y); fprintf(fp, "SPACING %f %f 0.0\n", t->h.x, t->h.y); fprintf(fp, "ORIGIN 0.0 0.0 0.0\n"); fprintf(fp, "POINT_DATA %d\n", t->n.x * t->n.y); /* velocity */ fprintf(fp, "SCALARS v float 1\n"); fprintf(fp, "LOOKUP_TABLE v_table\n"); for (j = 0; j < t->n.y; j++) { for (i = 0; i < t->n.x; i++) { float v; const int_t nx = t->n.x; const int_t ny = t->n.y; const real* grid = t->grid; le_vec2 vt = { ind_vx(i, j), ind_vy(i, j) }; v = vnorm(vt); write_float(fp, v); } } /* * You can use the same code for saving other variables. */ fclose(fp); return 0; } void le_init_material(const real c1, const real c2, const real rho, le_material *m) { m->c1 = c1; m->c2 = c2; m->rho = rho; /* Cached values. */ m->irhoc1 = 1.0 / (c1 * rho); m->irhoc2 = 1.0 / (c2 * rho); m->rhoc1 = c1 * rho; m->rhoc2 = c2 * rho; real mu = rho * c2 * c2; real la = rho * c1 * c1 - 2.0 * mu; m->rhoc3 = rho * c1 * la / (la + 2.0 * mu); } __device__ inline void omega_x(const le_material *m, const real vx, const real vy, const real sxx, const real sxy, le_w *w) { const real N00T = sxx * m->irhoc1; const real N01T = sxy * m->irhoc2; w->w1 = vx - N00T; w->w2 = vx + N00T; w->w3 = vy - N01T; w->w4 = vy + N01T; } __device__ inline void omega_y(const le_material *m, const real vx, const real vy, const real sxy, const real syy, le_w *w) { const real N00T = syy * m->irhoc1; const real N01T = sxy * m->irhoc2; w->w1 = vy - N00T; w->w2 = vy + N00T; w->w3 = vx - N01T; w->w4 = vx + N01T; } __device__ inline void inc_x(const le_material *m, const real vx, const real vy, const real sxx, const real sxy, const real syy, real* new_vx, real* new_vy, real* new_sxx, real* new_sxy, real* new_syy, const le_w *d) { const real d1 = 0.5 * d->w1; const real d2 = 0.5 * d->w2; const real d3 = 0.5 * d->w3; const real d4 = 0.5 * d->w4; *new_vx = vx + d1 + d2; *new_vy = vy + d3 + d4; *new_syy = syy + (d2 - d1) * m->rhoc3; *new_sxx = sxx + (d2 - d1) * m->rhoc1; *new_sxy = sxy + (d4 - d3) * m->rhoc2; } __device__ inline void inc_y(const le_material *m, const real vx, const real vy, const real sxx, const real sxy, const real syy, real* new_vx, real* new_vy, real* new_sxx, real* new_sxy, real* new_syy, const le_w *d) { const real d1 = 0.5 * d->w1; const real d2 = 0.5 * d->w2; const real d3 = 0.5 * d->w3; const real d4 = 0.5 * d->w4; *new_vy = vy + d1 + d2; *new_vx = vx + d3 + d4; *new_syy = syy + (d2 - d1) * m->rhoc1; *new_sxx = sxx + (d2 - d1) * m->rhoc3; *new_sxy = sxy + (d4 - d3) * m->rhoc2; } __device__ inline void write_new_values(real* vx, real* vy, real* sxx, real* sxy, real* syy, const real new_vx, const real new_vy, const real 
new_sxx, const real new_sxy, const real new_syy) { *vx = new_vx; *vy = new_vy; *sxx = new_sxx; *sxy = new_sxy; *syy = new_syy; } __device__ inline void reconstruct(const le_w ppu, const le_w pu, const le_w u, const le_w nu, const le_w nnu, const real k1, const real k2, le_w *d) { d->w1 = tvd2(k1, ppu.w1, pu.w1, u.w1, nu.w1); // c1 d->w2 = tvd2(k1, nnu.w2, nu.w2, u.w2, pu.w2); // -c1 d->w3 = tvd2(k2, ppu.w3, pu.w3, u.w3, nu.w3); // c2 d->w4 = tvd2(k2, nnu.w4, nu.w4, u.w4, pu.w4); // -c2 } __device__ inline real g_ind(const real* grid, int i, int j, const int nx, const int ny, const int node) { // TODO it works only with SOA if (i < 0) i = 0; if (j < 0) j = 0; if (i >= nx) i = nx - 1; if (j >= ny) j = ny - 1; return (*(grid + (i) + (j) * nx + node * nx * ny)); } extern __shared__ real shared_grid[]; __global__ void le_step_x(le_task *t, real* in_grid, real* out_grid) { const int i = threadIdx.x + blockDim.x * blockIdx.x; const int j = threadIdx.y + blockDim.y * blockIdx.y; const int li = threadIdx.x; const int lj = threadIdx.y; const real k1 = t->dt * t->mat.c1 / t->h.y; const real k2 = t->dt * t->mat.c2 / t->h.y; const int nx = t->n.x; const int ny = t->n.y; real* grid = in_grid; real vx, vy, sxx, sxy, syy; le_w w_2, w_1, w, w1, w2, d; s_ind_vx(li, lj) = g_ind(grid, i, j, nx, ny, 0); s_ind_vy(li, lj) = g_ind(grid, i, j, nx, ny, 1); s_ind_sxx(li, lj) = g_ind(grid, i, j, nx, ny, 2); s_ind_sxy(li, lj) = g_ind(grid, i, j, nx, ny, 3); s_ind_syy(li, lj) = g_ind(grid, i, j, nx, ny, 4); if (li < 2) { s_ind_vx(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 0); s_ind_vy(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 1); s_ind_sxx(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 2); s_ind_sxy(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 3); s_ind_syy(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 4); } else if (li >= blockDim.x - 2) { s_ind_vx(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 0); s_ind_vy(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 1); s_ind_sxx(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 2); s_ind_sxy(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 3); s_ind_syy(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 4); } __syncthreads(); if (i >= nx || j >= ny) return; omega_x(&t->mat, s_ind_vx(li-2, lj), s_ind_vy(li-2, lj), s_ind_sxx(li-2, lj), s_ind_sxy(li-2, lj), &w_2); omega_x(&t->mat, s_ind_vx(li-1, lj), s_ind_vy(li-1, lj), s_ind_sxx(li-1, lj), s_ind_sxy(li-1, lj), &w_1); omega_x(&t->mat, s_ind_vx(li, lj), s_ind_vy(li, lj), s_ind_sxx(li, lj), s_ind_sxy(li, lj), &w); omega_x(&t->mat, s_ind_vx(li+1, lj), s_ind_vy(li+1, lj), s_ind_sxx(li+1, lj), s_ind_sxy(li+1, lj), &w1); omega_x(&t->mat, s_ind_vx(li+2, lj), s_ind_vy(li+2, lj), s_ind_sxx(li+2, lj), s_ind_sxy(li+2, lj), &w2); reconstruct(w_2, w_1, w, w1, w2, k1, k2, &d); inc_x(&t->mat, s_ind_vx(li,lj), s_ind_vy(li,lj), s_ind_sxx(li,lj), s_ind_sxy(li,lj), s_ind_syy(li,lj), &vx, &vy, &sxx, &sxy, &syy, &d); grid = out_grid; write_new_values(&ind_vx(i,j), &ind_vy(i,j), &ind_sxx(i,j), &ind_sxy(i,j), &ind_syy(i,j), vx, vy, sxx, sxy, syy); } __global__ void le_step_y(le_task *t, real* in_grid, real* out_grid) { const int i = threadIdx.x + blockDim.x * blockIdx.x; const int j = threadIdx.y + blockDim.y * blockIdx.y; const int li = threadIdx.x; const int lj = threadIdx.y; const real k1 = t->dt * t->mat.c1 / t->h.y; const real k2 = t->dt * t->mat.c2 / t->h.y; const int nx = t->n.x; const int ny = t->n.y; real* grid = in_grid; real vx, vy, sxx, sxy, syy; le_w w_2, w_1, w, w1, w2, d; s_ind_vx(li, lj) = g_ind(grid, i, j, nx, ny, 0); s_ind_vy(li, lj) = g_ind(grid, i, j, nx, ny, 
1); s_ind_sxx(li, lj) = g_ind(grid, i, j, nx, ny, 2); s_ind_sxy(li, lj) = g_ind(grid, i, j, nx, ny, 3); s_ind_syy(li, lj) = g_ind(grid, i, j, nx, ny, 4); if (lj < 2) { s_ind_vx(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 0); s_ind_vy(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 1); s_ind_sxx(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 2); s_ind_sxy(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 3); s_ind_syy(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 4); } else if (lj >= blockDim.y - 2) { s_ind_vx(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 0); s_ind_vy(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 1); s_ind_sxx(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 2); s_ind_sxy(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 3); s_ind_syy(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 4); } __syncthreads(); if (i >= nx || j >= ny) return; omega_y(&t->mat, s_ind_vx(li, lj-2), s_ind_vy(li, lj-2), s_ind_sxy(li, lj-2), s_ind_syy(li, lj-2), &w_2); omega_y(&t->mat, s_ind_vx(li, lj-1), s_ind_vy(li, lj-1), s_ind_sxy(li, lj-1), s_ind_syy(li, lj-1), &w_1); omega_y(&t->mat, s_ind_vx(li, lj), s_ind_vy(li, lj), s_ind_sxy(li, lj), s_ind_syy(li, lj), &w); omega_y(&t->mat, s_ind_vx(li, lj+1), s_ind_vy(li, lj+1), s_ind_sxy(li, lj+1), s_ind_syy(li, lj+1), &w1); omega_y(&t->mat, s_ind_vx(li, lj+2), s_ind_vy(li, lj+2), s_ind_sxy(li, lj+2), s_ind_syy(li, lj+2), &w2); reconstruct(w_2, w_1, w, w1, w2, k1, k2, &d); inc_y(&t->mat, s_ind_vx(li,lj), s_ind_vy(li,lj), s_ind_sxx(li,lj), s_ind_sxy(li,lj), s_ind_syy(li,lj), &vx, &vy, &sxx, &sxy, &syy, &d); grid = out_grid; write_new_values(&ind_vx(i,j), &ind_vy(i,j), &ind_sxx(i,j), &ind_sxy(i,j), &ind_syy(i,j), vx, vy, sxx, sxy, syy); } double le_step(le_task *task, int steps) { int nx = task->n.x; int ny = task->n.y; // set sizes of blocks on gpu int threads_width = 16; dim3 threadsPerBlock(threads_width, threads_width); dim3 blocksPerGrid((nx + threads_width - 1) / threads_width, (ny + threads_width - 1) / threads_width); int sharedMemSize = (threads_width + 4) * (threads_width + 4) * NODE_SIZE * sizeof(real); int grid_size = sizeof(real) * NODE_SIZE * nx * ny; int task_size = sizeof(le_task); le_task* d_task; real* d_grid1; real* d_grid2; double t; // allocate memory on gpu gpuErrchk(hipMalloc(&d_task, task_size)); gpuErrchk(hipMalloc(&d_grid1, grid_size)); gpuErrchk(hipMalloc(&d_grid2, grid_size)); gpuErrchk(hipMemcpy(d_task, task, task_size, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_grid1, task->grid, grid_size, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_grid2, task->grid, grid_size, hipMemcpyHostToDevice)); hipDeviceSynchronize(); t = timer(); // run kernel for (int i = 0; i < steps; i++) { hipLaunchKernelGGL(( le_step_x), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemSize, 0, d_task, d_grid1, d_grid2); gpuErrchk( hipPeekAtLastError() ); hipLaunchKernelGGL(( le_step_y), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMemSize, 0, d_task, d_grid2, d_grid1); gpuErrchk( hipPeekAtLastError() ); } gpuErrchk(hipDeviceSynchronize()); t = timer() - t; // drop data to host gpuErrchk(hipMemcpy(task->grid, d_grid1, grid_size, hipMemcpyDeviceToHost)); hipFree(d_grid1); hipFree(d_grid2); hipFree(d_task); return t; }
564e9af7b6e3362b1d908d8c86a12645c8137473.cu
#include <malloc.h> #include <stdio.h> #include <math.h> #include <assert.h> #include <string.h> #include <cuda.h> #include <cuda_runtime.h> #include "le_core.h" #define s_ind_vx(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 0 * (blockDim.x + 4) * (blockDim.y + 4))) #define s_ind_vy(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 1 * (blockDim.x + 4) * (blockDim.y + 4))) #define s_ind_sxx(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 2 * (blockDim.x + 4) * (blockDim.y + 4))) #define s_ind_sxy(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 3 * (blockDim.x + 4) * (blockDim.y + 4))) #define s_ind_syy(i, j) (*(shared_grid + ((i)+2) + ((j)+2) * (blockDim.x + 4) + 4 * (blockDim.x + 4) * (blockDim.y + 4))) #ifdef DEBUG #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #else #define gpuErrchk(ans) { (ans); } #endif #define vnorm(v) (sqrt(v.x * v.x + v.y * v.y)) #ifdef USE_DOUBLE #define TVD2_EPS 1e-6 __device__ inline real le_min(real a, real b) { return a > b ? b : a; } __device__ inline real le_max(real a, real b) { return a > b ? a : b; } __device__ inline real le_max3(real a, real b, real c) { return le_max(a, le_max(b, c)); } #define limiter_minmod(r) (le_max(0.0, le_min(1.0, (r)))) #define limiter_cir(r) (0.0) #define limiter_superbee(r) (le_max3(0.0, le_min(1.0, 2.0 * r), le_min(2.0, r))) #else #define TVD2_EPS 1e-6f __device__ inline real le_min(real a, real b) { return a > b ? b : a; } __device__ inline real le_max(real a, real b) { return a > b ? a : b; } __device__ inline real le_max3(real a, real b, real c) { return le_max(a, le_max(b, c)); } #define limiter_minmod(r) (le_max(0.0f, le_min(1.0f, (r)))) #define limiter_cir(r) (0.0f) #define limiter_superbee(r) (le_max3(0.0f, le_min(1.0f, 2.0f * r), le_min(2.0f, r))) #endif #define limiter limiter_superbee __device__ inline real tvd2(const real c, const real u_2, const real u_1, const real u, const real u1) { const real eps = TVD2_EPS; real r1 = u - u_1; r1 += eps; real r2 = u1 - u; r2 += eps; const real r = r1 / r2; real r_1 = u_1 - u_2; r_1 += eps; r_1 /= r1; real f12 = r2 * limiter(r); real f_12 = r1 * limiter(r_1); const real k = 0.5 * (1 - c); return c * ((f_12 - f12) * k - r1); } void le_set_ball(le_task *t, const le_vec2 c, const real r, const real s) { int i, j; for (i = 0; i < t->n.x; i++) { for (j = 0; j < t->n.y; j++) { le_vec2 x = {t->h.x * i, t->h.y * j}; le_vec2 d = {x.x - c.x, x.y - c.y}; if (vnorm(d) < r) { /* Set pressure disturbance */ int_t nx = t->n.x; int_t ny = t->n.y; real* grid = t->grid; ind_sxx(i, j) = s; ind_syy(i, j) = s; } } } } /* * Write float to file and reverse byte order. 
*/ void write_float(FILE* f, const float v) { union { float f; unsigned char b[4]; } dat1, dat2; dat1.f = v; dat2.b[0] = dat1.b[3]; dat2.b[1] = dat1.b[2]; dat2.b[2] = dat1.b[1]; dat2.b[3] = dat1.b[0]; fwrite(dat2.b, sizeof(unsigned char), 4, f); } void le_init_task(le_task *task, const real dt, const le_vec2 h, const le_material mat, const le_point2 n) { task->dt = dt; task->h = h; task->mat = mat; task->n = n; task->grid = (real*) malloc(sizeof(real) * NODE_SIZE * n.x * n.y); memset(task->grid, 0, sizeof(real) * NODE_SIZE * n.x * n.y); } void le_free_task(le_task* task) { free(task->grid); } int le_save_task(le_task *t, const char *file) { int i, j; FILE *fp = fopen(file, "w"); if (fp == NULL) { perror("Failed to open file"); return 1; } fprintf(fp, "# vtk DataFile Version 3.0\n"); fprintf(fp, "Created by le_save_task\n"); fprintf(fp, "BINARY\n"); fprintf(fp, "DATASET STRUCTURED_POINTS\n"); fprintf(fp, "DIMENSIONS %d %d 1\n", t->n.x, t->n.y); fprintf(fp, "SPACING %f %f 0.0\n", t->h.x, t->h.y); fprintf(fp, "ORIGIN 0.0 0.0 0.0\n"); fprintf(fp, "POINT_DATA %d\n", t->n.x * t->n.y); /* velocity */ fprintf(fp, "SCALARS v float 1\n"); fprintf(fp, "LOOKUP_TABLE v_table\n"); for (j = 0; j < t->n.y; j++) { for (i = 0; i < t->n.x; i++) { float v; const int_t nx = t->n.x; const int_t ny = t->n.y; const real* grid = t->grid; le_vec2 vt = { ind_vx(i, j), ind_vy(i, j) }; v = vnorm(vt); write_float(fp, v); } } /* * You can use the same code for saving other variables. */ fclose(fp); return 0; } void le_init_material(const real c1, const real c2, const real rho, le_material *m) { m->c1 = c1; m->c2 = c2; m->rho = rho; /* Cached values. */ m->irhoc1 = 1.0 / (c1 * rho); m->irhoc2 = 1.0 / (c2 * rho); m->rhoc1 = c1 * rho; m->rhoc2 = c2 * rho; real mu = rho * c2 * c2; real la = rho * c1 * c1 - 2.0 * mu; m->rhoc3 = rho * c1 * la / (la + 2.0 * mu); } __device__ inline void omega_x(const le_material *m, const real vx, const real vy, const real sxx, const real sxy, le_w *w) { const real N00T = sxx * m->irhoc1; const real N01T = sxy * m->irhoc2; w->w1 = vx - N00T; w->w2 = vx + N00T; w->w3 = vy - N01T; w->w4 = vy + N01T; } __device__ inline void omega_y(const le_material *m, const real vx, const real vy, const real sxy, const real syy, le_w *w) { const real N00T = syy * m->irhoc1; const real N01T = sxy * m->irhoc2; w->w1 = vy - N00T; w->w2 = vy + N00T; w->w3 = vx - N01T; w->w4 = vx + N01T; } __device__ inline void inc_x(const le_material *m, const real vx, const real vy, const real sxx, const real sxy, const real syy, real* new_vx, real* new_vy, real* new_sxx, real* new_sxy, real* new_syy, const le_w *d) { const real d1 = 0.5 * d->w1; const real d2 = 0.5 * d->w2; const real d3 = 0.5 * d->w3; const real d4 = 0.5 * d->w4; *new_vx = vx + d1 + d2; *new_vy = vy + d3 + d4; *new_syy = syy + (d2 - d1) * m->rhoc3; *new_sxx = sxx + (d2 - d1) * m->rhoc1; *new_sxy = sxy + (d4 - d3) * m->rhoc2; } __device__ inline void inc_y(const le_material *m, const real vx, const real vy, const real sxx, const real sxy, const real syy, real* new_vx, real* new_vy, real* new_sxx, real* new_sxy, real* new_syy, const le_w *d) { const real d1 = 0.5 * d->w1; const real d2 = 0.5 * d->w2; const real d3 = 0.5 * d->w3; const real d4 = 0.5 * d->w4; *new_vy = vy + d1 + d2; *new_vx = vx + d3 + d4; *new_syy = syy + (d2 - d1) * m->rhoc1; *new_sxx = sxx + (d2 - d1) * m->rhoc3; *new_sxy = sxy + (d4 - d3) * m->rhoc2; } __device__ inline void write_new_values(real* vx, real* vy, real* sxx, real* sxy, real* syy, const real new_vx, const real new_vy, const real 
new_sxx, const real new_sxy, const real new_syy) { *vx = new_vx; *vy = new_vy; *sxx = new_sxx; *sxy = new_sxy; *syy = new_syy; } __device__ inline void reconstruct(const le_w ppu, const le_w pu, const le_w u, const le_w nu, const le_w nnu, const real k1, const real k2, le_w *d) { d->w1 = tvd2(k1, ppu.w1, pu.w1, u.w1, nu.w1); // c1 d->w2 = tvd2(k1, nnu.w2, nu.w2, u.w2, pu.w2); // -c1 d->w3 = tvd2(k2, ppu.w3, pu.w3, u.w3, nu.w3); // c2 d->w4 = tvd2(k2, nnu.w4, nu.w4, u.w4, pu.w4); // -c2 } __device__ inline real g_ind(const real* grid, int i, int j, const int nx, const int ny, const int node) { // TODO it works only with SOA if (i < 0) i = 0; if (j < 0) j = 0; if (i >= nx) i = nx - 1; if (j >= ny) j = ny - 1; return (*(grid + (i) + (j) * nx + node * nx * ny)); } extern __shared__ real shared_grid[]; __global__ void le_step_x(le_task *t, real* in_grid, real* out_grid) { const int i = threadIdx.x + blockDim.x * blockIdx.x; const int j = threadIdx.y + blockDim.y * blockIdx.y; const int li = threadIdx.x; const int lj = threadIdx.y; const real k1 = t->dt * t->mat.c1 / t->h.y; const real k2 = t->dt * t->mat.c2 / t->h.y; const int nx = t->n.x; const int ny = t->n.y; real* grid = in_grid; real vx, vy, sxx, sxy, syy; le_w w_2, w_1, w, w1, w2, d; s_ind_vx(li, lj) = g_ind(grid, i, j, nx, ny, 0); s_ind_vy(li, lj) = g_ind(grid, i, j, nx, ny, 1); s_ind_sxx(li, lj) = g_ind(grid, i, j, nx, ny, 2); s_ind_sxy(li, lj) = g_ind(grid, i, j, nx, ny, 3); s_ind_syy(li, lj) = g_ind(grid, i, j, nx, ny, 4); if (li < 2) { s_ind_vx(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 0); s_ind_vy(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 1); s_ind_sxx(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 2); s_ind_sxy(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 3); s_ind_syy(li - 2, lj) = g_ind(grid, i - 2, j, nx, ny, 4); } else if (li >= blockDim.x - 2) { s_ind_vx(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 0); s_ind_vy(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 1); s_ind_sxx(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 2); s_ind_sxy(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 3); s_ind_syy(li + 2, lj) = g_ind(grid, i + 2, j, nx, ny, 4); } __syncthreads(); if (i >= nx || j >= ny) return; omega_x(&t->mat, s_ind_vx(li-2, lj), s_ind_vy(li-2, lj), s_ind_sxx(li-2, lj), s_ind_sxy(li-2, lj), &w_2); omega_x(&t->mat, s_ind_vx(li-1, lj), s_ind_vy(li-1, lj), s_ind_sxx(li-1, lj), s_ind_sxy(li-1, lj), &w_1); omega_x(&t->mat, s_ind_vx(li, lj), s_ind_vy(li, lj), s_ind_sxx(li, lj), s_ind_sxy(li, lj), &w); omega_x(&t->mat, s_ind_vx(li+1, lj), s_ind_vy(li+1, lj), s_ind_sxx(li+1, lj), s_ind_sxy(li+1, lj), &w1); omega_x(&t->mat, s_ind_vx(li+2, lj), s_ind_vy(li+2, lj), s_ind_sxx(li+2, lj), s_ind_sxy(li+2, lj), &w2); reconstruct(w_2, w_1, w, w1, w2, k1, k2, &d); inc_x(&t->mat, s_ind_vx(li,lj), s_ind_vy(li,lj), s_ind_sxx(li,lj), s_ind_sxy(li,lj), s_ind_syy(li,lj), &vx, &vy, &sxx, &sxy, &syy, &d); grid = out_grid; write_new_values(&ind_vx(i,j), &ind_vy(i,j), &ind_sxx(i,j), &ind_sxy(i,j), &ind_syy(i,j), vx, vy, sxx, sxy, syy); } __global__ void le_step_y(le_task *t, real* in_grid, real* out_grid) { const int i = threadIdx.x + blockDim.x * blockIdx.x; const int j = threadIdx.y + blockDim.y * blockIdx.y; const int li = threadIdx.x; const int lj = threadIdx.y; const real k1 = t->dt * t->mat.c1 / t->h.y; const real k2 = t->dt * t->mat.c2 / t->h.y; const int nx = t->n.x; const int ny = t->n.y; real* grid = in_grid; real vx, vy, sxx, sxy, syy; le_w w_2, w_1, w, w1, w2, d; s_ind_vx(li, lj) = g_ind(grid, i, j, nx, ny, 0); s_ind_vy(li, lj) = g_ind(grid, i, j, nx, ny, 
1); s_ind_sxx(li, lj) = g_ind(grid, i, j, nx, ny, 2); s_ind_sxy(li, lj) = g_ind(grid, i, j, nx, ny, 3); s_ind_syy(li, lj) = g_ind(grid, i, j, nx, ny, 4); if (lj < 2) { s_ind_vx(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 0); s_ind_vy(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 1); s_ind_sxx(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 2); s_ind_sxy(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 3); s_ind_syy(li, lj - 2) = g_ind(grid, i, j - 2, nx, ny, 4); } else if (lj >= blockDim.y - 2) { s_ind_vx(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 0); s_ind_vy(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 1); s_ind_sxx(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 2); s_ind_sxy(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 3); s_ind_syy(li, lj + 2) = g_ind(grid, i, j + 2, nx, ny, 4); } __syncthreads(); if (i >= nx || j >= ny) return; omega_y(&t->mat, s_ind_vx(li, lj-2), s_ind_vy(li, lj-2), s_ind_sxy(li, lj-2), s_ind_syy(li, lj-2), &w_2); omega_y(&t->mat, s_ind_vx(li, lj-1), s_ind_vy(li, lj-1), s_ind_sxy(li, lj-1), s_ind_syy(li, lj-1), &w_1); omega_y(&t->mat, s_ind_vx(li, lj), s_ind_vy(li, lj), s_ind_sxy(li, lj), s_ind_syy(li, lj), &w); omega_y(&t->mat, s_ind_vx(li, lj+1), s_ind_vy(li, lj+1), s_ind_sxy(li, lj+1), s_ind_syy(li, lj+1), &w1); omega_y(&t->mat, s_ind_vx(li, lj+2), s_ind_vy(li, lj+2), s_ind_sxy(li, lj+2), s_ind_syy(li, lj+2), &w2); reconstruct(w_2, w_1, w, w1, w2, k1, k2, &d); inc_y(&t->mat, s_ind_vx(li,lj), s_ind_vy(li,lj), s_ind_sxx(li,lj), s_ind_sxy(li,lj), s_ind_syy(li,lj), &vx, &vy, &sxx, &sxy, &syy, &d); grid = out_grid; write_new_values(&ind_vx(i,j), &ind_vy(i,j), &ind_sxx(i,j), &ind_sxy(i,j), &ind_syy(i,j), vx, vy, sxx, sxy, syy); } double le_step(le_task *task, int steps) { int nx = task->n.x; int ny = task->n.y; // set sizes of blocks on gpu int threads_width = 16; dim3 threadsPerBlock(threads_width, threads_width); dim3 blocksPerGrid((nx + threads_width - 1) / threads_width, (ny + threads_width - 1) / threads_width); int sharedMemSize = (threads_width + 4) * (threads_width + 4) * NODE_SIZE * sizeof(real); int grid_size = sizeof(real) * NODE_SIZE * nx * ny; int task_size = sizeof(le_task); le_task* d_task; real* d_grid1; real* d_grid2; double t; // allocate memory on gpu gpuErrchk(cudaMalloc(&d_task, task_size)); gpuErrchk(cudaMalloc(&d_grid1, grid_size)); gpuErrchk(cudaMalloc(&d_grid2, grid_size)); gpuErrchk(cudaMemcpy(d_task, task, task_size, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_grid1, task->grid, grid_size, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_grid2, task->grid, grid_size, cudaMemcpyHostToDevice)); cudaDeviceSynchronize(); t = timer(); // run kernel for (int i = 0; i < steps; i++) { le_step_x<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_task, d_grid1, d_grid2); gpuErrchk( cudaPeekAtLastError() ); le_step_y<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_task, d_grid2, d_grid1); gpuErrchk( cudaPeekAtLastError() ); } gpuErrchk(cudaDeviceSynchronize()); t = timer() - t; // drop data to host gpuErrchk(cudaMemcpy(task->grid, d_grid1, grid_size, cudaMemcpyDeviceToHost)); cudaFree(d_grid1); cudaFree(d_grid2); cudaFree(d_task); return t; }
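Aside, illustration only: the dynamic shared-memory convention used by le_step_x and le_step_y above pairs an unsized extern __shared__ array in the kernel with a byte count passed as the third launch parameter. A minimal standalone sketch with hypothetical names (copy_tile, tile, d_in, d_out), not taken from the file above:
extern __shared__ float tile[];                        // sized at launch time, not at compile time
__global__ void copy_tile(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    tile[threadIdx.x] = (i < n) ? in[i] : 0.0f;        // stage the element through shared memory
    __syncthreads();
    if (i < n) out[i] = tile[threadIdx.x];
}
// host side: the byte count must cover what the kernel indexes into, e.g.
//   size_t bytes = block.x * sizeof(float);
//   copy_tile<<<grid, block, bytes>>>(d_in, d_out, n);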
60abfc4a952c31d3dcb875313bb1bed4f39227df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <gflags/gflags.h> #include <groute/device/cta_scheduler.cuh> #include <groute/graphs/csr_graph.h> #include <groute/dwl/distributed_worklist.cuh> #include <groute/dwl/workers.cuh> #include <utils/graphs/traversal.h> #include "bfs_common.h" DEFINE_int32(source_node, 0, "The source node for the BFS traversal (clamped to [0, nnodes-1])"); const level_t INF = UINT_MAX; namespace bfs { struct LevelData { index_t node; level_t level; __device__ __host__ __forceinline__ LevelData(index_t node, level_t level) : node(node), level(level) { } __device__ __host__ __forceinline__ LevelData() : node(INF), level(INF) { } }; typedef index_t local_work_t; typedef LevelData remote_work_t; __global__ void BFSMemsetKernel(level_t* levels, int nnodes) { int tid = TID_1D; if (tid < nnodes) { levels[tid] = INF; } } template<bool CTAScheduling = true> /// BFS work with Collective Thread Array scheduling for exploiting nested parallelism struct BFSWork { template<typename WorkSource, typename WorkTarget, typename TGraph, typename TGraphDatum> __device__ static void work( const WorkSource& work_source, WorkTarget& work_target, const TGraph& graph, TGraphDatum& levels_datum ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<level_t> np_local = { 0, 0, 0 }; if (i < work_size) { index_t node = work_source.get_work(i); np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = 
levels_datum.get_item(node) + 1; } groute::dev::CTAWorkScheduler<level_t>::template schedule( np_local, [&work_target, &graph, &levels_datum](index_t edge, level_t next_level) { index_t dest = graph.edge_dest(edge); if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level)) { work_target.append_work(LevelData(dest, next_level)); } } ); } } }; template<> /// BFS work without CTA support struct BFSWork< false > { template<typename WorkSource, typename WorkTarget, typename TGraph, typename TGraphDatum> __device__ static void work( const WorkSource& work_source, WorkTarget& work_target, const TGraph& graph, TGraphDatum& levels_datum ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.get_work(i); level_t next_level = levels_datum.get_item(node) + 1; for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level)) { work_target.append_work(LevelData(dest, next_level)); } } } } }; struct DWCallbacks { private: groute::graphs::dev::CSRGraphSeg m_graph_seg; groute::graphs::dev::GraphDatum<level_t> m_levels_datum; public: template<typename...UnusedData> DWCallbacks(const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<level_t>& levels_datum, UnusedData&... data) : m_graph_seg(graph_seg), m_levels_datum(levels_datum) { } DWCallbacks() { } __device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t& work) { if (m_graph_seg.owns(work.node)) { return (work.level < atomicMin(m_levels_datum.get_item_ptr(work.node), work.level)) ? groute::SF_Take : groute::SF_None; // filter } return groute::SF_Pass; } __device__ __forceinline__ bool should_defer(const local_work_t& work, const level_t& global_threshold) { return m_levels_datum[work] > global_threshold; } __device__ __forceinline__ groute::SplitFlags on_send(local_work_t work) { return (m_graph_seg.owns(work)) ? 
groute::SF_Take : groute::SF_Pass; } __device__ __forceinline__ remote_work_t pack(local_work_t work) { return LevelData(work, m_levels_datum.get_item(work)); } __device__ __forceinline__ local_work_t unpack(const remote_work_t& work) { return work.node; } }; struct Algo { static const char* NameLower() { return "bfs"; } static const char* Name() { return "BFS"; } static void HostInit( utils::traversal::Context<bfs::Algo>& context, groute::graphs::multi::CSRGraphAllocator& graph_manager, groute::IDistributedWorklist<local_work_t, remote_work_t>& distributed_worklist) { // Get a valid source_node from flag index_t source_node = min(max((index_t)0, (index_t)FLAGS_source_node), context.host_graph.nnodes - 1); // Map to the (possibly new) partitioned vertex space source_node = graph_manager.GetGraphPartitioner()->ReverseLookup(source_node); // Host endpoint for sending initial work groute::Endpoint host = groute::Endpoint::HostEndpoint(0); // Report the initial work distributed_worklist.ReportInitialWork(1, host); std::vector<remote_work_t> initial_work; initial_work.push_back(remote_work_t(source_node, 0)); distributed_worklist .GetLink(host) .Send(groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event()); } template<typename TGraph, typename TGraphDatum> static void DeviceMemset(groute::Stream& stream, TGraph graph, TGraphDatum levels_datum) { dim3 grid_dims, block_dims; KernelSizing(grid_dims, block_dims, levels_datum.size); hipLaunchKernelGGL(( BFSMemsetKernel) , dim3(grid_dims), dim3(block_dims), 0, stream.cuda_stream , levels_datum.data_ptr, levels_datum.size); } template<typename TGraph, typename TGraphDatum> static void DeviceInit( groute::Endpoint endpoint, groute::Stream& stream, groute::IDistributedWorklist<local_work_t, remote_work_t>& distributed_worklist, groute::IDistributedWorklistPeer<local_work_t, remote_work_t, DWCallbacks>* peer, TGraph graph, TGraphDatum levels_datum) { } template<typename TGraphAllocator, typename TGraphDatum, typename...UnusedData> static const std::vector<level_t>& Gather(TGraphAllocator& graph_allocator, TGraphDatum& levels_datum, UnusedData&... data) { graph_allocator.GatherDatum(levels_datum); return levels_datum.GetHostData(); } template<typename...UnusedData> static std::vector<level_t> Host(groute::graphs::host::CSRGraph& graph, UnusedData&... 
data) { return BFSHost(graph, min(max((index_t)0, (index_t)FLAGS_source_node), graph.nnodes - 1)); } static int Output(const char *file, const std::vector<level_t>& levels) { return BFSOutput(file, levels); } static int CheckErrors(const std::vector<level_t>& levels, const std::vector<level_t>& regression) { return BFSCheckErrors(levels, regression); } }; using NodeLevelDatumType = groute::graphs::multi::NodeOutputGlobalDatum < level_t > ; template<bool IterationFusion = true, bool CTAScheduling = true> using FusedWorkerType = groute::FusedWorker < IterationFusion, local_work_t, remote_work_t, int, DWCallbacks, BFSWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, NodeLevelDatumType::DeviceObjectType> ; template<bool CTAScheduling = true> using WorkerType = groute::Worker < local_work_t, remote_work_t, DWCallbacks, BFSWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, NodeLevelDatumType::DeviceObjectType> ; template<typename TWorker> using RunnerType = utils::traversal::Runner < Algo, TWorker, DWCallbacks, local_work_t, remote_work_t, NodeLevelDatumType > ; } template<typename TWorker> bool TestBFSAsyncMultiTemplate(int ngpus) { bfs::RunnerType<TWorker> runner; bfs::NodeLevelDatumType levels_datum; return runner(ngpus, FLAGS_prio_delta, levels_datum); } bool TestBFSAsyncMultiOptimized(int ngpus) { return FLAGS_cta_np ? FLAGS_iteration_fusion ? TestBFSAsyncMultiTemplate< bfs::FusedWorkerType< true, true >>(ngpus) : TestBFSAsyncMultiTemplate< bfs::FusedWorkerType< false, true >>(ngpus) : FLAGS_iteration_fusion ? TestBFSAsyncMultiTemplate< bfs::FusedWorkerType< true, false >>(ngpus) : TestBFSAsyncMultiTemplate< bfs::FusedWorkerType< false, false >>(ngpus); } bool TestBFSAsyncMulti(int ngpus) { return FLAGS_cta_np ? TestBFSAsyncMultiTemplate< bfs::WorkerType< true >>(ngpus) : TestBFSAsyncMultiTemplate< bfs::WorkerType< false >>(ngpus); } bool TestBFSSingle() { return TestBFSAsyncMultiOptimized(1); }
60abfc4a952c31d3dcb875313bb1bed4f39227df.cu
// Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <gflags/gflags.h> #include <groute/device/cta_scheduler.cuh> #include <groute/graphs/csr_graph.h> #include <groute/dwl/distributed_worklist.cuh> #include <groute/dwl/workers.cuh> #include <utils/graphs/traversal.h> #include "bfs_common.h" DEFINE_int32(source_node, 0, "The source node for the BFS traversal (clamped to [0, nnodes-1])"); const level_t INF = UINT_MAX; namespace bfs { struct LevelData { index_t node; level_t level; __device__ __host__ __forceinline__ LevelData(index_t node, level_t level) : node(node), level(level) { } __device__ __host__ __forceinline__ LevelData() : node(INF), level(INF) { } }; typedef index_t local_work_t; typedef LevelData remote_work_t; __global__ void BFSMemsetKernel(level_t* levels, int nnodes) { int tid = TID_1D; if (tid < nnodes) { levels[tid] = INF; } } template<bool CTAScheduling = true> /// BFS work with Collective Thread Array scheduling for exploiting nested parallelism struct BFSWork { template<typename WorkSource, typename WorkTarget, typename TGraph, typename TGraphDatum> __device__ static void work( const WorkSource& work_source, WorkTarget& work_target, const TGraph& graph, TGraphDatum& levels_datum ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<level_t> np_local = { 0, 0, 0 }; if (i < work_size) { index_t node = work_source.get_work(i); np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = levels_datum.get_item(node) + 1; } groute::dev::CTAWorkScheduler<level_t>::template schedule( np_local, 
[&work_target, &graph, &levels_datum](index_t edge, level_t next_level) { index_t dest = graph.edge_dest(edge); if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level)) { work_target.append_work(LevelData(dest, next_level)); } } ); } } }; template<> /// BFS work without CTA support struct BFSWork< false > { template<typename WorkSource, typename WorkTarget, typename TGraph, typename TGraphDatum> __device__ static void work( const WorkSource& work_source, WorkTarget& work_target, const TGraph& graph, TGraphDatum& levels_datum ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.get_work(i); level_t next_level = levels_datum.get_item(node) + 1; for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); if (next_level < atomicMin(levels_datum.get_item_ptr(dest), next_level)) { work_target.append_work(LevelData(dest, next_level)); } } } } }; struct DWCallbacks { private: groute::graphs::dev::CSRGraphSeg m_graph_seg; groute::graphs::dev::GraphDatum<level_t> m_levels_datum; public: template<typename...UnusedData> DWCallbacks(const groute::graphs::dev::CSRGraphSeg& graph_seg, const groute::graphs::dev::GraphDatum<level_t>& levels_datum, UnusedData&... data) : m_graph_seg(graph_seg), m_levels_datum(levels_datum) { } DWCallbacks() { } __device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t& work) { if (m_graph_seg.owns(work.node)) { return (work.level < atomicMin(m_levels_datum.get_item_ptr(work.node), work.level)) ? groute::SF_Take : groute::SF_None; // filter } return groute::SF_Pass; } __device__ __forceinline__ bool should_defer(const local_work_t& work, const level_t& global_threshold) { return m_levels_datum[work] > global_threshold; } __device__ __forceinline__ groute::SplitFlags on_send(local_work_t work) { return (m_graph_seg.owns(work)) ? 
groute::SF_Take : groute::SF_Pass; } __device__ __forceinline__ remote_work_t pack(local_work_t work) { return LevelData(work, m_levels_datum.get_item(work)); } __device__ __forceinline__ local_work_t unpack(const remote_work_t& work) { return work.node; } }; struct Algo { static const char* NameLower() { return "bfs"; } static const char* Name() { return "BFS"; } static void HostInit( utils::traversal::Context<bfs::Algo>& context, groute::graphs::multi::CSRGraphAllocator& graph_manager, groute::IDistributedWorklist<local_work_t, remote_work_t>& distributed_worklist) { // Get a valid source_node from flag index_t source_node = min(max((index_t)0, (index_t)FLAGS_source_node), context.host_graph.nnodes - 1); // Map to the (possibly new) partitioned vertex space source_node = graph_manager.GetGraphPartitioner()->ReverseLookup(source_node); // Host endpoint for sending initial work groute::Endpoint host = groute::Endpoint::HostEndpoint(0); // Report the initial work distributed_worklist.ReportInitialWork(1, host); std::vector<remote_work_t> initial_work; initial_work.push_back(remote_work_t(source_node, 0)); distributed_worklist .GetLink(host) .Send(groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event()); } template<typename TGraph, typename TGraphDatum> static void DeviceMemset(groute::Stream& stream, TGraph graph, TGraphDatum levels_datum) { dim3 grid_dims, block_dims; KernelSizing(grid_dims, block_dims, levels_datum.size); BFSMemsetKernel <<< grid_dims, block_dims, 0, stream.cuda_stream >>>( levels_datum.data_ptr, levels_datum.size); } template<typename TGraph, typename TGraphDatum> static void DeviceInit( groute::Endpoint endpoint, groute::Stream& stream, groute::IDistributedWorklist<local_work_t, remote_work_t>& distributed_worklist, groute::IDistributedWorklistPeer<local_work_t, remote_work_t, DWCallbacks>* peer, TGraph graph, TGraphDatum levels_datum) { } template<typename TGraphAllocator, typename TGraphDatum, typename...UnusedData> static const std::vector<level_t>& Gather(TGraphAllocator& graph_allocator, TGraphDatum& levels_datum, UnusedData&... data) { graph_allocator.GatherDatum(levels_datum); return levels_datum.GetHostData(); } template<typename...UnusedData> static std::vector<level_t> Host(groute::graphs::host::CSRGraph& graph, UnusedData&... 
data) { return BFSHost(graph, min(max((index_t)0, (index_t)FLAGS_source_node), graph.nnodes - 1)); } static int Output(const char *file, const std::vector<level_t>& levels) { return BFSOutput(file, levels); } static int CheckErrors(const std::vector<level_t>& levels, const std::vector<level_t>& regression) { return BFSCheckErrors(levels, regression); } }; using NodeLevelDatumType = groute::graphs::multi::NodeOutputGlobalDatum < level_t > ; template<bool IterationFusion = true, bool CTAScheduling = true> using FusedWorkerType = groute::FusedWorker < IterationFusion, local_work_t, remote_work_t, int, DWCallbacks, BFSWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, NodeLevelDatumType::DeviceObjectType> ; template<bool CTAScheduling = true> using WorkerType = groute::Worker < local_work_t, remote_work_t, DWCallbacks, BFSWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, NodeLevelDatumType::DeviceObjectType> ; template<typename TWorker> using RunnerType = utils::traversal::Runner < Algo, TWorker, DWCallbacks, local_work_t, remote_work_t, NodeLevelDatumType > ; } template<typename TWorker> bool TestBFSAsyncMultiTemplate(int ngpus) { bfs::RunnerType<TWorker> runner; bfs::NodeLevelDatumType levels_datum; return runner(ngpus, FLAGS_prio_delta, levels_datum); } bool TestBFSAsyncMultiOptimized(int ngpus) { return FLAGS_cta_np ? FLAGS_iteration_fusion ? TestBFSAsyncMultiTemplate< bfs::FusedWorkerType< true, true >>(ngpus) : TestBFSAsyncMultiTemplate< bfs::FusedWorkerType< false, true >>(ngpus) : FLAGS_iteration_fusion ? TestBFSAsyncMultiTemplate< bfs::FusedWorkerType< true, false >>(ngpus) : TestBFSAsyncMultiTemplate< bfs::FusedWorkerType< false, false >>(ngpus); } bool TestBFSAsyncMulti(int ngpus) { return FLAGS_cta_np ? TestBFSAsyncMultiTemplate< bfs::WorkerType< true >>(ngpus) : TestBFSAsyncMultiTemplate< bfs::WorkerType< false >>(ngpus); } bool TestBFSSingle() { return TestBFSAsyncMultiOptimized(1); }
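The .hip/.cu pair above differs mainly in the runtime header and the kernel-launch syntax: the CUDA source launches with the triple-chevron form (e.g. `BFSMemsetKernel <<< grid_dims, block_dims, 0, stream.cuda_stream >>>(...)`), while the hipified source goes through `hipLaunchKernelGGL`. The following is a minimal, self-contained sketch of that mapping; `FillKernel` and its launch geometry are illustrative placeholders, not part of the Groute sources above.

#include <hip/hip_runtime.h>
#include <cstdio>

// Placeholder kernel; the point here is the launch syntax, not the body.
__global__ void FillKernel(int* data, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        data[tid] = tid;
}

int main()
{
    const int n = 256;
    int* d_data = nullptr;
    hipMalloc((void**)&d_data, n * sizeof(int));

    dim3 block_dims(128);
    dim3 grid_dims((n + block_dims.x - 1) / block_dims.x);

    // CUDA form:               FillKernel<<<grid_dims, block_dims, 0, 0>>>(d_data, n);
    // HIP form emitted by hipify:
    hipLaunchKernelGGL(FillKernel, grid_dims, block_dims, 0, 0, d_data, n);

    hipDeviceSynchronize();
    hipFree(d_data);
    return 0;
}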
f046d7c5511ed1087564f7204cb062dc3c32b444.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2015 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "rocblas.h" #include "../debug.h" /* macro for index calculations */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* matrix size and thread dimensions */ #define SIZE 1024 /* setup various hard-coded parameters for this kernel */ #define TBX 64 // Size of C this CTA is responsible for, x dimension #define TBY 64 // Size of C this CTA is responsible for, y dimension #define TX 16 // Thread block size, x dimension #define TY 16 // Thread block size, y dimension #define BK 16 // square block of K size #define NX 4 // = TBX/TX == number of iterations to do TBX work with TX blocks #define NY 4 // = TBY/TY == number of iterations to do TBY work with TY blocks __global__ void GPU_shmem2(const int m, double const * const a, double const * const b, double *c ) { /* setup some constants for later use */ const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * TBY; const int ibx = blockIdx.x * TBX; /* shared memory arrays for A and B */ __shared__ double as[ TBX ][ BK+1 ]; __shared__ double bs[ BK ][ TBY+1 ]; /* space for C to be held in registers */ double c_tmp[ NX ][ NY ] ; /* zero the temp C array */ #pragma unroll for ( int i = 0 ; i < NX ; i++) { for ( int j = 0 ; j < NY ; j++) { c_tmp[i][j] = 0.0; } } /* calculate my initial offset into A and B */ int aoff = INDX( ibx + tx, ty, m ); int boff = INDX( tx, iby + ty, m ); /* main loop over blocks of K */ for( int Kblock = 0; Kblock < m; Kblock+=BK ) { /* read block of A into shared memory */ #pragma unroll for ( int i = 0; i < NX ; i ++ ) { as[ tx + i * TX ][ ty ] = a[ (aoff + i*TX) ]; } /* read block of B into shared memory */ #pragma unroll for ( int i = 0; i < NY ; i ++ ) { bs[ tx ][ ty + TY * i ] = b[ (boff + m*i*TY) ]; } __syncthreads(); /* increment A and B offsets for next round of data reads */ boff += BK; aoff += m * BK; /* triply nested loop to perform the matmult on the blocks */ #pragma unroll for( int k = 0 ; k < BK ; k++ ) { #pragma unroll for (int j = 0 ; j < NY ; j++ ) { #pragma unroll for (int i = 0 ; i < NX ; i++ ) { c_tmp[ i ][ j ] += as[ tx + TX*i ][ k ] * bs[ k ][ ty + j*TY ]; } } } __syncthreads(); } /* end for Kblock */ /* set coff to its proper index int the C matrix */ int coff = INDX( ibx + tx, iby + ty, m ); /* write results to the C matrix */ #pragma unroll for ( int j = 0 ; j < NY ; j++ ) { #pragma unroll for ( int i = 0 ; i < NX ; i++ ) { c[ coff + INDX( TX * i, TY * j, m )] = c_tmp[i][j]; } } } /* end GPU_shmem1 */ int main( int argc, char *argv[] ) { /* get GPU device number and name */ int dev; hipDeviceProp_t deviceProp; checkCUDA( hipGetDevice( &dev ) ); checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) ); printf("Using GPU %d: %s\n", dev, deviceProp.name ); const int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); double *h_a, *h_b, *h_c, *h_c1; double *d_a, *d_b, *d_c; 
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double ); h_a = (double *) malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_b = (double *) malloc( numbytes ); if( h_b == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c = (double *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c1 = (double *) malloc( numbytes ); if( h_c1 == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } /* zero out the host memory for C matrices */ memset( h_c, 0, numbytes ); memset( h_c1, 0, numbytes ); fprintf( stdout, "Total memory required is %lf MB\n", 3.0 * (double) numbytes / 1000000.0 ); /* initialize the A and B matrices */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); } /* allocate a, b, c in gpu memory */ checkCUDA( hipMalloc( (void **)&d_a, numbytes ) ); checkCUDA( hipMalloc( (void **)&d_b, numbytes ) ); checkCUDA( hipMalloc( (void **)&d_c, numbytes ) ); /* copy a and b to device */ checkCUDA( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) ); checkCUDA( hipMemcpy( d_b, h_b, numbytes, hipMemcpyHostToDevice ) ); hipblasHandle_t handle; checkCUBLAS( hipblasCreate( &handle ) ); double alpha = 1.0; double beta = 0.0; /* start timers */ hipEvent_t start, stop; checkCUDA( hipEventCreate( &start ) ); checkCUDA( hipEventCreate( &stop ) ); checkCUDA( hipEventRecord( start, 0 ) ); /* call CUBLAS dgemm */ checkCUBLAS( hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N, size, size, size, &alpha, d_a, size, d_b, size, &beta, d_c, size ) ); /* stop timers */ checkCUDA( hipEventRecord( stop, 0 ) ); checkCUDA( hipEventSynchronize( stop ) ); float elapsedTime; checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU CUBLAS timing information */ fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C from device to host for error checking */ checkCUDA( hipMemcpy( h_c, d_c, numbytes, hipMemcpyDeviceToHost ) ); /* reset C on device to zero */ checkCUDA( hipMemset( d_c, 0, numbytes ) ); /* setup grid and block sizes */ dim3 threads( TX, TY, 1 ); dim3 blocks( size / ( TBX ), size / ( TBY ), 1 ); /* call GPU_naive */ printf("block.X %d block.Y %d\n",blocks.x, blocks.y ); printf("threads.x %d threads.y %d\n",threads.x, threads.y ); /* start timers */ checkCUDA( hipEventRecord( start, 0 ) ); /* call the kernel */ hipLaunchKernelGGL(( GPU_shmem2), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_b, d_c ); checkKERNEL() /* stop timers */ checkCUDA( hipEventRecord( stop, 0 ) ); checkCUDA( hipEventSynchronize( stop ) ); elapsedTime = 0.0f; checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) ); /* print data for GPU naive */ fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C back to host */ checkCUDA( hipMemcpy( h_c1, d_c, numbytes, hipMemcpyDeviceToHost ) ); checkCUBLAS( hipblasDestroy( handle ) ); checkCUDA( hipEventDestroy( start ) ); checkCUDA( hipEventDestroy( stop ) ); /* check CUBLAS versus GPU NAIVE numerical results */ double temp = 0.0; for( int i = 0; i < size * size; i++ ) { temp += ( h_c[i] 
- h_c1[i] ) * ( h_c[i] - h_c1[i] ); } /* end for */ printf("error is %f\n",temp); if( temp > 10 ) printf("FAIL\n"); else printf("PASS\n"); /* cleanup */ checkCUDA( hipFree( d_a ) ); checkCUDA( hipFree( d_b ) ); checkCUDA( hipFree( d_c ) ); free( h_a ); free( h_b ); free( h_c ); free( h_c1 ); checkCUDA( hipDeviceReset() ); return 0; }
f046d7c5511ed1087564f7204cb062dc3c32b444.cu
/* * Copyright 2015 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "cublas_v2.h" #include "../debug.h" /* macro for index calculations */ #define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) ) /* matrix size and thread dimensions */ #define SIZE 1024 /* setup various hard-coded parameters for this kernel */ #define TBX 64 // Size of C this CTA is responsible for, x dimension #define TBY 64 // Size of C this CTA is responsible for, y dimension #define TX 16 // Thread block size, x dimension #define TY 16 // Thread block size, y dimension #define BK 16 // square block of K size #define NX 4 // = TBX/TX == number of iterations to do TBX work with TX blocks #define NY 4 // = TBY/TY == number of iterations to do TBY work with TY blocks __global__ void GPU_shmem2(const int m, double const * const a, double const * const b, double *c ) { /* setup some constants for later use */ const int tx = threadIdx.x; const int ty = threadIdx.y; const int iby = blockIdx.y * TBY; const int ibx = blockIdx.x * TBX; /* shared memory arrays for A and B */ __shared__ double as[ TBX ][ BK+1 ]; __shared__ double bs[ BK ][ TBY+1 ]; /* space for C to be held in registers */ double c_tmp[ NX ][ NY ] ; /* zero the temp C array */ #pragma unroll for ( int i = 0 ; i < NX ; i++) { for ( int j = 0 ; j < NY ; j++) { c_tmp[i][j] = 0.0; } } /* calculate my initial offset into A and B */ int aoff = INDX( ibx + tx, ty, m ); int boff = INDX( tx, iby + ty, m ); /* main loop over blocks of K */ for( int Kblock = 0; Kblock < m; Kblock+=BK ) { /* read block of A into shared memory */ #pragma unroll for ( int i = 0; i < NX ; i ++ ) { as[ tx + i * TX ][ ty ] = a[ (aoff + i*TX) ]; } /* read block of B into shared memory */ #pragma unroll for ( int i = 0; i < NY ; i ++ ) { bs[ tx ][ ty + TY * i ] = b[ (boff + m*i*TY) ]; } __syncthreads(); /* increment A and B offsets for next round of data reads */ boff += BK; aoff += m * BK; /* triply nested loop to perform the matmult on the blocks */ #pragma unroll for( int k = 0 ; k < BK ; k++ ) { #pragma unroll for (int j = 0 ; j < NY ; j++ ) { #pragma unroll for (int i = 0 ; i < NX ; i++ ) { c_tmp[ i ][ j ] += as[ tx + TX*i ][ k ] * bs[ k ][ ty + j*TY ]; } } } __syncthreads(); } /* end for Kblock */ /* set coff to its proper index int the C matrix */ int coff = INDX( ibx + tx, iby + ty, m ); /* write results to the C matrix */ #pragma unroll for ( int j = 0 ; j < NY ; j++ ) { #pragma unroll for ( int i = 0 ; i < NX ; i++ ) { c[ coff + INDX( TX * i, TY * j, m )] = c_tmp[i][j]; } } } /* end GPU_shmem1 */ int main( int argc, char *argv[] ) { /* get GPU device number and name */ int dev; cudaDeviceProp deviceProp; checkCUDA( cudaGetDevice( &dev ) ); checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) ); printf("Using GPU %d: %s\n", dev, deviceProp.name ); const int size = SIZE; fprintf(stdout, "Matrix size is %d\n",size); double *h_a, *h_b, *h_c, *h_c1; double *d_a, *d_b, *d_c; size_t numbytes = (size_t ) size * (size_t ) size * sizeof( double ); h_a = (double *) 
malloc( numbytes ); if( h_a == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_b = (double *) malloc( numbytes ); if( h_b == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c = (double *) malloc( numbytes ); if( h_c == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } h_c1 = (double *) malloc( numbytes ); if( h_c1 == NULL ) { fprintf(stderr,"Error in host malloc\n"); return 911; } /* zero out the host memory for C matrices */ memset( h_c, 0, numbytes ); memset( h_c1, 0, numbytes ); fprintf( stdout, "Total memory required is %lf MB\n", 3.0 * (double) numbytes / 1000000.0 ); /* initialize the A and B matrices */ for( int i = 0; i < size * size; i++ ) { h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 ); } /* allocate a, b, c in gpu memory */ checkCUDA( cudaMalloc( (void **)&d_a, numbytes ) ); checkCUDA( cudaMalloc( (void **)&d_b, numbytes ) ); checkCUDA( cudaMalloc( (void **)&d_c, numbytes ) ); /* copy a and b to device */ checkCUDA( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) ); checkCUDA( cudaMemcpy( d_b, h_b, numbytes, cudaMemcpyHostToDevice ) ); cublasHandle_t handle; checkCUBLAS( cublasCreate( &handle ) ); double alpha = 1.0; double beta = 0.0; /* start timers */ cudaEvent_t start, stop; checkCUDA( cudaEventCreate( &start ) ); checkCUDA( cudaEventCreate( &stop ) ); checkCUDA( cudaEventRecord( start, 0 ) ); /* call CUBLAS dgemm */ checkCUBLAS( cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, &alpha, d_a, size, d_b, size, &beta, d_c, size ) ); /* stop timers */ checkCUDA( cudaEventRecord( stop, 0 ) ); checkCUDA( cudaEventSynchronize( stop ) ); float elapsedTime; checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print GPU CUBLAS timing information */ fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C from device to host for error checking */ checkCUDA( cudaMemcpy( h_c, d_c, numbytes, cudaMemcpyDeviceToHost ) ); /* reset C on device to zero */ checkCUDA( cudaMemset( d_c, 0, numbytes ) ); /* setup grid and block sizes */ dim3 threads( TX, TY, 1 ); dim3 blocks( size / ( TBX ), size / ( TBY ), 1 ); /* call GPU_naive */ printf("block.X %d block.Y %d\n",blocks.x, blocks.y ); printf("threads.x %d threads.y %d\n",threads.x, threads.y ); /* start timers */ checkCUDA( cudaEventRecord( start, 0 ) ); /* call the kernel */ GPU_shmem2<<< blocks, threads >>> ( size, d_a, d_b, d_c ); checkKERNEL() /* stop timers */ checkCUDA( cudaEventRecord( stop, 0 ) ); checkCUDA( cudaEventSynchronize( stop ) ); elapsedTime = 0.0f; checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) ); /* print data for GPU naive */ fprintf(stdout, "Total time GPU SHMEM is %f sec\n", elapsedTime / 1000.0f ); fprintf(stdout, "Performance is %f GFlop/s\n", 2.0 * (double) size * (double) size * (double) size / ( (double) elapsedTime / 1000.0 ) * 1.e-9 ); /* copy C back to host */ checkCUDA( cudaMemcpy( h_c1, d_c, numbytes, cudaMemcpyDeviceToHost ) ); checkCUBLAS( cublasDestroy( handle ) ); checkCUDA( cudaEventDestroy( start ) ); checkCUDA( cudaEventDestroy( stop ) ); /* check CUBLAS versus GPU NAIVE numerical results */ double temp = 0.0; for( int i = 0; i < size * size; i++ ) { temp += ( h_c[i] - h_c1[i] ) * ( h_c[i] - h_c1[i] ); } /* end for */ printf("error is %f\n",temp); if( temp > 10 ) 
printf("FAIL\n"); else printf("PASS\n"); /* cleanup */ checkCUDA( cudaFree( d_a ) ); checkCUDA( cudaFree( d_b ) ); checkCUDA( cudaFree( d_c ) ); free( h_a ); free( h_b ); free( h_c ); free( h_c1 ); checkCUDA( cudaDeviceReset() ); return 0; }
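Both sides of this dgemm benchmark time their GPU work with the runtime's event API and report throughput as 2*N^3 floating-point operations divided by the elapsed time. Below is a stripped-down sketch of that timing and reporting pattern under the same assumptions; the kernel is a trivial stand-in for GPU_shmem2 (it performs far fewer than 2*N^3 flops), so only the measurement code mirrors the benchmark.

#include <hip/hip_runtime.h>
#include <cstdio>

// Trivial stand-in kernel; only the event-timing pattern below mirrors the benchmark.
__global__ void touch_matrix(double* c, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n * n)
        c[idx] = 0.0;
}

int main()
{
    const int n = 1024;   // matches SIZE in the benchmark above
    double* d_c = nullptr;
    hipMalloc((void**)&d_c, (size_t)n * n * sizeof(double));

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    hipLaunchKernelGGL(touch_matrix, dim3((n * n + 255) / 256), dim3(256), 0, 0, d_c, n);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    float elapsedMs = 0.0f;
    hipEventElapsedTime(&elapsedMs, start, stop);

    // The benchmark reports GFlop/s assuming the 2*N^3 flop count of a full
    // N x N x N matrix multiply; that formula is reproduced here unchanged.
    double gflops = 2.0 * (double)n * (double)n * (double)n
                  / ((double)elapsedMs / 1000.0) * 1.e-9;
    printf("Total time %f sec, performance %f GFlop/s\n", elapsedMs / 1000.0f, gflops);

    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_c);
    return 0;
}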
996775ea6925bf20998b63538fbb51a601e83b73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //============================================================================= // FILE: memory.cu // // DESC: This file the basic memory management for the given device //============================================================================= #include "memory.h" //============================================================================= // Class Methods //============================================================================= template <class T> Memory<T>::Memory() : m_memory(), m_memoryPointers(), m_hostbuffers(), m_streams(), m_tensorDesc(), m_filterDesc(), m_convDesc(), m_poolDesc(), m_rnnDesc(), m_rnnDataDesc2(), m_lrnDesc(), m_cudnn(), m_pca(), m_tsnegp(), m_tsneg(), m_memtest(), m_nccl(), m_ssd(), m_memoryMap() { m_memory.SetMemoryPointers(&m_memoryPointers); m_tOne = (T)1; m_tZero = (T)0; #ifdef CUDNN_5 CreateActivationDesc(&m_hGlobalActivationSigmoid); SetActivationDesc(m_hGlobalActivationSigmoid, SIGMOID); CreateActivationDesc(&m_hGlobalActivationRelu); SetActivationDesc(m_hGlobalActivationRelu, RELU); CreateActivationDesc(&m_hGlobalActivationTanh); SetActivationDesc(m_hGlobalActivationTanh, TANH); CreateActivationDesc(&m_hGlobalActivationElu); SetActivationDesc(m_hGlobalActivationElu, ELU); #endif } template Memory<double>::Memory(); template Memory<float>::Memory(); template <class T> Memory<T>::~Memory() { for (int i=0; i<m_hostbuffers.GetCount(); i++) { FreeHostBuffer(i); } for (int i=0; i<m_streams.GetCount(); i++) { FreeStream(i); } for (int i=0; i<m_tensorDesc.GetCount(); i++) { FreeTensorDesc(i); } for (int i=0; i<m_filterDesc.GetCount(); i++) { FreeFilterDesc(i); } for (int i=0; i<m_convDesc.GetCount(); i++) { FreeConvolutionDesc(i); } for (int i=0; i<m_poolDesc.GetCount(); i++) { FreePoolingDesc(i); } for (int i = 0; i < m_rnnDesc.GetCount(); i++) { FreeRnnDesc(i); } for (int i = 0; i < m_rnnDataDesc1.GetCount(); i++) { FreeRnnDataDesc1(i); } for (int i = 0; i < m_rnnDataDesc2.GetCount(); i++) { FreeRnnDataDesc2(i); } for (int i = 0; i < m_rnn.GetCount(); i++) { FreeRnn8(i); } for (int i=0; i<m_lrnDesc.GetCount(); i++) { FreeLRNDesc(i); } for (int i=0; i<m_cudnn.GetCount(); i++) { FreeCuDNN(i); } #ifdef CUDNN_5 for (int i=0; i<m_activationDesc.GetCount(); i++) { FreeActivationDesc(i); } m_hGlobalActivationSigmoid = 0; m_hGlobalActivationRelu = 0; m_hGlobalActivationTanh = 0; m_hGlobalActivationElu = 0; for (int i = 0; i < m_dropoutDesc.GetCount(); i++) { FreeDropoutDesc(i); } #endif for (int i=0; i<m_pca.GetCount(); i++) { FreePCA(i); } for (int i=0; i<m_tsnegp.GetCount(); i++) { FreeTsneGaussianPerplexity(i); } for (int i = 0; i < m_memtest.GetCount(); i++) { FreeMemoryTest(i); } for (int i = 0; i < m_imgop.GetCount(); i++) { FreeImageOp(i); } for (int i = 0; i < m_nccl.GetCount(); i++) { FreeNCCL(i); } for (int i = 0; i < m_ssd.GetCount(); i++) { FreeSSD(i); } for (int i = 0; i < m_layernorm.GetCount(); i++) { FreeLayerNorm(i); } m_memoryMap.clear(); m_cudnnRef.clear(); m_cudnnH2Dev.clear(); m_cudnnDev2H.clear(); m_streamRef.clear(); m_streamH2Dev.clear(); m_streamH2Idx.clear(); m_streamH2CudnnH.clear(); m_streamCudnnRef.clear(); m_streamDev2Idx2H.clear(); } template Memory<double>::~Memory(); template Memory<float>::~Memory(); template <class T> long Memory<T>::GetDeviceMemory(int nDeviceID, T* pfTotal, T* pfFree, T* pfUsed, bool* pbEstimate) { LONG lErr; size_t lFree = 0; size_t lTotal = 0; size_t lUsed = 0; int nOriginalDeviceID = -1; if (nDeviceID >= 0) { if 
(lErr = hipGetDevice(&nOriginalDeviceID)) return lErr; if (nDeviceID != nOriginalDeviceID) { if (lErr = hipSetDevice(nDeviceID)) return lErr; } } if (nDeviceID == -1) { hipDeviceProp_t prop; memset(&prop, 0, sizeof(hipDeviceProp_t)); if (lErr = hipGetDeviceProperties(&prop, nDeviceID)) return lErr; lTotal = prop.totalGlobalMem; lUsed = (size_t)m_memory.GetTotalUsed(); lFree = lTotal - lUsed; *pbEstimate = true; } else { if (lErr = hipMemGetInfo(&lFree, &lTotal)) return lErr; lUsed = lTotal - lFree; *pbEstimate = false; } *pfTotal = (T)((double)lTotal / (double)1000000000.0); *pfFree = (T)((double)lFree / (double)1000000000.0); *pfUsed = (T)((double)lUsed / (double)1000000000.0); if (nOriginalDeviceID >= 0 && nOriginalDeviceID != nDeviceID) { if (lErr = hipSetDevice(nOriginalDeviceID)) return lErr; } return hipStreamSynchronize(0); } template long Memory<double>::GetDeviceMemory(int nDeviceID, double* pdfTotal, double* pdfFree, double* pdfUsed, bool* pbEstimate); template long Memory<float>::GetDeviceMemory(int nDeviceID, float* pfTotal, float* pfFree, float* pfUsed, bool* pbEstimate); template <class T> long Memory<T>::AllocHost(LPTSTR* ppDst, LPTSTR pSrc) { int nLen = (int)_tcslen(pSrc); if (nLen == 0) return ERROR_PARAM_OUT_OF_RANGE; nLen++; // make room for NULL; LPTSTR pDst = NULL; LONG lSize = nLen * sizeof(TCHAR); LONG lErr = 0; if (lErr = alloc_host((void**)&pDst, lSize, false)) return lErr; pDst[nLen] = (TCHAR)NULL; _tcsncpy(pDst, pSrc, nLen); *ppDst = pDst; return lErr; } template long Memory<double>::AllocHost(LPTSTR* ppDst, LPTSTR pSrc); template long Memory<float>::AllocHost(LPTSTR* ppDst, LPTSTR pSrc); template <class T> long Memory<T>::AllocHost(size_t lCount, T** ppDst, void* pSrc, bool bSrcOnDevice, bool bHalf, bool bPinned) { if (lCount == 0) return ERROR_PARAM_OUT_OF_RANGE; if (ppDst == NULL) return ERROR_PARAM_NULL; long long lSize = lCount * sizeof(T); if (lSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; T* pDst = NULL; LONG lErr; if (lErr = alloc_host((void**)&pDst, (size_t)lSize, bPinned)) return lErr; if (pSrc != NULL) { hipMemcpyKind kind = (bSrcOnDevice) ? hipMemcpyDeviceToHost : hipMemcpyHostToHost; if (bHalf) { if (lErr = convertHalf2BaseType(lCount, pSrc, pDst, kind)) { FreeHost(pDst); return lErr; } } else { if (lErr = hipMemcpy(pDst, pSrc, (size_t)lSize, kind)) { FreeHost(pDst); return lErr; } } } else { memset(pDst, 0, lSize); } *ppDst = pDst; return hipGetLastError(); } template long Memory<double>::AllocHost(size_t lCount, double** ppDst, void* pSrc, bool bSrcOnDevice, bool bHalf, bool bPinned); template long Memory<float>::AllocHost(size_t lCount, float** ppDst, void* pSrc, bool bSrcOnDevice, bool bHalf, bool bPinned); template <class T> long Memory<T>::CopyToHost(size_t lCount, T* pDst, void* pSrc, bool bSrcOnDevice, bool bHalf) { if (lCount == 0) return ERROR_PARAM_OUT_OF_RANGE; if (pDst == NULL || pSrc == NULL) return ERROR_PARAM_NULL; hipMemcpyKind kind = (bSrcOnDevice) ? 
hipMemcpyDeviceToHost : hipMemcpyHostToHost; long long lSize = lCount * sizeof(T); if (lSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (bHalf) return convertHalf2BaseType(lCount, pSrc, pDst, kind); else return hipMemcpy(pDst, pSrc, (size_t)lSize, kind); } template long Memory<double>::CopyToHost(size_t lCount, double* pDst, void* pSrc, bool bSrcOnDevice, bool bHalf); template long Memory<float>::CopyToHost(size_t lCount, float* pDst, void* pSrc, bool bSrcOnDevice, bool bHalf); template <class T> long Memory<T>::CopyGpuToHost(long lCount, long hGpuSrc, long hHostDst) { LONG lErr; MemoryItem* pSrcD; if (lErr = m_memory.GetData(hGpuSrc, &pSrcD)) return lErr; HostBuffer<T>* pDstH = GetHostBuffer(hHostDst); if (pDstH == NULL) return ERROR_MEMORY_NOT_FOUND; T* pSrc = (T*)pSrcD->Data(); T* pDst = (T*)pDstH->Data(); return CopyToHost(lCount, pDst, pSrc, true, false); } template long Memory<double>::CopyGpuToHost(long lCount, long hGpuSrc, long hHostDst); template long Memory<float>::CopyGpuToHost(long lCount, long hGpuSrc, long hHostDst); template <class T> long Memory<T>::CopyHostToGpu(long lCount, long hHostSrc, long hGpuDst) { LONG lErr; MemoryItem* pDstD; if (lErr = m_memory.GetData(hGpuDst, &pDstD)) return lErr; HostBuffer<T>* pSrcH = GetHostBuffer(hHostSrc); if (pSrcH == NULL) return ERROR_MEMORY_NOT_FOUND; T* pDst = (T*)pDstD->Data(); T* pSrc = (T*)pSrcH->Data(); long long lSize = lCount * sizeof(T); if (lSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; return hipMemcpy(pDst, pSrc, lSize, hipMemcpyHostToDevice); } template long Memory<double>::CopyHostToGpu(long lCount, long hHostSrc, long hGpuDst); template long Memory<float>::CopyHostToGpu(long lCount, long hHostSrc, long hGpuDst); template <class T> long Memory<T>::AllocHostBuffer(size_t lCount, long* phHandle) { LONG lErr = 0; if (lCount % 2 != 0) lCount++; T* pMem = NULL; if (lErr = AllocHost(lCount, &pMem, NULL, FALSE, FALSE)) return lErr; HostBuffer<T>* pHostBuf = new HostBuffer<T>(pMem, lCount); if (pHostBuf == NULL) { FreeHost(pMem); return ERROR_MEMORY_OUT; } long hHandle = m_hostbuffers.Allocate(pHostBuf); if (hHandle < 0) { delete pHostBuf; FreeHost(pMem); return ERROR_MEMORY_OUT; } m_rgActiveHostBuffers[pMem] = pHostBuf; *phHandle = hHandle; return 0; } template long Memory<double>::AllocHostBuffer(size_t lCount, long* phHandle); template long Memory<float>::AllocHostBuffer(size_t lCount, long* phHandle); template <class T> long Memory<T>::FreeHostBuffer(long hHandle) { HostBuffer<T>* pHostBuf = (HostBuffer<T>*)m_hostbuffers.Free(hHandle); if (pHostBuf != NULL) { if (pHostBuf->Data() != NULL) { m_rgActiveHostBuffers.erase(pHostBuf->Data()); FreeHost(pHostBuf->Data()); } delete pHostBuf; } return 0; } template long Memory<double>::FreeHostBuffer(long hHandle); template long Memory<float>::FreeHostBuffer(long hHandle); template <class T> bool Memory<T>::IsHostBuffer(T* pf) { if (m_rgActiveHostBuffers.find(pf) != m_rgActiveHostBuffers.end()) return true; return false; } template bool Memory<double>::IsHostBuffer(double* pf); template bool Memory<float>::IsHostBuffer(float* pf); template <class T> long Memory<T>::CreateStream(long* phHandle, bool bNonBlocking, int nIndex) { std::unique_lock<std::mutex> lock(m_sync); LONG lErr; hipStream_t stream = NULL; int nDeviceID = 0; if (phHandle == NULL) return ERROR_PARAM_NULL; if (nIndex >= 0) { if (lErr = hipGetDevice(&nDeviceID)) return lErr; if (m_streamDev2Idx2H.find(nDeviceID) != m_streamDev2Idx2H.end()) { if (m_streamDev2Idx2H[nDeviceID].find(nIndex) != 
m_streamDev2Idx2H[nDeviceID].end()) { stream = m_streamDev2Idx2H[nDeviceID][nIndex]; m_streamRef[stream]++; } } } if (stream == NULL) { if (bNonBlocking) { if (lErr = hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)) return lErr; } else { if (lErr = hipStreamCreate(&stream)) return lErr; } if (nIndex >= 0) { m_streamDev2Idx2H[nDeviceID][nIndex] = stream; m_streamRef[stream] = 1; m_streamH2Dev[stream] = nDeviceID; m_streamH2Idx[stream] = nIndex; } } long hHandle = m_streams.Allocate(stream); if (hHandle < 0) { hipStreamDestroy(stream); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return hipStreamSynchronize(0); } template long Memory<double>::CreateStream(long* phHandle, bool bNonBlocking, int nIndex); template long Memory<float>::CreateStream(long* phHandle, bool bNonBlocking, int nIndex); template <typename T> __global__ void synchronize_thread_kernel() { } template <class T> long Memory<T>::SynchronizeThread() { hipLaunchKernelGGL(( synchronize_thread_kernel<T>), dim3(1), dim3(1), 0, 0, ); return hipGetLastError(); } template long Memory<double>::SynchronizeThread(); template long Memory<float>::SynchronizeThread(); template <class T> long Memory<T>::CreateCuDNN(long hStream, long* phHandle) { std::unique_lock<std::mutex> lock(m_sync); LONG lErr; cudnnHandle_t cudnn = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; int nDeviceID; if (lErr = hipGetDevice(&nDeviceID)) return lErr; if (hStream == 0) { // If the cudnn is already created for the device, share it // and increase the ref count. if (m_cudnnDev2H.find(nDeviceID) != m_cudnnDev2H.end()) { cudnn = m_cudnnDev2H[nDeviceID]; m_cudnnRef[cudnn]++; } // Otherwise create a new cudnn for the device and add it to // the maps to share with a ref count of 1. else { if (lErr = cudnnCreate(&cudnn)) return lErr | ERROR_CUDNN_OFFSET; m_cudnnRef[cudnn] = 1; m_cudnnDev2H[nDeviceID] = cudnn; m_cudnnH2Dev[cudnn] = nDeviceID; } } else { hipStream_t stream = GetStream(hStream); // If the stream is a shared stream, share see if the cudnn // is already shared with the shared stream and use the shared // cudnn if it exists, making sure to increase its ref count. if (m_streamRef.find(stream) != m_streamRef.end()) { if (m_streamH2CudnnH.find(stream) != m_streamH2CudnnH.end()) { cudnn = m_streamH2CudnnH[stream]; m_streamCudnnRef[cudnn]++; } } if (cudnn == NULL) { if (lErr = cudnnCreate(&cudnn)) return lErr | ERROR_CUDNN_OFFSET; if (lErr = cudnnSetStream(cudnn, stream)) { cudnnDestroy(cudnn); if (m_streamH2CudnnH.find(stream) != m_streamH2CudnnH.end()) m_streamH2CudnnH.erase(stream); return lErr | ERROR_CUDNN_OFFSET; } // If the stream is a shared stream, add the cudnn to // it and set the ref count to 1. 
if (m_streamRef.find(stream) != m_streamRef.end()) { m_streamH2CudnnH[stream] = cudnn; m_streamCudnnRef[cudnn] = 1; } } } long hHandle = m_cudnn.Allocate(cudnn); if (hHandle < 0) { free_cudnn(cudnn); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return hipStreamSynchronize(0); } template long Memory<double>::CreateCuDNN(long hStream, long* phHandle); template long Memory<float>::CreateCuDNN(long hStream, long* phHandle); template <class T> long Memory<T>::CreateTensorDesc(long* phHandle) { LONG lErr; cudnnTensorDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateTensorDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_tensorDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyTensorDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return hipStreamSynchronize(0); } template long Memory<double>::CreateTensorDesc(long* phHandle); template long Memory<float>::CreateTensorDesc(long* phHandle); template <class T> long Memory<T>::AddTensor(long hHandle, T fAlpha, long hSrcDesc, long hSrc, int nSrcOffset, T fBeta, long hDstDesc, long hDst, int nDstOffset) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t srcdesc = GetTensorDesc(hSrcDesc); cudnnTensorDescriptor_t dstdesc = GetTensorDesc(hDstDesc); MemoryItem* pSrc; MemoryItem* pDst; if (lErr = m_memory.GetData(hSrc, &pSrc)) return lErr; if (lErr = m_memory.GetData(hDst, &pDst)) return lErr; if (cudnn == NULL || srcdesc == NULL || dstdesc == NULL) return ERROR_PARAM_NULL; T* src = (T*)pSrc->Data(); T* dst = (T*)pDst->Data(); if (nSrcOffset > 0) src += nSrcOffset; if (nDstOffset > 0) dst += nDstOffset; #ifdef CUDNN_4 if (lErr = cudnnAddTensor(cudnn, &fAlpha, srcdesc, src, &fBeta, dstdesc, dst)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnAddTensor(cudnn, CUDNN_ADD_SAME_C, &fAlpha, srcdesc, src, &fBeta, dstdesc, dst)) return lErr | ERROR_CUDNN_OFFSET; #endif return hipStreamSynchronize(0); } template long Memory<double>::AddTensor(long hHandle, double dfAlpha, long hSrcDesc, long hSrc, int nSrcOffset, double dfBeta, long hDstDesc, long hDst, int nDstOffset); template long Memory<float>::AddTensor(long hHandle, float fAlpha, long hSrcDesc, long hSrc, int nSrcOffset, float fBeta, long hDstDesc, long hDst, int nDstOffset); template <class T> long Memory<T>::CreateFilterDesc(long* phHandle) { LONG lErr; cudnnFilterDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateFilterDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_filterDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyFilterDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return hipStreamSynchronize(0); } template long Memory<double>::CreateFilterDesc(long* phHandle); template long Memory<float>::CreateFilterDesc(long* phHandle); template <class T> long Memory<T>::CreateConvolutionDesc(long* phHandle) { LONG lErr; cudnnConvolutionDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateConvolutionDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_convDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyConvolutionDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return hipStreamSynchronize(0); } template long Memory<double>::CreateConvolutionDesc(long* phHandle); template long Memory<float>::CreateConvolutionDesc(long* phHandle); template <class T> long Memory<T>::GetConvolutionInfo(long hHandle, long hBottomDesc, long 
hFilterDesc, long hConvDesc, long hTopDesc, size_t lWsLimitInBytes, bool bUseTensorCores, long* palgoFwd, size_t* plWsSizeFwd, long* palgoBwdFilter, size_t* plWsSizeBwdFilter, long* palgoBwdData, size_t* plWsSizeBwdData, int nPreferredFwdAlgo) { cudnnStatus_t lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t bottom = GetTensorDesc(hBottomDesc); cudnnFilterDescriptor_t filter = GetFilterDesc(hFilterDesc); cudnnConvolutionDescriptor_t conv = GetConvolutionDesc(hConvDesc); cudnnTensorDescriptor_t top = GetTensorDesc(hTopDesc); // Choose forward algorithm for convolution. cudnnConvolutionFwdAlgo_t algoFwd; #ifdef CUDA10_2 // Setup the algorithm preference. cudnnConvolutionFwdPreference_t fwdPref = CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT; cudnnConvolutionBwdFilterPreference_t bwdFltPref = CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT; cudnnConvolutionBwdDataPreference_t bwdDataPref = CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT; if (lWsLimitInBytes == (((size_t)-1) / 2 + 1) || lWsLimitInBytes >= (SIZE_MAX - 10)) { lWsLimitInBytes = 0; fwdPref = CUDNN_CONVOLUTION_FWD_PREFER_FASTEST; bwdFltPref = CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST; bwdDataPref = CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST; } else if (lWsLimitInBytes == 0) { lWsLimitInBytes = 0; fwdPref = CUDNN_CONVOLUTION_FWD_NO_WORKSPACE; bwdFltPref = CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE; bwdDataPref = CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE; } if (lErr = cudnnGetConvolutionForwardAlgorithm(cudnn, bottom, filter, conv, top, fwdPref, lWsLimitInBytes, &algoFwd)) return lErr | ERROR_CUDNN_OFFSET; #else int nAlgCount; cudnnConvolutionFwdAlgoPerf_t fwdPref[10]; if (lErr = cudnnGetConvolutionForwardAlgorithm_v7(cudnn, bottom, filter, conv, top, 10, &nAlgCount, fwdPref)) return lErr; if (lWsLimitInBytes == (((size_t)-1) / 2 + 1) || lWsLimitInBytes >= (SIZE_MAX - 10)) { algoFwd = fwdPref[0].algo; } else { int nIdx = 0; while (fwdPref[nIdx].status == CUDNN_STATUS_SUCCESS && fwdPref[nIdx].memory > lWsLimitInBytes && nIdx < nAlgCount && nIdx < 10) { nIdx++; } if (nIdx == nAlgCount) return ERROR_PARAM_OUT_OF_RANGE; algoFwd = fwdPref[nIdx].algo; } #endif // Get workspace size for forward algorithm. size_t szFwd = 0; if (lErr = cudnnGetConvolutionForwardWorkspaceSize(cudnn, bottom, filter, conv, top, algoFwd, &szFwd)) return lErr | ERROR_CUDNN_OFFSET; // CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD has been found by the native Caffe team to work better than // CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM on deconvolution (which acts a bit buggy in this // situation. For this reason, when using cuDnn deconvolution, the C# side sets the preferred // fwd algo to CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD which is used only when the workspace is less // than or equat to the default workspace size and no errors occur when attempting to get the // workspace size for WINOGRAD. By default, the nPrefferredFwdAlgo paraeter is ignored. if (nPreferredFwdAlgo >= 0 && algoFwd == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM && (int)algoFwd != nPreferredFwdAlgo) { size_t lWinogradWorkspaceSize = 0; lErr = cudnnGetConvolutionForwardWorkspaceSize(cudnn, bottom, filter, conv, top, (cudnnConvolutionFwdAlgo_t)nPreferredFwdAlgo, &lWinogradWorkspaceSize); if (lErr == CUDNN_STATUS_SUCCESS) { if (lWinogradWorkspaceSize <= szFwd) { algoFwd = (cudnnConvolutionFwdAlgo_t)nPreferredFwdAlgo; szFwd = lWinogradWorkspaceSize; } } } // Choose backward filter algorithm. 
cudnnConvolutionBwdFilterAlgo_t algoBwdFilter; #ifdef CUDA10_2 if (lErr = cudnnGetConvolutionBackwardFilterAlgorithm(cudnn, bottom, top, conv, filter, bwdFltPref, lWsLimitInBytes, &algoBwdFilter)) return lErr | ERROR_CUDNN_OFFSET; #else cudnnConvolutionBwdFilterAlgoPerf_t bwdFltPref[10]; if (lErr = cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnn, bottom, top, conv, filter, 10, &nAlgCount, bwdFltPref)) return lErr; if (lWsLimitInBytes == (((size_t)-1) / 2 + 1) || lWsLimitInBytes >= (SIZE_MAX - 10)) { algoBwdFilter = bwdFltPref[0].algo; } else { int nIdx = 0; while (bwdFltPref[nIdx].status == CUDNN_STATUS_SUCCESS && bwdFltPref[nIdx].memory > lWsLimitInBytes && nIdx < nAlgCount && nIdx < 10) { nIdx++; } if (nIdx == nAlgCount) return ERROR_PARAM_OUT_OF_RANGE; algoBwdFilter = bwdFltPref[nIdx].algo; } #endif // Get workspace size for backward filter algorithm. size_t szBwdFilter = 0; if (lErr = cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn, bottom, top, conv, filter, algoBwdFilter, &szBwdFilter)) return lErr | ERROR_CUDNN_OFFSET; // Choose backward data algorithm. cudnnConvolutionBwdDataAlgo_t algoBwdData; #ifdef CUDA10_2 if (lErr = cudnnGetConvolutionBackwardDataAlgorithm(cudnn, filter, top, conv, bottom, bwdDataPref, lWsLimitInBytes, &algoBwdData)) return lErr | ERROR_CUDNN_OFFSET; #else cudnnConvolutionBwdDataAlgoPerf_t bwdDataPref[10]; if (lErr = cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnn, filter, top, conv, bottom, 5, &nAlgCount, bwdDataPref)) return lErr; if (lWsLimitInBytes == (((size_t)-1) / 2 + 1) || lWsLimitInBytes >= (SIZE_MAX - 10)) { algoBwdData = bwdDataPref[0].algo; } else { int nIdx = 0; while (bwdDataPref[nIdx].status == CUDNN_STATUS_SUCCESS && bwdDataPref[nIdx].memory > lWsLimitInBytes && nIdx < nAlgCount && nIdx < 10) { nIdx++; } if (nIdx == nAlgCount) return ERROR_PARAM_OUT_OF_RANGE; algoBwdData = bwdDataPref[nIdx].algo; } #endif // Get workspace size for backward data algorithm. 
size_t szBwdData = 0; if (lErr = cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn, filter, top, conv, bottom, algoBwdData, &szBwdData)) return lErr | ERROR_CUDNN_OFFSET; *palgoFwd = (long)algoFwd; *plWsSizeFwd = szFwd; *palgoBwdFilter = (long)algoBwdFilter; *plWsSizeBwdFilter = szBwdFilter; *palgoBwdData = (long)algoBwdData; *plWsSizeBwdData = szBwdData; return hipStreamSynchronize(0); } template long Memory<double>::GetConvolutionInfo(long hHandle, long hBottomDesc, long hFilterDesc, long hConvDesc, long hTopDesc, size_t lWsLimitInBytes, bool bUseTensorCores, long* palgoFwd, size_t* plWsSizeFwd, long* palgoBwdFilter, size_t* plWsSizeBwdFilter, long* palgoBwdData, size_t* plWsSizeBwdData, int nPreferredFwdAlgo); template long Memory<float>::GetConvolutionInfo(long hHandle, long hBottomDesc, long hFilterDesc, long hConvDesc, long hTopDesc, size_t lWsLimitInBytes, bool bUseTensorCores, long* palgoFwd, size_t* plWsSizeFwd, long* palgoBwdFilter, size_t* plWsSizeBwdFilter, long* palgoBwdData, size_t* plWsSizeBwdData, int nPreferredFwdAlgo); template <class T> long Memory<T>::ConvolutionForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hFilterDesc, long hWeight, int nWeightOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, T fBeta, long hTopDesc, long hTopData, int nTopOffset, bool bSyncStream) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); cudnnFilterDescriptor_t filterdesc = GetFilterDesc(hFilterDesc); cudnnConvolutionDescriptor_t convdesc = GetConvolutionDesc(hConvDesc); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); MemoryItem* pBtmData; MemoryItem* pTopData; MemoryItem* pWeight; MemoryItem* pWorkspace = NULL; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hWeight, &pWeight)) return lErr; T* btmdata = (T*)pBtmData->Data(); T* topdata = (T*)pTopData->Data(); T* weight = (T*)pWeight->Data(); T* wksp = NULL; if (hWorkspace != 0) { if (lErr = m_memory.GetData(hWorkspace, &pWorkspace)) return lErr; wksp = (T*)pWorkspace->Data(); } else if (lWorkspaceSize != 0) { return ERROR_PARAM_OUT_OF_RANGE; } if (nBottomOffset > 0) btmdata += nBottomOffset; if (nTopOffset > 0) topdata += nTopOffset; if (nWeightOffset > 0) weight += nWeightOffset; if (wksp != NULL && nWorkspaceOffset > 0) wksp += nWorkspaceOffset; if (lErr = cudnnConvolutionForward(cudnn, &fAlpha, btmdesc, btmdata, filterdesc, weight, convdesc, (cudnnConvolutionFwdAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; if (bSyncStream) return hipStreamSynchronize(0); return CUDNN_STATUS_SUCCESS; } template long Memory<double>::ConvolutionForward(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hFilterDesc, long hWeight, int nWeightOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, double dfBeta, long hTopDesc, long hTopData, int nTopOffset, bool bSyncStream); template long Memory<float>::ConvolutionForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hFilterDesc, long hWeight, int nWeightOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, float fBeta, long hTopDesc, long hTopData, int nTopOffset, bool bSyncStream); template <class T> long 
Memory<T>::ConvolutionBackwardBias(long hHandle, T fAlpha, long hTopDesc, long hTopDiff, int nTopOffset, T fBeta, long hBiasDesc, long hBiasDiff, int nBiasOffset, bool bSyncStream) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t biasdesc = GetTensorDesc(hBiasDesc); MemoryItem* pTopDiff; MemoryItem* pBiasDiff; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBiasDiff, &pBiasDiff)) return lErr; T* topdiff = (T*)pTopDiff->Data(); T* biasdiff = (T*)pBiasDiff->Data(); if (nTopOffset > 0) topdiff += nTopOffset; if (nBiasOffset > 0) biasdiff += nBiasOffset; if (lErr = cudnnConvolutionBackwardBias(cudnn, &fAlpha, topdesc, topdiff, &fBeta, biasdesc, biasdiff)) return lErr | ERROR_CUDNN_OFFSET; if (bSyncStream) return hipStreamSynchronize(0); return CUDNN_STATUS_SUCCESS; } template long Memory<double>::ConvolutionBackwardBias(long hHandle, double dfAlpha, long hTopDesc, long hTopDiff, int nTopOffset, double dfBeta, long hBiasDesc, long hBiasDiff, int nBiasOffset, bool bSyncStream); template long Memory<float>::ConvolutionBackwardBias(long hHandle, float fAlpha, long hTopDesc, long hTopDiff, int nTopOffset, float fBeta, long hBiasDesc, long hBiasDiff, int nBiasOffset, bool bSyncStream); template <class T> long Memory<T>::ConvolutionBackwardFilter(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, T fBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnConvolutionDescriptor_t convdesc = GetConvolutionDesc(hConvDesc); cudnnFilterDescriptor_t filterdesc = GetFilterDesc(hFilterDesc); MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pWeightDiff; MemoryItem* pWorkspace = NULL; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hWeightDiff, &pWeightDiff)) return lErr; T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* weightdiff = (T*)pWeightDiff->Data(); T* wksp = NULL; if (hWorkspace != 0) { if (lErr = m_memory.GetData(hWorkspace, &pWorkspace)) return lErr; wksp = (T*)pWorkspace->Data(); } else if (lWorkspaceSize != 0) { return ERROR_PARAM_OUT_OF_RANGE; } if (nBottomOffset > 0) btmdata += nBottomOffset; if (nTopOffset > 0) topdiff += nTopOffset; if (nWeightOffset > 0) weightdiff += nWeightOffset; if (wksp != NULL && nWorkspaceOffset > 0) wksp += nWorkspaceOffset; #ifdef CUDNN_5 if (lErr = cudnnConvolutionBackwardFilter(cudnn, &fAlpha, btmdesc, btmdata, topdesc, topdiff, convdesc, (cudnnConvolutionBwdFilterAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, filterdesc, weightdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnConvolutionBackwardFilter_v3(cudnn, &fAlpha, btmdesc, btmdata, topdesc, topdiff, convdesc, (cudnnConvolutionBwdFilterAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, filterdesc, weightdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif if (bSyncStream) return hipStreamSynchronize(0); return CUDNN_STATUS_SUCCESS; } template long Memory<double>::ConvolutionBackwardFilter(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, 
long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, double dfBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream); template long Memory<float>::ConvolutionBackwardFilter(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, float fBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream); template <class T> long Memory<T>::ConvolutionBackwardData(long hHandle, T fAlpha, long hFilterDesc, long hWeight, int nWeightOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, T fBeta, long hBottomDesc, long hBottomDiff, int nBottomOffset, bool bSyncStream) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnFilterDescriptor_t filterdesc = GetFilterDesc(hFilterDesc); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnConvolutionDescriptor_t convdesc = GetConvolutionDesc(hConvDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pWeight; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; MemoryItem* pWorkspace = NULL; if (lErr = m_memory.GetData(hWeight, &pWeight)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* weight = (T*)pWeight->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); T* wksp = NULL; if (hWorkspace != 0) { if (lErr = m_memory.GetData(hWorkspace, &pWorkspace)) return lErr; wksp = (T*)pWorkspace->Data(); } else if (lWorkspaceSize != 0) { return ERROR_PARAM_OUT_OF_RANGE; } if (nWeightOffset > 0) weight += nWeightOffset; if (nTopOffset > 0) topdiff += nTopOffset; if (nBottomOffset > 0) btmdiff += nBottomOffset; if (wksp != NULL && nWorkspaceOffset > 0) wksp += nWorkspaceOffset; #ifdef CUDNN_5 if (lErr = cudnnConvolutionBackwardData(cudnn, &fAlpha, filterdesc, weight, topdesc, topdiff, convdesc, (cudnnConvolutionBwdDataAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, btmdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnConvolutionBackwardData_v3(cudnn, &fAlpha, filterdesc, weight, topdesc, topdiff, convdesc, (cudnnConvolutionBwdDataAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, btmdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif if (bSyncStream) return hipStreamSynchronize(0); return CUDNN_STATUS_SUCCESS; } template long Memory<double>::ConvolutionBackwardData(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, double dfBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream); template long Memory<float>::ConvolutionBackwardData(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, float fBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream); template <class T> long Memory<T>::CreatePoolingDesc(long* phHandle) { LONG lErr; cudnnPoolingDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreatePoolingDescriptor(&desc)) return lErr | 
ERROR_CUDNN_OFFSET; long hHandle = m_poolDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyPoolingDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreatePoolingDesc(long* phHandle); template long Memory<float>::CreatePoolingDesc(long* phHandle); template <class T> long Memory<T>::PoolingForward(long hHandle, long hPoolingDesc, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnPoolingDescriptor_t pooldesc = GetPoolingDesc(hPoolingDesc); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); if (lErr = cudnnPoolingForward(cudnn, pooldesc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; return hipStreamSynchronize(0); } template long Memory<double>::PoolingForward(long hHandle, long hPoolingDesc, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::PoolingForward(long hHandle, long hPoolingDesc, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::PoolingBackward(long hHandle, long hPoolingDesc, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnPoolingDescriptor_t pooldesc = GetPoolingDesc(hPoolingDesc); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? 
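// When the diff handle matches the data handle, the descriptor fetched above is
// reused instead of a second GetTensorDesc lookup; the same idiom appears in the
// activation and LRN backward paths below.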
btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); if (lErr = cudnnPoolingBackward(cudnn, pooldesc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; return hipStreamSynchronize(0); } template long Memory<double>::PoolingBackward(long hHandle, long hPoolingDesc, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::PoolingBackward(long hHandle, long hPoolingDesc, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::DeriveBatchNormDesc(long hFwdScaleBiasMeanVarDesc, long hFwdBottomDesc, long hBwdScaleBiasMeanVarDesc, long hBwdBottomDesc, int mode) { LONG lErr; cudnnTensorDescriptor_t fwdscalemeanvardesc = GetTensorDesc(hFwdScaleBiasMeanVarDesc); cudnnTensorDescriptor_t fwdbtmdesc = GetTensorDesc(hFwdBottomDesc); cudnnTensorDescriptor_t bwdscalemeanvardesc = GetTensorDesc(hBwdScaleBiasMeanVarDesc); cudnnTensorDescriptor_t bwdbtmdesc = GetTensorDesc(hBwdBottomDesc); if (lErr = cudnnDeriveBNTensorDescriptor(fwdscalemeanvardesc, fwdbtmdesc, (cudnnBatchNormMode_t)mode)) return lErr; if (lErr = cudnnDeriveBNTensorDescriptor(bwdscalemeanvardesc, bwdbtmdesc, (cudnnBatchNormMode_t)mode)) return lErr; return CUDNN_STATUS_SUCCESS; } template long Memory<double>::DeriveBatchNormDesc(long hFwdScaleBiasMeanVarDesc, long hFwdBottomDesc, long hBwdScaleBiasMeanVarDesc, long hBwdBottomDesc, int mode); template long Memory<float>::DeriveBatchNormDesc(long hFwdScaleBiasMeanVarDesc, long hFwdBottomDesc, long hBwdScaleBiasMeanVarDesc, long hBwdBottomDesc, int mode); template <class T> long Memory<T>::BatchNormForward(long hHandle, int mode, T fAlpha, T fBeta, long hFwdBottomDesc, long hBottomData, long hFwdTopDesc, long hTopData, long hFwdScaleBiasMeanVarDesc, long hScaleData, long hBiasData, T fFactor, long hGlobalMean, long hGlobalVar, T fEps, long hSaveMean, long hSaveVar, bool bTraining) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t fwdbtmdesc = GetTensorDesc(hFwdBottomDesc); cudnnTensorDescriptor_t fwdtopdesc = GetTensorDesc(hFwdTopDesc); cudnnTensorDescriptor_t fwdscalemeanvardesc = GetTensorDesc(hFwdScaleBiasMeanVarDesc); MemoryItem* pBtmData; MemoryItem* pTopData; MemoryItem* pScaleData; MemoryItem* pBiasData; MemoryItem* pGlobalMean; MemoryItem* pGlobalVar; MemoryItem* pSaveMean; MemoryItem* pSaveVar; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hScaleData, &pScaleData)) return lErr; if (lErr = m_memory.GetData(hBiasData, &pBiasData)) return lErr; if (lErr = m_memory.GetData(hGlobalMean, &pGlobalMean)) return lErr; if (lErr = m_memory.GetData(hGlobalVar, &pGlobalVar)) return lErr; T* btmdata = 
(T*)pBtmData->Data(); T* topdata = (T*)pTopData->Data(); T* scaledata = (T*)pScaleData->Data(); T* biasdata = (T*)pBiasData->Data(); T* globalmean = (T*)pGlobalMean->Data(); T* globalvar = (T*)pGlobalVar->Data(); if (sizeof(T) == 4) { if ((float)fEps < CUDNN_BN_MIN_EPSILON) fEps = 0.0001f; } if (bTraining) { if (lErr = m_memory.GetData(hSaveMean, &pSaveMean)) return lErr; if (lErr = m_memory.GetData(hSaveVar, &pSaveVar)) return lErr; T* savemean = (T*)pSaveMean->Data(); T* savevar = (T*)pSaveVar->Data(); if (lErr = cudnnBatchNormalizationForwardTraining(cudnn, (cudnnBatchNormMode_t)mode, &fAlpha, &fBeta, fwdbtmdesc, btmdata, fwdtopdesc, topdata, fwdscalemeanvardesc, scaledata, biasdata, fFactor, globalmean, globalvar, fEps, savemean, savevar)) return lErr; } else { if (lErr = cudnnBatchNormalizationForwardInference(cudnn, (cudnnBatchNormMode_t)mode, &fAlpha, &fBeta, fwdbtmdesc, btmdata, fwdtopdesc, topdata, fwdscalemeanvardesc, scaledata, biasdata, globalmean, globalvar, fEps)) return lErr; } return hipStreamSynchronize(0); } template long Memory<double>::BatchNormForward(long hHandle, int mode, double dfAlpha, double dfBeta, long hFwdBottomDesc, long hBottomData, long hFwdTopDesc, long hTopData, long hFwdScaleBiasMeanVarDesc, long hScaleData, long hBiasData, double fFactor, long hGlobalMean, long hGlobalVar, double fEps, long hSaveMean, long hSaveVar, bool bTraining); template long Memory<float>::BatchNormForward(long hHandle, int mode, float fAlpha, float fBeta, long hFwdBottomDesc, long hBottomData, long hFwdTopDesc, long hTopData, long hFwdScaleBiasMeanVarDesc, long hScaleData, long hBiasData, float fFactor, long hGlobalMean, long hGlobalVar, float fEps, long hSaveMean, long hSaveVar, bool bTraining); template <class T> long Memory<T>::BatchNormBackward(long hHandle, int mode, T fAlphaDiff, T fBetaDiff, T fAlphaParamDiff, T fBetaParamDiff, long hBwdBottomDesc, long hBottomData, long hTopDiffDesc, long hTopDiff, long hBottomDiffDesc, long hBottomDiff, long hBwdScaleBiasMeanVarDesc, long hScaleData, long hScaleDiff, long hBiasDiff, T fEps, long hSaveMean, long hSaveVar) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t bwdbtmdesc = GetTensorDesc(hBwdBottomDesc); cudnnTensorDescriptor_t topdiffdesc = GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = GetTensorDesc(hBottomDiffDesc); cudnnTensorDescriptor_t bwdscalemeanvardesc = GetTensorDesc(hBwdScaleBiasMeanVarDesc); MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; MemoryItem* pScaleData; MemoryItem* pScaleDiff; MemoryItem* pBiasDiff; MemoryItem* pSaveMean = NULL; MemoryItem* pSaveVar = NULL; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; if (lErr = m_memory.GetData(hScaleData, &pScaleData)) return lErr; if (lErr = m_memory.GetData(hScaleDiff, &pScaleDiff)) return lErr; if (lErr = m_memory.GetData(hBiasDiff, &pBiasDiff)) return lErr; T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); T* scaledata = (T*)pScaleData->Data(); T* scalediff = (T*)pScaleDiff->Data(); T* biasdiff = (T*)pBiasDiff->Data(); T* savemean = NULL; T* savevar = NULL; if (hSaveMean != 0 && hSaveVar != 0) { if (lErr = m_memory.GetData(hSaveMean, &pSaveMean)) return lErr; if (lErr = m_memory.GetData(hSaveVar, &pSaveVar)) return lErr; savemean = (T*)pSaveMean->Data(); savevar = (T*)pSaveVar->Data(); } if (sizeof(T) 
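// Epsilon clamp for the float build (sizeof(T) == 4), mirroring the identical
// check in BatchNormForward above: values below CUDNN_BN_MIN_EPSILON are raised
// to 0.0001, presumably to satisfy cuDNN's minimum-epsilon requirement.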
== 4) { if ((float)fEps < CUDNN_BN_MIN_EPSILON) fEps = 0.0001f; } if (lErr = cudnnBatchNormalizationBackward(cudnn, (cudnnBatchNormMode_t)mode, &fAlphaDiff, &fBetaDiff, &fAlphaParamDiff, &fBetaParamDiff, bwdbtmdesc, btmdata, topdiffdesc, topdiff, btmdiffdesc, btmdiff, bwdscalemeanvardesc, scaledata, scalediff, biasdiff, fEps, savemean, savevar)) return lErr; return hipStreamSynchronize(0); } template long Memory<double>::BatchNormBackward(long hHandle, int mode, double dfAlphaDiff, double dfBetaDiff, double dfAlphaParamDiff, double dfBetaParamDiff, long hBtmBottomDesc, long hBottomData, long hTopDiffDesc, long hTopDiff, long hBottomDiffDesc, long hBottomDiff, long hBwdScaleBiasMeanVarDesc, long hScaleData, long hScaleDiff, long hBiasDiff, double fEps, long hSaveMean, long hSaveVar); template long Memory<float>::BatchNormBackward(long hHandle, int mode, float fAlphaDiff, float fBetaDiff, float fAlphaParamDiff, float fBetaParamDiff, long hBtmBottomDesc, long hBottomData, long hTopDiffDesc, long hTopDiff, long hBottomDiffDesc, long hBottomDiff, long hBwdScaleBiasMeanVarDesc, long hScaleData, long hScaleDiff, long hBiasDiff, float fEps, long hSaveMean, long hSaveVar); template <class T> long Memory<T>::CreateDropoutDesc(long* phHandle) { LONG lErr; cudnnDropoutDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateDropoutDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_dropoutDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyDropoutDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateDropoutDesc(long* phHandle); template long Memory<float>::CreateDropoutDesc(long* phHandle); template <class T> long Memory<T>::SetDropoutDesc(long hHandle, long hDropoutDesc, T fDropout, long hStates, long lSeed) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnDropoutDescriptor_t desc = GetDropoutDesc(hDropoutDesc); MemoryItem* pStates; if (lErr = m_memory.GetData(hStates, &pStates)) return lErr; T* states = (T*)pStates->Data(); size_t szStates = (size_t)pStates->Size(); if (lErr = cudnnSetDropoutDescriptor(desc, cudnn, (float)fDropout, states, szStates, (unsigned long long)lSeed)) return lErr | ERROR_CUDNN_OFFSET; return CUDNN_STATUS_SUCCESS; } template long Memory<double>::SetDropoutDesc(long hHandle, long hDropoutDesc, double fDropout, long hStates, long lSeed); template long Memory<float>::SetDropoutDesc(long hHandle, long hDropoutDesc, float fDropout, long hStates, long lSeed); template <class T> long Memory<T>::GetDropoutInfo(long hHandle, long hBottomDesc, unsigned long* plState, unsigned long* plReserved) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t bottomDesc = NULL; size_t szStates = 0; size_t szReserved = 0; if (hBottomDesc > 0) bottomDesc = GetTensorDesc(hBottomDesc); if (plState == NULL || plReserved == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnDropoutGetStatesSize(cudnn, &szStates)) return lErr | ERROR_CUDNN_OFFSET; if (bottomDesc != NULL) { if (lErr = cudnnDropoutGetReserveSpaceSize(bottomDesc, &szReserved)) return lErr | ERROR_CUDNN_OFFSET; } *plState = (unsigned long)szStates; *plReserved = (unsigned long)szReserved; return 0; } template long Memory<double>::GetDropoutInfo(long hHandle, long hBottomDesc, unsigned long* plState, unsigned long* plReserved); template long Memory<float>::GetDropoutInfo(long hHandle, long hBottomDesc, unsigned long* plState, unsigned long* plReserved); template <class T> long 
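//-----------------------------------------------------------------------------
// Dropout forward/backward.
// Both calls expect a dropout descriptor already configured via SetDropoutDesc
// (which needs a device 'states' buffer) plus a reserve-space buffer; the byte
// sizes of both buffers come from GetDropoutInfo above.
// Illustrative call order (sketch only - handle names are hypothetical and the
// allocation of hStates/hReserved happens elsewhere through this class):
//   GetDropoutInfo(hCuda, hBottomDesc, &ulStates, &ulReserved);
//   SetDropoutDesc(hCuda, hDropoutDesc, fDropout, hStates, lSeed);
//   DropoutForward(hCuda, hDropoutDesc, hBottomDesc, hBottom, hTopDesc, hTop, hReserved);
//   DropoutBackward(hCuda, hDropoutDesc, hTopDesc, hTop, hBottomDesc, hBottom, hReserved);
//-----------------------------------------------------------------------------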
Memory<T>::DropoutForward(long hHandle, long hDropoutDesc, long hBottomDesc, long hBottom, long hTopDesc, long hTop, long hReservedSpace) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnDropoutDescriptor_t desc = GetDropoutDesc(hDropoutDesc); cudnnTensorDescriptor_t bottomDesc = GetTensorDesc(hBottomDesc); cudnnTensorDescriptor_t topDesc = GetTensorDesc(hTopDesc); MemoryItem* pBottom; MemoryItem* pTop; MemoryItem* pReserved; if (lErr = m_memory.GetData(hBottom, &pBottom)) return lErr; if (lErr = m_memory.GetData(hTop, &pTop)) return lErr; if (lErr = m_memory.GetData(hReservedSpace, &pReserved)) return lErr; T* bottom = (T*)pBottom->Data(); T* top = (T*)pTop->Data(); T* reserved = (T*)pReserved->Data(); size_t szReserved = (size_t)pReserved->Size(); if (lErr = cudnnDropoutForward(cudnn, desc, bottomDesc, bottom, topDesc, top, reserved, szReserved)) return lErr | ERROR_CUDNN_OFFSET; return hipStreamSynchronize(0); } template long Memory<double>::DropoutForward(long hHandle, long hDropoutDesc, long hBottomDesc, long hBottom, long hTopDesc, long hTop, long hReservedSpace); template long Memory<float>::DropoutForward(long hHandle, long hDropoutDesc, long hBottomDesc, long hBottom, long hTopDesc, long hTop, long hReservedSpace); template <class T> long Memory<T>::DropoutBackward(long hHandle, long hDropoutDesc, long hTopDesc, long hTop, long hBottomDesc, long hBottom, long hReservedSpace) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnDropoutDescriptor_t desc = GetDropoutDesc(hDropoutDesc); cudnnTensorDescriptor_t topDesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t bottomDesc = GetTensorDesc(hBottomDesc); MemoryItem* pTop; MemoryItem* pBottom; MemoryItem* pReserved; if (lErr = m_memory.GetData(hTop, &pTop)) return lErr; if (lErr = m_memory.GetData(hBottom, &pBottom)) return lErr; if (lErr = m_memory.GetData(hReservedSpace, &pReserved)) return lErr; T* top = (T*)pTop->Data(); T* bottom = (T*)pBottom->Data(); T* reserved = (T*)pReserved->Data(); size_t szReserved = (size_t)pReserved->Size(); if (lErr = cudnnDropoutBackward(cudnn, desc, topDesc, top, bottomDesc, bottom, reserved, szReserved)) return lErr | ERROR_CUDNN_OFFSET; return hipStreamSynchronize(0); } template long Memory<double>::DropoutBackward(long hHandle, long hDropoutDesc, long hTopDesc, long hTop, long hBottomDesc, long hBottom, long hReservedSpace); template long Memory<float>::DropoutBackward(long hHandle, long hDropoutDesc, long hTopDesc, long hTop, long hBottomDesc, long hBottom, long hReservedSpace); template <class T> long Memory<T>::CreateLRNDesc(long* phHandle) { LONG lErr; cudnnLRNDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateLRNDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_lrnDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyLRNDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateLRNDesc(long* phHandle); template long Memory<float>::CreateLRNDesc(long* phHandle); template <class T> long Memory<T>::LRNForwardCC(long hHandle, long hNormDesc, T fAlpha, long hBottomDataDesc, long hBottomData, T fBeta, long hTopDataDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnLRNDescriptor_t normdesc = GetLRNDesc(hNormDesc); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); MemoryItem* pBottomData; MemoryItem* pTopData; if (lErr = m_memory.GetData(hTopData, 
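// LRN / LCN cross-channel wrappers: LRNForwardCC and LRNBackwardCC map onto
// cudnnLRNCrossChannelForward/Backward with CUDNN_LRN_CROSS_CHANNEL_DIM1, while
// LCNForwardCC and LCNBackwardCC map onto cudnnDivisiveNormalizationForward/
// Backward with CUDNN_DIVNORM_PRECOMPUTED_MEANS, passing NULL for the means so
// that hTemp1/hTemp2 serve purely as scratch buffers.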
&pTopData)) return lErr;
  if (lErr = m_memory.GetData(hBottomData, &pBottomData)) return lErr;

  T* topdata = (T*)pTopData->Data();
  T* btmdata = (T*)pBottomData->Data();

  if (lErr = cudnnLRNCrossChannelForward(cudnn, normdesc, CUDNN_LRN_CROSS_CHANNEL_DIM1, &fAlpha, btmdatadesc, btmdata, &fBeta, topdatadesc, topdata))
    return lErr | ERROR_CUDNN_OFFSET;

  return hipStreamSynchronize(0);
}

template long Memory<double>::LRNForwardCC(long hHandle, long hNormDesc, double fAlpha, long hBottomDesc, long hBottomData, double fBeta, long hTopDesc, long hTopData);
template long Memory<float>::LRNForwardCC(long hHandle, long hNormDesc, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData);


template <class T>
long Memory<T>::LRNBackwardCC(long hHandle, long hNormDesc, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff)
{
  LONG lErr;
  cudnnHandle_t cudnn = GetCuDNN(hHandle);
  cudnnLRNDescriptor_t normdesc = GetLRNDesc(hNormDesc);
  cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc);
  cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc);
  cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc);
  cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc);
  MemoryItem* pTopData;
  MemoryItem* pBtmData;
  MemoryItem* pTopDiff;
  MemoryItem* pBtmDiff;

  if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr;
  if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr;
  if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr;
  if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr;

  T* topdata = (T*)pTopData->Data();
  T* btmdata = (T*)pBtmData->Data();
  T* topdiff = (T*)pTopDiff->Data();
  T* btmdiff = (T*)pBtmDiff->Data();

  if (lErr = cudnnLRNCrossChannelBackward(cudnn, normdesc, CUDNN_LRN_CROSS_CHANNEL_DIM1, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff))
    return lErr | ERROR_CUDNN_OFFSET;

  return hipStreamSynchronize(0);
}

template long Memory<double>::LRNBackwardCC(long hHandle, long hNormDesc, double fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double fBeta, long hBottomDiffDesc, long hBottomDiff);
template long Memory<float>::LRNBackwardCC(long hHandle, long hNormDesc, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff);


template <class T>
long Memory<T>::LCNForwardCC(long hHandle, long hNormDesc, T fAlpha, long hBottomDataDesc, long hBottomData, long hTemp1, long hTemp2, T fBeta, long hTopDataDesc, long hTopData)
{
  LONG lErr;
  cudnnHandle_t cudnn = GetCuDNN(hHandle);
  cudnnLRNDescriptor_t normdesc = GetLRNDesc(hNormDesc);
  cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc);
  cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc);
  MemoryItem* pBottomData;
  MemoryItem* pTopData;
  MemoryItem* pTemp1;
  MemoryItem* pTemp2;

  if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr;
  if (lErr = m_memory.GetData(hBottomData, &pBottomData)) return lErr;
  if (lErr = m_memory.GetData(hTemp1, &pTemp1)) return lErr;
  if (lErr = m_memory.GetData(hTemp2, &pTemp2)) return lErr;

  T* topdata = (T*)pTopData->Data();
  T* btmdata = (T*)pBottomData->Data();
  T*
temp1 = (T*)pTemp1->Data(); T* temp2 = (T*)pTemp2->Data(); if (lErr = cudnnDivisiveNormalizationForward(cudnn, normdesc, CUDNN_DIVNORM_PRECOMPUTED_MEANS, &fAlpha, btmdatadesc, btmdata, NULL, temp1, temp2, &fBeta, topdatadesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; return hipStreamSynchronize(0); } template long Memory<double>::LCNForwardCC(long hHandle, long hNormDesc, double fAlpha, long hBottomDesc, long hBottomData, long hTemp1, long hTemp2, double fBeta, long hTopDesc, long hTopData); template long Memory<float>::LCNForwardCC(long hHandle, long hNormDesc, float fAlpha, long hBottomDesc, long hBottomData, long hTemp1, long hTemp2, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::LCNBackwardCC(long hHandle, long hNormDesc, T fAlpha, long hBottomDataDesc, long hBottomData, long hTopDiff, long hTemp1, long hTemp2, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnLRNDescriptor_t normdesc = GetLRNDesc(hNormDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; MemoryItem* pTemp1; MemoryItem* pTemp2; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; if (lErr = m_memory.GetData(hTemp1, &pTemp1)) return lErr; if (lErr = m_memory.GetData(hTemp2, &pTemp2)) return lErr; T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); T* temp1 = (T*)pTemp1->Data(); T* temp2 = (T*)pTemp2->Data(); if (lErr = cudnnDivisiveNormalizationBackward(cudnn, normdesc, CUDNN_DIVNORM_PRECOMPUTED_MEANS, &fAlpha, btmdatadesc, btmdata, NULL, topdiff, temp1, temp2, &fBeta, btmdiffdesc, btmdiff, NULL)) return lErr | ERROR_CUDNN_OFFSET; return hipStreamSynchronize(0); } template long Memory<double>::LCNBackwardCC(long hHandle, long hNormDesc, double fAlpha, long hBottomDataDesc, long hBottomData, long hTopDiff, long hTemp1, long hTemp2, double fBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::LCNBackwardCC(long hHandle, long hNormDesc, float fAlpha, long hBottomDataDesc, long hBottomData, long hTopDiff, long hTemp1, long hTemp2, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::TanhForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationTanh); if (lErr = cudnnActivationForward(cudnn, desc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationForward(cudnn, CUDNN_ACTIVATION_TANH, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #endif return hipStreamSynchronize(0); } template long Memory<double>::TanhForward(long hHandle, 
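// Activation wrappers (Tanh/Elu/Sigmoid/ReLU): when CUDNN_5 is defined, the
// pre-created global activation descriptors (m_hGlobalActivationTanh, ...Elu,
// ...Sigmoid, ...Relu) are passed to cudnnActivationForward/Backward; otherwise
// the legacy enum-based overloads (CUDNN_ACTIVATION_TANH, etc.) are used.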
double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::TanhForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::TanhBackward(long hHandle, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationTanh); if (lErr = cudnnActivationBackward(cudnn, desc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationBackward(cudnn, CUDNN_ACTIVATION_TANH, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif return hipStreamSynchronize(0); } template long Memory<double>::TanhBackward(long hHandle, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::TanhBackward(long hHandle, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::EluForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationElu); if (lErr = cudnnActivationForward(cudnn, desc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationForward(cudnn, CUDNN_ACTIVATION_ELU, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #endif return hipStreamSynchronize(0); } template long Memory<double>::EluForward(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long 
Memory<float>::EluForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::EluBackward(long hHandle, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationElu); if (lErr = cudnnActivationBackward(cudnn, desc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationBackward(cudnn, CUDNN_ACTIVATION_ELU, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif return hipStreamSynchronize(0); } template long Memory<double>::EluBackward(long hHandle, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::EluBackward(long hHandle, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::SigmoidForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationSigmoid); if (lErr = cudnnActivationForward(cudnn, desc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationForward(cudnn, CUDNN_ACTIVATION_SIGMOID, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #endif return hipStreamSynchronize(0); } template long Memory<double>::SigmoidForward(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::SigmoidForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, 
long hTopDesc, long hTopData); template <class T> long Memory<T>::SigmoidBackward(long hHandle, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationSigmoid); if (lErr = cudnnActivationBackward(cudnn, desc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationBackward(cudnn, CUDNN_ACTIVATION_SIGMOID, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif return hipStreamSynchronize(0); } template long Memory<double>::SigmoidBackward(long hHandle, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::SigmoidBackward(long hHandle, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::ReLUForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationRelu); if (lErr = cudnnActivationForward(cudnn, desc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationForward(cudnn, CUDNN_ACTIVATION_RELU, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #endif return hipStreamSynchronize(0); } template long Memory<double>::ReLUForward(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::ReLUForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::ReLUBackward(long hHandle, T 
fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationRelu); if (lErr = cudnnActivationBackward(cudnn, desc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationBackward(cudnn, CUDNN_ACTIVATION_RELU, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif return hipStreamSynchronize(0); } template long Memory<double>::ReLUBackward(long hHandle, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::ReLUBackward(long hHandle, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::SoftmaxForward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); if (lErr = cudnnSoftmaxForward(cudnn, (cudnnSoftmaxAlgorithm_t)alg, (cudnnSoftmaxMode_t)mode, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; return hipStreamSynchronize(0); } template long Memory<double>::SoftmaxForward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::SoftmaxForward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::SoftmaxBackward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t 
cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); if (lErr = cudnnSoftmaxBackward(cudnn, (cudnnSoftmaxAlgorithm_t)alg, (cudnnSoftmaxMode_t)mode, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; return hipStreamSynchronize(0); } template long Memory<double>::SoftmaxBackward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::SoftmaxBackward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::CreateRnnDataDesc1(long* phHandle) { LONG lErr; rnnDataHandle<T>* desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if ((desc = new rnnDataHandle<T>()) == NULL) return ERROR_MEMORY_OUT; if (lErr = desc->Initialize(this)) return lErr; long hHandle = m_rnnDataDesc1.Allocate(desc); if (hHandle < 0) { desc->CleanUp(); delete desc; return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateRnnDataDesc1(long* phHandle); template long Memory<float>::CreateRnnDataDesc1(long* phHandle); template <class T> long Memory<T>::CreateRnnDataDesc2(long* phHandle) { LONG lErr; cudnnRNNDataDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateRNNDataDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_rnnDataDesc2.Allocate(desc); if (hHandle < 0) { cudnnDestroyRNNDataDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateRnnDataDesc2(long* phHandle); template long Memory<float>::CreateRnnDataDesc2(long* phHandle); template <class T> long Memory<T>::CreateRnnDesc(long* phHandle) { LONG lErr; cudnnRNNDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateRNNDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_rnnDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyRNNDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateRnnDesc(long* phHandle); template long Memory<float>::CreateRnnDesc(long* phHandle); template <class T> long Memory<T>::GetRnnParamCount(long hHandle, long hRnnDesc, long hXDesc, int* pnCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); cudnnDataType_t type = (sizeof(T) == 4) ? 
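// RNN helpers use two descriptor schemes: m_rnnDataDesc1 stores rnnDataHandle<T>
// objects (per-timestep tensor arrays for the legacy cudnnRNNForward* API), and
// m_rnnDataDesc2 stores cudnnRNNDataDescriptor_t objects used by the ...Ex entry
// points. The cuDNN data type is chosen from the template width below:
// sizeof(T) == 4 selects CUDNN_DATA_FLOAT, otherwise CUDNN_DATA_DOUBLE.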
CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE; if (descX == NULL) return ERROR_PARAM_NULL; if (pnCount == NULL) return ERROR_PARAM_NULL; size_t sizeInBytes; if (lErr = cudnnGetRNNParamsSize(cudnn, desc, descX->GetFirstTensor(), &sizeInBytes, type)) return lErr; int nCount = (int)(sizeInBytes / sizeof(T)); *pnCount = nCount; return 0; } template long Memory<double>::GetRnnParamCount(long hHandle, long hRnnDesc, long hXDesc, int* pnCount); template long Memory<float>::GetRnnParamCount(long hHandle, long hRnnDesc, long hXDesc, int* pnCount); template <class T> long Memory<T>::GetRnnParamCountEx(long hHandle, long hRnnDesc, long hXDesc, int* pnCount) { LONG lErr; cudnnDataType_t type = (sizeof(T) == 4) ? CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); if (descX == NULL) return ERROR_PARAM_NULL; if (pnCount == NULL) return ERROR_PARAM_NULL; cudnnDataType_t type0; cudnnRNNDataLayout_t layout; int nMaxSeqLen = 0; int nBatchSize = 0; int nVectorSize = 0; T fFill; int* rgSeqLen = (int*)malloc(sizeof(int) * 1); if (rgSeqLen == NULL) return ERROR_MEMORY_OUT; lErr = cudnnGetRNNDataDescriptor(descX, &type0, &layout, &nMaxSeqLen, &nBatchSize, &nVectorSize, 1, rgSeqLen, (void*)&fFill); free(rgSeqLen); if (lErr) return lErr; cudnnTensorDescriptor_t tensorX; if (lErr = cudnnCreateTensorDescriptor(&tensorX)) return lErr; int rgDim[3]; rgDim[0] = (layout == CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED) ? nMaxSeqLen : nBatchSize; rgDim[1] = nVectorSize; rgDim[2] = 1; int rgStride[3]; rgStride[0] = rgDim[2] * rgDim[1]; rgStride[1] = rgDim[2]; rgStride[2] = 1; if (lErr = cudnnSetTensorNdDescriptor(tensorX, type, 3, rgDim, rgStride)) { cudnnDestroyTensorDescriptor(tensorX); return lErr; } size_t sizeInBytes; lErr = cudnnGetRNNParamsSize(cudnn, desc, tensorX, &sizeInBytes, type); cudnnDestroyTensorDescriptor(tensorX); if (lErr) return lErr; int nCount = (int)(sizeInBytes / sizeof(T)); *pnCount = nCount; return 0; } template long Memory<double>::GetRnnParamCountEx(long hHandle, long hRnnDesc, long hXDesc, int* pnCount); template long Memory<float>::GetRnnParamCountEx(long hHandle, long hRnnDesc, long hXDesc, int* pnCount); template <class T> long Memory<T>::GetRnnWorkspaceCount(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); if (pnWsCount == NULL || pnResCount == NULL) return ERROR_PARAM_NULL; size_t sizeInBytes; if (lErr = cudnnGetRNNWorkspaceSize(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), &sizeInBytes)) return lErr; size_t nWsCount = (size_t)(sizeInBytes / sizeof(T)); if (lErr = cudnnGetRNNTrainingReserveSize(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), &sizeInBytes)) return lErr; size_t nResCount = (size_t)(sizeInBytes / sizeof(T)); *pnWsCount = nWsCount; *pnResCount = nResCount; return 0; } template long Memory<double>::GetRnnWorkspaceCount(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount); template long Memory<float>::GetRnnWorkspaceCount(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount); template <class T> long Memory<T>::GetRnnWorkspaceCountEx(long hHandle, long hRnnDesc, long hXDesc, size_t* 
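// GetRnnWorkspaceCountEx: rebuilds an array of nMaxSeqLen 3-D tensor descriptors
// from the cudnnRNNDataDescriptor_t, queries cudnnGetRNNWorkspaceSize and
// cudnnGetRNNTrainingReserveSize against that array, and returns the results as
// element counts (bytes / sizeof(T)) with one extra element of slack.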
pnWsCount, size_t* pnResCount) { LONG lErr; cudnnDataType_t type = (sizeof(T) == 4) ? CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); if (descX == NULL) return ERROR_PARAM_NULL; if (pnWsCount == NULL || pnResCount == NULL) return ERROR_PARAM_NULL; cudnnDataType_t type0; cudnnRNNDataLayout_t layout; int nMaxSeqLen = 0; int nBatchSize = 0; int nVectorSize = 0; T fFill; int* rgSeqLen = (int*)malloc(sizeof(int) * 1); if (rgSeqLen == NULL) return ERROR_MEMORY_OUT; lErr = cudnnGetRNNDataDescriptor(descX, &type0, &layout, &nMaxSeqLen, &nBatchSize, &nVectorSize, 1, rgSeqLen, (void*)&fFill); free(rgSeqLen); if (lErr) return lErr; cudnnTensorDescriptor_t* rgDescX = (cudnnTensorDescriptor_t*)malloc(sizeof(cudnnTensorDescriptor_t) * nMaxSeqLen); if (rgDescX == NULL) return ERROR_OUTOFMEMORY; memset(rgDescX, NULL, sizeof(cudnnTensorDescriptor_t) * nMaxSeqLen); for (int i = 0; i < nMaxSeqLen; i++) { if (lErr = cudnnCreateTensorDescriptor(&rgDescX[i])) break; int rgDim[3]; rgDim[0] = (layout == CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED) ? nMaxSeqLen : nBatchSize; rgDim[1] = nVectorSize; rgDim[2] = 1; int rgStride[3]; rgStride[0] = rgDim[2] * rgDim[1]; rgStride[1] = rgDim[2]; rgStride[2] = 1; if (lErr = cudnnSetTensorNdDescriptor(rgDescX[i], type, 3, rgDim, rgStride)) break; } size_t sizeInBytes; size_t nWsCount = 0; if (!lErr) { lErr = cudnnGetRNNWorkspaceSize(cudnn, desc, nMaxSeqLen, rgDescX, &sizeInBytes); if (!lErr) { nWsCount = (size_t)(sizeInBytes / sizeof(T)) + 1; lErr = cudnnGetRNNTrainingReserveSize(cudnn, desc, nMaxSeqLen, rgDescX, &sizeInBytes); } } for (int i = 0; i < nMaxSeqLen; i++) { if (rgDescX[i] != NULL) cudnnDestroyTensorDescriptor(rgDescX[i]); } free(rgDescX); if (lErr) return lErr; size_t nResCount = (size_t)(sizeInBytes / sizeof(T)) + 1; *pnWsCount = nWsCount; *pnResCount = nResCount; return hipStreamSynchronize(0); } template long Memory<double>::GetRnnWorkspaceCountEx(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount); template long Memory<float>::GetRnnWorkspaceCountEx(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount); template <typename T> __global__ void init_data_kernel(const int n, const T alpha, T* y) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) y[tid] = alpha; } template <class T> void init_data(T* pData, int nCount, T val) { int nBlock = 1024; int nGrid = (nCount + nBlock - 1) / nBlock; hipLaunchKernelGGL(( init_data_kernel), dim3(nGrid), dim3(nBlock), 0, 0, nCount, val, pData); } template <class T> long Memory<T>::GetRnnLinLayerParams(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); MemoryItem* pWtData; if (descX == NULL) return ERROR_PARAM_OUT_OF_RANGE; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (pnWtCount == NULL || phWt == NULL || pnBiasCount == NULL || phBias == NULL) return ERROR_PARAM_NULL; // Get the Weight Counts cudnnFilterDescriptor_t filterWts; if (lErr = 
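// GetRnnLinLayerParams: uses cudnnGetRNNLinLayerMatrixParams/BiasParams to locate
// the per-gate weight and bias regions inside the packed RNN weight blob, fills
// them with defaults via the init_data kernel (weights = 1/count, biases = 1),
// and hands the device pointers back to the caller as memory-pointer handles.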
cudnnCreateFilterDescriptor(&filterWts)) return lErr; void* pWtDevMem; if (lErr = cudnnGetRNNLinLayerMatrixParams(cudnn, desc, nLayer, descX->GetFirstTensor(), descWt, pWtData->Data(), nLinLayer, filterWts, &pWtDevMem)) { cudnnDestroyFilterDescriptor(filterWts); return lErr; } cudnnDataType_t type; cudnnTensorFormat_t fmt; int nbDims; int rgDimA[3]; if (lErr = cudnnGetFilterNdDescriptor(filterWts, 3, &type, &fmt, &nbDims, rgDimA)) { cudnnDestroyFilterDescriptor(filterWts); return lErr; } int nWtCount = rgDimA[0] * rgDimA[1] * rgDimA[2]; T fVal = T(1.0 / nWtCount); init_data((T*)pWtDevMem, nWtCount, fVal); cudnnDestroyFilterDescriptor(filterWts); // Get the Bias Counts cudnnFilterDescriptor_t filterBias; if (lErr = cudnnCreateFilterDescriptor(&filterBias)) return lErr; void* pBiasDevMem; if (lErr = cudnnGetRNNLinLayerBiasParams(cudnn, desc, nLayer, descX->GetFirstTensor(), descWt, pWtData->Data(), nLinLayer, filterBias, &pBiasDevMem)) { cudnnDestroyFilterDescriptor(filterBias); return lErr; } if (lErr = cudnnGetFilterNdDescriptor(filterBias, 3, &type, &fmt, &nbDims, rgDimA)) { cudnnDestroyFilterDescriptor(filterBias); return lErr; } int nBiasCount = rgDimA[0] * rgDimA[1] * rgDimA[2]; init_data((T*)pBiasDevMem, nBiasCount, T(1.0)); cudnnDestroyFilterDescriptor(filterBias); // Create the memory pointer handles. long hWtMemPtr; long long lWtSize = nWtCount * sizeof(T); if (lWtSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (lErr = CreateMemoryPointer(pWtData->DeviceID(), pWtData->IsHalf(), (T*)pWtDevMem, (size_t)lWtSize, &hWtMemPtr)) return lErr; long hBiasMemPtr; long long lBiasSize = nBiasCount * sizeof(T); if (lBiasSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (lErr = CreateMemoryPointer(pWtData->DeviceID(), pWtData->IsHalf(), (T*)pBiasDevMem, (size_t)lBiasSize, &hBiasMemPtr)) return lErr; *pnWtCount = nWtCount; *phWt = hWtMemPtr; *pnBiasCount = nBiasCount; *phBias = hBiasMemPtr; return hipStreamSynchronize(0); } template long Memory<double>::GetRnnLinLayerParams(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias); template long Memory<float>::GetRnnLinLayerParams(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias); template <class T> long Memory<T>::GetRnnLinLayerParamsEx(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias) { LONG lErr; cudnnDataType_t type = (sizeof(T) == 4) ? 
CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); MemoryItem* pWtData; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (descX == NULL) return ERROR_PARAM_NULL; if (pnWtCount == NULL || phWt == NULL || pnBiasCount == NULL || phBias == NULL) return ERROR_PARAM_NULL; cudnnDataType_t type0; cudnnRNNDataLayout_t layout; int nMaxSeqLen = 0; int nBatchSize = 0; int nVectorSize = 0; T fFill; int* rgSeqLen = (int*)malloc(sizeof(int) * 1); if (rgSeqLen == NULL) return ERROR_MEMORY_OUT; lErr = cudnnGetRNNDataDescriptor(descX, &type0, &layout, &nMaxSeqLen, &nBatchSize, &nVectorSize, 1, rgSeqLen, (void*)&fFill); free(rgSeqLen); if (lErr) return lErr; cudnnTensorDescriptor_t tensorX; if (lErr = cudnnCreateTensorDescriptor(&tensorX)) return lErr; int rgDim[3]; rgDim[0] = (layout == CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED) ? nMaxSeqLen : nBatchSize; rgDim[1] = nVectorSize; rgDim[2] = 1; int rgStride[3]; rgStride[0] = rgDim[2] * rgDim[1]; rgStride[1] = rgDim[2]; rgStride[2] = 1; if (lErr = cudnnSetTensorNdDescriptor(tensorX, type, 3, rgDim, rgStride)) { cudnnDestroyTensorDescriptor(tensorX); return lErr; } // Get the Weight Counts cudnnFilterDescriptor_t filterWts; if (lErr = cudnnCreateFilterDescriptor(&filterWts)) { cudnnDestroyTensorDescriptor(tensorX); return lErr; } void* pWtDevMem; if (lErr = cudnnGetRNNLinLayerMatrixParams(cudnn, desc, nLayer, tensorX, descWt, pWtData->Data(), nLinLayer, filterWts, &pWtDevMem)) { cudnnDestroyTensorDescriptor(tensorX); cudnnDestroyFilterDescriptor(filterWts); return lErr; } cudnnTensorFormat_t fmt; int nbDims; int rgDimA[3]; if (lErr = cudnnGetFilterNdDescriptor(filterWts, 3, &type0, &fmt, &nbDims, rgDimA)) { cudnnDestroyTensorDescriptor(tensorX); cudnnDestroyFilterDescriptor(filterWts); return lErr; } int nWtCount = rgDimA[0] * rgDimA[1] * rgDimA[2]; T fVal = T(1.0 / nWtCount); init_data((T*)pWtDevMem, nWtCount, fVal); cudnnDestroyFilterDescriptor(filterWts); // Get the Bias Counts cudnnFilterDescriptor_t filterBias; if (lErr = cudnnCreateFilterDescriptor(&filterBias)) { cudnnDestroyTensorDescriptor(tensorX); return lErr; } void* pBiasDevMem; if (lErr = cudnnGetRNNLinLayerBiasParams(cudnn, desc, nLayer, tensorX, descWt, pWtData->Data(), nLinLayer, filterBias, &pBiasDevMem)) { cudnnDestroyTensorDescriptor(tensorX); cudnnDestroyFilterDescriptor(filterBias); return lErr; } if (lErr = cudnnGetFilterNdDescriptor(filterBias, 3, &type, &fmt, &nbDims, rgDimA)) { cudnnDestroyTensorDescriptor(tensorX); cudnnDestroyFilterDescriptor(filterBias); return lErr; } int nBiasCount = rgDimA[0] * rgDimA[1] * rgDimA[2]; init_data((T*)pBiasDevMem, nBiasCount, T(1.0)); cudnnDestroyFilterDescriptor(filterBias); cudnnDestroyTensorDescriptor(tensorX); // Create the memory pointer handles. 
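// The weight and bias device pointers obtained above point into the existing
// packed weight blob; CreateMemoryPointer appears to wrap them as lightweight
// handles rather than allocating new storage, so the returned hWt/hBias handles
// remain valid only as long as the underlying weight data (hWtData) does.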
long hWtMemPtr; long long lWtSize = nWtCount * sizeof(T); if (lWtSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (lErr = CreateMemoryPointer(pWtData->DeviceID(), pWtData->IsHalf(), (T*)pWtDevMem, (size_t)lWtSize, &hWtMemPtr)) return lErr; long hBiasMemPtr; long long lBiasSize = nBiasCount * sizeof(T); if (lBiasSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (lErr = CreateMemoryPointer(pWtData->DeviceID(), pWtData->IsHalf(), (T*)pBiasDevMem, (size_t)lBiasSize, &hBiasMemPtr)) return lErr; *pnWtCount = nWtCount; *phWt = hWtMemPtr; *pnBiasCount = nBiasCount; *phBias = hBiasMemPtr; return hipStreamSynchronize(0); } template long Memory<double>::GetRnnLinLayerParamsEx(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias); template long Memory<float>::GetRnnLinLayerParamsEx(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias); template <class T> long Memory<T>::RnnForward(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspaceData, size_t nWsCount, long hReservedData, size_t nResCount, bool bTraining) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); rnnDataHandle<T>* descY = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hYDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnTensorDescriptor_t descCx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCxDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); cudnnTensorDescriptor_t descHy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHyDesc); cudnnTensorDescriptor_t descCy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCyDesc); MemoryItem* pXData; MemoryItem* pHxData; MemoryItem* pCxData; MemoryItem* pWtData; MemoryItem* pYData; MemoryItem* pHyData; MemoryItem* pCyData; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (descX == NULL || descY == NULL) return ERROR_PARAM_OUT_OF_RANGE; if (lErr = m_memory.GetData(hXData, &pXData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hCxData, &pCxData)) return lErr; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hHyData, &pHyData)) return lErr; if (lErr = m_memory.GetData(hCyData, &pCyData)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (bTraining) { if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; } if (!bTraining) { lErr = cudnnRNNForwardInference(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), pXData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descWt, pWtData->Data(), descY->SeqTensors(), pYData->Data(), descHy, pHyData->Data(), descCy, pCyData->Data(), pWorkspaceData->Data(), pWorkspaceData->Size()); } else { lErr = cudnnRNNForwardTraining(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), pXData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descWt, pWtData->Data(), descY->SeqTensors(), 
pYData->Data(), descHy, pHyData->Data(), descCy, pCyData->Data(), pWorkspaceData->Data(), pWorkspaceData->Size(), pReservedData->Data(), pReservedData->Size()); } return hipStreamSynchronize(0); } template long Memory<double>::RnnForward(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount, bool bTraining); template long Memory<float>::RnnForward(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount, bool bTraining); template <class T> long Memory<T>::RnnForwardEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspaceData, size_t nWsCount, long hReservedData, size_t nResCount, bool bTraining) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); cudnnRNNDataDescriptor_t descY = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hYDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnTensorDescriptor_t descCx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCxDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); cudnnTensorDescriptor_t descHy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHyDesc); cudnnTensorDescriptor_t descCy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCyDesc); MemoryItem* pXData; MemoryItem* pHxData; MemoryItem* pCxData; MemoryItem* pWtData; MemoryItem* pYData; MemoryItem* pHyData; MemoryItem* pCyData; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (lErr = m_memory.GetData(hXData, &pXData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hCxData, &pCxData)) return lErr; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hHyData, &pHyData)) return lErr; if (lErr = m_memory.GetData(hCyData, &pCyData)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (bTraining) { if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; } if (!bTraining) { lErr = cudnnRNNForwardInferenceEx(cudnn, desc, descX, pXData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descWt, pWtData->Data(), descY, pYData->Data(), descHy, pHyData->Data(), descCy, pCyData->Data(), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, pWorkspaceData->Data(), pWorkspaceData->Size()); } else { lErr = cudnnRNNForwardTrainingEx(cudnn, desc, descX, pXData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descWt, pWtData->Data(), descY, pYData->Data(), descHy, pHyData->Data(), descCy, pCyData->Data(), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, pWorkspaceData->Data(), pWorkspaceData->Size(), pReservedData->Data(), pReservedData->Size()); } return hipStreamSynchronize(0); } template long 
Memory<double>::RnnForwardEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount, bool bTraining); template long Memory<float>::RnnForwardEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount, bool bTraining); template <class T> long Memory<T>::RnnBackwardData(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspaceData, size_t nWsCount, long hReservedData, size_t nResCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); rnnDataHandle<T>* descY = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hYDesc); cudnnTensorDescriptor_t descHy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHyDesc); cudnnTensorDescriptor_t descCy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCyDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnTensorDescriptor_t descCx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCxDesc); cudnnTensorDescriptor_t descHxd = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hdHxDesc); cudnnTensorDescriptor_t descCxd = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hdCxDesc); MemoryItem* pYData; MemoryItem* pYDiff; MemoryItem* pHyDiff = NULL; MemoryItem* pCyDiff = NULL; MemoryItem* pWtData; MemoryItem* pHxData; MemoryItem* pCxData; MemoryItem* pXDiff; MemoryItem* pHxDiff; MemoryItem* pCxDiff; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (descX == NULL || descY == NULL) return ERROR_PARAM_OUT_OF_RANGE; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hYDiff, &pYDiff)) return lErr; if (hHyDiff != 0) { if (lErr = m_memory.GetData(hHyDiff, &pHyDiff)) return lErr; } if (hCyDiff != 0) { if (lErr = m_memory.GetData(hCyDiff, &pCyDiff)) return lErr; } if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hCxData, &pCxData)) return lErr; if (lErr = m_memory.GetData(hXDiff, &pXDiff)) return lErr; if (lErr = m_memory.GetData(hHxDiff, &pHxDiff)) return lErr; if (lErr = m_memory.GetData(hCxDiff, &pCxDiff)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; lErr = cudnnRNNBackwardData(cudnn, desc, descY->MaxSeqLen(), descY->SeqTensors(), pYData->Data(), descY->SeqTensors(), pYDiff->Data(), descHy, (pHyDiff == NULL) ? NULL : pHyDiff->Data(), descCy, (pCyDiff == NULL) ? 
NULL : pCyDiff->Data(), descWt, pWtData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descX->SeqTensors(), pXDiff->Data(), descHxd, pHxDiff->Data(), descCxd, pCxDiff->Data(), pWorkspaceData->Data(), pWorkspaceData->Size(), pReservedData->Data(), pReservedData->Size()); return hipStreamSynchronize(0); } template long Memory<double>::RnnBackwardData(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount); template long Memory<float>::RnnBackwardData(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount); template <class T> long Memory<T>::RnnBackwardDataEx(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspaceData, size_t nWsCount, long hReservedData, size_t nResCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); cudnnRNNDataDescriptor_t descY = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hYDesc); cudnnTensorDescriptor_t descHy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHyDesc); cudnnTensorDescriptor_t descCy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCyDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnTensorDescriptor_t descCx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCxDesc); cudnnTensorDescriptor_t descHxd = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hdHxDesc); cudnnTensorDescriptor_t descCxd = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hdCxDesc); MemoryItem* pYData; MemoryItem* pYDiff; MemoryItem* pHyDiff; MemoryItem* pCyDiff; MemoryItem* pWtData; MemoryItem* pHxData; MemoryItem* pCxData; MemoryItem* pXDiff; MemoryItem* pHxDiff; MemoryItem* pCxDiff; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hYDiff, &pYDiff)) return lErr; if (lErr = m_memory.GetData(hHyDiff, &pHyDiff)) return lErr; if (lErr = m_memory.GetData(hCyDiff, &pCyDiff)) return lErr; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hCxData, &pCxData)) return lErr; if (lErr = m_memory.GetData(hXDiff, &pXDiff)) return lErr; if (lErr = m_memory.GetData(hHxDiff, &pHxDiff)) return lErr; if (lErr = m_memory.GetData(hCxDiff, &pCxDiff)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; lErr = cudnnRNNBackwardDataEx(cudnn, desc, 
descY, pYData->Data(), descY, pYDiff->Data(), NULL, NULL, descHy, pHyDiff->Data(), descCy, pCyDiff->Data(), descWt, pWtData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descX, pXDiff->Data(), descHxd, pHxDiff->Data(), descCxd, pCxDiff->Data(), NULL, NULL, pWorkspaceData->Data(), pWorkspaceData->Size(), pReservedData->Data(), pReservedData->Size()); return hipStreamSynchronize(0); } template long Memory<double>::RnnBackwardDataEx(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount); template long Memory<float>::RnnBackwardDataEx(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount); template <class T> long Memory<T>::RnnBackwardWeights(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspaceData, size_t nWsCount, long hWtDesc, long hWtDiff, long hReservedData, size_t nResCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); rnnDataHandle<T>* descY = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hYDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); MemoryItem* pXData; MemoryItem* pHxData; MemoryItem* pYData; MemoryItem* pWtDiff; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (descX == NULL || descY == NULL) return ERROR_PARAM_OUT_OF_RANGE; if (lErr = m_memory.GetData(hXData, &pXData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hWtDiff, &pWtDiff)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; lErr = cudnnRNNBackwardWeights(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), pXData->Data(), descHx, pHxData->Data(), descY->SeqTensors(), pYData->Data(), pWorkspaceData->Data(), pWorkspaceData->Size(), descWt, pWtDiff->Data(), pReservedData->Data(), pReservedData->Size()); return hipStreamSynchronize(0); } template long Memory<double>::RnnBackwardWeights(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspace, size_t nWsCount, long hWtDesc, long hWtDiff, long hReserved, size_t nResCount); template long Memory<float>::RnnBackwardWeights(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspace, size_t nWsCount, long hWtDesc, long hWtDiff, long hReserved, size_t nResCount); template <class T> long Memory<T>::RnnBackwardWeightsEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspaceData, 
	size_t nWsCount, long hWtDesc, long hWtDiff, long hReservedData, size_t nResCount)
{
	LONG lErr;
	cudnnHandle_t cudnn = GetCuDNN(hHandle);
	cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc);
	cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc);
	cudnnRNNDataDescriptor_t descY = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hYDesc);
	cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc);
	cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc);
	MemoryItem* pXData;
	MemoryItem* pHxData;
	MemoryItem* pYData;
	MemoryItem* pWtDiff;
	MemoryItem* pWorkspaceData;
	MemoryItem* pReservedData;

	if (lErr = m_memory.GetData(hXData, &pXData))
		return lErr;

	if (lErr = m_memory.GetData(hHxData, &pHxData))
		return lErr;

	if (lErr = m_memory.GetData(hYData, &pYData))
		return lErr;

	if (lErr = m_memory.GetData(hWtDiff, &pWtDiff))
		return lErr;

	if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData))
		return lErr;

	if (lErr = m_memory.GetData(hReservedData, &pReservedData))
		return lErr;

	lErr = cudnnRNNBackwardWeightsEx(cudnn, desc,
		descX, pXData->Data(),
		descHx, pHxData->Data(),
		descY, pYData->Data(),
		pWorkspaceData->Data(), pWorkspaceData->Size(),
		descWt, pWtDiff->Data(),
		pReservedData->Data(), pReservedData->Size());

	return hipStreamSynchronize(0);
}

template long Memory<double>::RnnBackwardWeightsEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspace, size_t nWsCount, long hWtDesc, long hWtDiff, long hReserved, size_t nResCount);
template long Memory<float>::RnnBackwardWeightsEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspace, size_t nWsCount, long hWtDesc, long hWtDiff, long hReserved, size_t nResCount);

//end memory.cu
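//-----------------------------------------------------------------------------
// Editorial usage sketch (not part of the original memory.cu): the three RNN
// entry points above are intended to be called in a fixed order during one
// training iteration - forward with bTraining=true, then backward-data, then
// backward-weights - reusing the same workspace and reserved-space handles.
// Every handle parameter below is hypothetical and assumed to have been
// created earlier through the corresponding Create*/Alloc* calls; only the
// argument ordering is taken from the declarations above.
//-----------------------------------------------------------------------------
template <class T>
static long rnn_training_step_sketch(Memory<T>& mem, long hCuda, long hRnn,
	long hXDesc, long hX, long hXdiff, long hYDesc, long hY, long hYdiff,
	long hHxDesc, long hHx, long hHxdiff, long hCxDesc, long hCx, long hCxdiff,
	long hHyDesc, long hHy, long hHydiff, long hCyDesc, long hCy, long hCydiff,
	long hWtDesc, long hWt, long hWtdiff,
	long hWs, size_t nWsCount, long hRes, size_t nResCount)
{
	long lErr;

	// Forward pass in training mode fills the reserved space required by both backward passes.
	if (lErr = mem.RnnForward(hCuda, hRnn, hXDesc, hX, hHxDesc, hHx, hCxDesc, hCx, hWtDesc, hWt, hYDesc, hY, hHyDesc, hHy, hCyDesc, hCy, hWs, nWsCount, hRes, nResCount, true))
		return lErr;

	// Backward-data consumes the output/state gradients and produces the input/state gradients.
	if (lErr = mem.RnnBackwardData(hCuda, hRnn, hYDesc, hY, hYdiff, hHyDesc, hHydiff, hCyDesc, hCydiff, hWtDesc, hWt, hHxDesc, hHx, hCxDesc, hCx, hXDesc, hXdiff, hHxDesc, hHxdiff, hCxDesc, hCxdiff, hWs, nWsCount, hRes, nResCount))
		return lErr;

	// Backward-weights accumulates the weight gradients using the same workspace and reserve.
	return mem.RnnBackwardWeights(hCuda, hRnn, hXDesc, hX, hHxDesc, hHx, hYDesc, hY, hWs, nWsCount, hWtDesc, hWtdiff, hRes, nResCount);
}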
996775ea6925bf20998b63538fbb51a601e83b73.cu
//============================================================================= // FILE: memory.cu // // DESC: This file the basic memory management for the given device //============================================================================= #include "memory.h" //============================================================================= // Class Methods //============================================================================= template <class T> Memory<T>::Memory() : m_memory(), m_memoryPointers(), m_hostbuffers(), m_streams(), m_tensorDesc(), m_filterDesc(), m_convDesc(), m_poolDesc(), m_rnnDesc(), m_rnnDataDesc2(), m_lrnDesc(), m_cudnn(), m_pca(), m_tsnegp(), m_tsneg(), m_memtest(), m_nccl(), m_ssd(), m_memoryMap() { m_memory.SetMemoryPointers(&m_memoryPointers); m_tOne = (T)1; m_tZero = (T)0; #ifdef CUDNN_5 CreateActivationDesc(&m_hGlobalActivationSigmoid); SetActivationDesc(m_hGlobalActivationSigmoid, SIGMOID); CreateActivationDesc(&m_hGlobalActivationRelu); SetActivationDesc(m_hGlobalActivationRelu, RELU); CreateActivationDesc(&m_hGlobalActivationTanh); SetActivationDesc(m_hGlobalActivationTanh, TANH); CreateActivationDesc(&m_hGlobalActivationElu); SetActivationDesc(m_hGlobalActivationElu, ELU); #endif } template Memory<double>::Memory(); template Memory<float>::Memory(); template <class T> Memory<T>::~Memory() { for (int i=0; i<m_hostbuffers.GetCount(); i++) { FreeHostBuffer(i); } for (int i=0; i<m_streams.GetCount(); i++) { FreeStream(i); } for (int i=0; i<m_tensorDesc.GetCount(); i++) { FreeTensorDesc(i); } for (int i=0; i<m_filterDesc.GetCount(); i++) { FreeFilterDesc(i); } for (int i=0; i<m_convDesc.GetCount(); i++) { FreeConvolutionDesc(i); } for (int i=0; i<m_poolDesc.GetCount(); i++) { FreePoolingDesc(i); } for (int i = 0; i < m_rnnDesc.GetCount(); i++) { FreeRnnDesc(i); } for (int i = 0; i < m_rnnDataDesc1.GetCount(); i++) { FreeRnnDataDesc1(i); } for (int i = 0; i < m_rnnDataDesc2.GetCount(); i++) { FreeRnnDataDesc2(i); } for (int i = 0; i < m_rnn.GetCount(); i++) { FreeRnn8(i); } for (int i=0; i<m_lrnDesc.GetCount(); i++) { FreeLRNDesc(i); } for (int i=0; i<m_cudnn.GetCount(); i++) { FreeCuDNN(i); } #ifdef CUDNN_5 for (int i=0; i<m_activationDesc.GetCount(); i++) { FreeActivationDesc(i); } m_hGlobalActivationSigmoid = 0; m_hGlobalActivationRelu = 0; m_hGlobalActivationTanh = 0; m_hGlobalActivationElu = 0; for (int i = 0; i < m_dropoutDesc.GetCount(); i++) { FreeDropoutDesc(i); } #endif for (int i=0; i<m_pca.GetCount(); i++) { FreePCA(i); } for (int i=0; i<m_tsnegp.GetCount(); i++) { FreeTsneGaussianPerplexity(i); } for (int i = 0; i < m_memtest.GetCount(); i++) { FreeMemoryTest(i); } for (int i = 0; i < m_imgop.GetCount(); i++) { FreeImageOp(i); } for (int i = 0; i < m_nccl.GetCount(); i++) { FreeNCCL(i); } for (int i = 0; i < m_ssd.GetCount(); i++) { FreeSSD(i); } for (int i = 0; i < m_layernorm.GetCount(); i++) { FreeLayerNorm(i); } m_memoryMap.clear(); m_cudnnRef.clear(); m_cudnnH2Dev.clear(); m_cudnnDev2H.clear(); m_streamRef.clear(); m_streamH2Dev.clear(); m_streamH2Idx.clear(); m_streamH2CudnnH.clear(); m_streamCudnnRef.clear(); m_streamDev2Idx2H.clear(); } template Memory<double>::~Memory(); template Memory<float>::~Memory(); template <class T> long Memory<T>::GetDeviceMemory(int nDeviceID, T* pfTotal, T* pfFree, T* pfUsed, bool* pbEstimate) { LONG lErr; size_t lFree = 0; size_t lTotal = 0; size_t lUsed = 0; int nOriginalDeviceID = -1; if (nDeviceID >= 0) { if (lErr = cudaGetDevice(&nOriginalDeviceID)) return lErr; if (nDeviceID != nOriginalDeviceID) 
{ if (lErr = cudaSetDevice(nDeviceID)) return lErr; } } if (nDeviceID == -1) { cudaDeviceProp prop; memset(&prop, 0, sizeof(cudaDeviceProp)); if (lErr = cudaGetDeviceProperties(&prop, nDeviceID)) return lErr; lTotal = prop.totalGlobalMem; lUsed = (size_t)m_memory.GetTotalUsed(); lFree = lTotal - lUsed; *pbEstimate = true; } else { if (lErr = cudaMemGetInfo(&lFree, &lTotal)) return lErr; lUsed = lTotal - lFree; *pbEstimate = false; } *pfTotal = (T)((double)lTotal / (double)1000000000.0); *pfFree = (T)((double)lFree / (double)1000000000.0); *pfUsed = (T)((double)lUsed / (double)1000000000.0); if (nOriginalDeviceID >= 0 && nOriginalDeviceID != nDeviceID) { if (lErr = cudaSetDevice(nOriginalDeviceID)) return lErr; } return cudaStreamSynchronize(0); } template long Memory<double>::GetDeviceMemory(int nDeviceID, double* pdfTotal, double* pdfFree, double* pdfUsed, bool* pbEstimate); template long Memory<float>::GetDeviceMemory(int nDeviceID, float* pfTotal, float* pfFree, float* pfUsed, bool* pbEstimate); template <class T> long Memory<T>::AllocHost(LPTSTR* ppDst, LPTSTR pSrc) { int nLen = (int)_tcslen(pSrc); if (nLen == 0) return ERROR_PARAM_OUT_OF_RANGE; nLen++; // make room for NULL; LPTSTR pDst = NULL; LONG lSize = nLen * sizeof(TCHAR); LONG lErr = 0; if (lErr = alloc_host((void**)&pDst, lSize, false)) return lErr; pDst[nLen] = (TCHAR)NULL; _tcsncpy(pDst, pSrc, nLen); *ppDst = pDst; return lErr; } template long Memory<double>::AllocHost(LPTSTR* ppDst, LPTSTR pSrc); template long Memory<float>::AllocHost(LPTSTR* ppDst, LPTSTR pSrc); template <class T> long Memory<T>::AllocHost(size_t lCount, T** ppDst, void* pSrc, bool bSrcOnDevice, bool bHalf, bool bPinned) { if (lCount == 0) return ERROR_PARAM_OUT_OF_RANGE; if (ppDst == NULL) return ERROR_PARAM_NULL; long long lSize = lCount * sizeof(T); if (lSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; T* pDst = NULL; LONG lErr; if (lErr = alloc_host((void**)&pDst, (size_t)lSize, bPinned)) return lErr; if (pSrc != NULL) { cudaMemcpyKind kind = (bSrcOnDevice) ? cudaMemcpyDeviceToHost : cudaMemcpyHostToHost; if (bHalf) { if (lErr = convertHalf2BaseType(lCount, pSrc, pDst, kind)) { FreeHost(pDst); return lErr; } } else { if (lErr = cudaMemcpy(pDst, pSrc, (size_t)lSize, kind)) { FreeHost(pDst); return lErr; } } } else { memset(pDst, 0, lSize); } *ppDst = pDst; return cudaGetLastError(); } template long Memory<double>::AllocHost(size_t lCount, double** ppDst, void* pSrc, bool bSrcOnDevice, bool bHalf, bool bPinned); template long Memory<float>::AllocHost(size_t lCount, float** ppDst, void* pSrc, bool bSrcOnDevice, bool bHalf, bool bPinned); template <class T> long Memory<T>::CopyToHost(size_t lCount, T* pDst, void* pSrc, bool bSrcOnDevice, bool bHalf) { if (lCount == 0) return ERROR_PARAM_OUT_OF_RANGE; if (pDst == NULL || pSrc == NULL) return ERROR_PARAM_NULL; cudaMemcpyKind kind = (bSrcOnDevice) ? 
cudaMemcpyDeviceToHost : cudaMemcpyHostToHost; long long lSize = lCount * sizeof(T); if (lSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (bHalf) return convertHalf2BaseType(lCount, pSrc, pDst, kind); else return cudaMemcpy(pDst, pSrc, (size_t)lSize, kind); } template long Memory<double>::CopyToHost(size_t lCount, double* pDst, void* pSrc, bool bSrcOnDevice, bool bHalf); template long Memory<float>::CopyToHost(size_t lCount, float* pDst, void* pSrc, bool bSrcOnDevice, bool bHalf); template <class T> long Memory<T>::CopyGpuToHost(long lCount, long hGpuSrc, long hHostDst) { LONG lErr; MemoryItem* pSrcD; if (lErr = m_memory.GetData(hGpuSrc, &pSrcD)) return lErr; HostBuffer<T>* pDstH = GetHostBuffer(hHostDst); if (pDstH == NULL) return ERROR_MEMORY_NOT_FOUND; T* pSrc = (T*)pSrcD->Data(); T* pDst = (T*)pDstH->Data(); return CopyToHost(lCount, pDst, pSrc, true, false); } template long Memory<double>::CopyGpuToHost(long lCount, long hGpuSrc, long hHostDst); template long Memory<float>::CopyGpuToHost(long lCount, long hGpuSrc, long hHostDst); template <class T> long Memory<T>::CopyHostToGpu(long lCount, long hHostSrc, long hGpuDst) { LONG lErr; MemoryItem* pDstD; if (lErr = m_memory.GetData(hGpuDst, &pDstD)) return lErr; HostBuffer<T>* pSrcH = GetHostBuffer(hHostSrc); if (pSrcH == NULL) return ERROR_MEMORY_NOT_FOUND; T* pDst = (T*)pDstD->Data(); T* pSrc = (T*)pSrcH->Data(); long long lSize = lCount * sizeof(T); if (lSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; return cudaMemcpy(pDst, pSrc, lSize, cudaMemcpyHostToDevice); } template long Memory<double>::CopyHostToGpu(long lCount, long hHostSrc, long hGpuDst); template long Memory<float>::CopyHostToGpu(long lCount, long hHostSrc, long hGpuDst); template <class T> long Memory<T>::AllocHostBuffer(size_t lCount, long* phHandle) { LONG lErr = 0; if (lCount % 2 != 0) lCount++; T* pMem = NULL; if (lErr = AllocHost(lCount, &pMem, NULL, FALSE, FALSE)) return lErr; HostBuffer<T>* pHostBuf = new HostBuffer<T>(pMem, lCount); if (pHostBuf == NULL) { FreeHost(pMem); return ERROR_MEMORY_OUT; } long hHandle = m_hostbuffers.Allocate(pHostBuf); if (hHandle < 0) { delete pHostBuf; FreeHost(pMem); return ERROR_MEMORY_OUT; } m_rgActiveHostBuffers[pMem] = pHostBuf; *phHandle = hHandle; return 0; } template long Memory<double>::AllocHostBuffer(size_t lCount, long* phHandle); template long Memory<float>::AllocHostBuffer(size_t lCount, long* phHandle); template <class T> long Memory<T>::FreeHostBuffer(long hHandle) { HostBuffer<T>* pHostBuf = (HostBuffer<T>*)m_hostbuffers.Free(hHandle); if (pHostBuf != NULL) { if (pHostBuf->Data() != NULL) { m_rgActiveHostBuffers.erase(pHostBuf->Data()); FreeHost(pHostBuf->Data()); } delete pHostBuf; } return 0; } template long Memory<double>::FreeHostBuffer(long hHandle); template long Memory<float>::FreeHostBuffer(long hHandle); template <class T> bool Memory<T>::IsHostBuffer(T* pf) { if (m_rgActiveHostBuffers.find(pf) != m_rgActiveHostBuffers.end()) return true; return false; } template bool Memory<double>::IsHostBuffer(double* pf); template bool Memory<float>::IsHostBuffer(float* pf); template <class T> long Memory<T>::CreateStream(long* phHandle, bool bNonBlocking, int nIndex) { std::unique_lock<std::mutex> lock(m_sync); LONG lErr; cudaStream_t stream = NULL; int nDeviceID = 0; if (phHandle == NULL) return ERROR_PARAM_NULL; if (nIndex >= 0) { if (lErr = cudaGetDevice(&nDeviceID)) return lErr; if (m_streamDev2Idx2H.find(nDeviceID) != m_streamDev2Idx2H.end()) { if (m_streamDev2Idx2H[nDeviceID].find(nIndex) != 
m_streamDev2Idx2H[nDeviceID].end()) { stream = m_streamDev2Idx2H[nDeviceID][nIndex]; m_streamRef[stream]++; } } } if (stream == NULL) { if (bNonBlocking) { if (lErr = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)) return lErr; } else { if (lErr = cudaStreamCreate(&stream)) return lErr; } if (nIndex >= 0) { m_streamDev2Idx2H[nDeviceID][nIndex] = stream; m_streamRef[stream] = 1; m_streamH2Dev[stream] = nDeviceID; m_streamH2Idx[stream] = nIndex; } } long hHandle = m_streams.Allocate(stream); if (hHandle < 0) { cudaStreamDestroy(stream); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return cudaStreamSynchronize(0); } template long Memory<double>::CreateStream(long* phHandle, bool bNonBlocking, int nIndex); template long Memory<float>::CreateStream(long* phHandle, bool bNonBlocking, int nIndex); template <typename T> __global__ void synchronize_thread_kernel() { } template <class T> long Memory<T>::SynchronizeThread() { synchronize_thread_kernel<T><<<1, 1>>>(); return cudaGetLastError(); } template long Memory<double>::SynchronizeThread(); template long Memory<float>::SynchronizeThread(); template <class T> long Memory<T>::CreateCuDNN(long hStream, long* phHandle) { std::unique_lock<std::mutex> lock(m_sync); LONG lErr; cudnnHandle_t cudnn = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; int nDeviceID; if (lErr = cudaGetDevice(&nDeviceID)) return lErr; if (hStream == 0) { // If the cudnn is already created for the device, share it // and increase the ref count. if (m_cudnnDev2H.find(nDeviceID) != m_cudnnDev2H.end()) { cudnn = m_cudnnDev2H[nDeviceID]; m_cudnnRef[cudnn]++; } // Otherwise create a new cudnn for the device and add it to // the maps to share with a ref count of 1. else { if (lErr = cudnnCreate(&cudnn)) return lErr | ERROR_CUDNN_OFFSET; m_cudnnRef[cudnn] = 1; m_cudnnDev2H[nDeviceID] = cudnn; m_cudnnH2Dev[cudnn] = nDeviceID; } } else { cudaStream_t stream = GetStream(hStream); // If the stream is a shared stream, share see if the cudnn // is already shared with the shared stream and use the shared // cudnn if it exists, making sure to increase its ref count. if (m_streamRef.find(stream) != m_streamRef.end()) { if (m_streamH2CudnnH.find(stream) != m_streamH2CudnnH.end()) { cudnn = m_streamH2CudnnH[stream]; m_streamCudnnRef[cudnn]++; } } if (cudnn == NULL) { if (lErr = cudnnCreate(&cudnn)) return lErr | ERROR_CUDNN_OFFSET; if (lErr = cudnnSetStream(cudnn, stream)) { cudnnDestroy(cudnn); if (m_streamH2CudnnH.find(stream) != m_streamH2CudnnH.end()) m_streamH2CudnnH.erase(stream); return lErr | ERROR_CUDNN_OFFSET; } // If the stream is a shared stream, add the cudnn to // it and set the ref count to 1. 
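// Editorial note (not in the original source): taken together, the maps touched in this
// method appear to implement two levels of sharing - one cudnnHandle_t per device when no
// stream is given, and one per shared (indexed) stream otherwise - with reference counts
// so that FreeCuDNN can presumably defer cudnnDestroy until the last user releases its
// handle.  A hypothetical caller is expected to pair the calls (names illustrative only):
//
//   long hCuda = 0;
//   mem.CreateCuDNN(0, &hCuda);   // shares (or creates) the per-device cudnn handle
//   /* ... use hCuda with the cuDNN-based methods below ... */
//   mem.FreeCuDNN(hCuda);         // decrements the ref count; destroys on last release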
if (m_streamRef.find(stream) != m_streamRef.end()) { m_streamH2CudnnH[stream] = cudnn; m_streamCudnnRef[cudnn] = 1; } } } long hHandle = m_cudnn.Allocate(cudnn); if (hHandle < 0) { free_cudnn(cudnn); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return cudaStreamSynchronize(0); } template long Memory<double>::CreateCuDNN(long hStream, long* phHandle); template long Memory<float>::CreateCuDNN(long hStream, long* phHandle); template <class T> long Memory<T>::CreateTensorDesc(long* phHandle) { LONG lErr; cudnnTensorDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateTensorDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_tensorDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyTensorDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return cudaStreamSynchronize(0); } template long Memory<double>::CreateTensorDesc(long* phHandle); template long Memory<float>::CreateTensorDesc(long* phHandle); template <class T> long Memory<T>::AddTensor(long hHandle, T fAlpha, long hSrcDesc, long hSrc, int nSrcOffset, T fBeta, long hDstDesc, long hDst, int nDstOffset) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t srcdesc = GetTensorDesc(hSrcDesc); cudnnTensorDescriptor_t dstdesc = GetTensorDesc(hDstDesc); MemoryItem* pSrc; MemoryItem* pDst; if (lErr = m_memory.GetData(hSrc, &pSrc)) return lErr; if (lErr = m_memory.GetData(hDst, &pDst)) return lErr; if (cudnn == NULL || srcdesc == NULL || dstdesc == NULL) return ERROR_PARAM_NULL; T* src = (T*)pSrc->Data(); T* dst = (T*)pDst->Data(); if (nSrcOffset > 0) src += nSrcOffset; if (nDstOffset > 0) dst += nDstOffset; #ifdef CUDNN_4 if (lErr = cudnnAddTensor(cudnn, &fAlpha, srcdesc, src, &fBeta, dstdesc, dst)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnAddTensor(cudnn, CUDNN_ADD_SAME_C, &fAlpha, srcdesc, src, &fBeta, dstdesc, dst)) return lErr | ERROR_CUDNN_OFFSET; #endif return cudaStreamSynchronize(0); } template long Memory<double>::AddTensor(long hHandle, double dfAlpha, long hSrcDesc, long hSrc, int nSrcOffset, double dfBeta, long hDstDesc, long hDst, int nDstOffset); template long Memory<float>::AddTensor(long hHandle, float fAlpha, long hSrcDesc, long hSrc, int nSrcOffset, float fBeta, long hDstDesc, long hDst, int nDstOffset); template <class T> long Memory<T>::CreateFilterDesc(long* phHandle) { LONG lErr; cudnnFilterDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateFilterDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_filterDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyFilterDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return cudaStreamSynchronize(0); } template long Memory<double>::CreateFilterDesc(long* phHandle); template long Memory<float>::CreateFilterDesc(long* phHandle); template <class T> long Memory<T>::CreateConvolutionDesc(long* phHandle) { LONG lErr; cudnnConvolutionDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateConvolutionDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_convDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyConvolutionDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return cudaStreamSynchronize(0); } template long Memory<double>::CreateConvolutionDesc(long* phHandle); template long Memory<float>::CreateConvolutionDesc(long* phHandle); template <class T> long Memory<T>::GetConvolutionInfo(long hHandle, long hBottomDesc, long 
hFilterDesc, long hConvDesc, long hTopDesc, size_t lWsLimitInBytes, bool bUseTensorCores, long* palgoFwd, size_t* plWsSizeFwd, long* palgoBwdFilter, size_t* plWsSizeBwdFilter, long* palgoBwdData, size_t* plWsSizeBwdData, int nPreferredFwdAlgo) { cudnnStatus_t lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t bottom = GetTensorDesc(hBottomDesc); cudnnFilterDescriptor_t filter = GetFilterDesc(hFilterDesc); cudnnConvolutionDescriptor_t conv = GetConvolutionDesc(hConvDesc); cudnnTensorDescriptor_t top = GetTensorDesc(hTopDesc); // Choose forward algorithm for convolution. cudnnConvolutionFwdAlgo_t algoFwd; #ifdef CUDA10_2 // Setup the algorithm preference. cudnnConvolutionFwdPreference_t fwdPref = CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT; cudnnConvolutionBwdFilterPreference_t bwdFltPref = CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT; cudnnConvolutionBwdDataPreference_t bwdDataPref = CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT; if (lWsLimitInBytes == (((size_t)-1) / 2 + 1) || lWsLimitInBytes >= (SIZE_MAX - 10)) { lWsLimitInBytes = 0; fwdPref = CUDNN_CONVOLUTION_FWD_PREFER_FASTEST; bwdFltPref = CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST; bwdDataPref = CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST; } else if (lWsLimitInBytes == 0) { lWsLimitInBytes = 0; fwdPref = CUDNN_CONVOLUTION_FWD_NO_WORKSPACE; bwdFltPref = CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE; bwdDataPref = CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE; } if (lErr = cudnnGetConvolutionForwardAlgorithm(cudnn, bottom, filter, conv, top, fwdPref, lWsLimitInBytes, &algoFwd)) return lErr | ERROR_CUDNN_OFFSET; #else int nAlgCount; cudnnConvolutionFwdAlgoPerf_t fwdPref[10]; if (lErr = cudnnGetConvolutionForwardAlgorithm_v7(cudnn, bottom, filter, conv, top, 10, &nAlgCount, fwdPref)) return lErr; if (lWsLimitInBytes == (((size_t)-1) / 2 + 1) || lWsLimitInBytes >= (SIZE_MAX - 10)) { algoFwd = fwdPref[0].algo; } else { int nIdx = 0; while (fwdPref[nIdx].status == CUDNN_STATUS_SUCCESS && fwdPref[nIdx].memory > lWsLimitInBytes && nIdx < nAlgCount && nIdx < 10) { nIdx++; } if (nIdx == nAlgCount) return ERROR_PARAM_OUT_OF_RANGE; algoFwd = fwdPref[nIdx].algo; } #endif // Get workspace size for forward algorithm. size_t szFwd = 0; if (lErr = cudnnGetConvolutionForwardWorkspaceSize(cudnn, bottom, filter, conv, top, algoFwd, &szFwd)) return lErr | ERROR_CUDNN_OFFSET; // CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD has been found by the native Caffe team to work better than // CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM on deconvolution (which acts a bit buggy in this // situation. For this reason, when using cuDnn deconvolution, the C# side sets the preferred // fwd algo to CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD which is used only when the workspace is less // than or equat to the default workspace size and no errors occur when attempting to get the // workspace size for WINOGRAD. By default, the nPrefferredFwdAlgo paraeter is ignored. if (nPreferredFwdAlgo >= 0 && algoFwd == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM && (int)algoFwd != nPreferredFwdAlgo) { size_t lWinogradWorkspaceSize = 0; lErr = cudnnGetConvolutionForwardWorkspaceSize(cudnn, bottom, filter, conv, top, (cudnnConvolutionFwdAlgo_t)nPreferredFwdAlgo, &lWinogradWorkspaceSize); if (lErr == CUDNN_STATUS_SUCCESS) { if (lWinogradWorkspaceSize <= szFwd) { algoFwd = (cudnnConvolutionFwdAlgo_t)nPreferredFwdAlgo; szFwd = lWinogradWorkspaceSize; } } } // Choose backward filter algorithm. 
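// Editorial note (not in the original source): the backward-filter algorithm is chosen
// with the same policy just applied to the forward algorithm - when lWsLimitInBytes is
// the "no limit" sentinel (top bit set, i.e. ((size_t)-1)/2 + 1, or near SIZE_MAX) the
// fastest entry reported by the *_v7 query is taken, otherwise the perf list (returned
// fastest-first by cuDNN) is scanned for the first algorithm whose workspace fits.
// In sketch form:  algoBwdFilter = bwdFltPref[0].algo if unlimited,
//                  else first bwdFltPref[i].algo with bwdFltPref[i].memory <= lWsLimitInBytes.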
cudnnConvolutionBwdFilterAlgo_t algoBwdFilter; #ifdef CUDA10_2 if (lErr = cudnnGetConvolutionBackwardFilterAlgorithm(cudnn, bottom, top, conv, filter, bwdFltPref, lWsLimitInBytes, &algoBwdFilter)) return lErr | ERROR_CUDNN_OFFSET; #else cudnnConvolutionBwdFilterAlgoPerf_t bwdFltPref[10]; if (lErr = cudnnGetConvolutionBackwardFilterAlgorithm_v7(cudnn, bottom, top, conv, filter, 10, &nAlgCount, bwdFltPref)) return lErr; if (lWsLimitInBytes == (((size_t)-1) / 2 + 1) || lWsLimitInBytes >= (SIZE_MAX - 10)) { algoBwdFilter = bwdFltPref[0].algo; } else { int nIdx = 0; while (bwdFltPref[nIdx].status == CUDNN_STATUS_SUCCESS && bwdFltPref[nIdx].memory > lWsLimitInBytes && nIdx < nAlgCount && nIdx < 10) { nIdx++; } if (nIdx == nAlgCount) return ERROR_PARAM_OUT_OF_RANGE; algoBwdFilter = bwdFltPref[nIdx].algo; } #endif // Get workspace size for backward filter algorithm. size_t szBwdFilter = 0; if (lErr = cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn, bottom, top, conv, filter, algoBwdFilter, &szBwdFilter)) return lErr | ERROR_CUDNN_OFFSET; // Choose backward data algorithm. cudnnConvolutionBwdDataAlgo_t algoBwdData; #ifdef CUDA10_2 if (lErr = cudnnGetConvolutionBackwardDataAlgorithm(cudnn, filter, top, conv, bottom, bwdDataPref, lWsLimitInBytes, &algoBwdData)) return lErr | ERROR_CUDNN_OFFSET; #else cudnnConvolutionBwdDataAlgoPerf_t bwdDataPref[10]; if (lErr = cudnnGetConvolutionBackwardDataAlgorithm_v7(cudnn, filter, top, conv, bottom, 5, &nAlgCount, bwdDataPref)) return lErr; if (lWsLimitInBytes == (((size_t)-1) / 2 + 1) || lWsLimitInBytes >= (SIZE_MAX - 10)) { algoBwdData = bwdDataPref[0].algo; } else { int nIdx = 0; while (bwdDataPref[nIdx].status == CUDNN_STATUS_SUCCESS && bwdDataPref[nIdx].memory > lWsLimitInBytes && nIdx < nAlgCount && nIdx < 10) { nIdx++; } if (nIdx == nAlgCount) return ERROR_PARAM_OUT_OF_RANGE; algoBwdData = bwdDataPref[nIdx].algo; } #endif // Get workspace size for backward data algorithm. 
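// Editorial note (not in the original source): a typical caller is expected to take the
// three algorithm/size pairs returned by this method, allocate a single workspace large
// enough for the largest of the three sizes, and then pass the matching algo value and
// workspace handle to ConvolutionForward, ConvolutionBackwardFilter and
// ConvolutionBackwardData.  Hypothetical sketch (handle names are illustrative only):
//
//   long algoFwd, algoBwdFlt, algoBwdData;  size_t szFwd, szBwdFlt, szBwdData;
//   mem.GetConvolutionInfo(hCuda, hBtm, hFlt, hConv, hTop, lWsLimit, false,
//       &algoFwd, &szFwd, &algoBwdFlt, &szBwdFlt, &algoBwdData, &szBwdData, -1);
//   size_t szWs = max(szFwd, max(szBwdFlt, szBwdData));   // one shared workspace
//   // (allocate a device buffer of szWs bytes through the DLL's memory-allocation entry
//   //  point and reuse its handle for all three convolution passes.)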
size_t szBwdData = 0; if (lErr = cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn, filter, top, conv, bottom, algoBwdData, &szBwdData)) return lErr | ERROR_CUDNN_OFFSET; *palgoFwd = (long)algoFwd; *plWsSizeFwd = szFwd; *palgoBwdFilter = (long)algoBwdFilter; *plWsSizeBwdFilter = szBwdFilter; *palgoBwdData = (long)algoBwdData; *plWsSizeBwdData = szBwdData; return cudaStreamSynchronize(0); } template long Memory<double>::GetConvolutionInfo(long hHandle, long hBottomDesc, long hFilterDesc, long hConvDesc, long hTopDesc, size_t lWsLimitInBytes, bool bUseTensorCores, long* palgoFwd, size_t* plWsSizeFwd, long* palgoBwdFilter, size_t* plWsSizeBwdFilter, long* palgoBwdData, size_t* plWsSizeBwdData, int nPreferredFwdAlgo); template long Memory<float>::GetConvolutionInfo(long hHandle, long hBottomDesc, long hFilterDesc, long hConvDesc, long hTopDesc, size_t lWsLimitInBytes, bool bUseTensorCores, long* palgoFwd, size_t* plWsSizeFwd, long* palgoBwdFilter, size_t* plWsSizeBwdFilter, long* palgoBwdData, size_t* plWsSizeBwdData, int nPreferredFwdAlgo); template <class T> long Memory<T>::ConvolutionForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hFilterDesc, long hWeight, int nWeightOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, T fBeta, long hTopDesc, long hTopData, int nTopOffset, bool bSyncStream) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); cudnnFilterDescriptor_t filterdesc = GetFilterDesc(hFilterDesc); cudnnConvolutionDescriptor_t convdesc = GetConvolutionDesc(hConvDesc); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); MemoryItem* pBtmData; MemoryItem* pTopData; MemoryItem* pWeight; MemoryItem* pWorkspace = NULL; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hWeight, &pWeight)) return lErr; T* btmdata = (T*)pBtmData->Data(); T* topdata = (T*)pTopData->Data(); T* weight = (T*)pWeight->Data(); T* wksp = NULL; if (hWorkspace != 0) { if (lErr = m_memory.GetData(hWorkspace, &pWorkspace)) return lErr; wksp = (T*)pWorkspace->Data(); } else if (lWorkspaceSize != 0) { return ERROR_PARAM_OUT_OF_RANGE; } if (nBottomOffset > 0) btmdata += nBottomOffset; if (nTopOffset > 0) topdata += nTopOffset; if (nWeightOffset > 0) weight += nWeightOffset; if (wksp != NULL && nWorkspaceOffset > 0) wksp += nWorkspaceOffset; if (lErr = cudnnConvolutionForward(cudnn, &fAlpha, btmdesc, btmdata, filterdesc, weight, convdesc, (cudnnConvolutionFwdAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; if (bSyncStream) return cudaStreamSynchronize(0); return CUDNN_STATUS_SUCCESS; } template long Memory<double>::ConvolutionForward(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hFilterDesc, long hWeight, int nWeightOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, double dfBeta, long hTopDesc, long hTopData, int nTopOffset, bool bSyncStream); template long Memory<float>::ConvolutionForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hFilterDesc, long hWeight, int nWeightOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, float fBeta, long hTopDesc, long hTopData, int nTopOffset, bool bSyncStream); template <class T> long 
Memory<T>::ConvolutionBackwardBias(long hHandle, T fAlpha, long hTopDesc, long hTopDiff, int nTopOffset, T fBeta, long hBiasDesc, long hBiasDiff, int nBiasOffset, bool bSyncStream) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t biasdesc = GetTensorDesc(hBiasDesc); MemoryItem* pTopDiff; MemoryItem* pBiasDiff; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBiasDiff, &pBiasDiff)) return lErr; T* topdiff = (T*)pTopDiff->Data(); T* biasdiff = (T*)pBiasDiff->Data(); if (nTopOffset > 0) topdiff += nTopOffset; if (nBiasOffset > 0) biasdiff += nBiasOffset; if (lErr = cudnnConvolutionBackwardBias(cudnn, &fAlpha, topdesc, topdiff, &fBeta, biasdesc, biasdiff)) return lErr | ERROR_CUDNN_OFFSET; if (bSyncStream) return cudaStreamSynchronize(0); return CUDNN_STATUS_SUCCESS; } template long Memory<double>::ConvolutionBackwardBias(long hHandle, double dfAlpha, long hTopDesc, long hTopDiff, int nTopOffset, double dfBeta, long hBiasDesc, long hBiasDiff, int nBiasOffset, bool bSyncStream); template long Memory<float>::ConvolutionBackwardBias(long hHandle, float fAlpha, long hTopDesc, long hTopDiff, int nTopOffset, float fBeta, long hBiasDesc, long hBiasDiff, int nBiasOffset, bool bSyncStream); template <class T> long Memory<T>::ConvolutionBackwardFilter(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, T fBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnConvolutionDescriptor_t convdesc = GetConvolutionDesc(hConvDesc); cudnnFilterDescriptor_t filterdesc = GetFilterDesc(hFilterDesc); MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pWeightDiff; MemoryItem* pWorkspace = NULL; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hWeightDiff, &pWeightDiff)) return lErr; T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* weightdiff = (T*)pWeightDiff->Data(); T* wksp = NULL; if (hWorkspace != 0) { if (lErr = m_memory.GetData(hWorkspace, &pWorkspace)) return lErr; wksp = (T*)pWorkspace->Data(); } else if (lWorkspaceSize != 0) { return ERROR_PARAM_OUT_OF_RANGE; } if (nBottomOffset > 0) btmdata += nBottomOffset; if (nTopOffset > 0) topdiff += nTopOffset; if (nWeightOffset > 0) weightdiff += nWeightOffset; if (wksp != NULL && nWorkspaceOffset > 0) wksp += nWorkspaceOffset; #ifdef CUDNN_5 if (lErr = cudnnConvolutionBackwardFilter(cudnn, &fAlpha, btmdesc, btmdata, topdesc, topdiff, convdesc, (cudnnConvolutionBwdFilterAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, filterdesc, weightdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnConvolutionBackwardFilter_v3(cudnn, &fAlpha, btmdesc, btmdata, topdesc, topdiff, convdesc, (cudnnConvolutionBwdFilterAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, filterdesc, weightdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif if (bSyncStream) return cudaStreamSynchronize(0); return CUDNN_STATUS_SUCCESS; } template long Memory<double>::ConvolutionBackwardFilter(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, 
long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, double dfBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream); template long Memory<float>::ConvolutionBackwardFilter(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, float fBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream); template <class T> long Memory<T>::ConvolutionBackwardData(long hHandle, T fAlpha, long hFilterDesc, long hWeight, int nWeightOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, T fBeta, long hBottomDesc, long hBottomDiff, int nBottomOffset, bool bSyncStream) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnFilterDescriptor_t filterdesc = GetFilterDesc(hFilterDesc); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnConvolutionDescriptor_t convdesc = GetConvolutionDesc(hConvDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pWeight; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; MemoryItem* pWorkspace = NULL; if (lErr = m_memory.GetData(hWeight, &pWeight)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* weight = (T*)pWeight->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); T* wksp = NULL; if (hWorkspace != 0) { if (lErr = m_memory.GetData(hWorkspace, &pWorkspace)) return lErr; wksp = (T*)pWorkspace->Data(); } else if (lWorkspaceSize != 0) { return ERROR_PARAM_OUT_OF_RANGE; } if (nWeightOffset > 0) weight += nWeightOffset; if (nTopOffset > 0) topdiff += nTopOffset; if (nBottomOffset > 0) btmdiff += nBottomOffset; if (wksp != NULL && nWorkspaceOffset > 0) wksp += nWorkspaceOffset; #ifdef CUDNN_5 if (lErr = cudnnConvolutionBackwardData(cudnn, &fAlpha, filterdesc, weight, topdesc, topdiff, convdesc, (cudnnConvolutionBwdDataAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, btmdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnConvolutionBackwardData_v3(cudnn, &fAlpha, filterdesc, weight, topdesc, topdiff, convdesc, (cudnnConvolutionBwdDataAlgo_t)algo, wksp, lWorkspaceSize, &fBeta, btmdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif if (bSyncStream) return cudaStreamSynchronize(0); return CUDNN_STATUS_SUCCESS; } template long Memory<double>::ConvolutionBackwardData(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, double dfBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream); template long Memory<float>::ConvolutionBackwardData(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, int nBottomOffset, long hTopDesc, long hTopDiff, int nTopOffset, long hConvDesc, long algo, long hWorkspace, int nWorkspaceOffset, size_t lWorkspaceSize, float fBeta, long hFilterDesc, long hWeightDiff, int nWeightOffset, bool bSyncStream); template <class T> long Memory<T>::CreatePoolingDesc(long* phHandle) { LONG lErr; cudnnPoolingDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreatePoolingDescriptor(&desc)) return lErr | 
ERROR_CUDNN_OFFSET; long hHandle = m_poolDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyPoolingDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreatePoolingDesc(long* phHandle); template long Memory<float>::CreatePoolingDesc(long* phHandle); template <class T> long Memory<T>::PoolingForward(long hHandle, long hPoolingDesc, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnPoolingDescriptor_t pooldesc = GetPoolingDesc(hPoolingDesc); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); if (lErr = cudnnPoolingForward(cudnn, pooldesc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; return cudaStreamSynchronize(0); } template long Memory<double>::PoolingForward(long hHandle, long hPoolingDesc, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::PoolingForward(long hHandle, long hPoolingDesc, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::PoolingBackward(long hHandle, long hPoolingDesc, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnPoolingDescriptor_t pooldesc = GetPoolingDesc(hPoolingDesc); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? 
btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); if (lErr = cudnnPoolingBackward(cudnn, pooldesc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; return cudaStreamSynchronize(0); } template long Memory<double>::PoolingBackward(long hHandle, long hPoolingDesc, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::PoolingBackward(long hHandle, long hPoolingDesc, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::DeriveBatchNormDesc(long hFwdScaleBiasMeanVarDesc, long hFwdBottomDesc, long hBwdScaleBiasMeanVarDesc, long hBwdBottomDesc, int mode) { LONG lErr; cudnnTensorDescriptor_t fwdscalemeanvardesc = GetTensorDesc(hFwdScaleBiasMeanVarDesc); cudnnTensorDescriptor_t fwdbtmdesc = GetTensorDesc(hFwdBottomDesc); cudnnTensorDescriptor_t bwdscalemeanvardesc = GetTensorDesc(hBwdScaleBiasMeanVarDesc); cudnnTensorDescriptor_t bwdbtmdesc = GetTensorDesc(hBwdBottomDesc); if (lErr = cudnnDeriveBNTensorDescriptor(fwdscalemeanvardesc, fwdbtmdesc, (cudnnBatchNormMode_t)mode)) return lErr; if (lErr = cudnnDeriveBNTensorDescriptor(bwdscalemeanvardesc, bwdbtmdesc, (cudnnBatchNormMode_t)mode)) return lErr; return CUDNN_STATUS_SUCCESS; } template long Memory<double>::DeriveBatchNormDesc(long hFwdScaleBiasMeanVarDesc, long hFwdBottomDesc, long hBwdScaleBiasMeanVarDesc, long hBwdBottomDesc, int mode); template long Memory<float>::DeriveBatchNormDesc(long hFwdScaleBiasMeanVarDesc, long hFwdBottomDesc, long hBwdScaleBiasMeanVarDesc, long hBwdBottomDesc, int mode); template <class T> long Memory<T>::BatchNormForward(long hHandle, int mode, T fAlpha, T fBeta, long hFwdBottomDesc, long hBottomData, long hFwdTopDesc, long hTopData, long hFwdScaleBiasMeanVarDesc, long hScaleData, long hBiasData, T fFactor, long hGlobalMean, long hGlobalVar, T fEps, long hSaveMean, long hSaveVar, bool bTraining) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t fwdbtmdesc = GetTensorDesc(hFwdBottomDesc); cudnnTensorDescriptor_t fwdtopdesc = GetTensorDesc(hFwdTopDesc); cudnnTensorDescriptor_t fwdscalemeanvardesc = GetTensorDesc(hFwdScaleBiasMeanVarDesc); MemoryItem* pBtmData; MemoryItem* pTopData; MemoryItem* pScaleData; MemoryItem* pBiasData; MemoryItem* pGlobalMean; MemoryItem* pGlobalVar; MemoryItem* pSaveMean; MemoryItem* pSaveVar; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hScaleData, &pScaleData)) return lErr; if (lErr = m_memory.GetData(hBiasData, &pBiasData)) return lErr; if (lErr = m_memory.GetData(hGlobalMean, &pGlobalMean)) return lErr; if (lErr = m_memory.GetData(hGlobalVar, &pGlobalVar)) return lErr; T* btmdata = 
(T*)pBtmData->Data(); T* topdata = (T*)pTopData->Data(); T* scaledata = (T*)pScaleData->Data(); T* biasdata = (T*)pBiasData->Data(); T* globalmean = (T*)pGlobalMean->Data(); T* globalvar = (T*)pGlobalVar->Data(); if (sizeof(T) == 4) { if ((float)fEps < CUDNN_BN_MIN_EPSILON) fEps = 0.0001f; } if (bTraining) { if (lErr = m_memory.GetData(hSaveMean, &pSaveMean)) return lErr; if (lErr = m_memory.GetData(hSaveVar, &pSaveVar)) return lErr; T* savemean = (T*)pSaveMean->Data(); T* savevar = (T*)pSaveVar->Data(); if (lErr = cudnnBatchNormalizationForwardTraining(cudnn, (cudnnBatchNormMode_t)mode, &fAlpha, &fBeta, fwdbtmdesc, btmdata, fwdtopdesc, topdata, fwdscalemeanvardesc, scaledata, biasdata, fFactor, globalmean, globalvar, fEps, savemean, savevar)) return lErr; } else { if (lErr = cudnnBatchNormalizationForwardInference(cudnn, (cudnnBatchNormMode_t)mode, &fAlpha, &fBeta, fwdbtmdesc, btmdata, fwdtopdesc, topdata, fwdscalemeanvardesc, scaledata, biasdata, globalmean, globalvar, fEps)) return lErr; } return cudaStreamSynchronize(0); } template long Memory<double>::BatchNormForward(long hHandle, int mode, double dfAlpha, double dfBeta, long hFwdBottomDesc, long hBottomData, long hFwdTopDesc, long hTopData, long hFwdScaleBiasMeanVarDesc, long hScaleData, long hBiasData, double fFactor, long hGlobalMean, long hGlobalVar, double fEps, long hSaveMean, long hSaveVar, bool bTraining); template long Memory<float>::BatchNormForward(long hHandle, int mode, float fAlpha, float fBeta, long hFwdBottomDesc, long hBottomData, long hFwdTopDesc, long hTopData, long hFwdScaleBiasMeanVarDesc, long hScaleData, long hBiasData, float fFactor, long hGlobalMean, long hGlobalVar, float fEps, long hSaveMean, long hSaveVar, bool bTraining); template <class T> long Memory<T>::BatchNormBackward(long hHandle, int mode, T fAlphaDiff, T fBetaDiff, T fAlphaParamDiff, T fBetaParamDiff, long hBwdBottomDesc, long hBottomData, long hTopDiffDesc, long hTopDiff, long hBottomDiffDesc, long hBottomDiff, long hBwdScaleBiasMeanVarDesc, long hScaleData, long hScaleDiff, long hBiasDiff, T fEps, long hSaveMean, long hSaveVar) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t bwdbtmdesc = GetTensorDesc(hBwdBottomDesc); cudnnTensorDescriptor_t topdiffdesc = GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = GetTensorDesc(hBottomDiffDesc); cudnnTensorDescriptor_t bwdscalemeanvardesc = GetTensorDesc(hBwdScaleBiasMeanVarDesc); MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; MemoryItem* pScaleData; MemoryItem* pScaleDiff; MemoryItem* pBiasDiff; MemoryItem* pSaveMean = NULL; MemoryItem* pSaveVar = NULL; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; if (lErr = m_memory.GetData(hScaleData, &pScaleData)) return lErr; if (lErr = m_memory.GetData(hScaleDiff, &pScaleDiff)) return lErr; if (lErr = m_memory.GetData(hBiasDiff, &pBiasDiff)) return lErr; T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); T* scaledata = (T*)pScaleData->Data(); T* scalediff = (T*)pScaleDiff->Data(); T* biasdiff = (T*)pBiasDiff->Data(); T* savemean = NULL; T* savevar = NULL; if (hSaveMean != 0 && hSaveVar != 0) { if (lErr = m_memory.GetData(hSaveMean, &pSaveMean)) return lErr; if (lErr = m_memory.GetData(hSaveVar, &pSaveVar)) return lErr; savemean = (T*)pSaveMean->Data(); savevar = (T*)pSaveVar->Data(); } if (sizeof(T) 
== 4) { if ((float)fEps < CUDNN_BN_MIN_EPSILON) fEps = 0.0001f; } if (lErr = cudnnBatchNormalizationBackward(cudnn, (cudnnBatchNormMode_t)mode, &fAlphaDiff, &fBetaDiff, &fAlphaParamDiff, &fBetaParamDiff, bwdbtmdesc, btmdata, topdiffdesc, topdiff, btmdiffdesc, btmdiff, bwdscalemeanvardesc, scaledata, scalediff, biasdiff, fEps, savemean, savevar)) return lErr; return cudaStreamSynchronize(0); } template long Memory<double>::BatchNormBackward(long hHandle, int mode, double dfAlphaDiff, double dfBetaDiff, double dfAlphaParamDiff, double dfBetaParamDiff, long hBtmBottomDesc, long hBottomData, long hTopDiffDesc, long hTopDiff, long hBottomDiffDesc, long hBottomDiff, long hBwdScaleBiasMeanVarDesc, long hScaleData, long hScaleDiff, long hBiasDiff, double fEps, long hSaveMean, long hSaveVar); template long Memory<float>::BatchNormBackward(long hHandle, int mode, float fAlphaDiff, float fBetaDiff, float fAlphaParamDiff, float fBetaParamDiff, long hBtmBottomDesc, long hBottomData, long hTopDiffDesc, long hTopDiff, long hBottomDiffDesc, long hBottomDiff, long hBwdScaleBiasMeanVarDesc, long hScaleData, long hScaleDiff, long hBiasDiff, float fEps, long hSaveMean, long hSaveVar); template <class T> long Memory<T>::CreateDropoutDesc(long* phHandle) { LONG lErr; cudnnDropoutDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateDropoutDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_dropoutDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyDropoutDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateDropoutDesc(long* phHandle); template long Memory<float>::CreateDropoutDesc(long* phHandle); template <class T> long Memory<T>::SetDropoutDesc(long hHandle, long hDropoutDesc, T fDropout, long hStates, long lSeed) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnDropoutDescriptor_t desc = GetDropoutDesc(hDropoutDesc); MemoryItem* pStates; if (lErr = m_memory.GetData(hStates, &pStates)) return lErr; T* states = (T*)pStates->Data(); size_t szStates = (size_t)pStates->Size(); if (lErr = cudnnSetDropoutDescriptor(desc, cudnn, (float)fDropout, states, szStates, (unsigned long long)lSeed)) return lErr | ERROR_CUDNN_OFFSET; return CUDNN_STATUS_SUCCESS; } template long Memory<double>::SetDropoutDesc(long hHandle, long hDropoutDesc, double fDropout, long hStates, long lSeed); template long Memory<float>::SetDropoutDesc(long hHandle, long hDropoutDesc, float fDropout, long hStates, long lSeed); template <class T> long Memory<T>::GetDropoutInfo(long hHandle, long hBottomDesc, unsigned long* plState, unsigned long* plReserved) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t bottomDesc = NULL; size_t szStates = 0; size_t szReserved = 0; if (hBottomDesc > 0) bottomDesc = GetTensorDesc(hBottomDesc); if (plState == NULL || plReserved == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnDropoutGetStatesSize(cudnn, &szStates)) return lErr | ERROR_CUDNN_OFFSET; if (bottomDesc != NULL) { if (lErr = cudnnDropoutGetReserveSpaceSize(bottomDesc, &szReserved)) return lErr | ERROR_CUDNN_OFFSET; } *plState = (unsigned long)szStates; *plReserved = (unsigned long)szReserved; return 0; } template long Memory<double>::GetDropoutInfo(long hHandle, long hBottomDesc, unsigned long* plState, unsigned long* plReserved); template long Memory<float>::GetDropoutInfo(long hHandle, long hBottomDesc, unsigned long* plState, unsigned long* plReserved); template <class T> long 
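// Illustrative usage sketch (added commentary, not part of the original implementation):
// the dropout entry points in this class are expected to be used in the order shown below,
// with the state and reserve buffers sized from the byte counts reported by GetDropoutInfo
// before SetDropoutDesc and DropoutForward are called.  All handle and variable names in the
// sketch are hypothetical placeholders for handles created through this Memory<T> class.
/*
    unsigned long lStateBytes = 0, lReservedBytes = 0;
    if (lErr = pMem->GetDropoutInfo(hCuda, hBottomDesc, &lStateBytes, &lReservedBytes))
        return lErr;
    // ...allocate hStates (>= lStateBytes bytes) and hReserved (>= lReservedBytes bytes)...
    if (lErr = pMem->SetDropoutDesc(hCuda, hDropoutDesc, fDropout, hStates, lSeed))
        return lErr;
    if (lErr = pMem->DropoutForward(hCuda, hDropoutDesc, hBottomDesc, hBottom, hTopDesc, hTop, hReserved))
        return lErr;
*/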
Memory<T>::DropoutForward(long hHandle, long hDropoutDesc, long hBottomDesc, long hBottom, long hTopDesc, long hTop, long hReservedSpace) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnDropoutDescriptor_t desc = GetDropoutDesc(hDropoutDesc); cudnnTensorDescriptor_t bottomDesc = GetTensorDesc(hBottomDesc); cudnnTensorDescriptor_t topDesc = GetTensorDesc(hTopDesc); MemoryItem* pBottom; MemoryItem* pTop; MemoryItem* pReserved; if (lErr = m_memory.GetData(hBottom, &pBottom)) return lErr; if (lErr = m_memory.GetData(hTop, &pTop)) return lErr; if (lErr = m_memory.GetData(hReservedSpace, &pReserved)) return lErr; T* bottom = (T*)pBottom->Data(); T* top = (T*)pTop->Data(); T* reserved = (T*)pReserved->Data(); size_t szReserved = (size_t)pReserved->Size(); if (lErr = cudnnDropoutForward(cudnn, desc, bottomDesc, bottom, topDesc, top, reserved, szReserved)) return lErr | ERROR_CUDNN_OFFSET; return cudaStreamSynchronize(0); } template long Memory<double>::DropoutForward(long hHandle, long hDropoutDesc, long hBottomDesc, long hBottom, long hTopDesc, long hTop, long hReservedSpace); template long Memory<float>::DropoutForward(long hHandle, long hDropoutDesc, long hBottomDesc, long hBottom, long hTopDesc, long hTop, long hReservedSpace); template <class T> long Memory<T>::DropoutBackward(long hHandle, long hDropoutDesc, long hTopDesc, long hTop, long hBottomDesc, long hBottom, long hReservedSpace) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnDropoutDescriptor_t desc = GetDropoutDesc(hDropoutDesc); cudnnTensorDescriptor_t topDesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t bottomDesc = GetTensorDesc(hBottomDesc); MemoryItem* pTop; MemoryItem* pBottom; MemoryItem* pReserved; if (lErr = m_memory.GetData(hTop, &pTop)) return lErr; if (lErr = m_memory.GetData(hBottom, &pBottom)) return lErr; if (lErr = m_memory.GetData(hReservedSpace, &pReserved)) return lErr; T* top = (T*)pTop->Data(); T* bottom = (T*)pBottom->Data(); T* reserved = (T*)pReserved->Data(); size_t szReserved = (size_t)pReserved->Size(); if (lErr = cudnnDropoutBackward(cudnn, desc, topDesc, top, bottomDesc, bottom, reserved, szReserved)) return lErr | ERROR_CUDNN_OFFSET; return cudaStreamSynchronize(0); } template long Memory<double>::DropoutBackward(long hHandle, long hDropoutDesc, long hTopDesc, long hTop, long hBottomDesc, long hBottom, long hReservedSpace); template long Memory<float>::DropoutBackward(long hHandle, long hDropoutDesc, long hTopDesc, long hTop, long hBottomDesc, long hBottom, long hReservedSpace); template <class T> long Memory<T>::CreateLRNDesc(long* phHandle) { LONG lErr; cudnnLRNDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateLRNDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_lrnDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyLRNDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateLRNDesc(long* phHandle); template long Memory<float>::CreateLRNDesc(long* phHandle); template <class T> long Memory<T>::LRNForwardCC(long hHandle, long hNormDesc, T fAlpha, long hBottomDataDesc, long hBottomData, T fBeta, long hTopDataDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnLRNDescriptor_t normdesc = GetLRNDesc(hNormDesc); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); MemoryItem* pBottomData; MemoryItem* pTopData; if (lErr = m_memory.GetData(hTopData, 
&pTopData)) return lErr;
    if (lErr = m_memory.GetData(hBottomData, &pBottomData)) return lErr;
    T* topdata = (T*)pTopData->Data();
    T* btmdata = (T*)pBottomData->Data();
    if (lErr = cudnnLRNCrossChannelForward(cudnn, normdesc, CUDNN_LRN_CROSS_CHANNEL_DIM1, &fAlpha, btmdatadesc, btmdata, &fBeta, topdatadesc, topdata)) return lErr | ERROR_CUDNN_OFFSET;
    return cudaStreamSynchronize(0);
}

template long Memory<double>::LRNForwardCC(long hHandle, long hNormDesc, double fAlpha, long hBottomDesc, long hBottomData, double fBeta, long hTopDesc, long hTopData);
template long Memory<float>::LRNForwardCC(long hHandle, long hNormDesc, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData);

template <class T>
long Memory<T>::LRNBackwardCC(long hHandle, long hNormDesc, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff)
{
    LONG lErr;
    cudnnHandle_t cudnn = GetCuDNN(hHandle);
    cudnnLRNDescriptor_t normdesc = GetLRNDesc(hNormDesc);
    cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc);
    cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc);
    cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc);
    cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc);
    MemoryItem* pTopData;
    MemoryItem* pBtmData;
    MemoryItem* pTopDiff;
    MemoryItem* pBtmDiff;
    if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr;
    if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr;
    if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr;
    if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr;
    T* topdata = (T*)pTopData->Data();
    T* btmdata = (T*)pBtmData->Data();
    T* topdiff = (T*)pTopDiff->Data();
    T* btmdiff = (T*)pBtmDiff->Data();
    if (lErr = cudnnLRNCrossChannelBackward(cudnn, normdesc, CUDNN_LRN_CROSS_CHANNEL_DIM1, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET;
    return cudaStreamSynchronize(0);
}

template long Memory<double>::LRNBackwardCC(long hHandle, long hNormDesc, double fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double fBeta, long hBottomDiffDesc, long hBottomDiff);
template long Memory<float>::LRNBackwardCC(long hHandle, long hNormDesc, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff);

template <class T>
long Memory<T>::LCNForwardCC(long hHandle, long hNormDesc, T fAlpha, long hBottomDataDesc, long hBottomData, long hTemp1, long hTemp2, T fBeta, long hTopDataDesc, long hTopData)
{
    LONG lErr;
    cudnnHandle_t cudnn = GetCuDNN(hHandle);
    cudnnLRNDescriptor_t normdesc = GetLRNDesc(hNormDesc);
    cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc);
    cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc);
    MemoryItem* pBottomData;
    MemoryItem* pTopData;
    MemoryItem* pTemp1;
    MemoryItem* pTemp2;
    if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr;
    if (lErr = m_memory.GetData(hBottomData, &pBottomData)) return lErr;
    if (lErr = m_memory.GetData(hTemp1, &pTemp1)) return lErr;
    if (lErr = m_memory.GetData(hTemp2, &pTemp2)) return lErr;
    T* topdata = (T*)pTopData->Data();
    T* btmdata = (T*)pBottomData->Data();
    T*
temp1 = (T*)pTemp1->Data(); T* temp2 = (T*)pTemp2->Data(); if (lErr = cudnnDivisiveNormalizationForward(cudnn, normdesc, CUDNN_DIVNORM_PRECOMPUTED_MEANS, &fAlpha, btmdatadesc, btmdata, NULL, temp1, temp2, &fBeta, topdatadesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; return cudaStreamSynchronize(0); } template long Memory<double>::LCNForwardCC(long hHandle, long hNormDesc, double fAlpha, long hBottomDesc, long hBottomData, long hTemp1, long hTemp2, double fBeta, long hTopDesc, long hTopData); template long Memory<float>::LCNForwardCC(long hHandle, long hNormDesc, float fAlpha, long hBottomDesc, long hBottomData, long hTemp1, long hTemp2, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::LCNBackwardCC(long hHandle, long hNormDesc, T fAlpha, long hBottomDataDesc, long hBottomData, long hTopDiff, long hTemp1, long hTemp2, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnLRNDescriptor_t normdesc = GetLRNDesc(hNormDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; MemoryItem* pTemp1; MemoryItem* pTemp2; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; if (lErr = m_memory.GetData(hTemp1, &pTemp1)) return lErr; if (lErr = m_memory.GetData(hTemp2, &pTemp2)) return lErr; T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); T* temp1 = (T*)pTemp1->Data(); T* temp2 = (T*)pTemp2->Data(); if (lErr = cudnnDivisiveNormalizationBackward(cudnn, normdesc, CUDNN_DIVNORM_PRECOMPUTED_MEANS, &fAlpha, btmdatadesc, btmdata, NULL, topdiff, temp1, temp2, &fBeta, btmdiffdesc, btmdiff, NULL)) return lErr | ERROR_CUDNN_OFFSET; return cudaStreamSynchronize(0); } template long Memory<double>::LCNBackwardCC(long hHandle, long hNormDesc, double fAlpha, long hBottomDataDesc, long hBottomData, long hTopDiff, long hTemp1, long hTemp2, double fBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::LCNBackwardCC(long hHandle, long hNormDesc, float fAlpha, long hBottomDataDesc, long hBottomData, long hTopDiff, long hTemp1, long hTemp2, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::TanhForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationTanh); if (lErr = cudnnActivationForward(cudnn, desc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationForward(cudnn, CUDNN_ACTIVATION_TANH, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #endif return cudaStreamSynchronize(0); } template long Memory<double>::TanhForward(long 
hHandle, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::TanhForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::TanhBackward(long hHandle, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationTanh); if (lErr = cudnnActivationBackward(cudnn, desc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationBackward(cudnn, CUDNN_ACTIVATION_TANH, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif return cudaStreamSynchronize(0); } template long Memory<double>::TanhBackward(long hHandle, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::TanhBackward(long hHandle, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::EluForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationElu); if (lErr = cudnnActivationForward(cudnn, desc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationForward(cudnn, CUDNN_ACTIVATION_ELU, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #endif return cudaStreamSynchronize(0); } template long Memory<double>::EluForward(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template 
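// Note (added commentary, not part of the original implementation): when CUDNN_5 is defined,
// the activation functions in this file use pre-built global activation descriptors
// (m_hGlobalActivationTanh, m_hGlobalActivationElu, m_hGlobalActivationSigmoid,
// m_hGlobalActivationRelu) instead of the legacy cudnnActivationMode_t overloads.  Below is a
// minimal sketch of how such a descriptor is typically configured with the cuDNN API; the
// exact coefficient and NaN-propagation settings used elsewhere in this class may differ.
/*
    cudnnActivationDescriptor_t desc;
    cudnnCreateActivationDescriptor(&desc);
    cudnnSetActivationDescriptor(desc, CUDNN_ACTIVATION_ELU, CUDNN_NOT_PROPAGATE_NAN, 1.0);
    // ...use with cudnnActivationForward / cudnnActivationBackward...
    cudnnDestroyActivationDescriptor(desc);
*/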
long Memory<float>::EluForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::EluBackward(long hHandle, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationElu); if (lErr = cudnnActivationBackward(cudnn, desc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationBackward(cudnn, CUDNN_ACTIVATION_ELU, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif return cudaStreamSynchronize(0); } template long Memory<double>::EluBackward(long hHandle, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::EluBackward(long hHandle, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::SigmoidForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationSigmoid); if (lErr = cudnnActivationForward(cudnn, desc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationForward(cudnn, CUDNN_ACTIVATION_SIGMOID, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #endif return cudaStreamSynchronize(0); } template long Memory<double>::SigmoidForward(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::SigmoidForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, float 
fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::SigmoidBackward(long hHandle, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationSigmoid); if (lErr = cudnnActivationBackward(cudnn, desc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationBackward(cudnn, CUDNN_ACTIVATION_SIGMOID, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif return cudaStreamSynchronize(0); } template long Memory<double>::SigmoidBackward(long hHandle, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::SigmoidBackward(long hHandle, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::ReLUForward(long hHandle, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationRelu); if (lErr = cudnnActivationForward(cudnn, desc, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationForward(cudnn, CUDNN_ACTIVATION_RELU, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; #endif return cudaStreamSynchronize(0); } template long Memory<double>::ReLUForward(long hHandle, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::ReLUForward(long hHandle, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::ReLUBackward(long 
hHandle, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t btmdatadesc = GetTensorDesc(hBottomDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = (hBottomDataDesc == hBottomDiffDesc) ? btmdatadesc : GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pBtmData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); #ifdef CUDNN_5 cudnnActivationDescriptor_t desc = GetActivationDesc(m_hGlobalActivationRelu); if (lErr = cudnnActivationBackward(cudnn, desc, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #else if (lErr = cudnnActivationBackward(cudnn, CUDNN_ACTIVATION_RELU, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, btmdatadesc, btmdata, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; #endif return cudaStreamSynchronize(0); } template long Memory<double>::ReLUBackward(long hHandle, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::ReLUBackward(long hHandle, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, long hBottomDataDesc, long hBottomData, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::SoftmaxForward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, T fAlpha, long hBottomDesc, long hBottomData, T fBeta, long hTopDesc, long hTopData) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdesc = GetTensorDesc(hTopDesc); cudnnTensorDescriptor_t btmdesc = GetTensorDesc(hBottomDesc); MemoryItem* pTopData; MemoryItem* pBtmData; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hBottomData, &pBtmData)) return lErr; T* topdata = (T*)pTopData->Data(); T* btmdata = (T*)pBtmData->Data(); if (lErr = cudnnSoftmaxForward(cudnn, (cudnnSoftmaxAlgorithm_t)alg, (cudnnSoftmaxMode_t)mode, &fAlpha, btmdesc, btmdata, &fBeta, topdesc, topdata)) return lErr | ERROR_CUDNN_OFFSET; return cudaStreamSynchronize(0); } template long Memory<double>::SoftmaxForward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, double dfAlpha, long hBottomDesc, long hBottomData, double dfBeta, long hTopDesc, long hTopData); template long Memory<float>::SoftmaxForward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, float fAlpha, long hBottomDesc, long hBottomData, float fBeta, long hTopDesc, long hTopData); template <class T> long Memory<T>::SoftmaxBackward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, T fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, T fBeta, long hBottomDiffDesc, long hBottomDiff) { LONG lErr; 
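    // Note (added commentary, not part of the original implementation): SoftmaxForward and
    // SoftmaxBackward pass 'alg' and 'mode' straight through to cuDNN via casts, so the
    // managed-side SoftmaxAlgorithm/SoftmaxMode enums are assumed to mirror
    // cudnnSoftmaxAlgorithm_t (CUDNN_SOFTMAX_FAST / CUDNN_SOFTMAX_ACCURATE / CUDNN_SOFTMAX_LOG)
    // and cudnnSoftmaxMode_t (CUDNN_SOFTMAX_MODE_INSTANCE / CUDNN_SOFTMAX_MODE_CHANNEL).
    // A minimal sketch of the underlying backward call, with hypothetical descriptors and
    // device buffers:
    /*
        float fOne = 1.0f, fZero = 0.0f;
        cudnnStatus_t status = cudnnSoftmaxBackward(cudnn,
            CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
            &fOne, topDataDesc, d_topData, topDiffDesc, d_topDiff,
            &fZero, bottomDiffDesc, d_bottomDiff);
    */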
cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnTensorDescriptor_t topdatadesc = GetTensorDesc(hTopDataDesc); cudnnTensorDescriptor_t topdiffdesc = (hTopDataDesc == hTopDiffDesc) ? topdatadesc : GetTensorDesc(hTopDiffDesc); cudnnTensorDescriptor_t btmdiffdesc = GetTensorDesc(hBottomDiffDesc); MemoryItem* pTopData; MemoryItem* pTopDiff; MemoryItem* pBtmDiff; if (lErr = m_memory.GetData(hTopData, &pTopData)) return lErr; if (lErr = m_memory.GetData(hTopDiff, &pTopDiff)) return lErr; if (lErr = m_memory.GetData(hBottomDiff, &pBtmDiff)) return lErr; T* topdata = (T*)pTopData->Data(); T* topdiff = (T*)pTopDiff->Data(); T* btmdiff = (T*)pBtmDiff->Data(); if (lErr = cudnnSoftmaxBackward(cudnn, (cudnnSoftmaxAlgorithm_t)alg, (cudnnSoftmaxMode_t)mode, &fAlpha, topdatadesc, topdata, topdiffdesc, topdiff, &fBeta, btmdiffdesc, btmdiff)) return lErr | ERROR_CUDNN_OFFSET; return cudaStreamSynchronize(0); } template long Memory<double>::SoftmaxBackward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, double dfAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, double dfBeta, long hBottomDiffDesc, long hBottomDiff); template long Memory<float>::SoftmaxBackward(long hHandle, SoftmaxAlgorithm alg, SoftmaxMode mode, float fAlpha, long hTopDataDesc, long hTopData, long hTopDiffDesc, long hTopDiff, float fBeta, long hBottomDiffDesc, long hBottomDiff); template <class T> long Memory<T>::CreateRnnDataDesc1(long* phHandle) { LONG lErr; rnnDataHandle<T>* desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if ((desc = new rnnDataHandle<T>()) == NULL) return ERROR_MEMORY_OUT; if (lErr = desc->Initialize(this)) return lErr; long hHandle = m_rnnDataDesc1.Allocate(desc); if (hHandle < 0) { desc->CleanUp(); delete desc; return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateRnnDataDesc1(long* phHandle); template long Memory<float>::CreateRnnDataDesc1(long* phHandle); template <class T> long Memory<T>::CreateRnnDataDesc2(long* phHandle) { LONG lErr; cudnnRNNDataDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateRNNDataDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_rnnDataDesc2.Allocate(desc); if (hHandle < 0) { cudnnDestroyRNNDataDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateRnnDataDesc2(long* phHandle); template long Memory<float>::CreateRnnDataDesc2(long* phHandle); template <class T> long Memory<T>::CreateRnnDesc(long* phHandle) { LONG lErr; cudnnRNNDescriptor_t desc = NULL; if (phHandle == NULL) return ERROR_PARAM_NULL; if (lErr = cudnnCreateRNNDescriptor(&desc)) return lErr | ERROR_CUDNN_OFFSET; long hHandle = m_rnnDesc.Allocate(desc); if (hHandle < 0) { cudnnDestroyRNNDescriptor(desc); return ERROR_MEMORY_OUT; } *phHandle = hHandle; return 0; } template long Memory<double>::CreateRnnDesc(long* phHandle); template long Memory<float>::CreateRnnDesc(long* phHandle); template <class T> long Memory<T>::GetRnnParamCount(long hHandle, long hRnnDesc, long hXDesc, int* pnCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); cudnnDataType_t type = (sizeof(T) == 4) ? 
CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE; if (descX == NULL) return ERROR_PARAM_NULL; if (pnCount == NULL) return ERROR_PARAM_NULL; size_t sizeInBytes; if (lErr = cudnnGetRNNParamsSize(cudnn, desc, descX->GetFirstTensor(), &sizeInBytes, type)) return lErr; int nCount = (int)(sizeInBytes / sizeof(T)); *pnCount = nCount; return 0; } template long Memory<double>::GetRnnParamCount(long hHandle, long hRnnDesc, long hXDesc, int* pnCount); template long Memory<float>::GetRnnParamCount(long hHandle, long hRnnDesc, long hXDesc, int* pnCount); template <class T> long Memory<T>::GetRnnParamCountEx(long hHandle, long hRnnDesc, long hXDesc, int* pnCount) { LONG lErr; cudnnDataType_t type = (sizeof(T) == 4) ? CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); if (descX == NULL) return ERROR_PARAM_NULL; if (pnCount == NULL) return ERROR_PARAM_NULL; cudnnDataType_t type0; cudnnRNNDataLayout_t layout; int nMaxSeqLen = 0; int nBatchSize = 0; int nVectorSize = 0; T fFill; int* rgSeqLen = (int*)malloc(sizeof(int) * 1); if (rgSeqLen == NULL) return ERROR_MEMORY_OUT; lErr = cudnnGetRNNDataDescriptor(descX, &type0, &layout, &nMaxSeqLen, &nBatchSize, &nVectorSize, 1, rgSeqLen, (void*)&fFill); free(rgSeqLen); if (lErr) return lErr; cudnnTensorDescriptor_t tensorX; if (lErr = cudnnCreateTensorDescriptor(&tensorX)) return lErr; int rgDim[3]; rgDim[0] = (layout == CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED) ? nMaxSeqLen : nBatchSize; rgDim[1] = nVectorSize; rgDim[2] = 1; int rgStride[3]; rgStride[0] = rgDim[2] * rgDim[1]; rgStride[1] = rgDim[2]; rgStride[2] = 1; if (lErr = cudnnSetTensorNdDescriptor(tensorX, type, 3, rgDim, rgStride)) { cudnnDestroyTensorDescriptor(tensorX); return lErr; } size_t sizeInBytes; lErr = cudnnGetRNNParamsSize(cudnn, desc, tensorX, &sizeInBytes, type); cudnnDestroyTensorDescriptor(tensorX); if (lErr) return lErr; int nCount = (int)(sizeInBytes / sizeof(T)); *pnCount = nCount; return 0; } template long Memory<double>::GetRnnParamCountEx(long hHandle, long hRnnDesc, long hXDesc, int* pnCount); template long Memory<float>::GetRnnParamCountEx(long hHandle, long hRnnDesc, long hXDesc, int* pnCount); template <class T> long Memory<T>::GetRnnWorkspaceCount(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); if (pnWsCount == NULL || pnResCount == NULL) return ERROR_PARAM_NULL; size_t sizeInBytes; if (lErr = cudnnGetRNNWorkspaceSize(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), &sizeInBytes)) return lErr; size_t nWsCount = (size_t)(sizeInBytes / sizeof(T)); if (lErr = cudnnGetRNNTrainingReserveSize(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), &sizeInBytes)) return lErr; size_t nResCount = (size_t)(sizeInBytes / sizeof(T)); *pnWsCount = nWsCount; *pnResCount = nResCount; return 0; } template long Memory<double>::GetRnnWorkspaceCount(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount); template long Memory<float>::GetRnnWorkspaceCount(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount); template <class T> long Memory<T>::GetRnnWorkspaceCountEx(long hHandle, long hRnnDesc, long hXDesc, size_t* 
pnWsCount, size_t* pnResCount) { LONG lErr; cudnnDataType_t type = (sizeof(T) == 4) ? CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); if (descX == NULL) return ERROR_PARAM_NULL; if (pnWsCount == NULL || pnResCount == NULL) return ERROR_PARAM_NULL; cudnnDataType_t type0; cudnnRNNDataLayout_t layout; int nMaxSeqLen = 0; int nBatchSize = 0; int nVectorSize = 0; T fFill; int* rgSeqLen = (int*)malloc(sizeof(int) * 1); if (rgSeqLen == NULL) return ERROR_MEMORY_OUT; lErr = cudnnGetRNNDataDescriptor(descX, &type0, &layout, &nMaxSeqLen, &nBatchSize, &nVectorSize, 1, rgSeqLen, (void*)&fFill); free(rgSeqLen); if (lErr) return lErr; cudnnTensorDescriptor_t* rgDescX = (cudnnTensorDescriptor_t*)malloc(sizeof(cudnnTensorDescriptor_t) * nMaxSeqLen); if (rgDescX == NULL) return ERROR_OUTOFMEMORY; memset(rgDescX, NULL, sizeof(cudnnTensorDescriptor_t) * nMaxSeqLen); for (int i = 0; i < nMaxSeqLen; i++) { if (lErr = cudnnCreateTensorDescriptor(&rgDescX[i])) break; int rgDim[3]; rgDim[0] = (layout == CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED) ? nMaxSeqLen : nBatchSize; rgDim[1] = nVectorSize; rgDim[2] = 1; int rgStride[3]; rgStride[0] = rgDim[2] * rgDim[1]; rgStride[1] = rgDim[2]; rgStride[2] = 1; if (lErr = cudnnSetTensorNdDescriptor(rgDescX[i], type, 3, rgDim, rgStride)) break; } size_t sizeInBytes; size_t nWsCount = 0; if (!lErr) { lErr = cudnnGetRNNWorkspaceSize(cudnn, desc, nMaxSeqLen, rgDescX, &sizeInBytes); if (!lErr) { nWsCount = (size_t)(sizeInBytes / sizeof(T)) + 1; lErr = cudnnGetRNNTrainingReserveSize(cudnn, desc, nMaxSeqLen, rgDescX, &sizeInBytes); } } for (int i = 0; i < nMaxSeqLen; i++) { if (rgDescX[i] != NULL) cudnnDestroyTensorDescriptor(rgDescX[i]); } free(rgDescX); if (lErr) return lErr; size_t nResCount = (size_t)(sizeInBytes / sizeof(T)) + 1; *pnWsCount = nWsCount; *pnResCount = nResCount; return cudaStreamSynchronize(0); } template long Memory<double>::GetRnnWorkspaceCountEx(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount); template long Memory<float>::GetRnnWorkspaceCountEx(long hHandle, long hRnnDesc, long hXDesc, size_t* pnWsCount, size_t* pnResCount); template <typename T> __global__ void init_data_kernel(const int n, const T alpha, T* y) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < n) y[tid] = alpha; } template <class T> void init_data(T* pData, int nCount, T val) { int nBlock = 1024; int nGrid = (nCount + nBlock - 1) / nBlock; init_data_kernel<<<nGrid, nBlock>>>(nCount, val, pData); } template <class T> long Memory<T>::GetRnnLinLayerParams(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); MemoryItem* pWtData; if (descX == NULL) return ERROR_PARAM_OUT_OF_RANGE; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (pnWtCount == NULL || phWt == NULL || pnBiasCount == NULL || phBias == NULL) return ERROR_PARAM_NULL; // Get the Weight Counts cudnnFilterDescriptor_t filterWts; if (lErr = cudnnCreateFilterDescriptor(&filterWts)) 
return lErr; void* pWtDevMem; if (lErr = cudnnGetRNNLinLayerMatrixParams(cudnn, desc, nLayer, descX->GetFirstTensor(), descWt, pWtData->Data(), nLinLayer, filterWts, &pWtDevMem)) { cudnnDestroyFilterDescriptor(filterWts); return lErr; } cudnnDataType_t type; cudnnTensorFormat_t fmt; int nbDims; int rgDimA[3]; if (lErr = cudnnGetFilterNdDescriptor(filterWts, 3, &type, &fmt, &nbDims, rgDimA)) { cudnnDestroyFilterDescriptor(filterWts); return lErr; } int nWtCount = rgDimA[0] * rgDimA[1] * rgDimA[2]; T fVal = T(1.0 / nWtCount); init_data((T*)pWtDevMem, nWtCount, fVal); cudnnDestroyFilterDescriptor(filterWts); // Get the Bias Counts cudnnFilterDescriptor_t filterBias; if (lErr = cudnnCreateFilterDescriptor(&filterBias)) return lErr; void* pBiasDevMem; if (lErr = cudnnGetRNNLinLayerBiasParams(cudnn, desc, nLayer, descX->GetFirstTensor(), descWt, pWtData->Data(), nLinLayer, filterBias, &pBiasDevMem)) { cudnnDestroyFilterDescriptor(filterBias); return lErr; } if (lErr = cudnnGetFilterNdDescriptor(filterBias, 3, &type, &fmt, &nbDims, rgDimA)) { cudnnDestroyFilterDescriptor(filterBias); return lErr; } int nBiasCount = rgDimA[0] * rgDimA[1] * rgDimA[2]; init_data((T*)pBiasDevMem, nBiasCount, T(1.0)); cudnnDestroyFilterDescriptor(filterBias); // Create the memory pointer handles. long hWtMemPtr; long long lWtSize = nWtCount * sizeof(T); if (lWtSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (lErr = CreateMemoryPointer(pWtData->DeviceID(), pWtData->IsHalf(), (T*)pWtDevMem, (size_t)lWtSize, &hWtMemPtr)) return lErr; long hBiasMemPtr; long long lBiasSize = nBiasCount * sizeof(T); if (lBiasSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (lErr = CreateMemoryPointer(pWtData->DeviceID(), pWtData->IsHalf(), (T*)pBiasDevMem, (size_t)lBiasSize, &hBiasMemPtr)) return lErr; *pnWtCount = nWtCount; *phWt = hWtMemPtr; *pnBiasCount = nBiasCount; *phBias = hBiasMemPtr; return cudaStreamSynchronize(0); } template long Memory<double>::GetRnnLinLayerParams(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias); template long Memory<float>::GetRnnLinLayerParams(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias); template <class T> long Memory<T>::GetRnnLinLayerParamsEx(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias) { LONG lErr; cudnnDataType_t type = (sizeof(T) == 4) ? 
CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); MemoryItem* pWtData; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (descX == NULL) return ERROR_PARAM_NULL; if (pnWtCount == NULL || phWt == NULL || pnBiasCount == NULL || phBias == NULL) return ERROR_PARAM_NULL; cudnnDataType_t type0; cudnnRNNDataLayout_t layout; int nMaxSeqLen = 0; int nBatchSize = 0; int nVectorSize = 0; T fFill; int* rgSeqLen = (int*)malloc(sizeof(int) * 1); if (rgSeqLen == NULL) return ERROR_MEMORY_OUT; lErr = cudnnGetRNNDataDescriptor(descX, &type0, &layout, &nMaxSeqLen, &nBatchSize, &nVectorSize, 1, rgSeqLen, (void*)&fFill); free(rgSeqLen); if (lErr) return lErr; cudnnTensorDescriptor_t tensorX; if (lErr = cudnnCreateTensorDescriptor(&tensorX)) return lErr; int rgDim[3]; rgDim[0] = (layout == CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED) ? nMaxSeqLen : nBatchSize; rgDim[1] = nVectorSize; rgDim[2] = 1; int rgStride[3]; rgStride[0] = rgDim[2] * rgDim[1]; rgStride[1] = rgDim[2]; rgStride[2] = 1; if (lErr = cudnnSetTensorNdDescriptor(tensorX, type, 3, rgDim, rgStride)) { cudnnDestroyTensorDescriptor(tensorX); return lErr; } // Get the Weight Counts cudnnFilterDescriptor_t filterWts; if (lErr = cudnnCreateFilterDescriptor(&filterWts)) { cudnnDestroyTensorDescriptor(tensorX); return lErr; } void* pWtDevMem; if (lErr = cudnnGetRNNLinLayerMatrixParams(cudnn, desc, nLayer, tensorX, descWt, pWtData->Data(), nLinLayer, filterWts, &pWtDevMem)) { cudnnDestroyTensorDescriptor(tensorX); cudnnDestroyFilterDescriptor(filterWts); return lErr; } cudnnTensorFormat_t fmt; int nbDims; int rgDimA[3]; if (lErr = cudnnGetFilterNdDescriptor(filterWts, 3, &type0, &fmt, &nbDims, rgDimA)) { cudnnDestroyTensorDescriptor(tensorX); cudnnDestroyFilterDescriptor(filterWts); return lErr; } int nWtCount = rgDimA[0] * rgDimA[1] * rgDimA[2]; T fVal = T(1.0 / nWtCount); init_data((T*)pWtDevMem, nWtCount, fVal); cudnnDestroyFilterDescriptor(filterWts); // Get the Bias Counts cudnnFilterDescriptor_t filterBias; if (lErr = cudnnCreateFilterDescriptor(&filterBias)) { cudnnDestroyTensorDescriptor(tensorX); return lErr; } void* pBiasDevMem; if (lErr = cudnnGetRNNLinLayerBiasParams(cudnn, desc, nLayer, tensorX, descWt, pWtData->Data(), nLinLayer, filterBias, &pBiasDevMem)) { cudnnDestroyTensorDescriptor(tensorX); cudnnDestroyFilterDescriptor(filterBias); return lErr; } if (lErr = cudnnGetFilterNdDescriptor(filterBias, 3, &type, &fmt, &nbDims, rgDimA)) { cudnnDestroyTensorDescriptor(tensorX); cudnnDestroyFilterDescriptor(filterBias); return lErr; } int nBiasCount = rgDimA[0] * rgDimA[1] * rgDimA[2]; init_data((T*)pBiasDevMem, nBiasCount, T(1.0)); cudnnDestroyFilterDescriptor(filterBias); cudnnDestroyTensorDescriptor(tensorX); // Create the memory pointer handles. 
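    // Note (added commentary, not part of the original implementation):
    // cudnnGetRNNLinLayerMatrixParams/cudnnGetRNNLinLayerBiasParams return pointers into the
    // weight blob referenced by hWtData, so the memory-pointer handles created below are
    // assumed to alias that existing device memory rather than copy it.  The init_data()
    // launches above seed the selected linear-layer weights with 1/nWtCount and the biases
    // with 1; they run asynchronously on the default stream, and the trailing
    // cudaStreamSynchronize(0) ensures they have completed before the handles are returned
    // to the caller.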
long hWtMemPtr; long long lWtSize = nWtCount * sizeof(T); if (lWtSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (lErr = CreateMemoryPointer(pWtData->DeviceID(), pWtData->IsHalf(), (T*)pWtDevMem, (size_t)lWtSize, &hWtMemPtr)) return lErr; long hBiasMemPtr; long long lBiasSize = nBiasCount * sizeof(T); if (lBiasSize > SIZE_MAX) return ERROR_MEMORY_RANGE_EXCEEDED; if (lErr = CreateMemoryPointer(pWtData->DeviceID(), pWtData->IsHalf(), (T*)pBiasDevMem, (size_t)lBiasSize, &hBiasMemPtr)) return lErr; *pnWtCount = nWtCount; *phWt = hWtMemPtr; *pnBiasCount = nBiasCount; *phBias = hBiasMemPtr; return cudaStreamSynchronize(0); } template long Memory<double>::GetRnnLinLayerParamsEx(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias); template long Memory<float>::GetRnnLinLayerParamsEx(long hHandle, long hRnnDesc, int nLayer, long hXDesc, long hWtDesc, long hWtData, int nLinLayer, int* pnWtCount, long* phWt, int* pnBiasCount, long* phBias); template <class T> long Memory<T>::RnnForward(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspaceData, size_t nWsCount, long hReservedData, size_t nResCount, bool bTraining) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); rnnDataHandle<T>* descY = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hYDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnTensorDescriptor_t descCx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCxDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); cudnnTensorDescriptor_t descHy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHyDesc); cudnnTensorDescriptor_t descCy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCyDesc); MemoryItem* pXData; MemoryItem* pHxData; MemoryItem* pCxData; MemoryItem* pWtData; MemoryItem* pYData; MemoryItem* pHyData; MemoryItem* pCyData; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (descX == NULL || descY == NULL) return ERROR_PARAM_OUT_OF_RANGE; if (lErr = m_memory.GetData(hXData, &pXData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hCxData, &pCxData)) return lErr; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hHyData, &pHyData)) return lErr; if (lErr = m_memory.GetData(hCyData, &pCyData)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (bTraining) { if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; } if (!bTraining) { lErr = cudnnRNNForwardInference(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), pXData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descWt, pWtData->Data(), descY->SeqTensors(), pYData->Data(), descHy, pHyData->Data(), descCy, pCyData->Data(), pWorkspaceData->Data(), pWorkspaceData->Size()); } else { lErr = cudnnRNNForwardTraining(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), pXData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descWt, pWtData->Data(), descY->SeqTensors(), 
pYData->Data(), descHy, pHyData->Data(), descCy, pCyData->Data(), pWorkspaceData->Data(), pWorkspaceData->Size(), pReservedData->Data(), pReservedData->Size()); } return cudaStreamSynchronize(0); } template long Memory<double>::RnnForward(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount, bool bTraining); template long Memory<float>::RnnForward(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount, bool bTraining); template <class T> long Memory<T>::RnnForwardEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspaceData, size_t nWsCount, long hReservedData, size_t nResCount, bool bTraining) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); cudnnRNNDataDescriptor_t descY = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hYDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnTensorDescriptor_t descCx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCxDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); cudnnTensorDescriptor_t descHy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHyDesc); cudnnTensorDescriptor_t descCy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCyDesc); MemoryItem* pXData; MemoryItem* pHxData; MemoryItem* pCxData; MemoryItem* pWtData; MemoryItem* pYData; MemoryItem* pHyData; MemoryItem* pCyData; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (lErr = m_memory.GetData(hXData, &pXData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hCxData, &pCxData)) return lErr; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hHyData, &pHyData)) return lErr; if (lErr = m_memory.GetData(hCyData, &pCyData)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (bTraining) { if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; } if (!bTraining) { lErr = cudnnRNNForwardInferenceEx(cudnn, desc, descX, pXData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descWt, pWtData->Data(), descY, pYData->Data(), descHy, pHyData->Data(), descCy, pCyData->Data(), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, pWorkspaceData->Data(), pWorkspaceData->Size()); } else { lErr = cudnnRNNForwardTrainingEx(cudnn, desc, descX, pXData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descWt, pWtData->Data(), descY, pYData->Data(), descHy, pHyData->Data(), descCy, pCyData->Data(), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, pWorkspaceData->Data(), pWorkspaceData->Size(), pReservedData->Data(), pReservedData->Size()); } return cudaStreamSynchronize(0); } template long 
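// Note (added commentary, not part of the original implementation): RnnForward and
// RnnForwardEx choose between the cuDNN inference and training entry points based on
// bTraining; the reserve-space buffer (hReservedData) is only fetched and passed on the
// training path.  cuDNN requires that the same reserve space later be supplied, unmodified,
// to RnnBackwardData/RnnBackwardDataEx and RnnBackwardWeights.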
Memory<double>::RnnForwardEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount, bool bTraining); template long Memory<float>::RnnForwardEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hWtDesc, long hWtData, long hYDesc, long hYData, long hHyDesc, long hHyData, long hCyDesc, long hCyData, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount, bool bTraining); template <class T> long Memory<T>::RnnBackwardData(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspaceData, size_t nWsCount, long hReservedData, size_t nResCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); rnnDataHandle<T>* descY = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hYDesc); cudnnTensorDescriptor_t descHy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHyDesc); cudnnTensorDescriptor_t descCy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCyDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnTensorDescriptor_t descCx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCxDesc); cudnnTensorDescriptor_t descHxd = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hdHxDesc); cudnnTensorDescriptor_t descCxd = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hdCxDesc); MemoryItem* pYData; MemoryItem* pYDiff; MemoryItem* pHyDiff = NULL; MemoryItem* pCyDiff = NULL; MemoryItem* pWtData; MemoryItem* pHxData; MemoryItem* pCxData; MemoryItem* pXDiff; MemoryItem* pHxDiff; MemoryItem* pCxDiff; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (descX == NULL || descY == NULL) return ERROR_PARAM_OUT_OF_RANGE; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hYDiff, &pYDiff)) return lErr; if (hHyDiff != 0) { if (lErr = m_memory.GetData(hHyDiff, &pHyDiff)) return lErr; } if (hCyDiff != 0) { if (lErr = m_memory.GetData(hCyDiff, &pCyDiff)) return lErr; } if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hCxData, &pCxData)) return lErr; if (lErr = m_memory.GetData(hXDiff, &pXDiff)) return lErr; if (lErr = m_memory.GetData(hHxDiff, &pHxDiff)) return lErr; if (lErr = m_memory.GetData(hCxDiff, &pCxDiff)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; lErr = cudnnRNNBackwardData(cudnn, desc, descY->MaxSeqLen(), descY->SeqTensors(), pYData->Data(), descY->SeqTensors(), pYDiff->Data(), descHy, (pHyDiff == NULL) ? NULL : pHyDiff->Data(), descCy, (pCyDiff == NULL) ? 
NULL : pCyDiff->Data(), descWt, pWtData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descX->SeqTensors(), pXDiff->Data(), descHxd, pHxDiff->Data(), descCxd, pCxDiff->Data(), pWorkspaceData->Data(), pWorkspaceData->Size(), pReservedData->Data(), pReservedData->Size()); return cudaStreamSynchronize(0); } template long Memory<double>::RnnBackwardData(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount); template long Memory<float>::RnnBackwardData(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount); template <class T> long Memory<T>::RnnBackwardDataEx(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspaceData, size_t nWsCount, long hReservedData, size_t nResCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); cudnnRNNDataDescriptor_t descY = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hYDesc); cudnnTensorDescriptor_t descHy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHyDesc); cudnnTensorDescriptor_t descCy = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCyDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnTensorDescriptor_t descCx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hCxDesc); cudnnTensorDescriptor_t descHxd = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hdHxDesc); cudnnTensorDescriptor_t descCxd = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hdCxDesc); MemoryItem* pYData; MemoryItem* pYDiff; MemoryItem* pHyDiff; MemoryItem* pCyDiff; MemoryItem* pWtData; MemoryItem* pHxData; MemoryItem* pCxData; MemoryItem* pXDiff; MemoryItem* pHxDiff; MemoryItem* pCxDiff; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hYDiff, &pYDiff)) return lErr; if (lErr = m_memory.GetData(hHyDiff, &pHyDiff)) return lErr; if (lErr = m_memory.GetData(hCyDiff, &pCyDiff)) return lErr; if (lErr = m_memory.GetData(hWtData, &pWtData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hCxData, &pCxData)) return lErr; if (lErr = m_memory.GetData(hXDiff, &pXDiff)) return lErr; if (lErr = m_memory.GetData(hHxDiff, &pHxDiff)) return lErr; if (lErr = m_memory.GetData(hCxDiff, &pCxDiff)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; lErr = cudnnRNNBackwardDataEx(cudnn, desc, 
descY, pYData->Data(), descY, pYDiff->Data(), NULL, NULL, descHy, pHyDiff->Data(), descCy, pCyDiff->Data(), descWt, pWtData->Data(), descHx, pHxData->Data(), descCx, pCxData->Data(), descX, pXDiff->Data(), descHxd, pHxDiff->Data(), descCxd, pCxDiff->Data(), NULL, NULL, pWorkspaceData->Data(), pWorkspaceData->Size(), pReservedData->Data(), pReservedData->Size()); return cudaStreamSynchronize(0); } template long Memory<double>::RnnBackwardDataEx(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount); template long Memory<float>::RnnBackwardDataEx(long hHandle, long hRnnDesc, long hYDesc, long hYData, long hYDiff, long hHyDesc, long hHyDiff, long hCyDesc, long hCyDiff, long hWtDesc, long hWtData, long hHxDesc, long hHxData, long hCxDesc, long hCxData, long hXDesc, long hXDiff, long hdHxDesc, long hHxDiff, long hdCxDesc, long hCxDiff, long hWorkspace, size_t nWsCount, long hReserved, size_t nResCount); template <class T> long Memory<T>::RnnBackwardWeights(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspaceData, size_t nWsCount, long hWtDesc, long hWtDiff, long hReservedData, size_t nResCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); rnnDataHandle<T>* descX = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hXDesc); rnnDataHandle<T>* descY = (rnnDataHandle<T>*)m_rnnDataDesc1.GetData(hYDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); MemoryItem* pXData; MemoryItem* pHxData; MemoryItem* pYData; MemoryItem* pWtDiff; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (descX == NULL || descY == NULL) return ERROR_PARAM_OUT_OF_RANGE; if (lErr = m_memory.GetData(hXData, &pXData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hWtDiff, &pWtDiff)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; lErr = cudnnRNNBackwardWeights(cudnn, desc, descX->MaxSeqLen(), descX->SeqTensors(), pXData->Data(), descHx, pHxData->Data(), descY->SeqTensors(), pYData->Data(), pWorkspaceData->Data(), pWorkspaceData->Size(), descWt, pWtDiff->Data(), pReservedData->Data(), pReservedData->Size()); return cudaStreamSynchronize(0); } template long Memory<double>::RnnBackwardWeights(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspace, size_t nWsCount, long hWtDesc, long hWtDiff, long hReserved, size_t nResCount); template long Memory<float>::RnnBackwardWeights(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspace, size_t nWsCount, long hWtDesc, long hWtDiff, long hReserved, size_t nResCount); template <class T> long Memory<T>::RnnBackwardWeightsEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long 
hWorkspaceData, size_t nWsCount, long hWtDesc, long hWtDiff, long hReservedData, size_t nResCount) { LONG lErr; cudnnHandle_t cudnn = GetCuDNN(hHandle); cudnnRNNDescriptor_t desc = (cudnnRNNDescriptor_t)m_rnnDesc.GetData(hRnnDesc); cudnnRNNDataDescriptor_t descX = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hXDesc); cudnnRNNDataDescriptor_t descY = (cudnnRNNDataDescriptor_t)m_rnnDataDesc2.GetData(hYDesc); cudnnTensorDescriptor_t descHx = (cudnnTensorDescriptor_t)m_tensorDesc.GetData(hHxDesc); cudnnFilterDescriptor_t descWt = (cudnnFilterDescriptor_t)m_filterDesc.GetData(hWtDesc); MemoryItem* pXData; MemoryItem* pHxData; MemoryItem* pYData; MemoryItem* pWtDiff; MemoryItem* pWorkspaceData; MemoryItem* pReservedData; if (lErr = m_memory.GetData(hXData, &pXData)) return lErr; if (lErr = m_memory.GetData(hHxData, &pHxData)) return lErr; if (lErr = m_memory.GetData(hYData, &pYData)) return lErr; if (lErr = m_memory.GetData(hWtDiff, &pWtDiff)) return lErr; if (lErr = m_memory.GetData(hWorkspaceData, &pWorkspaceData)) return lErr; if (lErr = m_memory.GetData(hReservedData, &pReservedData)) return lErr; lErr = cudnnRNNBackwardWeightsEx(cudnn, desc, descX, pXData->Data(), descHx, pHxData->Data(), descY, pYData->Data(), pWorkspaceData->Data(), pWorkspaceData->Size(), descWt, pWtDiff->Data(), pReservedData->Data(), pReservedData->Size()); return cudaStreamSynchronize(0); } template long Memory<double>::RnnBackwardWeightsEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspace, size_t nWsCount, long hWtDesc, long hWtDiff, long hReserved, size_t nResCount); template long Memory<float>::RnnBackwardWeightsEx(long hHandle, long hRnnDesc, long hXDesc, long hXData, long hHxDesc, long hHxData, long hYDesc, long hYData, long hWorkspace, size_t nWsCount, long hWtDesc, long hWtDiff, long hReserved, size_t nResCount); //end memory.cu
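// Illustration only (not part of memory.cu above): the *Ex entry points above look up
// pre-built cudnnRNNDataDescriptor_t handles (m_rnnDataDesc2); a minimal sketch of how such
// a descriptor is typically created with the cuDNN 7.x API is shown below. The shapes, the
// all-full-length sequence array, the padding value, and the omission of error checking are
// assumptions for this sketch - check the exact cudnnSetRNNDataDescriptor signature against
// the cuDNN version in use.
#include <cudnn.h>
#include <vector>

static cudnnRNNDataDescriptor_t makeRnnDataDesc(int maxSeqLen, int batch, int vecSize)
{
    cudnnRNNDataDescriptor_t desc;
    cudnnCreateRNNDataDescriptor(&desc);
    std::vector<int> seqLengths(batch, maxSeqLen); // one length per sequence in the mini-batch
    float paddingFill = 0.0f;                      // value written into padded time steps
    cudnnSetRNNDataDescriptor(desc,
                              CUDNN_DATA_FLOAT,
                              CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED,
                              maxSeqLen, batch, vecSize,
                              seqLengths.data(),
                              &paddingFill);
    return desc;
}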
7a08e54de365b137b0ec38ef24ac8b2d65f61411.hip
// !!! This is a file automatically generated by hipify!!! ///////// #include <iostream> #include <algorithm> #include "timer.h" ///////// #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "blelloch_scan.h" #define NBITS 4 #define MASK 0xF // Returns the idx-th group of bits from 'num' __device__ static inline unsigned int get_group(unsigned int num, int whichGroup){ return (num >> (whichGroup * NBITS)) & MASK; } // Number of threads allocated must be higher than vecSize __global__ static void make_predicate(const unsigned int * const d_vec, unsigned int * const d_out, int vecSize, int bitGroup, unsigned int groupVal){ const int myIdx = blockDim.x * blockIdx.x + threadIdx.x; if(myIdx < vecSize) d_out[myIdx] = get_group(d_vec[myIdx], bitGroup) == groupVal; } // Number of threads allocated must be higher than vecSize __global__ static void scatter_elements(const unsigned int * const d_vec, const unsigned int * const d_xscan, unsigned int * const d_out, unsigned int * d_baseIndex, int vecSize) { const int myIdx = blockDim.x * blockIdx.x + threadIdx.x; if(myIdx >= vecSize - 1) return; // Last element is a special case if(d_xscan[myIdx] != d_xscan[myIdx + 1]) d_out[d_baseIndex[0] + d_xscan[myIdx]] = d_vec[myIdx]; } // 1 thread only __global__ static void scatter_last( // Will scatter last element if needed, and update d_baseIndex as needed const unsigned int * const d_lastElem, // Last element of original vector unsigned int * const d_out, // output vector unsigned int * const d_lastScan, // Last element of the xscan vector unsigned int * const d_baseIndex, // baseIndex int bitGroup, // bitGroup being analyzed (last 4 bits, second last etc...) unsigned int groupVal) { // value being analyzed in the the bit group (0, 1, 2...) 
const int lastElem = *d_lastElem; const int mustScatter = get_group(lastElem, bitGroup) == groupVal; const int outIndex = *d_baseIndex + *d_lastScan; if(mustScatter) d_out[outIndex] = lastElem; *d_baseIndex = outIndex + mustScatter; // If last element is of given group, we must add 1 } void print_device(unsigned int *d_vec, int vecSize){ int *vec = (int *) malloc(vecSize * sizeof(int)); hipMemcpy(vec, d_vec, sizeof(int) * vecSize, hipMemcpyDeviceToHost); for(int i = 0; i < vecSize; i++) std::cout << vec[i] << " "; std::cout << "\n"; free(vec); } // Assumes vector's size is a power of 2 void d_radix_sort(unsigned int **d_vec_p, int vecSize){ const int nGroups = (sizeof(int) * 8) / NBITS; const int maxGroupVal = 1 << NBITS; unsigned int *d_vec = *d_vec_p; unsigned int *d_out, *d_baseIndex; hipMalloc(&d_out, sizeof(int) * vecSize); hipMalloc(&d_baseIndex, sizeof(int)); hipStream_t streams[4]; unsigned int *d_pred[4]; for(int i = 0; i < 4; i++){ hipStreamCreate(streams + i); hipMalloc(d_pred + i, sizeof(int) * vecSize); } for(int group = 0; group < nGroups; group++){ const int nThreads = 256; const int nBlocks = ::ceil(vecSize / (double) nThreads); // Reset base index hipMemset(d_baseIndex, 0, sizeof(int)); hipDeviceSynchronize(); for(int val = 0; val < maxGroupVal; val += 4){ // Get predicates // Scan predicates for(int i = 0; i < 4; i++){ if(val + i < maxGroupVal){ hipLaunchKernelGGL(( make_predicate), dim3(nBlocks), dim3(nThreads), 0, streams[i], d_vec, d_pred[i], vecSize, group, val+i); } } for(int i = 0; i < 4; i++){ if(val + i < maxGroupVal){ xscan(d_pred[i], vecSize, streams[i]); } } // Sequentially Scatter and scatter last element for(int i = 0; i < 4; i++){ if(val + i < maxGroupVal){ hipStreamSynchronize(streams[i]); hipLaunchKernelGGL(( scatter_elements), dim3(nBlocks), dim3(nThreads), 0, 0, d_vec, d_pred[i], d_out, d_baseIndex, vecSize); hipLaunchKernelGGL(( scatter_last), dim3(1),dim3(1), 0, 0, d_vec + vecSize - 1, d_out, d_pred[i] + vecSize - 1, d_baseIndex, group, val+i); } } } std::swap(d_vec, d_out); } for(int i = 0; i < 4; i++){ hipStreamDestroy(streams[i]); hipFree(d_pred[i]); } hipFree(d_out); hipFree(d_baseIndex); *d_vec_p = d_vec; } // Assumes vector's size is a power of 2 void d_radix_sort_old(unsigned int **d_vec_p, int vecSize){ const int nGroups = (sizeof(int) * 8) / NBITS; const int maxGroupVal = 1 << NBITS; unsigned int *d_vec = *d_vec_p; unsigned int *d_pred, *d_out, *d_baseIndex; hipMalloc(&d_pred, sizeof(int) * vecSize); hipMalloc(&d_out, sizeof(int) * vecSize); hipMalloc(&d_baseIndex, sizeof(int)); for(int group = 0; group < nGroups; group++){ const int nThreads = 256; const int nBlocks = ::ceil(vecSize / (double) nThreads); // Reset base index hipMemset(d_baseIndex, 0, sizeof(int)); for(int val = 0; val < maxGroupVal; val++){ // Get predicates // Scan predicates hipLaunchKernelGGL(( make_predicate), dim3(nBlocks), dim3(nThreads), 0, 0, d_vec, d_pred, vecSize, group, val); xscan(d_pred, vecSize, (hipStream_t) 0); // Scatter and scatter last element hipLaunchKernelGGL(( scatter_elements), dim3(nBlocks), dim3(nThreads), 0, 0, d_vec, d_pred, d_out, d_baseIndex, vecSize); hipLaunchKernelGGL(( scatter_last), dim3(1),dim3(1), 0, 0, d_vec + vecSize - 1, d_out, d_pred + vecSize - 1, d_baseIndex, group, val); } std::swap(d_vec, d_out); } hipFree(d_pred); hipFree(d_out); hipFree(d_baseIndex); *d_vec_p = d_vec; } // 1 << 15 size // Version 1: ~56.9 msecs int main(int argc, char *argv[]){ using std::cout; using std::sort; const int size = 1 << 15; unsigned int *h_vec = 
(unsigned int *) malloc(sizeof(int) * size); unsigned int *h_out = (unsigned int *) malloc(sizeof(int) * size); for(int i = 0; i < size; i++) h_vec[i] = i % 16; unsigned int *d_vec; hipMalloc(&d_vec, sizeof(int) * size); hipMemcpy(d_vec, h_vec, sizeof(int) * size, hipMemcpyHostToDevice); GpuTimer timer; /* NEW APPROACH */ timer.Start(); d_radix_sort(&d_vec, size); timer.Stop(); cout << "Elapsed (new): " << timer.Elapsed() << " msecs\n"; /* OLD APPROACH */ hipMemcpy(d_vec, h_vec, sizeof(int) * size, hipMemcpyHostToDevice); timer.Start(); d_radix_sort_old(&d_vec, size); timer.Stop(); cout << "Elapsed (old): " << timer.Elapsed() << " msecs\n"; hipMemcpy(h_out, d_vec, sizeof(int) * size, hipMemcpyDeviceToHost); sort(h_vec, h_vec + size); for(int i = 0; i < size; i++){ if(h_vec[i] != h_out[i]){ cout << "FAILED AT " << i << "!"; break; } if(i == size - 1) cout << "YESSSS!\n"; } free(h_vec); free(h_out); hipFree(d_vec); return 0; }
7a08e54de365b137b0ec38ef24ac8b2d65f61411.cu
///////// #include <iostream> #include <algorithm> #include "timer.h" ///////// #include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include "blelloch_scan.h" #define NBITS 4 #define MASK 0xF // Returns the idx-th group of bits from 'num' __device__ static inline unsigned int get_group(unsigned int num, int whichGroup){ return (num >> (whichGroup * NBITS)) & MASK; } // Number of threads allocated must be higher than vecSize __global__ static void make_predicate(const unsigned int * const d_vec, unsigned int * const d_out, int vecSize, int bitGroup, unsigned int groupVal){ const int myIdx = blockDim.x * blockIdx.x + threadIdx.x; if(myIdx < vecSize) d_out[myIdx] = get_group(d_vec[myIdx], bitGroup) == groupVal; } // Number of threads allocated must be higher than vecSize __global__ static void scatter_elements(const unsigned int * const d_vec, const unsigned int * const d_xscan, unsigned int * const d_out, unsigned int * d_baseIndex, int vecSize) { const int myIdx = blockDim.x * blockIdx.x + threadIdx.x; if(myIdx >= vecSize - 1) return; // Last element is a special case if(d_xscan[myIdx] != d_xscan[myIdx + 1]) d_out[d_baseIndex[0] + d_xscan[myIdx]] = d_vec[myIdx]; } // 1 thread only __global__ static void scatter_last( // Will scatter last element if needed, and update d_baseIndex as needed const unsigned int * const d_lastElem, // Last element of original vector unsigned int * const d_out, // output vector unsigned int * const d_lastScan, // Last element of the xscan vector unsigned int * const d_baseIndex, // baseIndex int bitGroup, // bitGroup being analyzed (last 4 bits, second last etc...) unsigned int groupVal) { // value being analyzed in the the bit group (0, 1, 2...) const int lastElem = *d_lastElem; const int mustScatter = get_group(lastElem, bitGroup) == groupVal; const int outIndex = *d_baseIndex + *d_lastScan; if(mustScatter) d_out[outIndex] = lastElem; *d_baseIndex = outIndex + mustScatter; // If last element is of given group, we must add 1 } void print_device(unsigned int *d_vec, int vecSize){ int *vec = (int *) malloc(vecSize * sizeof(int)); cudaMemcpy(vec, d_vec, sizeof(int) * vecSize, cudaMemcpyDeviceToHost); for(int i = 0; i < vecSize; i++) std::cout << vec[i] << " "; std::cout << "\n"; free(vec); } // Assumes vector's size is a power of 2 void d_radix_sort(unsigned int **d_vec_p, int vecSize){ const int nGroups = (sizeof(int) * 8) / NBITS; const int maxGroupVal = 1 << NBITS; unsigned int *d_vec = *d_vec_p; unsigned int *d_out, *d_baseIndex; cudaMalloc(&d_out, sizeof(int) * vecSize); cudaMalloc(&d_baseIndex, sizeof(int)); cudaStream_t streams[4]; unsigned int *d_pred[4]; for(int i = 0; i < 4; i++){ cudaStreamCreate(streams + i); cudaMalloc(d_pred + i, sizeof(int) * vecSize); } for(int group = 0; group < nGroups; group++){ const int nThreads = 256; const int nBlocks = std::ceil(vecSize / (double) nThreads); // Reset base index cudaMemset(d_baseIndex, 0, sizeof(int)); cudaDeviceSynchronize(); for(int val = 0; val < maxGroupVal; val += 4){ // Get predicates // Scan predicates for(int i = 0; i < 4; i++){ if(val + i < maxGroupVal){ make_predicate<<<nBlocks, nThreads, 0, streams[i]>>>(d_vec, d_pred[i], vecSize, group, val+i); } } for(int i = 0; i < 4; i++){ if(val + i < maxGroupVal){ xscan(d_pred[i], vecSize, streams[i]); } } // Sequentially Scatter and scatter last element for(int i = 0; i < 4; i++){ if(val + i < maxGroupVal){ cudaStreamSynchronize(streams[i]); scatter_elements<<<nBlocks, nThreads>>>(d_vec, d_pred[i], d_out, d_baseIndex, vecSize); 
scatter_last<<<1,1>>>(d_vec + vecSize - 1, d_out, d_pred[i] + vecSize - 1, d_baseIndex, group, val+i); } } } std::swap(d_vec, d_out); } for(int i = 0; i < 4; i++){ cudaStreamDestroy(streams[i]); cudaFree(d_pred[i]); } cudaFree(d_out); cudaFree(d_baseIndex); *d_vec_p = d_vec; } // Assumes vector's size is a power of 2 void d_radix_sort_old(unsigned int **d_vec_p, int vecSize){ const int nGroups = (sizeof(int) * 8) / NBITS; const int maxGroupVal = 1 << NBITS; unsigned int *d_vec = *d_vec_p; unsigned int *d_pred, *d_out, *d_baseIndex; cudaMalloc(&d_pred, sizeof(int) * vecSize); cudaMalloc(&d_out, sizeof(int) * vecSize); cudaMalloc(&d_baseIndex, sizeof(int)); for(int group = 0; group < nGroups; group++){ const int nThreads = 256; const int nBlocks = std::ceil(vecSize / (double) nThreads); // Reset base index cudaMemset(d_baseIndex, 0, sizeof(int)); for(int val = 0; val < maxGroupVal; val++){ // Get predicates // Scan predicates make_predicate<<<nBlocks, nThreads>>>(d_vec, d_pred, vecSize, group, val); xscan(d_pred, vecSize, (cudaStream_t) 0); // Scatter and scatter last element scatter_elements<<<nBlocks, nThreads>>>(d_vec, d_pred, d_out, d_baseIndex, vecSize); scatter_last<<<1,1>>>(d_vec + vecSize - 1, d_out, d_pred + vecSize - 1, d_baseIndex, group, val); } std::swap(d_vec, d_out); } cudaFree(d_pred); cudaFree(d_out); cudaFree(d_baseIndex); *d_vec_p = d_vec; } // 1 << 15 size // Version 1: ~56.9 msecs int main(int argc, char *argv[]){ using std::cout; using std::sort; const int size = 1 << 15; unsigned int *h_vec = (unsigned int *) malloc(sizeof(int) * size); unsigned int *h_out = (unsigned int *) malloc(sizeof(int) * size); for(int i = 0; i < size; i++) h_vec[i] = i % 16; unsigned int *d_vec; cudaMalloc(&d_vec, sizeof(int) * size); cudaMemcpy(d_vec, h_vec, sizeof(int) * size, cudaMemcpyHostToDevice); GpuTimer timer; /* NEW APPROACH */ timer.Start(); d_radix_sort(&d_vec, size); timer.Stop(); cout << "Elapsed (new): " << timer.Elapsed() << " msecs\n"; /* OLD APPROACH */ cudaMemcpy(d_vec, h_vec, sizeof(int) * size, cudaMemcpyHostToDevice); timer.Start(); d_radix_sort_old(&d_vec, size); timer.Stop(); cout << "Elapsed (old): " << timer.Elapsed() << " msecs\n"; cudaMemcpy(h_out, d_vec, sizeof(int) * size, cudaMemcpyDeviceToHost); sort(h_vec, h_vec + size); for(int i = 0; i < size; i++){ if(h_vec[i] != h_out[i]){ cout << "FAILED AT " << i << "!"; break; } if(i == size - 1) cout << "YESSSS!\n"; } free(h_vec); free(h_out); cudaFree(d_vec); return 0; }
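// Illustration only (not part of the radix-sort pair above): the per-digit compaction that
// make_predicate / xscan / scatter_elements implement on the GPU, written out serially on
// the host to make the algorithm explicit. The helper name is hypothetical; xscan itself
// (the Blelloch scan) lives in blelloch_scan.h and is not reproduced here. The GPU version
// additionally offsets every write by d_baseIndex so successive digit values pack one after
// another in the output buffer.
#include <vector>

static void host_compact_by_digit(const std::vector<unsigned int>& in,
                                  std::vector<unsigned int>& out, // must be sized >= in.size()
                                  int bitGroup, unsigned int groupVal)
{
    const int NBITS = 4;           // same 4-bit digits as the kernels
    const unsigned int MASK = 0xF;
    const size_t n = in.size();
    // 1) predicate: 1 where this element's digit equals groupVal
    std::vector<unsigned int> pred(n);
    for (size_t i = 0; i < n; ++i)
        pred[i] = ((in[i] >> (bitGroup * NBITS)) & MASK) == groupVal;
    // 2) exclusive scan of the predicate gives each kept element its output slot
    std::vector<unsigned int> scan(n, 0);
    for (size_t i = 1; i < n; ++i)
        scan[i] = scan[i - 1] + pred[i - 1];
    // 3) scatter: kept elements land contiguously, preserving their input order
    for (size_t i = 0; i < n; ++i)
        if (pred[i])
            out[scan[i]] = in[i];
}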
f6acb2ac5401f81898a16031e6809e3bc1d411e6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// ReLU backward pass: zero the incoming gradient dZ wherever the forward
// activation Z was not positive (one thread per element).
__global__ void ReluBackKernel(float* Z, float* dZ, int size){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id < size){
        if(Z[id] <= 0)
            dZ[id] = 0;
    }
}
f6acb2ac5401f81898a16031e6809e3bc1d411e6.cu
#include "includes.h" __global__ void ReluBackKernel(float* Z, float* dZ, int size){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size){ if(Z[id] <= 0) dZ[id] = 0; } }
8e536967e54b37c247d5404ea05ec6ddada73d6c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "helper_functions.h" #include "helper_cuda.h" #include <ctime> #include <time.h> #include <stdio.h> #include <iostream> #include <math.h> #include <hipfft.h> #include <fstream> using namespace std; typedef float2 Complex; //Found at http://techqa.info/programming/question/36889333/cuda-cufft-2d-example __global__ void ComplexMUL(Complex *a, Complex *b) { int i = threadIdx.x + blockIdx.x * blockDim.x ; a[i].x = a[i].x * b[i].x - a[i].y*b[i].y; a[i].y = a[i].x * b[i].y + a[i].y*b[i].x; } int main() { int N = 5; int SIZE = N*N; Complex *fg = new Complex[SIZE]; for (int i = 0; i < SIZE; i++){ fg[i].x = 1; fg[i].y = 0; } Complex *fig = new Complex[SIZE]; for (int i = 0; i < SIZE; i++){ fig[i].x = 1; // fig[i].y = 0; } for (int i = 0; i < N * N; i = i + N) { for (int j=0; j < N; j++){ cout << fg[i+j].x << " "; } cout << endl; } cout << "----------------" << endl; for (int i = 0; i < N * N; i = i + N) { for (int j=0; j < N; j++){ cout << fig[i+j].x << " "; } cout << endl; } cout << "----------------" << endl; int mem_size = sizeof(Complex)* SIZE; hipfftComplex *d_signal; checkCudaErrors(hipMalloc((void **) &d_signal, mem_size)); checkCudaErrors(hipMemcpy(d_signal, fg, mem_size, hipMemcpyHostToDevice)); hipfftComplex *d_filter_kernel; checkCudaErrors(hipMalloc((void **)&d_filter_kernel, mem_size)); checkCudaErrors(hipMemcpy(d_filter_kernel, fig, mem_size, hipMemcpyHostToDevice)); // cout << d_signal[1].x << endl; // CUFFT plan hipfftHandle plan; hipfftPlan2d(&plan, N, N, HIPFFT_C2C); // Transform signal and filter printf("Transforming signal hipfftExecR2C\n"); hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD); hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD); printf("Launching Complex multiplication<<< >>>\n"); ComplexMUL <<< N, N >> >(d_signal, d_filter_kernel); // Transform signal back printf("Transforming signal back hipfftExecC2C\n"); hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD); Complex *result = new Complex[SIZE]; hipMemcpy(result, d_signal, sizeof(Complex)*SIZE, hipMemcpyDeviceToHost); for (int i = 0; i < SIZE; i = i + N) { for (int j=0; j < N; j++){ cout << result[i+j].x << " "; } cout << endl; } delete result, fg, fig; hipfftDestroy(plan); //hipfftDestroy(plan2); hipFree(d_signal); hipFree(d_filter_kernel); }
8e536967e54b37c247d5404ea05ec6ddada73d6c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "helper_functions.h" #include "helper_cuda.h" #include <ctime> #include <time.h> #include <stdio.h> #include <iostream> #include <math.h> #include <cufft.h> #include <fstream> using namespace std; typedef float2 Complex; //Found at http://techqa.info/programming/question/36889333/cuda-cufft-2d-example __global__ void ComplexMUL(Complex *a, Complex *b) { int i = threadIdx.x + blockIdx.x * blockDim.x ; a[i].x = a[i].x * b[i].x - a[i].y*b[i].y; a[i].y = a[i].x * b[i].y + a[i].y*b[i].x; } int main() { int N = 5; int SIZE = N*N; Complex *fg = new Complex[SIZE]; for (int i = 0; i < SIZE; i++){ fg[i].x = 1; fg[i].y = 0; } Complex *fig = new Complex[SIZE]; for (int i = 0; i < SIZE; i++){ fig[i].x = 1; // fig[i].y = 0; } for (int i = 0; i < N * N; i = i + N) { for (int j=0; j < N; j++){ cout << fg[i+j].x << " "; } cout << endl; } cout << "----------------" << endl; for (int i = 0; i < N * N; i = i + N) { for (int j=0; j < N; j++){ cout << fig[i+j].x << " "; } cout << endl; } cout << "----------------" << endl; int mem_size = sizeof(Complex)* SIZE; cufftComplex *d_signal; checkCudaErrors(cudaMalloc((void **) &d_signal, mem_size)); checkCudaErrors(cudaMemcpy(d_signal, fg, mem_size, cudaMemcpyHostToDevice)); cufftComplex *d_filter_kernel; checkCudaErrors(cudaMalloc((void **)&d_filter_kernel, mem_size)); checkCudaErrors(cudaMemcpy(d_filter_kernel, fig, mem_size, cudaMemcpyHostToDevice)); // cout << d_signal[1].x << endl; // CUFFT plan cufftHandle plan; cufftPlan2d(&plan, N, N, CUFFT_C2C); // Transform signal and filter printf("Transforming signal cufftExecR2C\n"); cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD); cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD); printf("Launching Complex multiplication<<< >>>\n"); ComplexMUL <<< N, N >> >(d_signal, d_filter_kernel); // Transform signal back printf("Transforming signal back cufftExecC2C\n"); cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE); Complex *result = new Complex[SIZE]; cudaMemcpy(result, d_signal, sizeof(Complex)*SIZE, cudaMemcpyDeviceToHost); for (int i = 0; i < SIZE; i = i + N) { for (int j=0; j < N; j++){ cout << result[i+j].x << " "; } cout << endl; } delete result, fg, fig; cufftDestroy(plan); //cufftDestroy(plan2); cudaFree(d_signal); cudaFree(d_filter_kernel); }
5de3694172d00abcce4101e25d49d84719d4347f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "libraries/criterion/cuda/ForceAlignmentCriterion.cuh" #include <algorithm> #include <cmath> #include "libraries/common/CudaUtils.cuh" #include "libraries/common/Workspace.h" #include "libraries/criterion/cuda/CriterionUtils.cuh" namespace { template <class Float> struct WorkspacePtrs { explicit WorkspacePtrs(void* workspace, int B, int T, int N, int L) { w2l::Workspace<> ws(workspace); ws.request(&scale, B); ws.request(&alpha, B, T, L); ws.request(&alphaGrad, B, T, L); ws.request(&transBatchGrad, B, N, N); ws.request(&transBuf1, B, L); ws.request(&transBuf2, B, L); ws.request(&transBufGrad1, B, L); ws.request(&transBufGrad2, B, L); requiredSize = ws.requiredSize(); } Float* scale; double* alpha; double* alphaGrad; Float* transBatchGrad; Float* transBuf1; Float* transBuf2; Float* transBufGrad1; Float* transBufGrad2; size_t requiredSize; }; /* * B thread blocks * L threads/block (ideally) */ template <class Float> __global__ void forwardKernel( int T, int N, int _L, const Float* _input, const int* _target, const int* targetSize, const Float* trans, Float* _loss, WorkspacePtrs<Float> ws) { int b = blockIdx.x; auto* alpha = &ws.alpha[b * T * _L]; auto* input = &_input[b * T * N]; auto* target = &_target[b * _L]; auto* transBuf1 = &ws.transBuf1[b * _L]; auto* transBuf2 = &ws.transBuf2[b * _L]; int L = targetSize[b]; for (int i = threadIdx.x; i < L; i += blockDim.x) { alpha[i] = i == 0 ? input[target[0]] : 0; transBuf1[i] = trans[target[i] * N + target[i]]; transBuf2[i] = i > 0 ? trans[target[i] * N + target[i - 1]] : 0; } for (int t = 1; t < T; ++t) { auto* inputCur = &input[t * N]; auto* alphaPrev = &alpha[(t - 1) * L]; auto* alphaCur = &alpha[t * L]; int high = t < L ? t : L; int low = T - t < L ? L - (T - t) : 1; __syncthreads(); if (threadIdx.x == 0) { if (T - t >= L) { alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]]; } } else if (threadIdx.x == 1) { if (t < L) { alphaCur[high] = alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]]; } } for (int i = low + threadIdx.x; i < high; i += blockDim.x) { double s1 = alphaPrev[i] + transBuf1[i]; double s2 = alphaPrev[i - 1] + transBuf2[i]; // lse = logSumExp(s1, s2) double lse = s1 < s2 ? 
s2 + log(1 + exp(s1 - s2)) : s1 + log(1 + exp(s2 - s1)); alphaCur[i] = lse + inputCur[target[i]]; } } __syncthreads(); if (threadIdx.x == 0) { _loss[b] = alpha[T * L - 1] * ws.scale[b]; } } /* * B thread blocks * L threads/block (ideally) */ template <class Float> __global__ void backwardKernel( int T, int N, int _L, const int* _target, const int* targetSize, const Float* grad, Float* _inputGrad, Float* transGrad, WorkspacePtrs<Float> ws) { int b = blockIdx.x; auto* alpha = &ws.alpha[b * T * _L]; auto* alphaGrad = &ws.alphaGrad[b * T * _L]; auto* inputGrad = &_inputGrad[b * T * N]; auto* target = &_target[b * _L]; auto* transBatchGrad = &ws.transBatchGrad[b * N * N]; auto* transBuf1 = &ws.transBuf1[b * _L]; auto* transBuf2 = &ws.transBuf2[b * _L]; auto* transBufGrad1 = &ws.transBufGrad1[b * _L]; auto* transBufGrad2 = &ws.transBufGrad2[b * _L]; int L = targetSize[b]; if (threadIdx.x == 0) { alphaGrad[T * L - 1] = 1; } for (int t = T - 1; t > 0; --t) { auto* inputCurGrad = &inputGrad[t * N]; auto* alphaPrev = &alpha[(t - 1) * L]; auto* alphaCurGrad = &alphaGrad[t * L]; auto* alphaPrevGrad = &alphaGrad[(t - 1) * L]; int high = t < L ? t : L; int low = T - t < L ? L - (T - t) : 1; int high1 = t < L ? t + 1 : L; int low1 = T - t < L ? L - (T - t) : 0; __syncthreads(); for (int i = low1 + threadIdx.x; i < high1; i += blockDim.x) { atomicAdd(&inputCurGrad[target[i]], alphaCurGrad[i]); } if (threadIdx.x == 0) { if (T - t >= L) { atomicAdd(&alphaPrevGrad[0], alphaCurGrad[0]); transBufGrad1[0] += alphaCurGrad[0]; } } else if (threadIdx.x == 1) { if (t < L) { atomicAdd(&alphaPrevGrad[high - 1], alphaCurGrad[high]); transBufGrad2[high] += alphaCurGrad[high]; } } for (int i = low + threadIdx.x; i < high; i += blockDim.x) { double s1 = alphaPrev[i] + transBuf1[i]; double s2 = alphaPrev[i - 1] + transBuf2[i]; // d1, d2 = dLogSumExp(s1, s2) double d1, d2; if (s1 < s2) { d2 = 1 / (1 + exp(s1 - s2)); d1 = 1 - d2; } else { d1 = 1 / (1 + exp(s2 - s1)); d2 = 1 - d1; } atomicAdd(&alphaPrevGrad[i], d1 * alphaCurGrad[i]); atomicAdd(&alphaPrevGrad[i - 1], d2 * alphaCurGrad[i]); transBufGrad1[i] += d1 * alphaCurGrad[i]; transBufGrad2[i] += d2 * alphaCurGrad[i]; } } __syncthreads(); __shared__ Float gradScale; if (threadIdx.x == 0) { inputGrad[target[0]] += alphaGrad[0]; gradScale = grad[b] * ws.scale[b]; } for (int i = threadIdx.x; i < L; i += blockDim.x) { atomicAdd(&transBatchGrad[target[i] * N + target[i]], transBufGrad1[i]); if (i > 0) { atomicAdd( &transBatchGrad[target[i] * N + target[i - 1]], transBufGrad2[i]); } } __syncthreads(); for (int i = threadIdx.x; i < T * N; i += blockDim.x) { inputGrad[i] *= gradScale; } for (int i = threadIdx.x; i < N * N; i += blockDim.x) { atomicAdd(&transGrad[i], gradScale * transBatchGrad[i]); } } template <class Float> __global__ void viterbiPathKernel( int T, int N, int _L, const Float* _input, const int* _target, const int* targetSize, const Float* trans, int* bestPaths, WorkspacePtrs<Float> ws) { int b = blockIdx.x; auto* alpha = &ws.alpha[b * T * _L]; auto* input = &_input[b * T * N]; auto* target = &_target[b * _L]; auto* transBuf1 = &ws.transBuf1[b * _L]; auto* transBuf2 = &ws.transBuf2[b * _L]; int L = targetSize[b]; for (int i = threadIdx.x; i < L; i += blockDim.x) { alpha[i] = i == 0 ? input[target[0]] : 0; transBuf1[i] = trans[target[i] * N + target[i]]; transBuf2[i] = i > 0 ? 
trans[target[i] * N + target[i - 1]] : 0; } if (L > T || L == 0) { return; } for (int t = 1; t < T; ++t) { auto* inputCur = &input[t * N]; auto* alphaPrev = &alpha[(t - 1) * L]; auto* alphaCur = &alpha[t * L]; int high = t < L ? t : L; int low = T - t < L ? L - (T - t) : 1; // Ensure that all previous alphas have been computed __syncthreads(); if (threadIdx.x == 0) { if (T - t >= L) { alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]]; } } else if (threadIdx.x == 1) { if (t < L) { alphaCur[high] = alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]]; } } for (int i = low + threadIdx.x; i < high; i += blockDim.x) { double s1 = alphaPrev[i] + transBuf1[i]; double s2 = alphaPrev[i - 1] + transBuf2[i]; alphaCur[i] = inputCur[target[i]] + max(s1, s2); } } // Ensure all threads are finished and alphas have been computed before // computing backward path __syncthreads(); if (threadIdx.x == 0) { int ltrIdx = L - 1; for (int t = T - 1; t > 0; t--) { bestPaths[t + (b * T)] = target[ltrIdx]; auto* alphaPrev = &alpha[(t - 1) * L]; if (ltrIdx > 0) { double s1 = alphaPrev[ltrIdx] + transBuf1[ltrIdx]; double s2 = alphaPrev[ltrIdx - 1] + transBuf2[ltrIdx]; if (s2 > s1) { ltrIdx--; } } } bestPaths[b * T] = target[ltrIdx]; } } } // namespace namespace w2l { namespace cuda { template <class Float> size_t ForceAlignmentCriterion<Float>::getWorkspaceSize(int B, int T, int N, int L) { return WorkspacePtrs<Float>(nullptr, B, T, N, L).requiredSize; } template <class Float> void ForceAlignmentCriterion<Float>::forward( int B, int T, int N, int L, CriterionScaleMode scaleMode, const Float* input, const int* target, const int* targetSize, const Float* trans, Float* loss, void* workspace, hipStream_t stream) { int blockSize = ::min(256, (L + 31) / 32 * 32); WorkspacePtrs<Float> ws(workspace, B, T, N, L); CriterionUtils<Float>::computeScale( B, T, N, scaleMode, targetSize, ws.scale, stream); hipLaunchKernelGGL(( forwardKernel), dim3(B), dim3(blockSize), 0, stream, T, N, L, input, target, targetSize, trans, loss, ws); } template <class Float> void ForceAlignmentCriterion<Float>::backward( int B, int T, int N, int L, const int* target, const int* targetSize, const Float* grad, Float* inputGrad, Float* transGrad, void* workspace, hipStream_t stream) { int blockSize = ::min(256, (L + 31) / 32 * 32); WorkspacePtrs<Float> ws(workspace, B, T, N, L); setZero(inputGrad, B * T * N, stream); setZero(transGrad, N * N, stream); setZero(ws.alphaGrad, B * T * L, stream); setZero(ws.transBatchGrad, B * N * N, stream); setZero(ws.transBufGrad1, B * L, stream); setZero(ws.transBufGrad2, B * L, stream); hipLaunchKernelGGL(( backwardKernel), dim3(B), dim3(blockSize), 0, stream, T, N, L, target, targetSize, grad, inputGrad, transGrad, ws); } template <class Float> void ForceAlignmentCriterion<Float>::viterbiPath( int B, int T, int N, int L, const Float* input, const int* target, const int* targetSize, const Float* trans, int* bestPaths, void* workspace, hipStream_t stream) { int blockSize = ::min(256, (L + 31) / 32 * 32); WorkspacePtrs<Float> ws(workspace, B, T, N, L); setZero(ws.alpha, B * T * L, stream); hipLaunchKernelGGL(( viterbiPathKernel), dim3(B), dim3(blockSize), 0, stream, T, N, L, input, target, targetSize, trans, bestPaths, ws); } template struct ForceAlignmentCriterion<float>; template struct ForceAlignmentCriterion<double>; } // namespace cuda } // namespace w2l
5de3694172d00abcce4101e25d49d84719d4347f.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "libraries/criterion/cuda/ForceAlignmentCriterion.cuh" #include <algorithm> #include <cmath> #include "libraries/common/CudaUtils.cuh" #include "libraries/common/Workspace.h" #include "libraries/criterion/cuda/CriterionUtils.cuh" namespace { template <class Float> struct WorkspacePtrs { explicit WorkspacePtrs(void* workspace, int B, int T, int N, int L) { w2l::Workspace<> ws(workspace); ws.request(&scale, B); ws.request(&alpha, B, T, L); ws.request(&alphaGrad, B, T, L); ws.request(&transBatchGrad, B, N, N); ws.request(&transBuf1, B, L); ws.request(&transBuf2, B, L); ws.request(&transBufGrad1, B, L); ws.request(&transBufGrad2, B, L); requiredSize = ws.requiredSize(); } Float* scale; double* alpha; double* alphaGrad; Float* transBatchGrad; Float* transBuf1; Float* transBuf2; Float* transBufGrad1; Float* transBufGrad2; size_t requiredSize; }; /* * B thread blocks * L threads/block (ideally) */ template <class Float> __global__ void forwardKernel( int T, int N, int _L, const Float* _input, const int* _target, const int* targetSize, const Float* trans, Float* _loss, WorkspacePtrs<Float> ws) { int b = blockIdx.x; auto* alpha = &ws.alpha[b * T * _L]; auto* input = &_input[b * T * N]; auto* target = &_target[b * _L]; auto* transBuf1 = &ws.transBuf1[b * _L]; auto* transBuf2 = &ws.transBuf2[b * _L]; int L = targetSize[b]; for (int i = threadIdx.x; i < L; i += blockDim.x) { alpha[i] = i == 0 ? input[target[0]] : 0; transBuf1[i] = trans[target[i] * N + target[i]]; transBuf2[i] = i > 0 ? trans[target[i] * N + target[i - 1]] : 0; } for (int t = 1; t < T; ++t) { auto* inputCur = &input[t * N]; auto* alphaPrev = &alpha[(t - 1) * L]; auto* alphaCur = &alpha[t * L]; int high = t < L ? t : L; int low = T - t < L ? L - (T - t) : 1; __syncthreads(); if (threadIdx.x == 0) { if (T - t >= L) { alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]]; } } else if (threadIdx.x == 1) { if (t < L) { alphaCur[high] = alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]]; } } for (int i = low + threadIdx.x; i < high; i += blockDim.x) { double s1 = alphaPrev[i] + transBuf1[i]; double s2 = alphaPrev[i - 1] + transBuf2[i]; // lse = logSumExp(s1, s2) double lse = s1 < s2 ? 
s2 + log(1 + exp(s1 - s2)) : s1 + log(1 + exp(s2 - s1)); alphaCur[i] = lse + inputCur[target[i]]; } } __syncthreads(); if (threadIdx.x == 0) { _loss[b] = alpha[T * L - 1] * ws.scale[b]; } } /* * B thread blocks * L threads/block (ideally) */ template <class Float> __global__ void backwardKernel( int T, int N, int _L, const int* _target, const int* targetSize, const Float* grad, Float* _inputGrad, Float* transGrad, WorkspacePtrs<Float> ws) { int b = blockIdx.x; auto* alpha = &ws.alpha[b * T * _L]; auto* alphaGrad = &ws.alphaGrad[b * T * _L]; auto* inputGrad = &_inputGrad[b * T * N]; auto* target = &_target[b * _L]; auto* transBatchGrad = &ws.transBatchGrad[b * N * N]; auto* transBuf1 = &ws.transBuf1[b * _L]; auto* transBuf2 = &ws.transBuf2[b * _L]; auto* transBufGrad1 = &ws.transBufGrad1[b * _L]; auto* transBufGrad2 = &ws.transBufGrad2[b * _L]; int L = targetSize[b]; if (threadIdx.x == 0) { alphaGrad[T * L - 1] = 1; } for (int t = T - 1; t > 0; --t) { auto* inputCurGrad = &inputGrad[t * N]; auto* alphaPrev = &alpha[(t - 1) * L]; auto* alphaCurGrad = &alphaGrad[t * L]; auto* alphaPrevGrad = &alphaGrad[(t - 1) * L]; int high = t < L ? t : L; int low = T - t < L ? L - (T - t) : 1; int high1 = t < L ? t + 1 : L; int low1 = T - t < L ? L - (T - t) : 0; __syncthreads(); for (int i = low1 + threadIdx.x; i < high1; i += blockDim.x) { atomicAdd(&inputCurGrad[target[i]], alphaCurGrad[i]); } if (threadIdx.x == 0) { if (T - t >= L) { atomicAdd(&alphaPrevGrad[0], alphaCurGrad[0]); transBufGrad1[0] += alphaCurGrad[0]; } } else if (threadIdx.x == 1) { if (t < L) { atomicAdd(&alphaPrevGrad[high - 1], alphaCurGrad[high]); transBufGrad2[high] += alphaCurGrad[high]; } } for (int i = low + threadIdx.x; i < high; i += blockDim.x) { double s1 = alphaPrev[i] + transBuf1[i]; double s2 = alphaPrev[i - 1] + transBuf2[i]; // d1, d2 = dLogSumExp(s1, s2) double d1, d2; if (s1 < s2) { d2 = 1 / (1 + exp(s1 - s2)); d1 = 1 - d2; } else { d1 = 1 / (1 + exp(s2 - s1)); d2 = 1 - d1; } atomicAdd(&alphaPrevGrad[i], d1 * alphaCurGrad[i]); atomicAdd(&alphaPrevGrad[i - 1], d2 * alphaCurGrad[i]); transBufGrad1[i] += d1 * alphaCurGrad[i]; transBufGrad2[i] += d2 * alphaCurGrad[i]; } } __syncthreads(); __shared__ Float gradScale; if (threadIdx.x == 0) { inputGrad[target[0]] += alphaGrad[0]; gradScale = grad[b] * ws.scale[b]; } for (int i = threadIdx.x; i < L; i += blockDim.x) { atomicAdd(&transBatchGrad[target[i] * N + target[i]], transBufGrad1[i]); if (i > 0) { atomicAdd( &transBatchGrad[target[i] * N + target[i - 1]], transBufGrad2[i]); } } __syncthreads(); for (int i = threadIdx.x; i < T * N; i += blockDim.x) { inputGrad[i] *= gradScale; } for (int i = threadIdx.x; i < N * N; i += blockDim.x) { atomicAdd(&transGrad[i], gradScale * transBatchGrad[i]); } } template <class Float> __global__ void viterbiPathKernel( int T, int N, int _L, const Float* _input, const int* _target, const int* targetSize, const Float* trans, int* bestPaths, WorkspacePtrs<Float> ws) { int b = blockIdx.x; auto* alpha = &ws.alpha[b * T * _L]; auto* input = &_input[b * T * N]; auto* target = &_target[b * _L]; auto* transBuf1 = &ws.transBuf1[b * _L]; auto* transBuf2 = &ws.transBuf2[b * _L]; int L = targetSize[b]; for (int i = threadIdx.x; i < L; i += blockDim.x) { alpha[i] = i == 0 ? input[target[0]] : 0; transBuf1[i] = trans[target[i] * N + target[i]]; transBuf2[i] = i > 0 ? 
trans[target[i] * N + target[i - 1]] : 0; } if (L > T || L == 0) { return; } for (int t = 1; t < T; ++t) { auto* inputCur = &input[t * N]; auto* alphaPrev = &alpha[(t - 1) * L]; auto* alphaCur = &alpha[t * L]; int high = t < L ? t : L; int low = T - t < L ? L - (T - t) : 1; // Ensure that all previous alphas have been computed __syncthreads(); if (threadIdx.x == 0) { if (T - t >= L) { alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]]; } } else if (threadIdx.x == 1) { if (t < L) { alphaCur[high] = alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]]; } } for (int i = low + threadIdx.x; i < high; i += blockDim.x) { double s1 = alphaPrev[i] + transBuf1[i]; double s2 = alphaPrev[i - 1] + transBuf2[i]; alphaCur[i] = inputCur[target[i]] + max(s1, s2); } } // Ensure all threads are finished and alphas have been computed before // computing backward path __syncthreads(); if (threadIdx.x == 0) { int ltrIdx = L - 1; for (int t = T - 1; t > 0; t--) { bestPaths[t + (b * T)] = target[ltrIdx]; auto* alphaPrev = &alpha[(t - 1) * L]; if (ltrIdx > 0) { double s1 = alphaPrev[ltrIdx] + transBuf1[ltrIdx]; double s2 = alphaPrev[ltrIdx - 1] + transBuf2[ltrIdx]; if (s2 > s1) { ltrIdx--; } } } bestPaths[b * T] = target[ltrIdx]; } } } // namespace namespace w2l { namespace cuda { template <class Float> size_t ForceAlignmentCriterion<Float>::getWorkspaceSize(int B, int T, int N, int L) { return WorkspacePtrs<Float>(nullptr, B, T, N, L).requiredSize; } template <class Float> void ForceAlignmentCriterion<Float>::forward( int B, int T, int N, int L, CriterionScaleMode scaleMode, const Float* input, const int* target, const int* targetSize, const Float* trans, Float* loss, void* workspace, cudaStream_t stream) { int blockSize = std::min(256, (L + 31) / 32 * 32); WorkspacePtrs<Float> ws(workspace, B, T, N, L); CriterionUtils<Float>::computeScale( B, T, N, scaleMode, targetSize, ws.scale, stream); forwardKernel<<<B, blockSize, 0, stream>>>( T, N, L, input, target, targetSize, trans, loss, ws); } template <class Float> void ForceAlignmentCriterion<Float>::backward( int B, int T, int N, int L, const int* target, const int* targetSize, const Float* grad, Float* inputGrad, Float* transGrad, void* workspace, cudaStream_t stream) { int blockSize = std::min(256, (L + 31) / 32 * 32); WorkspacePtrs<Float> ws(workspace, B, T, N, L); setZero(inputGrad, B * T * N, stream); setZero(transGrad, N * N, stream); setZero(ws.alphaGrad, B * T * L, stream); setZero(ws.transBatchGrad, B * N * N, stream); setZero(ws.transBufGrad1, B * L, stream); setZero(ws.transBufGrad2, B * L, stream); backwardKernel<<<B, blockSize, 0, stream>>>( T, N, L, target, targetSize, grad, inputGrad, transGrad, ws); } template <class Float> void ForceAlignmentCriterion<Float>::viterbiPath( int B, int T, int N, int L, const Float* input, const int* target, const int* targetSize, const Float* trans, int* bestPaths, void* workspace, cudaStream_t stream) { int blockSize = std::min(256, (L + 31) / 32 * 32); WorkspacePtrs<Float> ws(workspace, B, T, N, L); setZero(ws.alpha, B * T * L, stream); viterbiPathKernel<<<B, blockSize, 0, stream>>>( T, N, L, input, target, targetSize, trans, bestPaths, ws); } template struct ForceAlignmentCriterion<float>; template struct ForceAlignmentCriterion<double>; } // namespace cuda } // namespace w2l
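// Illustration only (not part of the file pair above): the log-sum-exp and its gradient that
// forwardKernel / backwardKernel inline by hand. Restated here as plain host functions purely
// to make the identity explicit; the kernels order the branches so that the exp() argument is
// never positive, which is what keeps the computation numerically stable.
#include <cmath>

static double logSumExp(double s1, double s2)
{
    // log(exp(s1) + exp(s2)), evaluated around the larger input
    return s1 < s2 ? s2 + std::log(1.0 + std::exp(s1 - s2))
                   : s1 + std::log(1.0 + std::exp(s2 - s1));
}

static void dLogSumExp(double s1, double s2, double& d1, double& d2)
{
    // softmax weights: d1 = exp(s1) / (exp(s1) + exp(s2)), d2 = 1 - d1
    if (s1 < s2) { d2 = 1.0 / (1.0 + std::exp(s1 - s2)); d1 = 1.0 - d2; }
    else         { d1 = 1.0 / (1.0 + std::exp(s2 - s1)); d2 = 1.0 - d1; }
}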
aaf12dcd44354eae7ee0c4b8bab96b18b79bd129.hip
// !!! This is a file automatically generated by hipify!!! extern "C" { #include "ccv.h" } #include <ctype.h> #define CASE_TESTS // so that we don't include public available methods #include "../lib/cuda/cwc_convnet.cu" #undef USE_DISPATCH // nvcc doesn't support libdispatch #include "../lib/ccv_convnet.c" extern "C" void cwc_bench_runtime(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_convnet_train_param_t params) { int batch = params.mini_batch; int i, x, y, k, c; _cwc_convnet_alloc_reserved(convnet, batch, params.layer_params); cwc_convnet_context_t* context = GPU(convnet)->contexts; for (i = 0; i < convnet->rows * convnet->cols * convnet->channels; i++) convnet->mean_activity->data.f32[i] = 128; _cwc_convnet_batch_formation(0, categorizeds, convnet->mean_activity, 0, 0, 0, 0, ccv_size(225, 225), convnet->rows, convnet->cols, convnet->channels, 0, batch, 0, batch, context->host.input, context->host.c); hipMemcpy(context->device.input, context->host.input, sizeof(float) * convnet->rows * convnet->cols * convnet->channels * batch, hipMemcpyHostToDevice); ccv_convnet_t* update_params = _ccv_convnet_update_new(convnet); _ccv_convnet_update_zero(update_params); // first convolutional layer forward propagate ccv_convnet_layer_t* first_gpu_layer = GPU(convnet)->layers; _cwc_convnet_convolutional_forward_propagate(first_gpu_layer, batch, context->device.input, GPU(convnet)->forwards[0], context->device.stream); hipStreamSynchronize(context->device.stream); int first_out_rows, first_out_cols, first_out_partition, first_out_channels = first_gpu_layer->net.convolutional.count; _cwc_convnet_layer_deduce_output_format(first_gpu_layer, &first_out_rows, &first_out_cols, &first_out_partition); float* first_out = 0; hipHostMalloc(&first_out, sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch); hipMemcpy(first_out, GPU(convnet)->forwards[0], sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate first convolutional layer on GPU\n"); // second average pool layer forward propagate ccv_convnet_layer_t* second_gpu_layer = GPU(convnet)->layers + 1; _cwc_convnet_average_pool_forward_propagate(second_gpu_layer, batch, GPU(convnet)->forwards[0], GPU(convnet)->forwards[1], context->device.stream); hipStreamSynchronize(context->device.stream); int second_out_rows, second_out_cols, second_out_partition, second_out_channels = second_gpu_layer->input.matrix.channels; _cwc_convnet_layer_deduce_output_format(second_gpu_layer, &second_out_rows, &second_out_cols, &second_out_partition); float* second_out = 0; hipHostMalloc(&second_out, sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch); hipMemcpy(second_out, GPU(convnet)->forwards[1], sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate second average pool layer on GPU\n"); // third convolutional layer forward propagate ccv_convnet_layer_t* third_gpu_layer = GPU(convnet)->layers + 2; _cwc_convnet_convolutional_forward_propagate(third_gpu_layer, batch, GPU(convnet)->forwards[1], GPU(convnet)->forwards[2], context->device.stream); hipStreamSynchronize(context->device.stream); int third_out_rows, third_out_cols, third_out_partition, third_out_channels = third_gpu_layer->net.convolutional.count; _cwc_convnet_layer_deduce_output_format(third_gpu_layer, &third_out_rows, &third_out_cols, &third_out_partition); float* third_out = 0; 
hipHostMalloc(&third_out, sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch); hipMemcpy(third_out, GPU(convnet)->forwards[2], sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch, hipMemcpyDeviceToHost); printf("finished forward propagate third convolutional layer on GPU\n"); // third convolutonal layer backward propagate hipMemcpy(GPU(convnet)->backwards[3], GPU(convnet)->forwards[2], sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch, hipMemcpyDeviceToDevice); ccv_convnet_layer_t* third_gpu_configuration = GPU(convnet)->configurations + 2; _cwc_convnet_convolutional_backward_propagate(third_gpu_layer, batch, GPU(convnet)->backwards[3], GPU(convnet)->forwards[2], GPU(convnet)->forwards[1], GPU(convnet)->backwards[2], third_gpu_configuration, GPU(convnet)->scratch, GPU(convnet)->unit, context->device.stream, context->device.cublas); hipStreamSynchronize(context->device.stream); assert(hipGetLastError() == hipSuccess); float* third_back = 0; hipHostMalloc(&third_back, sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch); hipMemcpy(third_back, GPU(convnet)->backwards[2], sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch, hipMemcpyDeviceToHost); float* third_grad = 0; hipHostMalloc(&third_grad, sizeof(float) * third_gpu_layer->wnum); assert(third_grad); hipMemcpy(third_grad, third_gpu_configuration->w, sizeof(float) * third_gpu_layer->wnum, hipMemcpyDeviceToHost); printf("finished backward propagate third convolutional layer on GPU\n"); // second average pool layer backward propagate _cwc_convnet_average_pool_backward_propagate(second_gpu_layer, batch, GPU(convnet)->backwards[2], GPU(convnet)->backwards[1], context->device.stream); hipStreamSynchronize(context->device.stream); assert(hipGetLastError() == hipSuccess); float* second_back = 0; hipHostMalloc(&second_back, sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch); hipMemcpy(second_back, GPU(convnet)->backwards[1], sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch, hipMemcpyDeviceToHost); printf("finished backward propagate second average pool layer on GPU\n"); // first convolutional layer backward propagate ccv_convnet_layer_t* first_gpu_configuration = GPU(convnet)->configurations; _cwc_convnet_convolutional_backward_propagate(first_gpu_layer, batch, GPU(convnet)->backwards[1], GPU(convnet)->forwards[0], context->device.input, GPU(convnet)->backwards[0], first_gpu_configuration, GPU(convnet)->scratch, GPU(convnet)->unit, context->device.stream, context->device.cublas); hipStreamSynchronize(context->device.stream); assert(hipGetLastError() == hipSuccess); float* first_grad = 0; hipHostMalloc(&first_grad, sizeof(float) * first_gpu_layer->wnum); assert(first_grad); hipMemcpy(first_grad, first_gpu_configuration->w, sizeof(float) * first_gpu_layer->wnum, hipMemcpyDeviceToHost); printf("finished backward propagate first convolutional layer on GPU\n"); for (i = 0; i < batch; i++) { printf("doing batch %d of %d\n", i + 1, batch); ccv_categorized_t* categorized = (ccv_categorized_t*)ccv_array_get(categorizeds, i); for (x = 0; x < categorized->matrix->rows * categorized->matrix->cols * CCV_GET_CHANNEL(categorized->matrix->type); x++) categorized->matrix->data.f32[x] = categorized->matrix->data.f32[x] - 128; // first convolutional layer forward propagate ccv_convnet_layer_t* first_cpu_layer = convnet->layers; 
_ccv_convnet_convolutional_forward_propagate(first_cpu_layer, categorized->matrix, convnet->acts); ccv_dense_matrix_t* a = convnet->acts[0]; for (y = 0; y < first_out_rows; y++) for (x = 0; x < first_out_cols; x++) for (k = 0; k < first_out_channels; k++) { float p = first_out[k * first_out_rows * first_out_cols * batch + (y * first_out_cols + x) * batch + i]; float q = a->data.f32[y * first_out_cols * first_out_channels + x * first_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 1: %d %d %d %d: |%f - %f| = %f\n", i, x, y, k, p, q, delta); } // second average pool layer forward propagate ccv_convnet_layer_t* second_cpu_layer = convnet->layers + 1; _ccv_convnet_average_pool_forward_propagate(second_cpu_layer, convnet->acts[0], convnet->acts + 1); ccv_dense_matrix_t* b = convnet->acts[1]; for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels; k++) { float p = second_out[k * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = b->data.f32[y * second_out_cols * second_out_channels + x * second_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool fprop 2: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer forward propagate // we know this layer we have partition == 2, emulate that without partition support in CPU implementation ccv_convnet_layer_t* third_cpu_layer = convnet->layers + 2; third_cpu_layer->input.matrix.channels = third_cpu_layer->input.matrix.channels / 2; third_cpu_layer->net.convolutional.count = third_cpu_layer->net.convolutional.count / 2; third_cpu_layer->net.convolutional.channels = third_cpu_layer->input.matrix.channels; // first halve of third layer ccv_dense_matrix_t* halve = ccv_dense_matrix_new(second_out_rows, second_out_cols, CCV_32F | third_cpu_layer->input.matrix.channels, 0, 0); for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels / 2; k++) halve->data.f32[(y * second_out_cols + x) * second_out_channels / 2 + k] = b->data.f32[(y * second_out_cols + x) * second_out_channels + k]; _ccv_convnet_convolutional_forward_propagate(third_cpu_layer, halve, convnet->acts + 2); ccv_dense_matrix_t* c = convnet->acts[2]; for (y = 0; y < third_out_rows; y++) for (x = 0; x < third_out_cols; x++) for (k = 0; k < third_out_channels / 2; k++) { float p = third_out[k * third_out_rows * third_out_cols * batch + (y * third_out_cols + x) * batch + i]; float q = c->data.f32[(y * third_out_cols + x) * third_out_channels / 2 + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer backward propagate ccv_dense_matrix_t* bc = 0; _ccv_convnet_convolutional_backward_propagate(third_cpu_layer, convnet->acts[2], convnet->acts[2], halve, &bc, update_params->layers + 2); if (update_params->acts[1] == 0) update_params->acts[1] = ccv_dense_matrix_new(second_out_rows, second_out_cols, second_out_channels | CCV_32F, 0, 0); for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels / 2; k++) { float p = third_back[k * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = bc->data.f32[(y * second_out_cols + x) * second_out_channels / 2 + 
k]; update_params->acts[1]->data.f32[(y * second_out_cols + x) * second_out_channels + k] = q; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // second halve of third layer third_cpu_layer->w += third_cpu_layer->wnum / 2; third_cpu_layer->bias += third_cpu_layer->net.convolutional.count; for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels / 2; k++) halve->data.f32[(y * second_out_cols + x) * second_out_channels / 2 + k] = b->data.f32[(y * second_out_cols + x) * second_out_channels + second_out_channels / 2 + k]; _ccv_convnet_convolutional_forward_propagate(third_cpu_layer, halve, convnet->acts + 2); c = convnet->acts[2]; for (y = 0; y < third_out_rows; y++) for (x = 0; x < third_out_cols; x++) for (k = 0; k < third_out_channels / 2; k++) { float p = third_out[(third_out_channels / 2 + k) * third_out_rows * third_out_cols * batch + (y * third_out_cols + x) * batch + i]; float q = c->data.f32[(y * third_out_cols + x) * third_out_channels / 2 + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer backward propagate update_params->layers[2].w += third_cpu_layer->wnum / 2; update_params->layers[2].bias += third_cpu_layer->net.convolutional.count; _ccv_convnet_convolutional_backward_propagate(third_cpu_layer, convnet->acts[2], convnet->acts[2], halve, &bc, update_params->layers + 2); for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels / 2; k++) { float p = third_back[(second_out_channels / 2 + k) * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = bc->data.f32[(y * second_out_cols + x) * second_out_channels / 2 + k]; update_params->acts[1]->data.f32[(y * second_out_cols + x) * second_out_channels + second_out_channels / 2 + k] = q; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // revert changes we made to this layer update_params->layers[2].w -= third_cpu_layer->wnum / 2; update_params->layers[2].bias -= third_cpu_layer->net.convolutional.count; third_cpu_layer->w -= third_cpu_layer->wnum / 2; third_cpu_layer->bias -= third_cpu_layer->net.convolutional.count; third_cpu_layer->input.matrix.channels = third_cpu_layer->input.matrix.channels * 2; third_cpu_layer->net.convolutional.count = third_cpu_layer->net.convolutional.count * 2; third_cpu_layer->net.convolutional.channels = third_cpu_layer->input.matrix.channels; ccv_matrix_free(halve); // second average pool layer backward propagate _ccv_convnet_average_pool_backward_propagate(second_cpu_layer, update_params->acts[1], convnet->acts[0], update_params->acts); ccv_dense_matrix_t* bb = update_params->acts[0]; for (y = 0; y < first_out_rows; y++) for (x = 0; x < first_out_cols; x++) for (k = 0; k < first_out_channels; k++) { float p = second_back[k * first_out_rows * first_out_cols * batch + (y * first_out_cols + x) * batch + i]; float q = bb->data.f32[y * first_out_cols * first_out_channels + x * first_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool bprop 2: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // first 
convolutional layer backward propagate _ccv_convnet_convolutional_backward_propagate(first_cpu_layer, update_params->acts[0], convnet->acts[0], categorized->matrix, 0, update_params->layers); } ccv_convnet_layer_t* third_cpu_configuration = update_params->layers + 2; int third_filter_rows = third_gpu_layer->net.convolutional.rows; int third_filter_cols = third_gpu_layer->net.convolutional.cols; int third_filter_count = third_gpu_layer->net.convolutional.count; int third_filter_channels = third_gpu_layer->net.convolutional.channels / 2; for (y = 0; y < third_filter_rows; y++) for (x = 0; x < third_filter_cols; x++) for (k = 0; k < third_filter_count; k++) for (c = 0; c < third_filter_channels; c++) { float p = third_cpu_configuration->w[(y * third_filter_cols + x) * third_filter_channels + k * third_filter_cols * third_filter_rows * third_filter_channels + c]; float q = third_grad[(y * third_filter_cols + x) * third_filter_count + k + c * third_filter_cols * third_filter_rows * third_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } ccv_convnet_layer_t* first_cpu_configuration = update_params->layers; int first_filter_rows = first_gpu_layer->net.convolutional.rows; int first_filter_cols = first_gpu_layer->net.convolutional.cols; int first_filter_count = first_gpu_layer->net.convolutional.count; int first_filter_channels = first_gpu_layer->net.convolutional.channels; for (y = 0; y < first_filter_rows; y++) for (x = 0; x < first_filter_cols; x++) for (k = 0; k < 1; k++) // first_filter_count; k++) for (c = 0; c < first_filter_channels; c++) { float p = first_cpu_configuration->w[(y * first_filter_cols + x) * first_filter_channels + k * first_filter_cols * first_filter_rows * first_filter_channels + c]; float q = first_grad[(y * first_filter_cols + x) * first_filter_count + k + c * first_filter_cols * first_filter_rows * first_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("conv bprop 1: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } }
aaf12dcd44354eae7ee0c4b8bab96b18b79bd129.cu
extern "C" { #include "ccv.h" } #include <ctype.h> #define CASE_TESTS // so that we don't include public available methods #include "../lib/cuda/cwc_convnet.cu" #undef USE_DISPATCH // nvcc doesn't support libdispatch #include "../lib/ccv_convnet.c" extern "C" void cwc_bench_runtime(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_convnet_train_param_t params) { int batch = params.mini_batch; int i, x, y, k, c; _cwc_convnet_alloc_reserved(convnet, batch, params.layer_params); cwc_convnet_context_t* context = GPU(convnet)->contexts; for (i = 0; i < convnet->rows * convnet->cols * convnet->channels; i++) convnet->mean_activity->data.f32[i] = 128; _cwc_convnet_batch_formation(0, categorizeds, convnet->mean_activity, 0, 0, 0, 0, ccv_size(225, 225), convnet->rows, convnet->cols, convnet->channels, 0, batch, 0, batch, context->host.input, context->host.c); cudaMemcpy(context->device.input, context->host.input, sizeof(float) * convnet->rows * convnet->cols * convnet->channels * batch, cudaMemcpyHostToDevice); ccv_convnet_t* update_params = _ccv_convnet_update_new(convnet); _ccv_convnet_update_zero(update_params); // first convolutional layer forward propagate ccv_convnet_layer_t* first_gpu_layer = GPU(convnet)->layers; _cwc_convnet_convolutional_forward_propagate(first_gpu_layer, batch, context->device.input, GPU(convnet)->forwards[0], context->device.stream); cudaStreamSynchronize(context->device.stream); int first_out_rows, first_out_cols, first_out_partition, first_out_channels = first_gpu_layer->net.convolutional.count; _cwc_convnet_layer_deduce_output_format(first_gpu_layer, &first_out_rows, &first_out_cols, &first_out_partition); float* first_out = 0; cudaMallocHost(&first_out, sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch); cudaMemcpy(first_out, GPU(convnet)->forwards[0], sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate first convolutional layer on GPU\n"); // second average pool layer forward propagate ccv_convnet_layer_t* second_gpu_layer = GPU(convnet)->layers + 1; _cwc_convnet_average_pool_forward_propagate(second_gpu_layer, batch, GPU(convnet)->forwards[0], GPU(convnet)->forwards[1], context->device.stream); cudaStreamSynchronize(context->device.stream); int second_out_rows, second_out_cols, second_out_partition, second_out_channels = second_gpu_layer->input.matrix.channels; _cwc_convnet_layer_deduce_output_format(second_gpu_layer, &second_out_rows, &second_out_cols, &second_out_partition); float* second_out = 0; cudaMallocHost(&second_out, sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch); cudaMemcpy(second_out, GPU(convnet)->forwards[1], sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate second average pool layer on GPU\n"); // third convolutional layer forward propagate ccv_convnet_layer_t* third_gpu_layer = GPU(convnet)->layers + 2; _cwc_convnet_convolutional_forward_propagate(third_gpu_layer, batch, GPU(convnet)->forwards[1], GPU(convnet)->forwards[2], context->device.stream); cudaStreamSynchronize(context->device.stream); int third_out_rows, third_out_cols, third_out_partition, third_out_channels = third_gpu_layer->net.convolutional.count; _cwc_convnet_layer_deduce_output_format(third_gpu_layer, &third_out_rows, &third_out_cols, &third_out_partition); float* third_out = 0; cudaMallocHost(&third_out, sizeof(float) * third_out_rows * 
third_out_cols * third_out_channels * batch); cudaMemcpy(third_out, GPU(convnet)->forwards[2], sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished forward propagate third convolutional layer on GPU\n"); // third convolutonal layer backward propagate cudaMemcpy(GPU(convnet)->backwards[3], GPU(convnet)->forwards[2], sizeof(float) * third_out_rows * third_out_cols * third_out_channels * batch, cudaMemcpyDeviceToDevice); ccv_convnet_layer_t* third_gpu_configuration = GPU(convnet)->configurations + 2; _cwc_convnet_convolutional_backward_propagate(third_gpu_layer, batch, GPU(convnet)->backwards[3], GPU(convnet)->forwards[2], GPU(convnet)->forwards[1], GPU(convnet)->backwards[2], third_gpu_configuration, GPU(convnet)->scratch, GPU(convnet)->unit, context->device.stream, context->device.cublas); cudaStreamSynchronize(context->device.stream); assert(cudaGetLastError() == cudaSuccess); float* third_back = 0; cudaMallocHost(&third_back, sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch); cudaMemcpy(third_back, GPU(convnet)->backwards[2], sizeof(float) * second_out_rows * second_out_cols * second_out_channels * batch, cudaMemcpyDeviceToHost); float* third_grad = 0; cudaMallocHost(&third_grad, sizeof(float) * third_gpu_layer->wnum); assert(third_grad); cudaMemcpy(third_grad, third_gpu_configuration->w, sizeof(float) * third_gpu_layer->wnum, cudaMemcpyDeviceToHost); printf("finished backward propagate third convolutional layer on GPU\n"); // second average pool layer backward propagate _cwc_convnet_average_pool_backward_propagate(second_gpu_layer, batch, GPU(convnet)->backwards[2], GPU(convnet)->backwards[1], context->device.stream); cudaStreamSynchronize(context->device.stream); assert(cudaGetLastError() == cudaSuccess); float* second_back = 0; cudaMallocHost(&second_back, sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch); cudaMemcpy(second_back, GPU(convnet)->backwards[1], sizeof(float) * first_out_rows * first_out_cols * first_out_channels * batch, cudaMemcpyDeviceToHost); printf("finished backward propagate second average pool layer on GPU\n"); // first convolutional layer backward propagate ccv_convnet_layer_t* first_gpu_configuration = GPU(convnet)->configurations; _cwc_convnet_convolutional_backward_propagate(first_gpu_layer, batch, GPU(convnet)->backwards[1], GPU(convnet)->forwards[0], context->device.input, GPU(convnet)->backwards[0], first_gpu_configuration, GPU(convnet)->scratch, GPU(convnet)->unit, context->device.stream, context->device.cublas); cudaStreamSynchronize(context->device.stream); assert(cudaGetLastError() == cudaSuccess); float* first_grad = 0; cudaMallocHost(&first_grad, sizeof(float) * first_gpu_layer->wnum); assert(first_grad); cudaMemcpy(first_grad, first_gpu_configuration->w, sizeof(float) * first_gpu_layer->wnum, cudaMemcpyDeviceToHost); printf("finished backward propagate first convolutional layer on GPU\n"); for (i = 0; i < batch; i++) { printf("doing batch %d of %d\n", i + 1, batch); ccv_categorized_t* categorized = (ccv_categorized_t*)ccv_array_get(categorizeds, i); for (x = 0; x < categorized->matrix->rows * categorized->matrix->cols * CCV_GET_CHANNEL(categorized->matrix->type); x++) categorized->matrix->data.f32[x] = categorized->matrix->data.f32[x] - 128; // first convolutional layer forward propagate ccv_convnet_layer_t* first_cpu_layer = convnet->layers; _ccv_convnet_convolutional_forward_propagate(first_cpu_layer, categorized->matrix, 
convnet->acts); ccv_dense_matrix_t* a = convnet->acts[0]; for (y = 0; y < first_out_rows; y++) for (x = 0; x < first_out_cols; x++) for (k = 0; k < first_out_channels; k++) { float p = first_out[k * first_out_rows * first_out_cols * batch + (y * first_out_cols + x) * batch + i]; float q = a->data.f32[y * first_out_cols * first_out_channels + x * first_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 1: %d %d %d %d: |%f - %f| = %f\n", i, x, y, k, p, q, delta); } // second average pool layer forward propagate ccv_convnet_layer_t* second_cpu_layer = convnet->layers + 1; _ccv_convnet_average_pool_forward_propagate(second_cpu_layer, convnet->acts[0], convnet->acts + 1); ccv_dense_matrix_t* b = convnet->acts[1]; for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels; k++) { float p = second_out[k * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = b->data.f32[y * second_out_cols * second_out_channels + x * second_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool fprop 2: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer forward propagate // we know this layer we have partition == 2, emulate that without partition support in CPU implementation ccv_convnet_layer_t* third_cpu_layer = convnet->layers + 2; third_cpu_layer->input.matrix.channels = third_cpu_layer->input.matrix.channels / 2; third_cpu_layer->net.convolutional.count = third_cpu_layer->net.convolutional.count / 2; third_cpu_layer->net.convolutional.channels = third_cpu_layer->input.matrix.channels; // first halve of third layer ccv_dense_matrix_t* halve = ccv_dense_matrix_new(second_out_rows, second_out_cols, CCV_32F | third_cpu_layer->input.matrix.channels, 0, 0); for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels / 2; k++) halve->data.f32[(y * second_out_cols + x) * second_out_channels / 2 + k] = b->data.f32[(y * second_out_cols + x) * second_out_channels + k]; _ccv_convnet_convolutional_forward_propagate(third_cpu_layer, halve, convnet->acts + 2); ccv_dense_matrix_t* c = convnet->acts[2]; for (y = 0; y < third_out_rows; y++) for (x = 0; x < third_out_cols; x++) for (k = 0; k < third_out_channels / 2; k++) { float p = third_out[k * third_out_rows * third_out_cols * batch + (y * third_out_cols + x) * batch + i]; float q = c->data.f32[(y * third_out_cols + x) * third_out_channels / 2 + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer backward propagate ccv_dense_matrix_t* bc = 0; _ccv_convnet_convolutional_backward_propagate(third_cpu_layer, convnet->acts[2], convnet->acts[2], halve, &bc, update_params->layers + 2); if (update_params->acts[1] == 0) update_params->acts[1] = ccv_dense_matrix_new(second_out_rows, second_out_cols, second_out_channels | CCV_32F, 0, 0); for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels / 2; k++) { float p = third_back[k * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = bc->data.f32[(y * second_out_cols + x) * second_out_channels / 2 + k]; update_params->acts[1]->data.f32[(y * second_out_cols + x) * second_out_channels 
+ k] = q; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // second halve of third layer third_cpu_layer->w += third_cpu_layer->wnum / 2; third_cpu_layer->bias += third_cpu_layer->net.convolutional.count; for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels / 2; k++) halve->data.f32[(y * second_out_cols + x) * second_out_channels / 2 + k] = b->data.f32[(y * second_out_cols + x) * second_out_channels + second_out_channels / 2 + k]; _ccv_convnet_convolutional_forward_propagate(third_cpu_layer, halve, convnet->acts + 2); c = convnet->acts[2]; for (y = 0; y < third_out_rows; y++) for (x = 0; x < third_out_cols; x++) for (k = 0; k < third_out_channels / 2; k++) { float p = third_out[(third_out_channels / 2 + k) * third_out_rows * third_out_cols * batch + (y * third_out_cols + x) * batch + i]; float q = c->data.f32[(y * third_out_cols + x) * third_out_channels / 2 + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv fprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // third convolutional layer backward propagate update_params->layers[2].w += third_cpu_layer->wnum / 2; update_params->layers[2].bias += third_cpu_layer->net.convolutional.count; _ccv_convnet_convolutional_backward_propagate(third_cpu_layer, convnet->acts[2], convnet->acts[2], halve, &bc, update_params->layers + 2); for (y = 0; y < second_out_rows; y++) for (x = 0; x < second_out_cols; x++) for (k = 0; k < second_out_channels / 2; k++) { float p = third_back[(second_out_channels / 2 + k) * second_out_rows * second_out_cols * batch + (y * second_out_cols + x) * batch + i]; float q = bc->data.f32[(y * second_out_cols + x) * second_out_channels / 2 + k]; update_params->acts[1]->data.f32[(y * second_out_cols + x) * second_out_channels + second_out_channels / 2 + k] = q; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // revert changes we made to this layer update_params->layers[2].w -= third_cpu_layer->wnum / 2; update_params->layers[2].bias -= third_cpu_layer->net.convolutional.count; third_cpu_layer->w -= third_cpu_layer->wnum / 2; third_cpu_layer->bias -= third_cpu_layer->net.convolutional.count; third_cpu_layer->input.matrix.channels = third_cpu_layer->input.matrix.channels * 2; third_cpu_layer->net.convolutional.count = third_cpu_layer->net.convolutional.count * 2; third_cpu_layer->net.convolutional.channels = third_cpu_layer->input.matrix.channels; ccv_matrix_free(halve); // second average pool layer backward propagate _ccv_convnet_average_pool_backward_propagate(second_cpu_layer, update_params->acts[1], convnet->acts[0], update_params->acts); ccv_dense_matrix_t* bb = update_params->acts[0]; for (y = 0; y < first_out_rows; y++) for (x = 0; x < first_out_cols; x++) for (k = 0; k < first_out_channels; k++) { float p = second_back[k * first_out_rows * first_out_cols * batch + (y * first_out_cols + x) * batch + i]; float q = bb->data.f32[y * first_out_cols * first_out_channels + x * first_out_channels + k]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("avgpool bprop 2: %d %d %d %d: |%g - %g| = %g\n", i, x, y, k, p, q, delta); } // first convolutional layer backward propagate 
_ccv_convnet_convolutional_backward_propagate(first_cpu_layer, update_params->acts[0], convnet->acts[0], categorized->matrix, 0, update_params->layers); } ccv_convnet_layer_t* third_cpu_configuration = update_params->layers + 2; int third_filter_rows = third_gpu_layer->net.convolutional.rows; int third_filter_cols = third_gpu_layer->net.convolutional.cols; int third_filter_count = third_gpu_layer->net.convolutional.count; int third_filter_channels = third_gpu_layer->net.convolutional.channels / 2; for (y = 0; y < third_filter_rows; y++) for (x = 0; x < third_filter_cols; x++) for (k = 0; k < third_filter_count; k++) for (c = 0; c < third_filter_channels; c++) { float p = third_cpu_configuration->w[(y * third_filter_cols + x) * third_filter_channels + k * third_filter_cols * third_filter_rows * third_filter_channels + c]; float q = third_grad[(y * third_filter_cols + x) * third_filter_count + k + c * third_filter_cols * third_filter_rows * third_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-4) printf("conv bprop 3: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } ccv_convnet_layer_t* first_cpu_configuration = update_params->layers; int first_filter_rows = first_gpu_layer->net.convolutional.rows; int first_filter_cols = first_gpu_layer->net.convolutional.cols; int first_filter_count = first_gpu_layer->net.convolutional.count; int first_filter_channels = first_gpu_layer->net.convolutional.channels; for (y = 0; y < first_filter_rows; y++) for (x = 0; x < first_filter_cols; x++) for (k = 0; k < 1; k++) // first_filter_count; k++) for (c = 0; c < first_filter_channels; c++) { float p = first_cpu_configuration->w[(y * first_filter_cols + x) * first_filter_channels + k * first_filter_cols * first_filter_rows * first_filter_channels + c]; float q = first_grad[(y * first_filter_cols + x) * first_filter_count + k + c * first_filter_cols * first_filter_rows * first_filter_count]; float delta = fabs(p - q) / ccv_max(ccv_max(fabs(p), fabs(q)), 1); if (delta > 1e-3) printf("conv bprop 1: %d %d %d %d: |%g - %g| = %g\n", x, y, k, c, p, q, delta); } }
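Every CPU/GPU cross-check in the benchmark pair above applies the same relative-error test to each element. A minimal standalone sketch of that test is below; it is an illustration only, assuming ccv_max behaves like an ordinary max and reusing the 1e-4 tolerance from the printf checks.

/* Illustration only, not part of the benchmark file above. */
#include <math.h>
#include <stdio.h>

/* Same relative-error rule as the checks above: the difference is scaled by the
 * larger magnitude, clamped to 1 so values near zero are compared absolutely. */
static int nearly_equal(float gpu, float cpu, float tolerance)
{
    float denom = fmaxf(fmaxf(fabsf(gpu), fabsf(cpu)), 1.0f);
    return fabsf(gpu - cpu) / denom <= tolerance;
}

int main(void)
{
    printf("%d\n", nearly_equal(1.00001f, 1.00002f, 1e-4f)); /* 1: within tolerance */
    printf("%d\n", nearly_equal(0.5f, 0.6f, 1e-4f));         /* 0: would be reported as a mismatch */
    return 0;
}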
47029704d2bfa078432215cd0641d3eefce607a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // MathDotSqrt //"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin\nvcc.exe" -ccbin //"C:\Program Files (x86)\Microsoft Visual // Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" -o main // main.cu -O3 // IDE indexing #ifdef __JETBRAINS_IDE__ #define __host__ #define __device__ #define __shared__ #define __constant__ #define __global__ #define __HIPCC__ #include <__clang_cuda_builtin_vars.h> #include <__clang_cuda_cmath.h> #include <__clang_cuda_complex_builtins.h> #include <__clang_cuda_intrinsics.h> #include <__clang_cuda_math_forward_declares.h> #include <hip/device_functions.h> #endif // reduce array of zeros #include <array> #include <memory> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <assert.h> #include <cmath> #include <fstream> #include <iostream> #include <math.h> #include <sstream> #include <string> #include <vector> // Used for sleep function #include <chrono> #include <thread> #define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__) inline void gpuAssert(hipError_t code, const char *file, int line) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", hipGetErrorString(code), code, file, line); exit(code); } } /*FILE PATHS*/ constexpr const char *INPUT_FILE_PATH = "data/big_chunk_seeds.txt"; constexpr const char *OUTPUT_FILE_PATH = "data/WorldSeeds.txt"; /*FILE PATHS*/ /*CHUNK CONSTANTS*/ constexpr int32_t CHUNK_X = 3; constexpr int32_t CHUNK_Z = -3; constexpr uint64_t INVALID_SEED = 0; /*CHUNK CONSTANTS*/ /*CUDA LAUNCH CONSTANTS*/ constexpr int32_t BLOCK_DIM_X = 128; constexpr int32_t BLOCK_DIM_Y = 1; //should be 1 constexpr int32_t BLOCK_DIM_Z = 1; //should be 1 constexpr int32_t GRID_DIM_X = 4096; constexpr int32_t GRID_DIM_Y = 1; //should be 1 constexpr int32_t GRID_DIM_Z = 1; //should be 1 /*CUDA LAUNCH CONSTANTS*/ /*MAGIC NUMBERS*/ constexpr uint64_t mod_inv(uint64_t x) { uint64_t inv = 0; uint64_t b = 1; for (int32_t i = 0; i < 16; i++) { inv |= (1ULL << i) * (b & 1); b = (b - x * (b & 1)) >> 1; } return inv; } constexpr int32_t count_trailing_zeros(uint64_t v) { int c = 0; v = (v ^ (v - 1)) >> 1; for (c = 0; v != 0; c++) { v >>= 1; } return c; } constexpr uint64_t MASK48 = ((1ULL << 48) - 1ULL); constexpr uint64_t MASK32 = ((1ULL << 32) - 1ULL); constexpr uint64_t MASK16 = ((1ULL << 16) - 1ULL); constexpr uint64_t M1 = 25214903917ULL; constexpr uint64_t ADDEND1 = 11ULL; constexpr uint64_t M2 = 205749139540585ULL; constexpr uint64_t ADDEND2 = 277363943098ULL; constexpr uint64_t M4 = 55986898099985ULL; constexpr uint64_t ADDEND4 = 49720483695876ULL; constexpr auto FIRST_MULT = (M2 * (uint64_t)CHUNK_X + M4 * (uint64_t)CHUNK_Z) & MASK16; constexpr auto MULT_TRAILING_ZEROS = count_trailing_zeros(FIRST_MULT); constexpr auto FIRST_MULT_INV = (uint64_t)mod_inv(FIRST_MULT >> MULT_TRAILING_ZEROS); constexpr auto X_COUNT = count_trailing_zeros((uint64_t)CHUNK_X); constexpr auto Z_COUNT = count_trailing_zeros((uint64_t)CHUNK_Z); constexpr auto TOTAL_COUNT = count_trailing_zeros(CHUNK_X | CHUNK_Z); constexpr auto C_MAX = (1ULL << 16); constexpr auto C_STRIDE = (1ULL << (TOTAL_COUNT + 1)); /*MAGIC NUMBERS*/ /*DETAILS*/ constexpr int32_t SEEDS_PER_LAUNCH = BLOCK_DIM_X * GRID_DIM_X; constexpr int32_t WORLD_SEEDS_PER_CHUNK_SEED = 8; constexpr size_t INPUT_SEED_ARRAY_SIZE = SEEDS_PER_LAUNCH;//SEEDS_PER_LAUNCH; constexpr size_t OUTPUT_SEED_ARRAY_SIZE = SEEDS_PER_LAUNCH * 
WORLD_SEEDS_PER_CHUNK_SEED;//1 << 20; constexpr int32_t MAX_LINE = 1000; /*DETAILS*/ __host__ __device__ int64_t next_long(uint64_t *seed) { *seed = (*seed * M1 + ADDEND1) & MASK48; int32_t u = *seed >> 16; *seed = (*seed * M1 + ADDEND1) & MASK48; return ((uint64_t)u << 32) + (int32_t)(*seed >> 16); } __host__ __device__ uint64_t make_mask(int32_t bits) { return (1ULL << bits) - 1; } __device__ int ctz(uint64_t v) { // return __popcll((v & (-v))-1); return __popcll(v ^ (v - 1)) - 1; } __device__ void clear_seed(uint64_t *bucket) { *bucket = INVALID_SEED; } __device__ void add_seed_cond(bool cond, uint64_t new_seed, uint64_t *bucket, uint32_t *index) { if(cond){ bucket[*index] = new_seed; *index += 1; } } __host__ __device__ uint64_t get_chunk_seed(uint64_t worldSeed) { uint64_t seed = (worldSeed ^ M1) & MASK48; int64_t a = next_long(&seed) / 2 * 2 + 1; int64_t b = next_long(&seed) / 2 * 2 + 1; return (uint64_t)(((CHUNK_X * a + CHUNK_Z * b) ^ worldSeed) & MASK48); } __host__ __device__ uint64_t get_partial_addend(uint64_t partialSeed, int32_t bits) { uint64_t mask = make_mask(bits); /* clang-format off */ return ((uint64_t)CHUNK_X) * (((int32_t)(((M2 * ((partialSeed ^ M1) & mask) + ADDEND2) & MASK48) >> 16)) / 2 * 2 + 1) + ((uint64_t)CHUNK_Z) * (((int32_t)(((M4 * ((partialSeed ^ M1) & mask) + ADDEND4) & MASK48) >> 16)) / 2 * 2 + 1); /* clang-format on */ } __device__ void add_world_seed(uint64_t firstAddend, uint64_t c, uint64_t chunkSeed, uint64_t *bucket, uint32_t *index) { if(ctz(firstAddend) < MULT_TRAILING_ZEROS){ return; } uint64_t bottom32BitsChunkseed = chunkSeed & MASK32; uint64_t b = (((FIRST_MULT_INV * firstAddend) >> MULT_TRAILING_ZEROS) ^ (M1 >> 16)) & make_mask(16 - MULT_TRAILING_ZEROS); if (MULT_TRAILING_ZEROS != 0) { uint64_t smallMask = make_mask(MULT_TRAILING_ZEROS); uint64_t smallMultInverse = smallMask & FIRST_MULT_INV; uint64_t target = (((b ^ (bottom32BitsChunkseed >> 16)) & smallMask) - (get_partial_addend((b << 16) + c, 32 - MULT_TRAILING_ZEROS) >> 16)) & smallMask; b += (((target * smallMultInverse) ^ (M1 >> (32 - MULT_TRAILING_ZEROS))) & smallMask) << (16 - MULT_TRAILING_ZEROS); } uint64_t bottom32BitsSeed = (b << 16) + c; uint64_t target2 = (bottom32BitsSeed ^ bottom32BitsChunkseed) >> 16; uint64_t secondAddend = (get_partial_addend(bottom32BitsSeed, 32) >> 16); secondAddend &= MASK16; uint64_t topBits = ((((FIRST_MULT_INV * (target2 - secondAddend)) >> MULT_TRAILING_ZEROS) ^ (M1 >> 32)) & make_mask(16 - MULT_TRAILING_ZEROS)); for (; topBits < (1ULL << 16); topBits += (1ULL << (16 - MULT_TRAILING_ZEROS))) { bool condition = get_chunk_seed((topBits << 32) + bottom32BitsSeed) == chunkSeed; uint64_t seed_candidate = (topBits << 32) + bottom32BitsSeed; add_seed_cond(condition, seed_candidate, bucket, index); } //__syncthreads(); } __device__ void add_some_seeds(uint64_t chunk_seed, uint64_t c, uint64_t *bucket, uint32_t *index){ constexpr auto x = (uint64_t)CHUNK_X; constexpr auto z = (uint64_t)CHUNK_Z; const auto f = chunk_seed & MASK16; const auto target = (c ^ f) & MASK16; uint64_t magic = (uint64_t)(x * ((M2 * ((c ^ M1) & MASK16) + ADDEND2) >> 16)) + (uint64_t)(z * ((M4 * ((c ^ M1) & MASK16) + ADDEND4) >> 16)); add_world_seed(target - (magic & MASK16), c, chunk_seed, bucket, index); //nvcc optimizes this branching conditional statically //no need for macros here if (CHUNK_X != 0) { add_world_seed(target - ((magic + x) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_Z != 0 && CHUNK_X != CHUNK_Z) { add_world_seed(target - ((magic + z) & MASK16), c, chunk_seed, 
bucket, index); } if (CHUNK_X != 0 && CHUNK_Z != 0 && CHUNK_X + CHUNK_Z != 0) { add_world_seed(target - ((magic + x + z) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_X != 0 && CHUNK_X != CHUNK_Z) { add_world_seed(target - ((magic + 2 * x) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_Z != 0 && CHUNK_X != CHUNK_Z) { add_world_seed(target - ((magic + 2 * z) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_X != 0 && CHUNK_Z != 0 && CHUNK_X + CHUNK_Z != 0 && CHUNK_X * 2 + CHUNK_Z != 0) { add_world_seed(target - ((magic + 2 * x + z) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_X != 0 && CHUNK_Z != 0 && CHUNK_X != CHUNK_Z && CHUNK_X + CHUNK_Z != 0 && CHUNK_X + CHUNK_Z * 2 != 0) { // is the x supposed to be multiplied add_world_seed(target - ((magic + x + 2 * z) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_X != 0 && CHUNK_Z != 0 && CHUNK_X + CHUNK_Z != 0) { add_world_seed(target - ((magic + 2 * x + 2 * z) & MASK16), c, chunk_seed, bucket, index); } } __global__ void crack(uint64_t input_seed_count, uint64_t *input_seed_array, uint64_t *output_seed_array) { const int32_t thread_id = blockIdx.x * BLOCK_DIM_X + threadIdx.x; const int32_t input_seed_index = thread_id; const int32_t output_seed_index = thread_id * WORLD_SEEDS_PER_CHUNK_SEED; if(input_seed_index >= input_seed_count){ return; } uint64_t chunk_seed = input_seed_array[thread_id]; uint32_t index_count = 0; const uint64_t start_c = X_COUNT == Z_COUNT ? chunk_seed & ((1ULL << (X_COUNT + 1)) - 1) : chunk_seed & ((1ULL << (TOTAL_COUNT + 1)) - 1) ^ (1 << TOTAL_COUNT); for(uint64_t c = start_c; c < C_MAX; c += C_STRIDE){ add_some_seeds(chunk_seed, c, output_seed_array + output_seed_index, &index_count); } } FILE *open_file(const char *path, const char *mode) { auto fp = fopen(path, mode); if (fp == nullptr) { printf("Error: could not open file %s with mode %s", path, mode); exit(1); } return fp; } int32_t count_file_length(FILE *file) { static char line[MAX_LINE]; int32_t total = 0; while (fgets(line, MAX_LINE, file)) total++; // seeks to beginning of file rewind(file); return total; } size_t file_to_buffer(FILE *source, uint64_t *dest, size_t N) { static char line[MAX_LINE]; for (size_t i = 0; i < N; i++) { if (fgets(line, MAX_LINE, source) != nullptr) { sscanf(line, "%llu", &dest[i]); // THIS IS SUPPOSED TO BE LLU //printf("seed %llu | c %llu\n", dest[i], c); } else { return i; } } return N; } int32_t buffer_to_file(uint64_t *source, FILE *dest, size_t N) { int32_t count = 0; for (size_t i = 0; i < N; i++) { if(source[i] != INVALID_SEED){ count++; fprintf(dest, "%llu\n", source[i]); // THIS IS SUPPOSED TO BE LLU } } //printf("COUNT %d\n", count); fflush(dest); return count; } int main() { using clock=std::chrono::high_resolution_clock; using h_duration=std::chrono::duration<double, std::ratio<60 * 60>>; using m_duration=std::chrono::duration<double, std::ratio<60>>; using s_duration=std::chrono::duration<double>; using ms_duration=std::chrono::duration<double, std::milli>; //my implementation doesnt work for special case of CHUNK_X == CHUNK_Z == 0 assert(CHUNK_X != 0 || CHUNK_Z != 0); setbuf(stdout, NULL); std::cout << "Init...\n"; const dim3 GRID_DIM(GRID_DIM_X, GRID_DIM_Y, GRID_DIM_Z); const dim3 BLOCK_DIM(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); std::cout << "Opening files...\n"; FILE *in = open_file(INPUT_FILE_PATH, "r"); FILE *out = open_file(OUTPUT_FILE_PATH, "w"); const int32_t total_input_seeds = count_file_length(in); uint64_t *input_seeds_cpu = (uint64_t *)malloc(sizeof(uint64_t) * 
INPUT_SEED_ARRAY_SIZE); uint64_t *output_seeds_cpu = (uint64_t *)calloc(OUTPUT_SEED_ARRAY_SIZE, sizeof(uint64_t)); //needs default zeros uint64_t *input_seeds_gpu = nullptr; uint64_t *output_seeds_gpu = nullptr; //not using managed memory because it is slow GPU_ASSERT(hipMalloc(&input_seeds_gpu, sizeof(uint64_t) * INPUT_SEED_ARRAY_SIZE)); GPU_ASSERT(hipMalloc(&output_seeds_gpu, sizeof(uint64_t) * OUTPUT_SEED_ARRAY_SIZE)); uint64_t file_input_count = file_to_buffer(in, input_seeds_cpu, INPUT_SEED_ARRAY_SIZE); GPU_ASSERT(hipMemcpy(input_seeds_gpu, input_seeds_cpu, file_input_count * sizeof(uint64_t), hipMemcpyHostToDevice)); std::cout << "Total seeds: " << total_input_seeds << "\n"; std::cout << "Launching kernel...\n"; auto start_time = clock::now(); auto prev_time = start_time; auto current_time = start_time; int32_t total_searched = 0; int32_t total_found = 0; while (file_input_count > 0) { hipLaunchKernelGGL(( crack), dim3(GRID_DIM), dim3(BLOCK_DIM), 0, 0, file_input_count, input_seeds_gpu, output_seeds_gpu); auto num_seeds_found = buffer_to_file(output_seeds_cpu, out, OUTPUT_SEED_ARRAY_SIZE); auto prev_file_input_count = file_input_count; file_input_count = file_to_buffer(in, input_seeds_cpu, INPUT_SEED_ARRAY_SIZE); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); //output_count = *output_seed_count; GPU_ASSERT(hipMemcpy(input_seeds_gpu, input_seeds_cpu, file_input_count * sizeof(uint64_t), hipMemcpyHostToDevice)); GPU_ASSERT(hipMemcpy(output_seeds_cpu, output_seeds_gpu, OUTPUT_SEED_ARRAY_SIZE * sizeof(uint64_t), hipMemcpyDeviceToHost)); GPU_ASSERT(hipMemset(output_seeds_gpu, INVALID_SEED, OUTPUT_SEED_ARRAY_SIZE * sizeof(uint64_t))); current_time = clock::now(); total_searched += prev_file_input_count; total_found += num_seeds_found; auto s_delta = s_duration(current_time - prev_time).count(); auto k_seeds_per_second = prev_file_input_count / s_delta / 1000.0; auto completion = (double)total_searched / total_input_seeds * 100; auto e_time = (double) (total_input_seeds - total_searched) / INPUT_SEED_ARRAY_SIZE * s_delta; char suffix = 's'; if(e_time >= 60 * 60){ e_time /= 3600.0; suffix = 'h'; } else if(e_time >= 60){ e_time /= 60.0; suffix = 'm'; } auto uptime = s_duration(current_time - start_time).count(); //Searched Uptime printf("Searched: %d seeds | Found: %d seeds | Speed: %.2lfk seeds/s | Completion: %.3lf%% | ETA: %.1lf%c | Uptime: %.1lfs\n", total_searched, total_found, k_seeds_per_second, completion, e_time, suffix, uptime ); // std::cout << "Searched: " << total_searched << " Found: " << total_found // << " Uptime: " << uptime << "s Seeds " << seeds_per_second << "seed/s \n"; prev_time = current_time; } total_found += buffer_to_file(output_seeds_cpu, out, OUTPUT_SEED_ARRAY_SIZE); auto stop_time = clock::now(); std::cout << "Total world seeds converted: " << total_found << " seeds\n"; std::cout << "Total execution time: " << s_duration( stop_time - start_time).count() << "s\n"; free(input_seeds_cpu); free(output_seeds_cpu); hipFree(input_seeds_gpu); hipFree(output_seeds_gpu); //hipFree(output_seed_count); fclose(in); fflush(out); fclose(out); }
47029704d2bfa078432215cd0641d3eefce607a1.cu
// MathDotSqrt //"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin\nvcc.exe" -ccbin //"C:\Program Files (x86)\Microsoft Visual // Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" -o main // main.cu -O3 // IDE indexing #ifdef __JETBRAINS_IDE__ #define __host__ #define __device__ #define __shared__ #define __constant__ #define __global__ #define __CUDACC__ #include <__clang_cuda_builtin_vars.h> #include <__clang_cuda_cmath.h> #include <__clang_cuda_complex_builtins.h> #include <__clang_cuda_intrinsics.h> #include <__clang_cuda_math_forward_declares.h> #include <device_functions.h> #endif // reduce array of zeros #include <array> #include <memory> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <assert.h> #include <cmath> #include <fstream> #include <iostream> #include <math.h> #include <sstream> #include <string> #include <vector> // Used for sleep function #include <chrono> #include <thread> #define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__) inline void gpuAssert(cudaError_t code, const char *file, int line) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line); exit(code); } } /*FILE PATHS*/ constexpr const char *INPUT_FILE_PATH = "data/big_chunk_seeds.txt"; constexpr const char *OUTPUT_FILE_PATH = "data/WorldSeeds.txt"; /*FILE PATHS*/ /*CHUNK CONSTANTS*/ constexpr int32_t CHUNK_X = 3; constexpr int32_t CHUNK_Z = -3; constexpr uint64_t INVALID_SEED = 0; /*CHUNK CONSTANTS*/ /*CUDA LAUNCH CONSTANTS*/ constexpr int32_t BLOCK_DIM_X = 128; constexpr int32_t BLOCK_DIM_Y = 1; //should be 1 constexpr int32_t BLOCK_DIM_Z = 1; //should be 1 constexpr int32_t GRID_DIM_X = 4096; constexpr int32_t GRID_DIM_Y = 1; //should be 1 constexpr int32_t GRID_DIM_Z = 1; //should be 1 /*CUDA LAUNCH CONSTANTS*/ /*MAGIC NUMBERS*/ constexpr uint64_t mod_inv(uint64_t x) { uint64_t inv = 0; uint64_t b = 1; for (int32_t i = 0; i < 16; i++) { inv |= (1ULL << i) * (b & 1); b = (b - x * (b & 1)) >> 1; } return inv; } constexpr int32_t count_trailing_zeros(uint64_t v) { int c = 0; v = (v ^ (v - 1)) >> 1; for (c = 0; v != 0; c++) { v >>= 1; } return c; } constexpr uint64_t MASK48 = ((1ULL << 48) - 1ULL); constexpr uint64_t MASK32 = ((1ULL << 32) - 1ULL); constexpr uint64_t MASK16 = ((1ULL << 16) - 1ULL); constexpr uint64_t M1 = 25214903917ULL; constexpr uint64_t ADDEND1 = 11ULL; constexpr uint64_t M2 = 205749139540585ULL; constexpr uint64_t ADDEND2 = 277363943098ULL; constexpr uint64_t M4 = 55986898099985ULL; constexpr uint64_t ADDEND4 = 49720483695876ULL; constexpr auto FIRST_MULT = (M2 * (uint64_t)CHUNK_X + M4 * (uint64_t)CHUNK_Z) & MASK16; constexpr auto MULT_TRAILING_ZEROS = count_trailing_zeros(FIRST_MULT); constexpr auto FIRST_MULT_INV = (uint64_t)mod_inv(FIRST_MULT >> MULT_TRAILING_ZEROS); constexpr auto X_COUNT = count_trailing_zeros((uint64_t)CHUNK_X); constexpr auto Z_COUNT = count_trailing_zeros((uint64_t)CHUNK_Z); constexpr auto TOTAL_COUNT = count_trailing_zeros(CHUNK_X | CHUNK_Z); constexpr auto C_MAX = (1ULL << 16); constexpr auto C_STRIDE = (1ULL << (TOTAL_COUNT + 1)); /*MAGIC NUMBERS*/ /*DETAILS*/ constexpr int32_t SEEDS_PER_LAUNCH = BLOCK_DIM_X * GRID_DIM_X; constexpr int32_t WORLD_SEEDS_PER_CHUNK_SEED = 8; constexpr size_t INPUT_SEED_ARRAY_SIZE = SEEDS_PER_LAUNCH;//SEEDS_PER_LAUNCH; constexpr size_t OUTPUT_SEED_ARRAY_SIZE = SEEDS_PER_LAUNCH * WORLD_SEEDS_PER_CHUNK_SEED;//1 << 20; constexpr int32_t MAX_LINE = 1000; /*DETAILS*/ __host__ __device__ 
int64_t next_long(uint64_t *seed) { *seed = (*seed * M1 + ADDEND1) & MASK48; int32_t u = *seed >> 16; *seed = (*seed * M1 + ADDEND1) & MASK48; return ((uint64_t)u << 32) + (int32_t)(*seed >> 16); } __host__ __device__ uint64_t make_mask(int32_t bits) { return (1ULL << bits) - 1; } __device__ int ctz(uint64_t v) { // return __popcll((v & (-v))-1); return __popcll(v ^ (v - 1)) - 1; } __device__ void clear_seed(uint64_t *bucket) { *bucket = INVALID_SEED; } __device__ void add_seed_cond(bool cond, uint64_t new_seed, uint64_t *bucket, uint32_t *index) { if(cond){ bucket[*index] = new_seed; *index += 1; } } __host__ __device__ uint64_t get_chunk_seed(uint64_t worldSeed) { uint64_t seed = (worldSeed ^ M1) & MASK48; int64_t a = next_long(&seed) / 2 * 2 + 1; int64_t b = next_long(&seed) / 2 * 2 + 1; return (uint64_t)(((CHUNK_X * a + CHUNK_Z * b) ^ worldSeed) & MASK48); } __host__ __device__ uint64_t get_partial_addend(uint64_t partialSeed, int32_t bits) { uint64_t mask = make_mask(bits); /* clang-format off */ return ((uint64_t)CHUNK_X) * (((int32_t)(((M2 * ((partialSeed ^ M1) & mask) + ADDEND2) & MASK48) >> 16)) / 2 * 2 + 1) + ((uint64_t)CHUNK_Z) * (((int32_t)(((M4 * ((partialSeed ^ M1) & mask) + ADDEND4) & MASK48) >> 16)) / 2 * 2 + 1); /* clang-format on */ } __device__ void add_world_seed(uint64_t firstAddend, uint64_t c, uint64_t chunkSeed, uint64_t *bucket, uint32_t *index) { if(ctz(firstAddend) < MULT_TRAILING_ZEROS){ return; } uint64_t bottom32BitsChunkseed = chunkSeed & MASK32; uint64_t b = (((FIRST_MULT_INV * firstAddend) >> MULT_TRAILING_ZEROS) ^ (M1 >> 16)) & make_mask(16 - MULT_TRAILING_ZEROS); if (MULT_TRAILING_ZEROS != 0) { uint64_t smallMask = make_mask(MULT_TRAILING_ZEROS); uint64_t smallMultInverse = smallMask & FIRST_MULT_INV; uint64_t target = (((b ^ (bottom32BitsChunkseed >> 16)) & smallMask) - (get_partial_addend((b << 16) + c, 32 - MULT_TRAILING_ZEROS) >> 16)) & smallMask; b += (((target * smallMultInverse) ^ (M1 >> (32 - MULT_TRAILING_ZEROS))) & smallMask) << (16 - MULT_TRAILING_ZEROS); } uint64_t bottom32BitsSeed = (b << 16) + c; uint64_t target2 = (bottom32BitsSeed ^ bottom32BitsChunkseed) >> 16; uint64_t secondAddend = (get_partial_addend(bottom32BitsSeed, 32) >> 16); secondAddend &= MASK16; uint64_t topBits = ((((FIRST_MULT_INV * (target2 - secondAddend)) >> MULT_TRAILING_ZEROS) ^ (M1 >> 32)) & make_mask(16 - MULT_TRAILING_ZEROS)); for (; topBits < (1ULL << 16); topBits += (1ULL << (16 - MULT_TRAILING_ZEROS))) { bool condition = get_chunk_seed((topBits << 32) + bottom32BitsSeed) == chunkSeed; uint64_t seed_candidate = (topBits << 32) + bottom32BitsSeed; add_seed_cond(condition, seed_candidate, bucket, index); } //__syncthreads(); } __device__ void add_some_seeds(uint64_t chunk_seed, uint64_t c, uint64_t *bucket, uint32_t *index){ constexpr auto x = (uint64_t)CHUNK_X; constexpr auto z = (uint64_t)CHUNK_Z; const auto f = chunk_seed & MASK16; const auto target = (c ^ f) & MASK16; uint64_t magic = (uint64_t)(x * ((M2 * ((c ^ M1) & MASK16) + ADDEND2) >> 16)) + (uint64_t)(z * ((M4 * ((c ^ M1) & MASK16) + ADDEND4) >> 16)); add_world_seed(target - (magic & MASK16), c, chunk_seed, bucket, index); //nvcc optimizes this branching conditional statically //no need for macros here if (CHUNK_X != 0) { add_world_seed(target - ((magic + x) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_Z != 0 && CHUNK_X != CHUNK_Z) { add_world_seed(target - ((magic + z) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_X != 0 && CHUNK_Z != 0 && CHUNK_X + CHUNK_Z != 0) { add_world_seed(target - 
((magic + x + z) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_X != 0 && CHUNK_X != CHUNK_Z) { add_world_seed(target - ((magic + 2 * x) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_Z != 0 && CHUNK_X != CHUNK_Z) { add_world_seed(target - ((magic + 2 * z) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_X != 0 && CHUNK_Z != 0 && CHUNK_X + CHUNK_Z != 0 && CHUNK_X * 2 + CHUNK_Z != 0) { add_world_seed(target - ((magic + 2 * x + z) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_X != 0 && CHUNK_Z != 0 && CHUNK_X != CHUNK_Z && CHUNK_X + CHUNK_Z != 0 && CHUNK_X + CHUNK_Z * 2 != 0) { // is the x supposed to be multiplied add_world_seed(target - ((magic + x + 2 * z) & MASK16), c, chunk_seed, bucket, index); } if (CHUNK_X != 0 && CHUNK_Z != 0 && CHUNK_X + CHUNK_Z != 0) { add_world_seed(target - ((magic + 2 * x + 2 * z) & MASK16), c, chunk_seed, bucket, index); } } __global__ void crack(uint64_t input_seed_count, uint64_t *input_seed_array, uint64_t *output_seed_array) { const int32_t thread_id = blockIdx.x * BLOCK_DIM_X + threadIdx.x; const int32_t input_seed_index = thread_id; const int32_t output_seed_index = thread_id * WORLD_SEEDS_PER_CHUNK_SEED; if(input_seed_index >= input_seed_count){ return; } uint64_t chunk_seed = input_seed_array[thread_id]; uint32_t index_count = 0; const uint64_t start_c = X_COUNT == Z_COUNT ? chunk_seed & ((1ULL << (X_COUNT + 1)) - 1) : chunk_seed & ((1ULL << (TOTAL_COUNT + 1)) - 1) ^ (1 << TOTAL_COUNT); for(uint64_t c = start_c; c < C_MAX; c += C_STRIDE){ add_some_seeds(chunk_seed, c, output_seed_array + output_seed_index, &index_count); } } FILE *open_file(const char *path, const char *mode) { auto fp = fopen(path, mode); if (fp == nullptr) { printf("Error: could not open file %s with mode %s", path, mode); exit(1); } return fp; } int32_t count_file_length(FILE *file) { static char line[MAX_LINE]; int32_t total = 0; while (fgets(line, MAX_LINE, file)) total++; // seeks to beginning of file rewind(file); return total; } size_t file_to_buffer(FILE *source, uint64_t *dest, size_t N) { static char line[MAX_LINE]; for (size_t i = 0; i < N; i++) { if (fgets(line, MAX_LINE, source) != nullptr) { sscanf(line, "%llu", &dest[i]); // THIS IS SUPPOSED TO BE LLU //printf("seed %llu | c %llu\n", dest[i], c); } else { return i; } } return N; } int32_t buffer_to_file(uint64_t *source, FILE *dest, size_t N) { int32_t count = 0; for (size_t i = 0; i < N; i++) { if(source[i] != INVALID_SEED){ count++; fprintf(dest, "%llu\n", source[i]); // THIS IS SUPPOSED TO BE LLU } } //printf("COUNT %d\n", count); fflush(dest); return count; } int main() { using clock=std::chrono::high_resolution_clock; using h_duration=std::chrono::duration<double, std::ratio<60 * 60>>; using m_duration=std::chrono::duration<double, std::ratio<60>>; using s_duration=std::chrono::duration<double>; using ms_duration=std::chrono::duration<double, std::milli>; //my implementation doesnt work for special case of CHUNK_X == CHUNK_Z == 0 assert(CHUNK_X != 0 || CHUNK_Z != 0); setbuf(stdout, NULL); std::cout << "Init...\n"; const dim3 GRID_DIM(GRID_DIM_X, GRID_DIM_Y, GRID_DIM_Z); const dim3 BLOCK_DIM(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); std::cout << "Opening files...\n"; FILE *in = open_file(INPUT_FILE_PATH, "r"); FILE *out = open_file(OUTPUT_FILE_PATH, "w"); const int32_t total_input_seeds = count_file_length(in); uint64_t *input_seeds_cpu = (uint64_t *)malloc(sizeof(uint64_t) * INPUT_SEED_ARRAY_SIZE); uint64_t *output_seeds_cpu = (uint64_t *)calloc(OUTPUT_SEED_ARRAY_SIZE, sizeof(uint64_t)); 
//needs default zeros uint64_t *input_seeds_gpu = nullptr; uint64_t *output_seeds_gpu = nullptr; //not using managed memory because it is slow GPU_ASSERT(cudaMalloc(&input_seeds_gpu, sizeof(uint64_t) * INPUT_SEED_ARRAY_SIZE)); GPU_ASSERT(cudaMalloc(&output_seeds_gpu, sizeof(uint64_t) * OUTPUT_SEED_ARRAY_SIZE)); uint64_t file_input_count = file_to_buffer(in, input_seeds_cpu, INPUT_SEED_ARRAY_SIZE); GPU_ASSERT(cudaMemcpy(input_seeds_gpu, input_seeds_cpu, file_input_count * sizeof(uint64_t), cudaMemcpyHostToDevice)); std::cout << "Total seeds: " << total_input_seeds << "\n"; std::cout << "Launching kernel...\n"; auto start_time = clock::now(); auto prev_time = start_time; auto current_time = start_time; int32_t total_searched = 0; int32_t total_found = 0; while (file_input_count > 0) { crack<<<GRID_DIM, BLOCK_DIM>>>(file_input_count, input_seeds_gpu, output_seeds_gpu); auto num_seeds_found = buffer_to_file(output_seeds_cpu, out, OUTPUT_SEED_ARRAY_SIZE); auto prev_file_input_count = file_input_count; file_input_count = file_to_buffer(in, input_seeds_cpu, INPUT_SEED_ARRAY_SIZE); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); //output_count = *output_seed_count; GPU_ASSERT(cudaMemcpy(input_seeds_gpu, input_seeds_cpu, file_input_count * sizeof(uint64_t), cudaMemcpyHostToDevice)); GPU_ASSERT(cudaMemcpy(output_seeds_cpu, output_seeds_gpu, OUTPUT_SEED_ARRAY_SIZE * sizeof(uint64_t), cudaMemcpyDeviceToHost)); GPU_ASSERT(cudaMemset(output_seeds_gpu, INVALID_SEED, OUTPUT_SEED_ARRAY_SIZE * sizeof(uint64_t))); current_time = clock::now(); total_searched += prev_file_input_count; total_found += num_seeds_found; auto s_delta = s_duration(current_time - prev_time).count(); auto k_seeds_per_second = prev_file_input_count / s_delta / 1000.0; auto completion = (double)total_searched / total_input_seeds * 100; auto e_time = (double) (total_input_seeds - total_searched) / INPUT_SEED_ARRAY_SIZE * s_delta; char suffix = 's'; if(e_time >= 60 * 60){ e_time /= 3600.0; suffix = 'h'; } else if(e_time >= 60){ e_time /= 60.0; suffix = 'm'; } auto uptime = s_duration(current_time - start_time).count(); //Searched Uptime printf("Searched: %d seeds | Found: %d seeds | Speed: %.2lfk seeds/s | Completion: %.3lf%% | ETA: %.1lf%c | Uptime: %.1lfs\n", total_searched, total_found, k_seeds_per_second, completion, e_time, suffix, uptime ); // std::cout << "Searched: " << total_searched << " Found: " << total_found // << " Uptime: " << uptime << "s Seeds " << seeds_per_second << "seed/s \n"; prev_time = current_time; } total_found += buffer_to_file(output_seeds_cpu, out, OUTPUT_SEED_ARRAY_SIZE); auto stop_time = clock::now(); std::cout << "Total world seeds converted: " << total_found << " seeds\n"; std::cout << "Total execution time: " << s_duration( stop_time - start_time).count() << "s\n"; free(input_seeds_cpu); free(output_seeds_cpu); cudaFree(input_seeds_gpu); cudaFree(output_seeds_gpu); //cudaFree(output_seed_count); fclose(in); fflush(out); fclose(out); }
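The seed-cracker pair above is built around the 48-bit java.util.Random-style LCG in next_long()/get_chunk_seed(). The host-only sketch below reproduces that step with the same constants (M1, ADDEND1, MASK48) purely as a reading aid; it is not part of the cracker.

// Illustration only: the LCG step that next_long()/get_chunk_seed() above are built on.
#include <cstdint>
#include <cstdio>

constexpr uint64_t MASK48 = (1ULL << 48) - 1ULL;
constexpr uint64_t M1 = 25214903917ULL;   // java.util.Random multiplier, as in the file above
constexpr uint64_t ADDEND1 = 11ULL;       // java.util.Random addend

// one LCG step followed by taking the top 32 of the low 48 bits, as in next_long()
static int32_t next_int32(uint64_t* seed)
{
    *seed = (*seed * M1 + ADDEND1) & MASK48;
    return (int32_t)(*seed >> 16);
}

int main()
{
    uint64_t seed = (1234ULL ^ M1) & MASK48;   // same seed scrambling as get_chunk_seed()
    int32_t hi = next_int32(&seed);
    int32_t lo = next_int32(&seed);
    int64_t value = ((uint64_t)hi << 32) + lo; // this is what next_long() returns
    printf("%lld\n", (long long)value);
    return 0;
}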
3931c45b587003a977e0db9aa94d89d476999690.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>

#include "awnn/common.h"
#include "awnn/memory.h"
#include "awnndevice/cublas_wrappers.cuh"
#include "awnndevice/device_utils.cuh"
#include "awnndevice/layer_conv_device.cuh"
#include "awnndevice/layer_sandwich_device.cuh"

/** Destroy all cache only for this context*/
void layer_context_destroy_device(struct layer_context_device* context) {
  tensor_destroy_device(&context->d_tmp);
  tensor_destroy_device(&context->d_dtmp);
}

__global__ void do_device_relu_forward(tensor_t d_x, tensor_t d_y) {
  for (uint i : grid_stride_range(0u, d_capacity(d_x))) {
    d_y.data[i] = d_x.data[i] > 0 ? d_x.data[i] : 0.0;
  }
}

__global__ void do_device_relu_backward(tensor_t d_dx, tensor_t d_x, tensor_t d_dy) {
  for (uint i : grid_stride_range(0u, d_capacity(d_x))) {
    d_dx.data[i] = d_x.data[i] > 0 ? d_dy.data[i] : 0.0;
  }
}

status_t relu_forward_device(tensor_t const d_x, lcache_t* cache, tensor_t d_y) {
  hipLaunchKernelGGL(( do_device_relu_forward), dim3(32), dim3(1024), 0, 0, d_x, d_y);
  if (cache) {
    lcache_push(cache, d_x);
  }
  return S_OK;
}

status_t relu_backward_device(tensor_t const d_dx, lcache_t* cache, tensor_t d_dy) {
  // lcache_dump_stat(cache);
  tensor_t d_x = lcache_pop(cache);
  hipLaunchKernelGGL(( do_device_relu_backward), dim3(32), dim3(1024), 0, 0, d_dx, d_x, d_dy);
  return S_OK;
}

status_t conv_relu_forward_device(hipblasHandle_t handle, tensor_t const d_x,
                                  tensor_t d_w, lcache_t* cache,
                                  conv_param_t const params, tensor_t d_y,
                                  struct layer_context_device* context) {
  AWNN_CHECK_EQ(d_x.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_w.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_y.mem_type, GPU_MEM);

  tensor_t d_tmp = context->d_tmp;

  AWNN_CHECK_EQ(S_OK, convolution_forward_device(handle, d_x, d_w, cache, params, d_tmp));
  AWNN_CHECK_EQ(S_OK, relu_forward_device(d_tmp, cache, d_y));
  // lcache_dump_stat(cache);
  return S_OK;
}

status_t conv_relu_backward_device(hipblasHandle_t handle, tensor_t d_dx,
                                   tensor_t d_dw, lcache_t* cache,
                                   conv_param_t const params, tensor_t const d_dy,
                                   struct layer_context_device* context) {
  AWNN_CHECK_EQ(d_dx.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_dw.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_dy.mem_type, GPU_MEM);
  PINF("CONV_RELU_BACKWARD");

  tensor_t d_dtmp = context->d_dtmp;
  PINF("d_dy");
  // lcache_dump_stat(cache);
  AWNN_CHECK_EQ(S_OK, relu_backward_device(d_dtmp, cache, d_dy));
  AWNN_CHECK_EQ(S_OK, convolution_backward_device(handle, d_dx, d_dw, cache, params, d_dtmp));
  return S_OK;
}

status_t conv_iden_relu_forward_device(hipblasHandle_t handle, tensor_t const d_x,
                                       tensor_t const d_iden, tensor_t d_w,
                                       lcache_t* cache, conv_param_t const params,
                                       tensor_t d_y,
                                       struct layer_context_device* context) {
  AWNN_CHECK_EQ(d_x.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_iden.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_w.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_y.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_x.dim.dims[3],
                d_y.dim.dims[3]);  // in resnet tensor h/w doesn't change in each stage

  tensor_t d_tmp = context->d_tmp;

  AWNN_CHECK_EQ(S_OK, convolution_forward_device(handle, d_x, d_w, cache, params, d_tmp));
  hipLaunchKernelGGL(( elementwise_add_inplace_device), dim3(32), dim3(1024), 0, 0, d_tmp, d_iden);
  AWNN_CHECK_EQ(S_OK, relu_forward_device(d_tmp, cache, d_y));
  return S_OK;
}

status_t conv_iden_relu_backward_device(hipblasHandle_t handle, tensor_t d_dx,
                                        tensor_t d_diden, tensor_t d_dw,
                                        lcache_t* cache, conv_param_t const params,
                                        tensor_t const d_dy,
                                        struct layer_context_device* context) {
  AWNN_CHECK_EQ(d_dx.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_diden.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_dw.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_dy.mem_type, GPU_MEM);
  AWNN_NO_USE(context);  // backward doesn;t need temp

  AWNN_CHECK_EQ(S_OK, relu_backward_device(d_diden, cache, d_dy));
  AWNN_CHECK_EQ(S_OK, convolution_backward_device(handle, d_dx, d_dw, cache, params, d_diden));
  return S_OK;
}

/** Create cache memory for this and all its childer layers */
void resblock_create_context_device(struct layer_context_device** ptr_context,
                                    tensor_t d_y) {
  // one context for this layer, the other two for its children
  uint nr_child_context = 2;
  *ptr_context = (struct layer_context_device*)mem_alloc(
      sizeof(struct layer_context_device) * (2 + nr_child_context));
  struct layer_context_device* context = *ptr_context;
  for (uint i = 0; i < nr_child_context + 2; i++) {
    context[i].d_tmp = tensor_make_alike_device(d_y);
    context[i].d_dtmp = tensor_make_alike_device(d_y);
  }
}

/** Free cache memory for this and all its childer layers */
void resblock_destroy_context_device(struct layer_context_device* context) {
  uint nr_child_context = 2;
  for (uint i = 0; i < nr_child_context + 2; i++) {
    layer_context_destroy_device(&context[i]);
  }
  mem_free(context);
}

status_t resblock_forward_device(hipblasHandle_t handle, tensor_t const d_x,
                                 tensor_t d_w1, tensor_t d_w2, lcache_t* cache,
                                 conv_param_t const params, tensor_t d_y,
                                 struct layer_context_device* context) {
  tensor_t d_tmp = context[0].d_tmp;
  // TODO: pass context
  conv_relu_forward_device(handle, d_x, d_w1, cache, params, d_tmp, &context[2]);
  conv_iden_relu_forward_device(handle, d_tmp, d_x, d_w2, cache, params, d_y, &context[3]);
  return S_OK;
}

status_t resblock_backward_device(hipblasHandle_t handle, tensor_t d_dx,
                                  tensor_t d_dw1, tensor_t d_dw2, lcache_t* cache,
                                  conv_param_t const params, tensor_t const d_dy,
                                  struct layer_context_device* context) {
  tensor_t d_dtmp = context[0].d_dtmp;
  tensor_t d_dx_iden = context[1].d_dtmp;
  conv_iden_relu_backward_device(handle, d_dtmp, d_dx_iden, d_dw2, cache, params, d_dy, &context[3]);
  // TODO: pass context
  conv_relu_backward_device(handle, d_dx, d_dw1, cache, params, d_dtmp, &context[2]);
  hipLaunchKernelGGL(( elementwise_add_inplace_device), dim3(32), dim3(1024), 0, 0, d_dx, d_dx_iden);
  return S_OK;
}
3931c45b587003a977e0db9aa94d89d476999690.cu
#include <stdlib.h>

#include "awnn/common.h"
#include "awnn/memory.h"
#include "awnndevice/cublas_wrappers.cuh"
#include "awnndevice/device_utils.cuh"
#include "awnndevice/layer_conv_device.cuh"
#include "awnndevice/layer_sandwich_device.cuh"

/** Destroy all cache only for this context*/
void layer_context_destroy_device(struct layer_context_device* context) {
  tensor_destroy_device(&context->d_tmp);
  tensor_destroy_device(&context->d_dtmp);
}

__global__ void do_device_relu_forward(tensor_t d_x, tensor_t d_y) {
  for (uint i : grid_stride_range(0u, d_capacity(d_x))) {
    d_y.data[i] = d_x.data[i] > 0 ? d_x.data[i] : 0.0;
  }
}

__global__ void do_device_relu_backward(tensor_t d_dx, tensor_t d_x, tensor_t d_dy) {
  for (uint i : grid_stride_range(0u, d_capacity(d_x))) {
    d_dx.data[i] = d_x.data[i] > 0 ? d_dy.data[i] : 0.0;
  }
}

status_t relu_forward_device(tensor_t const d_x, lcache_t* cache, tensor_t d_y) {
  do_device_relu_forward<<<32, 1024>>>(d_x, d_y);
  if (cache) {
    lcache_push(cache, d_x);
  }
  return S_OK;
}

status_t relu_backward_device(tensor_t const d_dx, lcache_t* cache, tensor_t d_dy) {
  // lcache_dump_stat(cache);
  tensor_t d_x = lcache_pop(cache);
  do_device_relu_backward<<<32, 1024>>>(d_dx, d_x, d_dy);
  return S_OK;
}

status_t conv_relu_forward_device(cublasHandle_t handle, tensor_t const d_x,
                                  tensor_t d_w, lcache_t* cache,
                                  conv_param_t const params, tensor_t d_y,
                                  struct layer_context_device* context) {
  AWNN_CHECK_EQ(d_x.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_w.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_y.mem_type, GPU_MEM);

  tensor_t d_tmp = context->d_tmp;

  AWNN_CHECK_EQ(S_OK, convolution_forward_device(handle, d_x, d_w, cache, params, d_tmp));
  AWNN_CHECK_EQ(S_OK, relu_forward_device(d_tmp, cache, d_y));
  // lcache_dump_stat(cache);
  return S_OK;
}

status_t conv_relu_backward_device(cublasHandle_t handle, tensor_t d_dx,
                                   tensor_t d_dw, lcache_t* cache,
                                   conv_param_t const params, tensor_t const d_dy,
                                   struct layer_context_device* context) {
  AWNN_CHECK_EQ(d_dx.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_dw.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_dy.mem_type, GPU_MEM);
  PINF("CONV_RELU_BACKWARD");

  tensor_t d_dtmp = context->d_dtmp;
  PINF("d_dy");
  // lcache_dump_stat(cache);
  AWNN_CHECK_EQ(S_OK, relu_backward_device(d_dtmp, cache, d_dy));
  AWNN_CHECK_EQ(S_OK, convolution_backward_device(handle, d_dx, d_dw, cache, params, d_dtmp));
  return S_OK;
}

status_t conv_iden_relu_forward_device(cublasHandle_t handle, tensor_t const d_x,
                                       tensor_t const d_iden, tensor_t d_w,
                                       lcache_t* cache, conv_param_t const params,
                                       tensor_t d_y,
                                       struct layer_context_device* context) {
  AWNN_CHECK_EQ(d_x.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_iden.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_w.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_y.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_x.dim.dims[3],
                d_y.dim.dims[3]);  // in resnet tensor h/w doesn't change in each stage

  tensor_t d_tmp = context->d_tmp;

  AWNN_CHECK_EQ(S_OK, convolution_forward_device(handle, d_x, d_w, cache, params, d_tmp));
  elementwise_add_inplace_device<<<32, 1024>>>(d_tmp, d_iden);
  AWNN_CHECK_EQ(S_OK, relu_forward_device(d_tmp, cache, d_y));
  return S_OK;
}

status_t conv_iden_relu_backward_device(cublasHandle_t handle, tensor_t d_dx,
                                        tensor_t d_diden, tensor_t d_dw,
                                        lcache_t* cache, conv_param_t const params,
                                        tensor_t const d_dy,
                                        struct layer_context_device* context) {
  AWNN_CHECK_EQ(d_dx.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_diden.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_dw.mem_type, GPU_MEM);
  AWNN_CHECK_EQ(d_dy.mem_type, GPU_MEM);
  AWNN_NO_USE(context);  // backward doesn;t need temp

  AWNN_CHECK_EQ(S_OK, relu_backward_device(d_diden, cache, d_dy));
  AWNN_CHECK_EQ(S_OK, convolution_backward_device(handle, d_dx, d_dw, cache, params, d_diden));
  return S_OK;
}

/** Create cache memory for this and all its childer layers */
void resblock_create_context_device(struct layer_context_device** ptr_context,
                                    tensor_t d_y) {
  // one context for this layer, the other two for its children
  uint nr_child_context = 2;
  *ptr_context = (struct layer_context_device*)mem_alloc(
      sizeof(struct layer_context_device) * (2 + nr_child_context));
  struct layer_context_device* context = *ptr_context;
  for (uint i = 0; i < nr_child_context + 2; i++) {
    context[i].d_tmp = tensor_make_alike_device(d_y);
    context[i].d_dtmp = tensor_make_alike_device(d_y);
  }
}

/** Free cache memory for this and all its childer layers */
void resblock_destroy_context_device(struct layer_context_device* context) {
  uint nr_child_context = 2;
  for (uint i = 0; i < nr_child_context + 2; i++) {
    layer_context_destroy_device(&context[i]);
  }
  mem_free(context);
}

status_t resblock_forward_device(cublasHandle_t handle, tensor_t const d_x,
                                 tensor_t d_w1, tensor_t d_w2, lcache_t* cache,
                                 conv_param_t const params, tensor_t d_y,
                                 struct layer_context_device* context) {
  tensor_t d_tmp = context[0].d_tmp;
  // TODO: pass context
  conv_relu_forward_device(handle, d_x, d_w1, cache, params, d_tmp, &context[2]);
  conv_iden_relu_forward_device(handle, d_tmp, d_x, d_w2, cache, params, d_y, &context[3]);
  return S_OK;
}

status_t resblock_backward_device(cublasHandle_t handle, tensor_t d_dx,
                                  tensor_t d_dw1, tensor_t d_dw2, lcache_t* cache,
                                  conv_param_t const params, tensor_t const d_dy,
                                  struct layer_context_device* context) {
  tensor_t d_dtmp = context[0].d_dtmp;
  tensor_t d_dx_iden = context[1].d_dtmp;
  conv_iden_relu_backward_device(handle, d_dtmp, d_dx_iden, d_dw2, cache, params, d_dy, &context[3]);
  // TODO: pass context
  conv_relu_backward_device(handle, d_dx, d_dw1, cache, params, d_dtmp, &context[2]);
  elementwise_add_inplace_device<<<32, 1024>>>(d_dx, d_dx_iden);
  return S_OK;
}
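The ReLU kernels in the pair above iterate with grid_stride_range(), a helper from device_utils.cuh that is not part of this pair. The sketch below shows the hand-written grid-stride loop such a helper is commonly a wrapper around, using plain float buffers so it builds without the awnn tensor types; it is an illustration only.

// Illustration only: explicit grid-stride loop, assumed equivalent of grid_stride_range().
#include <cstdio>
#include <cuda_runtime.h>

__global__ void relu_forward_plain(const float* x, float* y, int n)
{
    // each thread strides through the array by the total number of launched threads
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        y[i] = x[i] > 0.f ? x[i] : 0.f;  // same element-wise rule as do_device_relu_forward
    }
}

int main()
{
    const int n = 1 << 20;
    float *d_x, *d_y;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMalloc(&d_y, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));
    relu_forward_plain<<<32, 1024>>>(d_x, d_y, n);  // same fixed <<<32, 1024>>> shape as above
    cudaDeviceSynchronize();
    printf("done: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}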
0b5c2eaf60a333df52ced5acc37651fb8d28ba1c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO USER:
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and
 * international Copyright laws.
 *
 * This software and the information contained herein is PROPRIETARY and
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and
 * conditions of a Non-Disclosure Agreement. Any reproduction or
 * disclosure to any third party without the express written consent of
 * NVIDIA is prohibited.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
 * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
 * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
 * OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users. This source code is a "commercial item" as
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
 * "commercial computer software" and "commercial computer software
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
 * and is provided to the U.S. Government only as a commercial end item.
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
 * source code with only those rights set forth herein.
 */

/* Matrix multiplication: P = M * N.
 * Device code.
 */

#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_

#include <stdio.h>
#include "matrixmul.h"

////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
    //Multiply the two matrices
    int Row = blockIdx.y*blockDim.y+threadIdx.y;
    int Col = blockIdx.x*blockDim.x+threadIdx.x;

    if ((Row < M.width) && (Col < N.height)) {
        float Pvalue = 0;
        for (int k = 0; k < MATRIX_SIZE; ++k) {
            Pvalue += M.elements[Row*M.width+k]*N.elements[k*N.width+Col];
        }
        P.elements[Row*N.width+Col] = Pvalue;
    }
}

#endif // #ifndef _MATRIXMUL_KERNEL_H_
0b5c2eaf60a333df52ced5acc37651fb8d28ba1c.cu
/*
 * Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
 *
 * NOTICE TO USER:
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and
 * international Copyright laws.
 *
 * This software and the information contained herein is PROPRIETARY and
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and
 * conditions of a Non-Disclosure Agreement. Any reproduction or
 * disclosure to any third party without the express written consent of
 * NVIDIA is prohibited.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
 * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
 * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
 * OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users. This source code is a "commercial item" as
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
 * "commercial computer software" and "commercial computer software
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
 * and is provided to the U.S. Government only as a commercial end item.
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
 * source code with only those rights set forth herein.
 */

/* Matrix multiplication: P = M * N.
 * Device code.
 */

#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_

#include <stdio.h>
#include "matrixmul.h"

////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
    //Multiply the two matrices
    int Row = blockIdx.y*blockDim.y+threadIdx.y;
    int Col = blockIdx.x*blockDim.x+threadIdx.x;

    if ((Row < M.width) && (Col < N.height)) {
        float Pvalue = 0;
        for (int k = 0; k < MATRIX_SIZE; ++k) {
            Pvalue += M.elements[Row*M.width+k]*N.elements[k*N.width+Col];
        }
        P.elements[Row*N.width+Col] = Pvalue;
    }
}

#endif // #ifndef _MATRIXMUL_KERNEL_H_
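// A minimal host-side driver sketch for the MatrixMulKernel above. It is an
// illustration only: "matrixmul.h" is not part of this dump, so the Matrix
// struct and MATRIX_SIZE below are assumptions that stand in for it and would
// have to match the real header if this were linked against the file above.
// The kernel guards with (Row < M.width) && (Col < N.height), so the sketch
// sticks to square MATRIX_SIZE x MATRIX_SIZE matrices.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

#define MATRIX_SIZE 256                 // assumption: normally defined in matrixmul.h

struct Matrix {                         // assumption: normally defined in matrixmul.h
    int width;
    int height;
    float *elements;
};

__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P);  // defined in the file above

static Matrix makeDeviceMatrix(const std::vector<float> &host) {
    Matrix m{MATRIX_SIZE, MATRIX_SIZE, nullptr};
    cudaMalloc(&m.elements, host.size() * sizeof(float));
    cudaMemcpy(m.elements, host.data(), host.size() * sizeof(float), cudaMemcpyHostToDevice);
    return m;
}

int main() {
    const size_t n = MATRIX_SIZE * MATRIX_SIZE;
    std::vector<float> hM(n, 1.0f), hN(n, 2.0f), hP(n, 0.0f);

    Matrix dM = makeDeviceMatrix(hM);
    Matrix dN = makeDeviceMatrix(hN);
    Matrix dP = makeDeviceMatrix(hP);

    // One thread per output element; 16x16 blocks tiling the MATRIX_SIZE^2 output.
    dim3 block(16, 16);
    dim3 grid((MATRIX_SIZE + block.x - 1) / block.x, (MATRIX_SIZE + block.y - 1) / block.y);
    MatrixMulKernel<<<grid, block>>>(dM, dN, dP);

    cudaMemcpy(hP.data(), dP.elements, n * sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("P[0] = %f (expected %f)\n", hP[0], 2.0f * MATRIX_SIZE);

    cudaFree(dM.elements);
    cudaFree(dN.elements);
    cudaFree(dP.elements);
    return 0;
}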
35635c7df2269f6c12e9c57834605ebc5cc6e6e4.hip
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <hip/hip_runtime_api.h>

template<unsigned long long k>
class BitSequence
{
public:
    __host__ BitSequence()
    {
    }
    __host__ BitSequence(char array[])
    {
        hipMemcpy(this->array, array, arSize, hipMemcpyHostToHost);
    }
    __host__ __device__ inline char GetBit(unsigned long long index) const
    {
        return array[index / 8] >> (index % 8) & 1;
    }
    __host__ __device__ inline void SetBit(unsigned long long index, char value)
    {
        array[index / 8] = (array[index / 8] & (~(1 << (index % 8)))) | ((!!value) << (index % 8));
    }
    __host__ __device__ inline unsigned int *GetWord32(unsigned long long word_index)
    {
        return (unsigned int*)(array + word_index * 32 / 8);
    }
    __host__ __device__ inline unsigned long long *GetWord64(unsigned long long word_index)
    {
        return (unsigned long long*)(array + word_index * 64 / 8);
    }
    static const unsigned long long arSize = (k/64 + (!!(k%64)))*8;
private:
    char array[arSize];
};

/*void f()
{
    BitSequence<1000> bs;
    BitSequence<1000000> bs2;
    bs.GetBit(0);
    bs.SetBit(0, 0);
    bs.GetWord32(0);
    bs.GetWord64(0);
    bs2.GetBit(0);
    bs2.GetWord32(0);
    bs2.GetWord64(0);
    bs2.SetBit(0, 0);
}*/
35635c7df2269f6c12e9c57834605ebc5cc6e6e4.cu
#pragma once
#include <cuda_runtime_api.h>

template<unsigned long long k>
class BitSequence
{
public:
    __host__ BitSequence()
    {
    }
    __host__ BitSequence(char array[])
    {
        cudaMemcpy(this->array, array, arSize, cudaMemcpyHostToHost);
    }
    __host__ __device__ inline char GetBit(unsigned long long index) const
    {
        return array[index / 8] >> (index % 8) & 1;
    }
    __host__ __device__ inline void SetBit(unsigned long long index, char value)
    {
        array[index / 8] = (array[index / 8] & (~(1 << (index % 8)))) | ((!!value) << (index % 8));
    }
    __host__ __device__ inline unsigned int *GetWord32(unsigned long long word_index)
    {
        return (unsigned int*)(array + word_index * 32 / 8);
    }
    __host__ __device__ inline unsigned long long *GetWord64(unsigned long long word_index)
    {
        return (unsigned long long*)(array + word_index * 64 / 8);
    }
    static const unsigned long long arSize = (k/64 + (!!(k%64)))*8;
private:
    char array[arSize];
};

/*void f()
{
    BitSequence<1000> bs;
    BitSequence<1000000> bs2;
    bs.GetBit(0);
    bs.SetBit(0, 0);
    bs.GetWord32(0);
    bs.GetWord64(0);
    bs2.GetBit(0);
    bs2.GetWord32(0);
    bs2.GetWord64(0);
    bs2.SetBit(0, 0);
}*/
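// A host-only usage sketch for the BitSequence<k> template above; the header
// name "bitsequence.cuh" is an assumption (the dump does not name one). The
// default constructor leaves the backing array uninitialized, so the sketch
// constructs the object from an explicitly zeroed buffer.
#include <cstdio>
#include <cstring>
#include "bitsequence.cuh"   // assumed location of the BitSequence template above

int main() {
    char zeros[BitSequence<100>::arSize];
    std::memset(zeros, 0, sizeof(zeros));

    BitSequence<100> bits(zeros);   // 100 logical bits stored in 16 bytes (two 64-bit words)
    bits.SetBit(3, 1);
    bits.SetBit(64, 1);

    std::printf("bit 3  = %d\n", (int)bits.GetBit(3));     // 1
    std::printf("bit 4  = %d\n", (int)bits.GetBit(4));     // 0
    std::printf("word 1 = %llu\n", *bits.GetWord64(1));    // 1, since only bit 64 is set
    return 0;
}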
occlusion.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if _WIN32
#pragma warning(disable : 4244)
#endif

#include "./occlusion.h"

#include "../utils/cuda_helper.h"

__global__ void occlusionKernel(hipTextureObject_t positions,
                                hipSurfaceObject_t output, bool addToOutputValue,
                                int width, int height, float widthScale,
                                float heightScale, int layerIndex)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= width || y >= height)
    return;

  float minTransparency = 0.0f;
  for (int i = 0; i < widthScale; ++i)
  {
    for (int j = 0; j < heightScale; ++j)
    {
      float4 color = tex3D<float4>(positions, x * widthScale + 0.5f + i,
                                   y * heightScale + 0.5f + j, layerIndex + 0.5f);
      if (color.w > minTransparency)
        minTransparency = color.w;
    }
  }

  if (addToOutputValue)
  {
    float value;
    surf2Dread(&value, output, x * sizeof(float), y);
    float newValue = fmin(value + minTransparency, 1.0f);
    surf2Dwrite(newValue, output, x * sizeof(float), y);
  }
  else
  {
    surf2Dwrite(minTransparency, output, x * sizeof(float), y);
  }
}

namespace Placement
{

Occlusion::Occlusion(
    std::shared_ptr<CudaArrayProvider> colorProvider,
    std::shared_ptr<CudaArrayProvider> outputProvider, int layerIndex)
  : colorProvider(colorProvider), outputProvider(outputProvider),
    layerIndex(layerIndex)
{
}

Occlusion::~Occlusion()
{
  if (positions)
    hipDestroyTextureObject(positions);
  if (output)
    hipDestroySurfaceObject(output);
}

void Occlusion::addOcclusion()
{
  runKernel(true);
}

void Occlusion::calculateOcclusion()
{
  runKernel(false);
}

void Occlusion::runKernel(bool addToOutputValue)
{
  if (!positions)
    createSurfaceObjects();

  float outputWidth = outputProvider->getWidth();
  float outputHeight = outputProvider->getHeight();

  dim3 dimBlock(32, 32, 1);
  dim3 dimGrid(divUp(outputWidth, dimBlock.x), divUp(outputHeight, dimBlock.y), 1);

  float widthScale = colorProvider->getWidth() / outputWidth;
  float heightScale = colorProvider->getHeight() / outputHeight;

  hipLaunchKernelGGL(( occlusionKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, positions, output, addToOutputValue,
                     outputWidth, outputHeight, widthScale, heightScale, layerIndex);

  HANDLE_ERROR(hipDeviceSynchronize());
}

void Occlusion::createSurfaceObjects()
{
  colorProvider->map();
  outputProvider->map();

  auto resDesc = colorProvider->getResourceDesc();

  struct hipTextureDesc texDesc;
  memset(&texDesc, 0, sizeof(texDesc));
  texDesc.addressMode[0] = hipAddressModeWrap;
  texDesc.addressMode[1] = hipAddressModeWrap;
  texDesc.filterMode = hipFilterModeLinear;
  texDesc.readMode = hipReadModeElementType;
  texDesc.normalizedCoords = 0;

  hipCreateTextureObject(&positions, &resDesc, &texDesc, NULL);

  auto outputResDesc = outputProvider->getResourceDesc();
  hipCreateSurfaceObject(&output, &outputResDesc);

  colorProvider->unmap();
  outputProvider->unmap();
}

}  // namespace Placement
occlusion.cu
#if _WIN32
#pragma warning(disable : 4244)
#endif

#include "./occlusion.h"

#include "../utils/cuda_helper.h"

__global__ void occlusionKernel(cudaTextureObject_t positions,
                                cudaSurfaceObject_t output, bool addToOutputValue,
                                int width, int height, float widthScale,
                                float heightScale, int layerIndex)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= width || y >= height)
    return;

  float minTransparency = 0.0f;
  for (int i = 0; i < widthScale; ++i)
  {
    for (int j = 0; j < heightScale; ++j)
    {
      float4 color = tex3D<float4>(positions, x * widthScale + 0.5f + i,
                                   y * heightScale + 0.5f + j, layerIndex + 0.5f);
      if (color.w > minTransparency)
        minTransparency = color.w;
    }
  }

  if (addToOutputValue)
  {
    float value;
    surf2Dread(&value, output, x * sizeof(float), y);
    float newValue = fmin(value + minTransparency, 1.0f);
    surf2Dwrite(newValue, output, x * sizeof(float), y);
  }
  else
  {
    surf2Dwrite(minTransparency, output, x * sizeof(float), y);
  }
}

namespace Placement
{

Occlusion::Occlusion(
    std::shared_ptr<CudaArrayProvider> colorProvider,
    std::shared_ptr<CudaArrayProvider> outputProvider, int layerIndex)
  : colorProvider(colorProvider), outputProvider(outputProvider),
    layerIndex(layerIndex)
{
}

Occlusion::~Occlusion()
{
  if (positions)
    cudaDestroyTextureObject(positions);
  if (output)
    cudaDestroySurfaceObject(output);
}

void Occlusion::addOcclusion()
{
  runKernel(true);
}

void Occlusion::calculateOcclusion()
{
  runKernel(false);
}

void Occlusion::runKernel(bool addToOutputValue)
{
  if (!positions)
    createSurfaceObjects();

  float outputWidth = outputProvider->getWidth();
  float outputHeight = outputProvider->getHeight();

  dim3 dimBlock(32, 32, 1);
  dim3 dimGrid(divUp(outputWidth, dimBlock.x), divUp(outputHeight, dimBlock.y), 1);

  float widthScale = colorProvider->getWidth() / outputWidth;
  float heightScale = colorProvider->getHeight() / outputHeight;

  occlusionKernel<<<dimGrid, dimBlock>>>(positions, output, addToOutputValue,
                                         outputWidth, outputHeight, widthScale,
                                         heightScale, layerIndex);

  HANDLE_ERROR(cudaThreadSynchronize());
}

void Occlusion::createSurfaceObjects()
{
  colorProvider->map();
  outputProvider->map();

  auto resDesc = colorProvider->getResourceDesc();

  struct cudaTextureDesc texDesc;
  memset(&texDesc, 0, sizeof(texDesc));
  texDesc.addressMode[0] = cudaAddressModeWrap;
  texDesc.addressMode[1] = cudaAddressModeWrap;
  texDesc.filterMode = cudaFilterModeLinear;
  texDesc.readMode = cudaReadModeElementType;
  texDesc.normalizedCoords = 0;

  cudaCreateTextureObject(&positions, &resDesc, &texDesc, NULL);

  auto outputResDesc = outputProvider->getResourceDesc();
  cudaCreateSurfaceObject(&output, &outputResDesc);

  colorProvider->unmap();
  outputProvider->unmap();
}

}  // namespace Placement
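// Occlusion::runKernel sizes its grid with divUp() from "../utils/cuda_helper.h",
// which is not part of this dump; the ceiling-division helper below is a sketch
// of what such a helper conventionally looks like, not the project's actual
// definition. Note also that the .cu version synchronizes with
// cudaThreadSynchronize(), which is deprecated, while the hipified version maps
// it to hipDeviceSynchronize(), the current equivalent.
#include <cassert>

// Rounds a/b up so that gridDim * blockDim covers at least `a` elements.
inline unsigned int divUp(float a, unsigned int b)
{
  return static_cast<unsigned int>((a + b - 1) / b);
}

int main()
{
  assert(divUp(512.0f, 32) == 16);   // exact multiple of the block size
  assert(divUp(513.0f, 32) == 17);   // one extra block for the remainder
  return 0;
}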
0208d98499a238e44191d20d3c5a8ec38daf8aef.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void CopyRectangleKernel(
    float *src, int srcOffset, int srcWidth, int srcRectX, int srcRectY, int rectWidth, int rectHeight,
    float *dest, int destOffset, int destWidth, int destRectX, int destRectY
    )
{
    int id = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
        + blockDim.x*blockIdx.x //blocks preceeding current block
        + threadIdx.x;

    int size = rectWidth * rectHeight;

    if (id < size)
    {
        int localX = id % rectWidth;
        int localY = id / rectWidth;

        int srcPixelX = srcRectX + localX;
        int srcPixelY = srcRectY + localY;

        int destPixelX = destRectX + localX;
        int destPixelY = destRectY + localY;

        (dest + destOffset)[destPixelX + destPixelY * destWidth] = (src + srcOffset)[srcPixelX + srcPixelY * srcWidth];
    }
}
0208d98499a238e44191d20d3c5a8ec38daf8aef.cu
#include "includes.h" __global__ void CopyRectangleKernel( float *src, int srcOffset, int srcWidth, int srcRectX, int srcRectY, int rectWidth, int rectHeight, float *dest, int destOffset, int destWidth, int destRectX, int destRectY ) { int id = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; int size = rectWidth * rectHeight; if (id < size) { int localX = id % rectWidth; int localY = id / rectWidth; int srcPixelX = srcRectX + localX; int srcPixelY = srcRectY + localY; int destPixelX = destRectX + localX; int destPixelY = destRectY + localY; (dest + destOffset)[destPixelX + destPixelY * destWidth] = (src + srcOffset)[srcPixelX + srcPixelY * srcWidth]; } }
1146a8d3cfbbcce38678bb4311f02fe6b90ea75d.hip
// !!! This is a file automatically generated by hipify!!! /** * @file test_dynamic_vector.cu * @author Adam Rogowiec * * This file is an integral part of the master thesis entitled: * "Elaboration and implementation in CUDA technology parallel version of * estimation of multidimensional random variable density function ridge * detection algorithm." * , which is conducted under the supervision of prof. dr hab. in. Marek * Nacz. * * Institute of Control and Computation Engineering Faculty of Electronics and * Information Technology Warsaw University of Technology 2016 */ #include "cub/test_util.h" #include "cub/util_debug.cuh" #include "cub/util_device.cuh" #include "rd/gpu/block/block_dynamic_vector.cuh" #include "rd/gpu/util/dev_samples_set.cuh" #include "rd/utils/rd_params.hpp" #include <hip/hip_runtime_api.h> #include <helper_cuda.h> #include <iostream> #include <omp.h> #include <vector> #include <string> #include <cmath> static const int POINTS_PER_THREAD = 4; static const int DIM = 2; static const int SAMPLES_PER_THREAD = POINTS_PER_THREAD * DIM; static const int INIT_VEC_SIZE = 50000; //----------------------------------------------------------------------- // LOAD & STORE //----------------------------------------------------------------------- template < int BLOCK_THREADS, typename VectorT, typename T> __device__ __forceinline__ void loadFullTile( int globalTilePointsOffset, T const * in, VectorT & dynVec) { enum { TILE_SAMPLES = BLOCK_THREADS * SAMPLES_PER_THREAD, }; #ifdef RD_DEBUG if (threadIdx.x == 0) { _CubLog("load ---full--- tile offset: %d\n", globalTilePointsOffset); } #endif in += globalTilePointsOffset * DIM + threadIdx.x * DIM; T samples[POINTS_PER_THREAD][DIM]; T * dynBuffer = dynVec.begin(); #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { #pragma unroll for (int d = 0; d < DIM; ++d) { samples[p][d] = in[p * BLOCK_THREADS * DIM + d]; } } dynVec.resize(dynVec.size() + TILE_SAMPLES); dynBuffer = dynVec.begin(); int x = dynVec.size() + threadIdx.x * DIM; #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { #pragma unrolle for (int d = 0; d < DIM; ++d) { dynBuffer[x + p * BLOCK_THREADS * DIM + d] = samples[p][d]; } } dynVec.incrementItemsCnt(TILE_SAMPLES, true); } template <int BLOCK_THREADS, typename T> __device__ __forceinline__ void storeFullTile( int globalTileOffset, int blockTileOffset, T * out, T * dynBuffer) { // #ifdef RD_DEBUG // if (threadIdx.x == 0) // { // _CubLog("store global --full-- tile offset: %d, block tile offset: %d\n", globalTileOffset, blockTileOffset); // } // #endif out += globalTileOffset * DIM + threadIdx.x * DIM; dynBuffer += blockTileOffset * DIM + threadIdx.x * DIM; #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { #pragma unroll for (int d = 0; d < DIM; ++d) { out[p * BLOCK_THREADS * DIM + d] = dynBuffer[p * BLOCK_THREADS * DIM + d]; } } } template < int BLOCK_THREADS, typename VectorT, typename T> __device__ __forceinline__ void loadPartialTile( int globalTilePointsOffset, int validPoints, T const * in, VectorT & dynVec) { enum { TILE_SAMPLES = BLOCK_THREADS * SAMPLES_PER_THREAD, }; #ifdef RD_DEBUG if (threadIdx.x == 0) { _CubLog("load ---partial--- tile offset: %d, validPoints: %d\n", globalTilePointsOffset, validPoints); } #endif in += globalTilePointsOffset * DIM + threadIdx.x * DIM; T samples[POINTS_PER_THREAD][DIM]; T * dynBuffer = dynVec.begin(); #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { if (p * BLOCK_THREADS + threadIdx.x < validPoints) { #pragma unroll for (int d = 0; d < DIM; ++d) 
{ samples[p][d] = in[p * BLOCK_THREADS * DIM + d]; } } } dynVec.resize(dynVec.size() + validPoints * DIM); dynBuffer = dynVec.begin(); int x = dynVec.size() + threadIdx.x * DIM; #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { if (p * BLOCK_THREADS + threadIdx.x < validPoints) { #pragma unroll for (int d = 0; d < DIM; ++d) { dynBuffer[x + p * BLOCK_THREADS * DIM + d] = samples[p][d]; } } } dynVec.incrementItemsCnt(validPoints, true); } template <int BLOCK_THREADS, typename T> __device__ __forceinline__ void storePartialTile( int globalTileOffset, int blockTileOffset, int validPoints, T * out, T * dynBuffer) { out += globalTileOffset * DIM + threadIdx.x * DIM; dynBuffer += blockTileOffset * DIM + threadIdx.x * DIM; // #ifdef RD_DEBUG // if (threadIdx.x == 0) // { // _CubLog("store global --partial-- tile offset: %d, block tile offset: %d, validPoints: %d \n", globalTileOffset, blockTileOffset, validPoints); // } // #endif #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { if (p * BLOCK_THREADS + threadIdx.x < validPoints) { #pragma unroll for (int d = 0; d < DIM; ++d) { out[p * BLOCK_THREADS * DIM + d] = dynBuffer[p * BLOCK_THREADS * DIM + d]; } } } } template <int BLOCK_THREADS, typename T> static __global__ void kernel(T const *in, T *out, int size) { enum { TILE_POINTS = BLOCK_THREADS * POINTS_PER_THREAD, }; const int tileCount = (size + TILE_POINTS - 1) / TILE_POINTS; typedef rd::gpu::BlockDynamicVector< BLOCK_THREADS, SAMPLES_PER_THREAD, T, 8, float> VectorT; // VectorT dynVec(INIT_VEC_SIZE, 2); VectorT dynVec({0.01f, 0.025f, 0.05f, 0.1f, 0.15f, 0.25f, 0.66f, 1.0f}, (float)size * DIM); // VectorT dynVec({ (unsigned int)(0.1f * size * DIM), // (unsigned int)(0.25f * size * DIM), // (unsigned int)(0.66f * size * DIM), // (unsigned int)(1.0f * size * DIM)}); for (int t = blockIdx.x; t < tileCount; t += gridDim.x) { int globalTileOffset = t * TILE_POINTS; if (globalTileOffset + TILE_POINTS > size) { loadPartialTile<BLOCK_THREADS>(globalTileOffset, size - globalTileOffset, in, dynVec); } else { loadFullTile<BLOCK_THREADS>(globalTileOffset, in, dynVec); } } __syncthreads(); // dynVec.print(">>> Tiles loaded.", 0); for (int t = blockIdx.x, k = 0; t < tileCount; t += gridDim.x, ++k) { int globalTileOffset = t * TILE_POINTS; int blockTileOffset = k * TILE_POINTS; if (globalTileOffset + TILE_POINTS > size) { storePartialTile<BLOCK_THREADS>(globalTileOffset, blockTileOffset, size - globalTileOffset, out, dynVec.begin()); } else { storeFullTile<BLOCK_THREADS>(globalTileOffset, blockTileOffset, out, dynVec.begin()); } } __syncthreads(); dynVec.clear(); } //----------------------------------------------------------------------- // TEST //----------------------------------------------------------------------- template <typename T> void test() { rd::RDParams<T> rdp; rd::RDSpiralParams<T> rds; rdp.dim = DIM; // rdp.np = size_t(2 * 1e4); // rds.a = 35; // rds.b = 25; // rds.sigma = 8; rds.loadFromFile = true; // rds.file = "segment6D_50K.txt"; // rds.file = "segment6D_20K_ones.txt"; // rds.file = "spiral3D_20K.txt"; rds.file = "spiral3D_1M.txt"; // rds.file = "spiral3D_100K.txt"; std::cout << "generate data.. 
" << std::endl; std::vector<std::string> samplesDir{"../../examples/data/nd_segments/", "../../examples/data/spirals/"}; rd::gpu::Samples<T> d_samplesSet(rdp, rds, samplesDir, DIM); checkCudaErrors(hipDeviceSynchronize()); unsigned int nPoints = rdp.np; unsigned int nElements = nPoints * DIM; std::cout << "nPoints: " << nPoints; std::cout << "\nnElements: " << nElements; std::cout << "\nmemSize: " << nElements * sizeof(T) / 1024.0 / 1024.0 << "(MB)\n"; T *d_in, *h_in; T *d_out, *h_out; GpuTimer timer; std::cout << "allocate and copy data... " << std::endl; h_in = new T[nElements]; h_out = new T[nElements]; checkCudaErrors(hipMalloc((void**)&d_in, nElements * sizeof(T))); checkCudaErrors(hipMalloc((void**)&d_out, nElements * sizeof(T))); checkCudaErrors(hipMemcpy(d_in, d_samplesSet.samples_, nElements * sizeof(T), hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(h_in, d_samplesSet.samples_, nElements * sizeof(T), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemset(d_out, 0, nElements * sizeof(T))); checkCudaErrors(hipDeviceSynchronize()); dim3 gridSize(1); const int blockThreads = 64; const int iterations = 100; // gridSize.x = 4; int deviceOrdinal; checkCudaErrors(hipGetDevice(&deviceOrdinal)); // Get SM count int smCount; checkCudaErrors(hipDeviceGetAttribute(&smCount, hipDeviceAttributeMultiprocessorCount, deviceOrdinal)); typedef void (*KernelPtrT)(T const *, T *, int); KernelPtrT kernelPtr = kernel<blockThreads>; // get SM occupancy int smOccupancy; checkCudaErrors(cub::MaxSmOccupancy( smOccupancy, kernelPtr, const_cast<int&>(blockThreads)) ); gridSize.x = smCount * smOccupancy; printf("smCount: %d, smOccupancy: %d\n", smCount, smOccupancy); checkCudaErrors(hipDeviceSetLimit(hipLimitMallocHeapSize, nElements * 0.5f * gridSize.x * sizeof(T))); size_t deviceHeapSize = 0; checkCudaErrors(hipDeviceGetLimit(&deviceHeapSize, hipLimitMallocHeapSize)); std::cout << "-- Device malloc heap size: " << deviceHeapSize / 1024 / 1024 << "(MB)\n"; float avgMilis, gigaRate, gigaBandwidth; bool success = true; // check correctnes iteration printf("invoke correctness check, kernel<<<%d, %d>>>\n", gridSize.x, blockThreads); hipLaunchKernelGGL(( kernel<blockThreads>), dim3(gridSize), dim3(blockThreads), 0, 0, d_in, d_out, nPoints); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemcpy(h_out, d_out, nElements * sizeof(T), hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); #pragma omp parallel for schedule(static) for (unsigned int k = 0; k < nElements; ++k) { T value = h_in[k]; if (h_out[k] != value) { success = false; printf("ERROR! is h_out[%d]: %f ---> should be: %f\n", k, h_out[k], value); } } if (success) { std::cout << "\tSUCCESS!\n"; } else { std::cout << "---- INCORECT RESULTS!----- " << std::endl; // clean-up delete[] h_in; delete[] h_out; checkCudaErrors(hipFree(d_in)); checkCudaErrors(hipFree(d_out)); exit(1); } #if !defined(RD_DEBUG) && !defined(RD_PROFILE) std::cout << "Measure performance... 
" << std::endl; // warm-up hipLaunchKernelGGL(( kernel<blockThreads>), dim3(gridSize), dim3(blockThreads), 0, 0, d_in, d_out, nPoints); timer.Start(); for (int i = 0; i < iterations; ++i) { hipLaunchKernelGGL(( kernel<blockThreads>), dim3(gridSize), dim3(blockThreads), 0, 0, d_in, d_out, nPoints); checkCudaErrors(hipGetLastError()); } timer.Stop(); avgMilis = timer.ElapsedMillis() / float(iterations); checkCudaErrors(hipDeviceSynchronize()); gigaRate = nElements * 4 / avgMilis / 1000.0 / 1000.0; gigaBandwidth = gigaRate * sizeof(T); printf("----- avgMilis: %f, gigaBandwidth: %f\n", avgMilis, gigaBandwidth); std::cout.flush(); #endif // clean-up delete[] h_in; delete[] h_out; checkCudaErrors(hipFree(d_in)); checkCudaErrors(hipFree(d_out)); } int main() { checkCudaErrors(deviceInit()); test<float>(); checkCudaErrors(hipDeviceReset()); return 0; }
1146a8d3cfbbcce38678bb4311f02fe6b90ea75d.cu
/** * @file test_dynamic_vector.cu * @author Adam Rogowiec * * This file is an integral part of the master thesis entitled: * "Elaboration and implementation in CUDA technology parallel version of * estimation of multidimensional random variable density function ridge * detection algorithm." * , which is conducted under the supervision of prof. dr hab. inż. Marek * Nałęcz. * * Institute of Control and Computation Engineering Faculty of Electronics and * Information Technology Warsaw University of Technology 2016 */ #include "cub/test_util.h" #include "cub/util_debug.cuh" #include "cub/util_device.cuh" #include "rd/gpu/block/block_dynamic_vector.cuh" #include "rd/gpu/util/dev_samples_set.cuh" #include "rd/utils/rd_params.hpp" #include <cuda_runtime_api.h> #include <helper_cuda.h> #include <iostream> #include <omp.h> #include <vector> #include <string> #include <cmath> static const int POINTS_PER_THREAD = 4; static const int DIM = 2; static const int SAMPLES_PER_THREAD = POINTS_PER_THREAD * DIM; static const int INIT_VEC_SIZE = 50000; //----------------------------------------------------------------------- // LOAD & STORE //----------------------------------------------------------------------- template < int BLOCK_THREADS, typename VectorT, typename T> __device__ __forceinline__ void loadFullTile( int globalTilePointsOffset, T const * in, VectorT & dynVec) { enum { TILE_SAMPLES = BLOCK_THREADS * SAMPLES_PER_THREAD, }; #ifdef RD_DEBUG if (threadIdx.x == 0) { _CubLog("load ---full--- tile offset: %d\n", globalTilePointsOffset); } #endif in += globalTilePointsOffset * DIM + threadIdx.x * DIM; T samples[POINTS_PER_THREAD][DIM]; T * dynBuffer = dynVec.begin(); #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { #pragma unroll for (int d = 0; d < DIM; ++d) { samples[p][d] = in[p * BLOCK_THREADS * DIM + d]; } } dynVec.resize(dynVec.size() + TILE_SAMPLES); dynBuffer = dynVec.begin(); int x = dynVec.size() + threadIdx.x * DIM; #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { #pragma unrolle for (int d = 0; d < DIM; ++d) { dynBuffer[x + p * BLOCK_THREADS * DIM + d] = samples[p][d]; } } dynVec.incrementItemsCnt(TILE_SAMPLES, true); } template <int BLOCK_THREADS, typename T> __device__ __forceinline__ void storeFullTile( int globalTileOffset, int blockTileOffset, T * out, T * dynBuffer) { // #ifdef RD_DEBUG // if (threadIdx.x == 0) // { // _CubLog("store global --full-- tile offset: %d, block tile offset: %d\n", globalTileOffset, blockTileOffset); // } // #endif out += globalTileOffset * DIM + threadIdx.x * DIM; dynBuffer += blockTileOffset * DIM + threadIdx.x * DIM; #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { #pragma unroll for (int d = 0; d < DIM; ++d) { out[p * BLOCK_THREADS * DIM + d] = dynBuffer[p * BLOCK_THREADS * DIM + d]; } } } template < int BLOCK_THREADS, typename VectorT, typename T> __device__ __forceinline__ void loadPartialTile( int globalTilePointsOffset, int validPoints, T const * in, VectorT & dynVec) { enum { TILE_SAMPLES = BLOCK_THREADS * SAMPLES_PER_THREAD, }; #ifdef RD_DEBUG if (threadIdx.x == 0) { _CubLog("load ---partial--- tile offset: %d, validPoints: %d\n", globalTilePointsOffset, validPoints); } #endif in += globalTilePointsOffset * DIM + threadIdx.x * DIM; T samples[POINTS_PER_THREAD][DIM]; T * dynBuffer = dynVec.begin(); #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { if (p * BLOCK_THREADS + threadIdx.x < validPoints) { #pragma unroll for (int d = 0; d < DIM; ++d) { samples[p][d] = in[p * BLOCK_THREADS * DIM + d]; } } } 
dynVec.resize(dynVec.size() + validPoints * DIM); dynBuffer = dynVec.begin(); int x = dynVec.size() + threadIdx.x * DIM; #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { if (p * BLOCK_THREADS + threadIdx.x < validPoints) { #pragma unroll for (int d = 0; d < DIM; ++d) { dynBuffer[x + p * BLOCK_THREADS * DIM + d] = samples[p][d]; } } } dynVec.incrementItemsCnt(validPoints, true); } template <int BLOCK_THREADS, typename T> __device__ __forceinline__ void storePartialTile( int globalTileOffset, int blockTileOffset, int validPoints, T * out, T * dynBuffer) { out += globalTileOffset * DIM + threadIdx.x * DIM; dynBuffer += blockTileOffset * DIM + threadIdx.x * DIM; // #ifdef RD_DEBUG // if (threadIdx.x == 0) // { // _CubLog("store global --partial-- tile offset: %d, block tile offset: %d, validPoints: %d \n", globalTileOffset, blockTileOffset, validPoints); // } // #endif #pragma unroll for (int p = 0; p < POINTS_PER_THREAD; ++p) { if (p * BLOCK_THREADS + threadIdx.x < validPoints) { #pragma unroll for (int d = 0; d < DIM; ++d) { out[p * BLOCK_THREADS * DIM + d] = dynBuffer[p * BLOCK_THREADS * DIM + d]; } } } } template <int BLOCK_THREADS, typename T> static __global__ void kernel(T const *in, T *out, int size) { enum { TILE_POINTS = BLOCK_THREADS * POINTS_PER_THREAD, }; const int tileCount = (size + TILE_POINTS - 1) / TILE_POINTS; typedef rd::gpu::BlockDynamicVector< BLOCK_THREADS, SAMPLES_PER_THREAD, T, 8, float> VectorT; // VectorT dynVec(INIT_VEC_SIZE, 2); VectorT dynVec({0.01f, 0.025f, 0.05f, 0.1f, 0.15f, 0.25f, 0.66f, 1.0f}, (float)size * DIM); // VectorT dynVec({ (unsigned int)(0.1f * size * DIM), // (unsigned int)(0.25f * size * DIM), // (unsigned int)(0.66f * size * DIM), // (unsigned int)(1.0f * size * DIM)}); for (int t = blockIdx.x; t < tileCount; t += gridDim.x) { int globalTileOffset = t * TILE_POINTS; if (globalTileOffset + TILE_POINTS > size) { loadPartialTile<BLOCK_THREADS>(globalTileOffset, size - globalTileOffset, in, dynVec); } else { loadFullTile<BLOCK_THREADS>(globalTileOffset, in, dynVec); } } __syncthreads(); // dynVec.print(">>> Tiles loaded.", 0); for (int t = blockIdx.x, k = 0; t < tileCount; t += gridDim.x, ++k) { int globalTileOffset = t * TILE_POINTS; int blockTileOffset = k * TILE_POINTS; if (globalTileOffset + TILE_POINTS > size) { storePartialTile<BLOCK_THREADS>(globalTileOffset, blockTileOffset, size - globalTileOffset, out, dynVec.begin()); } else { storeFullTile<BLOCK_THREADS>(globalTileOffset, blockTileOffset, out, dynVec.begin()); } } __syncthreads(); dynVec.clear(); } //----------------------------------------------------------------------- // TEST //----------------------------------------------------------------------- template <typename T> void test() { rd::RDParams<T> rdp; rd::RDSpiralParams<T> rds; rdp.dim = DIM; // rdp.np = size_t(2 * 1e4); // rds.a = 35; // rds.b = 25; // rds.sigma = 8; rds.loadFromFile = true; // rds.file = "segment6D_50K.txt"; // rds.file = "segment6D_20K_ones.txt"; // rds.file = "spiral3D_20K.txt"; rds.file = "spiral3D_1M.txt"; // rds.file = "spiral3D_100K.txt"; std::cout << "generate data.. 
" << std::endl; std::vector<std::string> samplesDir{"../../examples/data/nd_segments/", "../../examples/data/spirals/"}; rd::gpu::Samples<T> d_samplesSet(rdp, rds, samplesDir, DIM); checkCudaErrors(cudaDeviceSynchronize()); unsigned int nPoints = rdp.np; unsigned int nElements = nPoints * DIM; std::cout << "nPoints: " << nPoints; std::cout << "\nnElements: " << nElements; std::cout << "\nmemSize: " << nElements * sizeof(T) / 1024.0 / 1024.0 << "(MB)\n"; T *d_in, *h_in; T *d_out, *h_out; GpuTimer timer; std::cout << "allocate and copy data... " << std::endl; h_in = new T[nElements]; h_out = new T[nElements]; checkCudaErrors(cudaMalloc((void**)&d_in, nElements * sizeof(T))); checkCudaErrors(cudaMalloc((void**)&d_out, nElements * sizeof(T))); checkCudaErrors(cudaMemcpy(d_in, d_samplesSet.samples_, nElements * sizeof(T), cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(h_in, d_samplesSet.samples_, nElements * sizeof(T), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemset(d_out, 0, nElements * sizeof(T))); checkCudaErrors(cudaDeviceSynchronize()); dim3 gridSize(1); const int blockThreads = 64; const int iterations = 100; // gridSize.x = 4; int deviceOrdinal; checkCudaErrors(cudaGetDevice(&deviceOrdinal)); // Get SM count int smCount; checkCudaErrors(cudaDeviceGetAttribute(&smCount, cudaDevAttrMultiProcessorCount, deviceOrdinal)); typedef void (*KernelPtrT)(T const *, T *, int); KernelPtrT kernelPtr = kernel<blockThreads>; // get SM occupancy int smOccupancy; checkCudaErrors(cub::MaxSmOccupancy( smOccupancy, kernelPtr, const_cast<int&>(blockThreads)) ); gridSize.x = smCount * smOccupancy; printf("smCount: %d, smOccupancy: %d\n", smCount, smOccupancy); checkCudaErrors(cudaDeviceSetLimit(cudaLimitMallocHeapSize, nElements * 0.5f * gridSize.x * sizeof(T))); size_t deviceHeapSize = 0; checkCudaErrors(cudaDeviceGetLimit(&deviceHeapSize, cudaLimitMallocHeapSize)); std::cout << "-- Device malloc heap size: " << deviceHeapSize / 1024 / 1024 << "(MB)\n"; float avgMilis, gigaRate, gigaBandwidth; bool success = true; // check correctnes iteration printf("invoke correctness check, kernel<<<%d, %d>>>\n", gridSize.x, blockThreads); kernel<blockThreads><<<gridSize, blockThreads>>>(d_in, d_out, nPoints); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(h_out, d_out, nElements * sizeof(T), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); #pragma omp parallel for schedule(static) for (unsigned int k = 0; k < nElements; ++k) { T value = h_in[k]; if (h_out[k] != value) { success = false; printf("ERROR! is h_out[%d]: %f ---> should be: %f\n", k, h_out[k], value); } } if (success) { std::cout << "\tSUCCESS!\n"; } else { std::cout << "---- INCORECT RESULTS!----- " << std::endl; // clean-up delete[] h_in; delete[] h_out; checkCudaErrors(cudaFree(d_in)); checkCudaErrors(cudaFree(d_out)); exit(1); } #if !defined(RD_DEBUG) && !defined(RD_PROFILE) std::cout << "Measure performance... 
" << std::endl; // warm-up kernel<blockThreads><<<gridSize, blockThreads>>>(d_in, d_out, nPoints); timer.Start(); for (int i = 0; i < iterations; ++i) { kernel<blockThreads><<<gridSize, blockThreads>>>(d_in, d_out, nPoints); checkCudaErrors(cudaGetLastError()); } timer.Stop(); avgMilis = timer.ElapsedMillis() / float(iterations); checkCudaErrors(cudaDeviceSynchronize()); gigaRate = nElements * 4 / avgMilis / 1000.0 / 1000.0; gigaBandwidth = gigaRate * sizeof(T); printf("----- avgMilis: %f, gigaBandwidth: %f\n", avgMilis, gigaBandwidth); std::cout.flush(); #endif // clean-up delete[] h_in; delete[] h_out; checkCudaErrors(cudaFree(d_in)); checkCudaErrors(cudaFree(d_out)); } int main() { checkCudaErrors(deviceInit()); test<float>(); checkCudaErrors(cudaDeviceReset()); return 0; }
4bf41986d67667b5052e3f80dfbe3013a6a453d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <benchmark/benchmark.h> #include <hipcub/hipcub.hpp> #include <cuda/std/atomic> #include <iostream> #include <new> #include <thrust/iterator/iterator_traits.h> template <std::size_t block_size, typename T> __global__ void simple_reduction(std::size_t input_size, T *global_result) { auto tid = threadIdx.x + blockIdx.x * gridDim.x; T thread_data{}; while (tid < input_size) { ++thread_data; tid += blockDim.x * gridDim.x; } using BlockReduce = hipcub::BlockReduce<T, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; T block_result = BlockReduce(temp_storage).Sum(thread_data); if (threadIdx.x == 0) atomicAdd(global_result, block_result); } template <typename T> __global__ void zero(T *count) { new (count) T{0}; } template <std::size_t block_size, typename T> __global__ void std_reduction( std::size_t input_size, cuda::atomic<T, cuda::thread_scope_system> *global_result, cuda::atomic<T, cuda::thread_scope_device> *device_result, cuda::atomic<unsigned int, cuda::thread_scope_device> *atomic_count) { auto tid = threadIdx.x + blockIdx.x * gridDim.x; T thread_data{}; while (tid < input_size) { ++thread_data; tid += blockDim.x * gridDim.x; } using BlockReduce = hipcub::BlockReduce<T, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; T block_result = BlockReduce(temp_storage).Sum(thread_data); bool is_last_block_done = false; if (threadIdx.x == 0) { device_result->fetch_add(block_result, cuda::std::memory_order_relaxed); unsigned value = atomic_count->fetch_add(1, cuda::memory_order_release); is_last_block_done = value == (gridDim.x - 1); } if (is_last_block_done) { // copy result to global buffer if (threadIdx.x == 0) { global_result->store(device_result->load(cuda::std::memory_order_relaxed), cuda::std::memory_order_relaxed); device_result->store( 0, cuda::std::memory_order_relaxed); // set to zero for next time atomic_count->store( 0, cuda::std::memory_order_relaxed); // set to zero for next time } } } static void generate_size(benchmark::internal::Benchmark *b) { constexpr auto multiplier{10}; constexpr auto min{10'000}; constexpr auto max{100'000'000}; for (auto size = min; size <= max; size *= multiplier) { b->Args({size}); } } template <typename T> static void BM_std_pinned_memory(::benchmark::State &state) { auto size = state.range(0); using count_t = cuda::atomic<unsigned int, cuda::thread_scope_device>; count_t *atomic_count; hipMalloc(&atomic_count, sizeof(count_t)); hipLaunchKernelGGL(( zero), dim3(1), dim3(1), 0, 0, atomic_count); using device_atomic = cuda::atomic<T, cuda::thread_scope_device>; device_atomic *d_result{}; hipMalloc(&d_result, sizeof(device_atomic)); hipLaunchKernelGGL(( zero), dim3(1), dim3(1), 0, 0, d_result); using system_atomic = cuda::atomic<T, cuda::thread_scope_system>; system_atomic *hd_result{}; hipHostMalloc(&hd_result, sizeof(system_atomic)); T h_result{}; constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 1) / size; for (auto _ : state) { *hd_result = 0; hipLaunchKernelGGL(( std_reduction<block_size>) , dim3(block_size), dim3(grid_size), 0, 0, size, hd_result, d_result, atomic_count); while (!hd_result->load(cuda::memory_order_acquire)) ; benchmark::DoNotOptimize(h_result = *hd_result); } hipHostFree(hd_result); hipFree(d_result); hipFree(atomic_count); } BENCHMARK_TEMPLATE(BM_std_pinned_memory, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond); template <typename T> static void 
BM_device_memory(::benchmark::State &state) { auto size = state.range(0); T *d_result{}; T h_result{}; hipMalloc(&d_result, sizeof(T)); constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 1) / size; for (auto _ : state) { hipMemset(d_result, 0, sizeof(T)); hipLaunchKernelGGL(( simple_reduction<block_size>), dim3(block_size), dim3(grid_size), 0, 0, size, d_result); hipMemcpy(&h_result, d_result, sizeof(h_result), hipMemcpyDefault); } hipFree(d_result); } BENCHMARK_TEMPLATE(BM_device_memory, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond); template <typename T> static void BM_managed_memory(::benchmark::State &state) { auto size = state.range(0); T *d_result{}; T h_result{}; hipMallocManaged(&d_result, sizeof(T)); constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 1) / size; for (auto _ : state) { *d_result = 0; hipLaunchKernelGGL(( simple_reduction<block_size>), dim3(block_size), dim3(grid_size), 0, 0, size, d_result); hipDeviceSynchronize(); benchmark::DoNotOptimize(h_result = *d_result); } hipFree(d_result); } BENCHMARK_TEMPLATE(BM_managed_memory, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond); template <typename T> static void BM_managed_memory_prefetch(::benchmark::State &state) { auto size = state.range(0); T *d_result{}; T h_result{}; hipMallocManaged(&d_result, sizeof(T)); constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 1) / size; for (auto _ : state) { *d_result = 0; hipMemPrefetchAsync(d_result, sizeof(T), 0); hipLaunchKernelGGL(( simple_reduction<block_size>), dim3(block_size), dim3(grid_size), 0, 0, size, d_result); hipDeviceSynchronize(); benchmark::DoNotOptimize(h_result = *d_result); } hipFree(d_result); } BENCHMARK_TEMPLATE(BM_managed_memory_prefetch, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond); template <typename T> static void BM_pinned_memory(::benchmark::State &state) { auto size = state.range(0); T *d_result{}; T h_result{}; hipHostMalloc(&d_result, sizeof(T)); constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 1) / size; for (auto _ : state) { *d_result = 0; hipLaunchKernelGGL(( simple_reduction<block_size>), dim3(block_size), dim3(grid_size), 0, 0, size, d_result); hipDeviceSynchronize(); benchmark::DoNotOptimize(h_result = *d_result); } hipHostFree(d_result); } BENCHMARK_TEMPLATE(BM_pinned_memory, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond);
4bf41986d67667b5052e3f80dfbe3013a6a453d7.cu
#include <benchmark/benchmark.h> #include <cub/cub.cuh> #include <cuda/std/atomic> #include <iostream> #include <new> #include <thrust/iterator/iterator_traits.h> template <std::size_t block_size, typename T> __global__ void simple_reduction(std::size_t input_size, T *global_result) { auto tid = threadIdx.x + blockIdx.x * gridDim.x; T thread_data{}; while (tid < input_size) { ++thread_data; tid += blockDim.x * gridDim.x; } using BlockReduce = cub::BlockReduce<T, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; T block_result = BlockReduce(temp_storage).Sum(thread_data); if (threadIdx.x == 0) atomicAdd(global_result, block_result); } template <typename T> __global__ void zero(T *count) { new (count) T{0}; } template <std::size_t block_size, typename T> __global__ void std_reduction( std::size_t input_size, cuda::atomic<T, cuda::thread_scope_system> *global_result, cuda::atomic<T, cuda::thread_scope_device> *device_result, cuda::atomic<unsigned int, cuda::thread_scope_device> *atomic_count) { auto tid = threadIdx.x + blockIdx.x * gridDim.x; T thread_data{}; while (tid < input_size) { ++thread_data; tid += blockDim.x * gridDim.x; } using BlockReduce = cub::BlockReduce<T, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; T block_result = BlockReduce(temp_storage).Sum(thread_data); bool is_last_block_done = false; if (threadIdx.x == 0) { device_result->fetch_add(block_result, cuda::std::memory_order_relaxed); unsigned value = atomic_count->fetch_add(1, cuda::memory_order_release); is_last_block_done = value == (gridDim.x - 1); } if (is_last_block_done) { // copy result to global buffer if (threadIdx.x == 0) { global_result->store(device_result->load(cuda::std::memory_order_relaxed), cuda::std::memory_order_relaxed); device_result->store( 0, cuda::std::memory_order_relaxed); // set to zero for next time atomic_count->store( 0, cuda::std::memory_order_relaxed); // set to zero for next time } } } static void generate_size(benchmark::internal::Benchmark *b) { constexpr auto multiplier{10}; constexpr auto min{10'000}; constexpr auto max{100'000'000}; for (auto size = min; size <= max; size *= multiplier) { b->Args({size}); } } template <typename T> static void BM_std_pinned_memory(::benchmark::State &state) { auto size = state.range(0); using count_t = cuda::atomic<unsigned int, cuda::thread_scope_device>; count_t *atomic_count; cudaMalloc(&atomic_count, sizeof(count_t)); zero<<<1, 1>>>(atomic_count); using device_atomic = cuda::atomic<T, cuda::thread_scope_device>; device_atomic *d_result{}; cudaMalloc(&d_result, sizeof(device_atomic)); zero<<<1, 1>>>(d_result); using system_atomic = cuda::atomic<T, cuda::thread_scope_system>; system_atomic *hd_result{}; cudaMallocHost(&hd_result, sizeof(system_atomic)); T h_result{}; constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 1) / size; for (auto _ : state) { *hd_result = 0; std_reduction<block_size> <<<block_size, grid_size>>>(size, hd_result, d_result, atomic_count); while (!hd_result->load(cuda::memory_order_acquire)) ; benchmark::DoNotOptimize(h_result = *hd_result); } cudaFreeHost(hd_result); cudaFree(d_result); cudaFree(atomic_count); } BENCHMARK_TEMPLATE(BM_std_pinned_memory, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond); template <typename T> static void BM_device_memory(::benchmark::State &state) { auto size = state.range(0); T *d_result{}; T h_result{}; cudaMalloc(&d_result, sizeof(T)); constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 
1) / size; for (auto _ : state) { cudaMemset(d_result, 0, sizeof(T)); simple_reduction<block_size><<<block_size, grid_size>>>(size, d_result); cudaMemcpy(&h_result, d_result, sizeof(h_result), cudaMemcpyDefault); } cudaFree(d_result); } BENCHMARK_TEMPLATE(BM_device_memory, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond); template <typename T> static void BM_managed_memory(::benchmark::State &state) { auto size = state.range(0); T *d_result{}; T h_result{}; cudaMallocManaged(&d_result, sizeof(T)); constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 1) / size; for (auto _ : state) { *d_result = 0; simple_reduction<block_size><<<block_size, grid_size>>>(size, d_result); cudaDeviceSynchronize(); benchmark::DoNotOptimize(h_result = *d_result); } cudaFree(d_result); } BENCHMARK_TEMPLATE(BM_managed_memory, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond); template <typename T> static void BM_managed_memory_prefetch(::benchmark::State &state) { auto size = state.range(0); T *d_result{}; T h_result{}; cudaMallocManaged(&d_result, sizeof(T)); constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 1) / size; for (auto _ : state) { *d_result = 0; cudaMemPrefetchAsync(d_result, sizeof(T), 0); simple_reduction<block_size><<<block_size, grid_size>>>(size, d_result); cudaDeviceSynchronize(); benchmark::DoNotOptimize(h_result = *d_result); } cudaFree(d_result); } BENCHMARK_TEMPLATE(BM_managed_memory_prefetch, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond); template <typename T> static void BM_pinned_memory(::benchmark::State &state) { auto size = state.range(0); T *d_result{}; T h_result{}; cudaMallocHost(&d_result, sizeof(T)); constexpr std::size_t block_size{256}; auto grid_size = (size + block_size + 1) / size; for (auto _ : state) { *d_result = 0; simple_reduction<block_size><<<block_size, grid_size>>>(size, d_result); cudaDeviceSynchronize(); benchmark::DoNotOptimize(h_result = *d_result); } cudaFreeHost(d_result); } BENCHMARK_TEMPLATE(BM_pinned_memory, int) ->Apply(generate_size) ->Unit(benchmark::kMicrosecond);
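// The benchmarks above derive grid_size from `size` and `block_size` with the
// expression (size + block_size + 1) / size. The conventional ceiling division
// that guarantees at least one thread per input element is
// (size + block_size - 1) / block_size; the helper below is a minimal sketch of
// that formula, not code taken from the original benchmark.
#include <cassert>
#include <cstddef>

constexpr std::size_t ceil_div(std::size_t size, std::size_t block_size)
{
    return (size + block_size - 1) / block_size;   // smallest grid covering `size`
}

int main()
{
    constexpr std::size_t block_size = 256;
    static_assert(ceil_div(10'000, block_size) == 40, "10,000 elements need 40 blocks of 256");
    static_assert(ceil_div(256, block_size) == 1, "an exact multiple needs no extra block");
    assert(ceil_div(257, block_size) == 2);
    return 0;
}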
ea49d4b13fb91d3e233bb48f87257000daae84d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathBlas.cu" #else #define ERROR_ONLY_FP_TYPES(func) \ THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func); THC_API accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = ScalarConvert<half, accreal>::to( THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1)); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else ERROR_ONLY_FP_TYPES("dot"); return ScalarConvert<int, accreal>::to(0); #endif } THC_API void THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->dim() != 2) || (vec->dim() != 1) ) THError("2D tensor and 1D tensor expected, got %dD, %dD tensors", mat->dim(), vec->dim()); if( mat->size(1) != vec->size(0) ) THError("size mismatch"); if(t->dim() != 1) THError("size mismatch"); if(t->size(0) != mat->size(0)) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(mat->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #endif } else if(mat->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #elif 
defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #endif THCTensor_(free)(state, cmat); } // In hipblasSgemv, hipblasDgemv (x,0).mv(0) does not // handle beta, whereas hipblasSgemm, hipblasDgemm do for case where (x,0).mm(0,y). if (vec->size(0) == 0 && mat->size(0) != 0) { if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } } #elif defined(THC_REAL_IS_HALF) // Currently no Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vecAsMatrix->size(0), 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, tAsMatrix->size(0), 1); THCTensor_(addmm)(state, r_, beta, tAsMatrix, alpha, mat, vecAsMatrix); // r_ will have answer as matrix, need to return a vector THCTensor_(resize1d)(state, r_, r_->size(0)); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else ERROR_ONLY_FP_TYPES("addmv"); #endif } THC_API void THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (vec1->dim() != 1) || (vec2->dim() != 1) ) { THError("1D tensors expected, got %dD, %dD tensors", vec1->dim(), vec2->dim()); } if (t->dim() != 2) { THError("size mismatch"); } if ( (t->size(0) != vec1->size(0)) || (t->size(1) != vec2->size(0)) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1->size(0), vec2->size(0), alpha, THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, r_), r_->stride(1)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1->size(0), vec2->size(0), alpha, THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, r_), r_->stride(1)); #endif } else if(r_->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size(0), vec1->size(0), alpha, THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size(0), vec1->size(0), alpha, THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, r_), r_->stride(0)); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size(0), vec1->size(0), alpha, THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, cr), cr->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) 
THCudaBlas_Dger(state, vec2->size(0), vec1->size(0), alpha, THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, cr), cr->stride(0)); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) // currently no Hger/SgerEx in Cublas. THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2T->size(0), 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1M->size(0), 1); THCTensor_(addmm)(state, r_, beta, t, alpha, vec1M, vec2T); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else ERROR_ONLY_FP_TYPES("addr"); #endif } THC_API void THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->dim() != 2) || (m2->dim() != 2) ) THError("2D tensors expected, got %dD, %dD tensors", m1->dim(), m2->dim()); if(t->dim() != 2) THError("2D tensor expected, got %dD tensor for t", t->dim()); if(m1->size(1) != m2->size(0)) { THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( (t->size(0) != m1->size(0)) || (t->size(1) != m2->size(1)) ) { THCDescBuff bt = THCTensor_(sizeDesc)(state, t); THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THCTensor_(resizeAs)(state, r_, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, r_, t); } } /* r_ */ if(r_->stride(0) == 1 && r_->stride(1) != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride(1) == 1 && r_->stride(0) != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ if(m1->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m1->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m1->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m2->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m2->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 
0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else ERROR_ONLY_FP_TYPES("addmm"); #endif } THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); int64_t batchnum = THCTensor_(size)(state, batch1, 0); int64_t m1d1 = THCTensor_(size)(state, batch1, 1); int64_t innerdim = THCTensor_(size)(state, batch1, 2); int64_t m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for (int64_t i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, beta, result, alpha, slice1, slice2); beta = ScalarConvert<int, real>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else ERROR_ONLY_FP_TYPES("addbmm"); #endif } __global__ void createBatchGemmBuffer(const real** buffer, real* data, int64_t stride, 
int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } } __global__ void createBatchGemmBuffer3(const real** buffer1, const real ** buffer2, const real ** buffer3, real* data1, real * data2, real * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer1[idx] = data1 + idx * stride1; buffer2[idx] = data2 + idx * stride2; buffer3[idx] = data3 + idx * stride3; } } THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } bool transpose_result; char transpose_batch1, transpose_batch2; int64_t lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride(1) == 1) { transpose_result = false; result_ = result; ldc = result_->stride(2); } else if (result->stride(2) == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride(1); } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride(2); } if (batch1->stride(transpose_result ? 2 : 1) == 1 && batch1->stride(transpose_result ? 1 : 2) != 0) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 1 : 2); } else if (batch1->stride(transpose_result ? 1 : 2) == 1 && batch1->stride(transpose_result ? 2 : 1) != 0) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 2 : 1); } else { transpose_batch1 = transpose_result ? 'n' : 't'; // batch1_ is later freed if batch1_ != batch1 if (THCTensor_(isContiguous)(state, batch1)) { batch1_ = batch1; } else { batch1_ = THCTensor_(newContiguous)(state, batch1); } lda = batch1_->stride(1); } if (batch2->stride(transpose_result ? 2 : 1) == 1 && batch2->stride(transpose_result ? 1 : 2) != 0) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 1 : 2); } else if (batch2->stride(transpose_result ? 1 : 2) == 1 && batch2->stride(transpose_result ? 2 : 1) != 0) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 
2 : 1); } else { transpose_batch2 = transpose_result ? 'n' : 't'; // batch2_ is later freed if batch2_ != batch2 if (THCTensor_(isContiguous)(state, batch2)) { batch2_ = batch2; } else { batch2_ = THCTensor_(newContiguous)(state, batch2); } ldb = batch2_->stride(1); } int64_t num_batches = result_->size(0); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) // Compute pointers to matrices in each batch. #if TORCH_HIP_VERSION < 8000 size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. auto d_matrices1 = static_cast<const real**>(THCudaMalloc(state, matrices_size)); auto d_matrices2 = static_cast<const real**>(THCudaMalloc(state, matrices_size)); auto d_result_matrices = static_cast<real**>(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer3), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), d_matrices1, d_matrices2, (const real**)d_result_matrices, THCTensor_(data)(state, batch1_), THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_), batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif //THC_REAL THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); #else #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #endif //THC_REAL #endif //TORCH_HIP_VERSION #elif defined(THC_REAL_IS_HALF) #if TORCH_HIP_VERSION < 9010 // Currently no HgemmBatched in Cublas for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 
1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } #else hipDeviceProp_t* prop = THCState_getCurrentDeviceProperties(state); if (prop->major >= 5){ THCudaBlas_HgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); } else { for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } } #endif #endif if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } #else ERROR_ONLY_FP_TYPES("baddbmm"); #endif } THC_API void THCTensor_(btrifact)(THCState *state, THCTensor *ra_, THCudaIntTensor *rpivots_, THCudaIntTensor *rinfo_, int pivot, THCTensor *a) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 2, ra_, a)); THArgCheck(THCTensor_(nDimension)(state, a) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, a, 1) == THCTensor_(size)(state, a, 2), 3, "matrices must be square"); if (ra_ != a) { THCTensor_(resizeAs)(state, ra_, a); if (ra_->stride(2) == 1) { THCTensor_(transpose)(state, ra_, NULL, 1, 2); } THCTensor_(copy)(state, ra_, a); } int n = a->size(1); int lda; THCTensor *ra__; if (ra_->stride(1) == 1) { // column ordered, what BLAS wants lda = ra_->stride(2); ra__ = ra_; } else { // not column ordered, need to make it such (requires copy) THCTensor *transp_r_ = THCTensor_(newTranspose)(state, ra_, 1, 2); ra__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, ra__, NULL, 1, 2); lda = ra__->stride(2); } int64_t num_batches = ra__->size(0); if (!pivot) { THCudaIntTensor *t = THCudaIntTensor_new(state); THCudaIntTensor_range(state, t, 1, n, 1); THCudaIntTensor_unsqueeze1d(state, t, t, 0); THCudaIntTensor** ptrs = (THCudaIntTensor**) THAlloc(sizeof(THCudaIntTensor*)*num_batches); for (int64_t i=0; i<num_batches; i++) { ptrs[i] = t; } THCudaIntTensor_catArray(state, rpivots_, ptrs, num_batches, 0); THCudaIntTensor_free(state, t); THFree(ptrs); } else { THCudaIntTensor_resize2d(state, rpivots_, num_batches, n); } bool free_rinfo_ = !rinfo_; if (rinfo_ == NULL) rinfo_ = THCudaIntTensor_new(state); THCudaIntTensor_resize1d(state, rinfo_, num_batches); int *info_gpu = THCudaIntTensor_data(state, rinfo_); // Copy pointers to device. 
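  // btrifact: build a device-side array with one pointer per batch matrix
  // (filled by the createBatchGemmBuffer launch below) and hand it to the
  // batched getrf routine; per-batch LAPACK info values land in info_gpu.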
size_t matrices_size = num_batches * sizeof(real*); auto d_result = static_cast<real**>(THCudaMalloc(state, matrices_size)); if (num_batches > 0) { const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), (const real**)d_result, THCTensor_(data)(state, ra__), ra__->stride(0), num_batches); } int *pivots_gpu = NULL; if (pivot) { pivots_gpu = THCudaIntTensor_data(state, rpivots_); } #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #endif THCudaFree(state, d_result); if (ra__ != ra_) { THCTensor_(freeCopyTo)(state, ra__, ra_); } if (free_rinfo_) { if(THCTensor_nElement(state, rinfo_) != 0) { int min = THCudaIntTensor_minall(state, rinfo_); int max = THCudaIntTensor_maxall(state, rinfo_); THCudaIntTensor_free(state, rinfo_); if (min != 0 || max != 0) { THError("failed to factorize some batch elements (min info == %d, max info == %d)", min, max); } } else { THCudaIntTensor_free(state, rinfo_); } } #else THError("btrifact for CUDA tensors is only supported for floats and doubles"); #endif } THC_API void THCTensor_(btrisolve)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *atf, THCudaIntTensor *pivots) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 3, rb_, atf, b)); THArgCheck(THCTensor_(_nDimension)(state, atf) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(_nDimension)(state, b) == 3 || THCTensor_(_nDimension)(state, b) == 2, 4, "expected 2D or 3D tensor"); THArgCheck(THCTensor_(size)(state, atf, 0) == THCTensor_(size)(state, b, 0), 3, "number of batches must be equal"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, atf, 2), 3, "A matrices must be square"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, b, 1), 3, "dimensions of A and b must be equal"); if (rb_ != b) { THCTensor_(resizeAs)(state, rb_, b); THCTensor_(copy)(state, rb_, b); } int n = atf->size(1); int nrhs = rb_->_dim() > 2 ? rb_->size(2) : 1; THCTensor *atf_; THCTensor *rb__; int lda, ldb; // correct ordering of A_tf if (atf->stride(1) == 1) { // column ordered, what BLAS wants lda = atf->stride(2); atf_ = atf; } else { // not column ordered, need to make it such (requires copy) // it would be nice if we could use the op(A) flags to automatically // transpose A if needed, but this leads to unpredictable behavior if the // user clones A_tf later with a different ordering THCTensor *transp_r_ = THCTensor_(newTranspose)(state, atf, 1, 2); atf_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, atf_, NULL, 1, 2); lda = atf_->stride(2); } // correct ordering of B if (rb_->stride(1) == 1) { // column ordered if (rb_->_dim() == 2 || rb_->size(2) == 1) { ldb = n; } else { ldb = rb_->stride(2); } rb__ = rb_; } else { // make column ordered if (rb_->_dim() > 2) { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, rb_, 1, 2); rb__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, rb__, NULL, 1, 2); ldb = rb__->stride(2); } else { rb__ = THCTensor_(newClone)(state, rb_); ldb = n; } } int64_t num_batches = rb_->size(0); size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. 
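  // btrisolve: same batched-pointer setup as btrifact, but two pointer arrays
  // are filled below -- one over the right-hand-side/solution tensor rb__ and
  // one over the factored matrices atf_ -- before the batched getrs call.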
  auto d_result = static_cast<real**>(THCudaMalloc(state, matrices_size));
  auto d_atf = static_cast<const real**>(THCudaMalloc(state, matrices_size));

  const int64_t block = 512;
  const int64_t grid = (num_batches + block - 1) / block;
  hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
    (const real**)d_result, THCTensor_(data)(state, rb__),
    rb__->stride(0), num_batches);
  hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
    d_atf, THCTensor_(data)(state, atf_), atf_->stride(0), num_batches);

  if (!THCudaIntTensor_isContiguous(state, pivots)) {
    THError("Error: pivots is not contiguous.");
  }

  int *pivots_data = THCudaIntTensor_data(state, pivots);
  int info;

#ifdef THC_REAL_IS_FLOAT
  THCudaBlas_Sgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
  THCudaBlas_Dgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches);
#endif

  if (info < 0) {
    THError("Illegal arg %d", -info);
  }

  THCudaFree(state, d_result);
  THCudaFree(state, d_atf);

  if (atf_ != atf) {
    THCTensor_(free)(state, atf_);
  }
  if (rb__ != rb_) {
    THCTensor_(freeCopyTo)(state, rb__, rb_);
  }

#else
  THError("btrisolve for CUDA tensors is only supported for floats and doubles");
#endif
}

#endif
ea49d4b13fb91d3e233bb48f87257000daae84d5.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathBlas.cu" #else #define ERROR_ONLY_FP_TYPES(func) \ THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func); THC_API accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = ScalarConvert<half, accreal>::to( THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1)); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else ERROR_ONLY_FP_TYPES("dot"); return ScalarConvert<int, accreal>::to(0); #endif } THC_API void THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->dim() != 2) || (vec->dim() != 1) ) THError("2D tensor and 1D tensor expected, got %dD, %dD tensors", mat->dim(), vec->dim()); if( mat->size(1) != vec->size(0) ) THError("size mismatch"); if(t->dim() != 1) THError("size mismatch"); if(t->size(0) != mat->size(0)) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(mat->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #endif } else if(mat->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, 
THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec->stride(0), beta, THCTensor_(data)(state, r_), r_->stride(0)); #endif THCTensor_(free)(state, cmat); } // In cublasSgemv, cublasDgemv (x,0).mv(0) does not // handle beta, whereas cublasSgemm, cublasDgemm do for case where (x,0).mm(0,y). if (vec->size(0) == 0 && mat->size(0) != 0) { if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } } #elif defined(THC_REAL_IS_HALF) // Currently no Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vecAsMatrix->size(0), 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, tAsMatrix->size(0), 1); THCTensor_(addmm)(state, r_, beta, tAsMatrix, alpha, mat, vecAsMatrix); // r_ will have answer as matrix, need to return a vector THCTensor_(resize1d)(state, r_, r_->size(0)); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else ERROR_ONLY_FP_TYPES("addmv"); #endif } THC_API void THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (vec1->dim() != 1) || (vec2->dim() != 1) ) { THError("1D tensors expected, got %dD, %dD tensors", vec1->dim(), vec2->dim()); } if (t->dim() != 2) { THError("size mismatch"); } if ( (t->size(0) != vec1->size(0)) || (t->size(1) != vec2->size(0)) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1->size(0), vec2->size(0), alpha, THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, r_), r_->stride(1)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1->size(0), vec2->size(0), alpha, THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, r_), r_->stride(1)); #endif } else if(r_->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size(0), vec1->size(0), alpha, THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size(0), vec1->size(0), alpha, THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, r_), r_->stride(0)); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2->size(0), vec1->size(0), alpha, THCTensor_(data)(state, vec2), vec2->stride(0), THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, cr), cr->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2->size(0), vec1->size(0), alpha, THCTensor_(data)(state, vec2), 
vec2->stride(0), THCTensor_(data)(state, vec1), vec1->stride(0), THCTensor_(data)(state, cr), cr->stride(0)); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) // currently no Hger/SgerEx in Cublas. THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2T->size(0), 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1M->size(0), 1); THCTensor_(addmm)(state, r_, beta, t, alpha, vec1M, vec2T); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else ERROR_ONLY_FP_TYPES("addr"); #endif } THC_API void THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->dim() != 2) || (m2->dim() != 2) ) THError("2D tensors expected, got %dD, %dD tensors", m1->dim(), m2->dim()); if(t->dim() != 2) THError("2D tensor expected, got %dD tensor for t", t->dim()); if(m1->size(1) != m2->size(0)) { THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( (t->size(0) != m1->size(0)) || (t->size(1) != m2->size(1)) ) { THCDescBuff bt = THCTensor_(sizeDesc)(state, t); THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THCTensor_(resizeAs)(state, r_, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, r_, t); } } /* r_ */ if(r_->stride(0) == 1 && r_->stride(1) != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride(1) == 1 && r_->stride(0) != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ if(m1->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m1->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m1->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m2->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m2->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 
0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else ERROR_ONLY_FP_TYPES("addmm"); #endif } THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); int64_t batchnum = THCTensor_(size)(state, batch1, 0); int64_t m1d1 = THCTensor_(size)(state, batch1, 1); int64_t innerdim = THCTensor_(size)(state, batch1, 2); int64_t m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for (int64_t i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, beta, result, alpha, slice1, slice2); beta = ScalarConvert<int, real>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else ERROR_ONLY_FP_TYPES("addbmm"); #endif } __global__ void createBatchGemmBuffer(const real** buffer, real* data, int64_t stride, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } 
} __global__ void createBatchGemmBuffer3(const real** buffer1, const real ** buffer2, const real ** buffer3, real* data1, real * data2, real * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer1[idx] = data1 + idx * stride1; buffer2[idx] = data2 + idx * stride2; buffer3[idx] = data3 + idx * stride3; } } THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimension)(state, t) == 3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimension)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } bool transpose_result; char transpose_batch1, transpose_batch2; int64_t lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride(1) == 1) { transpose_result = false; result_ = result; ldc = result_->stride(2); } else if (result->stride(2) == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride(1); } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride(2); } if (batch1->stride(transpose_result ? 2 : 1) == 1 && batch1->stride(transpose_result ? 1 : 2) != 0) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 1 : 2); } else if (batch1->stride(transpose_result ? 1 : 2) == 1 && batch1->stride(transpose_result ? 2 : 1) != 0) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 2 : 1); } else { transpose_batch1 = transpose_result ? 'n' : 't'; // batch1_ is later freed if batch1_ != batch1 if (THCTensor_(isContiguous)(state, batch1)) { batch1_ = batch1; } else { batch1_ = THCTensor_(newContiguous)(state, batch1); } lda = batch1_->stride(1); } if (batch2->stride(transpose_result ? 2 : 1) == 1 && batch2->stride(transpose_result ? 1 : 2) != 0) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 1 : 2); } else if (batch2->stride(transpose_result ? 1 : 2) == 1 && batch2->stride(transpose_result ? 2 : 1) != 0) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 2 : 1); } else { transpose_batch2 = transpose_result ? 
'n' : 't'; // batch2_ is later freed if batch2_ != batch2 if (THCTensor_(isContiguous)(state, batch2)) { batch2_ = batch2; } else { batch2_ = THCTensor_(newContiguous)(state, batch2); } ldb = batch2_->stride(1); } int64_t num_batches = result_->size(0); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) // Compute pointers to matrices in each batch. #if CUDA_VERSION < 8000 size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. auto d_matrices1 = static_cast<const real**>(THCudaMalloc(state, matrices_size)); auto d_matrices2 = static_cast<const real**>(THCudaMalloc(state, matrices_size)); auto d_result_matrices = static_cast<real**>(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer3<<<grid, block, 0, THCState_getCurrentStream(state)>>>( d_matrices1, d_matrices2, (const real**)d_result_matrices, THCTensor_(data)(state, batch1_), THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_), batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif //THC_REAL THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); #else #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #endif //THC_REAL #endif //CUDA_VERSION #elif defined(THC_REAL_IS_HALF) #if CUDA_VERSION < 9010 // Currently no HgemmBatched in Cublas for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } #else cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state); if (prop->major >= 5){ THCudaBlas_HgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 
1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); } else { for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } } #endif #endif if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } #else ERROR_ONLY_FP_TYPES("baddbmm"); #endif } THC_API void THCTensor_(btrifact)(THCState *state, THCTensor *ra_, THCudaIntTensor *rpivots_, THCudaIntTensor *rinfo_, int pivot, THCTensor *a) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 2, ra_, a)); THArgCheck(THCTensor_(nDimension)(state, a) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, a, 1) == THCTensor_(size)(state, a, 2), 3, "matrices must be square"); if (ra_ != a) { THCTensor_(resizeAs)(state, ra_, a); if (ra_->stride(2) == 1) { THCTensor_(transpose)(state, ra_, NULL, 1, 2); } THCTensor_(copy)(state, ra_, a); } int n = a->size(1); int lda; THCTensor *ra__; if (ra_->stride(1) == 1) { // column ordered, what BLAS wants lda = ra_->stride(2); ra__ = ra_; } else { // not column ordered, need to make it such (requires copy) THCTensor *transp_r_ = THCTensor_(newTranspose)(state, ra_, 1, 2); ra__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, ra__, NULL, 1, 2); lda = ra__->stride(2); } int64_t num_batches = ra__->size(0); if (!pivot) { THCudaIntTensor *t = THCudaIntTensor_new(state); THCudaIntTensor_range(state, t, 1, n, 1); THCudaIntTensor_unsqueeze1d(state, t, t, 0); THCudaIntTensor** ptrs = (THCudaIntTensor**) THAlloc(sizeof(THCudaIntTensor*)*num_batches); for (int64_t i=0; i<num_batches; i++) { ptrs[i] = t; } THCudaIntTensor_catArray(state, rpivots_, ptrs, num_batches, 0); THCudaIntTensor_free(state, t); THFree(ptrs); } else { THCudaIntTensor_resize2d(state, rpivots_, num_batches, n); } bool free_rinfo_ = !rinfo_; if (rinfo_ == NULL) rinfo_ = THCudaIntTensor_new(state); THCudaIntTensor_resize1d(state, rinfo_, num_batches); int *info_gpu = THCudaIntTensor_data(state, rinfo_); // Copy pointers to device. 
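  // Gather one device pointer per batch matrix with createBatchGemmBuffer,
  // then pass that pointer array to the batched getrf call; per-batch
  // factorization status is written to info_gpu.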
size_t matrices_size = num_batches * sizeof(real*); auto d_result = static_cast<real**>(THCudaMalloc(state, matrices_size)); if (num_batches > 0) { const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( (const real**)d_result, THCTensor_(data)(state, ra__), ra__->stride(0), num_batches); } int *pivots_gpu = NULL; if (pivot) { pivots_gpu = THCudaIntTensor_data(state, rpivots_); } #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #endif THCudaFree(state, d_result); if (ra__ != ra_) { THCTensor_(freeCopyTo)(state, ra__, ra_); } if (free_rinfo_) { if(THCTensor_nElement(state, rinfo_) != 0) { int min = THCudaIntTensor_minall(state, rinfo_); int max = THCudaIntTensor_maxall(state, rinfo_); THCudaIntTensor_free(state, rinfo_); if (min != 0 || max != 0) { THError("failed to factorize some batch elements (min info == %d, max info == %d)", min, max); } } else { THCudaIntTensor_free(state, rinfo_); } } #else THError("btrifact for CUDA tensors is only supported for floats and doubles"); #endif } THC_API void THCTensor_(btrisolve)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *atf, THCudaIntTensor *pivots) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 3, rb_, atf, b)); THArgCheck(THCTensor_(_nDimension)(state, atf) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(_nDimension)(state, b) == 3 || THCTensor_(_nDimension)(state, b) == 2, 4, "expected 2D or 3D tensor"); THArgCheck(THCTensor_(size)(state, atf, 0) == THCTensor_(size)(state, b, 0), 3, "number of batches must be equal"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, atf, 2), 3, "A matrices must be square"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, b, 1), 3, "dimensions of A and b must be equal"); if (rb_ != b) { THCTensor_(resizeAs)(state, rb_, b); THCTensor_(copy)(state, rb_, b); } int n = atf->size(1); int nrhs = rb_->_dim() > 2 ? rb_->size(2) : 1; THCTensor *atf_; THCTensor *rb__; int lda, ldb; // correct ordering of A_tf if (atf->stride(1) == 1) { // column ordered, what BLAS wants lda = atf->stride(2); atf_ = atf; } else { // not column ordered, need to make it such (requires copy) // it would be nice if we could use the op(A) flags to automatically // transpose A if needed, but this leads to unpredictable behavior if the // user clones A_tf later with a different ordering THCTensor *transp_r_ = THCTensor_(newTranspose)(state, atf, 1, 2); atf_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, atf_, NULL, 1, 2); lda = atf_->stride(2); } // correct ordering of B if (rb_->stride(1) == 1) { // column ordered if (rb_->_dim() == 2 || rb_->size(2) == 1) { ldb = n; } else { ldb = rb_->stride(2); } rb__ = rb_; } else { // make column ordered if (rb_->_dim() > 2) { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, rb_, 1, 2); rb__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, rb__, NULL, 1, 2); ldb = rb__->stride(2); } else { rb__ = THCTensor_(newClone)(state, rb_); ldb = n; } } int64_t num_batches = rb_->size(0); size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. 
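  // Two pointer arrays are set up below: one over the right-hand-side/solution
  // tensor rb__ and one over the factored matrices atf_, as required by the
  // batched getrs interface.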
  auto d_result = static_cast<real**>(THCudaMalloc(state, matrices_size));
  auto d_atf = static_cast<const real**>(THCudaMalloc(state, matrices_size));

  const int64_t block = 512;
  const int64_t grid = (num_batches + block - 1) / block;
  createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
    (const real**)d_result, THCTensor_(data)(state, rb__),
    rb__->stride(0), num_batches);
  createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
    d_atf, THCTensor_(data)(state, atf_), atf_->stride(0), num_batches);

  if (!THCudaIntTensor_isContiguous(state, pivots)) {
    THError("Error: pivots is not contiguous.");
  }

  int *pivots_data = THCudaIntTensor_data(state, pivots);
  int info;

#ifdef THC_REAL_IS_FLOAT
  THCudaBlas_Sgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches);
#elif defined(THC_REAL_IS_DOUBLE)
  THCudaBlas_Dgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches);
#endif

  if (info < 0) {
    THError("Illegal arg %d", -info);
  }

  THCudaFree(state, d_result);
  THCudaFree(state, d_atf);

  if (atf_ != atf) {
    THCTensor_(free)(state, atf_);
  }
  if (rb__ != rb_) {
    THCTensor_(freeCopyTo)(state, rb__, rb_);
  }

#else
  THError("btrisolve for CUDA tensors is only supported for floats and doubles");
#endif
}

#endif
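
// ---------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original HIP/CUDA pair above and
// guarded out with #if 0 so it never enters the THC generic-file build. It
// restates the batched-pointer pattern (createBatchGemmBuffer feeding the
// *gemmBatched / *getrf / *getrs calls) as a standalone float-only snippet;
// the names fillBatchPointers and setupBatchPointers are hypothetical.
// ---------------------------------------------------------------------------
#if 0
#include <cuda_runtime.h>

__global__ void fillBatchPointers(const float** buffer, const float* data,
                                  int64_t stride, int64_t num_batches) {
  // One thread per batch element: buffer[i] points at the start of the i-th
  // matrix inside a single strided device allocation.
  const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < num_batches) {
    buffer[idx] = data + idx * stride;
  }
}

static const float** setupBatchPointers(const float* d_data, int64_t stride,
                                        int64_t num_batches,
                                        cudaStream_t stream) {
  // Device array holding one pointer per batch, in the layout expected by
  // pointer-array batched BLAS entry points such as cublasSgemmBatched.
  const float** d_ptrs = nullptr;
  cudaMalloc(&d_ptrs, num_batches * sizeof(float*));
  const int64_t block = 512;
  const int64_t grid = (num_batches + block - 1) / block;
  fillBatchPointers<<<grid, block, 0, stream>>>(d_ptrs, d_data, stride,
                                                num_batches);
  return d_ptrs;  // caller passes this to the batched call, then cudaFree()s it
}
#endif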