hip_filename: string, lengths 5 to 84
hip_content: string, lengths 79 to 9.69M
cuda_filename: string, lengths 4 to 83
cuda_content: string, lengths 19 to 9.69M
b1c3c3ee7ec50023d14beb636c683c3acd02b66c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef FWT_KERNEL_CUH #define FWT_KERNEL_CUH #ifndef fwt_kernel_cuh #define fwt_kernel_cuh /////////////////////////////////////////////////////////////////////////////// // Elementary(for vectors less than elementary size) in-shared memory // combined radix-2 + radix-4 Fast Walsh Transform /////////////////////////////////////////////////////////////////////////////// #define ELEMENTARY_LOG2SIZE 11 void fwtBatch1Kernel(float *d_Output, float *d_Input, int log2N, int DIMX){ for(tidx=0; tidx<DIMX; tidx++){ const int N = 1 << log2N; const int base = (tidx/BLOCKDIMX)<<log2N; //blockIdx.x << log2N; //(2 ** 11) * 4 bytes == 8KB -- maximum s_data[] size for G80 float s_data[N]; float *d_Src = d_Input + base; float *d_Dst = d_Output + base; for(int pos = threadIdx.x; pos < N; pos += blockDim.x) s_data[pos] = d_Input[base+pos];//d_Src[pos]; //Main radix-4 stages const int pos = tidx%BLOCKDIMY;//threadIdx.x; for(int stride = base + (N >> 2); stride > (base+0); stride >>= 2){ int lo = (pos & (stride - 1)); int i0 = base + (((pos - lo) << 2) + lo); int i1 = base + (i0 + stride); int i2 = base + (i1 + stride); int i3 = base + (i2 + stride); __syncthreads(); float D0 = d_Input[i0];//s_data[i0]; float D1 = d_Input[i1];//s_data[i1]; float D2 = d_Input[i2];//s_data[i2]; float D3 = d_Input[i3];//s_data[i3]; float T; T = D0; D0 = D0 + D2; D2 = T - D2; T = D1; D1 = D1 + D3; D3 = T - D3; T = D0; d_Input[i0]/*s_data[i0]*/ = D0 + D1; d_Input[i1]/*s_data[i1]*/ = T - D1; T = D2; d_Input[i2]/*s_data[i2]*/ = D2 + D3; d_Input[i3]/*s_data[i3]*/ = T - D3; } //Do single radix-2 stage for odd power of two if(log2N & 1){ __syncthreads(); for(int pos = threadIdx.x; pos < N / 2; pos += blockDim.x){ int i0 = pos << 1; int i1 = i0 + 1; float D0 = s_data[i0]; float D1 = s_data[i1]; s_data[i0] = D0 + D1; s_data[i1] = D0 - D1; } } __syncthreads(); for(int pos = threadIdx.x; pos < N; pos += blockDim.x) d_Dst[pos] = s_data[pos]; } } //////////////////////////////////////////////////////////////////////////////// // Single in-global memory radix-4 Fast Walsh Transform pass // (for strides exceeding elementary vector size) //////////////////////////////////////////////////////////////////////////////// void fwtBatch2Kernel( float *d_Output, float *d_Input, int stride, int DIMX, int DIMY ){ //#define DIMX (blockDim.x*gridDim.x) //#define DIMY (blockDim.y*gridDim.y) //#define BLOCKDIMY 16 for(tidy=0; tidy<DIMY; tidy++){ for(tidx=0; tidx<DIMX; tidx++){ const int pos = tidx; // blockIdx.x * blockDim.x + threadIdx.x; const int N = DIMX*4;//blockDim.x * gridDim.x * 4; //float *d_Src = d_Input + blockIdx.y * N; //float *d_Dst = d_Output + blockIdx.y * N; float offset = tidy*N; int lo = pos & (stride - 1); int i0 = ((pos - lo) << 2) + lo; int i1 = i0 + stride; int i2 = i1 + stride; int i3 = i2 + stride; float D0 = d_Input[offset+i0]; float D1 = d_Input[offset+i1]; float D2 = d_Input[offset+i2]; float D3 = d_Input[offset+i3]; float T; T = D0; D0 = D0 + D2; D2 = T - D2; T = D1; D1 = D1 + D3; D3 = T - D3; T = D0; d_Output[offset+i0] = D0 + D1; d_Output[offset+i1] = T - D1; 
T = D2; d_Output[offset+i2] = D2 + D3; d_Output[offset+i3] = T - D3; } } } //////////////////////////////////////////////////////////////////////////////// // Put everything together: batched Fast Walsh Transform CPU front-end //////////////////////////////////////////////////////////////////////////////// void fwtBatchGPU(float *d_Data, int M, int log2N){ const int THREAD_N = 256; int N = 1 << log2N; //dim3 grid((1 << log2N) / (4 * THREAD_N), M, 1); for(; log2N > ELEMENTARY_LOG2SIZE; log2N -= 2, N >>= 2, M <<= 2){ //fwtBatch2Kernel<<<grid, THREAD_N>>>(d_Data, fwtBatch2Kernel(d_Data, d_Data, N / 4, (1 << log2N) / (4 * THREAD_N), M); //cutilCheckMsg("fwtBatch2Kernel() execution failed\n"); } hipLaunchKernelGGL(( fwtBatch1Kernel), dim3(M), dim3(N / 4), N * sizeof(float), 0, d_Data, d_Data, log2N ); cutilCheckMsg("fwtBatch1Kernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Modulate two arrays //////////////////////////////////////////////////////////////////////////////// __global__ void modulateKernel(float *d_A, float *d_B, int N){ int tid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = blockDim.x * gridDim.x; float rcpN = 1.0f / (float)N; for(int pos = tid; pos < N; pos += numThreads) d_A[pos] *= d_B[pos] * rcpN; } //Interface to modulateKernel() void modulateGPU(float *d_A, float *d_B, int N){ hipLaunchKernelGGL(( modulateKernel), dim3(128), dim3(256), 0, 0, d_A, d_B, N); } #endif #endif
b1c3c3ee7ec50023d14beb636c683c3acd02b66c.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #ifndef FWT_KERNEL_CUH #define FWT_KERNEL_CUH #ifndef fwt_kernel_cuh #define fwt_kernel_cuh /////////////////////////////////////////////////////////////////////////////// // Elementary(for vectors less than elementary size) in-shared memory // combined radix-2 + radix-4 Fast Walsh Transform /////////////////////////////////////////////////////////////////////////////// #define ELEMENTARY_LOG2SIZE 11 void fwtBatch1Kernel(float *d_Output, float *d_Input, int log2N, int DIMX){ for(tidx=0; tidx<DIMX; tidx++){ const int N = 1 << log2N; const int base = (tidx/BLOCKDIMX)<<log2N; //blockIdx.x << log2N; //(2 ** 11) * 4 bytes == 8KB -- maximum s_data[] size for G80 float s_data[N]; float *d_Src = d_Input + base; float *d_Dst = d_Output + base; for(int pos = threadIdx.x; pos < N; pos += blockDim.x) s_data[pos] = d_Input[base+pos];//d_Src[pos]; //Main radix-4 stages const int pos = tidx%BLOCKDIMY;//threadIdx.x; for(int stride = base + (N >> 2); stride > (base+0); stride >>= 2){ int lo = (pos & (stride - 1)); int i0 = base + (((pos - lo) << 2) + lo); int i1 = base + (i0 + stride); int i2 = base + (i1 + stride); int i3 = base + (i2 + stride); __syncthreads(); float D0 = d_Input[i0];//s_data[i0]; float D1 = d_Input[i1];//s_data[i1]; float D2 = d_Input[i2];//s_data[i2]; float D3 = d_Input[i3];//s_data[i3]; float T; T = D0; D0 = D0 + D2; D2 = T - D2; T = D1; D1 = D1 + D3; D3 = T - D3; T = D0; d_Input[i0]/*s_data[i0]*/ = D0 + D1; d_Input[i1]/*s_data[i1]*/ = T - D1; T = D2; d_Input[i2]/*s_data[i2]*/ = D2 + D3; d_Input[i3]/*s_data[i3]*/ = T - D3; } //Do single radix-2 stage for odd power of two if(log2N & 1){ __syncthreads(); for(int pos = threadIdx.x; pos < N / 2; pos += blockDim.x){ int i0 = pos << 1; int i1 = i0 + 1; float D0 = s_data[i0]; float D1 = s_data[i1]; s_data[i0] = D0 + D1; s_data[i1] = D0 - D1; } } __syncthreads(); for(int pos = threadIdx.x; pos < N; pos += blockDim.x) d_Dst[pos] = s_data[pos]; } } //////////////////////////////////////////////////////////////////////////////// // Single in-global memory radix-4 Fast Walsh Transform pass // (for strides exceeding elementary vector size) //////////////////////////////////////////////////////////////////////////////// void fwtBatch2Kernel( float *d_Output, float *d_Input, int stride, int DIMX, int DIMY ){ //#define DIMX (blockDim.x*gridDim.x) //#define DIMY (blockDim.y*gridDim.y) //#define BLOCKDIMY 16 for(tidy=0; tidy<DIMY; tidy++){ for(tidx=0; tidx<DIMX; tidx++){ const int pos = tidx; // blockIdx.x * blockDim.x + threadIdx.x; const int N = DIMX*4;//blockDim.x * gridDim.x * 4; //float *d_Src = d_Input + blockIdx.y * N; //float *d_Dst = d_Output + blockIdx.y * N; float offset = tidy*N; int lo = pos & (stride - 1); int i0 = ((pos - lo) << 2) + lo; int i1 = i0 + stride; int i2 = i1 + stride; int i3 = i2 + stride; float D0 = d_Input[offset+i0]; float D1 = d_Input[offset+i1]; float D2 = d_Input[offset+i2]; float D3 = d_Input[offset+i3]; float T; T = D0; D0 = D0 + D2; D2 = T - D2; T = D1; D1 = D1 + D3; D3 = T - D3; T = D0; d_Output[offset+i0] = D0 + D1; d_Output[offset+i1] = T - D1; T = D2; d_Output[offset+i2] = D2 + D3; d_Output[offset+i3] = T - D3; } } } 
//////////////////////////////////////////////////////////////////////////////// // Put everything together: batched Fast Walsh Transform CPU front-end //////////////////////////////////////////////////////////////////////////////// void fwtBatchGPU(float *d_Data, int M, int log2N){ const int THREAD_N = 256; int N = 1 << log2N; //dim3 grid((1 << log2N) / (4 * THREAD_N), M, 1); for(; log2N > ELEMENTARY_LOG2SIZE; log2N -= 2, N >>= 2, M <<= 2){ //fwtBatch2Kernel<<<grid, THREAD_N>>>(d_Data, fwtBatch2Kernel(d_Data, d_Data, N / 4, (1 << log2N) / (4 * THREAD_N), M); //cutilCheckMsg("fwtBatch2Kernel() execution failed\n"); } fwtBatch1Kernel<<<M, N / 4, N * sizeof(float)>>>( d_Data, d_Data, log2N ); cutilCheckMsg("fwtBatch1Kernel() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Modulate two arrays //////////////////////////////////////////////////////////////////////////////// __global__ void modulateKernel(float *d_A, float *d_B, int N){ int tid = blockIdx.x * blockDim.x + threadIdx.x; int numThreads = blockDim.x * gridDim.x; float rcpN = 1.0f / (float)N; for(int pos = tid; pos < N; pos += numThreads) d_A[pos] *= d_B[pos] * rcpN; } //Interface to modulateKernel() void modulateGPU(float *d_A, float *d_B, int N){ modulateKernel<<<128, 256>>>(d_A, d_B, N); } #endif #endif
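The differences between this .hip/.cu pair are the ones hipify introduces mechanically: the generated banner comment, the extra #include "hip/hip_runtime.h", and the rewrite of the triple-chevron kernel launches (fwtBatch1Kernel, modulateKernel) into hipLaunchKernelGGL. Below is a minimal sketch of that launch mapping, including the dynamic shared-memory argument this pair passes; sumKernel and launchSum are hypothetical stand-ins and are not part of the dataset.

#include <hip/hip_runtime.h>

// Illustrative kernel only: sums the first min(n, blockDim.x) inputs using
// dynamically sized shared memory, so the launch needs a shared-bytes argument.
__global__ void sumKernel(float *out, const float *in, int n) {
    extern __shared__ float s_buf[];
    int i = threadIdx.x;
    s_buf[i] = (i < n) ? in[i] : 0.0f;
    __syncthreads();
    if (i == 0) {
        float acc = 0.0f;
        for (int k = 0; k < blockDim.x; ++k) acc += s_buf[k];
        out[blockIdx.x] = acc;
    }
}

void launchSum(float *d_out, const float *d_in, int n, int block) {
    // CUDA source (.cu side):  sumKernel<<<1, block, block * sizeof(float)>>>(d_out, d_in, n);
    // hipify output (.hip side):
    hipLaunchKernelGGL(sumKernel, dim3(1), dim3(block), block * sizeof(float), 0,
                       d_out, d_in, n);
}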
6d9697ab86c41afffab4df479c7e404945ebd081.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <memory.h> #include <stdlib.h> // these numbers can not be larger, it is not because of GPU global memory size limit(for example:flag size), // maybe it is limits by other level memory size. const unsigned int blockNum = 1024; const unsigned int threadNum = 1024; __global__ void isDivisible(unsigned long value, unsigned long start, unsigned char *flag) { //int threadid = threadIdx.x + blockNum * blockIdx.x; int threadid = blockDim.x * blockIdx.x + threadIdx.x; if ((value % (start + threadid)) == 0) { flag[threadid] = 1; } } int checkPrimary(unsigned long value) { hipError_t cudaStatus; unsigned char * dev_flag; unsigned char * host_flag; const int size = blockNum * threadNum; host_flag = (unsigned char*)malloc(size * sizeof(unsigned char)); if (host_flag == NULL) { fprintf(stderr, "malloc failed!"); return -1; } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_flag, size * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } for (;; value++) { hipMemset(dev_flag, 0, size * sizeof(unsigned char)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); goto Error; } bool isPrimary = true; for (unsigned long i = 2; i <= (value / 2); i += size) { hipLaunchKernelGGL(( isDivisible) , dim3(blockNum), dim3(threadNum) , 0, 0, value, i, dev_flag); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "isDivisible launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching isDivisible:%s!\n", cudaStatus, hipGetErrorString(cudaStatus)); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(host_flag, dev_flag, size * sizeof(unsigned char), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } for (int j = 0; j < size; ++j) { if (host_flag[j]) { isPrimary = false; //printf("[%llu] [%llu]\n",value, i + j); break; } } if (isPrimary == false) { break; } } if (isPrimary) { printf("%llu is a primary number\n", value); } } Error: hipFree(dev_flag); free(host_flag); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return -1; } return 0; }
6d9697ab86c41afffab4df479c7e404945ebd081.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <memory.h> #include <stdlib.h> // these numbers can not be larger, it is not because of GPU global memory size limit(for example:flag size), // maybe it is limits by other level memory size. const unsigned int blockNum = 1024; const unsigned int threadNum = 1024; __global__ void isDivisible(unsigned long value, unsigned long start, unsigned char *flag) { //int threadid = threadIdx.x + blockNum * blockIdx.x; int threadid = blockDim.x * blockIdx.x + threadIdx.x; if ((value % (start + threadid)) == 0) { flag[threadid] = 1; } } int checkPrimary(unsigned long value) { cudaError_t cudaStatus; unsigned char * dev_flag; unsigned char * host_flag; const int size = blockNum * threadNum; host_flag = (unsigned char*)malloc(size * sizeof(unsigned char)); if (host_flag == NULL) { fprintf(stderr, "malloc failed!"); return -1; } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_flag, size * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } for (;; value++) { cudaMemset(dev_flag, 0, size * sizeof(unsigned char)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); goto Error; } bool isPrimary = true; for (unsigned long i = 2; i <= (value / 2); i += size) { isDivisible <<<blockNum, threadNum >>> (value, i, dev_flag); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "isDivisible launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching isDivisible:%s!\n", cudaStatus, cudaGetErrorString(cudaStatus)); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(host_flag, dev_flag, size * sizeof(unsigned char), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } for (int j = 0; j < size; ++j) { if (host_flag[j]) { isPrimary = false; //printf("[%llu] [%llu]\n",value, i + j); break; } } if (isPrimary == false) { break; } } if (isPrimary) { printf("%llu is a primary number\n", value); } } Error: cudaFree(dev_flag); free(host_flag); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return -1; } return 0; }
25b80e3faf40f1289584a87f1211062fd9e9379d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <iostream> extern "C" { #include "dim3_sweep_cuda.h" } // Local variable array macro #define PSI_1D(ANG) psi[ANG] #define PC_1D(ANG) pc[ANG] #define DEN_1D(ANG) den[ANG] #ifdef ROWORDER #define HV_2D(ANG, X) hv[ ANG*4 \ + X ] #else #define HV_2D(ANG, X) hv[ X*NANG \ + ANG ] #endif #ifdef ROWORDER #define FXHV_2D(ANG, X) fxhv[ ANG*4 \ + X ] #else #define FXHV_2D(ANG, X) fxhv[ X*NANG \ + ANG ] #endif // Simplify array indexing when certain values constant throughout module #define PSII_3D(ANG, Y, Z) PSII_4D(ANG, Y, Z, (g-1)) #define PSIJ_3D(ANG, CHUNK, Z) PSIJ_4D(ANG, CHUNK, Z, (g-1)) #define PSIK_3D(ANG, CHUNK, Y) PSIK_4D(ANG, CHUNK, Y, (g-1)) #define QTOT_4D(MOM1, X, Y, Z) QTOT_5D(MOM1, X, Y, Z, (g-1)) #define EC_2D(ANG, MOM1) EC_3D(ANG, MOM1, (oct-1)) #define VDELT_CONST VDELT_1D(g-1) #define PTR_IN_4D(ANG, X, Y, Z) PTR_IN_6D(ANG, X, Y, Z, (i1-1), (i2-1)) #define PTR_OUT_4D(ANG, X, Y, Z) PTR_OUT_6D(ANG, X, Y, Z, (i1-1), (i2-1)) #define DINV_4D(ANG, X, Y, Z) DINV_5D(ANG, X, Y, Z, (g-1)) #define FLUX_3D(X, Y, Z) FLUX_4D(X, Y, Z, (g-1)) #define FLUXM_4D(MOM1, X, Y, Z) FLUXM_5D(MOM1, X, Y, Z, (g-1)) #define JB_IN_3D(ANG, CHUNK, Z) JB_IN_4D(ANG, CHUNK, Z, (g-1)) #define JB_OUT_3D(ANG, CHUNK, Z) JB_OUT_4D(ANG, CHUNK, Z, (g-1)) #define KB_IN_3D(ANG, CHUNK, Y) KB_IN_4D(ANG, CHUNK, Y, (g-1)) #define KB_OUT_3D(ANG, CHUNK, Y) KB_OUT_4D(ANG, CHUNK, Y, (g-1)) #define FLKX_3D(X, Y, Z) FLKX_4D(X, Y, Z, (g-1)) #define FLKY_3D(X, Y, Z) FLKY_4D(X, Y, Z, (g-1)) #define FLKZ_3D(X, Y, Z) FLKZ_4D(X, Y, Z, (g-1)) #define T_XS_3D(X, Y, Z) T_XS_4D(X, Y, Z, (g-1)) // CUDA vars #define N 10 __global__ void add( int *a, int *b, int *c ) { int tid = blockIdx.x; // this thread handles the data at its thread id if (tid < N) c[tid] = a[tid] + b[tid]; } int test( void ) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // allocate the memory on the GPU hipMalloc( (void**)&dev_a, N * sizeof(int) ); hipMalloc( (void**)&dev_b, N * sizeof(int) ); hipMalloc( (void**)&dev_c, N * sizeof(int) ); // fill the arrays 'a' and 'b' on the CPU for (int i=0; i<N; i++) { a[i] = -i; b[i] = i * i; } // copy the arrays 'a' and 'b' to the GPU hipMemcpy( dev_a, a, N * sizeof(int), hipMemcpyHostToDevice ); hipMemcpy( dev_b, b, N * sizeof(int), hipMemcpyHostToDevice ); hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c ); // copy the array 'c' back from the GPU to the CPU hipMemcpy( c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost ); // display the results for (int i=0; i<N; i++) { printf( "%d + %d = %d\n", a[i], b[i], c[i] ); } // free the memory allocated on the GPU hipFree( dev_a ); hipFree( dev_b ); hipFree( dev_c ); return 0; } __global__ void diagonal_loop( input_data *input_vars, bool firsty, bool lasty, bool firstz, bool lastz, geom_data *geom_vars, sn_data *sn_vars, data_data *data_vars, control_data *control_vars, solvar_data *solvar_vars, dim_sweep_data *dim_sweep_vars, int ich, int i_dir, int d1, int d2, int d3, int d4, int j_dir, int k_dir, int j_low, int k_low, int j_high, int k_high, int j_step, int k_step, int i1, int i2, int oct, int g, int *ierr, int nang, double *psi, double *pc, double *den, double *hv, double *fxhv, int d) { // Local variables int n = threadIdx.x; int i_step, ic, i, j, k, l, ibl, ibb, ibf; int ang, indx1 = 4; double sum_hv = 0, sum_hv_tmp = 0, sum_wpsi = 0, sum_ecwpsi = 0, sum_wmupsii = 0, sum_wetapsij = 0, sum_wxipsik = 0; // Set up the sweep order in the 
i-direction. i_step = -1; if ( i_dir == 2 ) i_step = 1; // Loop over cells along the diagonals. When only 1 diagonal, it's // normal sweep order. Otherwise, nested threading performs mini-KBA. ic = DIAG_1D(d-1).cell_id_vars[n-1].ic; if ( i_step < 0 ) { i = ich*ICHUNK - ic + 1; } else { i = (ich-1)*ICHUNK + ic; } if ( i <= NX ) { j = DIAG_1D(d-1).cell_id_vars[n-1].jc; if ( j_step < 0 ) { j = NY - j + 1; } k = DIAG_1D(d-1).cell_id_vars[n-1].kc; if ( k_step < 0 ) { k = NZ - k + 1; } // Left/right boundary conditions, always vacuum. ibl = 0; if ( (i == NX) && (i_step == -1) ) { for ( ang = 0; ang < nang; ang++ ) { PSII_3D(ang,(j-1),(k-1)) = 0; } } else if ( i == 1 && i_step == 1 ) { switch ( ibl ) { case 0: case 1: for ( ang = 0; ang < nang; ang++ ) { PSII_3D(ang,(j-1),(k-1)) = 0; } } } // Top/bottom boundary condtions. Vacuum at global boundaries, // but set to some incoming flux from neighboring proc. ibb = 0; if ( j == j_low ) { if ( j_dir == 1 && lasty ) { for ( ang = 0; ang < nang; ang++ ) { PSIJ_3D(ang,(ic-1),(k-1)) = 0; } } else if ( j_dir == 2 && firsty ) { switch ( ibb ) { case 0: case 1: for ( ang = 0; ang < nang; ang++ ) { PSIJ_3D(ang,(ic-1),(k-1)) = 0; } } } else { for ( ang = 0; ang < nang; ang++ ) { PSIJ_3D(ang,(ic-1),(k-1)) = JB_IN_3D(ang,(ic-1),(k-1)); } } } // Front/back boundary condtions. Vacuum at global boundaries, // but set to some incoming flux from neighboring proc. ibf = 0; if ( k == k_low ) { if ( (k_dir == 1 && lastz) || NDIMEN < 3 ) { for ( ang = 0; ang < nang; ang++ ) { PSIK_3D(ang,(ic-1),(j-1)) = 0; } } else if ( k_dir == 2 && firstz ) { switch ( ibf ) { case 0: case 1: for ( ang = 0; ang < nang; ang++ ) { PSIK_3D(ang,(ic-1),(j-1)) = 0; } } } else { for ( ang = 0; ang < nang; ang++ ) { PSIK_3D(ang,(ic-1),(j-1)) = KB_IN_3D(ang,(ic-1),(j-1)); } } } // Compute the angular source for ( ang = 0; ang < nang; ang++ ) { PSI_1D(ang) = QTOT_4D(0,(i-1),(j-1),(k-1)); if ( SRC_OPT == 3 ) { PSI_1D(ang) += QIM_6D(ang,(i-1),(j-1),(k-1),(oct-1),(g-1)); } } for ( l = 2; l <= CMOM; l++ ) { for ( ang = 0; ang < nang; ang++ ) { PSI_1D(ang) += EC_2D(ang,(l-1)) *QTOT_4D((l-1),(i-1),(j-1),(k-1)); } } // Compute the numerator for the update formula for ( ang = 0; ang < nang; ang++ ) { PC_1D(ang) = PSI_1D(ang) + PSII_3D(ang,(j-1),(k-1)) *MU_1D(ang)*HI + PSIJ_3D(ang,(ic-1),(k-1))*HJ_1D(ang) + PSIK_3D(ang,(ic-1),(j-1))*HK_1D(ang); if ( VDELT_CONST != 0 ) { PC_1D(ang) += VDELT_CONST *PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)); } } // Compute the solution of the center. Use DD for edges. // Use fixup if requested. if ( FIXUP == 0 ) { for ( ang = 0; ang < nang; ang++ ) { PSI_1D(ang) = PC_1D(ang)*DINV_4D(ang,(i-1),(j-1),(k-1)); PSII_3D(ang,(j-1),(k-1)) = 2*PSI_1D(ang) - PSII_3D(ang,(j-1),(k-1)); PSIJ_3D(ang,(ic-1),(k-1)) = 2*PSI_1D(ang) - PSIJ_3D(ang,(ic-1),(k-1)); if ( NDIMEN == 3 ) { PSIK_3D(ang,(ic-1),(j-1)) = 2*PSI_1D(ang) - PSIK_3D(ang,(ic-1),(j-1)); } if ( VDELT_CONST != 0 ) { PTR_OUT_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)) = 2*PSI_1D(ang) - PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)); } } } else { // Multi-pass set to zero + rebalance fixup. Determine angles // that will need fixup first. 
sum_hv = 0; for (ang = 0; ang < nang; ang++) { for (indx1 = 0; indx1 < 4; indx1++) { HV_2D(ang, indx1) = 1; sum_hv += HV_2D(ang,indx1); } PC_1D(ang) = PC_1D(ang) * DINV_4D(ang,(i-1),(j-1),(k-1)); } // fixup_loop while (true) { sum_hv_tmp = 0; for ( ang = 0; ang < nang; ang++ ) { FXHV_2D(ang,0) = 2*PC_1D(ang) - PSII_3D(ang,(j-1),(k-1)); FXHV_2D(ang,1) = 2*PC_1D(ang) - PSIJ_3D(ang,(ic-1),(k-1)); if ( NDIMEN == 3 ) { FXHV_2D(ang,2) = 2*PC_1D(ang) - PSIK_3D(ang,(ic-1),(j-1)); } if ( VDELT_CONST != 0 ) { FXHV_2D(ang,3) = 2*PC_1D(ang) - PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)); } for ( indx1 = 0; indx1 < 4; indx1++ ) { if ( FXHV_2D(ang,indx1) < 0 ) { HV_2D(ang,indx1) = 0; } sum_hv_tmp += HV_2D(ang,indx1); } } // Exit loop when all angles are fixed up if (sum_hv == sum_hv_tmp) break; sum_hv = sum_hv_tmp; // Recompute balance equation numerator and denominator // and get new cell average flux for ( ang = 0; ang < nang; ang++ ) { PC_1D(ang) = PSII_3D(ang,(j-1),(k-1)) * MU_1D(ang) * HI * (1+HV_2D(ang,0)) + PSIJ_3D(ang,(ic-1),(k-1)) * HJ_1D(ang) * (1+HV_2D(ang,1)) + PSIK_3D(ang,(ic-1),(j-1)) * HK_1D(ang) * (1+HV_2D(ang,2)); if ( VDELT_CONST != 0 ) { PC_1D(ang) += VDELT_CONST * PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)) * (1+HV_2D(ang,3)); } PC_1D(ang) = PSI_1D(ang) + 0.5*PC_1D(ang); DEN_1D(ang) = T_XS_3D((i-1),(j-1),(k-1)) + MU_1D(ang) * HI * HV_2D(ang,0) + HJ_1D(ang) * HV_2D(ang,1) + HK_1D(ang) * HV_2D(ang,2) + VDELT_CONST * HV_2D(ang,3); if ( DEN_1D(ang) > TOLR ) { PC_1D(ang) /= DEN_1D(ang); } else { PC_1D(ang) = 0; } } } // end fixup_loop // Fixup done, compute edges for (ang = 0; ang < nang; ang++) { PSI_1D(ang) = PC_1D(ang); PSII_3D(ang,(j-1),(k-1)) = FXHV_2D(ang,0) * HV_2D(ang,0); PSIJ_3D(ang,(ic-1),(k-1)) = FXHV_2D(ang,1) * HV_2D(ang,1); if (NDIMEN == 3) { PSIK_3D(ang,(ic-1),(j-1)) = FXHV_2D(ang,2) * HV_2D(ang,2); } if (VDELT_CONST != 0) { PTR_OUT_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)) = FXHV_2D(ang,3) * HV_2D(ang,3); } } } // Clear the flux arrays if ( oct == 1 ) { FLUX_4D((i-1),(j-1),(k-1),(g-1)) = 0; for ( indx1 = 0; indx1 < (CMOM-1); indx1++ ) { FLUXM_5D(indx1,(i-1),(j-1),(k-1),(g-1)) = 0; } } // Compute the flux moments sum_wpsi = 0; for (ang = 0; ang < nang; ang++) { sum_wpsi += W_1D(ang) * PSI_1D(ang); } FLUX_4D((i-1),(j-1),(k-1),(g-1)) += sum_wpsi; for (l = 1; l <= (CMOM-1); l++) { sum_ecwpsi = 0; for ( ang = 0; ang < nang; ang++ ) { sum_ecwpsi += EC_2D(ang,(l))*W_1D(ang)*PSI_1D(ang); } FLUXM_5D((l-1),(i-1),(j-1),(k-1),(g-1)) += sum_ecwpsi; } // Calculate min and max scalar fluxes (not used elsewhere currently) if (oct == NOCT) { FMIN = MIN( FMIN, FLUX_3D((i-1),(j-1),(k-1)) ); FMAX = MAX( FMAX, FLUX_3D((i-1),(j-1),(k-1)) ); } // Save edge fluxes (dummy if checks for unused non-vacuum BCs) if (j == j_high) { if ((j_dir==2 && lasty) || ((j_dir == 1 && firsty) && ibb == 1)) { // CONTINUE } else { for (ang = 0; ang < nang; ang++) { JB_OUT_3D(ang,(ic-1),(k-1)) = PSIJ_3D(ang,(ic-1),(k-1)); } } } if (k == k_high) { if ((k_dir == 2 && lastz) || ((k_dir==1 && firstz) && ibf == 1)) { // CONTINUE } else { for ( ang = 0; ang < nang; ang++ ) { KB_OUT_3D(ang,(ic-1),(j-1)) = PSIK_3D(ang,(ic-1),(j-1)); } } } // Compute leakages (not used elsewhere currently) if (((i+i_dir-1) == 1) || ((i+i_dir-1) == (NX+1))) { sum_wmupsii = 0; for (ang = 0; ang < nang; ang++) { sum_wmupsii += WMU_1D(ang) * PSII_3D(ang,(j-1),(k-1)); } FLKX_3D((i+i_dir-1-1),(j-1),(k-1)) += i_step*sum_wmupsii; } if ((j_dir == 1 && firsty) || (j_dir == 2 && lasty)) { sum_wetapsij = 0; for (ang = 0; ang < nang; ang++) { 
sum_wetapsij += WETA_1D(ang) * PSIJ_3D(ang,(ic-1),(k-1)); } FLKY_3D((i-1),(j+j_dir-1-1),(k-1)) += j_step*sum_wetapsij; } if (((k_dir == 1 && firstz) || (k_dir == 2 && lastz)) && NDIMEN == 3) { sum_wxipsik = 0; for (ang = 0; ang < nang; ang++) { sum_wxipsik += WXI_1D(ang) * PSIK_3D(ang,(ic-1),(j-1)); } FLKZ_3D((i-1),(j-1),(k+k_dir-1-1)) += k_step*sum_wxipsik; } } } void dim3_sweep_cuda ( input_data *input_vars, bool firsty, bool lasty, bool firstz, bool lastz, geom_data *geom_vars, sn_data *sn_vars, data_data *data_vars, control_data *control_vars, solvar_data *solvar_vars, dim_sweep_data *dim_sweep_vars, int ich, int i_dir, int d1, int d2, int d3, int d4, int j_dir, int k_dir, int j_low, int k_low, int j_high, int k_high, int j_step, int k_step, int i1, int i2, int oct, int g, int *ierr ) { // Local variables int i; int ang, y_ind, ic_ind, z_ind = 4; double psi[NANG], pc[NANG], den[NANG]; double hv[NANG*4], fxhv[NANG*4]; double *c_psi[NANG], *c_pc[NANG], *c_den[NANG]; double *c_hv[NANG*4], *c_fxhv[NANG*4]; // Create GPU-copies of data input_data *c_input_vars; geom_data *c_geom_vars; sn_data *c_sn_vars; data_data *c_data_vars; control_data *c_control_vars; solvar_data *c_solvar_vars; dim_sweep_data *c_dim_sweep_vars; hipMalloc(&c_input_vars, sizeof(input_data)); hipMalloc(&c_geom_vars, sizeof(geom_data)); hipMalloc(&c_sn_vars, sizeof(sn_data)); hipMalloc(&c_data_vars, sizeof(data_data)); hipMalloc(&c_control_vars, sizeof(control_data)); hipMalloc(&c_solvar_vars, sizeof(solvar_data)); hipMalloc(&c_dim_sweep_vars, sizeof(dim_sweep_data)); hipMalloc(c_psi, NANG * sizeof(double)); hipMalloc(c_pc, NANG * sizeof(double)); hipMalloc(c_den, NANG * sizeof(double)); hipMalloc(c_hv, NANG * 4 * sizeof(double)); hipMalloc(c_fxhv, NANG * 4 * sizeof(double)); // Zero out the outgoing boundary arrays and fixup array for ( z_ind = 0; z_ind < NZ; z_ind++ ) { for ( ic_ind = 0; ic_ind < ICHUNK; ic_ind++ ) { for ( ang = 0; ang < NANG; ang++ ) { JB_OUT_3D(ang,ic_ind,z_ind) = 0; } } } for ( y_ind = 0; y_ind < NY; y_ind++ ) { for ( ic_ind = 0; ic_ind < ICHUNK; ic_ind++ ) { for ( ang = 0; ang < NANG; ang++ ) { KB_OUT_3D(ang,ic_ind,y_ind) = 0; } } } for ( i = 0; i < 4; i++) { for ( ang = 0; ang < NANG; ang++ ) { FXHV_2D(ang, i) = 0; } } hipMemcpy(c_psi, psi, NANG * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(c_pc, pc, NANG * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(c_den, den, NANG * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(c_hv, hv, NANG * 4 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(c_fxhv, fxhv, NANG * 4 * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(c_input_vars, input_vars, sizeof(input_data), hipMemcpyHostToDevice); hipMemcpy(c_geom_vars, geom_vars, sizeof(geom_data), hipMemcpyHostToDevice); hipMemcpy(c_sn_vars, sn_vars, sizeof(sn_data), hipMemcpyHostToDevice); hipMemcpy(c_data_vars, data_vars, sizeof(data_data), hipMemcpyHostToDevice); hipMemcpy(c_control_vars, control_vars, sizeof(control_data), hipMemcpyHostToDevice); hipMemcpy(c_solvar_vars, solvar_vars, sizeof(solvar_data), hipMemcpyHostToDevice); hipMemcpy(c_dim_sweep_vars, dim_sweep_vars, sizeof(dim_sweep_data), hipMemcpyHostToDevice); // Loop over cells along the diagonals. When only 1 diagonal, it's // normal sweep order. Otherwise, nested threading performs mini-KBA. 
// diagonal loop int d; for (d = 1; d <= NDIAG; d++) { printf("lenc: %d\n", DIAG_1D(d-1).lenc); hipLaunchKernelGGL(( diagonal_loop), dim3(1), dim3((DIAG_1D(d-1).lenc)), 0, 0, c_input_vars, firsty, lasty, firstz, lastz, c_geom_vars, c_sn_vars, c_data_vars, c_control_vars, c_solvar_vars, c_dim_sweep_vars, ich, i_dir, d1, d2, d3, d4, j_dir, k_dir, j_low, k_low, j_high, k_high, j_step, k_step, i1, i2, oct, g, ierr, input_vars->nang, *c_psi, *c_pc, *c_den, *c_hv, *c_fxhv, d ); } // Copy from device back to hsot hipMemcpy(c_input_vars, input_vars, sizeof(input_data), hipMemcpyDeviceToHost); hipMemcpy(c_geom_vars, geom_vars, sizeof(geom_data), hipMemcpyDeviceToHost); hipMemcpy(c_sn_vars, sn_vars, sizeof(sn_data), hipMemcpyDeviceToHost); hipMemcpy(c_data_vars, data_vars, sizeof(data_data), hipMemcpyDeviceToHost); hipMemcpy(c_control_vars, control_vars, sizeof(control_data), hipMemcpyDeviceToHost); hipMemcpy(c_solvar_vars, solvar_vars, sizeof(solvar_data), hipMemcpyDeviceToHost); hipMemcpy(c_dim_sweep_vars, dim_sweep_vars, sizeof(dim_sweep_data), hipMemcpyDeviceToHost); // Clean up hipFree(c_input_vars); hipFree(c_geom_vars); hipFree(c_sn_vars); hipFree(c_data_vars); hipFree(c_control_vars); hipFree(c_solvar_vars); hipFree(c_dim_sweep_vars); hipFree(c_psi); hipFree(c_pc); hipFree(c_den); hipFree(c_hv); hipFree(c_fxhv); }
25b80e3faf40f1289584a87f1211062fd9e9379d.cu
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> extern "C" { #include "dim3_sweep_cuda.h" } // Local variable array macro #define PSI_1D(ANG) psi[ANG] #define PC_1D(ANG) pc[ANG] #define DEN_1D(ANG) den[ANG] #ifdef ROWORDER #define HV_2D(ANG, X) hv[ ANG*4 \ + X ] #else #define HV_2D(ANG, X) hv[ X*NANG \ + ANG ] #endif #ifdef ROWORDER #define FXHV_2D(ANG, X) fxhv[ ANG*4 \ + X ] #else #define FXHV_2D(ANG, X) fxhv[ X*NANG \ + ANG ] #endif // Simplify array indexing when certain values constant throughout module #define PSII_3D(ANG, Y, Z) PSII_4D(ANG, Y, Z, (g-1)) #define PSIJ_3D(ANG, CHUNK, Z) PSIJ_4D(ANG, CHUNK, Z, (g-1)) #define PSIK_3D(ANG, CHUNK, Y) PSIK_4D(ANG, CHUNK, Y, (g-1)) #define QTOT_4D(MOM1, X, Y, Z) QTOT_5D(MOM1, X, Y, Z, (g-1)) #define EC_2D(ANG, MOM1) EC_3D(ANG, MOM1, (oct-1)) #define VDELT_CONST VDELT_1D(g-1) #define PTR_IN_4D(ANG, X, Y, Z) PTR_IN_6D(ANG, X, Y, Z, (i1-1), (i2-1)) #define PTR_OUT_4D(ANG, X, Y, Z) PTR_OUT_6D(ANG, X, Y, Z, (i1-1), (i2-1)) #define DINV_4D(ANG, X, Y, Z) DINV_5D(ANG, X, Y, Z, (g-1)) #define FLUX_3D(X, Y, Z) FLUX_4D(X, Y, Z, (g-1)) #define FLUXM_4D(MOM1, X, Y, Z) FLUXM_5D(MOM1, X, Y, Z, (g-1)) #define JB_IN_3D(ANG, CHUNK, Z) JB_IN_4D(ANG, CHUNK, Z, (g-1)) #define JB_OUT_3D(ANG, CHUNK, Z) JB_OUT_4D(ANG, CHUNK, Z, (g-1)) #define KB_IN_3D(ANG, CHUNK, Y) KB_IN_4D(ANG, CHUNK, Y, (g-1)) #define KB_OUT_3D(ANG, CHUNK, Y) KB_OUT_4D(ANG, CHUNK, Y, (g-1)) #define FLKX_3D(X, Y, Z) FLKX_4D(X, Y, Z, (g-1)) #define FLKY_3D(X, Y, Z) FLKY_4D(X, Y, Z, (g-1)) #define FLKZ_3D(X, Y, Z) FLKZ_4D(X, Y, Z, (g-1)) #define T_XS_3D(X, Y, Z) T_XS_4D(X, Y, Z, (g-1)) // CUDA vars #define N 10 __global__ void add( int *a, int *b, int *c ) { int tid = blockIdx.x; // this thread handles the data at its thread id if (tid < N) c[tid] = a[tid] + b[tid]; } int test( void ) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // allocate the memory on the GPU cudaMalloc( (void**)&dev_a, N * sizeof(int) ); cudaMalloc( (void**)&dev_b, N * sizeof(int) ); cudaMalloc( (void**)&dev_c, N * sizeof(int) ); // fill the arrays 'a' and 'b' on the CPU for (int i=0; i<N; i++) { a[i] = -i; b[i] = i * i; } // copy the arrays 'a' and 'b' to the GPU cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice ); cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice ); add<<<N,1>>>( dev_a, dev_b, dev_c ); // copy the array 'c' back from the GPU to the CPU cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost ); // display the results for (int i=0; i<N; i++) { printf( "%d + %d = %d\n", a[i], b[i], c[i] ); } // free the memory allocated on the GPU cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_c ); return 0; } __global__ void diagonal_loop( input_data *input_vars, bool firsty, bool lasty, bool firstz, bool lastz, geom_data *geom_vars, sn_data *sn_vars, data_data *data_vars, control_data *control_vars, solvar_data *solvar_vars, dim_sweep_data *dim_sweep_vars, int ich, int i_dir, int d1, int d2, int d3, int d4, int j_dir, int k_dir, int j_low, int k_low, int j_high, int k_high, int j_step, int k_step, int i1, int i2, int oct, int g, int *ierr, int nang, double *psi, double *pc, double *den, double *hv, double *fxhv, int d) { // Local variables int n = threadIdx.x; int i_step, ic, i, j, k, l, ibl, ibb, ibf; int ang, indx1 = 4; double sum_hv = 0, sum_hv_tmp = 0, sum_wpsi = 0, sum_ecwpsi = 0, sum_wmupsii = 0, sum_wetapsij = 0, sum_wxipsik = 0; // Set up the sweep order in the i-direction. i_step = -1; if ( i_dir == 2 ) i_step = 1; // Loop over cells along the diagonals. 
When only 1 diagonal, it's // normal sweep order. Otherwise, nested threading performs mini-KBA. ic = DIAG_1D(d-1).cell_id_vars[n-1].ic; if ( i_step < 0 ) { i = ich*ICHUNK - ic + 1; } else { i = (ich-1)*ICHUNK + ic; } if ( i <= NX ) { j = DIAG_1D(d-1).cell_id_vars[n-1].jc; if ( j_step < 0 ) { j = NY - j + 1; } k = DIAG_1D(d-1).cell_id_vars[n-1].kc; if ( k_step < 0 ) { k = NZ - k + 1; } // Left/right boundary conditions, always vacuum. ibl = 0; if ( (i == NX) && (i_step == -1) ) { for ( ang = 0; ang < nang; ang++ ) { PSII_3D(ang,(j-1),(k-1)) = 0; } } else if ( i == 1 && i_step == 1 ) { switch ( ibl ) { case 0: case 1: for ( ang = 0; ang < nang; ang++ ) { PSII_3D(ang,(j-1),(k-1)) = 0; } } } // Top/bottom boundary condtions. Vacuum at global boundaries, // but set to some incoming flux from neighboring proc. ibb = 0; if ( j == j_low ) { if ( j_dir == 1 && lasty ) { for ( ang = 0; ang < nang; ang++ ) { PSIJ_3D(ang,(ic-1),(k-1)) = 0; } } else if ( j_dir == 2 && firsty ) { switch ( ibb ) { case 0: case 1: for ( ang = 0; ang < nang; ang++ ) { PSIJ_3D(ang,(ic-1),(k-1)) = 0; } } } else { for ( ang = 0; ang < nang; ang++ ) { PSIJ_3D(ang,(ic-1),(k-1)) = JB_IN_3D(ang,(ic-1),(k-1)); } } } // Front/back boundary condtions. Vacuum at global boundaries, // but set to some incoming flux from neighboring proc. ibf = 0; if ( k == k_low ) { if ( (k_dir == 1 && lastz) || NDIMEN < 3 ) { for ( ang = 0; ang < nang; ang++ ) { PSIK_3D(ang,(ic-1),(j-1)) = 0; } } else if ( k_dir == 2 && firstz ) { switch ( ibf ) { case 0: case 1: for ( ang = 0; ang < nang; ang++ ) { PSIK_3D(ang,(ic-1),(j-1)) = 0; } } } else { for ( ang = 0; ang < nang; ang++ ) { PSIK_3D(ang,(ic-1),(j-1)) = KB_IN_3D(ang,(ic-1),(j-1)); } } } // Compute the angular source for ( ang = 0; ang < nang; ang++ ) { PSI_1D(ang) = QTOT_4D(0,(i-1),(j-1),(k-1)); if ( SRC_OPT == 3 ) { PSI_1D(ang) += QIM_6D(ang,(i-1),(j-1),(k-1),(oct-1),(g-1)); } } for ( l = 2; l <= CMOM; l++ ) { for ( ang = 0; ang < nang; ang++ ) { PSI_1D(ang) += EC_2D(ang,(l-1)) *QTOT_4D((l-1),(i-1),(j-1),(k-1)); } } // Compute the numerator for the update formula for ( ang = 0; ang < nang; ang++ ) { PC_1D(ang) = PSI_1D(ang) + PSII_3D(ang,(j-1),(k-1)) *MU_1D(ang)*HI + PSIJ_3D(ang,(ic-1),(k-1))*HJ_1D(ang) + PSIK_3D(ang,(ic-1),(j-1))*HK_1D(ang); if ( VDELT_CONST != 0 ) { PC_1D(ang) += VDELT_CONST *PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)); } } // Compute the solution of the center. Use DD for edges. // Use fixup if requested. if ( FIXUP == 0 ) { for ( ang = 0; ang < nang; ang++ ) { PSI_1D(ang) = PC_1D(ang)*DINV_4D(ang,(i-1),(j-1),(k-1)); PSII_3D(ang,(j-1),(k-1)) = 2*PSI_1D(ang) - PSII_3D(ang,(j-1),(k-1)); PSIJ_3D(ang,(ic-1),(k-1)) = 2*PSI_1D(ang) - PSIJ_3D(ang,(ic-1),(k-1)); if ( NDIMEN == 3 ) { PSIK_3D(ang,(ic-1),(j-1)) = 2*PSI_1D(ang) - PSIK_3D(ang,(ic-1),(j-1)); } if ( VDELT_CONST != 0 ) { PTR_OUT_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)) = 2*PSI_1D(ang) - PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)); } } } else { // Multi-pass set to zero + rebalance fixup. Determine angles // that will need fixup first. 
sum_hv = 0; for (ang = 0; ang < nang; ang++) { for (indx1 = 0; indx1 < 4; indx1++) { HV_2D(ang, indx1) = 1; sum_hv += HV_2D(ang,indx1); } PC_1D(ang) = PC_1D(ang) * DINV_4D(ang,(i-1),(j-1),(k-1)); } // fixup_loop while (true) { sum_hv_tmp = 0; for ( ang = 0; ang < nang; ang++ ) { FXHV_2D(ang,0) = 2*PC_1D(ang) - PSII_3D(ang,(j-1),(k-1)); FXHV_2D(ang,1) = 2*PC_1D(ang) - PSIJ_3D(ang,(ic-1),(k-1)); if ( NDIMEN == 3 ) { FXHV_2D(ang,2) = 2*PC_1D(ang) - PSIK_3D(ang,(ic-1),(j-1)); } if ( VDELT_CONST != 0 ) { FXHV_2D(ang,3) = 2*PC_1D(ang) - PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)); } for ( indx1 = 0; indx1 < 4; indx1++ ) { if ( FXHV_2D(ang,indx1) < 0 ) { HV_2D(ang,indx1) = 0; } sum_hv_tmp += HV_2D(ang,indx1); } } // Exit loop when all angles are fixed up if (sum_hv == sum_hv_tmp) break; sum_hv = sum_hv_tmp; // Recompute balance equation numerator and denominator // and get new cell average flux for ( ang = 0; ang < nang; ang++ ) { PC_1D(ang) = PSII_3D(ang,(j-1),(k-1)) * MU_1D(ang) * HI * (1+HV_2D(ang,0)) + PSIJ_3D(ang,(ic-1),(k-1)) * HJ_1D(ang) * (1+HV_2D(ang,1)) + PSIK_3D(ang,(ic-1),(j-1)) * HK_1D(ang) * (1+HV_2D(ang,2)); if ( VDELT_CONST != 0 ) { PC_1D(ang) += VDELT_CONST * PTR_IN_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)) * (1+HV_2D(ang,3)); } PC_1D(ang) = PSI_1D(ang) + 0.5*PC_1D(ang); DEN_1D(ang) = T_XS_3D((i-1),(j-1),(k-1)) + MU_1D(ang) * HI * HV_2D(ang,0) + HJ_1D(ang) * HV_2D(ang,1) + HK_1D(ang) * HV_2D(ang,2) + VDELT_CONST * HV_2D(ang,3); if ( DEN_1D(ang) > TOLR ) { PC_1D(ang) /= DEN_1D(ang); } else { PC_1D(ang) = 0; } } } // end fixup_loop // Fixup done, compute edges for (ang = 0; ang < nang; ang++) { PSI_1D(ang) = PC_1D(ang); PSII_3D(ang,(j-1),(k-1)) = FXHV_2D(ang,0) * HV_2D(ang,0); PSIJ_3D(ang,(ic-1),(k-1)) = FXHV_2D(ang,1) * HV_2D(ang,1); if (NDIMEN == 3) { PSIK_3D(ang,(ic-1),(j-1)) = FXHV_2D(ang,2) * HV_2D(ang,2); } if (VDELT_CONST != 0) { PTR_OUT_6D(ang,(i-1),(j-1),(k-1),(i1-1),(i2-1)) = FXHV_2D(ang,3) * HV_2D(ang,3); } } } // Clear the flux arrays if ( oct == 1 ) { FLUX_4D((i-1),(j-1),(k-1),(g-1)) = 0; for ( indx1 = 0; indx1 < (CMOM-1); indx1++ ) { FLUXM_5D(indx1,(i-1),(j-1),(k-1),(g-1)) = 0; } } // Compute the flux moments sum_wpsi = 0; for (ang = 0; ang < nang; ang++) { sum_wpsi += W_1D(ang) * PSI_1D(ang); } FLUX_4D((i-1),(j-1),(k-1),(g-1)) += sum_wpsi; for (l = 1; l <= (CMOM-1); l++) { sum_ecwpsi = 0; for ( ang = 0; ang < nang; ang++ ) { sum_ecwpsi += EC_2D(ang,(l))*W_1D(ang)*PSI_1D(ang); } FLUXM_5D((l-1),(i-1),(j-1),(k-1),(g-1)) += sum_ecwpsi; } // Calculate min and max scalar fluxes (not used elsewhere currently) if (oct == NOCT) { FMIN = MIN( FMIN, FLUX_3D((i-1),(j-1),(k-1)) ); FMAX = MAX( FMAX, FLUX_3D((i-1),(j-1),(k-1)) ); } // Save edge fluxes (dummy if checks for unused non-vacuum BCs) if (j == j_high) { if ((j_dir==2 && lasty) || ((j_dir == 1 && firsty) && ibb == 1)) { // CONTINUE } else { for (ang = 0; ang < nang; ang++) { JB_OUT_3D(ang,(ic-1),(k-1)) = PSIJ_3D(ang,(ic-1),(k-1)); } } } if (k == k_high) { if ((k_dir == 2 && lastz) || ((k_dir==1 && firstz) && ibf == 1)) { // CONTINUE } else { for ( ang = 0; ang < nang; ang++ ) { KB_OUT_3D(ang,(ic-1),(j-1)) = PSIK_3D(ang,(ic-1),(j-1)); } } } // Compute leakages (not used elsewhere currently) if (((i+i_dir-1) == 1) || ((i+i_dir-1) == (NX+1))) { sum_wmupsii = 0; for (ang = 0; ang < nang; ang++) { sum_wmupsii += WMU_1D(ang) * PSII_3D(ang,(j-1),(k-1)); } FLKX_3D((i+i_dir-1-1),(j-1),(k-1)) += i_step*sum_wmupsii; } if ((j_dir == 1 && firsty) || (j_dir == 2 && lasty)) { sum_wetapsij = 0; for (ang = 0; ang < nang; ang++) { 
sum_wetapsij += WETA_1D(ang) * PSIJ_3D(ang,(ic-1),(k-1)); } FLKY_3D((i-1),(j+j_dir-1-1),(k-1)) += j_step*sum_wetapsij; } if (((k_dir == 1 && firstz) || (k_dir == 2 && lastz)) && NDIMEN == 3) { sum_wxipsik = 0; for (ang = 0; ang < nang; ang++) { sum_wxipsik += WXI_1D(ang) * PSIK_3D(ang,(ic-1),(j-1)); } FLKZ_3D((i-1),(j-1),(k+k_dir-1-1)) += k_step*sum_wxipsik; } } } void dim3_sweep_cuda ( input_data *input_vars, bool firsty, bool lasty, bool firstz, bool lastz, geom_data *geom_vars, sn_data *sn_vars, data_data *data_vars, control_data *control_vars, solvar_data *solvar_vars, dim_sweep_data *dim_sweep_vars, int ich, int i_dir, int d1, int d2, int d3, int d4, int j_dir, int k_dir, int j_low, int k_low, int j_high, int k_high, int j_step, int k_step, int i1, int i2, int oct, int g, int *ierr ) { // Local variables int i; int ang, y_ind, ic_ind, z_ind = 4; double psi[NANG], pc[NANG], den[NANG]; double hv[NANG*4], fxhv[NANG*4]; double *c_psi[NANG], *c_pc[NANG], *c_den[NANG]; double *c_hv[NANG*4], *c_fxhv[NANG*4]; // Create GPU-copies of data input_data *c_input_vars; geom_data *c_geom_vars; sn_data *c_sn_vars; data_data *c_data_vars; control_data *c_control_vars; solvar_data *c_solvar_vars; dim_sweep_data *c_dim_sweep_vars; cudaMalloc(&c_input_vars, sizeof(input_data)); cudaMalloc(&c_geom_vars, sizeof(geom_data)); cudaMalloc(&c_sn_vars, sizeof(sn_data)); cudaMalloc(&c_data_vars, sizeof(data_data)); cudaMalloc(&c_control_vars, sizeof(control_data)); cudaMalloc(&c_solvar_vars, sizeof(solvar_data)); cudaMalloc(&c_dim_sweep_vars, sizeof(dim_sweep_data)); cudaMalloc(c_psi, NANG * sizeof(double)); cudaMalloc(c_pc, NANG * sizeof(double)); cudaMalloc(c_den, NANG * sizeof(double)); cudaMalloc(c_hv, NANG * 4 * sizeof(double)); cudaMalloc(c_fxhv, NANG * 4 * sizeof(double)); // Zero out the outgoing boundary arrays and fixup array for ( z_ind = 0; z_ind < NZ; z_ind++ ) { for ( ic_ind = 0; ic_ind < ICHUNK; ic_ind++ ) { for ( ang = 0; ang < NANG; ang++ ) { JB_OUT_3D(ang,ic_ind,z_ind) = 0; } } } for ( y_ind = 0; y_ind < NY; y_ind++ ) { for ( ic_ind = 0; ic_ind < ICHUNK; ic_ind++ ) { for ( ang = 0; ang < NANG; ang++ ) { KB_OUT_3D(ang,ic_ind,y_ind) = 0; } } } for ( i = 0; i < 4; i++) { for ( ang = 0; ang < NANG; ang++ ) { FXHV_2D(ang, i) = 0; } } cudaMemcpy(c_psi, psi, NANG * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(c_pc, pc, NANG * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(c_den, den, NANG * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(c_hv, hv, NANG * 4 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(c_fxhv, fxhv, NANG * 4 * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(c_input_vars, input_vars, sizeof(input_data), cudaMemcpyHostToDevice); cudaMemcpy(c_geom_vars, geom_vars, sizeof(geom_data), cudaMemcpyHostToDevice); cudaMemcpy(c_sn_vars, sn_vars, sizeof(sn_data), cudaMemcpyHostToDevice); cudaMemcpy(c_data_vars, data_vars, sizeof(data_data), cudaMemcpyHostToDevice); cudaMemcpy(c_control_vars, control_vars, sizeof(control_data), cudaMemcpyHostToDevice); cudaMemcpy(c_solvar_vars, solvar_vars, sizeof(solvar_data), cudaMemcpyHostToDevice); cudaMemcpy(c_dim_sweep_vars, dim_sweep_vars, sizeof(dim_sweep_data), cudaMemcpyHostToDevice); // Loop over cells along the diagonals. When only 1 diagonal, it's // normal sweep order. Otherwise, nested threading performs mini-KBA. 
// diagonal loop int d; for (d = 1; d <= NDIAG; d++) { printf("lenc: %d\n", DIAG_1D(d-1).lenc); diagonal_loop<<<1, (DIAG_1D(d-1).lenc)>>>( c_input_vars, firsty, lasty, firstz, lastz, c_geom_vars, c_sn_vars, c_data_vars, c_control_vars, c_solvar_vars, c_dim_sweep_vars, ich, i_dir, d1, d2, d3, d4, j_dir, k_dir, j_low, k_low, j_high, k_high, j_step, k_step, i1, i2, oct, g, ierr, input_vars->nang, *c_psi, *c_pc, *c_den, *c_hv, *c_fxhv, d ); } // Copy from device back to hsot cudaMemcpy(c_input_vars, input_vars, sizeof(input_data), cudaMemcpyDeviceToHost); cudaMemcpy(c_geom_vars, geom_vars, sizeof(geom_data), cudaMemcpyDeviceToHost); cudaMemcpy(c_sn_vars, sn_vars, sizeof(sn_data), cudaMemcpyDeviceToHost); cudaMemcpy(c_data_vars, data_vars, sizeof(data_data), cudaMemcpyDeviceToHost); cudaMemcpy(c_control_vars, control_vars, sizeof(control_data), cudaMemcpyDeviceToHost); cudaMemcpy(c_solvar_vars, solvar_vars, sizeof(solvar_data), cudaMemcpyDeviceToHost); cudaMemcpy(c_dim_sweep_vars, dim_sweep_vars, sizeof(dim_sweep_data), cudaMemcpyDeviceToHost); // Clean up cudaFree(c_input_vars); cudaFree(c_geom_vars); cudaFree(c_sn_vars); cudaFree(c_data_vars); cudaFree(c_control_vars); cudaFree(c_solvar_vars); cudaFree(c_dim_sweep_vars); cudaFree(c_psi); cudaFree(c_pc); cudaFree(c_den); cudaFree(c_hv); cudaFree(c_fxhv); }
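Because the .cu side includes both <cuda.h> and <cuda_runtime.h>, hipify maps each to <hip/hip_runtime.h>, which is why the .hip side opens with a duplicated include. Beyond that, the pair repeats the host-side pattern already seen above: allocate device copies of host structs, copy them in, launch with a runtime-computed thread count (DIAG_1D(d-1).lenc), and copy back. A compressed sketch of that pattern follows; WorkParams, workKernel, and runOneDiagonal are hypothetical stand-ins for the SNAP data structures and the diagonal_loop kernel.

#include <hip/hip_runtime.h>

struct WorkParams { int n; double scale; };   // stand-in for the copied host structs

__global__ void workKernel(WorkParams *p, double *data) {
    int t = threadIdx.x;
    if (t < p->n) data[t] *= p->scale;
}

void runOneDiagonal(WorkParams *host_params, double *d_data, int lenc) {
    WorkParams *d_params = NULL;
    hipMalloc((void**)&d_params, sizeof(WorkParams));                               // was: cudaMalloc
    hipMemcpy(d_params, host_params, sizeof(WorkParams), hipMemcpyHostToDevice);    // was: cudaMemcpy(..., cudaMemcpyHostToDevice)
    // CUDA source (.cu side):  workKernel<<<1, lenc>>>(d_params, d_data);
    // hipify output (.hip side):
    hipLaunchKernelGGL(workKernel, dim3(1), dim3(lenc), 0, 0, d_params, d_data);
    hipMemcpy(host_params, d_params, sizeof(WorkParams), hipMemcpyDeviceToHost);    // was: cudaMemcpy(..., cudaMemcpyDeviceToHost)
    hipFree(d_params);                                                              // was: cudaFree
}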
73943ce51a4f15f4e041afdb19d5afa64925d78d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include "magma_templates.h" #define COMPLEX // 512 is maximum number of threads for CUDA capability 1.x #define NB 512 // ---------------------------------------- // CUDA kernel for magma_zlarfg. // Uses one block of NB (currently 512) threads. // Each thread sums dx[ tx + k*NB ]^2 for k = 0, 1, ..., // then does parallel sum reduction to get norm-squared. // // Currently setup to use NB threads, no matter how small dx is. // This was slightly faster (5%) than passing n to magma_sum_reduce. // To use number of threads = min( NB, max( 1, n-1 )), pass n as // argument to magma_sum_reduce, rather than as template parameter. __global__ void zlarfg_kernel( int n, magmaDoubleComplex* dalpha, magmaDoubleComplex* dx, int incx, magmaDoubleComplex* dtau ) { const int tx = threadIdx.x; __shared__ double swork[ NB ]; // TODO is it faster for each thread to have its own scale (register)? // if so, communicate it via swork[0] __shared__ double sscale; __shared__ magmaDoubleComplex sscale2; magmaDoubleComplex tmp; // find max of [dalpha, dx], to use as scaling to avoid unnecesary under- and overflow if ( tx == 0 ) { tmp = *dalpha; #ifdef COMPLEX swork[tx] = max( fabs(real(tmp)), fabs(imag(tmp)) ); #else swork[tx] = fabs(tmp); #endif } else { swork[tx] = 0; } for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx]; #ifdef COMPLEX swork[tx] = max( swork[tx], max( fabs(real(tmp)), fabs(imag(tmp)) )); #else swork[tx] = max( swork[tx], fabs(tmp) ); #endif } magma_max_reduce< NB >( tx, swork ); if ( tx == 0 ) sscale = swork[0]; __syncthreads(); // sum norm^2 of dx/sscale // dx has length n-1 swork[tx] = 0; if ( sscale > 0 ) { for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx] / sscale; swork[tx] += real(tmp)*real(tmp) + imag(tmp)*imag(tmp); } magma_sum_reduce< NB >( tx, swork ); //magma_sum_reduce( blockDim.x, tx, swork ); } if ( tx == 0 ) { magmaDoubleComplex alpha = *dalpha; if ( swork[0] == 0 && imag(alpha) == 0 ) { // H = I *dtau = MAGMA_Z_ZERO; } else { // beta = norm( [dalpha, dx] ) double beta; tmp = alpha / sscale; beta = sscale * sqrt( real(tmp)*real(tmp) + imag(tmp)*imag(tmp) + swork[0] ); beta = -copysign( beta, real(alpha) ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_Z_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta ); *dalpha = MAGMA_Z_MAKE( beta, 0 ); sscale2 = 1 / (alpha - beta); } } // scale x (if norm was not 0) __syncthreads(); if ( swork[0] != 0 ) { for( int j = tx; j < n-1; j += NB ) { dx[j*incx] *= sscale2; } } } /** Purpose ------- ZLARFG generates a complex elementary reflector (Householder matrix) H of order n, such that H * ( alpha ) = ( beta ), H**H * H = I. ( x ) ( 0 ) where alpha and beta are scalars, with beta real and beta = norm([alpha, x]), and x is an (n-1)-element complex vector. H is represented in the form H = I - tau * ( 1 ) * ( 1 v**H ), ( v ) where tau is a complex scalar and v is a complex (n-1)-element vector. Note that H is not Hermitian. If the elements of x are all zero and dalpha is real, then tau = 0 and H is taken to be the unit matrix. Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1. Arguments --------- @param[in] n INTEGER The order of the elementary reflector. @param[in,out] dalpha COMPLEX_16* on the GPU. 
On entry, pointer to the value alpha, i.e., the first entry of the vector. On exit, it is overwritten with the value beta. @param[in,out] dx COMPLEX_16 array, dimension (1+(N-2)*abs(INCX)), on the GPU On entry, the (n-1)-element vector x. On exit, it is overwritten with the vector v. @param[in] incx INTEGER The increment between elements of X. INCX > 0. @param[out] dtau COMPLEX_16* on the GPU. Pointer to the value tau. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_zaux1 ********************************************************************/ extern "C" void magmablas_zlarfg_q( magma_int_t n, magmaDoubleComplex_ptr dalpha, magmaDoubleComplex_ptr dx, magma_int_t incx, magmaDoubleComplex_ptr dtau, magma_queue_t queue ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); hipLaunchKernelGGL(( zlarfg_kernel), dim3(blocks), dim3(threads), 0, queue , n, dalpha, dx, incx, dtau ); } /** @see magmablas_zlarfg_q @ingroup magma_zaux1 ********************************************************************/ extern "C" void magmablas_zlarfg( magma_int_t n, magmaDoubleComplex_ptr dalpha, magmaDoubleComplex_ptr dx, magma_int_t incx, magmaDoubleComplex_ptr dtau ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); hipLaunchKernelGGL(( zlarfg_kernel), dim3(blocks), dim3(threads) , 0, 0, n, dalpha, dx, incx, dtau ); }
73943ce51a4f15f4e041afdb19d5afa64925d78d.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @precisions normal z -> s d c @author Mark Gates */ #include "common_magma.h" #include "magma_templates.h" #define COMPLEX // 512 is maximum number of threads for CUDA capability 1.x #define NB 512 // ---------------------------------------- // CUDA kernel for magma_zlarfg. // Uses one block of NB (currently 512) threads. // Each thread sums dx[ tx + k*NB ]^2 for k = 0, 1, ..., // then does parallel sum reduction to get norm-squared. // // Currently setup to use NB threads, no matter how small dx is. // This was slightly faster (5%) than passing n to magma_sum_reduce. // To use number of threads = min( NB, max( 1, n-1 )), pass n as // argument to magma_sum_reduce, rather than as template parameter. __global__ void zlarfg_kernel( int n, magmaDoubleComplex* dalpha, magmaDoubleComplex* dx, int incx, magmaDoubleComplex* dtau ) { const int tx = threadIdx.x; __shared__ double swork[ NB ]; // TODO is it faster for each thread to have its own scale (register)? // if so, communicate it via swork[0] __shared__ double sscale; __shared__ magmaDoubleComplex sscale2; magmaDoubleComplex tmp; // find max of [dalpha, dx], to use as scaling to avoid unnecesary under- and overflow if ( tx == 0 ) { tmp = *dalpha; #ifdef COMPLEX swork[tx] = max( fabs(real(tmp)), fabs(imag(tmp)) ); #else swork[tx] = fabs(tmp); #endif } else { swork[tx] = 0; } for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx]; #ifdef COMPLEX swork[tx] = max( swork[tx], max( fabs(real(tmp)), fabs(imag(tmp)) )); #else swork[tx] = max( swork[tx], fabs(tmp) ); #endif } magma_max_reduce< NB >( tx, swork ); if ( tx == 0 ) sscale = swork[0]; __syncthreads(); // sum norm^2 of dx/sscale // dx has length n-1 swork[tx] = 0; if ( sscale > 0 ) { for( int j = tx; j < n-1; j += NB ) { tmp = dx[j*incx] / sscale; swork[tx] += real(tmp)*real(tmp) + imag(tmp)*imag(tmp); } magma_sum_reduce< NB >( tx, swork ); //magma_sum_reduce( blockDim.x, tx, swork ); } if ( tx == 0 ) { magmaDoubleComplex alpha = *dalpha; if ( swork[0] == 0 && imag(alpha) == 0 ) { // H = I *dtau = MAGMA_Z_ZERO; } else { // beta = norm( [dalpha, dx] ) double beta; tmp = alpha / sscale; beta = sscale * sqrt( real(tmp)*real(tmp) + imag(tmp)*imag(tmp) + swork[0] ); beta = -copysign( beta, real(alpha) ); // todo: deal with badly scaled vectors (see lapack's larfg) *dtau = MAGMA_Z_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta ); *dalpha = MAGMA_Z_MAKE( beta, 0 ); sscale2 = 1 / (alpha - beta); } } // scale x (if norm was not 0) __syncthreads(); if ( swork[0] != 0 ) { for( int j = tx; j < n-1; j += NB ) { dx[j*incx] *= sscale2; } } } /** Purpose ------- ZLARFG generates a complex elementary reflector (Householder matrix) H of order n, such that H * ( alpha ) = ( beta ), H**H * H = I. ( x ) ( 0 ) where alpha and beta are scalars, with beta real and beta = ±norm([alpha, x]), and x is an (n-1)-element complex vector. H is represented in the form H = I - tau * ( 1 ) * ( 1 v**H ), ( v ) where tau is a complex scalar and v is a complex (n-1)-element vector. Note that H is not Hermitian. If the elements of x are all zero and dalpha is real, then tau = 0 and H is taken to be the unit matrix. Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1. Arguments --------- @param[in] n INTEGER The order of the elementary reflector. @param[in,out] dalpha COMPLEX_16* on the GPU. On entry, pointer to the value alpha, i.e., the first entry of the vector. 
On exit, it is overwritten with the value beta. @param[in,out] dx COMPLEX_16 array, dimension (1+(N-2)*abs(INCX)), on the GPU On entry, the (n-1)-element vector x. On exit, it is overwritten with the vector v. @param[in] incx INTEGER The increment between elements of X. INCX > 0. @param[out] dtau COMPLEX_16* on the GPU. Pointer to the value tau. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_zaux1 ********************************************************************/ extern "C" void magmablas_zlarfg_q( magma_int_t n, magmaDoubleComplex_ptr dalpha, magmaDoubleComplex_ptr dx, magma_int_t incx, magmaDoubleComplex_ptr dtau, magma_queue_t queue ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); zlarfg_kernel<<< blocks, threads, 0, queue >>>( n, dalpha, dx, incx, dtau ); } /** @see magmablas_zlarfg_q @ingroup magma_zaux1 ********************************************************************/ extern "C" void magmablas_zlarfg( magma_int_t n, magmaDoubleComplex_ptr dalpha, magmaDoubleComplex_ptr dx, magma_int_t incx, magmaDoubleComplex_ptr dtau ) { dim3 blocks( 1 ); dim3 threads( NB ); //dim3 threads( min( NB, max( n-1, 1 ))); zlarfg_kernel<<< blocks, threads >>>( n, dalpha, dx, incx, dtau ); }
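The MAGMA pair adds one wrinkle: magmablas_zlarfg_q launches on an explicit queue, and hipify carries that fourth launch parameter through as the stream argument of hipLaunchKernelGGL, while the kernel body and documentation block pass through essentially unchanged. A small sketch of the stream-qualified launch follows; scaleVec and launchOnQueue are illustrative, and the queue is typed here as a plain hipStream_t rather than MAGMA's magma_queue_t.

#include <hip/hip_runtime.h>

// Illustrative kernel: scale an n-element vector by s.
__global__ void scaleVec(double *x, double s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

void launchOnQueue(double *d_x, double s, int n, hipStream_t queue) {
    dim3 blocks(1), threads(512);
    // CUDA source (.cu side):  scaleVec<<< blocks, threads, 0, queue >>>(d_x, s, n);
    // hipify output (.hip side), with the queue preserved as the stream argument:
    hipLaunchKernelGGL(scaleVec, blocks, threads, 0, queue, d_x, s, n);
}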
402d3c98f72a81781615132a42c3937db95f6d96.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "applyLinearFunction.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; const short *x = NULL; hipMalloc(&x, XSIZE*YSIZE); short *y = NULL; hipMalloc(&y, XSIZE*YSIZE); short a = 2; short b = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( applyLinearFunction), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y,a,b); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( applyLinearFunction), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y,a,b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( applyLinearFunction), dim3(gridBlock),dim3(threadBlock), 0, 0, size,x,y,a,b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
402d3c98f72a81781615132a42c3937db95f6d96.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "applyLinearFunction.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; const short *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); short *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); short a = 2; short b = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); applyLinearFunction<<<gridBlock,threadBlock>>>(size,x,y,a,b); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { applyLinearFunction<<<gridBlock,threadBlock>>>(size,x,y,a,b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { applyLinearFunction<<<gridBlock,threadBlock>>>(size,x,y,a,b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
be00531e2a26dca376e06bf3a06f1de74e849314.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <GL/glew.h> #include <minmax.h> #include <cuda_gl_interop.h> #include <hip/hip_runtime.h> #include "MeshResource.h" #include "Shader.h" #include "Window.h" #include "GameTime.h" #include "DebugCamera.h" using namespace std; class VBOTestResource : public MeshResource { public: VBOTestResource(); ~VBOTestResource(); void draw() const; void update(); private: void generateGrid(); unsigned int restart_index; int width, height; struct cudaGraphicsResource* cudaVBO; }; VBOTestResource::VBOTestResource(): width(1000), height(1000) { width = max(2, width); height = max(2, height); generateGrid(); hipGLSetGLDevice(1); if (hipGraphicsGLRegisterBuffer(&cudaVBO, vbo[VBO_VERTEX], hipGraphicsMapFlagsNone) != hipSuccess) printf("Failed\n"); } VBOTestResource::~VBOTestResource() { if (hipGraphicsUnregisterResource(cudaVBO) != hipSuccess) printf("Failed\n"); } void VBOTestResource::generateGrid() { int loop_size = 2*height + 1; int num_verts = width*height; int num_indices = (width - 1)*loop_size; float* verts = new float[3*num_verts]; float* norms = new float[3*num_verts]; float* texcoords = new float[2*num_verts]; unsigned int* indices = new unsigned int[num_indices]; mode = GL_TRIANGLE_STRIP; for (int x = 0; x < width; x++) { int loops = x*loop_size; for (int y = 0; y < height; y++) { int offset = y*width + x; if (x != width - 1) indices[loops + 2*y + 1] = offset; if (x != 0) indices[loops - loop_size + 2*y] = offset; verts[3*offset + 0] = 2*(x*1.0f/(width-1)) - 1; verts[3*offset + 1] = 0; verts[3*offset + 2] = 2*(y*1.0f/(height-1)) - 1; norms[3*offset + 0] = 0; norms[3*offset + 1] = 1; norms[3*offset + 2] = 0; texcoords[2*offset + 0] = x*1.0f/(width-1); texcoords[2*offset + 1] = y*1.0f/(height-1); } if (x != width - 1) indices[loops + loop_size - 1] = width*height; } restart_index = width*height; glBindVertexArray(vao[0]); initVBO(VBO_VERTEX, (float*)verts, 3, num_verts, GL_DYNAMIC_DRAW); initVBO(VBO_NORMAL, (float*)norms, 3, num_verts, GL_DYNAMIC_DRAW); initVBO(VBO_TEXCOORD, (float*)texcoords, 2, num_verts, GL_DYNAMIC_DRAW); initIBO(indices, num_indices, GL_DYNAMIC_DRAW); glBindVertexArray(0); delete[] verts; delete[] norms; delete[] texcoords; delete[] indices; } void VBOTestResource::draw() const { glPrimitiveRestartIndex(restart_index); glEnable(GL_PRIMITIVE_RESTART); MeshResource::draw(); glDisable(GL_PRIMITIVE_RESTART); } __global__ void vboTestResource_update(float* ptr, int width, int height, float time) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = y*width + x; if (x >= width || y >= height) return; float period = 30; float rate = 1; float cx = x*1.0f/width - 0.5f; float cy = y*1.0f/height - 0.5f; float wave = sin(sqrt(cx*cx + cy*cy)*period - rate*time); int sign = wave>0?1:-1; wave = sign*sqrt(sign*wave); ptr[3*offset + 1] = wave/10; period *= 3; rate *= -9; ptr[3*offset + 1] += (sin(x*period/(width - 1) + rate*time) + sin(y*period/(height - 1) + rate*time))/40; } void VBOTestResource::update() { float time = GameTime::getTime(); float* devBuff; size_t size; dim3 threadsPerBlock(8, 8); dim3 numBlocks((width - 1)/threadsPerBlock.x + 1, (height - 1)/threadsPerBlock.y + 1); if (hipGraphicsMapResources(1, &cudaVBO, 0) != hipSuccess) printf("Failed\n"); hipGraphicsResourceGetMappedPointer((void**)&devBuff, &size, cudaVBO); hipLaunchKernelGGL(( vboTestResource_update), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, devBuff, width, height, time); if 
(hipGraphicsUnmapResources(1, &cudaVBO, 0) != hipSuccess) printf("Failed\n"); } const char vert[] = "#version 150 core\n" "uniform mat4 model;\n" "uniform mat4 view;\n" "uniform mat4 proj;\n" "in vec3 position;\n" "in vec3 normal;\n" "in vec2 texCoord;\n" "out Vertex {\n" " vec3 norm;\n" " vec3 worldPos;\n" " vec2 texCoord;\n" "} OUT;\n" "void main() {\n" " mat4 mv = view*model;\n" " vec4 worldPos = model*vec4(position, 1);\n" " gl_Position = proj*view*worldPos;\n" " OUT.norm = normalize(mat3(model)*normal);\n" " OUT.texCoord = texCoord;\n" " OUT.worldPos = worldPos.xyz;\n" "}\n"; const char frag[] = "#version 150 core\n" "uniform sampler2D diffTex;\n" "in Vertex {\n" " vec3 norm;\n" " vec3 worldPos;\n" " vec2 texCoord;\n" "} IN;\n" "out vec4 fragColour;\n" "void main() {\n" " fragColour = vec4(sin(IN.worldPos.x), cos(IN.worldPos.y), sin(IN.worldPos.z), 1);\n" "}\n"; int main() { int swidth, sheight; Window win(swidth = 1280, sheight = 720); GLenum err = glewInit(); if(err != GLEW_OK) { fprintf(stderr, "GLEW Error: %s\n", glewGetErrorString(err)); return -1; } Shader shader(vert, frag); shader.use(); DebugCamera dbcam(Vector3(10, 10, 10), 3.14f/4, -7*3.14f/32); int modelLoc = glGetUniformLocation(shader.getProgram(), "model"); int viewLoc = glGetUniformLocation(shader.getProgram(), "view"); int projLoc = glGetUniformLocation(shader.getProgram(), "proj"); Matrix4 model = Matrix4::diagonal(10,10,10,1); Matrix4 proj = Matrix4::perspective(60, sheight*1.0f/swidth, 0.1f, 1000); Matrix4 view; shader.send(modelLoc, 1, &model); shader.send(projLoc, 1, &proj); VBOTestResource test; win.show(); glEnable(GL_DEPTH_TEST); GameTime::startTime(); int running = true; while (running) { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); GameTime::startFrame(); view = dbcam.getTransformMatrix(); shader.send(viewLoc, 1, &view); dbcam.updateView(); test.update(); shader.use(); test.draw(); win.swapBuffers(); win.update(); } return 0; }
be00531e2a26dca376e06bf3a06f1de74e849314.cu
#include <iostream> #include <GL/glew.h> #include <minmax.h> #include <cuda_gl_interop.h> #include <cuda_runtime.h> #include "MeshResource.h" #include "Shader.h" #include "Window.h" #include "GameTime.h" #include "DebugCamera.h" using namespace std; class VBOTestResource : public MeshResource { public: VBOTestResource(); ~VBOTestResource(); void draw() const; void update(); private: void generateGrid(); unsigned int restart_index; int width, height; struct cudaGraphicsResource* cudaVBO; }; VBOTestResource::VBOTestResource(): width(1000), height(1000) { width = max(2, width); height = max(2, height); generateGrid(); cudaGLSetGLDevice(1); if (cudaGraphicsGLRegisterBuffer(&cudaVBO, vbo[VBO_VERTEX], cudaGraphicsMapFlagsNone) != cudaSuccess) printf("Failed\n"); } VBOTestResource::~VBOTestResource() { if (cudaGraphicsUnregisterResource(cudaVBO) != cudaSuccess) printf("Failed\n"); } void VBOTestResource::generateGrid() { int loop_size = 2*height + 1; int num_verts = width*height; int num_indices = (width - 1)*loop_size; float* verts = new float[3*num_verts]; float* norms = new float[3*num_verts]; float* texcoords = new float[2*num_verts]; unsigned int* indices = new unsigned int[num_indices]; mode = GL_TRIANGLE_STRIP; for (int x = 0; x < width; x++) { int loops = x*loop_size; for (int y = 0; y < height; y++) { int offset = y*width + x; if (x != width - 1) indices[loops + 2*y + 1] = offset; if (x != 0) indices[loops - loop_size + 2*y] = offset; verts[3*offset + 0] = 2*(x*1.0f/(width-1)) - 1; verts[3*offset + 1] = 0; verts[3*offset + 2] = 2*(y*1.0f/(height-1)) - 1; norms[3*offset + 0] = 0; norms[3*offset + 1] = 1; norms[3*offset + 2] = 0; texcoords[2*offset + 0] = x*1.0f/(width-1); texcoords[2*offset + 1] = y*1.0f/(height-1); } if (x != width - 1) indices[loops + loop_size - 1] = width*height; } restart_index = width*height; glBindVertexArray(vao[0]); initVBO(VBO_VERTEX, (float*)verts, 3, num_verts, GL_DYNAMIC_DRAW); initVBO(VBO_NORMAL, (float*)norms, 3, num_verts, GL_DYNAMIC_DRAW); initVBO(VBO_TEXCOORD, (float*)texcoords, 2, num_verts, GL_DYNAMIC_DRAW); initIBO(indices, num_indices, GL_DYNAMIC_DRAW); glBindVertexArray(0); delete[] verts; delete[] norms; delete[] texcoords; delete[] indices; } void VBOTestResource::draw() const { glPrimitiveRestartIndex(restart_index); glEnable(GL_PRIMITIVE_RESTART); MeshResource::draw(); glDisable(GL_PRIMITIVE_RESTART); } __global__ void vboTestResource_update(float* ptr, int width, int height, float time) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = y*width + x; if (x >= width || y >= height) return; float period = 30; float rate = 1; float cx = x*1.0f/width - 0.5f; float cy = y*1.0f/height - 0.5f; float wave = sin(sqrt(cx*cx + cy*cy)*period - rate*time); int sign = wave>0?1:-1; wave = sign*sqrt(sign*wave); ptr[3*offset + 1] = wave/10; period *= 3; rate *= -9; ptr[3*offset + 1] += (sin(x*period/(width - 1) + rate*time) + sin(y*period/(height - 1) + rate*time))/40; } void VBOTestResource::update() { float time = GameTime::getTime(); float* devBuff; size_t size; dim3 threadsPerBlock(8, 8); dim3 numBlocks((width - 1)/threadsPerBlock.x + 1, (height - 1)/threadsPerBlock.y + 1); if (cudaGraphicsMapResources(1, &cudaVBO, 0) != cudaSuccess) printf("Failed\n"); cudaGraphicsResourceGetMappedPointer((void**)&devBuff, &size, cudaVBO); vboTestResource_update<<<numBlocks, threadsPerBlock>>>(devBuff, width, height, time); if (cudaGraphicsUnmapResources(1, &cudaVBO, 0) != cudaSuccess) printf("Failed\n"); } const 
char vert[] = "#version 150 core\n" "uniform mat4 model;\n" "uniform mat4 view;\n" "uniform mat4 proj;\n" "in vec3 position;\n" "in vec3 normal;\n" "in vec2 texCoord;\n" "out Vertex {\n" " vec3 norm;\n" " vec3 worldPos;\n" " vec2 texCoord;\n" "} OUT;\n" "void main() {\n" " mat4 mv = view*model;\n" " vec4 worldPos = model*vec4(position, 1);\n" " gl_Position = proj*view*worldPos;\n" " OUT.norm = normalize(mat3(model)*normal);\n" " OUT.texCoord = texCoord;\n" " OUT.worldPos = worldPos.xyz;\n" "}\n"; const char frag[] = "#version 150 core\n" "uniform sampler2D diffTex;\n" "in Vertex {\n" " vec3 norm;\n" " vec3 worldPos;\n" " vec2 texCoord;\n" "} IN;\n" "out vec4 fragColour;\n" "void main() {\n" " fragColour = vec4(sin(IN.worldPos.x), cos(IN.worldPos.y), sin(IN.worldPos.z), 1);\n" "}\n"; int main() { int swidth, sheight; Window win(swidth = 1280, sheight = 720); GLenum err = glewInit(); if(err != GLEW_OK) { fprintf(stderr, "GLEW Error: %s\n", glewGetErrorString(err)); return -1; } Shader shader(vert, frag); shader.use(); DebugCamera dbcam(Vector3(10, 10, 10), 3.14f/4, -7*3.14f/32); int modelLoc = glGetUniformLocation(shader.getProgram(), "model"); int viewLoc = glGetUniformLocation(shader.getProgram(), "view"); int projLoc = glGetUniformLocation(shader.getProgram(), "proj"); Matrix4 model = Matrix4::diagonal(10,10,10,1); Matrix4 proj = Matrix4::perspective(60, sheight*1.0f/swidth, 0.1f, 1000); Matrix4 view; shader.send(modelLoc, 1, &model); shader.send(projLoc, 1, &proj); VBOTestResource test; win.show(); glEnable(GL_DEPTH_TEST); GameTime::startTime(); int running = true; while (running) { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); GameTime::startFrame(); view = dbcam.getTransformMatrix(); shader.send(viewLoc, 1, &view); dbcam.updateView(); test.update(); shader.use(); test.draw(); win.swapBuffers(); win.update(); } return 0; }
59f2fcce553859440015f5d1c853fea9fb943cd5.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } int *dev_arrayA; int *dev_arrayB; int *dev_bools; int *dev_boolScans; int *dev_idata; int *dev_odata; int * dev_indices; int *dev_lastElements; int *dev_lastElements2; void printArray(int n, const int *a, bool abridged = false) { printf(" [ "); for (int i = 0; i < n; i++) { if (abridged && i + 2 == 15 && n > 16) { i = n - 2; printf("... "); } printf("%3d ", a[i]); } printf("]\n"); } __global__ void kernEffScanUpSweep(int N, int pow2d, int pow2d1, int* arrA) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= N) return; if ((k % pow2d1) == 0 && (k + pow2d1 - 1)<N && (k + pow2d - 1)<N ){ arrA[k + pow2d1 - 1] += arrA[k + pow2d - 1]; } } __global__ void kernEffScanDownSweep(int N, int pow2d, int pow2d1, int* arrA) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= N) return; int tmp = 0; if ((k % pow2d1) == 0 && (k + pow2d1 - 1) < N && (k + pow2d - 1) < N) { tmp = arrA[k + pow2d -1]; arrA[k + pow2d - 1] = arrA[k + pow2d1 - 1]; arrA[k + pow2d1 - 1] += tmp; } } __global__ void kernInitZero(int N, int* array) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { array[tid] = 0; } } __global__ void kernScanShared(int n, int * g_odata, int * g_idata) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int tid_read = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid_read >= n) return; int offset = 1; temp[2 * thid] = g_idata[2 * tid_read]; // load input into shared memory temp[2 * thid + 1] = g_idata[2 * tid_read + 1]; // build sum in place up the tree for (int d = 2*blockDim.x >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[2 * blockDim.x - 1] = 0; } // clear the last element for (int d = 1; d < 2 * blockDim.x; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[2 * tid_read] = temp[2 * thid]; // write results to device memory g_odata[2 * tid_read + 1] = temp[2 * thid + 1]; } __global__ void kernGetLastElement(int n, int* s_data, int * g_odata, int * g_idata) { int thid = threadIdx.x; int tid_global = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid_global >= n) return; if (thid == blockDim.x - 1) { s_data[blockIdx.x] = g_odata[tid_global] +g_idata[tid_global]; } } __global__ void kernUpdateScan(int n, int* s_data, int * g_odata, int * g_idata) { int thid = threadIdx.x; int tid_global = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid_global >= n) return; g_odata[tid_global] += s_data[blockIdx.x]; } /* * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ /* void scan(int n, int *odata, const int *idata) { // TODO int n_new = n; //check for non-2powerN if (1 << ilog2ceil(n) != n) n_new = (1 << ilog2ceil(n)); int fullBlocksPerGrid((n_new + blockSize - 1) / blockSize); hipMalloc((void**)&dev_arrayA, n_new * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_arrayA failed!"); //Initialize to Zero kernInitZero <<<fullBlocksPerGrid, blockSize >>> (n_new, dev_arrayA); checkCUDAErrorFn("kernInitZero failed!"); // Fill dev_arrayA with idata hipMemcpy(dev_arrayA, idata, n * sizeof(int), hipMemcpyHostToDevice); checkCUDAErrorFn("hipMemcpyToSymbol from idata to dev_arrayA failed!"); bool tmp = true; try { timer().startGpuTimer(); //printf("IN WEScan timer started!\n"); } catch (const std::runtime_error& e) { tmp = false; } // Upstream int pow2d1 = 0; int pow2d = 0; for (int d = 0; d <= ilog2ceil(n_new)-1; d++) { pow2d = 1 << (d); pow2d1 = 1 << (d+1); kernEffScanUpSweep << <fullBlocksPerGrid, blockSize >> > (n_new, pow2d, pow2d1, dev_arrayA); checkCUDAErrorFn("kernEffScanUpSweep failed!"); } // Downstream int *zero = new int[1]; zero[0] = 0; hipMemcpy(dev_arrayA + n_new-1, zero, 1*sizeof(int), hipMemcpyHostToDevice); for (int d = ilog2ceil(n_new)-1; d >= 0; d--) { pow2d = 1 << (d); pow2d1 = 1 << (d + 1); kernEffScanDownSweep << <fullBlocksPerGrid, blockSize >> > (n_new, pow2d, pow2d1, dev_arrayA); checkCUDAErrorFn("kernGenerateRandomPosArray failed!"); } if (tmp == true) { timer().endGpuTimer(); //printf("IN WEScan timer ended!\n"); } // Copy back to cpu hipMemcpy(odata, dev_arrayA, n*sizeof(int), hipMemcpyDeviceToHost); checkCUDAErrorFn("hipMemcpyFromSymbol from dev_arrayA to odata failed!"); //printf("BBT Scan Final Computed : \n"); //printArray(n, odata, true); hipFree(dev_arrayA); return; } */ void oldScan(int n_new, int *odata, int *idata) { int fullBlocksPerGrid((n_new + blockSize - 1) / blockSize); // Upstream int pow2d1 = 0; int pow2d = 0; for (int d = 0; d <= ilog2ceil(n_new) - 1; d++) { pow2d = 1 << (d); pow2d1 = 1 << (d + 1); kernEffScanUpSweep << <fullBlocksPerGrid, blockSize >> > (n_new, pow2d, pow2d1, idata); checkCUDAErrorFn("kernEffScanUpSweep failed!"); } // Downstream int *zero = new int[1]; zero[0] = 0; hipMemcpy(idata + n_new - 1, zero, 1 * sizeof(int), hipMemcpyHostToDevice); for (int d = ilog2ceil(n_new) - 1; d >= 0; d--) { pow2d = 1 << (d); pow2d1 = 1 << (d + 1); kernEffScanDownSweep << <fullBlocksPerGrid, blockSize >> > (n_new, pow2d, pow2d1, idata); checkCUDAErrorFn("kernGenerateRandomPosArray failed!"); } // Copy back to out hipMemcpy(odata, idata, n_new * sizeof(int), hipMemcpyDeviceToDevice); checkCUDAErrorFn("hipMemcpyFromSymbol from dev_arrayB to odata failed!"); return; } void scan(int n, int *odata, const int *idata) { // TODO int n_new = n; //int *tmp_print = new int[n]; //check for non-2powerN if (1 << ilog2ceil(n) != n) n_new = (1 << ilog2ceil(n)); int fullBlocksPerGrid((n_new + blockSize - 1) / blockSize); hipMalloc((void**)&dev_arrayA, n_new * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_arrayA failed!"); //Initialize to Zero kernInitZero << <fullBlocksPerGrid, blockSize >> > (n_new, dev_arrayA); checkCUDAErrorFn("kernInitZero failed!"); // Fill dev_arrayA with idata hipMemcpy(dev_arrayA, idata, n * sizeof(int), hipMemcpyHostToDevice); checkCUDAErrorFn("hipMemcpyToSymbol from idata to dev_arrayA failed!"); // More arrays hipMalloc((void**)&dev_odata, n_new * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_arrayA failed!"); hipMalloc((void**)&dev_lastElements, n_new * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_arrayA 
failed!"); hipMalloc((void**)&dev_lastElements2, n_new * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_arrayA failed!"); bool tmp = true; try { timer().startGpuTimer(); //printf("IN WEScan timer started!\n"); } catch (const std::runtime_error& e) { tmp = false; } //printf("\n==========================STARTED WES================================\n"); //printf("Pre Scan Array \n"); //printArray(n, idata, true); //fullBlocksPerGrid = 4; hipLaunchKernelGGL(( kernScanShared) , dim3(fullBlocksPerGrid), dim3(blockSize / 2), (2*blockSize + blockSize/8) * sizeof(int) >> > (n_new, dev_odata, dev_arrayA); //hipMemcpy(tmp_print, dev_odata, n_new * sizeof(int), hipMemcpyDeviceToHost); //checkCUDAErrorFn("hipMemcpyFromSymbol from dev_odata to tmp_print failed!"); //printf("kernScanShared results per %d blocks\n", fullBlocksPerGrid); //printArray(n_new, tmp_print, true); kernGetLastElement << < fullBlocksPerGrid, blockSize, blockSize * sizeof(int) >> > (n_new, dev_lastElements, dev_odata, dev_arrayA); //hipMemcpy(tmp_print, dev_lastElements, fullBlocksPerGrid * sizeof(int), hipMemcpyDeviceToHost); //checkCUDAErrorFn("hipMemcpyFromSymbol from dev_odata to odata failed!"); //printf("kernGetLastElement results\n"); //printArray(fullBlocksPerGrid, tmp_print, true); oldScan(fullBlocksPerGrid, dev_lastElements2, dev_lastElements); //kernScanShared << < 1, blockSize / 2, blockSize * sizeof(int) >> > (n_new, dev_lastElements2, dev_lastElements); //hipMemcpy(tmp_print, dev_lastElements2, fullBlocksPerGrid * sizeof(int), hipMemcpyDeviceToHost); //checkCUDAErrorFn("hipMemcpyFromSymbol from dev_odata to odata failed!"); //printf("scan on kernGetLastElement\n"); //printArray(fullBlocksPerGrid, tmp_print, true); kernUpdateScan << < fullBlocksPerGrid, blockSize >> > (n_new, dev_lastElements2, dev_odata, dev_arrayA); //hipMemcpy(tmp_print, dev_odata, n_new * sizeof(int), hipMemcpyDeviceToHost); //checkCUDAErrorFn("hipMemcpyFromSymbol from dev_odata to odata failed!"); //printf("FINAL Scan results\n"); //printArray(n_new, tmp_print, true); //printf("\n==========================FINISHED WES================================\n"); if (tmp == true) { timer().endGpuTimer(); //printf("IN WEScan timer ended!\n"); } // Copy back hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost); checkCUDAErrorFn("hipMemcpyFromSymbol from dev_arrayA to odata failed!"); //printf("BBT Scan Final Computed : \n"); //printArray(n, odata, true); hipFree(dev_arrayA); hipFree(dev_odata); hipFree(dev_lastElements); hipFree(dev_lastElements2); return; } void compact_scan(int n, int *dev_odata, int *dev_idata) { // TODO int n_new = n; //int *tmp_print = new int[n]; //check for non-2powerN if (1 << ilog2ceil(n) != n) { n_new = (1 << ilog2ceil(n)); } int fullBlocksPerGrid((n_new + blockSize - 1) / blockSize); hipMalloc((void**)&dev_arrayA, n_new * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_arrayA failed!"); //Initialize to Zero kernInitZero , fullBlocksPerGrid, blockSize >> > (n_new, dev_arrayA); checkCUDAErrorFn("kernInitZero failed!"); // Fill dev_arrayA with idata hipMemcpy(dev_arrayA, dev_idata, n * sizeof(int), hipMemcpyDeviceToDevice); checkCUDAErrorFn("hipMemcpyToSymbol from idata to dev_arrayA failed!"); // More arrays hipMalloc((void**)&dev_lastElements, n_new * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_arrayA failed!"); hipMalloc((void**)&dev_lastElements2, n_new * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_arrayA failed!"); bool tmp = true; try { timer().startGpuTimer(); //printf("IN WEScan timer started!\n"); } catch 
(const std::runtime_error& e) { tmp = false; } hipLaunchKernelGGL(( kernScanShared), dim3(fullBlocksPerGrid), dim3(blockSize / 2), (2 * blockSize + blockSize / 8) * sizeof(int), 0, n_new, dev_odata, dev_arrayA); //hipMemcpy(tmp_print, dev_odata, n_new * sizeof(int), hipMemcpyDeviceToHost); kernGetLastElement << < fullBlocksPerGrid, blockSize, blockSize * sizeof(int) >> > (n_new, dev_lastElements, dev_odata, dev_arrayA); //hipMemcpy(tmp_print, dev_lastElements, fullBlocksPerGrid * sizeof(int), hipMemcpyDeviceToHost); oldScan(fullBlocksPerGrid, dev_lastElements2, dev_lastElements); //kernScanShared << < 1, blockSize / 2, blockSize * sizeof(int) >> > (n_new, dev_lastElements2, dev_lastElements); kernUpdateScan << < fullBlocksPerGrid, blockSize >> > (n_new, dev_lastElements2, dev_odata, dev_arrayA); //hipMemcpy(tmp_print, dev_odata, n_new * sizeof(int), hipMemcpyDeviceToHost); if (tmp == true) { timer().endGpuTimer(); //printf("IN WEScan timer ended!\n"); } // Copy back //hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost); //checkCUDAErrorFn("hipMemcpyFromSymbol from dev_arrayA to odata failed!"); //printf("BBT Scan Final Computed : \n"); //printArray(n, odata, true); hipFree(dev_arrayA); hipFree(dev_lastElements); hipFree(dev_lastElements2); return; } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { // TODO int * indices = new int[n]; int * bools = new int[n]; int fullBlocksPerGrid((n + blockSize - 1) / blockSize); hipMalloc((void**)&dev_bools, n * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_bools failed!"); hipMalloc((void**)&dev_idata, n*sizeof(int)); checkCUDAErrorFn("hipMalloc dev_arrayA failed!"); hipMemcpy(dev_idata, idata, n*sizeof(int), hipMemcpyHostToDevice); checkCUDAErrorFn("hipMemcpyToSymbol from idata to dev_arrayA failed!"); int n_new = n; if (1 << ilog2ceil(n) != n) { int n_new = (1 << ilog2ceil(n)); } // allocate enough memory to thandle non power of two hipMalloc((void**)&dev_indices, n_new * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_indices failed!"); hipMalloc((void**)&dev_odata, n * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_indices failed!"); timer().startGpuTimer(); //Compute bools hipLaunchKernelGGL(( Common::kernMapToBoolean), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_bools, dev_idata); checkCUDAErrorFn("kernMapToBoolean failed!"); //compute scans compact_scan(n, dev_indices, dev_bools); //scatter hipLaunchKernelGGL(( Common::kernScatter), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, n, dev_odata, dev_idata, dev_bools, dev_indices); checkCUDAErrorFn("kernScatter failed!"); timer().endGpuTimer(); // Copy back to cpu hipMemcpy(odata, dev_odata, n*sizeof(int), hipMemcpyDeviceToHost); checkCUDAErrorFn("hipMemcpyFromSymbol from dev_odata to odata failed!"); int *lastEl = new int[1]; hipMemcpy(lastEl, dev_indices+n-1, 1*sizeof(int), hipMemcpyDeviceToHost); checkCUDAErrorFn("hipMemcpyFromSymbol from dev_odata to odata failed!"); //printf("GPU Compaction : \n"); //printArray(n, odata, true); hipFree(dev_bools); hipFree(dev_idata); hipFree(dev_indices); hipFree(dev_odata); if (idata[n - 1] != 0) { return lastEl[0] + 1; } else { return lastEl[0]; } } } }
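/* Editor's note (not part of the original file): a minimal host-side sketch of calling the compact() documented above, assuming the StreamCompaction::Efficient header is included; the array names below are illustrative.

   const int n = 1 << 20;
   int *idata = new int[n];
   int *odata = new int[n];
   // ... fill idata; zero entries will be discarded ...
   int remaining = StreamCompaction::Efficient::compact(n, odata, idata);
   // odata[0 .. remaining-1] now holds the non-zero entries of idata, order preserved
   delete[] idata; delete[] odata;
*/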
59f2fcce553859440015f5d1c853fea9fb943cd5.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } int *dev_arrayA; int *dev_arrayB; int *dev_bools; int *dev_boolScans; int *dev_idata; int *dev_odata; int * dev_indices; int *dev_lastElements; int *dev_lastElements2; void printArray(int n, const int *a, bool abridged = false) { printf(" [ "); for (int i = 0; i < n; i++) { if (abridged && i + 2 == 15 && n > 16) { i = n - 2; printf("... "); } printf("%3d ", a[i]); } printf("]\n"); } __global__ void kernEffScanUpSweep(int N, int pow2d, int pow2d1, int* arrA) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= N) return; if ((k % pow2d1) == 0 && (k + pow2d1 - 1)<N && (k + pow2d - 1)<N ){ arrA[k + pow2d1 - 1] += arrA[k + pow2d - 1]; } } __global__ void kernEffScanDownSweep(int N, int pow2d, int pow2d1, int* arrA) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= N) return; int tmp = 0; if ((k % pow2d1) == 0 && (k + pow2d1 - 1) < N && (k + pow2d - 1) < N) { tmp = arrA[k + pow2d -1]; arrA[k + pow2d - 1] = arrA[k + pow2d1 - 1]; arrA[k + pow2d1 - 1] += tmp; } } __global__ void kernInitZero(int N, int* array) { int tid = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid < N) { array[tid] = 0; } } __global__ void kernScanShared(int n, int * g_odata, int * g_idata) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int tid_read = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid_read >= n) return; int offset = 1; temp[2 * thid] = g_idata[2 * tid_read]; // load input into shared memory temp[2 * thid + 1] = g_idata[2 * tid_read + 1]; // build sum in place up the tree for (int d = 2*blockDim.x >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[2 * blockDim.x - 1] = 0; } // clear the last element for (int d = 1; d < 2 * blockDim.x; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[2 * tid_read] = temp[2 * thid]; // write results to device memory g_odata[2 * tid_read + 1] = temp[2 * thid + 1]; } __global__ void kernGetLastElement(int n, int* s_data, int * g_odata, int * g_idata) { int thid = threadIdx.x; int tid_global = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid_global >= n) return; if (thid == blockDim.x - 1) { s_data[blockIdx.x] = g_odata[tid_global] +g_idata[tid_global]; } } __global__ void kernUpdateScan(int n, int* s_data, int * g_odata, int * g_idata) { int thid = threadIdx.x; int tid_global = (blockIdx.x * blockDim.x) + threadIdx.x; if (tid_global >= n) return; g_odata[tid_global] += s_data[blockIdx.x]; } /* * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ /* void scan(int n, int *odata, const int *idata) { // TODO int n_new = n; //check for non-2powerN if (1 << ilog2ceil(n) != n) n_new = (1 << ilog2ceil(n)); int fullBlocksPerGrid((n_new + blockSize - 1) / blockSize); cudaMalloc((void**)&dev_arrayA, n_new * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_arrayA failed!"); //Initialize to Zero kernInitZero <<<fullBlocksPerGrid, blockSize >>> (n_new, dev_arrayA); checkCUDAErrorFn("kernInitZero failed!"); // Fill dev_arrayA with idata cudaMemcpy(dev_arrayA, idata, n * sizeof(int), cudaMemcpyHostToDevice); checkCUDAErrorFn("cudaMemcpyToSymbol from idata to dev_arrayA failed!"); bool tmp = true; try { timer().startGpuTimer(); //printf("IN WEScan timer started!\n"); } catch (const std::runtime_error& e) { tmp = false; } // Upstream int pow2d1 = 0; int pow2d = 0; for (int d = 0; d <= ilog2ceil(n_new)-1; d++) { pow2d = 1 << (d); pow2d1 = 1 << (d+1); kernEffScanUpSweep << <fullBlocksPerGrid, blockSize >> > (n_new, pow2d, pow2d1, dev_arrayA); checkCUDAErrorFn("kernEffScanUpSweep failed!"); } // Downstream int *zero = new int[1]; zero[0] = 0; cudaMemcpy(dev_arrayA + n_new-1, zero, 1*sizeof(int), cudaMemcpyHostToDevice); for (int d = ilog2ceil(n_new)-1; d >= 0; d--) { pow2d = 1 << (d); pow2d1 = 1 << (d + 1); kernEffScanDownSweep << <fullBlocksPerGrid, blockSize >> > (n_new, pow2d, pow2d1, dev_arrayA); checkCUDAErrorFn("kernGenerateRandomPosArray failed!"); } if (tmp == true) { timer().endGpuTimer(); //printf("IN WEScan timer ended!\n"); } // Copy back to cpu cudaMemcpy(odata, dev_arrayA, n*sizeof(int), cudaMemcpyDeviceToHost); checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_arrayA to odata failed!"); //printf("BBT Scan Final Computed : \n"); //printArray(n, odata, true); cudaFree(dev_arrayA); return; } */ void oldScan(int n_new, int *odata, int *idata) { int fullBlocksPerGrid((n_new + blockSize - 1) / blockSize); // Upstream int pow2d1 = 0; int pow2d = 0; for (int d = 0; d <= ilog2ceil(n_new) - 1; d++) { pow2d = 1 << (d); pow2d1 = 1 << (d + 1); kernEffScanUpSweep << <fullBlocksPerGrid, blockSize >> > (n_new, pow2d, pow2d1, idata); checkCUDAErrorFn("kernEffScanUpSweep failed!"); } // Downstream int *zero = new int[1]; zero[0] = 0; cudaMemcpy(idata + n_new - 1, zero, 1 * sizeof(int), cudaMemcpyHostToDevice); for (int d = ilog2ceil(n_new) - 1; d >= 0; d--) { pow2d = 1 << (d); pow2d1 = 1 << (d + 1); kernEffScanDownSweep << <fullBlocksPerGrid, blockSize >> > (n_new, pow2d, pow2d1, idata); checkCUDAErrorFn("kernGenerateRandomPosArray failed!"); } // Copy back to out cudaMemcpy(odata, idata, n_new * sizeof(int), cudaMemcpyDeviceToDevice); checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_arrayB to odata failed!"); return; } void scan(int n, int *odata, const int *idata) { // TODO int n_new = n; //int *tmp_print = new int[n]; //check for non-2powerN if (1 << ilog2ceil(n) != n) n_new = (1 << ilog2ceil(n)); int fullBlocksPerGrid((n_new + blockSize - 1) / blockSize); cudaMalloc((void**)&dev_arrayA, n_new * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_arrayA failed!"); //Initialize to Zero kernInitZero << <fullBlocksPerGrid, blockSize >> > (n_new, dev_arrayA); checkCUDAErrorFn("kernInitZero failed!"); // Fill dev_arrayA with idata cudaMemcpy(dev_arrayA, idata, n * sizeof(int), cudaMemcpyHostToDevice); checkCUDAErrorFn("cudaMemcpyToSymbol from idata to dev_arrayA failed!"); // More arrays cudaMalloc((void**)&dev_odata, n_new * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_arrayA failed!"); cudaMalloc((void**)&dev_lastElements, n_new * sizeof(int)); 
checkCUDAErrorFn("cudaMalloc dev_arrayA failed!"); cudaMalloc((void**)&dev_lastElements2, n_new * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_arrayA failed!"); bool tmp = true; try { timer().startGpuTimer(); //printf("IN WEScan timer started!\n"); } catch (const std::runtime_error& e) { tmp = false; } //printf("\n==========================STARTED WES================================\n"); //printf("Pre Scan Array \n"); //printArray(n, idata, true); //fullBlocksPerGrid = 4; kernScanShared <<< fullBlocksPerGrid, blockSize / 2, (2*blockSize + blockSize/8) * sizeof(int) >> > (n_new, dev_odata, dev_arrayA); //cudaMemcpy(tmp_print, dev_odata, n_new * sizeof(int), cudaMemcpyDeviceToHost); //checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_odata to tmp_print failed!"); //printf("kernScanShared results per %d blocks\n", fullBlocksPerGrid); //printArray(n_new, tmp_print, true); kernGetLastElement << < fullBlocksPerGrid, blockSize, blockSize * sizeof(int) >> > (n_new, dev_lastElements, dev_odata, dev_arrayA); //cudaMemcpy(tmp_print, dev_lastElements, fullBlocksPerGrid * sizeof(int), cudaMemcpyDeviceToHost); //checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_odata to odata failed!"); //printf("kernGetLastElement results\n"); //printArray(fullBlocksPerGrid, tmp_print, true); oldScan(fullBlocksPerGrid, dev_lastElements2, dev_lastElements); //kernScanShared << < 1, blockSize / 2, blockSize * sizeof(int) >> > (n_new, dev_lastElements2, dev_lastElements); //cudaMemcpy(tmp_print, dev_lastElements2, fullBlocksPerGrid * sizeof(int), cudaMemcpyDeviceToHost); //checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_odata to odata failed!"); //printf("scan on kernGetLastElement\n"); //printArray(fullBlocksPerGrid, tmp_print, true); kernUpdateScan << < fullBlocksPerGrid, blockSize >> > (n_new, dev_lastElements2, dev_odata, dev_arrayA); //cudaMemcpy(tmp_print, dev_odata, n_new * sizeof(int), cudaMemcpyDeviceToHost); //checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_odata to odata failed!"); //printf("FINAL Scan results\n"); //printArray(n_new, tmp_print, true); //printf("\n==========================FINISHED WES================================\n"); if (tmp == true) { timer().endGpuTimer(); //printf("IN WEScan timer ended!\n"); } // Copy back cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost); checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_arrayA to odata failed!"); //printf("BBT Scan Final Computed : \n"); //printArray(n, odata, true); cudaFree(dev_arrayA); cudaFree(dev_odata); cudaFree(dev_lastElements); cudaFree(dev_lastElements2); return; } void compact_scan(int n, int *dev_odata, int *dev_idata) { // TODO int n_new = n; //int *tmp_print = new int[n]; //check for non-2powerN if (1 << ilog2ceil(n) != n) { n_new = (1 << ilog2ceil(n)); } int fullBlocksPerGrid((n_new + blockSize - 1) / blockSize); cudaMalloc((void**)&dev_arrayA, n_new * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_arrayA failed!"); //Initialize to Zero kernInitZero <<<fullBlocksPerGrid, blockSize >> > (n_new, dev_arrayA); checkCUDAErrorFn("kernInitZero failed!"); // Fill dev_arrayA with idata cudaMemcpy(dev_arrayA, dev_idata, n * sizeof(int), cudaMemcpyDeviceToDevice); checkCUDAErrorFn("cudaMemcpyToSymbol from idata to dev_arrayA failed!"); // More arrays cudaMalloc((void**)&dev_lastElements, n_new * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_arrayA failed!"); cudaMalloc((void**)&dev_lastElements2, n_new * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_arrayA failed!"); bool tmp = true; try { timer().startGpuTimer(); //printf("IN 
WEScan timer started!\n"); } catch (const std::runtime_error& e) { tmp = false; } kernScanShared << < fullBlocksPerGrid, blockSize / 2, (2 * blockSize + blockSize / 8) * sizeof(int) >> > (n_new, dev_odata, dev_arrayA); //cudaMemcpy(tmp_print, dev_odata, n_new * sizeof(int), cudaMemcpyDeviceToHost); kernGetLastElement << < fullBlocksPerGrid, blockSize, blockSize * sizeof(int) >> > (n_new, dev_lastElements, dev_odata, dev_arrayA); //cudaMemcpy(tmp_print, dev_lastElements, fullBlocksPerGrid * sizeof(int), cudaMemcpyDeviceToHost); oldScan(fullBlocksPerGrid, dev_lastElements2, dev_lastElements); //kernScanShared << < 1, blockSize / 2, blockSize * sizeof(int) >> > (n_new, dev_lastElements2, dev_lastElements); kernUpdateScan << < fullBlocksPerGrid, blockSize >> > (n_new, dev_lastElements2, dev_odata, dev_arrayA); //cudaMemcpy(tmp_print, dev_odata, n_new * sizeof(int), cudaMemcpyDeviceToHost); if (tmp == true) { timer().endGpuTimer(); //printf("IN WEScan timer ended!\n"); } // Copy back //cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost); //checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_arrayA to odata failed!"); //printf("BBT Scan Final Computed : \n"); //printArray(n, odata, true); cudaFree(dev_arrayA); cudaFree(dev_lastElements); cudaFree(dev_lastElements2); return; } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { // TODO int * indices = new int[n]; int * bools = new int[n]; int fullBlocksPerGrid((n + blockSize - 1) / blockSize); cudaMalloc((void**)&dev_bools, n * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_bools failed!"); cudaMalloc((void**)&dev_idata, n*sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_arrayA failed!"); cudaMemcpy(dev_idata, idata, n*sizeof(int), cudaMemcpyHostToDevice); checkCUDAErrorFn("cudaMemcpyToSymbol from idata to dev_arrayA failed!"); int n_new = n; if (1 << ilog2ceil(n) != n) { int n_new = (1 << ilog2ceil(n)); } // allocate enough memory to thandle non power of two cudaMalloc((void**)&dev_indices, n_new * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_indices failed!"); cudaMalloc((void**)&dev_odata, n * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_indices failed!"); timer().startGpuTimer(); //Compute bools Common::kernMapToBoolean<<<fullBlocksPerGrid, blockSize >>>(n, dev_bools, dev_idata); checkCUDAErrorFn("kernMapToBoolean failed!"); //compute scans compact_scan(n, dev_indices, dev_bools); //scatter Common::kernScatter<<<fullBlocksPerGrid, blockSize >>>(n, dev_odata, dev_idata, dev_bools, dev_indices); checkCUDAErrorFn("kernScatter failed!"); timer().endGpuTimer(); // Copy back to cpu cudaMemcpy(odata, dev_odata, n*sizeof(int), cudaMemcpyDeviceToHost); checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_odata to odata failed!"); int *lastEl = new int[1]; cudaMemcpy(lastEl, dev_indices+n-1, 1*sizeof(int), cudaMemcpyDeviceToHost); checkCUDAErrorFn("cudaMemcpyFromSymbol from dev_odata to odata failed!"); //printf("GPU Compaction : \n"); //printArray(n, odata, true); cudaFree(dev_bools); cudaFree(dev_idata); cudaFree(dev_indices); cudaFree(dev_odata); if (idata[n - 1] != 0) { return lastEl[0] + 1; } else { return lastEl[0]; } } } }
30d4ae7e09bf857dbae68f7659def352ab2aab28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/infogain_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void InfogainLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const Dtype* infogain, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const int num_labels_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; int l = 0; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = 0; for (l = 0; l < num_labels_; l++) { loss[index] -= infogain[label_value*num_labels_ + l] * log(max(prob_data[n * spatial_dim * num_labels_ + l * spatial_dim + s], Dtype(kLOG_THRESHOLD))); } counts[index] = 1; } } } template <typename Dtype> void InfogainLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const Dtype* infogain_mat = NULL; if (bottom.size() < 3) { infogain_mat = infogain_.gpu_data(); } else { infogain_mat = bottom[2]->gpu_data(); } int count = 0; const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( InfogainLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, infogain_mat, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, num_labels_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void InfogainLossBackwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* bottom_diff, const Dtype* infogain, const Dtype* rows_infogain, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const int num_labels_, Dtype* counts) { //const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { //for (int c = 0; c < channels; ++c) { for (int l = 0; l < num_labels_; ++l) { bottom_diff[n * dim + l * spatial_dim + s] = 0; } counts[index] = 0; } else { for (int l = 0; l < num_labels_; ++l) { bottom_diff[n*dim + l*spatial_dim + s] = prob_data[n*dim + l*spatial_dim + s]*rows_infogain[label_value] - infogain[label_value*num_labels_ + s]; } counts[index] = 1; } } } template <typename Dtype> void InfogainLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); // caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const Dtype* infogain_mat = NULL; if (bottom.size() < 3) { infogain_mat = infogain_.gpu_data(); } else { infogain_mat = bottom[2]->gpu_data(); sum_rows_of_H(bottom[2]); } const Dtype* rows_infogain = sum_rows_H_.gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( InfogainLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, bottom_diff, infogain_mat, rows_infogain, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, num_labels_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(InfogainLossLayer); } // namespace caffe
30d4ae7e09bf857dbae68f7659def352ab2aab28.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/infogain_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void InfogainLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const Dtype* infogain, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const int num_labels_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; int l = 0; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = 0; for (l = 0; l < num_labels_; l++) { loss[index] -= infogain[label_value*num_labels_ + l] * log(max(prob_data[n * spatial_dim * num_labels_ + l * spatial_dim + s], Dtype(kLOG_THRESHOLD))); } counts[index] = 1; } } } template <typename Dtype> void InfogainLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const Dtype* infogain_mat = NULL; if (bottom.size() < 3) { infogain_mat = infogain_.gpu_data(); } else { infogain_mat = bottom[2]->gpu_data(); } int count = 0; const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) InfogainLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, infogain_mat, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, num_labels_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void InfogainLossBackwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* bottom_diff, const Dtype* infogain, const Dtype* rows_infogain, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, const int num_labels_, Dtype* counts) { //const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { //for (int c = 0; c < channels; ++c) { for (int l = 0; l < num_labels_; ++l) { bottom_diff[n * dim + l * spatial_dim + s] = 0; } counts[index] = 0; } else { for (int l = 0; l < num_labels_; ++l) { bottom_diff[n*dim + l*spatial_dim + s] = prob_data[n*dim + l*spatial_dim + s]*rows_infogain[label_value] - infogain[label_value*num_labels_ + s]; } counts[index] = 1; } } } template <typename Dtype> void InfogainLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); // caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const Dtype* infogain_mat = NULL; if (bottom.size() < 3) { infogain_mat = infogain_.gpu_data(); } else { infogain_mat = bottom[2]->gpu_data(); sum_rows_of_H(bottom[2]); } const Dtype* rows_infogain = sum_rows_H_.gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) InfogainLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, bottom_diff, infogain_mat, rows_infogain, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, num_labels_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(InfogainLossLayer); } // namespace caffe
703302a8add0f16c14966823a444b67a5bbece6f.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <complex> #include <helper_cuda.h> #include <iomanip> #include <iostream> #include <random> #include <cufinufft.h> #include <cufinufft/impl.h> #include <cufinufft/utils.h> #include <thrust/complex.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> using cufinufft::utils::infnorm; template <typename T> int run_test(int method, int type, int N1, int M, T tol, T checktol, int iflag) { std::cout << std::scientific << std::setprecision(3); int ier; thrust::host_vector<T> x(M); thrust::host_vector<thrust::complex<T>> c(M); thrust::host_vector<thrust::complex<T>> fk(N1); thrust::device_vector<T> d_x(M); thrust::device_vector<thrust::complex<T>> d_c(M); thrust::device_vector<thrust::complex<T>> d_fk(N1); std::default_random_engine eng(1); std::uniform_real_distribution<T> dist11(-1, 1); auto randm11 = [&eng, &dist11]() { return dist11(eng); }; // Making data for (int i = 0; i < M; i++) { x[i] = M_PI * randm11(); // x in [-pi,pi) } if (type == 1) { for (int i = 0; i < M; i++) { c[i].real(randm11()); c[i].imag(randm11()); } } else if (type == 2) { for (int i = 0; i < N1; i++) { fk[i].real(randm11()); fk[i].imag(randm11()); } } else { std::cerr << "Invalid type " << type << " supplied\n"; return 1; } d_x = x; if (type == 1) d_c = c; else if (type == 2) d_fk = fk; hipEvent_t start, stop; float milliseconds = 0; float totaltime = 0; hipEventCreate(&start); hipEventCreate(&stop); // warm up CUFFT (is slow, takes around 0.2 sec... ) hipEventRecord(start); { int nf1 = 1; hipfftHandle fftplan; hipfftPlan1d(&fftplan, nf1, cufft_type<T>(), 1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("[time ] dummy warmup call to CUFFT\t %.3g s\n", milliseconds / 1000); // now to the test... cufinufft_plan_t<T> *dplan; const int dim = 1; // Here we setup our own opts, for gpu_method. 
cufinufft_opts opts; ier = cufinufft_default_opts(type, dim, &opts); if (ier != 0) { printf("err %d: cufinufft_default_opts\n", ier); return ier; } opts.gpu_method = method; opts.gpu_maxbatchsize = 1; int nmodes[3] = {N1, 1, 1}; int ntransf = 1; hipEventRecord(start); ier = cufinufft_makeplan_impl<T>(type, dim, nmodes, iflag, ntransf, tol, &dplan, &opts); if (ier != 0) { printf("err: cufinufft1d_plan\n"); return ier; } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft plan:\t\t %.3g s\n", milliseconds / 1000); hipEventRecord(start); ier = cufinufft_setpts_impl<T>(M, d_x.data().get(), NULL, NULL, 0, NULL, NULL, NULL, dplan); if (ier != 0) { printf("err: cufinufft_setpts\n"); return ier; } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft setNUpts:\t\t %.3g s\n", milliseconds / 1000); hipEventRecord(start); ier = cufinufft_execute_impl<T>((cuda_complex<T> *)d_c.data().get(), (cuda_complex<T> *)d_fk.data().get(), dplan); if (ier != 0) { printf("err: cufinufft1d_exec\n"); return ier; } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; float exec_ms = milliseconds; printf("[time ] cufinufft exec:\t\t %.3g s\n", milliseconds / 1000); hipEventRecord(start); ier = cufinufft_destroy_impl<T>(dplan); if (ier != 0) { printf("err %d: cufinufft1d_destroy\n", ier); return ier; } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft destroy:\t\t %.3g s\n", milliseconds / 1000); printf("[Method %d] %d U pts to %d NU pts in %.3g s: %.3g NU pts/s\n", opts.gpu_method, N1, M, totaltime / 1000, M / totaltime * 1000); printf("\t\t\t\t\t(exec-only thoughput: %.3g NU pts/s)\n", M / exec_ms * 1000); T rel_error = std::numeric_limits<T>::max(); if (type == 1) { fk = d_fk; int nt1 = 0.37 * N1; // choose some mode index to check thrust::complex<T> Ft = thrust::complex<T>(0, 0), J = thrust::complex<T>(0.0, iflag); for (int j = 0; j < M; ++j) Ft += c[j] * exp(J * (nt1 * x[j])); // crude direct int it = N1 / 2 + nt1; // index in complex F as 1d array rel_error = abs(Ft - fk[it]) / infnorm(N1, (std::complex<T> *)fk.data()); printf("[gpu ] one mode: rel err in F[%d] is %.3g\n", nt1, rel_error); } else if (type == 2) { c = d_c; int jt = M / 2; // check arbitrary choice of one targ pt thrust::complex<T> J = thrust::complex<T>(0, iflag); thrust::complex<T> ct = thrust::complex<T>(0, 0); int m = 0; for (int m1 = -(N1 / 2); m1 <= (N1 - 1) / 2; ++m1) ct += fk[m++] * exp(J * (m1 * x[jt])); // crude direct rel_error = abs(c[jt] - ct) / infnorm(M, (std::complex<T> *)c.data()); printf("[gpu ] one targ: rel err in c[%d] is %.3g\n", jt, rel_error); } return std::isnan(rel_error) || rel_error > checktol; } int main(int argc, char *argv[]) { if (argc != 8) { fprintf(stderr, "Usage: cufinufft1d_test method type N1 M tol checktol prec\n" "Arguments:\n" " method: One of\n" " 1: nupts driven\n" " type: Type of transform (1, 2)\n" " N1: Number of fourier modes\n" " M: The number of non-uniform points\n" " tol: NUFFT tolerance\n" " checktol: relative error to pass test\n" " precision: f or d\n"); return 1; } const int method = atoi(argv[1]); const int type = atoi(argv[2]); const int N1 = atof(argv[3]); const int M = atof(argv[4]); const double tol = atof(argv[5]); const 
double checktol = atof(argv[6]);
  const int iflag = 1;
  const char prec = argv[7][0];
  if (prec == 'f')
    return run_test<float>(method, type, N1, M, tol, checktol, iflag);
  else if (prec == 'd')
    return run_test<double>(method, type, N1, M, tol, checktol, iflag);
  else
    return -1;
}
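// The type-1 accuracy check in run_test above compares one Fourier mode of the
// GPU result against a crude O(M) direct sum,
//   F[k] = sum_j c[j] * exp(i * iflag * k * x[j]),
// normalized by the inf-norm of fk. The helper below is a minimal host-side
// sketch of that direct sum in plain C++; the name direct_type1_mode and the
// std::vector/std::complex containers are illustrative choices, not part of
// the cufinufft test code.
#include <complex>
#include <cstddef>
#include <vector>

static std::complex<double> direct_type1_mode(const std::vector<double>& x,
                                              const std::vector<std::complex<double>>& c,
                                              int k, int iflag)
{
  // J = i * iflag, matching the sign convention passed to the plan.
  const std::complex<double> J(0.0, static_cast<double>(iflag));
  std::complex<double> Fk(0.0, 0.0);
  for (std::size_t j = 0; j < x.size(); ++j)
    Fk += c[j] * std::exp(J * (static_cast<double>(k) * x[j]));
  return Fk; // compared against fk[N1/2 + k], relative to infnorm(N1, fk)
}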
703302a8add0f16c14966823a444b67a5bbece6f.cu
#include <cmath> #include <complex> #include <helper_cuda.h> #include <iomanip> #include <iostream> #include <random> #include <cufinufft.h> #include <cufinufft/impl.h> #include <cufinufft/utils.h> #include <thrust/complex.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> using cufinufft::utils::infnorm; template <typename T> int run_test(int method, int type, int N1, int M, T tol, T checktol, int iflag) { std::cout << std::scientific << std::setprecision(3); int ier; thrust::host_vector<T> x(M); thrust::host_vector<thrust::complex<T>> c(M); thrust::host_vector<thrust::complex<T>> fk(N1); thrust::device_vector<T> d_x(M); thrust::device_vector<thrust::complex<T>> d_c(M); thrust::device_vector<thrust::complex<T>> d_fk(N1); std::default_random_engine eng(1); std::uniform_real_distribution<T> dist11(-1, 1); auto randm11 = [&eng, &dist11]() { return dist11(eng); }; // Making data for (int i = 0; i < M; i++) { x[i] = M_PI * randm11(); // x in [-pi,pi) } if (type == 1) { for (int i = 0; i < M; i++) { c[i].real(randm11()); c[i].imag(randm11()); } } else if (type == 2) { for (int i = 0; i < N1; i++) { fk[i].real(randm11()); fk[i].imag(randm11()); } } else { std::cerr << "Invalid type " << type << " supplied\n"; return 1; } d_x = x; if (type == 1) d_c = c; else if (type == 2) d_fk = fk; cudaEvent_t start, stop; float milliseconds = 0; float totaltime = 0; cudaEventCreate(&start); cudaEventCreate(&stop); // warm up CUFFT (is slow, takes around 0.2 sec... ) cudaEventRecord(start); { int nf1 = 1; cufftHandle fftplan; cufftPlan1d(&fftplan, nf1, cufft_type<T>(), 1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] dummy warmup call to CUFFT\t %.3g s\n", milliseconds / 1000); // now to the test... cufinufft_plan_t<T> *dplan; const int dim = 1; // Here we setup our own opts, for gpu_method. 
cufinufft_opts opts; ier = cufinufft_default_opts(type, dim, &opts); if (ier != 0) { printf("err %d: cufinufft_default_opts\n", ier); return ier; } opts.gpu_method = method; opts.gpu_maxbatchsize = 1; int nmodes[3] = {N1, 1, 1}; int ntransf = 1; cudaEventRecord(start); ier = cufinufft_makeplan_impl<T>(type, dim, nmodes, iflag, ntransf, tol, &dplan, &opts); if (ier != 0) { printf("err: cufinufft1d_plan\n"); return ier; } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft plan:\t\t %.3g s\n", milliseconds / 1000); cudaEventRecord(start); ier = cufinufft_setpts_impl<T>(M, d_x.data().get(), NULL, NULL, 0, NULL, NULL, NULL, dplan); if (ier != 0) { printf("err: cufinufft_setpts\n"); return ier; } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft setNUpts:\t\t %.3g s\n", milliseconds / 1000); cudaEventRecord(start); ier = cufinufft_execute_impl<T>((cuda_complex<T> *)d_c.data().get(), (cuda_complex<T> *)d_fk.data().get(), dplan); if (ier != 0) { printf("err: cufinufft1d_exec\n"); return ier; } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; float exec_ms = milliseconds; printf("[time ] cufinufft exec:\t\t %.3g s\n", milliseconds / 1000); cudaEventRecord(start); ier = cufinufft_destroy_impl<T>(dplan); if (ier != 0) { printf("err %d: cufinufft1d_destroy\n", ier); return ier; } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft destroy:\t\t %.3g s\n", milliseconds / 1000); printf("[Method %d] %d U pts to %d NU pts in %.3g s: %.3g NU pts/s\n", opts.gpu_method, N1, M, totaltime / 1000, M / totaltime * 1000); printf("\t\t\t\t\t(exec-only thoughput: %.3g NU pts/s)\n", M / exec_ms * 1000); T rel_error = std::numeric_limits<T>::max(); if (type == 1) { fk = d_fk; int nt1 = 0.37 * N1; // choose some mode index to check thrust::complex<T> Ft = thrust::complex<T>(0, 0), J = thrust::complex<T>(0.0, iflag); for (int j = 0; j < M; ++j) Ft += c[j] * exp(J * (nt1 * x[j])); // crude direct int it = N1 / 2 + nt1; // index in complex F as 1d array rel_error = abs(Ft - fk[it]) / infnorm(N1, (std::complex<T> *)fk.data()); printf("[gpu ] one mode: rel err in F[%d] is %.3g\n", nt1, rel_error); } else if (type == 2) { c = d_c; int jt = M / 2; // check arbitrary choice of one targ pt thrust::complex<T> J = thrust::complex<T>(0, iflag); thrust::complex<T> ct = thrust::complex<T>(0, 0); int m = 0; for (int m1 = -(N1 / 2); m1 <= (N1 - 1) / 2; ++m1) ct += fk[m++] * exp(J * (m1 * x[jt])); // crude direct rel_error = abs(c[jt] - ct) / infnorm(M, (std::complex<T> *)c.data()); printf("[gpu ] one targ: rel err in c[%d] is %.3g\n", jt, rel_error); } return std::isnan(rel_error) || rel_error > checktol; } int main(int argc, char *argv[]) { if (argc != 8) { fprintf(stderr, "Usage: cufinufft1d_test method type N1 M tol checktol prec\n" "Arguments:\n" " method: One of\n" " 1: nupts driven\n" " type: Type of transform (1, 2)\n" " N1: Number of fourier modes\n" " M: The number of non-uniform points\n" " tol: NUFFT tolerance\n" " checktol: relative error to pass test\n" " precision: f or d\n"); return 1; } const int method = atoi(argv[1]); const int type = atoi(argv[2]); const int N1 = atof(argv[3]); const int M = atof(argv[4]); const double tol = 
atof(argv[5]);
  const double checktol = atof(argv[6]);
  const int iflag = 1;
  const char prec = argv[7][0];
  if (prec == 'f')
    return run_test<float>(method, type, N1, M, tol, checktol, iflag);
  else if (prec == 'd')
    return run_test<double>(method, type, N1, M, tol, checktol, iflag);
  else
    return -1;
}
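// The type-2 branch of run_test checks one arbitrary target point c[jt] the
// same way, against the direct sum
//   c[j] = sum_{m1 = -N1/2}^{(N1-1)/2} fk[m] * exp(i * iflag * m1 * x[j]).
// A minimal host-side sketch of that sum in plain C++; the name
// direct_type2_target and the container types are illustrative only, not part
// of the cufinufft test code.
#include <complex>
#include <cstddef>
#include <vector>

static std::complex<double> direct_type2_target(const std::vector<std::complex<double>>& fk,
                                                double xj, int N1, int iflag)
{
  const std::complex<double> J(0.0, static_cast<double>(iflag));
  std::complex<double> cj(0.0, 0.0);
  std::size_t m = 0;
  for (int m1 = -(N1 / 2); m1 <= (N1 - 1) / 2; ++m1)
    cj += fk[m++] * std::exp(J * (static_cast<double>(m1) * xj));
  return cj; // compared against c[jt], relative to infnorm(M, c)
}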
9dc82ad6ef402fe4ab7a88a4b0b4a20dfacc7eba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /////////////////////////////////////////////////////////////////////////////// // // The MIT License // // Copyright (c) 2006 Scientific Computing and Imaging Institute, // University of Utah (USA) // // License for the specific language governing rights and limitations under // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. // /////////////////////////////////////////////////////////////////////////////// #ifndef ELVIS_VOLUME_RENDERING_FULL_CU #define ELVIS_VOLUME_RENDERING_FULL_CU #include <ElVis/Core/Float.cu> #include <ElVis/Core/FieldEvaluator.cu> #include <ElVis/Math/TrapezoidalIntegration.hpp> #include <ElVis/Core/TransferFunction.h> #include <math_functions.h> #include <ElVis/Core/GaussKronrod.cu> namespace ElVis { enum IntegrationStatus { eFinished, ePartial }; __device__ void ActualH( ElVisFloat a, ElVisFloat b, ElVisFloat desiredH, ElVisFloat& h, int& n) { ElVisFloat d = (b - a); n = Floor(d / desiredH); if (n == 0) { h = d; n = 1; } else { h = d / (ElVisFloat)(n); } } struct FieldTrapezoidalIntegration { // n is the number of intervals. // So evaluation is at n+1 points. 
template <typename T, typename IntegrandType, typename FieldEvaluatorType> ELVIS_DEVICE static T CompositeIntegration(const IntegrandType& integrand, const FieldEvaluatorType& field, const T& a, const T& b, unsigned int n, bool traceEnabled) { // if( traceEnabled ) // { // ELVIS_PRINTF("Running trapezoidal rule on interval // [%2.15f, %2.15f] with %d samples\n", a, b, n); // } T h = (b - a); if (n > 0) { h = (b - a) / (n); } T result = 0.0; for (unsigned int i = 1; i < n; ++i) { ElVisFloat t = a + i * h; ElVisFloat s = field(t); ElVisFloat integrandValue = integrand(t, s); result += integrandValue; // if( traceEnabled ) // { // ELVIS_PRINTF("Trapezoidal sample at t %2.15f, // field %2.15f, integrand value %2.15f\n", t, s, // integrandValue); // } } ElVisFloat sa = field(a); ElVisFloat sb = field(b); result += .5 * integrand(a, sa) + .5 * integrand(b, sb); result *= h; // if( traceEnabled ) // { // ELVIS_PRINTF("Finalizing Trapezoidal sample at t // (%2.15f, %2.15f), field (%2.15f, %2.15f), result %2.15f // and h %2.15f\n", a, b, sa, sb, result, h); // ELVIS_PRINTF("Finished trapezoidal on interval [%2.15f, // %2.15f] with endpoint transfer samples %2.15f, %2.15f // with result %2.15f\n", a, b, integrand(a, sa), // integrand(b, sb), result); // } return result; } }; struct InnerIntegralFunctor { ELVIS_DEVICE ElVisFloat GetMaxValue(const Interval<ElVisFloat>& domain) const { return transferFunction->GetMaxValue(eDensity, domain); } ELVIS_DEVICE ElVisFloat operator()(const ElVisFloat& t, const ElVisFloat& s, bool traceEnabled = false) const { return transferFunction->Sample(eDensity, s); } TransferFunction* transferFunction; }; struct OuterIntegralIntegrandWithInnerTrapezoidal { // __device__ ElVisFloat GetMaxValue(const Interval<ElVisFloat>& // domain) const // { // return transferFunction->GetMaxValue(channel, domain) * // transferFunction->GetMaxValue(eDensity, domain); // } ELVIS_DEVICE ElVisFloat3 operator()(const ElVisFloat& t, const ElVisFloat& s, bool traceEnabled = false) const { // if( traceEnabled ) // { // ELVIS_PRINTF("Starting outer integrand at %2.15f, // accumulatedDensity = %2.15f, innerT = %2.15f, innerH = // %2.15f\n", t, accumulatedDensity, innerT, innerH); // } ElVisFloat3 c = transferFunction->SampleColor(s); ElVisFloat d = transferFunction->Sample(eDensity, s); int numberAdditionalInnerSamples = 0; ElVisFloat newH; ActualH(innerT, t, innerH, newH, numberAdditionalInnerSamples); accumulatedDensity += FieldTrapezoidalIntegration::CompositeIntegration( *innerIntegral, *field, innerT, t, numberAdditionalInnerSamples, traceEnabled); // If 0, then the endponits have already been calculated. // By setting n to a different but hopefully close h, we don't need this // fixup. 
// if( numberAdditionalInnerSamples != 0 ) // { // ElVisFloat t0 = t-numberAdditionalInnerSamples*innerH; // ElVisFloat s0 = (*field)(t0); // accumulatedDensity += MAKE_FLOAT(.5)*(t-t0)*( // (*innerIntegral)(t0, s0) + (*innerIntegral)(t, s)); // if( traceEnabled ) // { // ELVIS_PRINTF("Inner Integral final adjustment t0 = // %2.15f, s0 = %2.15f\n", t0, s0); // ELVIS_PRINTF("Sampling outer integrand at %2.15f, // with color sample %2.15f, density %2.15f, and // accumulated density %2.15f\n", t, c, d, // accumulatedDensity); // } // } innerT = t; return c * d * exp(-accumulatedDensity); } TransferFunction* transferFunction; InnerIntegralFunctor* innerIntegral; FieldEvaluator* field; mutable ElVisFloat accumulatedDensity; mutable ElVisFloat innerT; mutable ElVisFloat innerH; }; // struct OuterIntegralIntegrand // { // __device__ ElVisFloat GetMaxValue(const Interval<ElVisFloat>& // domain) const // { // return transferFunction->GetMaxValue(channel, domain) * // transferFunction->GetMaxValue(eDensity, domain); // } // __device__ ElVisFloat operator() (const ElVisFloat& t, const // ElVisFloat& s) const // { // ElVisFloat c = transferFunction->Sample(channel, s); // ElVisFloat d = transferFunction->Sample(eDensity, s); // ElVisFloat accumulatedDensity = // innerIntegralApproximation->SampleInnerIntegral(t, s, eDensity, // transferFunction); // return c*d*Exp(-accumulatedDensity); // } // TransferFunction* transferFunction; // TransferFunctionChannel channel; // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21>* // innerIntegralApproximation; // }; /// \brief epsilon - the desired global error. extern "C" __global__ void IntegrateSegmentSingleThreadPerRayFullVersion( ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, int fieldId, TransferFunction* transferFunction, ElVisFloat epsilon, bool enableTrace, ElVisFloat* __restrict__ densityAccumulator, ElVisFloat3* __restrict__ colorAccumulator) { // int2 trace = make_int2(512/2, 512/2); // // Step 1 - Categorize the field along the segment. // // // // Case 1: Total space skipping. When the density function is // identically 0 over the entire segment, then there is nothing to // do. // // Case 2: Density only. If the color components are identially // 0, then we only need to integrate the density contribution. // // 2.1: No breakpoints. Use gauss-kronrod on the entire // interval. The 7-15 rule is probably sufficient. // // 2.2: Breakpoints. Adaptive trapezoidal. // // Case 3: Everything. Both density and color contribute. // // 2.1. No breakpoints in either. // // 2.2. Color breakpoints, density no // // 2.3. Color no breakpoints, density yes. // // 3.5 Color and density have breakpoints. // // First pass - do adaptive trapezoidal on the density because I // know I can then evaluate at any point within my error // // budget. Then I can do adaptive or gauss-kronrod on the main // integral without a problem. // // If the color is doing gauss-kronrod, then I can incrementally // evaluate the density using adaptive trapezoidal and don't need to // keep the entire // // structure around. It is only when the outer integral is // adaptive that I need to do that (case 2.2). // // In cases 2 and 3, each component can be integrated differently // based on breakpoints. 
// // Density integration // uint2 pixel; // pixel.x = blockIdx.x * blockDim.x + threadIdx.x; // pixel.y = blockIdx.y * blockDim.y + threadIdx.y; // bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && // enableTrace); // if( traceEnabled ) // { // ELVIS_PRINTF("Esilon %2.10f\n", epsilon); // } // uint2 screen; // screen.x = gridDim.x * blockDim.x; // screen.y = gridDim.y * blockDim.y; // int segmentIndex = pixel.x + screen.x*pixel.y; // if( segmentEnd[segmentIndex] < MAKE_FLOAT(0.0) ) // { // return; // } // int elementId = segmentElementId[segmentIndex]; // if( elementId == -1 ) // { // return; // } // int elementTypeId = segmentElementType[segmentIndex]; // ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex]; // ElVisFloat3 color = colorAccumulator[segmentIndex]; // ElVisFloat a = segmentStart[segmentIndex]; // ElVisFloat b = segmentEnd[segmentIndex]; //// if( traceEnabled ) //// { //// b = 2.024846; //// } // ElVisFloat3 dir = segmentDirection[segmentIndex]; // ElVisFloat d = (b-a); // if( d == MAKE_FLOAT(0.0) ) // { // return; // } // // First test for density identically 0. This means the segment // does not contribute at // // all to the integral and can be skipped. // ElVisFloat3 p0 = origin + a*dir; // ElVisFloat3 p1 = origin + b*dir; // ElVisFloat s0 = EvaluateFieldCuda(elementId, elementTypeId, p0); // ElVisFloat s1 = EvaluateFieldCuda(elementId, elementTypeId, p1); // ElVis::Interval<ElVisFloat> range; // EstimateRangeCuda(elementId, elementTypeId, fieldId, p0, p1, // range); // if( traceEnabled ) // { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // } // bool densityContainsBreakpoints = // transferFunction->RangeContainsAtLeastOneBreakpoint(eDensity, // range); // if( traceEnabled ) // { // ELVIS_PRINTF("Density contains breakpoints // %d.\n",densityContainsBreakpoints ? 1 : 0 ); // } // if( !densityContainsBreakpoints ) // { // // No breakpoints. If 0 at the endpoints, then 0 everywhere. // if( transferFunction->Sample(eDensity, s0) == MAKE_FLOAT(0.0) // && // transferFunction->Sample(eDensity, s1) == MAKE_FLOAT(0.0) // ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Density is identically 0.\n"); // } // // Case 1 // return; // } // } // // At this point we know that there is some non-0 density along // the segment. // // Check if the color is identically 0. If so, we can just // integrate the // // density. // bool colorContainsBreakpoints = // transferFunction->ColorContainsAtLeastOneBreakpoint(range); // if( traceEnabled ) // { // ELVIS_PRINTF("Color contains breakpoints // %d.\n",colorContainsBreakpoints ? 1 : 0 ); // } // if( !colorContainsBreakpoints ) // { // if( transferFunction->Sample(eRed, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eRed, s1) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eGreen, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eGreen, s1) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eBlue, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eBlue, s1) == MAKE_FLOAT(0.0) ) // { // // Case 2 - Integrate density only. // if( densityContainsBreakpoints ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Integrate density alone using // adaptive trapezoidal.\n"); // } // // Case 2.1 // // Integrate density using adaptive trapezoidal. // } // else // { // if( traceEnabled ) // { // ELVIS_PRINTF("Integrate density alone using // gauss-kronrod.\n"); // } // // Case 2.2 // // Integrate density using gauss-kronrod. 
// } // return; // } // } // // Case 3: Everything. Both density and color contribute. // // 2.1. No breakpoints in either. // // 2.2. Color breakpoints, density no // // 2.3. Color no breakpoints, density yes. // // 3.5 Color and density have breakpoints. // if( colorContainsBreakpoints ) // { // // Need adaptive trapezoidal for the outer integral. So // evalute the density over the entire range, // // then do adaptive on the outer, sampling the inner function. // if( traceEnabled ) // { // ELVIS_PRINTF("Adaptive trapezoidal for the outer, stack // backed for inner on [%2.10f, %2.10f].\n", a, b); // } // FieldEvaluator f; // f.Origin = origin; // f.Direction = dir; // f.ElementId = elementId; // f.ElementType = elementTypeId; // f.FieldId = fieldId; // bool reachedMax = false; // ElVisFloat maxValue = transferFunction->GetMaxValue(eDensity, // range); // ElVisFloat estimate = maxValue*(b-a); // maxValue = MAKE_FLOAT(1.0); // estimate = MAKE_FLOAT(1.0); // InnerIntegralFunctor innerIntegralFunc; // innerIntegralFunc.transferFunction = transferFunction; // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21> // innerIntegralApproximation; // innerIntegralApproximation.Initialize(a, b, innerIntegralFunc, // f, epsilon, estimate, maxValue, reachedMax, traceEnabled); // OuterIntegralIntegrand outerIntegralRedFunc; // outerIntegralRedFunc.channel = eRed; // outerIntegralRedFunc.innerIntegralApproximation = // &innerIntegralApproximation; // outerIntegralRedFunc.transferFunction = transferFunction; // OuterIntegralIntegrand outerIntegralGreenFunc; // outerIntegralGreenFunc.channel = eGreen; // outerIntegralGreenFunc.innerIntegralApproximation = // &innerIntegralApproximation; // outerIntegralGreenFunc.transferFunction = transferFunction; // OuterIntegralIntegrand outerIntegralBlueFunc; // outerIntegralBlueFunc.channel = eBlue; // outerIntegralBlueFunc.innerIntegralApproximation = // &innerIntegralApproximation; // outerIntegralBlueFunc.transferFunction = transferFunction; // IntegrationStatus innerIntegrationStatus = ePartial; // int loopGuard = 0; // do // { // innerIntegrationStatus = // innerIntegralApproximation.ContinueIntegration(innerIntegralFunc, // f, epsilon, estimate, maxValue, reachedMax, traceEnabled); // ++loopGuard; // Interval<ElVisFloat> validDomain = // innerIntegralApproximation.ValidDomain(); // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21> // redApproximation; // if(traceEnabled) // { // ELVIS_PRINTF("Finished evaluating the interval // [%2.10f, %2.10f]\n", validDomain.GetLow(), // validDomain.GetHigh()); // } // redApproximation.Initialize(validDomain.GetLow(), // validDomain.GetHigh(), outerIntegralRedFunc, f, epsilon, // estimate, maxValue, reachedMax, false); // int redLoopGuard = 0; // IntegrationStatus redStatus = ePartial; // do // { // ++redLoopGuard; // redApproximation.ContinueIntegration(outerIntegralRedFunc, // f, epsilon, estimate, maxValue, reachedMax, false); // color.x += redApproximation.OverallValue(); // } // while( redStatus != eFinished && redLoopGuard < 10); // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21> // greenApproximation; // greenApproximation.Initialize(validDomain.GetLow(), // validDomain.GetHigh(), outerIntegralGreenFunc, f, epsilon, // estimate, maxValue, reachedMax, false); // int greenLoopGuard = 0; // IntegrationStatus greenStatus = ePartial; // do // { // ++greenLoopGuard; // greenApproximation.ContinueIntegration(outerIntegralGreenFunc, // f, epsilon, estimate, maxValue, reachedMax, false); // color.y += greenApproximation.OverallValue(); // } // 
while( greenStatus != eFinished && greenLoopGuard < 10); // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21> // blueApproximation; // blueApproximation.Initialize(validDomain.GetLow(), // validDomain.GetHigh(), outerIntegralBlueFunc, f, epsilon, // estimate, maxValue, reachedMax, false); // int blueLoopGuard = 0; // IntegrationStatus blueStatus = ePartial; // do // { // ++blueLoopGuard; // blueApproximation.ContinueIntegration(outerIntegralBlueFunc, // f, epsilon, estimate, maxValue, reachedMax, false); // color.z += blueApproximation.OverallValue(); // } // while( blueStatus != eFinished && blueLoopGuard < 10); // } // while( innerIntegrationStatus != eFinished && loopGuard < 10 // ); // accumulatedDensity += // innerIntegralApproximation.OverallValue(); //// if( traceEnabled ) //// { //// ELVIS_PRINTF("################## Density\n"); //// } //// innerIntegralApproximation.Integrate(a, b, transferFunction, ///eDensity, f, epsilon, estimate, maxValue, reachedMax, traceEnabled); //// ElVisFloat redMax = transferFunction->GetMaxValue(eRed, ///range); //// OuterIntegralAdaptiveTrapezoidal<ElVisFloat, 10> ///redIntegral; //// if( traceEnabled ) //// { //// ELVIS_PRINTF("################## Red\n"); //// } //// redIntegral.Integrate(a, b, transferFunction, eRed, f, ///innerIntegralApproximation, epsilon, maxValue*redMax*(b-a), ///maxValue*redMax, reachedMax, traceEnabled, accumulatedDensity); //// color.x += redIntegral.OverallValue(); //// ElVisFloat greenMax = transferFunction->GetMaxValue(eGreen, ///range); //// OuterIntegralAdaptiveTrapezoidal<ElVisFloat, 10> ///greenIntegral; //// if( traceEnabled ) //// { //// ELVIS_PRINTF("################## Green\n"); //// } //// greenIntegral.Integrate(a, b, transferFunction, eGreen, f, ///innerIntegralApproximation, epsilon, maxValue*greenMax*(b-a), ///maxValue*greenMax, reachedMax, traceEnabled, accumulatedDensity); //// color.y += greenIntegral.OverallValue(); //// ElVisFloat blueMax = transferFunction->GetMaxValue(eBlue, ///range); //// OuterIntegralAdaptiveTrapezoidal<ElVisFloat, 10> ///blueIntegral; //// if( traceEnabled ) //// { //// ELVIS_PRINTF("################## Blue\n"); //// } //// blueIntegral.Integrate(a, b, transferFunction, eBlue, f, ///innerIntegralApproximation, epsilon, maxValue*blueMax*(b-a), ///maxValue*blueMax, reachedMax, traceEnabled, accumulatedDensity); //// color.z += blueIntegral.OverallValue(); //// accumulatedDensity += ///innerIntegralApproximation.OverallValue(); // } // else // { // // Color doesn't have breakpoints, so the main integral can be // evaluated with Gauss-Kronrod. // // We'll do adaptive trapezoidal in the density, adding on to // the existing integral as we sample // // the gauss-kronrod points. This way we don't have to keep // the adaptive structure around. 
// if( traceEnabled ) // { // ELVIS_PRINTF("Gauss-Kronrod outer, adaptive trapezoidal // inner..\n"); // } // } // densityAccumulator[segmentIndex] = accumulatedDensity; // colorAccumulator[segmentIndex] = color; } template <unsigned int block_size> __device__ void PrefixSumTrapezoidalIntegration( const ElVisFloat& initialValue, volatile ElVisFloat* __restrict__ input, volatile ElVisFloat* __restrict__ output, ElVisFloat h, bool traceEnabled) { ElVisFloat val = MAKE_FLOAT(.5) * input[threadIdx.x]; output[threadIdx.x] = val; __syncthreads(); if (threadIdx.x == 0) { output[0] = MAKE_FLOAT(0.0); } if (traceEnabled) { // ELVIS_PRINTF("Input Array Values: ("); // for(unsigned int i = 0; i < block_size; ++i) // { // ELVIS_PRINTF("%f, ", input[i]); // } // ELVIS_PRINTF(")\n\n"); // ELVIS_PRINTF("Output Array Values: ("); // for(unsigned int i = 0; i < block_size; ++i) // { // ELVIS_PRINTF("%f, ", output[i]); // } // ELVIS_PRINTF(")\n\n"); } if (block_size > 1) { if (threadIdx.x >= 1) { val = output[threadIdx.x - 1] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 2) { if (threadIdx.x >= 2) { val = output[threadIdx.x - 2] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 4) { if (threadIdx.x >= 4) { val = output[threadIdx.x - 4] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 8) { if (threadIdx.x >= 8) { val = output[threadIdx.x - 8] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 16) { if (threadIdx.x >= 16) { val = output[threadIdx.x - 16] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 32) { if (threadIdx.x >= 32) { val = output[threadIdx.x - 32] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 64) { if (threadIdx.x >= 64) { val = output[threadIdx.x - 64] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 128) { if (threadIdx.x >= 128) { val = output[threadIdx.x - 128] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 256) { if (threadIdx.x >= 256) { val = output[threadIdx.x - 256] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 512) { if (threadIdx.x >= 512) { val = output[threadIdx.x - 512] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } output[threadIdx.x] = initialValue + h * (output[threadIdx.x]); __syncthreads(); if (traceEnabled) { // ELVIS_PRINTF("Result Array Values: ("); // for(unsigned int i = 0; i < block_size; ++i) // { // ELVIS_PRINTF("%f, ", output[i]); // } // ELVIS_PRINTF(")\n\n"); } } template <unsigned int block_size> __device__ void PrefixSum(volatile ElVisFloat* __restrict__ input, volatile ElVisFloat* __restrict__ output) { ElVisFloat val = input[threadIdx.x]; output[threadIdx.x] = val; __syncthreads(); if (block_size > 1) { if (threadIdx.x >= 1) { val = output[threadIdx.x - 1] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 2) { if (threadIdx.x >= 2) { val = output[threadIdx.x - 2] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 4) { if (threadIdx.x >= 4) { val = output[threadIdx.x - 4] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 8) { if (threadIdx.x >= 8) { val = output[threadIdx.x - 8] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if 
(block_size > 16) { if (threadIdx.x >= 16) { val = output[threadIdx.x - 16] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 32) { if (threadIdx.x >= 32) { val = output[threadIdx.x - 32] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 64) { if (threadIdx.x >= 64) { val = output[threadIdx.x - 64] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 128) { if (threadIdx.x >= 128) { val = output[threadIdx.x - 128] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 256) { if (threadIdx.x >= 256) { val = output[threadIdx.x - 256] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 512) { if (threadIdx.x >= 512) { val = output[threadIdx.x - 512] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } } template <typename IntegrationType> __device__ void IntegrateDensityFunction() { } // Assumes a single warp per ray, and that each block only contains a single // warp. extern "C" __global__ void IntegrateSegmentWarpPerSegment( ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, int fieldId, TransferFunction* transferFunction, ElVisFloat epsilon, bool enableTrace, ElVisFloat* __restrict__ densityAccumulator, ElVisFloat3* __restrict__ colorAccumulator) { // __shared__ int2 trace; // trace = make_int2(512/2, 512/2); // // // Step 1 - Categorize the field along the segment. // // // // Case 1: Total space skipping. When the density function is // identically 0 over the entire segment, then there is nothing to // do. // // Case 2: Density only. If the color components are identially // 0, then we only need to integrate the density contribution. // // 2.1: No breakpoints. Use gauss-kronrod on the entire // interval. The 7-15 rule is probably sufficient. // // 2.2: Breakpoints. Adaptive trapezoidal. // // Case 3: Everything. Both density and color contribute. // // 2.1. No breakpoints in either. // // 2.2. Color breakpoints, density no // // 2.3. Color no breakpoints, density yes. // // 3.5 Color and density have breakpoints. // // First pass - do adaptive trapezoidal on the density because I // know I can then evaluate at any point within my error // // budget. Then I can do adaptive or gauss-kronrod on the main // integral without a problem. // // // If the color is doing gauss-kronrod, then I can incrementally // evaluate the density using adaptive trapezoidal and don't need to // keep the entire // // structure around. It is only when the outer integral is // adaptive that I need to do that (case 2.2). // // // In cases 2 and 3, each component can be integrated differently // based on breakpoints. 
// // // Density integration // // __shared__ uint2 pixel; // pixel.x = blockIdx.x; // pixel.y = blockIdx.y; // // bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && // enableTrace && threadIdx.x == 0); // //// if( traceEnabled ) //// { //// ELVIS_PRINTF("Esilon %2.10f\n", epsilon); //// } // __shared__ uint2 screen; // screen.x = gridDim.x; // screen.y = gridDim.y; // // __shared__ int segmentIndex; // segmentIndex = pixel.x + screen.x*pixel.y; // __shared__ ElVisFloat b; // b = segmentEnd[segmentIndex]; // if( b < MAKE_FLOAT(0.0) ) // { // return; // } // // __shared__ int elementId; // elementId = segmentElementId[segmentIndex]; // if( elementId == -1 ) // { // return; // } // // __shared__ int elementTypeId; // elementTypeId = segmentElementType[segmentIndex]; // __shared__ ElVisFloat accumulatedDensity; // accumulatedDensity = densityAccumulator[segmentIndex]; // __shared__ ElVisFloat3 color; // color = colorAccumulator[segmentIndex]; // __shared__ ElVisFloat a; // a = segmentStart[segmentIndex]; // // // __shared__ ElVisFloat3 dir; // dir = segmentDirection[segmentIndex]; // __shared__ ElVisFloat d; // d = (b-a); // // if( d == MAKE_FLOAT(0.0) ) // { // return; // } // // __shared__ ElVisFloat h; // h = d/31; // // // First test for density identically 0. This means the segment // does not contribute at // // all to the integral and can be skipped. // __shared__ ElVisFloat3 p0; // p0 = origin + a*dir; // __shared__ ElVisFloat3 p1; // p1 = origin + b*dir; // ElVisFloat s0 = EvaluateFieldCuda(elementId, elementTypeId, p0); // ElVisFloat s1 = EvaluateFieldCuda(elementId, elementTypeId, p1); // if( traceEnabled ) // { // ELVIS_PRINTF("%f, %f\n", s0, s1); // } // ElVis::Interval<ElVisFloat> range; // EstimateRangeCuda(elementId, elementTypeId, p0, p1, range); // // if( traceEnabled ) // { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // } // // bool densityContainsBreakpoints = // transferFunction->RangeContainsAtLeastOneBreakpoint(eDensity, // range); // if( traceEnabled ) // { // ELVIS_PRINTF("Density contains breakpoints // %d.\n",densityContainsBreakpoints ? 1 : 0 ); // } // if( !densityContainsBreakpoints ) // { // // No breakpoints. If 0 at the endpoints, then 0 everywhere. // if( transferFunction->Sample(eDensity, s0) == MAKE_FLOAT(0.0) // && // transferFunction->Sample(eDensity, s1) == MAKE_FLOAT(0.0) // ) // { //// if( traceEnabled ) //// { //// ELVIS_PRINTF("Density is identically 0.\n"); //// } // // Case 1 // return; // } // } // // bool colorContainsBreakpoints = // transferFunction->ColorContainsAtLeastOneBreakpoint(range); // if( traceEnabled ) // { // ELVIS_PRINTF("Color contains breakpoints // %d.\n",colorContainsBreakpoints ? 1 : 0 ); // } // if( !colorContainsBreakpoints ) // { // if( transferFunction->Sample(eRed, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eRed, s1) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eGreen, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eGreen, s1) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eBlue, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eBlue, s1) == MAKE_FLOAT(0.0) ) // { // // Case 2 - Integrate density only. // if( densityContainsBreakpoints ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Integrate density alone using // adaptive trapezoidal.\n"); // } // // // Case 2.1 // // Integrate density using adaptive trapezoidal. 
// } // else // { // if( traceEnabled ) // { // ELVIS_PRINTF("Integrate density alone using // gauss-kronrod.\n"); // } // // Case 2.2 // // Integrate density using gauss-kronrod. // //IntegrateDensityFunction<G7K15>(); // } // return; // } // } // // //// // Sample the field. //// __shared__ ElVisFloat fieldSamples[32]; //// ElVisFloat3 p = origin + (a+threadIdx.x*h)*dir; //// fieldSamples[threadIdx.x] = EvaluateFieldCuda(elementId, ///elementTypeId, p); //// __syncthreads(); // //// __shared__ ElVisFloat density[32]; //// density[threadIdx.x] = transferFunction->Sample(eDensity, ///fieldSamples[threadIdx.x]); //// __syncthreads(); // //// __shared__ ElVisFloat accumulatedDensityIntegration[32]; //// PrefixSumTrapezoidalIntegration<32>(accumulatedDensity, ///&density[0], &accumulatedDensityIntegration[0], h, traceEnabled); //// __syncthreads(); // //// __shared__ ElVisFloat red[32]; //// __shared__ ElVisFloat green[32]; //// __shared__ ElVisFloat blue[32]; // //// ElVisFloat attenuation = ///Exp(-accumulatedDensityIntegration[threadIdx.x]); //// red[threadIdx.x] = transferFunction->Sample(eRed, ///fieldSamples[threadIdx.x])*density[threadIdx.x]*attenuation; //// green[threadIdx.x] = transferFunction->Sample(eGreen, ///fieldSamples[threadIdx.x])*density[threadIdx.x]*attenuation;; //// blue[threadIdx.x] = transferFunction->Sample(eBlue, ///fieldSamples[threadIdx.x])*density[threadIdx.x]*attenuation;; //// __syncthreads(); // //// __shared__ ElVisFloat redIntegral[32]; //// __shared__ ElVisFloat greenIntegral[32]; //// __shared__ ElVisFloat blueIntegral[32]; // //// PrefixSumTrapezoidalIntegration<32>(color.x, &red[0], ///&redIntegral[0], h, traceEnabled); //// PrefixSumTrapezoidalIntegration<32>(color.y, &green[0], ///&greenIntegral[0], h, traceEnabled); //// PrefixSumTrapezoidalIntegration<32>(color.z, &blue[0], ///&blueIntegral[0], h, traceEnabled); // //// if( threadIdx.x == 0 ) //// { //// densityAccumulator[segmentIndex] = ///accumulatedDensityIntegration[31]; //// color.x += redIntegral[31]; //// color.y += greenIntegral[31]; //// color.z += blueIntegral[31]; // //// colorAccumulator[segmentIndex] = color; //// } } // Actual code extern "C" __global__ void //__launch_bounds__(32, 8) IntegrateSegmentSingleThreadPerRay( ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, int fieldId, TransferFunction* transferFunction, ElVisFloat epsilon, ElVisFloat desiredH, bool enableTrace, int tracex, int tracey, int* numSamples, bool renderIntegrationType, bool enableEmptySpaceSkipping, ElVisFloat* __restrict__ densityAccumulator, ElVisFloat3* __restrict__ colorAccumulator) { int2 trace = make_int2(tracex, tracey); uint2 pixel; pixel.x = blockIdx.x * blockDim.x + threadIdx.x; pixel.y = blockIdx.y * blockDim.y + threadIdx.y; bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && enableTrace); if (traceEnabled) { // ELVIS_PRINTF("Esilon %2.10f\n", epsilon); // ELVIS_PRINTF("Number of samples enabled %d\n", (numSamples ? 
// 1: 0)); // if( numSamples ) // { // ELVIS_PRINTF("Value of samples: %d\n", numSamples[0]); // } } uint2 screen; screen.x = gridDim.x * blockDim.x; screen.y = gridDim.y * blockDim.y; int segmentIndex = pixel.x + screen.x * pixel.y; // if( traceEnabled ) // { // ELVIS_PRINTF("Segment index %d\n", segmentIndex); // } if (segmentEnd[segmentIndex] < MAKE_FLOAT(0.0)) { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because ray has left volume based // on segment end\n", segmentIndex); // } return; } int elementId = segmentElementId[segmentIndex]; // if( traceEnabled ) // { // ELVIS_PRINTF("Element id %d\n", elementId); // } if (elementId == -1) { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because element id is 0\n", // segmentIndex); // } return; } int elementTypeId = segmentElementType[segmentIndex]; ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex]; ElVisFloat3 color = colorAccumulator[segmentIndex]; ElVisFloat a = segmentStart[segmentIndex]; ElVisFloat b = segmentEnd[segmentIndex]; ElVisFloat3 dir = segmentDirection[segmentIndex]; ElVisFloat d = (b - a); // if( traceEnabled ) // { // ELVIS_PRINTF("Ray Direction (%2.10f, %2.10f, %2.10f), segment // distance %2.10f\n", dir.x, dir.y, dir.z, d); // } if (d == MAKE_FLOAT(0.0)) { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because d is 0\n", dir.x, dir.y, // dir.z, d); // } return; } int n = Floor(d / desiredH); ElVisFloat h; if (n == 0) { h = b - a; n = 1; } else { h = d / (ElVisFloat)(n); } // if( traceEnabled ) // { // ELVIS_PRINTF("Total segment range: [%2.15f, %2.15f], segment // Id %d\n", segmentStart[segmentIndex], // segmentEnd[segmentIndex], segmentIndex); // ELVIS_PRINTF("D = %2.15f, H = %2.15f, N = %d\n", d, h, n); // } // First test for density identically 0. This means the segment does not // contribute at // all to the integral and can be skipped. ElVisFloat3 p0 = origin + a * dir; ElVisFloat3 p1 = origin + b * dir; ElVis::Interval<ElVisFloat> range; EstimateRangeCuda(elementId, elementTypeId, fieldId, p0, p1, range); if (traceEnabled) { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // ELVIS_PRINTF("Origin (%f, %f, %f)\n", origin.x, origin.y, // origin.z); // ELVIS_PRINTF("Direction (%f, %f, %f)\n", dir.x, dir.y, // dir.z); // ELVIS_PRINTF("Integration domain [%f, %f]\n", a, b); } bool densityContainsBreakpoints = transferFunction->RangeContainsAtLeastOneBreakpoint(eDensity, range); Interval<ElVisFloat> densityRange = transferFunction->Sample(eDensity, range); if (traceEnabled) { // ELVIS_PRINTF("Density contains breakpoints // %d.\n",densityContainsBreakpoints ? 1 : 0 ); // ELVIS_PRINTF("Density range (%f, %f).\n", // densityRange.GetLow(), densityRange.GetHigh()); } if (enableEmptySpaceSkipping) { if (densityRange.GetLow() == MAKE_FLOAT(0.0) && densityRange.GetHigh() == MAKE_FLOAT(0.0)) { if (traceEnabled) { // ELVIS_PRINTF("Density is identically 0.\n"); } // if( renderIntegrationType ) // { // colorAccumulator[segmentIndex].x += // MAKE_FLOAT(20.0)/MAKE_FLOAT(255.0); // } return; } } // At this point we know that there is some non-0 density along the segment. // Check if the color is identically 0. If so, we can just integrate the // density. 
bool colorContainsBreakpoints = transferFunction->ColorContainsAtLeastOneBreakpoint(range); Interval<ElVisFloat> redRange = transferFunction->Sample(eRed, range); Interval<ElVisFloat> greenRange = transferFunction->Sample(eGreen, range); Interval<ElVisFloat> blueRange = transferFunction->Sample(eBlue, range); Interval<ElVisFloat> totalColorRange; totalColorRange.Combine(redRange); totalColorRange.Combine(blueRange); totalColorRange.Combine(greenRange); if (traceEnabled) { // ELVIS_PRINTF("Color contains breakpoints // %d.\n",colorContainsBreakpoints ? 1 : 0 ); // ELVIS_PRINTF("Red range (%f, %f).\n", redRange.GetLow(), // redRange.GetHigh()); // ELVIS_PRINTF("Green range (%f, %f).\n", greenRange.GetLow(), // greenRange.GetHigh()); // ELVIS_PRINTF("Blue range (%f, %f).\n", blueRange.GetLow(), // blueRange.GetHigh()); // ELVIS_PRINTF("Total Color range (%f, %f).\n", // totalColorRange.GetLow(), blueRange.GetHigh()); } // If the color does not contribute, then we can just integrate the density. FieldEvaluator f; f.Origin = origin; f.Direction = dir; f.ElementId = elementId; f.ElementType = elementTypeId; f.sampleCount = numSamples; f.FieldId = fieldId; // bool colorEmpty = totalColorRange.GetLow() == MAKE_FLOAT(0.0) && // totalColorRange.GetHigh() == MAKE_FLOAT(0.0); // int doDensityOnly = __all(colorEmpty); // __syncthreads(); if (totalColorRange.GetLow() == MAKE_FLOAT(0.0) && totalColorRange.GetHigh() == MAKE_FLOAT(0.0)) // if( doDensityOnly ) { InnerIntegralFunctor innerIntegralFunc; innerIntegralFunc.transferFunction = transferFunction; // int doBreakpoints = __any(densityContainsBreakpoints); // __syncthreads(); // Case 2 - Integrate density only. if (densityContainsBreakpoints) // if( doBreakpoints ) { if (traceEnabled) { // ELVIS_PRINTF("Integrate density alone using adaptive // trapezoidal.\n"); } ElVisFloat result = FieldTrapezoidalIntegration::CompositeIntegration( innerIntegralFunc, f, a, b, n, traceEnabled); accumulatedDensity += result; f.AdjustSampleCount(-1); } else { if (traceEnabled) { // ELVIS_PRINTF("Integrate density alone using gauss-kronrod.\n"); } ElVisFloat errorEstimate = MAKE_FLOAT(0.0); ElVisFloat result = SingleThreadGaussKronrod<G7K15>::Integrate<ElVisFloat>( innerIntegralFunc, a, b, f, errorEstimate, traceEnabled); accumulatedDensity += result; // if( traceEnabled ) // { // ELVIS_PRINTF("[%d, %d] - GK Density (%f, %f) - // [%f, %f].\n", pixel.x, pixel.y, a, b, // range.GetLow(), range.GetHigh()); // //ELVIS_PRINTF("G7K15 Density over segment %f with // error %f\n", result, errorEstimate); // } } } else { // int doColorContainsBreakpoints = // __any(colorContainsBreakpoints); // __syncthreads(); // Color Contributes. // Case 3: Everything. Both density and color contribute. // 2.1. No breakpoints in either. // 2.2. Color breakpoints, density no // 2.3. Color no breakpoints, density yes. // 3.5 Color and density have breakpoints. if (colorContainsBreakpoints) // if( doColorContainsBreakpoints ) { // Do trapezoidal for outer and inner in lockstep. 
if (traceEnabled) { // ELVIS_PRINTF("Trapezoidal for outer and inner.\n"); } // if( renderIntegrationType ) // { // colorAccumulator[segmentIndex].y += // MAKE_FLOAT(20.0)/MAKE_FLOAT(255.0); // return; // } ElVisFloat s0 = f(a); ElVisFloat3 color0 = transferFunction->SampleColor(s0); ElVisFloat d0 = transferFunction->Sample(eDensity, s0); ElVisFloat atten = expf(-accumulatedDensity); color += h * MAKE_FLOAT(.5) * color0 * d0 * atten; for (int i = 1; i < n; ++i) { ElVisFloat t = a + i * h; ElVisFloat sample = f(t); // if( traceEnabled ) // { // ElVisFloat3 tempPoint = origin + t*dir; // ELVIS_PRINTF("Sample at %f (%f, %f, %f) = // %f\n", t, tempPoint.x, tempPoint.y, // tempPoint.z, sample); // } ElVisFloat d1 = transferFunction->Sample(eDensity, sample); accumulatedDensity += MAKE_FLOAT(.5) * h * (d0 + d1); // if( traceEnabled ) // { // ELVIS_PRINTF("Density = %f\n", d1); // } ElVisFloat3 colorSample = transferFunction->SampleColor(sample); ElVisFloat atten = expf(-accumulatedDensity); color += h * colorSample * d1 * atten; // if( traceEnabled ) // { // ELVIS_PRINTF("Density = %f, accumulated // density = %f\n", d1, accumulatedDensity); // ELVIS_PRINTF("Color Samples = (%f, %f, %f), // Accumulated Color = (%f, %f, %f)\n", // colorSample.x, colorSample.y, colorSample.z, // color.x, color.y, color.z); // } d0 = d1; } ElVisFloat sn = f(b); ElVisFloat3 colorn = transferFunction->SampleColor(sn); ElVisFloat dn = transferFunction->Sample(eDensity, sn); accumulatedDensity += MAKE_FLOAT(.5) * h * (d0 + dn); atten = expf(-accumulatedDensity); color += h * MAKE_FLOAT(.5) * colorn * dn * atten; // if( traceEnabled ) // { // ELVIS_PRINTF("Final Sample %f, Final Density // Sample %f, Final Color Sample (%f, %f, %f)\n", sn, // dn, colorn.x, colorn.y, colorn.z); // } f.AdjustSampleCount(-1); } else { // Color doesn't have breakpoints, so the main integral can be evaluated // with Gauss-Kronrod. // We'll do adaptive trapezoidal in the density, adding on to the // existing integral as we sample // the gauss-kronrod points. This way we don't have to keep the // adaptive structure around. if (traceEnabled) { // ELVIS_PRINTF("Gauss-Kronrod outer, Trapezoidal inner.\n"); } // if( renderIntegrationType ) // { // colorAccumulator[segmentIndex].z += // MAKE_FLOAT(20.0)/MAKE_FLOAT(255.0); // return; // } OuterIntegralIntegrandWithInnerTrapezoidal outerIntegrand; outerIntegrand.accumulatedDensity = accumulatedDensity; outerIntegrand.field = &f; outerIntegrand.innerH = h; outerIntegrand.innerT = a; outerIntegrand.transferFunction = transferFunction; InnerIntegralFunctor innerIntegrand; innerIntegrand.transferFunction = transferFunction; outerIntegrand.innerIntegral = &innerIntegrand; // if( traceEnabled ) // { // ELVIS_PRINTF("Start GK with incoming density // %2.15f\n", outerIntegrand.accumulatedDensity); // } ElVisFloat3 errorEstimate; ElVisFloat3 colorContribution = SingleThreadGaussKronrod<G7K15>::Integrate<ElVisFloat3>( outerIntegrand, a, b, f, errorEstimate, traceEnabled); // TODO - need to finish the density contribution for the space between // the last sample and the end of the interval. 
// if( traceEnabled ) // { // ElVisFloat testDensity = // FieldTrapezoidalIntegration::CompositeIntegration(innerIntegrand, // f, a, b, n, traceEnabled); // ELVIS_PRINTF("After running GK, the incoming color // is (%2.15f, %2.15f, %2.15f), the color // contribution is (%2.15f, %2.15f, %2.15f), and // density contribution is %2.15f (test density is // %2.15f) \n", // color.x, color.y, color.z, // colorContribution.x, colorContribution.y, // colorContribution.z, // outerIntegrand.accumulatedDensity, // testDensity); // } color += colorContribution; accumulatedDensity = outerIntegrand.accumulatedDensity; } } // if( traceEnabled ) // { // ELVIS_PRINTF("Final density %2.15f\n", accumulatedDensity); // ELVIS_PRINTF("Final color is (%2.15f, %2.15f, %2.15f)\n", // color.x, color.y, color.z); // if( numSamples ) // { // ELVIS_PRINTF("Value of samples: %d\n", numSamples[0]); // } // } densityAccumulator[segmentIndex] = accumulatedDensity; colorAccumulator[segmentIndex] = color; } // Force GK/Trap extern "C" __global__ void //__launch_bounds__(32, 8) GKOnly(ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, TransferFunction* transferFunction, ElVisFloat epsilon, ElVisFloat desiredH, bool enableTrace, int tracex, int tracey, int* numSamples, bool renderIntegrationType, bool enableEmptySpaceSkipping, ElVisFloat* __restrict__ densityAccumulator, ElVisFloat3* __restrict__ colorAccumulator) { // int2 trace = make_int2(tracex, tracey); // uint2 pixel; // pixel.x = blockIdx.x * blockDim.x + threadIdx.x; // pixel.y = blockIdx.y * blockDim.y + threadIdx.y; // bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && // enableTrace); // if( traceEnabled ) // { // ELVIS_PRINTF("Esilon %2.10f\n", epsilon); // ELVIS_PRINTF("Number of samples enabled %d\n", (numSamples ? 
// 1: 0)); // if( numSamples ) // { // ELVIS_PRINTF("Value of samples: %d\n", numSamples[0]); // } // } // uint2 screen; // screen.x = gridDim.x * blockDim.x; // screen.y = gridDim.y * blockDim.y; // int segmentIndex = pixel.x + screen.x*pixel.y; // if( traceEnabled ) // { // ELVIS_PRINTF("Segment index %d\n", segmentIndex); // } // if( segmentEnd[segmentIndex] < MAKE_FLOAT(0.0) ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because ray has left volume based on // segment end\n", segmentIndex); // } // return; // } // int elementId = segmentElementId[segmentIndex]; // if( traceEnabled ) // { // ELVIS_PRINTF("Element id %d\n", elementId); // } // if( elementId == -1 ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because element id is 0\n", // segmentIndex); // } // return; // } // int elementTypeId = segmentElementType[segmentIndex]; // ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex]; // ElVisFloat3 color = colorAccumulator[segmentIndex]; // ElVisFloat a = segmentStart[segmentIndex]; // ElVisFloat b = segmentEnd[segmentIndex]; // ElVisFloat3 dir = segmentDirection[segmentIndex]; // ElVisFloat d = (b-a); // if( traceEnabled ) // { // ELVIS_PRINTF("Ray Direction (%2.10f, %2.10f, %2.10f), segment // distance %2.10f\n", dir.x, dir.y, dir.z, d); // } // if( d == MAKE_FLOAT(0.0) ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because d is 0\n", dir.x, dir.y, // dir.z, d); // } // return; // } // int n = Floor(d/desiredH); // ElVisFloat h; // if( n == 0 ) // { // h = b-a; // n = 1; // } // else // { // h= d/(ElVisFloat)(n); // } // if( traceEnabled ) // { // ELVIS_PRINTF("Total segment range: [%2.15f, %2.15f], segment // Id %d\n", segmentStart[segmentIndex], // segmentEnd[segmentIndex], segmentIndex); // ELVIS_PRINTF("D = %2.15f, H = %2.15f, N = %d\n", d, h, n); // } // // First test for density identically 0. This means the segment // does not contribute at // // all to the integral and can be skipped. // ElVisFloat3 p0 = origin + a*dir; // ElVisFloat3 p1 = origin + b*dir; // ElVisFloat s0 = EvaluateFieldCuda(elementId, elementTypeId, p0); // ElVisFloat s1 = EvaluateFieldCuda(elementId, elementTypeId, p1); // ElVis::Interval<ElVisFloat> range; // EstimateRangeCuda(elementId, elementTypeId, p0, p1, range); // if( traceEnabled ) // { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // ELVIS_PRINTF("Origin (%f, %f, %f)\n", origin.x, origin.y, // origin.z); // ELVIS_PRINTF("Direction (%f, %f, %f)\n", dir.x, dir.y, dir.z); // ELVIS_PRINTF("Integration domain [%f, %f]\n", a, b); // } // bool densityContainsBreakpoints = // transferFunction->RangeContainsAtLeastOneBreakpoint(eDensity, // range); // Interval<ElVisFloat> densityRange = // transferFunction->Sample(eDensity, range); // if( traceEnabled ) // { // ELVIS_PRINTF("Density contains breakpoints // %d.\n",densityContainsBreakpoints ? 1 : 0 ); // ELVIS_PRINTF("Density range (%f, %f).\n", // densityRange.GetLow(), densityRange.GetHigh()); // } //// if( enableEmptySpaceSkipping ) //// { //// if( densityRange.GetLow() == MAKE_FLOAT(0.0) && //// densityRange.GetHigh() == MAKE_FLOAT(0.0) ) //// { //// if( traceEnabled ) //// { //// ELVIS_PRINTF("Density is identically 0.\n"); //// } //// if( renderIntegrationType ) //// { //// colorAccumulator[segmentIndex].x += ///MAKE_FLOAT(1.0)/MAKE_FLOAT(255.0); //// } //// return; //// } //// } // // At this point we know that there is some non-0 density along // the segment. 
// // Check if the color is identically 0. If so, we can just // integrate the // // density. // bool colorContainsBreakpoints = // transferFunction->ColorContainsAtLeastOneBreakpoint(range); // Interval<ElVisFloat> redRange = transferFunction->Sample(eRed, // range); // Interval<ElVisFloat> greenRange = transferFunction->Sample(eGreen, // range); // Interval<ElVisFloat> blueRange = transferFunction->Sample(eBlue, // range); // Interval<ElVisFloat> totalColorRange; // totalColorRange.Combine(redRange); // totalColorRange.Combine(blueRange); // totalColorRange.Combine(greenRange); // if( traceEnabled ) // { // ELVIS_PRINTF("Color contains breakpoints // %d.\n",colorContainsBreakpoints ? 1 : 0 ); // ELVIS_PRINTF("Red range (%f, %f).\n", redRange.GetLow(), // redRange.GetHigh()); // ELVIS_PRINTF("Green range (%f, %f).\n", greenRange.GetLow(), // greenRange.GetHigh()); // ELVIS_PRINTF("Blue range (%f, %f).\n", blueRange.GetLow(), // blueRange.GetHigh()); // ELVIS_PRINTF("Total Color range (%f, %f).\n", // totalColorRange.GetLow(), blueRange.GetHigh()); // } // // If the color does not contribute, then we can just integrate // the density. // FieldEvaluator f; // f.Origin = origin; // f.Direction = dir; // f.ElementId = elementId; // f.ElementType = elementTypeId; // f.sampleCount = numSamples; // f.FieldId = fieldId; // { // ELVIS_PRINTF("[%d, %d] - GK Colro - Trap Density.\n", // pixel.x, pixel.y); // // Color doesn't have breakpoints, so the main // integral can be evaluated with Gauss-Kronrod. // // We'll do adaptive trapezoidal in the density, // adding on to the existing integral as we sample // // the gauss-kronrod points. This way we don't have // to keep the adaptive structure around. // if( traceEnabled ) // { // ELVIS_PRINTF("Gauss-Kronrod outer, Trapezoidal // inner.\n"); // } // if( renderIntegrationType ) // { // colorAccumulator[segmentIndex].z += // MAKE_FLOAT(1.0)/MAKE_FLOAT(255.0); // return; // } // OuterIntegralIntegrandWithInnerTrapezoidal // outerIntegrand; // outerIntegrand.accumulatedDensity = // accumulatedDensity; // outerIntegrand.field = &f; // outerIntegrand.innerH = h; // outerIntegrand.innerT = a; // outerIntegrand.transferFunction = transferFunction; // InnerIntegralFunctor innerIntegrand; // innerIntegrand.transferFunction = transferFunction; // outerIntegrand.innerIntegral = &innerIntegrand; // if( traceEnabled ) // { // ELVIS_PRINTF("Start GK with incoming density // %2.15f\n", outerIntegrand.accumulatedDensity); // } // ElVisFloat3 errorEstimate; // ElVisFloat3 colorContribution = // SingleThreadGaussKronrod<G7K15>::Integrate<ElVisFloat3>(outerIntegrand, // a, b, f, errorEstimate, traceEnabled); // // TODO - need to finish the density contribution for // the space between the last sample and the end of the // interval. 
// if( traceEnabled ) // { // ElVisFloat testDensity = // FieldTrapezoidalIntegration::CompositeIntegration(innerIntegrand, // f, a, b, n, traceEnabled); // ELVIS_PRINTF("After running GK, the incoming color // is (%2.15f, %2.15f, %2.15f), the color // contribution is (%2.15f, %2.15f, %2.15f), and // density contribution is %2.15f (test density is // %2.15f) \n", // color.x, color.y, color.z, // colorContribution.x, colorContribution.y, // colorContribution.z, // outerIntegrand.accumulatedDensity, // testDensity); // } // color += colorContribution; // accumulatedDensity = // outerIntegrand.accumulatedDensity; // } // if( traceEnabled ) // { // ELVIS_PRINTF("Final density %2.15f\n", accumulatedDensity); // ELVIS_PRINTF("Final color is (%2.15f, %2.15f, %2.15f)\n", // color.x, color.y, color.z); // if( numSamples ) // { // ELVIS_PRINTF("Value of samples: %d\n", numSamples[0]); // } // } // densityAccumulator[segmentIndex] = accumulatedDensity; // colorAccumulator[segmentIndex] = color; } } #endif
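// The kernels in this file approximate the emission-absorption model along a
// ray segment [a, b]:
//   color += integral_a^b C(s(t)) * rho(s(t)) * exp(-(D0 + integral_a^t rho(s(u)) du)) dt,
// where s(t) is the scalar field along the ray, C and rho are the transfer
// function's color and density channels, and D0 is density accumulated on
// earlier segments. The sketch below is a single-channel, host-side
// illustration of the lock-step trapezoidal branch of
// IntegrateSegmentSingleThreadPerRay (n intervals, n+1 samples); the function
// name and the std::function stand-ins for the field evaluator and transfer
// function are assumptions for illustration, not ElVis types.
#include <cmath>
#include <functional>
#include <utility>

// Returns {color contribution of the segment, updated accumulated density}.
static std::pair<double, double> integrate_segment_trapezoidal(
    const std::function<double(double)>& field,      // s(t)
    const std::function<double(double)>& colorTf,    // C(s)
    const std::function<double(double)>& densityTf,  // rho(s)
    double a, double b, int n, double accumulatedDensity)
{
  const double h = (b - a) / n;
  double d0 = densityTf(field(a));
  double color = 0.5 * h * colorTf(field(a)) * d0 * std::exp(-accumulatedDensity);
  for (int i = 1; i < n; ++i)
  {
    const double s = field(a + i * h);
    const double d1 = densityTf(s);
    accumulatedDensity += 0.5 * h * (d0 + d1);                    // inner (density) integral
    color += h * colorTf(s) * d1 * std::exp(-accumulatedDensity); // outer (color) integrand
    d0 = d1;
  }
  const double sn = field(b);
  const double dn = densityTf(sn);
  accumulatedDensity += 0.5 * h * (d0 + dn);
  color += 0.5 * h * colorTf(sn) * dn * std::exp(-accumulatedDensity);
  return {color, accumulatedDensity};
}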
9dc82ad6ef402fe4ab7a88a4b0b4a20dfacc7eba.cu
/////////////////////////////////////////////////////////////////////////////// // // The MIT License // // Copyright (c) 2006 Scientific Computing and Imaging Institute, // University of Utah (USA) // // License for the specific language governing rights and limitations under // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. // /////////////////////////////////////////////////////////////////////////////// #ifndef ELVIS_VOLUME_RENDERING_FULL_CU #define ELVIS_VOLUME_RENDERING_FULL_CU #include <ElVis/Core/Float.cu> #include <ElVis/Core/FieldEvaluator.cu> #include <ElVis/Math/TrapezoidalIntegration.hpp> #include <ElVis/Core/TransferFunction.h> #include <math_functions.h> #include <ElVis/Core/GaussKronrod.cu> namespace ElVis { enum IntegrationStatus { eFinished, ePartial }; __device__ void ActualH( ElVisFloat a, ElVisFloat b, ElVisFloat desiredH, ElVisFloat& h, int& n) { ElVisFloat d = (b - a); n = Floor(d / desiredH); if (n == 0) { h = d; n = 1; } else { h = d / (ElVisFloat)(n); } } struct FieldTrapezoidalIntegration { // n is the number of intervals. // So evaluation is at n+1 points. 
  template <typename T, typename IntegrandType, typename FieldEvaluatorType>
  ELVIS_DEVICE static T CompositeIntegration(const IntegrandType& integrand,
                                             const FieldEvaluatorType& field,
                                             const T& a,
                                             const T& b,
                                             unsigned int n,
                                             bool traceEnabled)
  {
    // if( traceEnabled )
    // {
    //     ELVIS_PRINTF("Running trapezoidal rule on interval
    //     [%2.15f, %2.15f] with %d samples\n", a, b, n);
    // }

    T h = (b - a);
    if (n > 0)
    {
      h = (b - a) / (n);
    }
    T result = 0.0;
    for (unsigned int i = 1; i < n; ++i)
    {
      ElVisFloat t = a + i * h;
      ElVisFloat s = field(t);
      ElVisFloat integrandValue = integrand(t, s);
      result += integrandValue;

      // if( traceEnabled )
      // {
      //     ELVIS_PRINTF("Trapezoidal sample at t %2.15f,
      //     field %2.15f, integrand value %2.15f\n", t, s,
      //     integrandValue);
      // }
    }

    ElVisFloat sa = field(a);
    ElVisFloat sb = field(b);
    result += .5 * integrand(a, sa) + .5 * integrand(b, sb);
    result *= h;

    // if( traceEnabled )
    // {
    //     ELVIS_PRINTF("Finalizing Trapezoidal sample at t
    //     (%2.15f, %2.15f), field (%2.15f, %2.15f), result %2.15f
    //     and h %2.15f\n", a, b, sa, sb, result, h);
    //     ELVIS_PRINTF("Finished trapezoidal on interval [%2.15f,
    //     %2.15f] with endpoint transfer samples %2.15f, %2.15f
    //     with result %2.15f\n", a, b, integrand(a, sa),
    //     integrand(b, sb), result);
    // }
    return result;
  }
};

struct InnerIntegralFunctor
{
  ELVIS_DEVICE ElVisFloat GetMaxValue(const Interval<ElVisFloat>& domain) const
  {
    return transferFunction->GetMaxValue(eDensity, domain);
  }

  ELVIS_DEVICE ElVisFloat operator()(const ElVisFloat& t,
                                     const ElVisFloat& s,
                                     bool traceEnabled = false) const
  {
    return transferFunction->Sample(eDensity, s);
  }

  TransferFunction* transferFunction;
};

struct OuterIntegralIntegrandWithInnerTrapezoidal
{
  // __device__ ElVisFloat GetMaxValue(const Interval<ElVisFloat>&
  // domain) const
  // {
  //     return transferFunction->GetMaxValue(channel, domain) *
  //     transferFunction->GetMaxValue(eDensity, domain);
  // }

  ELVIS_DEVICE ElVisFloat3 operator()(const ElVisFloat& t,
                                      const ElVisFloat& s,
                                      bool traceEnabled = false) const
  {
    // if( traceEnabled )
    // {
    //     ELVIS_PRINTF("Starting outer integrand at %2.15f,
    //     accumulatedDensity = %2.15f, innerT = %2.15f, innerH =
    //     %2.15f\n", t, accumulatedDensity, innerT, innerH);
    // }
    ElVisFloat3 c = transferFunction->SampleColor(s);
    ElVisFloat d = transferFunction->Sample(eDensity, s);

    int numberAdditionalInnerSamples = 0;
    ElVisFloat newH;
    ActualH(innerT, t, innerH, newH, numberAdditionalInnerSamples);

    accumulatedDensity += FieldTrapezoidalIntegration::CompositeIntegration(
      *innerIntegral, *field, innerT, t, numberAdditionalInnerSamples,
      traceEnabled);

    // If 0, then the endpoints have already been calculated.
    // By setting n to a different but hopefully close h, we don't need this
    // fixup.
// if( numberAdditionalInnerSamples != 0 ) // { // ElVisFloat t0 = t-numberAdditionalInnerSamples*innerH; // ElVisFloat s0 = (*field)(t0); // accumulatedDensity += MAKE_FLOAT(.5)*(t-t0)*( // (*innerIntegral)(t0, s0) + (*innerIntegral)(t, s)); // if( traceEnabled ) // { // ELVIS_PRINTF("Inner Integral final adjustment t0 = // %2.15f, s0 = %2.15f\n", t0, s0); // ELVIS_PRINTF("Sampling outer integrand at %2.15f, // with color sample %2.15f, density %2.15f, and // accumulated density %2.15f\n", t, c, d, // accumulatedDensity); // } // } innerT = t; return c * d * exp(-accumulatedDensity); } TransferFunction* transferFunction; InnerIntegralFunctor* innerIntegral; FieldEvaluator* field; mutable ElVisFloat accumulatedDensity; mutable ElVisFloat innerT; mutable ElVisFloat innerH; }; // struct OuterIntegralIntegrand // { // __device__ ElVisFloat GetMaxValue(const Interval<ElVisFloat>& // domain) const // { // return transferFunction->GetMaxValue(channel, domain) * // transferFunction->GetMaxValue(eDensity, domain); // } // __device__ ElVisFloat operator() (const ElVisFloat& t, const // ElVisFloat& s) const // { // ElVisFloat c = transferFunction->Sample(channel, s); // ElVisFloat d = transferFunction->Sample(eDensity, s); // ElVisFloat accumulatedDensity = // innerIntegralApproximation->SampleInnerIntegral(t, s, eDensity, // transferFunction); // return c*d*Exp(-accumulatedDensity); // } // TransferFunction* transferFunction; // TransferFunctionChannel channel; // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21>* // innerIntegralApproximation; // }; /// \brief epsilon - the desired global error. extern "C" __global__ void IntegrateSegmentSingleThreadPerRayFullVersion( ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, int fieldId, TransferFunction* transferFunction, ElVisFloat epsilon, bool enableTrace, ElVisFloat* __restrict__ densityAccumulator, ElVisFloat3* __restrict__ colorAccumulator) { // int2 trace = make_int2(512/2, 512/2); // // Step 1 - Categorize the field along the segment. // // // // Case 1: Total space skipping. When the density function is // identically 0 over the entire segment, then there is nothing to // do. // // Case 2: Density only. If the color components are identially // 0, then we only need to integrate the density contribution. // // 2.1: No breakpoints. Use gauss-kronrod on the entire // interval. The 7-15 rule is probably sufficient. // // 2.2: Breakpoints. Adaptive trapezoidal. // // Case 3: Everything. Both density and color contribute. // // 2.1. No breakpoints in either. // // 2.2. Color breakpoints, density no // // 2.3. Color no breakpoints, density yes. // // 3.5 Color and density have breakpoints. // // First pass - do adaptive trapezoidal on the density because I // know I can then evaluate at any point within my error // // budget. Then I can do adaptive or gauss-kronrod on the main // integral without a problem. // // If the color is doing gauss-kronrod, then I can incrementally // evaluate the density using adaptive trapezoidal and don't need to // keep the entire // // structure around. It is only when the outer integral is // adaptive that I need to do that (case 2.2). // // In cases 2 and 3, each component can be integrated differently // based on breakpoints. 
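    // The quantity the color-contributing cases below approximate over the
    // segment [a, b] is the emission-absorption integral
    //
    //   C += \int_a^b c(s(t)) k(s(t)) exp( -( D_in + \int_a^t k(s(u)) du ) ) dt,
    //
    // where s(t) is the scalar field along the ray, c and k are the color and
    // density channels of the transfer function, and D_in is the density
    // accumulated on earlier segments. The density-only cases advance just the
    // accumulated density term, \int_a^b k(s(u)) du.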
// // Density integration // uint2 pixel; // pixel.x = blockIdx.x * blockDim.x + threadIdx.x; // pixel.y = blockIdx.y * blockDim.y + threadIdx.y; // bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && // enableTrace); // if( traceEnabled ) // { // ELVIS_PRINTF("Esilon %2.10f\n", epsilon); // } // uint2 screen; // screen.x = gridDim.x * blockDim.x; // screen.y = gridDim.y * blockDim.y; // int segmentIndex = pixel.x + screen.x*pixel.y; // if( segmentEnd[segmentIndex] < MAKE_FLOAT(0.0) ) // { // return; // } // int elementId = segmentElementId[segmentIndex]; // if( elementId == -1 ) // { // return; // } // int elementTypeId = segmentElementType[segmentIndex]; // ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex]; // ElVisFloat3 color = colorAccumulator[segmentIndex]; // ElVisFloat a = segmentStart[segmentIndex]; // ElVisFloat b = segmentEnd[segmentIndex]; //// if( traceEnabled ) //// { //// b = 2.024846; //// } // ElVisFloat3 dir = segmentDirection[segmentIndex]; // ElVisFloat d = (b-a); // if( d == MAKE_FLOAT(0.0) ) // { // return; // } // // First test for density identically 0. This means the segment // does not contribute at // // all to the integral and can be skipped. // ElVisFloat3 p0 = origin + a*dir; // ElVisFloat3 p1 = origin + b*dir; // ElVisFloat s0 = EvaluateFieldCuda(elementId, elementTypeId, p0); // ElVisFloat s1 = EvaluateFieldCuda(elementId, elementTypeId, p1); // ElVis::Interval<ElVisFloat> range; // EstimateRangeCuda(elementId, elementTypeId, fieldId, p0, p1, // range); // if( traceEnabled ) // { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // } // bool densityContainsBreakpoints = // transferFunction->RangeContainsAtLeastOneBreakpoint(eDensity, // range); // if( traceEnabled ) // { // ELVIS_PRINTF("Density contains breakpoints // %d.\n",densityContainsBreakpoints ? 1 : 0 ); // } // if( !densityContainsBreakpoints ) // { // // No breakpoints. If 0 at the endpoints, then 0 everywhere. // if( transferFunction->Sample(eDensity, s0) == MAKE_FLOAT(0.0) // && // transferFunction->Sample(eDensity, s1) == MAKE_FLOAT(0.0) // ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Density is identically 0.\n"); // } // // Case 1 // return; // } // } // // At this point we know that there is some non-0 density along // the segment. // // Check if the color is identically 0. If so, we can just // integrate the // // density. // bool colorContainsBreakpoints = // transferFunction->ColorContainsAtLeastOneBreakpoint(range); // if( traceEnabled ) // { // ELVIS_PRINTF("Color contains breakpoints // %d.\n",colorContainsBreakpoints ? 1 : 0 ); // } // if( !colorContainsBreakpoints ) // { // if( transferFunction->Sample(eRed, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eRed, s1) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eGreen, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eGreen, s1) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eBlue, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eBlue, s1) == MAKE_FLOAT(0.0) ) // { // // Case 2 - Integrate density only. // if( densityContainsBreakpoints ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Integrate density alone using // adaptive trapezoidal.\n"); // } // // Case 2.1 // // Integrate density using adaptive trapezoidal. // } // else // { // if( traceEnabled ) // { // ELVIS_PRINTF("Integrate density alone using // gauss-kronrod.\n"); // } // // Case 2.2 // // Integrate density using gauss-kronrod. 
// } // return; // } // } // // Case 3: Everything. Both density and color contribute. // // 2.1. No breakpoints in either. // // 2.2. Color breakpoints, density no // // 2.3. Color no breakpoints, density yes. // // 3.5 Color and density have breakpoints. // if( colorContainsBreakpoints ) // { // // Need adaptive trapezoidal for the outer integral. So // evalute the density over the entire range, // // then do adaptive on the outer, sampling the inner function. // if( traceEnabled ) // { // ELVIS_PRINTF("Adaptive trapezoidal for the outer, stack // backed for inner on [%2.10f, %2.10f].\n", a, b); // } // FieldEvaluator f; // f.Origin = origin; // f.Direction = dir; // f.ElementId = elementId; // f.ElementType = elementTypeId; // f.FieldId = fieldId; // bool reachedMax = false; // ElVisFloat maxValue = transferFunction->GetMaxValue(eDensity, // range); // ElVisFloat estimate = maxValue*(b-a); // maxValue = MAKE_FLOAT(1.0); // estimate = MAKE_FLOAT(1.0); // InnerIntegralFunctor innerIntegralFunc; // innerIntegralFunc.transferFunction = transferFunction; // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21> // innerIntegralApproximation; // innerIntegralApproximation.Initialize(a, b, innerIntegralFunc, // f, epsilon, estimate, maxValue, reachedMax, traceEnabled); // OuterIntegralIntegrand outerIntegralRedFunc; // outerIntegralRedFunc.channel = eRed; // outerIntegralRedFunc.innerIntegralApproximation = // &innerIntegralApproximation; // outerIntegralRedFunc.transferFunction = transferFunction; // OuterIntegralIntegrand outerIntegralGreenFunc; // outerIntegralGreenFunc.channel = eGreen; // outerIntegralGreenFunc.innerIntegralApproximation = // &innerIntegralApproximation; // outerIntegralGreenFunc.transferFunction = transferFunction; // OuterIntegralIntegrand outerIntegralBlueFunc; // outerIntegralBlueFunc.channel = eBlue; // outerIntegralBlueFunc.innerIntegralApproximation = // &innerIntegralApproximation; // outerIntegralBlueFunc.transferFunction = transferFunction; // IntegrationStatus innerIntegrationStatus = ePartial; // int loopGuard = 0; // do // { // innerIntegrationStatus = // innerIntegralApproximation.ContinueIntegration(innerIntegralFunc, // f, epsilon, estimate, maxValue, reachedMax, traceEnabled); // ++loopGuard; // Interval<ElVisFloat> validDomain = // innerIntegralApproximation.ValidDomain(); // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21> // redApproximation; // if(traceEnabled) // { // ELVIS_PRINTF("Finished evaluating the interval // [%2.10f, %2.10f]\n", validDomain.GetLow(), // validDomain.GetHigh()); // } // redApproximation.Initialize(validDomain.GetLow(), // validDomain.GetHigh(), outerIntegralRedFunc, f, epsilon, // estimate, maxValue, reachedMax, false); // int redLoopGuard = 0; // IntegrationStatus redStatus = ePartial; // do // { // ++redLoopGuard; // redApproximation.ContinueIntegration(outerIntegralRedFunc, // f, epsilon, estimate, maxValue, reachedMax, false); // color.x += redApproximation.OverallValue(); // } // while( redStatus != eFinished && redLoopGuard < 10); // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21> // greenApproximation; // greenApproximation.Initialize(validDomain.GetLow(), // validDomain.GetHigh(), outerIntegralGreenFunc, f, epsilon, // estimate, maxValue, reachedMax, false); // int greenLoopGuard = 0; // IntegrationStatus greenStatus = ePartial; // do // { // ++greenLoopGuard; // greenApproximation.ContinueIntegration(outerIntegralGreenFunc, // f, epsilon, estimate, maxValue, reachedMax, false); // color.y += greenApproximation.OverallValue(); // } // 
while( greenStatus != eFinished && greenLoopGuard < 10); // ReentrantAdaptiveTrapezoidal<ElVisFloat, 21> // blueApproximation; // blueApproximation.Initialize(validDomain.GetLow(), // validDomain.GetHigh(), outerIntegralBlueFunc, f, epsilon, // estimate, maxValue, reachedMax, false); // int blueLoopGuard = 0; // IntegrationStatus blueStatus = ePartial; // do // { // ++blueLoopGuard; // blueApproximation.ContinueIntegration(outerIntegralBlueFunc, // f, epsilon, estimate, maxValue, reachedMax, false); // color.z += blueApproximation.OverallValue(); // } // while( blueStatus != eFinished && blueLoopGuard < 10); // } // while( innerIntegrationStatus != eFinished && loopGuard < 10 // ); // accumulatedDensity += // innerIntegralApproximation.OverallValue(); //// if( traceEnabled ) //// { //// ELVIS_PRINTF("################## Density\n"); //// } //// innerIntegralApproximation.Integrate(a, b, transferFunction, ///eDensity, f, epsilon, estimate, maxValue, reachedMax, traceEnabled); //// ElVisFloat redMax = transferFunction->GetMaxValue(eRed, ///range); //// OuterIntegralAdaptiveTrapezoidal<ElVisFloat, 10> ///redIntegral; //// if( traceEnabled ) //// { //// ELVIS_PRINTF("################## Red\n"); //// } //// redIntegral.Integrate(a, b, transferFunction, eRed, f, ///innerIntegralApproximation, epsilon, maxValue*redMax*(b-a), ///maxValue*redMax, reachedMax, traceEnabled, accumulatedDensity); //// color.x += redIntegral.OverallValue(); //// ElVisFloat greenMax = transferFunction->GetMaxValue(eGreen, ///range); //// OuterIntegralAdaptiveTrapezoidal<ElVisFloat, 10> ///greenIntegral; //// if( traceEnabled ) //// { //// ELVIS_PRINTF("################## Green\n"); //// } //// greenIntegral.Integrate(a, b, transferFunction, eGreen, f, ///innerIntegralApproximation, epsilon, maxValue*greenMax*(b-a), ///maxValue*greenMax, reachedMax, traceEnabled, accumulatedDensity); //// color.y += greenIntegral.OverallValue(); //// ElVisFloat blueMax = transferFunction->GetMaxValue(eBlue, ///range); //// OuterIntegralAdaptiveTrapezoidal<ElVisFloat, 10> ///blueIntegral; //// if( traceEnabled ) //// { //// ELVIS_PRINTF("################## Blue\n"); //// } //// blueIntegral.Integrate(a, b, transferFunction, eBlue, f, ///innerIntegralApproximation, epsilon, maxValue*blueMax*(b-a), ///maxValue*blueMax, reachedMax, traceEnabled, accumulatedDensity); //// color.z += blueIntegral.OverallValue(); //// accumulatedDensity += ///innerIntegralApproximation.OverallValue(); // } // else // { // // Color doesn't have breakpoints, so the main integral can be // evaluated with Gauss-Kronrod. // // We'll do adaptive trapezoidal in the density, adding on to // the existing integral as we sample // // the gauss-kronrod points. This way we don't have to keep // the adaptive structure around. 
// if( traceEnabled ) // { // ELVIS_PRINTF("Gauss-Kronrod outer, adaptive trapezoidal // inner..\n"); // } // } // densityAccumulator[segmentIndex] = accumulatedDensity; // colorAccumulator[segmentIndex] = color; } template <unsigned int block_size> __device__ void PrefixSumTrapezoidalIntegration( const ElVisFloat& initialValue, volatile ElVisFloat* __restrict__ input, volatile ElVisFloat* __restrict__ output, ElVisFloat h, bool traceEnabled) { ElVisFloat val = MAKE_FLOAT(.5) * input[threadIdx.x]; output[threadIdx.x] = val; __syncthreads(); if (threadIdx.x == 0) { output[0] = MAKE_FLOAT(0.0); } if (traceEnabled) { // ELVIS_PRINTF("Input Array Values: ("); // for(unsigned int i = 0; i < block_size; ++i) // { // ELVIS_PRINTF("%f, ", input[i]); // } // ELVIS_PRINTF(")\n\n"); // ELVIS_PRINTF("Output Array Values: ("); // for(unsigned int i = 0; i < block_size; ++i) // { // ELVIS_PRINTF("%f, ", output[i]); // } // ELVIS_PRINTF(")\n\n"); } if (block_size > 1) { if (threadIdx.x >= 1) { val = output[threadIdx.x - 1] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 2) { if (threadIdx.x >= 2) { val = output[threadIdx.x - 2] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 4) { if (threadIdx.x >= 4) { val = output[threadIdx.x - 4] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 8) { if (threadIdx.x >= 8) { val = output[threadIdx.x - 8] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 16) { if (threadIdx.x >= 16) { val = output[threadIdx.x - 16] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 32) { if (threadIdx.x >= 32) { val = output[threadIdx.x - 32] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 64) { if (threadIdx.x >= 64) { val = output[threadIdx.x - 64] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 128) { if (threadIdx.x >= 128) { val = output[threadIdx.x - 128] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 256) { if (threadIdx.x >= 256) { val = output[threadIdx.x - 256] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 512) { if (threadIdx.x >= 512) { val = output[threadIdx.x - 512] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } output[threadIdx.x] = initialValue + h * (output[threadIdx.x]); __syncthreads(); if (traceEnabled) { // ELVIS_PRINTF("Result Array Values: ("); // for(unsigned int i = 0; i < block_size; ++i) // { // ELVIS_PRINTF("%f, ", output[i]); // } // ELVIS_PRINTF(")\n\n"); } } template <unsigned int block_size> __device__ void PrefixSum(volatile ElVisFloat* __restrict__ input, volatile ElVisFloat* __restrict__ output) { ElVisFloat val = input[threadIdx.x]; output[threadIdx.x] = val; __syncthreads(); if (block_size > 1) { if (threadIdx.x >= 1) { val = output[threadIdx.x - 1] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 2) { if (threadIdx.x >= 2) { val = output[threadIdx.x - 2] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 4) { if (threadIdx.x >= 4) { val = output[threadIdx.x - 4] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 8) { if (threadIdx.x >= 8) { val = output[threadIdx.x - 8] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if 
(block_size > 16) { if (threadIdx.x >= 16) { val = output[threadIdx.x - 16] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 32) { if (threadIdx.x >= 32) { val = output[threadIdx.x - 32] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 64) { if (threadIdx.x >= 64) { val = output[threadIdx.x - 64] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 128) { if (threadIdx.x >= 128) { val = output[threadIdx.x - 128] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 256) { if (threadIdx.x >= 256) { val = output[threadIdx.x - 256] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } if (block_size > 512) { if (threadIdx.x >= 512) { val = output[threadIdx.x - 512] + val; } __syncthreads(); output[threadIdx.x] = val; __syncthreads(); } } template <typename IntegrationType> __device__ void IntegrateDensityFunction() { } // Assumes a single warp per ray, and that each block only contains a single // warp. extern "C" __global__ void IntegrateSegmentWarpPerSegment( ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, int fieldId, TransferFunction* transferFunction, ElVisFloat epsilon, bool enableTrace, ElVisFloat* __restrict__ densityAccumulator, ElVisFloat3* __restrict__ colorAccumulator) { // __shared__ int2 trace; // trace = make_int2(512/2, 512/2); // // // Step 1 - Categorize the field along the segment. // // // // Case 1: Total space skipping. When the density function is // identically 0 over the entire segment, then there is nothing to // do. // // Case 2: Density only. If the color components are identially // 0, then we only need to integrate the density contribution. // // 2.1: No breakpoints. Use gauss-kronrod on the entire // interval. The 7-15 rule is probably sufficient. // // 2.2: Breakpoints. Adaptive trapezoidal. // // Case 3: Everything. Both density and color contribute. // // 2.1. No breakpoints in either. // // 2.2. Color breakpoints, density no // // 2.3. Color no breakpoints, density yes. // // 3.5 Color and density have breakpoints. // // First pass - do adaptive trapezoidal on the density because I // know I can then evaluate at any point within my error // // budget. Then I can do adaptive or gauss-kronrod on the main // integral without a problem. // // // If the color is doing gauss-kronrod, then I can incrementally // evaluate the density using adaptive trapezoidal and don't need to // keep the entire // // structure around. It is only when the outer integral is // adaptive that I need to do that (case 2.2). // // // In cases 2 and 3, each component can be integrated differently // based on breakpoints. 
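    // The approach sketched in the commented-out body below is one 32-thread
    // warp per segment: each thread samples the scalar field at one of 32
    // evenly spaced points, PrefixSumTrapezoidalIntegration turns the 32
    // density samples into a running trapezoidal integral so every thread
    // knows the density accumulated up to its own sample, the attenuated
    // color contributions are prefix-summed the same way, and thread 0
    // writes the totals back to the accumulators.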
// // // Density integration // // __shared__ uint2 pixel; // pixel.x = blockIdx.x; // pixel.y = blockIdx.y; // // bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && // enableTrace && threadIdx.x == 0); // //// if( traceEnabled ) //// { //// ELVIS_PRINTF("Esilon %2.10f\n", epsilon); //// } // __shared__ uint2 screen; // screen.x = gridDim.x; // screen.y = gridDim.y; // // __shared__ int segmentIndex; // segmentIndex = pixel.x + screen.x*pixel.y; // __shared__ ElVisFloat b; // b = segmentEnd[segmentIndex]; // if( b < MAKE_FLOAT(0.0) ) // { // return; // } // // __shared__ int elementId; // elementId = segmentElementId[segmentIndex]; // if( elementId == -1 ) // { // return; // } // // __shared__ int elementTypeId; // elementTypeId = segmentElementType[segmentIndex]; // __shared__ ElVisFloat accumulatedDensity; // accumulatedDensity = densityAccumulator[segmentIndex]; // __shared__ ElVisFloat3 color; // color = colorAccumulator[segmentIndex]; // __shared__ ElVisFloat a; // a = segmentStart[segmentIndex]; // // // __shared__ ElVisFloat3 dir; // dir = segmentDirection[segmentIndex]; // __shared__ ElVisFloat d; // d = (b-a); // // if( d == MAKE_FLOAT(0.0) ) // { // return; // } // // __shared__ ElVisFloat h; // h = d/31; // // // First test for density identically 0. This means the segment // does not contribute at // // all to the integral and can be skipped. // __shared__ ElVisFloat3 p0; // p0 = origin + a*dir; // __shared__ ElVisFloat3 p1; // p1 = origin + b*dir; // ElVisFloat s0 = EvaluateFieldCuda(elementId, elementTypeId, p0); // ElVisFloat s1 = EvaluateFieldCuda(elementId, elementTypeId, p1); // if( traceEnabled ) // { // ELVIS_PRINTF("%f, %f\n", s0, s1); // } // ElVis::Interval<ElVisFloat> range; // EstimateRangeCuda(elementId, elementTypeId, p0, p1, range); // // if( traceEnabled ) // { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // } // // bool densityContainsBreakpoints = // transferFunction->RangeContainsAtLeastOneBreakpoint(eDensity, // range); // if( traceEnabled ) // { // ELVIS_PRINTF("Density contains breakpoints // %d.\n",densityContainsBreakpoints ? 1 : 0 ); // } // if( !densityContainsBreakpoints ) // { // // No breakpoints. If 0 at the endpoints, then 0 everywhere. // if( transferFunction->Sample(eDensity, s0) == MAKE_FLOAT(0.0) // && // transferFunction->Sample(eDensity, s1) == MAKE_FLOAT(0.0) // ) // { //// if( traceEnabled ) //// { //// ELVIS_PRINTF("Density is identically 0.\n"); //// } // // Case 1 // return; // } // } // // bool colorContainsBreakpoints = // transferFunction->ColorContainsAtLeastOneBreakpoint(range); // if( traceEnabled ) // { // ELVIS_PRINTF("Color contains breakpoints // %d.\n",colorContainsBreakpoints ? 1 : 0 ); // } // if( !colorContainsBreakpoints ) // { // if( transferFunction->Sample(eRed, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eRed, s1) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eGreen, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eGreen, s1) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eBlue, s0) == MAKE_FLOAT(0.0) && // transferFunction->Sample(eBlue, s1) == MAKE_FLOAT(0.0) ) // { // // Case 2 - Integrate density only. // if( densityContainsBreakpoints ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Integrate density alone using // adaptive trapezoidal.\n"); // } // // // Case 2.1 // // Integrate density using adaptive trapezoidal. 
// } // else // { // if( traceEnabled ) // { // ELVIS_PRINTF("Integrate density alone using // gauss-kronrod.\n"); // } // // Case 2.2 // // Integrate density using gauss-kronrod. // //IntegrateDensityFunction<G7K15>(); // } // return; // } // } // // //// // Sample the field. //// __shared__ ElVisFloat fieldSamples[32]; //// ElVisFloat3 p = origin + (a+threadIdx.x*h)*dir; //// fieldSamples[threadIdx.x] = EvaluateFieldCuda(elementId, ///elementTypeId, p); //// __syncthreads(); // //// __shared__ ElVisFloat density[32]; //// density[threadIdx.x] = transferFunction->Sample(eDensity, ///fieldSamples[threadIdx.x]); //// __syncthreads(); // //// __shared__ ElVisFloat accumulatedDensityIntegration[32]; //// PrefixSumTrapezoidalIntegration<32>(accumulatedDensity, ///&density[0], &accumulatedDensityIntegration[0], h, traceEnabled); //// __syncthreads(); // //// __shared__ ElVisFloat red[32]; //// __shared__ ElVisFloat green[32]; //// __shared__ ElVisFloat blue[32]; // //// ElVisFloat attenuation = ///Exp(-accumulatedDensityIntegration[threadIdx.x]); //// red[threadIdx.x] = transferFunction->Sample(eRed, ///fieldSamples[threadIdx.x])*density[threadIdx.x]*attenuation; //// green[threadIdx.x] = transferFunction->Sample(eGreen, ///fieldSamples[threadIdx.x])*density[threadIdx.x]*attenuation;; //// blue[threadIdx.x] = transferFunction->Sample(eBlue, ///fieldSamples[threadIdx.x])*density[threadIdx.x]*attenuation;; //// __syncthreads(); // //// __shared__ ElVisFloat redIntegral[32]; //// __shared__ ElVisFloat greenIntegral[32]; //// __shared__ ElVisFloat blueIntegral[32]; // //// PrefixSumTrapezoidalIntegration<32>(color.x, &red[0], ///&redIntegral[0], h, traceEnabled); //// PrefixSumTrapezoidalIntegration<32>(color.y, &green[0], ///&greenIntegral[0], h, traceEnabled); //// PrefixSumTrapezoidalIntegration<32>(color.z, &blue[0], ///&blueIntegral[0], h, traceEnabled); // //// if( threadIdx.x == 0 ) //// { //// densityAccumulator[segmentIndex] = ///accumulatedDensityIntegration[31]; //// color.x += redIntegral[31]; //// color.y += greenIntegral[31]; //// color.z += blueIntegral[31]; // //// colorAccumulator[segmentIndex] = color; //// } } // Actual code extern "C" __global__ void //__launch_bounds__(32, 8) IntegrateSegmentSingleThreadPerRay( ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, int fieldId, TransferFunction* transferFunction, ElVisFloat epsilon, ElVisFloat desiredH, bool enableTrace, int tracex, int tracey, int* numSamples, bool renderIntegrationType, bool enableEmptySpaceSkipping, ElVisFloat* __restrict__ densityAccumulator, ElVisFloat3* __restrict__ colorAccumulator) { int2 trace = make_int2(tracex, tracey); uint2 pixel; pixel.x = blockIdx.x * blockDim.x + threadIdx.x; pixel.y = blockIdx.y * blockDim.y + threadIdx.y; bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && enableTrace); if (traceEnabled) { // ELVIS_PRINTF("Esilon %2.10f\n", epsilon); // ELVIS_PRINTF("Number of samples enabled %d\n", (numSamples ? 
// 1: 0)); // if( numSamples ) // { // ELVIS_PRINTF("Value of samples: %d\n", numSamples[0]); // } } uint2 screen; screen.x = gridDim.x * blockDim.x; screen.y = gridDim.y * blockDim.y; int segmentIndex = pixel.x + screen.x * pixel.y; // if( traceEnabled ) // { // ELVIS_PRINTF("Segment index %d\n", segmentIndex); // } if (segmentEnd[segmentIndex] < MAKE_FLOAT(0.0)) { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because ray has left volume based // on segment end\n", segmentIndex); // } return; } int elementId = segmentElementId[segmentIndex]; // if( traceEnabled ) // { // ELVIS_PRINTF("Element id %d\n", elementId); // } if (elementId == -1) { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because element id is 0\n", // segmentIndex); // } return; } int elementTypeId = segmentElementType[segmentIndex]; ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex]; ElVisFloat3 color = colorAccumulator[segmentIndex]; ElVisFloat a = segmentStart[segmentIndex]; ElVisFloat b = segmentEnd[segmentIndex]; ElVisFloat3 dir = segmentDirection[segmentIndex]; ElVisFloat d = (b - a); // if( traceEnabled ) // { // ELVIS_PRINTF("Ray Direction (%2.10f, %2.10f, %2.10f), segment // distance %2.10f\n", dir.x, dir.y, dir.z, d); // } if (d == MAKE_FLOAT(0.0)) { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because d is 0\n", dir.x, dir.y, // dir.z, d); // } return; } int n = Floor(d / desiredH); ElVisFloat h; if (n == 0) { h = b - a; n = 1; } else { h = d / (ElVisFloat)(n); } // if( traceEnabled ) // { // ELVIS_PRINTF("Total segment range: [%2.15f, %2.15f], segment // Id %d\n", segmentStart[segmentIndex], // segmentEnd[segmentIndex], segmentIndex); // ELVIS_PRINTF("D = %2.15f, H = %2.15f, N = %d\n", d, h, n); // } // First test for density identically 0. This means the segment does not // contribute at // all to the integral and can be skipped. ElVisFloat3 p0 = origin + a * dir; ElVisFloat3 p1 = origin + b * dir; ElVis::Interval<ElVisFloat> range; EstimateRangeCuda(elementId, elementTypeId, fieldId, p0, p1, range); if (traceEnabled) { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // ELVIS_PRINTF("Origin (%f, %f, %f)\n", origin.x, origin.y, // origin.z); // ELVIS_PRINTF("Direction (%f, %f, %f)\n", dir.x, dir.y, // dir.z); // ELVIS_PRINTF("Integration domain [%f, %f]\n", a, b); } bool densityContainsBreakpoints = transferFunction->RangeContainsAtLeastOneBreakpoint(eDensity, range); Interval<ElVisFloat> densityRange = transferFunction->Sample(eDensity, range); if (traceEnabled) { // ELVIS_PRINTF("Density contains breakpoints // %d.\n",densityContainsBreakpoints ? 1 : 0 ); // ELVIS_PRINTF("Density range (%f, %f).\n", // densityRange.GetLow(), densityRange.GetHigh()); } if (enableEmptySpaceSkipping) { if (densityRange.GetLow() == MAKE_FLOAT(0.0) && densityRange.GetHigh() == MAKE_FLOAT(0.0)) { if (traceEnabled) { // ELVIS_PRINTF("Density is identically 0.\n"); } // if( renderIntegrationType ) // { // colorAccumulator[segmentIndex].x += // MAKE_FLOAT(20.0)/MAKE_FLOAT(255.0); // } return; } } // At this point we know that there is some non-0 density along the segment. // Check if the color is identically 0. If so, we can just integrate the // density. 
bool colorContainsBreakpoints = transferFunction->ColorContainsAtLeastOneBreakpoint(range); Interval<ElVisFloat> redRange = transferFunction->Sample(eRed, range); Interval<ElVisFloat> greenRange = transferFunction->Sample(eGreen, range); Interval<ElVisFloat> blueRange = transferFunction->Sample(eBlue, range); Interval<ElVisFloat> totalColorRange; totalColorRange.Combine(redRange); totalColorRange.Combine(blueRange); totalColorRange.Combine(greenRange); if (traceEnabled) { // ELVIS_PRINTF("Color contains breakpoints // %d.\n",colorContainsBreakpoints ? 1 : 0 ); // ELVIS_PRINTF("Red range (%f, %f).\n", redRange.GetLow(), // redRange.GetHigh()); // ELVIS_PRINTF("Green range (%f, %f).\n", greenRange.GetLow(), // greenRange.GetHigh()); // ELVIS_PRINTF("Blue range (%f, %f).\n", blueRange.GetLow(), // blueRange.GetHigh()); // ELVIS_PRINTF("Total Color range (%f, %f).\n", // totalColorRange.GetLow(), blueRange.GetHigh()); } // If the color does not contribute, then we can just integrate the density. FieldEvaluator f; f.Origin = origin; f.Direction = dir; f.ElementId = elementId; f.ElementType = elementTypeId; f.sampleCount = numSamples; f.FieldId = fieldId; // bool colorEmpty = totalColorRange.GetLow() == MAKE_FLOAT(0.0) && // totalColorRange.GetHigh() == MAKE_FLOAT(0.0); // int doDensityOnly = __all(colorEmpty); // __syncthreads(); if (totalColorRange.GetLow() == MAKE_FLOAT(0.0) && totalColorRange.GetHigh() == MAKE_FLOAT(0.0)) // if( doDensityOnly ) { InnerIntegralFunctor innerIntegralFunc; innerIntegralFunc.transferFunction = transferFunction; // int doBreakpoints = __any(densityContainsBreakpoints); // __syncthreads(); // Case 2 - Integrate density only. if (densityContainsBreakpoints) // if( doBreakpoints ) { if (traceEnabled) { // ELVIS_PRINTF("Integrate density alone using adaptive // trapezoidal.\n"); } ElVisFloat result = FieldTrapezoidalIntegration::CompositeIntegration( innerIntegralFunc, f, a, b, n, traceEnabled); accumulatedDensity += result; f.AdjustSampleCount(-1); } else { if (traceEnabled) { // ELVIS_PRINTF("Integrate density alone using gauss-kronrod.\n"); } ElVisFloat errorEstimate = MAKE_FLOAT(0.0); ElVisFloat result = SingleThreadGaussKronrod<G7K15>::Integrate<ElVisFloat>( innerIntegralFunc, a, b, f, errorEstimate, traceEnabled); accumulatedDensity += result; // if( traceEnabled ) // { // ELVIS_PRINTF("[%d, %d] - GK Density (%f, %f) - // [%f, %f].\n", pixel.x, pixel.y, a, b, // range.GetLow(), range.GetHigh()); // //ELVIS_PRINTF("G7K15 Density over segment %f with // error %f\n", result, errorEstimate); // } } } else { // int doColorContainsBreakpoints = // __any(colorContainsBreakpoints); // __syncthreads(); // Color Contributes. // Case 3: Everything. Both density and color contribute. // 2.1. No breakpoints in either. // 2.2. Color breakpoints, density no // 2.3. Color no breakpoints, density yes. // 3.5 Color and density have breakpoints. if (colorContainsBreakpoints) // if( doColorContainsBreakpoints ) { // Do trapezoidal for outer and inner in lockstep. 
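      // Discretely, with t_i = a + i*h, k_i = density(f(t_i)),
      // c_i = color(f(t_i)) and D_0 the density carried in from earlier
      // segments, the loop below advances
      //
      //   D_i = D_{i-1} + (h/2) * (k_{i-1} + k_i)
      //   C  += w_i * h * c_i * k_i * exp(-D_i),  w_i = 1/2 at the two
      //                                           endpoints and 1 otherwise,
      //
      // so each sample is attenuated by the density accumulated up to that
      // sample.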
if (traceEnabled) { // ELVIS_PRINTF("Trapezoidal for outer and inner.\n"); } // if( renderIntegrationType ) // { // colorAccumulator[segmentIndex].y += // MAKE_FLOAT(20.0)/MAKE_FLOAT(255.0); // return; // } ElVisFloat s0 = f(a); ElVisFloat3 color0 = transferFunction->SampleColor(s0); ElVisFloat d0 = transferFunction->Sample(eDensity, s0); ElVisFloat atten = expf(-accumulatedDensity); color += h * MAKE_FLOAT(.5) * color0 * d0 * atten; for (int i = 1; i < n; ++i) { ElVisFloat t = a + i * h; ElVisFloat sample = f(t); // if( traceEnabled ) // { // ElVisFloat3 tempPoint = origin + t*dir; // ELVIS_PRINTF("Sample at %f (%f, %f, %f) = // %f\n", t, tempPoint.x, tempPoint.y, // tempPoint.z, sample); // } ElVisFloat d1 = transferFunction->Sample(eDensity, sample); accumulatedDensity += MAKE_FLOAT(.5) * h * (d0 + d1); // if( traceEnabled ) // { // ELVIS_PRINTF("Density = %f\n", d1); // } ElVisFloat3 colorSample = transferFunction->SampleColor(sample); ElVisFloat atten = expf(-accumulatedDensity); color += h * colorSample * d1 * atten; // if( traceEnabled ) // { // ELVIS_PRINTF("Density = %f, accumulated // density = %f\n", d1, accumulatedDensity); // ELVIS_PRINTF("Color Samples = (%f, %f, %f), // Accumulated Color = (%f, %f, %f)\n", // colorSample.x, colorSample.y, colorSample.z, // color.x, color.y, color.z); // } d0 = d1; } ElVisFloat sn = f(b); ElVisFloat3 colorn = transferFunction->SampleColor(sn); ElVisFloat dn = transferFunction->Sample(eDensity, sn); accumulatedDensity += MAKE_FLOAT(.5) * h * (d0 + dn); atten = expf(-accumulatedDensity); color += h * MAKE_FLOAT(.5) * colorn * dn * atten; // if( traceEnabled ) // { // ELVIS_PRINTF("Final Sample %f, Final Density // Sample %f, Final Color Sample (%f, %f, %f)\n", sn, // dn, colorn.x, colorn.y, colorn.z); // } f.AdjustSampleCount(-1); } else { // Color doesn't have breakpoints, so the main integral can be evaluated // with Gauss-Kronrod. // We'll do adaptive trapezoidal in the density, adding on to the // existing integral as we sample // the gauss-kronrod points. This way we don't have to keep the // adaptive structure around. if (traceEnabled) { // ELVIS_PRINTF("Gauss-Kronrod outer, Trapezoidal inner.\n"); } // if( renderIntegrationType ) // { // colorAccumulator[segmentIndex].z += // MAKE_FLOAT(20.0)/MAKE_FLOAT(255.0); // return; // } OuterIntegralIntegrandWithInnerTrapezoidal outerIntegrand; outerIntegrand.accumulatedDensity = accumulatedDensity; outerIntegrand.field = &f; outerIntegrand.innerH = h; outerIntegrand.innerT = a; outerIntegrand.transferFunction = transferFunction; InnerIntegralFunctor innerIntegrand; innerIntegrand.transferFunction = transferFunction; outerIntegrand.innerIntegral = &innerIntegrand; // if( traceEnabled ) // { // ELVIS_PRINTF("Start GK with incoming density // %2.15f\n", outerIntegrand.accumulatedDensity); // } ElVisFloat3 errorEstimate; ElVisFloat3 colorContribution = SingleThreadGaussKronrod<G7K15>::Integrate<ElVisFloat3>( outerIntegrand, a, b, f, errorEstimate, traceEnabled); // TODO - need to finish the density contribution for the space between // the last sample and the end of the interval. 
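      // The G7K15 call above evaluates the outer integrand at a fixed set of
      // abscissae in [a, b] and returns an error estimate alongside the color
      // contribution. Each time the integrand is evaluated at a new point t,
      // OuterIntegralIntegrandWithInnerTrapezoidal advances the running
      // density integral from the previous evaluation point innerT up to t
      // with trapezoidal steps of width close to innerH, which is why no
      // adaptive data structure for the inner integral has to be kept.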
// if( traceEnabled ) // { // ElVisFloat testDensity = // FieldTrapezoidalIntegration::CompositeIntegration(innerIntegrand, // f, a, b, n, traceEnabled); // ELVIS_PRINTF("After running GK, the incoming color // is (%2.15f, %2.15f, %2.15f), the color // contribution is (%2.15f, %2.15f, %2.15f), and // density contribution is %2.15f (test density is // %2.15f) \n", // color.x, color.y, color.z, // colorContribution.x, colorContribution.y, // colorContribution.z, // outerIntegrand.accumulatedDensity, // testDensity); // } color += colorContribution; accumulatedDensity = outerIntegrand.accumulatedDensity; } } // if( traceEnabled ) // { // ELVIS_PRINTF("Final density %2.15f\n", accumulatedDensity); // ELVIS_PRINTF("Final color is (%2.15f, %2.15f, %2.15f)\n", // color.x, color.y, color.z); // if( numSamples ) // { // ELVIS_PRINTF("Value of samples: %d\n", numSamples[0]); // } // } densityAccumulator[segmentIndex] = accumulatedDensity; colorAccumulator[segmentIndex] = color; } // Force GK/Trap extern "C" __global__ void //__launch_bounds__(32, 8) GKOnly(ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, TransferFunction* transferFunction, ElVisFloat epsilon, ElVisFloat desiredH, bool enableTrace, int tracex, int tracey, int* numSamples, bool renderIntegrationType, bool enableEmptySpaceSkipping, ElVisFloat* __restrict__ densityAccumulator, ElVisFloat3* __restrict__ colorAccumulator) { // int2 trace = make_int2(tracex, tracey); // uint2 pixel; // pixel.x = blockIdx.x * blockDim.x + threadIdx.x; // pixel.y = blockIdx.y * blockDim.y + threadIdx.y; // bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && // enableTrace); // if( traceEnabled ) // { // ELVIS_PRINTF("Esilon %2.10f\n", epsilon); // ELVIS_PRINTF("Number of samples enabled %d\n", (numSamples ? 
// 1: 0)); // if( numSamples ) // { // ELVIS_PRINTF("Value of samples: %d\n", numSamples[0]); // } // } // uint2 screen; // screen.x = gridDim.x * blockDim.x; // screen.y = gridDim.y * blockDim.y; // int segmentIndex = pixel.x + screen.x*pixel.y; // if( traceEnabled ) // { // ELVIS_PRINTF("Segment index %d\n", segmentIndex); // } // if( segmentEnd[segmentIndex] < MAKE_FLOAT(0.0) ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because ray has left volume based on // segment end\n", segmentIndex); // } // return; // } // int elementId = segmentElementId[segmentIndex]; // if( traceEnabled ) // { // ELVIS_PRINTF("Element id %d\n", elementId); // } // if( elementId == -1 ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because element id is 0\n", // segmentIndex); // } // return; // } // int elementTypeId = segmentElementType[segmentIndex]; // ElVisFloat accumulatedDensity = densityAccumulator[segmentIndex]; // ElVisFloat3 color = colorAccumulator[segmentIndex]; // ElVisFloat a = segmentStart[segmentIndex]; // ElVisFloat b = segmentEnd[segmentIndex]; // ElVisFloat3 dir = segmentDirection[segmentIndex]; // ElVisFloat d = (b-a); // if( traceEnabled ) // { // ELVIS_PRINTF("Ray Direction (%2.10f, %2.10f, %2.10f), segment // distance %2.10f\n", dir.x, dir.y, dir.z, d); // } // if( d == MAKE_FLOAT(0.0) ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because d is 0\n", dir.x, dir.y, // dir.z, d); // } // return; // } // int n = Floor(d/desiredH); // ElVisFloat h; // if( n == 0 ) // { // h = b-a; // n = 1; // } // else // { // h= d/(ElVisFloat)(n); // } // if( traceEnabled ) // { // ELVIS_PRINTF("Total segment range: [%2.15f, %2.15f], segment // Id %d\n", segmentStart[segmentIndex], // segmentEnd[segmentIndex], segmentIndex); // ELVIS_PRINTF("D = %2.15f, H = %2.15f, N = %d\n", d, h, n); // } // // First test for density identically 0. This means the segment // does not contribute at // // all to the integral and can be skipped. // ElVisFloat3 p0 = origin + a*dir; // ElVisFloat3 p1 = origin + b*dir; // ElVisFloat s0 = EvaluateFieldCuda(elementId, elementTypeId, p0); // ElVisFloat s1 = EvaluateFieldCuda(elementId, elementTypeId, p1); // ElVis::Interval<ElVisFloat> range; // EstimateRangeCuda(elementId, elementTypeId, p0, p1, range); // if( traceEnabled ) // { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // ELVIS_PRINTF("Origin (%f, %f, %f)\n", origin.x, origin.y, // origin.z); // ELVIS_PRINTF("Direction (%f, %f, %f)\n", dir.x, dir.y, dir.z); // ELVIS_PRINTF("Integration domain [%f, %f]\n", a, b); // } // bool densityContainsBreakpoints = // transferFunction->RangeContainsAtLeastOneBreakpoint(eDensity, // range); // Interval<ElVisFloat> densityRange = // transferFunction->Sample(eDensity, range); // if( traceEnabled ) // { // ELVIS_PRINTF("Density contains breakpoints // %d.\n",densityContainsBreakpoints ? 1 : 0 ); // ELVIS_PRINTF("Density range (%f, %f).\n", // densityRange.GetLow(), densityRange.GetHigh()); // } //// if( enableEmptySpaceSkipping ) //// { //// if( densityRange.GetLow() == MAKE_FLOAT(0.0) && //// densityRange.GetHigh() == MAKE_FLOAT(0.0) ) //// { //// if( traceEnabled ) //// { //// ELVIS_PRINTF("Density is identically 0.\n"); //// } //// if( renderIntegrationType ) //// { //// colorAccumulator[segmentIndex].x += ///MAKE_FLOAT(1.0)/MAKE_FLOAT(255.0); //// } //// return; //// } //// } // // At this point we know that there is some non-0 density along // the segment. 
// // Check if the color is identically 0. If so, we can just // integrate the // // density. // bool colorContainsBreakpoints = // transferFunction->ColorContainsAtLeastOneBreakpoint(range); // Interval<ElVisFloat> redRange = transferFunction->Sample(eRed, // range); // Interval<ElVisFloat> greenRange = transferFunction->Sample(eGreen, // range); // Interval<ElVisFloat> blueRange = transferFunction->Sample(eBlue, // range); // Interval<ElVisFloat> totalColorRange; // totalColorRange.Combine(redRange); // totalColorRange.Combine(blueRange); // totalColorRange.Combine(greenRange); // if( traceEnabled ) // { // ELVIS_PRINTF("Color contains breakpoints // %d.\n",colorContainsBreakpoints ? 1 : 0 ); // ELVIS_PRINTF("Red range (%f, %f).\n", redRange.GetLow(), // redRange.GetHigh()); // ELVIS_PRINTF("Green range (%f, %f).\n", greenRange.GetLow(), // greenRange.GetHigh()); // ELVIS_PRINTF("Blue range (%f, %f).\n", blueRange.GetLow(), // blueRange.GetHigh()); // ELVIS_PRINTF("Total Color range (%f, %f).\n", // totalColorRange.GetLow(), blueRange.GetHigh()); // } // // If the color does not contribute, then we can just integrate // the density. // FieldEvaluator f; // f.Origin = origin; // f.Direction = dir; // f.ElementId = elementId; // f.ElementType = elementTypeId; // f.sampleCount = numSamples; // f.FieldId = fieldId; // { // ELVIS_PRINTF("[%d, %d] - GK Colro - Trap Density.\n", // pixel.x, pixel.y); // // Color doesn't have breakpoints, so the main // integral can be evaluated with Gauss-Kronrod. // // We'll do adaptive trapezoidal in the density, // adding on to the existing integral as we sample // // the gauss-kronrod points. This way we don't have // to keep the adaptive structure around. // if( traceEnabled ) // { // ELVIS_PRINTF("Gauss-Kronrod outer, Trapezoidal // inner.\n"); // } // if( renderIntegrationType ) // { // colorAccumulator[segmentIndex].z += // MAKE_FLOAT(1.0)/MAKE_FLOAT(255.0); // return; // } // OuterIntegralIntegrandWithInnerTrapezoidal // outerIntegrand; // outerIntegrand.accumulatedDensity = // accumulatedDensity; // outerIntegrand.field = &f; // outerIntegrand.innerH = h; // outerIntegrand.innerT = a; // outerIntegrand.transferFunction = transferFunction; // InnerIntegralFunctor innerIntegrand; // innerIntegrand.transferFunction = transferFunction; // outerIntegrand.innerIntegral = &innerIntegrand; // if( traceEnabled ) // { // ELVIS_PRINTF("Start GK with incoming density // %2.15f\n", outerIntegrand.accumulatedDensity); // } // ElVisFloat3 errorEstimate; // ElVisFloat3 colorContribution = // SingleThreadGaussKronrod<G7K15>::Integrate<ElVisFloat3>(outerIntegrand, // a, b, f, errorEstimate, traceEnabled); // // TODO - need to finish the density contribution for // the space between the last sample and the end of the // interval. 
// if( traceEnabled ) // { // ElVisFloat testDensity = // FieldTrapezoidalIntegration::CompositeIntegration(innerIntegrand, // f, a, b, n, traceEnabled); // ELVIS_PRINTF("After running GK, the incoming color // is (%2.15f, %2.15f, %2.15f), the color // contribution is (%2.15f, %2.15f, %2.15f), and // density contribution is %2.15f (test density is // %2.15f) \n", // color.x, color.y, color.z, // colorContribution.x, colorContribution.y, // colorContribution.z, // outerIntegrand.accumulatedDensity, // testDensity); // } // color += colorContribution; // accumulatedDensity = // outerIntegrand.accumulatedDensity; // } // if( traceEnabled ) // { // ELVIS_PRINTF("Final density %2.15f\n", accumulatedDensity); // ELVIS_PRINTF("Final color is (%2.15f, %2.15f, %2.15f)\n", // color.x, color.y, color.z); // if( numSamples ) // { // ELVIS_PRINTF("Value of samples: %d\n", numSamples[0]); // } // } // densityAccumulator[segmentIndex] = accumulatedDensity; // colorAccumulator[segmentIndex] = color; } } #endif
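// A minimal host-side launch sketch for IntegrateSegmentSingleThreadPerRay,
// assuming a screenWidth x screenHeight segment buffer; the actual launch is
// issued elsewhere in ElVis, and the names screenWidth/screenHeight below are
// illustrative only. The kernel computes segmentIndex as
// pixel.x + (gridDim.x * blockDim.x) * pixel.y, so the segment buffers are
// expected to be laid out with that pitch.
//
//   dim3 block(8, 8);
//   dim3 grid((screenWidth + block.x - 1) / block.x,
//             (screenHeight + block.y - 1) / block.y);
//   IntegrateSegmentSingleThreadPerRay<<<grid, block>>>(
//       origin, segmentElementId, segmentElementType, segmentDirection,
//       segmentStart, segmentEnd, fieldId, transferFunction, epsilon,
//       desiredH, enableTrace, tracex, tracey, numSamples,
//       renderIntegrationType, enableEmptySpaceSkipping,
//       densityAccumulator, colorAccumulator);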
e57c3e25e96afaf8acdb42cce2c108efbc394e2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define PRECISION_z // These routines merge multiple kernels from zmergebicgstab into one // This is the code used for the ASHES2014 paper // "Accelerating Krylov Subspace Solvers on Graphics Processing Units". // notice that only CSR format is supported so far. // accelerated reduction for one vector __global__ void magma_zreduce_kernel_spmv1( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } __global__ void magma_zbicgmerge_spmv1_kernel( int n, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * p, magmaDoubleComplex * r, magmaDoubleComplex * v, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; if( i<n ){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * p[ dcolind[j] ]; v[ i ] = dot; } __syncthreads(); temp[ Idx ] = ( i < n ) ? 
v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } __global__ void magma_zbicgstab_alphakernel( magmaDoubleComplex * skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ magmaDoubleComplex tmp = skp[0]; skp[0] = skp[4]/tmp; } } /** Purpose ------- Merges the first SpmV using CSR with the dot product and the computation of alpha Arguments --------- @param[in] A magma_z_matrix system matrix @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in] dp magmaDoubleComplex_ptr input vector p @param[in] dr magmaDoubleComplex_ptr input vector r @param[in] dv magmaDoubleComplex_ptr output vector v @param[in,out] skp magmaDoubleComplex_ptr array for parameters ( skp[0]=alpha ) @param[in] queue magma_queue_t Queue to execute in. 
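
    Implementation note: magma_zbicgmerge_spmv1_kernel writes one partial sum
    of the dot product of v and r per thread block into d1;
    magma_zreduce_kernel_spmv1 is then applied repeatedly, ping-ponging
    between d1 and d2, until a single value remains. That value is copied
    into skp[0], after which magma_zbicgstab_alphakernel overwrites skp[0]
    with skp[4] / skp[0].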
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbicgmerge_spmv1( magma_z_matrix A, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr dp, magmaDoubleComplex_ptr dr, magmaDoubleComplex_ptr dv, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int n = A.num_rows; int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR) hipLaunchKernelGGL(( magma_zbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 ); else printf("error: only CSR format supported.\n"); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_zbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // accelerated block reduction for multiple vectors __global__ void magma_zreduce_kernel_spmv2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } __global__ void magma_zbicgmerge_spmv2_kernel( int n, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * s, magmaDoubleComplex * t, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; if( i<n ){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * s[ dcolind[j] ]; t[ i ] = dot; } __syncthreads(); // 2 vectors if (i<n){ magmaDoubleComplex tmp2 = t[i]; temp[Idx] = s[i] * tmp2; temp[Idx+blockDim.x] = tmp2 * tmp2; } else { for( j=0; j<2; j++) temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if 
defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } __global__ void magma_zbicgstab_omegakernel( magmaDoubleComplex * skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ skp[2] = skp[6]/skp[7]; skp[3] = skp[4]; } } /** Purpose ------- Merges the second SpmV using CSR with the dot product and the computation of omega Arguments --------- @param[in] A magma_z_matrix input matrix @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in] ds magmaDoubleComplex_ptr input vector s @param[in] dt magmaDoubleComplex_ptr output vector t @param[in,out] skp magmaDoubleComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbicgmerge_spmv2( magma_z_matrix A, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr ds, magmaDoubleComplex_ptr dt, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int n = A.num_rows; int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR) hipLaunchKernelGGL(( magma_zbicgmerge_spmv2_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, A.dval, A.drow, A.dcol, ds, dt, d1 ); else printf("error: only CSR format supported.\n"); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp+6, 1, queue ); magma_zcopyvector( 1, aux1+n, 1, skp+7, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_zbicgstab_omegakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ __global__ void magma_zbicgmerge_xrbeta_kernel( int n, magmaDoubleComplex * rr, magmaDoubleComplex * r, magmaDoubleComplex * p, magmaDoubleComplex * s, magmaDoubleComplex * t, magmaDoubleComplex * x, magmaDoubleComplex * skp, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int 
j; magmaDoubleComplex alpha=skp[0]; magmaDoubleComplex omega=skp[2]; if( i<n ){ magmaDoubleComplex sl; sl = s[i]; x[i] = x[i] + alpha * p[i] + omega * sl; r[i] = sl - omega * t[i]; } __syncthreads(); // 2 vectors if (i<n){ magmaDoubleComplex tmp2 = r[i]; temp[Idx] = rr[i] * tmp2; temp[Idx+blockDim.x] = tmp2 * tmp2; } else { for( j=0; j<2; j++) temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } __global__ void magma_zbicgstab_betakernel( magmaDoubleComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ magmaDoubleComplex tmp1 = skp[4]/skp[3]; magmaDoubleComplex tmp2 = skp[0] / skp[2]; skp[1] = tmp1*tmp2; } } /** Purpose ------- Merges the second SpmV using CSR with the dot product and the computation of omega Arguments --------- @param[in] n int dimension n @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in] rr magmaDoubleComplex_ptr input vector rr @param[in] r magmaDoubleComplex_ptr input/output vector r @param[in] p magmaDoubleComplex_ptr input vector p @param[in] s magmaDoubleComplex_ptr input vector s @param[in] t magmaDoubleComplex_ptr input vector t @param[out] x magmaDoubleComplex_ptr output vector x @param[in] skp magmaDoubleComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbicgmerge_xrbeta( magma_int_t n, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr rr, magmaDoubleComplex_ptr r, magmaDoubleComplex_ptr p, magmaDoubleComplex_ptr s, magmaDoubleComplex_ptr t, magmaDoubleComplex_ptr x, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_zbicgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(), n, rr, r, p, s, t, x, skp, d1); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(), Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp+4, 1, queue ); magma_zcopyvector( 1, aux1+n, 1, skp+5, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); hipLaunchKernelGGL(( magma_zbicgstab_betakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
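// --- Editor's note (inferred from the kernels in this file, not original documentation) ---
// Reading the scalar kernels together, the skp[] workspace appears to be laid out as
// follows: spmv1 reduces <v, r> into skp[0] and alphakernel then overwrites it with
// alpha = skp[4] / <v, r>; spmv2 reduces <s, t> and <t, t> into skp[6] and skp[7], and
// omegakernel sets omega = skp[2] = skp[6]/skp[7] while saving the previous skp[4] into
// skp[3]; xrbeta reduces <rr, r> and <r, r> into skp[4] and skp[5], and betakernel forms
// beta = skp[1] = (skp[4]/skp[3]) * (skp[0]/skp[2]), matching the usual BiCGSTAB update.
// Only skp[0] = alpha is stated explicitly in the original comments, so treat the rest
// of this mapping as an inference.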
e57c3e25e96afaf8acdb42cce2c108efbc394e2a.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define PRECISION_z // These routines merge multiple kernels from zmergebicgstab into one // This is the code used for the ASHES2014 paper // "Accelerating Krylov Subspace Solvers on Graphics Processing Units". // notice that only CSR format is supported so far. // accelerated reduction for one vector __global__ void magma_zreduce_kernel_spmv1( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0); int i = blockIdx.x * ( blockSize * 2 ) + Idx; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } __global__ void magma_zbicgmerge_spmv1_kernel( int n, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * p, magmaDoubleComplex * r, magmaDoubleComplex * v, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; if( i<n ){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * p[ dcolind[j] ]; v[ i ] = dot; } __syncthreads(); temp[ Idx ] = ( i < n ) ? 
v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0); __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } __global__ void magma_zbicgstab_alphakernel( magmaDoubleComplex * skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ magmaDoubleComplex tmp = skp[0]; skp[0] = skp[4]/tmp; } } /** Purpose ------- Merges the first SpmV using CSR with the dot product and the computation of alpha Arguments --------- @param[in] A magma_z_matrix system matrix @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in] dp magmaDoubleComplex_ptr input vector p @param[in] dr magmaDoubleComplex_ptr input vector r @param[in] dv magmaDoubleComplex_ptr output vector v @param[in,out] skp magmaDoubleComplex_ptr array for parameters ( skp[0]=alpha ) @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbicgmerge_spmv1( magma_z_matrix A, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr dp, magmaDoubleComplex_ptr dr, magmaDoubleComplex_ptr dv, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int n = A.num_rows; int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR) magma_zbicgmerge_spmv1_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>> ( n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 ); else printf("error: only CSR format supported.\n"); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_zbicgstab_alphakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ // accelerated block reduction for multiple vectors __global__ void magma_zreduce_kernel_spmv2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0); while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_MAKE( 0.0, 0.0); i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } __global__ void magma_zbicgmerge_spmv2_kernel( int n, magmaDoubleComplex * dval, magma_index_t * drowptr, magma_index_t * dcolind, magmaDoubleComplex * s, magmaDoubleComplex * t, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; if( i<n ){ magmaDoubleComplex dot = MAGMA_Z_ZERO; int start = drowptr[ i ]; int end = drowptr[ i+1 ]; for( j=start; j<end; j++) dot += dval[ j ] * s[ dcolind[j] ]; t[ i ] = dot; } __syncthreads(); // 2 vectors if (i<n){ magmaDoubleComplex tmp2 = t[i]; temp[Idx] = s[i] * tmp2; temp[Idx+blockDim.x] = tmp2 * tmp2; } else { for( j=0; j<2; j++) temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if 
defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } __global__ void magma_zbicgstab_omegakernel( magmaDoubleComplex * skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ skp[2] = skp[6]/skp[7]; skp[3] = skp[4]; } } /** Purpose ------- Merges the second SpmV using CSR with the dot product and the computation of omega Arguments --------- @param[in] A magma_z_matrix input matrix @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in] ds magmaDoubleComplex_ptr input vector s @param[in] dt magmaDoubleComplex_ptr output vector t @param[in,out] skp magmaDoubleComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbicgmerge_spmv2( magma_z_matrix A, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr ds, magmaDoubleComplex_ptr dt, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int n = A.num_rows; int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; if ( A.storage_type == Magma_CSR) magma_zbicgmerge_spmv2_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>> ( n, A.dval, A.drow, A.dcol, ds, dt, d1 ); else printf("error: only CSR format supported.\n"); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp+6, 1, queue ); magma_zcopyvector( 1, aux1+n, 1, skp+7, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_zbicgstab_omegakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ __global__ void magma_zbicgmerge_xrbeta_kernel( int n, magmaDoubleComplex * rr, magmaDoubleComplex * r, magmaDoubleComplex * p, magmaDoubleComplex * s, magmaDoubleComplex * t, magmaDoubleComplex * x, magmaDoubleComplex * skp, magmaDoubleComplex * vtmp ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; magmaDoubleComplex alpha=skp[0]; magmaDoubleComplex omega=skp[2]; if( i<n ){ 
magmaDoubleComplex sl; sl = s[i]; x[i] = x[i] + alpha * p[i] + omega * sl; r[i] = sl - omega * t[i]; } __syncthreads(); // 2 vectors if (i<n){ magmaDoubleComplex tmp2 = r[i]; temp[Idx] = rr[i] * tmp2; temp[Idx+blockDim.x] = tmp2 * tmp2; } else { for( j=0; j<2; j++) temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0); } __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #if defined(PRECISION_z) || defined(PRECISION_c) if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #if defined(PRECISION_d) if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif #if defined(PRECISION_s) if( Idx < 32 ){ volatile float *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } __global__ void magma_zbicgstab_betakernel( magmaDoubleComplex * skp ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ magmaDoubleComplex tmp1 = skp[4]/skp[3]; magmaDoubleComplex tmp2 = skp[0] / skp[2]; skp[1] = tmp1*tmp2; } } /** Purpose ------- Merges the second SpmV using CSR with the dot product and the computation of omega Arguments --------- @param[in] n int dimension n @param[in] d1 magmaDoubleComplex_ptr temporary vector @param[in] d2 magmaDoubleComplex_ptr temporary vector @param[in] rr magmaDoubleComplex_ptr input vector rr @param[in] r magmaDoubleComplex_ptr input/output vector r @param[in] p magmaDoubleComplex_ptr input vector p @param[in] s magmaDoubleComplex_ptr input vector s @param[in] t magmaDoubleComplex_ptr input vector t @param[out] x magmaDoubleComplex_ptr output vector x @param[in] skp magmaDoubleComplex_ptr array for parameters @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbicgmerge_xrbeta( magma_int_t n, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr rr, magmaDoubleComplex_ptr r, magmaDoubleComplex_ptr p, magmaDoubleComplex_ptr s, magmaDoubleComplex_ptr t, magmaDoubleComplex_ptr x, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2*local_block_size * sizeof( magmaDoubleComplex ); magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; magma_zbicgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>> ( n, rr, r, p, s, t, x, skp, d1); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } magma_zcopyvector( 1, aux1, 1, skp+4, 1, queue ); magma_zcopyvector( 1, aux1+n, 1, skp+5, 1, queue ); dim3 Bs2( 2 ); dim3 Gs2( 1 ); magma_zbicgstab_betakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */
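// --- Editor's note (hedged sketch, not part of the original MAGMA source) ---
// The host wrappers above all follow the same two-phase reduction pattern: a fused
// kernel writes one partial result per thread block into d1, and a generic block-reduce
// kernel is then launched repeatedly, ping-ponging between d1 and d2 (aux1/aux2), until
// a single value remains and is copied into skp. The stand-alone CUDA sketch below
// illustrates that driver pattern for a plain sum; the names block_sum_kernel and
// reduce_to_scalar are hypothetical, it assumes a power-of-two block size, and it
// assumes d_tmp1/d_tmp2 each hold at least one double per launched block. The real
// MAGMA kernels additionally reduce two interleaved vectors at once.
#include <cuda_runtime.h>

// Each block sums up to 2*blockDim.x elements of `in` into out[blockIdx.x].
__global__ void block_sum_kernel(int n, const double *in, double *out) {
    extern __shared__ double smem[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x * 2 + tid;
    double v = 0.0;
    if (i < n)              v += in[i];
    if (i + blockDim.x < n) v += in[i + blockDim.x];
    smem[tid] = v;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {   // tree reduction in shared memory
        if (tid < s) smem[tid] += smem[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = smem[0];
}

// Mirrors the while( Gs.x > 1 ) loop in the wrappers above: keep reducing the
// per-block partials, swapping the two scratch buffers, until one value is left.
double reduce_to_scalar(int n, const double *d_in, double *d_tmp1, double *d_tmp2) {
    const int bs = 256;
    int blocks = (n + 2 * bs - 1) / (2 * bs);
    block_sum_kernel<<<blocks, bs, bs * sizeof(double)>>>(n, d_in, d_tmp1);
    double *aux1 = d_tmp1, *aux2 = d_tmp2;
    int remaining = blocks;
    while (remaining > 1) {
        int next = (remaining + 2 * bs - 1) / (2 * bs);
        block_sum_kernel<<<next, bs, bs * sizeof(double)>>>(remaining, aux1, aux2);
        double *t = aux1; aux1 = aux2; aux2 = t;     // ping-pong, like aux1/aux2 = d1/d2
        remaining = next;
    }
    double result = 0.0;
    cudaMemcpy(&result, aux1, sizeof(double), cudaMemcpyDeviceToHost);
    return result;
}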
15fb498c1917112c2dc46fb3832da28b25e58ba4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // GroupNorm op in Caffe2 for GPU // Written by Kaiming He // Improved by Xiaomeng Yang // see https://arxiv.org/abs/1803.08494 // This is a stand-alone op: Y = gamma * (X - mu) / sig + beta // ------------------------------------------------------------------ #include "group_norm_op.h" #include <array> #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T> inline __device__ T CubeCUDA(const T x) { return x * x * x; } __global__ void InvStdCUDAKernel( const int size, const float epsilon, const float* var, float* rsig) { CUDA_1D_KERNEL_LOOP(i, size) { #if __CUDA_ARCH__ >= 350 rsig[i] = rsqrtf(__ldg(var + i) + epsilon); #else rsig[i] = rsqrtf(var[i] + epsilon); #endif } } template <typename T, StorageOrder kOrder> __global__ void GroupNormForwardCUDAKernel( const int size, const int G, const int D, const int HxW, const T* X, const T* mu, const T* rsig, const T* gamma, const T* beta, T* Y) { const int C = G * D; CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? i / (D * HxW) : i / (C * HxW) * G + (i / D % G); const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; #if __CUDA_ARCH__ >= 350 Y[i] = __ldg(gamma + i_gamma) * (__ldg(X + i) - __ldg(mu + i_mu)) * __ldg(rsig + i_mu) + __ldg(beta + i_gamma); #else Y[i] = gamma[i_gamma] * (X[i] - mu[i_mu]) * rsig[i_mu] + beta[i_gamma]; #endif } } template <typename T, StorageOrder kOrder> __global__ void ComputeInternalGradientsCUDAKernel( const int N, const int G, const int D, const int HxW, const T* dY, const T* X, const T* gamma, T* ds, T* db) { const int outer_size = N * G; const int inner_size = D * HxW; __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T ds_val = 0; T db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_gamma = i % G * D + j / HxW; const int index = kOrder == StorageOrder::NCHW ? 
i * inner_size + j : (i / G * HxW + j % HxW) * G * D + i_gamma; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(gamma + i_gamma) * __ldg(dY + index) * __ldg(X + index); db_val += __ldg(gamma + i_gamma) * __ldg(dY + index); #else ds_val += gamma[i_gamma] * dY[index] * X[index]; db_val += gamma[i_gamma] * dY[index]; #endif } ds_val = BlockReduce<T>(ds_storage).Reduce(ds_val, hipcub::Sum()); db_val = BlockReduce<T>(db_storage).Reduce(db_val, hipcub::Sum()); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } __syncthreads(); } } // Math: // Y = gamma * (X - mu) * rsig + beta // let s = gamma * rsig // let b = beta - mu * rsig // Y = s * X + b // let n = D * HxW // dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX) // d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX // db/dX = -u * drsig/dX - rsig * dmu/dX // drsig/dX = -rsig^3 * (X - mu) / n // dmu/dX = 1 / n template <typename T, StorageOrder kOrder> __global__ void GroupNormBackwardCUDAKernel( const int size, const int G, const int D, const int HxW, const T* dY, const T* X, const T* mu, const T* rsig, const T* gamma, const T* ds, const T* db, T* dX) { const int C = G * D; const T denom = T(1) / static_cast<T>(D * HxW); CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? i / (D * HxW) : i / (C * HxW) * G + (i / D % G); const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; #if __CUDA_ARCH__ >= 350 const T u = (__ldg(db + i_mu) * __ldg(mu + i_mu) - __ldg(ds + i_mu)) * (__ldg(X + i) - __ldg(mu + i_mu)) * CubeCUDA(__ldg(rsig + i_mu)); const T v = __ldg(db + i_mu) * __ldg(rsig + i_mu); dX[i] = __ldg(gamma + i_gamma) * __ldg(dY + i) * __ldg(rsig + i_mu) + (u - v) * denom; #else const T u = (db[i_mu] * mu[i_mu] - ds[i_mu]) * (X[i] - mu[i_mu]) * CubeCUDA(rsig[i_mu]); const T v = db[i_mu] * rsig[i_mu]; dX[i] = gamma[i_gamma] * dY[i] * rsig[i_mu] + (u - v) * denom; #endif } } template <typename T, StorageOrder kOrder> __global__ void GammaBetaBackwardCUDAKernel( const int N, const int G, const int D, const int HxW, const T* dY, const T* X, const T* mu, const T* rsig, T* dgamma, T* dbeta) { const int outer_size = G * D; const int inner_size = N * HxW; __shared__ typename BlockReduce<T>::TempStorage dg_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T dg_val = 0; T db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int n = j / HxW; const int index = kOrder == StorageOrder::NCHW ? (n * outer_size + i) * HxW + j % HxW : j * outer_size + i; const int i_mu = n * G + i / D; #if __CUDA_ARCH__ >= 350 dg_val += __ldg(dY + index) * (__ldg(X + index) - __ldg(mu + i_mu)) * __ldg(rsig + i_mu); db_val += __ldg(dY + index); #else dg_val += dY[index] * (X[index] - mu[i_mu]) * rsig[i_mu]; db_val += dY[index]; #endif } dg_val = BlockReduce<T>(dg_storage).Reduce(dg_val, hipcub::Sum()); db_val = BlockReduce<T>(db_storage).Reduce(db_val, hipcub::Sum()); if (threadIdx.x == 0) { dgamma[i] = dg_val; dbeta[i] = db_val; } __syncthreads(); } } } // namespace template <> bool GroupNormOp<float, CUDAContext>::RunOnDeviceImpl( const int N, const int G, const int D, const int HxW, const float* X_data, const float* gamma_data, const float* beta_data, float* Y_data, float* mu_data, float* rsig_data) { const std::array<int, 4> dims = order_ == StorageOrder::NCHW ? std::array<int, 4>{N, G, D, HxW} : std::array<int, 4>{N, HxW, G, D}; const std::array<int, 2> axes = order_ == StorageOrder::NCHW ? 
std::array<int, 2>{2, 3} : std::array<int, 2>{1, 3}; // Computes mean and variance. math::Moments<float, CUDAContext>( 4, dims.data(), 2, axes.data(), X_data, mu_data, rsig_data, &context_); // Uses rsqrt to computes 1 / std which is much faster than computes std. hipLaunchKernelGGL(( InvStdCUDAKernel), dim3(CAFFE_GET_BLOCKS(N * G)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N * G, epsilon_, rsig_data, rsig_data); // Computes Y = gamma * (X - mu) * rsig + beta. const int size = N * G * D * HxW; if (order_ == StorageOrder::NCHW) { hipLaunchKernelGGL(( GroupNormForwardCUDAKernel<float, StorageOrder::NCHW>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, G, D, HxW, X_data, mu_data, rsig_data, gamma_data, beta_data, Y_data); } else { hipLaunchKernelGGL(( GroupNormForwardCUDAKernel<float, StorageOrder::NHWC>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, G, D, HxW, X_data, mu_data, rsig_data, gamma_data, beta_data, Y_data); } return true; } // Math: // let: s = gamma * rsig // let: b = beta - mu * gamma * rsig // then: Y = s * X + b template <> bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceImpl( const int N, const int G, const int D, const int HxW, const float* dY_data, const float* X_data, const float* mu_data, const float* rsig_data, const float* gamma_data, float* dX_data, float* dgamma_data, float* dbeta_data) { const int size = N * G * D * HxW; const int C = G * D; ds_.Resize(N, G); db_.Resize(N, G); float* ds_data = ds_.mutable_data<float>(); float* db_data = db_.mutable_data<float>(); if (order_ == StorageOrder::NCHW) { // Computes dL/ds and dL/db. // dL/ds = Sum(dL/dY * gamma * X) // dL/db = Sum(dL/dY * gamma) hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<float, StorageOrder::NCHW>) , dim3(::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data); // Computes dL/dX. hipLaunchKernelGGL(( GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, G, D, HxW, dY_data, X_data, mu_data, rsig_data, gamma_data, ds_data, db_data, dX_data); // Computes dL/dgamma and dL/dbeta. hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<float, StorageOrder::NCHW>) , dim3(::min(C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, G, D, HxW, dY_data, X_data, mu_data, rsig_data, dgamma_data, dbeta_data); } else { // Computes dL/ds and dL/db. // dL/ds = Sum(dL/dY * gamma * X) // dL/db = Sum(dL/dY * gamma) hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<float, StorageOrder::NHWC>) , dim3(::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data); // Computes dL/dX. hipLaunchKernelGGL(( GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), size, G, D, HxW, dY_data, X_data, mu_data, rsig_data, gamma_data, ds_data, db_data, dX_data); // Computes dL/dgamma and dL/dbeta. 
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<float, StorageOrder::NHWC>) , dim3(::min(C, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, G, D, HxW, dY_data, X_data, mu_data, rsig_data, dgamma_data, dbeta_data); } return true; } REGISTER_CUDA_OPERATOR(GroupNorm, GroupNormOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( GroupNormGradient, GroupNormGradientOp<float, CUDAContext>); } // namespace caffe2
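// --- Editor's note (hedged sketch, not part of the original Caffe2 source) ---
// GroupNormForwardCUDAKernel maps each flat element index i to a per-(sample, group)
// statistic index i_mu and a per-channel parameter index i_gamma. For the NCHW layout
// this is easiest to see in a plain CPU reference, which can also serve as a unit-test
// oracle for the kernel; the function name group_norm_nchw_reference is hypothetical.
void group_norm_nchw_reference(int N, int G, int D, int HxW,
                               const float* X, const float* mu,
                               const float* rsig, const float* gamma,
                               const float* beta, float* Y) {
  const int C = G * D;                        // channels per sample
  for (int i = 0; i < N * C * HxW; ++i) {
    const int i_mu = i / (D * HxW);           // index of the (mean, 1/std) pair for (n, g)
    const int i_gamma = (i / HxW) % C;        // channel index for (gamma, beta)
    Y[i] = gamma[i_gamma] * (X[i] - mu[i_mu]) * rsig[i_mu] + beta[i_gamma];
  }
}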
15fb498c1917112c2dc46fb3832da28b25e58ba4.cu
// ------------------------------------------------------------------ // GroupNorm op in Caffe2 for GPU // Written by Kaiming He // Improved by Xiaomeng Yang // see https://arxiv.org/abs/1803.08494 // This is a stand-alone op: Y = gamma * (X - mu) / sig + beta // ------------------------------------------------------------------ #include "group_norm_op.h" #include <array> #include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T> inline __device__ T CubeCUDA(const T x) { return x * x * x; } __global__ void InvStdCUDAKernel( const int size, const float epsilon, const float* var, float* rsig) { CUDA_1D_KERNEL_LOOP(i, size) { #if __CUDA_ARCH__ >= 350 rsig[i] = rsqrtf(__ldg(var + i) + epsilon); #else rsig[i] = rsqrtf(var[i] + epsilon); #endif } } template <typename T, StorageOrder kOrder> __global__ void GroupNormForwardCUDAKernel( const int size, const int G, const int D, const int HxW, const T* X, const T* mu, const T* rsig, const T* gamma, const T* beta, T* Y) { const int C = G * D; CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? i / (D * HxW) : i / (C * HxW) * G + (i / D % G); const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; #if __CUDA_ARCH__ >= 350 Y[i] = __ldg(gamma + i_gamma) * (__ldg(X + i) - __ldg(mu + i_mu)) * __ldg(rsig + i_mu) + __ldg(beta + i_gamma); #else Y[i] = gamma[i_gamma] * (X[i] - mu[i_mu]) * rsig[i_mu] + beta[i_gamma]; #endif } } template <typename T, StorageOrder kOrder> __global__ void ComputeInternalGradientsCUDAKernel( const int N, const int G, const int D, const int HxW, const T* dY, const T* X, const T* gamma, T* ds, T* db) { const int outer_size = N * G; const int inner_size = D * HxW; __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T ds_val = 0; T db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_gamma = i % G * D + j / HxW; const int index = kOrder == StorageOrder::NCHW ? i * inner_size + j : (i / G * HxW + j % HxW) * G * D + i_gamma; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(gamma + i_gamma) * __ldg(dY + index) * __ldg(X + index); db_val += __ldg(gamma + i_gamma) * __ldg(dY + index); #else ds_val += gamma[i_gamma] * dY[index] * X[index]; db_val += gamma[i_gamma] * dY[index]; #endif } ds_val = BlockReduce<T>(ds_storage).Reduce(ds_val, cub::Sum()); db_val = BlockReduce<T>(db_storage).Reduce(db_val, cub::Sum()); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } __syncthreads(); } } // Math: // Y = gamma * (X - mu) * rsig + beta // let s = gamma * rsig // let b = beta - mu * rsig // Y = s * X + b // let n = D * HxW // dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX) // d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX // db/dX = -u * drsig/dX - rsig * dmu/dX // drsig/dX = -rsig^3 * (X - mu) / n // dmu/dX = 1 / n template <typename T, StorageOrder kOrder> __global__ void GroupNormBackwardCUDAKernel( const int size, const int G, const int D, const int HxW, const T* dY, const T* X, const T* mu, const T* rsig, const T* gamma, const T* ds, const T* db, T* dX) { const int C = G * D; const T denom = T(1) / static_cast<T>(D * HxW); CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = kOrder == StorageOrder::NCHW ? 
i / (D * HxW) : i / (C * HxW) * G + (i / D % G); const int i_gamma = kOrder == StorageOrder::NCHW ? (i / HxW) % C : i % C; #if __CUDA_ARCH__ >= 350 const T u = (__ldg(db + i_mu) * __ldg(mu + i_mu) - __ldg(ds + i_mu)) * (__ldg(X + i) - __ldg(mu + i_mu)) * CubeCUDA(__ldg(rsig + i_mu)); const T v = __ldg(db + i_mu) * __ldg(rsig + i_mu); dX[i] = __ldg(gamma + i_gamma) * __ldg(dY + i) * __ldg(rsig + i_mu) + (u - v) * denom; #else const T u = (db[i_mu] * mu[i_mu] - ds[i_mu]) * (X[i] - mu[i_mu]) * CubeCUDA(rsig[i_mu]); const T v = db[i_mu] * rsig[i_mu]; dX[i] = gamma[i_gamma] * dY[i] * rsig[i_mu] + (u - v) * denom; #endif } } template <typename T, StorageOrder kOrder> __global__ void GammaBetaBackwardCUDAKernel( const int N, const int G, const int D, const int HxW, const T* dY, const T* X, const T* mu, const T* rsig, T* dgamma, T* dbeta) { const int outer_size = G * D; const int inner_size = N * HxW; __shared__ typename BlockReduce<T>::TempStorage dg_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T dg_val = 0; T db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int n = j / HxW; const int index = kOrder == StorageOrder::NCHW ? (n * outer_size + i) * HxW + j % HxW : j * outer_size + i; const int i_mu = n * G + i / D; #if __CUDA_ARCH__ >= 350 dg_val += __ldg(dY + index) * (__ldg(X + index) - __ldg(mu + i_mu)) * __ldg(rsig + i_mu); db_val += __ldg(dY + index); #else dg_val += dY[index] * (X[index] - mu[i_mu]) * rsig[i_mu]; db_val += dY[index]; #endif } dg_val = BlockReduce<T>(dg_storage).Reduce(dg_val, cub::Sum()); db_val = BlockReduce<T>(db_storage).Reduce(db_val, cub::Sum()); if (threadIdx.x == 0) { dgamma[i] = dg_val; dbeta[i] = db_val; } __syncthreads(); } } } // namespace template <> bool GroupNormOp<float, CUDAContext>::RunOnDeviceImpl( const int N, const int G, const int D, const int HxW, const float* X_data, const float* gamma_data, const float* beta_data, float* Y_data, float* mu_data, float* rsig_data) { const std::array<int, 4> dims = order_ == StorageOrder::NCHW ? std::array<int, 4>{N, G, D, HxW} : std::array<int, 4>{N, HxW, G, D}; const std::array<int, 2> axes = order_ == StorageOrder::NCHW ? std::array<int, 2>{2, 3} : std::array<int, 2>{1, 3}; // Computes mean and variance. math::Moments<float, CUDAContext>( 4, dims.data(), 2, axes.data(), X_data, mu_data, rsig_data, &context_); // Uses rsqrt to computes 1 / std which is much faster than computes std. InvStdCUDAKernel<<< CAFFE_GET_BLOCKS(N * G), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N * G, epsilon_, rsig_data, rsig_data); // Computes Y = gamma * (X - mu) * rsig + beta. 
const int size = N * G * D * HxW; if (order_ == StorageOrder::NCHW) { GroupNormForwardCUDAKernel<float, StorageOrder::NCHW> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, G, D, HxW, X_data, mu_data, rsig_data, gamma_data, beta_data, Y_data); } else { GroupNormForwardCUDAKernel<float, StorageOrder::NHWC> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, G, D, HxW, X_data, mu_data, rsig_data, gamma_data, beta_data, Y_data); } return true; } // Math: // let: s = gamma * rsig // let: b = beta - mu * gamma * rsig // then: Y = s * X + b template <> bool GroupNormGradientOp<float, CUDAContext>::RunOnDeviceImpl( const int N, const int G, const int D, const int HxW, const float* dY_data, const float* X_data, const float* mu_data, const float* rsig_data, const float* gamma_data, float* dX_data, float* dgamma_data, float* dbeta_data) { const int size = N * G * D * HxW; const int C = G * D; ds_.Resize(N, G); db_.Resize(N, G); float* ds_data = ds_.mutable_data<float>(); float* db_data = db_.mutable_data<float>(); if (order_ == StorageOrder::NCHW) { // Computes dL/ds and dL/db. // dL/ds = Sum(dL/dY * gamma * X) // dL/db = Sum(dL/dY * gamma) ComputeInternalGradientsCUDAKernel<float, StorageOrder::NCHW> <<<std::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data); // Computes dL/dX. GroupNormBackwardCUDAKernel<float, StorageOrder::NCHW> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, G, D, HxW, dY_data, X_data, mu_data, rsig_data, gamma_data, ds_data, db_data, dX_data); // Computes dL/dgamma and dL/dbeta. GammaBetaBackwardCUDAKernel<float, StorageOrder::NCHW> <<<std::min(C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, D, HxW, dY_data, X_data, mu_data, rsig_data, dgamma_data, dbeta_data); } else { // Computes dL/ds and dL/db. // dL/ds = Sum(dL/dY * gamma * X) // dL/db = Sum(dL/dY * gamma) ComputeInternalGradientsCUDAKernel<float, StorageOrder::NHWC> <<<std::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data); // Computes dL/dX. GroupNormBackwardCUDAKernel<float, StorageOrder::NHWC> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( size, G, D, HxW, dY_data, X_data, mu_data, rsig_data, gamma_data, ds_data, db_data, dX_data); // Computes dL/dgamma and dL/dbeta. GammaBetaBackwardCUDAKernel<float, StorageOrder::NHWC> <<<std::min(C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, G, D, HxW, dY_data, X_data, mu_data, rsig_data, dgamma_data, dbeta_data); } return true; } REGISTER_CUDA_OPERATOR(GroupNorm, GroupNormOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( GroupNormGradient, GroupNormGradientOp<float, CUDAContext>); } // namespace caffe2
c6b21ca99f86b81a26120816855a36a26265dcbd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>

inline __device__ double cube(double value) { return value * value * value; }

__global__ void cu_cbrt(double* value) {
    value[threadIdx.x] = cube(value[threadIdx.x]);
}
c6b21ca99f86b81a26120816855a36a26265dcbd.cu
#include <cmath>

inline __device__ double cube(double value) { return value * value * value; }

__global__ void cu_cbrt(double* value) {
    value[threadIdx.x] = cube(value[threadIdx.x]);
}
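// --- Editor's note (hedged sketch, not part of the original source) ---
// Despite its name, cu_cbrt applies cube() and therefore writes value^3, not a cube
// root. A minimal host-side driver, assuming it is compiled in the same translation
// unit as the kernel above and that n does not exceed the block-size limit:
#include <cstdio>
#include <cuda_runtime.h>

int main() {
  const int n = 4;
  double h[n] = {1.0, 2.0, 3.0, 4.0};
  double* d = nullptr;
  cudaMalloc(&d, n * sizeof(double));
  cudaMemcpy(d, h, n * sizeof(double), cudaMemcpyHostToDevice);
  cu_cbrt<<<1, n>>>(d);                                   // one thread cubes one element
  cudaMemcpy(h, d, n * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%f\n", h[i]);       // expected: 1, 8, 27, 64
  cudaFree(d);
  return 0;
}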
dbdd68f199481eea6e35270ad53c75f2e52aff1b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" using namespace std; /* Utility function, use to do error checking. Use this function like this: checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(hipGetLastError()); */ static void checkCudaCall(hipError_t result) { if (result != hipSuccess) { cerr << "cuda error: " << hipGetErrorString(result) << endl; exit(1); } } __global__ void encryptKernel(char* deviceDataIn, char* deviceDataOut) { unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = deviceDataIn[index]+index%256+ 3; } __global__ void decryptKernel(char* deviceDataIn, char* deviceDataOut) { unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = deviceDataIn[index]-index%256-3; } int fileSize() { int size; ifstream file ("original.data", ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.close(); } else { cout << "Unable to open file"; size = -1; } return size; } int readData(char *fileName, char *data) { streampos size; ifstream file (fileName, ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.seekg (0, ios::beg); file.read (data, size); file.close(); cout << "The entire file content is in memory." << endl; } else cout << "Unable to open file" << endl; return 0; } int writeData(int size, char *fileName, char *data) { ofstream file (fileName, ios::out|ios::binary|ios::trunc); if (file.is_open()) { file.write (data, size); file.close(); cout << "The entire file content was written to file." << endl; return 0; } else cout << "Unable to open file"; return -1; } int EncryptSeq (int n, char* data_in, char* data_out) { int i; timer sequentialTime = timer("Sequential encryption"); sequentialTime.start(); for (i=0; i<n; i++) { data_out[i]=data_in[i]+i%256+3; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int DecryptSeq (int n, char* data_in, char* data_out) { int i; timer sequentialTime = timer("Sequential decryption"); sequentialTime.start(); for (i=0; i<n; i++) { data_out[i]=data_in[i]-i%256-3; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int EncryptCuda (int n, char* data_in, char* data_out) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(hipFree(deviceDataIn)); cout << "could not allocate memory!" 
<< endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); hipLaunchKernelGGL(( encryptKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceDataIn, deviceDataOut); hipDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(hipGetLastError()); // copy result back memoryTime.start(); checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(hipFree(deviceDataIn)); checkCudaCall(hipFree(deviceDataOut)); cout << fixed << setprecision(6); cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int DecryptCuda (int n, char* data_in, char* data_out) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(hipMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(hipMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(hipFree(deviceDataIn)); cout << "could not allocate memory!" << endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(hipMemcpy(deviceDataIn, data_in, n*sizeof(char), hipMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); hipLaunchKernelGGL(( decryptKernel), dim3(n/threadBlockSize+1), dim3(threadBlockSize), 0, 0, deviceDataIn, deviceDataOut); hipDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(hipGetLastError()); // copy result back memoryTime.start(); checkCudaCall(hipMemcpy(data_out, deviceDataOut, n * sizeof(char), hipMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(hipFree(deviceDataIn)); checkCudaCall(hipFree(deviceDataOut)); cout << fixed << setprecision(6); cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int main(int argc, char* argv[]) { int n; n = fileSize(); if (n == -1) { cout << "File not found! Exiting ... " << endl; exit(0); } char* data_in = new char[n]; char* data_out = new char[n]; readData("original.data", data_in); cout << "Encrypting a file of " << n << " characters." << endl; EncryptSeq(n, data_in, data_out); writeData(n, "sequential.data", data_out); EncryptCuda(n, data_in, data_out); writeData(n, "cuda.data", data_out); readData("cuda.data", data_in); cout << "Decrypting a file of " << n << "characters" << endl; DecryptSeq(n, data_in, data_out); writeData(n, "sequential_decrypted.data", data_out); DecryptCuda(n, data_in, data_out); writeData(n, "recovered.data", data_out); delete[] data_in; delete[] data_out; return 0; }
dbdd68f199481eea6e35270ad53c75f2e52aff1b.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" using namespace std; /* Utility function, use to do error checking. Use this function like this: checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(cudaGetLastError()); */ static void checkCudaCall(cudaError_t result) { if (result != cudaSuccess) { cerr << "cuda error: " << cudaGetErrorString(result) << endl; exit(1); } } __global__ void encryptKernel(char* deviceDataIn, char* deviceDataOut) { unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = deviceDataIn[index]+index%256+ 3; } __global__ void decryptKernel(char* deviceDataIn, char* deviceDataOut) { unsigned index = blockIdx.x * blockDim.x + threadIdx.x; deviceDataOut[index] = deviceDataIn[index]-index%256-3; } int fileSize() { int size; ifstream file ("original.data", ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.close(); } else { cout << "Unable to open file"; size = -1; } return size; } int readData(char *fileName, char *data) { streampos size; ifstream file (fileName, ios::in|ios::binary|ios::ate); if (file.is_open()) { size = file.tellg(); file.seekg (0, ios::beg); file.read (data, size); file.close(); cout << "The entire file content is in memory." << endl; } else cout << "Unable to open file" << endl; return 0; } int writeData(int size, char *fileName, char *data) { ofstream file (fileName, ios::out|ios::binary|ios::trunc); if (file.is_open()) { file.write (data, size); file.close(); cout << "The entire file content was written to file." << endl; return 0; } else cout << "Unable to open file"; return -1; } int EncryptSeq (int n, char* data_in, char* data_out) { int i; timer sequentialTime = timer("Sequential encryption"); sequentialTime.start(); for (i=0; i<n; i++) { data_out[i]=data_in[i]+i%256+3; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Encryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int DecryptSeq (int n, char* data_in, char* data_out) { int i; timer sequentialTime = timer("Sequential decryption"); sequentialTime.start(); for (i=0; i<n; i++) { data_out[i]=data_in[i]-i%256-3; } sequentialTime.stop(); cout << fixed << setprecision(6); cout << "Decryption (sequential): \t\t" << sequentialTime.getElapsed() << " seconds." << endl; return 0; } int EncryptCuda (int n, char* data_in, char* data_out) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(cudaFree(deviceDataIn)); cout << "could not allocate memory!" 
<< endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); encryptKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceDataIn, deviceDataOut); cudaDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(cudaGetLastError()); // copy result back memoryTime.start(); checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(cudaFree(deviceDataIn)); checkCudaCall(cudaFree(deviceDataOut)); cout << fixed << setprecision(6); cout << "Encrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Encrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int DecryptCuda (int n, char* data_in, char* data_out) { int threadBlockSize = 512; // allocate the vectors on the GPU char* deviceDataIn = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataIn, n * sizeof(char))); if (deviceDataIn == NULL) { cout << "could not allocate memory!" << endl; return -1; } char* deviceDataOut = NULL; checkCudaCall(cudaMalloc((void **) &deviceDataOut, n * sizeof(char))); if (deviceDataOut == NULL) { checkCudaCall(cudaFree(deviceDataIn)); cout << "could not allocate memory!" << endl; return -1; } timer kernelTime1 = timer("kernelTime"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(cudaMemcpy(deviceDataIn, data_in, n*sizeof(char), cudaMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); decryptKernel<<<n/threadBlockSize+1, threadBlockSize>>>(deviceDataIn, deviceDataOut); cudaDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(cudaGetLastError()); // copy result back memoryTime.start(); checkCudaCall(cudaMemcpy(data_out, deviceDataOut, n * sizeof(char), cudaMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(cudaFree(deviceDataIn)); checkCudaCall(cudaFree(deviceDataOut)); cout << fixed << setprecision(6); cout << "Decrypt (kernel): \t\t" << kernelTime1.getElapsed() << " seconds." << endl; cout << "Decrypt (memory): \t\t" << memoryTime.getElapsed() << " seconds." << endl; return 0; } int main(int argc, char* argv[]) { int n; n = fileSize(); if (n == -1) { cout << "File not found! Exiting ... " << endl; exit(0); } char* data_in = new char[n]; char* data_out = new char[n]; readData("original.data", data_in); cout << "Encrypting a file of " << n << " characters." << endl; EncryptSeq(n, data_in, data_out); writeData(n, "sequential.data", data_out); EncryptCuda(n, data_in, data_out); writeData(n, "cuda.data", data_out); readData("cuda.data", data_in); cout << "Decrypting a file of " << n << "characters" << endl; DecryptSeq(n, data_in, data_out); writeData(n, "sequential_decrypted.data", data_out); DecryptCuda(n, data_in, data_out); writeData(n, "recovered.data", data_out); delete[] data_in; delete[] data_out; return 0; }
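// --- Editor's note (hedged sketch, not part of the original source) ---
// Two launch-configuration issues in the file above are worth flagging: EncryptCuda
// launches n/threadBlockSize blocks, which silently skips the last partial block
// whenever n is not a multiple of 512, and DecryptCuda launches one extra block while
// the kernel has no bounds check, so the surplus threads read and write past the end
// of the device buffers. A safer variant (hypothetical names) rounds the grid size up
// and guards the index inside the kernel:
__global__ void encryptKernelSafe(int n, const char* in, char* out) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n)
    out[index] = in[index] + index % 256 + 3;   // same scheme as encryptKernel above
}

// Host side: round up instead of truncating, e.g.
//   int blocks = (n + threadBlockSize - 1) / threadBlockSize;
//   encryptKernelSafe<<<blocks, threadBlockSize>>>(n, deviceDataIn, deviceDataOut);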
3b689465bfdfa879f87ded6ec40e97c9784c6beb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ float softplus_kernel(float x, float threshold = 20) { if (x > threshold) return x; // too large else if (x < -threshold) return expf(x); // too small return logf(expf(x) + 1); } __device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);} __global__ void mish_kernel(const float *input, float *output, int num_elem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= num_elem) return; //float t = exp(input[idx]); //if (input[idx] > 20.0) { // t *= t; // output[idx] = (t - 1.0) / (t + 1.0); //} else { // float tt = t * t; // output[idx] = (tt + 2.0 * t) / (tt + 2.0 * t + 2.0); //} //output[idx] *= input[idx]; output[idx] = input[idx] * tanh_activate_kernel(softplus_kernel(input[idx])); }
3b689465bfdfa879f87ded6ec40e97c9784c6beb.cu
#include "includes.h" __device__ float softplus_kernel(float x, float threshold = 20) { if (x > threshold) return x; // too large else if (x < -threshold) return expf(x); // too small return logf(expf(x) + 1); } __device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);} __global__ void mish_kernel(const float *input, float *output, int num_elem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= num_elem) return; //float t = exp(input[idx]); //if (input[idx] > 20.0) { // t *= t; // output[idx] = (t - 1.0) / (t + 1.0); //} else { // float tt = t * t; // output[idx] = (tt + 2.0 * t) / (tt + 2.0 * t + 2.0); //} //output[idx] *= input[idx]; output[idx] = input[idx] * tanh_activate_kernel(softplus_kernel(input[idx])); }
803ca56d91589594390133054bfe27f67767a375.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void print1D(int* input, dim3 size) { int axis_x = blockIdx.x * blockDim.x + threadIdx.x; // printf("%d, %d\n", axis_x, size.x); int gid = axis_x; // input[gid] = gid; if (axis_x < size.x) { printf( "size(%d), blockDim(%d), blockIdx(%d), threadIdx(%d), input(%d), " "gid(%d)\n", size.x, blockDim.x, blockIdx.x, threadIdx.x, input[gid], gid); } } __global__ void print2D(int* input, dim3 size) { int axis_x = blockIdx.x * blockDim.x + threadIdx.x; // col int axis_y = blockIdx.y * blockDim.y + threadIdx.y; // row // printf("%d, %d, %d, %d\n", axis_x, axis_y, size.x, size.y); int gid = axis_y * size.x + axis_x; // input[gid] = gid; if (axis_x < size.x && axis_y < size.y) { printf( "size(%d,%d), blockDim(%d,%d), blockIdx(%d,%d), threadIdx(%d,%d), " "input(%d), gid(%d)\n", size.x, size.y, blockDim.x, blockDim.y, blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, input[gid], gid); } } __global__ void print3D(int* input, dim3 size) { int axis_x = blockIdx.x * blockDim.x + threadIdx.x; int axis_y = blockIdx.y * blockDim.y + threadIdx.y; int axis_z = blockIdx.z * blockDim.z + threadIdx.z; // printf("%d, %d, %d, %d, %d, %d\n", axis_x, axis_y, axis_z, size.x, size.y, // size.z); int gid = axis_z * size.x * size.y + axis_y * size.x + axis_x; // input[gid] = gid; if (axis_x < size.x && axis_y < size.y && axis_z < size.z) { printf( "size(%d,%d,%d), blockDim(%d,%d,%d), blockIdx(%d,%d,%d), " "threadIdx(%d,%d,%d), input(%d), gid(%d)\n", size.x, size.y, size.z, blockDim.x, blockDim.y, blockDim.z, blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z, input[gid], gid); } } void initInput(int* input, int size) { for (int index = 0; index < size; index++) { input[index] = index; } } int main(void) { dim3 size(2, 4, 8); dim3 block_dim(0); dim3 grid_dim(0); int* h_input = NULL; int* d_input = NULL; //// 1D printf("\nprint 1D:\n"); h_input = (int*)calloc(size.x, sizeof(int)); initInput(h_input, size.x); hipMalloc((void**)&d_input, size.x * sizeof(int)); hipMemcpy(d_input, h_input, size.x * sizeof(int), hipMemcpyHostToDevice); block_dim.x = 2; grid_dim.x = size.x / block_dim.x + 1; hipLaunchKernelGGL(( print1D), dim3(grid_dim), dim3(block_dim), 0, 0, d_input, size); hipDeviceSynchronize(); hipMemcpy(h_input, d_input, size.x * sizeof(int), hipMemcpyDeviceToHost); hipFree(d_input); free(h_input); //// 2D printf("\nprint 2D:\n"); h_input = (int*)calloc(size.x * size.y, sizeof(int)); initInput(h_input, size.x * size.y); hipMalloc((void**)&d_input, size.x * size.y * sizeof(int)); hipMemcpy(d_input, h_input, size.x * size.y * sizeof(int), hipMemcpyHostToDevice); block_dim.y = 4; grid_dim.y = size.y / block_dim.y + 1; hipLaunchKernelGGL(( print2D), dim3(grid_dim), dim3(block_dim), 0, 0, d_input, size); hipDeviceSynchronize(); hipMemcpy(h_input, d_input, size.x * size.y * sizeof(int), hipMemcpyDeviceToHost); hipFree(d_input); free(h_input); //// 3D printf("\nprint 3D:\n"); h_input = (int*)calloc(size.x * size.y * size.z, sizeof(int)); initInput(h_input, size.x * size.y * size.z); hipMalloc((void**)&d_input, size.x * size.y * size.z * sizeof(int)); hipMemcpy(d_input, h_input, size.x * size.y * size.z * sizeof(int), hipMemcpyHostToDevice); block_dim.z = 8; grid_dim.z = size.z / block_dim.z + 1; hipLaunchKernelGGL(( print3D), dim3(grid_dim), dim3(block_dim), 0, 0, d_input, size); hipDeviceSynchronize(); hipMemcpy(h_input, d_input, size.x * size.y * size.z * sizeof(int), hipMemcpyDeviceToHost); 
hipFree(d_input); free(h_input); //// reset hipDeviceReset(); }
803ca56d91589594390133054bfe27f67767a375.cu
#include <stdio.h> __global__ void print1D(int* input, dim3 size) { int axis_x = blockIdx.x * blockDim.x + threadIdx.x; // printf("%d, %d\n", axis_x, size.x); int gid = axis_x; // input[gid] = gid; if (axis_x < size.x) { printf( "size(%d), blockDim(%d), blockIdx(%d), threadIdx(%d), input(%d), " "gid(%d)\n", size.x, blockDim.x, blockIdx.x, threadIdx.x, input[gid], gid); } } __global__ void print2D(int* input, dim3 size) { int axis_x = blockIdx.x * blockDim.x + threadIdx.x; // col int axis_y = blockIdx.y * blockDim.y + threadIdx.y; // row // printf("%d, %d, %d, %d\n", axis_x, axis_y, size.x, size.y); int gid = axis_y * size.x + axis_x; // input[gid] = gid; if (axis_x < size.x && axis_y < size.y) { printf( "size(%d,%d), blockDim(%d,%d), blockIdx(%d,%d), threadIdx(%d,%d), " "input(%d), gid(%d)\n", size.x, size.y, blockDim.x, blockDim.y, blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, input[gid], gid); } } __global__ void print3D(int* input, dim3 size) { int axis_x = blockIdx.x * blockDim.x + threadIdx.x; int axis_y = blockIdx.y * blockDim.y + threadIdx.y; int axis_z = blockIdx.z * blockDim.z + threadIdx.z; // printf("%d, %d, %d, %d, %d, %d\n", axis_x, axis_y, axis_z, size.x, size.y, // size.z); int gid = axis_z * size.x * size.y + axis_y * size.x + axis_x; // input[gid] = gid; if (axis_x < size.x && axis_y < size.y && axis_z < size.z) { printf( "size(%d,%d,%d), blockDim(%d,%d,%d), blockIdx(%d,%d,%d), " "threadIdx(%d,%d,%d), input(%d), gid(%d)\n", size.x, size.y, size.z, blockDim.x, blockDim.y, blockDim.z, blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z, input[gid], gid); } } void initInput(int* input, int size) { for (int index = 0; index < size; index++) { input[index] = index; } } int main(void) { dim3 size(2, 4, 8); dim3 block_dim(0); dim3 grid_dim(0); int* h_input = NULL; int* d_input = NULL; //// 1D printf("\nprint 1D:\n"); h_input = (int*)calloc(size.x, sizeof(int)); initInput(h_input, size.x); cudaMalloc((void**)&d_input, size.x * sizeof(int)); cudaMemcpy(d_input, h_input, size.x * sizeof(int), cudaMemcpyHostToDevice); block_dim.x = 2; grid_dim.x = size.x / block_dim.x + 1; print1D<<<grid_dim, block_dim>>>(d_input, size); cudaDeviceSynchronize(); cudaMemcpy(h_input, d_input, size.x * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_input); free(h_input); //// 2D printf("\nprint 2D:\n"); h_input = (int*)calloc(size.x * size.y, sizeof(int)); initInput(h_input, size.x * size.y); cudaMalloc((void**)&d_input, size.x * size.y * sizeof(int)); cudaMemcpy(d_input, h_input, size.x * size.y * sizeof(int), cudaMemcpyHostToDevice); block_dim.y = 4; grid_dim.y = size.y / block_dim.y + 1; print2D<<<grid_dim, block_dim>>>(d_input, size); cudaDeviceSynchronize(); cudaMemcpy(h_input, d_input, size.x * size.y * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_input); free(h_input); //// 3D printf("\nprint 3D:\n"); h_input = (int*)calloc(size.x * size.y * size.z, sizeof(int)); initInput(h_input, size.x * size.y * size.z); cudaMalloc((void**)&d_input, size.x * size.y * size.z * sizeof(int)); cudaMemcpy(d_input, h_input, size.x * size.y * size.z * sizeof(int), cudaMemcpyHostToDevice); block_dim.z = 8; grid_dim.z = size.z / block_dim.z + 1; print3D<<<grid_dim, block_dim>>>(d_input, size); cudaDeviceSynchronize(); cudaMemcpy(h_input, d_input, size.x * size.y * size.z * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_input); free(h_input); //// reset cudaDeviceReset(); }
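// --- Grid-sizing note, not part of the original file ---------------------------
// The main above sizes each grid as `size / block_dim + 1`, which is safe only
// because every kernel checks its bounds, and it launches one extra empty block
// whenever the size divides evenly. A ceiling-division helper is the usual
// alternative; the name here is an assumption.
static inline unsigned int div_up(unsigned int n, unsigned int block) {
    return (n + block - 1) / block;   // e.g. grid_dim.x = div_up(size.x, block_dim.x);
}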
c82d2eae2a4169341a24fc088dafd15bc6000f19.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "solve.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; hipMalloc(&mat, XSIZE*YSIZE); float *b = NULL; hipMalloc(&b, XSIZE*YSIZE); float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); int rows = XSIZE; int cols = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( solve), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,b,x,rows,cols); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( solve), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,b,x,rows,cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( solve), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,b,x,rows,cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c82d2eae2a4169341a24fc088dafd15bc6000f19.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "solve.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; cudaMalloc(&mat, XSIZE*YSIZE); float *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); int rows = XSIZE; int cols = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); solve<<<gridBlock,threadBlock>>>(mat,b,x,rows,cols); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { solve<<<gridBlock,threadBlock>>>(mat,b,x,rows,cols); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { solve<<<gridBlock,threadBlock>>>(mat,b,x,rows,cols); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
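// --- Allocation note, not part of the original file ----------------------------
// The benchmark above passes XSIZE*YSIZE to cudaMalloc/hipMalloc as a byte
// count even though mat, b and x are used as float*; if `solve` touches the
// full rows x cols matrix, each buffer is a factor of sizeof(float) too small.
// A sized allocation could look like this sketch (the helper name is an
// assumption):
#include <cuda_runtime.h>

static float* allocMatrix(int rows, int cols) {
    float* p = NULL;
    cudaMalloc(&p, (size_t)rows * cols * sizeof(float));  // byte count includes sizeof(float)
    return p;
}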
cbb29ef1583566d0776fcc1208a1f7da1fb8a304.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/core/Tensor.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/Resize.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPApplyUtils.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/bincount_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/histc_native.h> #include <ATen/ops/zeros_native.h> #endif namespace at { namespace cuda { #define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100 #define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL }; namespace { template <typename input_t, typename IndexType> __device__ static IndexType getBin( input_t bVal, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, int64_t nbins) { IndexType bin = (int)(((bVal - minvalue)) * nbins / (maxvalue - minvalue)); // (only applicable for histc) // while each bin is inclusive at the lower end and exclusive at the higher, // i.e. [start, end) the last bin is inclusive at both, i.e. [start, end], in // order to include maxvalue if exists therefore when bin == nbins, adjust bin // to the last bin if (bin == nbins) bin -= 1; return bin; } } /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK, typename Op> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `smem` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); gpuAtomicAddNoReturn(&smem[bin], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); gpuAtomicAddNoReturn(&a.data[aOffset], smem[i]); } } else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) { ////////////////////////// Multi Block memory ////////////////////////// // atomically add to block specific global tensor // then atomically add to the global output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `p` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType pIdx = p.strides[0] * blockIdx.x + bin; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); gpuAtomicAddNoReturn(&p.data[pOffset], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. const IndexType pIdx = p.strides[0] * blockIdx.x; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); gpuAtomicAddNoReturn(&a.data[aOffset], p.data[pOffset + i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `a` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a); gpuAtomicAddNoReturn(&a.data[aOffset], getOp(linearIndex)); } } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \ hipLaunchKernelGGL(( kernelHistogram1D< \ output_t, \ input_t, \ IndexType, \ 1, \ 2, \ -1, \ MEMORY_TYPE>), dim3(grid), dim3(block), SHARED_MEM, getCurrentHIPStreamMasqueradingAsCUDA(), \ aInfo, \ pInfo, \ bInfo, \ nbins, \ minvalue, \ maxvalue, \ totalElements, \ WEIGHTS_OP); \ C10_HIP_KERNEL_LAUNCH_CHECK(); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \ break; \ case CUDAHistogramMemoryType::MULTI_BLOCK: \ HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \ } inline int64_t getFreeGlobalMemory() { // no need to use `hipSetDevice` size_t free_mem, total_mem; hipMemGetInfo(&free_mem, &total_mem); TORCH_INTERNAL_ASSERT( hipGetLastError() == hipSuccess, "CUDA_tensor_histogram failed to get free global memory"); return static_cast<int64_t>(free_mem); } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. 
See `help torch.bincount` for details on the math. 3 implementations based of input size and memory usage: case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem MULTI_BLOCK: Each block atomically adds to it's own **global** hist copy, then atomically updates the global tensor. case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins GLOBAL: all threads atomically update to a single **global** hist copy. */ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.numel(); if (totalElements == 0) { return false; } const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes auto maxGlobalMem = getFreeGlobalMemory(); auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM && sharedMem < maxSharedMem) { memType = CUDAHistogramMemoryType::SHARED; } else if ( nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM && multiBlockMem < (maxGlobalMem / 2)) { // check against half of free mem to be extra safe // due to cached allocator, we may anyway have slightly more free mem memType = CUDAHistogramMemoryType::MULTI_BLOCK; } // alloc memory for MULTI_BLOCK using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); Tensor partial_output; if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) { partial_output = native::zeros( {grid.x, nbins}, optTypeMetaToScalarType(a.options().dtype_opt()), a.options().layout_opt(), a.options().device_opt(), a.options().pinned_memory_opt()); pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output); } if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM #undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const 
Tensor& weights, int64_t minlength) { if (minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return native::zeros( {minlength}, kLong, c10::nullopt /* layout */, kCUDA, c10::nullopt /* pin_memory */); } if (self.dim() != 1 || (!std::is_same<input_t, uint8_t>::value && *self.min().cpu().data_ptr<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && weights.size(0) != self.size(0)) { AT_ERROR("input and weights should have the same length"); } const int64_t nbins = ::max(self.max().item<input_t>() + (int64_t)1, minlength); // we are using acc_type for the bounds, in particular int64_t for integers // in order to avoid overflows (e.g. using 256 bins for dtype uint8) using bounds_t = at::acc_type<input_t, /*is_cuda=*/true>; const bounds_t minvalue = 0; const bounds_t maxvalue = nbins; // alloc output counter on GPU Tensor output; if (has_weights) { output = native::zeros( {nbins}, optTypeMetaToScalarType(weights.options().dtype_opt()), weights.options().layout_opt(), weights.options().device_opt(), weights.options().pinned_memory_opt()); cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, minvalue, maxvalue); } else { output = native::zeros( {nbins}, kLong, c10::nullopt /* layout */, DeviceType::CUDA, c10::nullopt /* pin_memory */); cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, minvalue, maxvalue); } return output; } ///////////////// histc ///////////////// template <typename input_t> Tensor _histc_cuda_template( const Tensor& self, int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> min, at::acc_type<input_t, /*is_cuda=*/true> max) { if (nbins <= 0) { AT_ERROR("bins must be > 0"); } Tensor output = native::zeros( {nbins}, self.scalar_type(), c10::nullopt /* layout */, DeviceType::CUDA, c10::nullopt /* pin_memory */); input_t minvalue = min; input_t maxvalue = max; if (min == max && self.numel() > 0) { minvalue = *self.min().cpu().data_ptr<input_t>(); maxvalue = *self.max().cpu().data_ptr<input_t>(); } if (minvalue == maxvalue) { minvalue = minvalue - 1; maxvalue = maxvalue + 1; } #if !defined(USE_ROCM) TORCH_CHECK( !(at::_isinf(minvalue) || at::_isinf(maxvalue) || at::_isnan(minvalue) || at::_isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #else TORCH_CHECK( !(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) || std::isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #endif TORCH_CHECK(minvalue < maxvalue, "max must be larger than min"); cuda::CUDA_tensor_histogram<input_t, input_t, false>( output, self, Tensor(), nbins, minvalue, maxvalue); return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const c10::optional<Tensor>& weights_opt, int64_t minlength) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weights_maybe_owned = at::borrow_from_optional_tensor(weights_opt); const Tensor& weights = *weights_maybe_owned; // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("_bincount_cuda"); return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] { const auto scalar = weights.scalar_type(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return 
_bincount_cuda_template<scalar_t, double>( self, weights.to(kDouble), minlength); }); } Tensor _histc_cuda( const Tensor& self, int64_t nbins, const Scalar& min, const Scalar& max) { if (self.scalar_type() == ScalarType::Half) { AT_ERROR("HalfTensor is not supported"); } // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("_histc_cuda"); return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] { using bounds_t = at::acc_type<scalar_t, /*is_cuda=*/true>; return _histc_cuda_template<scalar_t>( self, nbins, min.to<bounds_t>(), max.to<bounds_t>()); }); } Tensor& _histc_out_cuda(const Tensor& self, int64_t bins, const Scalar& min, const Scalar& max, Tensor& result) { auto ret = _histc_cuda(self, bins, min, max); resize_output(result, ret.sizes()); result.copy_(ret); return result; } } // namespace native } // namespace at
cbb29ef1583566d0776fcc1208a1f7da1fb8a304.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/core/Tensor.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/Resize.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAApplyUtils.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/bincount_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/histc_native.h> #include <ATen/ops/zeros_native.h> #endif namespace at { namespace cuda { #define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100 #define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000 #define FOR_KERNEL_LOOP(i, lim) \ for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ i += gridDim.x * blockDim.x) /* Memory types used for the 3 histogram implementations. See `CUDA_tensor_histogram` below. */ enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL }; namespace { template <typename input_t, typename IndexType> __device__ static IndexType getBin( input_t bVal, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, int64_t nbins) { IndexType bin = (int)(((bVal - minvalue)) * nbins / (maxvalue - minvalue)); // (only applicable for histc) // while each bin is inclusive at the lower end and exclusive at the higher, // i.e. [start, end) the last bin is inclusive at both, i.e. [start, end], in // order to include maxvalue if exists therefore when bin == nbins, adjust bin // to the last bin if (bin == nbins) bin -= 1; return bin; } } /* Kernel for computing the histogram of the input. */ template < typename output_t, typename input_t, typename IndexType, int ADims, int PDims, int BDims, CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK, typename Op> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void kernelHistogram1D( detail::TensorInfo<output_t, IndexType> a, /* output */ detail::TensorInfo<output_t, IndexType> p, /* partial output */ detail::TensorInfo<input_t, IndexType> b, /* input */ int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, IndexType totalElements, Op getOp) { extern __shared__ unsigned char my_smem[]; output_t* smem = nullptr; if (MemoryType == CUDAHistogramMemoryType::SHARED) { ////////////////////////// Shared memory ////////////////////////// // atomically add to block specific shared memory // then atomically add to the global output tensor smem = reinterpret_cast<output_t*>(my_smem); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { smem[i] = 0; } __syncthreads(); FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `smem` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); gpuAtomicAddNoReturn(&smem[bin], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. 
for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); gpuAtomicAddNoReturn(&a.data[aOffset], smem[i]); } } else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) { ////////////////////////// Multi Block memory ////////////////////////// // atomically add to block specific global tensor // then atomically add to the global output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `p` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType pIdx = p.strides[0] * blockIdx.x + bin; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); gpuAtomicAddNoReturn(&p.data[pOffset], getOp(linearIndex)); } } __syncthreads(); // NOTE: atomically update output bin count. // Atomic update is imp since __syncthread() will only synchronize threads // in a given block, not across blocks. const IndexType pIdx = p.strides[0] * blockIdx.x; const IndexType pOffset = detail::IndexToOffset<output_t, IndexType, PDims>::get(pIdx, p); for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(i, a); gpuAtomicAddNoReturn(&a.data[aOffset], p.data[pOffset + i]); } } else { ////////////////////////// Global memory ////////////////////////// // atomically add to the output tensor // compute histogram for the block FOR_KERNEL_LOOP(linearIndex, totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = detail::IndexToOffset<input_t, IndexType, BDims>::get(linearIndex, b); const auto bVal = b.data[bOffset]; if (bVal >= minvalue && bVal <= maxvalue) { // Use value at `b` as an offset of `a` const IndexType bin = getBin<input_t, IndexType>(bVal, minvalue, maxvalue, nbins); const IndexType aOffset = detail::IndexToOffset<output_t, IndexType, ADims>::get(bin, a); gpuAtomicAddNoReturn(&a.data[aOffset], getOp(linearIndex)); } } } } #define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \ kernelHistogram1D< \ output_t, \ input_t, \ IndexType, \ 1, \ 2, \ -1, \ MEMORY_TYPE><<<grid, block, SHARED_MEM, getCurrentCUDAStream()>>>( \ aInfo, \ pInfo, \ bInfo, \ nbins, \ minvalue, \ maxvalue, \ totalElements, \ WEIGHTS_OP); \ C10_CUDA_KERNEL_LAUNCH_CHECK(); #define HANDLE_SWITCH_CASE(mType, getOp) \ switch (mType) { \ case CUDAHistogramMemoryType::SHARED: \ HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \ break; \ case CUDAHistogramMemoryType::MULTI_BLOCK: \ HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \ break; \ default: \ HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \ } inline int64_t getFreeGlobalMemory() { // no need to use `cudaSetDevice` size_t free_mem, total_mem; cudaMemGetInfo(&free_mem, &total_mem); TORCH_INTERNAL_ASSERT( cudaGetLastError() == cudaSuccess, "CUDA_tensor_histogram failed to get free global memory"); return static_cast<int64_t>(free_mem); } /* Calculate the frequency of the input values. `a` contains the final output or the histogram. Input `b` is assumed to be 1-D non-negative int array. `c` optionally contains the weight vector. See `help torch.bincount` for details on the math. 
3 implementations based of input size and memory usage: case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem SHARED: Each block atomically adds to it's own **shared** hist copy, then atomically updates the global tensor. case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem MULTI_BLOCK: Each block atomically adds to it's own **global** hist copy, then atomically updates the global tensor. case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins GLOBAL: all threads atomically update to a single **global** hist copy. */ template <typename output_t, typename input_t, bool HasWeights> bool CUDA_tensor_histogram( at::Tensor a, /* output */ at::Tensor b, /* input */ at::Tensor c, /* weights(optional) */ int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> minvalue, at::acc_type<input_t, /*is_cuda=*/true> maxvalue, TensorArgType aType = TensorArgType::ReadWrite, TensorArgType bType = TensorArgType::ReadOnly, TensorArgType cType = TensorArgType::ReadOnly) { checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); if (HasWeights) { checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); } auto totalElements = b.numel(); if (totalElements == 0) { return false; } const dim3 block = getApplyBlock(); dim3 grid; int64_t curDevice = current_device(); if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { return false; } CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes auto maxGlobalMem = getFreeGlobalMemory(); auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes // determine memory type to use in the kernel if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM && sharedMem < maxSharedMem) { memType = CUDAHistogramMemoryType::SHARED; } else if ( nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM && multiBlockMem < (maxGlobalMem / 2)) { // check against half of free mem to be extra safe // due to cached allocator, we may anyway have slightly more free mem memType = CUDAHistogramMemoryType::MULTI_BLOCK; } // alloc memory for MULTI_BLOCK using IndexType = int64_t; auto aInfo = detail::getTensorInfo<output_t, IndexType>(a); auto bInfo = detail::getTensorInfo<input_t, IndexType>(b); detail::TensorInfo<output_t, IndexType> pInfo(nullptr, 0, {}, {}); Tensor partial_output; if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) { partial_output = native::zeros( {grid.x, nbins}, optTypeMetaToScalarType(a.options().dtype_opt()), a.options().layout_opt(), a.options().device_opt(), a.options().pinned_memory_opt()); pInfo = detail::getTensorInfo<output_t, IndexType>(partial_output); } if (HasWeights) { auto cInfo = detail::getTensorInfo<output_t, IndexType>(c); const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { const IndexType cOffset = detail::IndexToOffset<output_t, IndexType, 1>::get(cIndex, cInfo); return cInfo.data[cOffset]; }; HANDLE_SWITCH_CASE(memType, getWeightsOp) } else { static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; HANDLE_SWITCH_CASE(memType, getDummyOp) } return true; } #undef HANDLE_CASE #undef HANDLE_SWITCH_CASE #undef FOR_KERNEL_LOOP #undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM #undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM } // namespace cuda namespace { ///////////////// bincount ///////////////// template <typename input_t, typename weights_t> Tensor _bincount_cuda_template( const Tensor& self, const Tensor& weights, int64_t minlength) { if 
(minlength < 0) { AT_ERROR("minlength should be >= 0"); } if (self.dim() == 1 && self.numel() == 0) { return native::zeros( {minlength}, kLong, c10::nullopt /* layout */, kCUDA, c10::nullopt /* pin_memory */); } if (self.dim() != 1 || (!std::is_same<input_t, uint8_t>::value && *self.min().cpu().data_ptr<input_t>() < 0)) { AT_ERROR("bincount only supports 1-d non-negative integral inputs."); } bool has_weights = weights.defined(); if (has_weights && weights.size(0) != self.size(0)) { AT_ERROR("input and weights should have the same length"); } const int64_t nbins = std::max(self.max().item<input_t>() + (int64_t)1, minlength); // we are using acc_type for the bounds, in particular int64_t for integers // in order to avoid overflows (e.g. using 256 bins for dtype uint8) using bounds_t = at::acc_type<input_t, /*is_cuda=*/true>; const bounds_t minvalue = 0; const bounds_t maxvalue = nbins; // alloc output counter on GPU Tensor output; if (has_weights) { output = native::zeros( {nbins}, optTypeMetaToScalarType(weights.options().dtype_opt()), weights.options().layout_opt(), weights.options().device_opt(), weights.options().pinned_memory_opt()); cuda::CUDA_tensor_histogram<weights_t, input_t, true>( output, self, weights, nbins, minvalue, maxvalue); } else { output = native::zeros( {nbins}, kLong, c10::nullopt /* layout */, DeviceType::CUDA, c10::nullopt /* pin_memory */); cuda::CUDA_tensor_histogram<int64_t, input_t, false>( output, self, weights, nbins, minvalue, maxvalue); } return output; } ///////////////// histc ///////////////// template <typename input_t> Tensor _histc_cuda_template( const Tensor& self, int64_t nbins, at::acc_type<input_t, /*is_cuda=*/true> min, at::acc_type<input_t, /*is_cuda=*/true> max) { if (nbins <= 0) { AT_ERROR("bins must be > 0"); } Tensor output = native::zeros( {nbins}, self.scalar_type(), c10::nullopt /* layout */, DeviceType::CUDA, c10::nullopt /* pin_memory */); input_t minvalue = min; input_t maxvalue = max; if (min == max && self.numel() > 0) { minvalue = *self.min().cpu().data_ptr<input_t>(); maxvalue = *self.max().cpu().data_ptr<input_t>(); } if (minvalue == maxvalue) { minvalue = minvalue - 1; maxvalue = maxvalue + 1; } #if !defined(USE_ROCM) TORCH_CHECK( !(at::_isinf(minvalue) || at::_isinf(maxvalue) || at::_isnan(minvalue) || at::_isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #else TORCH_CHECK( !(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) || std::isnan(maxvalue)), "range of [", minvalue, ", ", maxvalue, "] is not finite"); #endif TORCH_CHECK(minvalue < maxvalue, "max must be larger than min"); cuda::CUDA_tensor_histogram<input_t, input_t, false>( output, self, Tensor(), nbins, minvalue, maxvalue); return output; } } // namespace namespace native { Tensor _bincount_cuda( const Tensor& self, const c10::optional<Tensor>& weights_opt, int64_t minlength) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> weights_maybe_owned = at::borrow_from_optional_tensor(weights_opt); const Tensor& weights = *weights_maybe_owned; // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("_bincount_cuda"); return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] { const auto scalar = weights.scalar_type(); if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) return _bincount_cuda_template<scalar_t, float>(self, weights, minlength); return _bincount_cuda_template<scalar_t, 
double>( self, weights.to(kDouble), minlength); }); } Tensor _histc_cuda( const Tensor& self, int64_t nbins, const Scalar& min, const Scalar& max) { if (self.scalar_type() == ScalarType::Half) { AT_ERROR("HalfTensor is not supported"); } // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("_histc_cuda"); return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] { using bounds_t = at::acc_type<scalar_t, /*is_cuda=*/true>; return _histc_cuda_template<scalar_t>( self, nbins, min.to<bounds_t>(), max.to<bounds_t>()); }); } Tensor& _histc_out_cuda(const Tensor& self, int64_t bins, const Scalar& min, const Scalar& max, Tensor& result) { auto ret = _histc_cuda(self, bins, min, max); resize_output(result, ret.sizes()); result.copy_(ret); return result; } } // namespace native } // namespace at
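// --- Standalone illustration, not part of the PyTorch sources above ------------
// A minimal shared-memory histogram in the spirit of the SHARED strategy the
// comments describe: each block accumulates into its own shared copy, then
// atomically folds the partial counts into the global histogram. The fixed
// NBINS, the [0, 1) input range and the int counters are simplifying
// assumptions for illustration only.
#include <cuda_runtime.h>

#define NBINS 64

__global__ void sharedHistKernel(const float* in, int* hist, int n) {
    __shared__ int smem[NBINS];
    for (int i = threadIdx.x; i < NBINS; i += blockDim.x)
        smem[i] = 0;                                   // zero the block-local histogram
    __syncthreads();

    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x) {
        int bin = (int)(in[i] * NBINS);                // same shape as getBin() with min=0, max=1
        if (bin == NBINS) bin = NBINS - 1;             // keep the top edge in the last bin
        if (bin >= 0 && bin < NBINS)
            atomicAdd(&smem[bin], 1);
    }
    __syncthreads();

    for (int i = threadIdx.x; i < NBINS; i += blockDim.x)
        atomicAdd(&hist[i], smem[i]);                  // fold block counts into the global histogram
}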
218d08db9cd9e607f7830cbe6b08079138e61104.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * cuPrintf.cu * * This is a printf command callable from within a kernel. It is set * up so that output is sent to a memory buffer, which is emptied from * the host side - but only after a hipDeviceSynchronize() on the host. * * Currently, there is a limitation of around 200 characters of output * and no more than 10 arguments to a single cuPrintf() call. Issue * multiple calls if longer format strings are required. * * It requires minimal setup, and is *NOT* optimised for performance. * For example, writes are not coalesced - this is because there is an * assumption that people will not want to printf from every single one * of thousands of threads, but only from individual threads at a time. * * Using this is simple - it requires one host-side call to initialise * everything, and then kernels can call cuPrintf at will. Sample code * is the easiest way to demonstrate: * #include "cuPrintf.hip" __global__ void testKernel(int val) { cuPrintf("Value is: %d\n", val); } int main() { cudaPrintfInit(); testKernel<<< 2, 3 >>>(10); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); return 0; } * * See the header file, "cuPrintf.cuh" for more info, especially * arguments to cudaPrintfInit() and cudaPrintfDisplay(); */ #ifndef CUPRINTF_CU #define CUPRINTF_CU #include "Printf_cuda.cuh" #if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture #include <sm_60_atomic_functions.h> #endif // This is the smallest amount of memory, per-thread, which is allowed. // It is also the largest amount of space a single printf() can take up const static int CUPRINTF_MAX_LEN = 256; // This structure is used internally to track block/thread output restrictions. typedef struct __align__(8) { int threadid; // CUPRINTF_UNRESTRICTED for unrestricted int blockid; // CUPRINTF_UNRESTRICTED for unrestricted } cuPrintfRestriction; // The main storage is in a global print buffer, which has a known // start/end/length. These are atomically updated so it works as a // circular buffer. // Since the only control primitive that can be used is atomicAdd(), // we cannot wrap the pointer as such. The actual address must be // calculated from printfBufferPtr by mod-ing with printfBufferLength. // For sm_10 architecture, we must subdivide the buffer per-thread // since we do not even have an atomic primitive. __constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host) __constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host) __device__ static cuPrintfRestriction restrictRules; // Output restrictions __device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset // This is the header preceeding all printf entries. 
// NOTE: It *must* be size-aligned to the maximum entity size (size_t) typedef struct __align__(8) { unsigned short magic; // Magic number says we're valid unsigned short fmtoffset; // Offset of fmt string into buffer unsigned short blockid; // Block ID of author unsigned short threadid; // Thread ID of author } cuPrintfHeader; // Special header for sm_10 architecture #define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character typedef struct __align__(16) { unsigned short magic; // sm_10 specific magic number unsigned short unused; unsigned int thread_index; // thread ID for this buffer unsigned int thread_buf_len; // per-thread buffer length unsigned int offset; // most recent printf's offset } cuPrintfHeaderSM10; // Because we can't write an element which is not aligned to its bit-size, // we have to align all sizes and variables on maximum-size boundaries. // That means sizeof(double) in this case, but we'll use (long long) for // better arch<1.3 support #define CUPRINTF_ALIGN_SIZE sizeof(long long) // All our headers are prefixed with a magic number so we know they're ready #define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character // // getNextPrintfBufPtr // // Grabs a block of space in the general circular buffer, using an // atomic function to ensure that it's ours. We handle wrapping // around the circular buffer and return a pointer to a place which // can be written to. // // Important notes: // 1. We always grab CUPRINTF_MAX_LEN bytes // 2. Because of 1, we never worry about wrapping around the end // 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN // // This returns a pointer to the place where we own. // __device__ static char *getNextPrintfBufPtr() { // Initialisation check if(!printfBufferPtr) return NULL; // Thread/block restriction check if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y))) return NULL; if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z))) return NULL; // Conditional section, dependent on architecture #if __CUDA_ARCH__ == 100 // For sm_10 architectures, we have no atomic add - this means we must split the // entire available buffer into per-thread blocks. Inefficient, but what can you do. int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z); int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z + (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z); // Find our own block of data and go to it. Make sure the per-thread length // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and // alignment issues! We must round down, of course. unsigned int thread_buf_len = printfBufferLength / thread_count; thread_buf_len &= ~(CUPRINTF_MAX_LEN-1); // We *must* have a thread buffer length able to fit at least two printfs (one header, one real) if(thread_buf_len < (CUPRINTF_MAX_LEN * 2)) return NULL; // Now address our section of the buffer. The first item is a header. char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index); cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer; if(hdr.magic != CUPRINTF_SM10_MAGIC) { // If our header is not set up, initialise it hdr.magic = CUPRINTF_SM10_MAGIC; hdr.thread_index = thread_index; hdr.thread_buf_len = thread_buf_len; hdr.offset = 0; // Note we start at 0! 
We pre-increment below. *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header // For initial setup purposes, we might need to init thread0's header too // (so that cudaPrintfDisplay() below will work). This is only run once. cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer; tophdr->thread_buf_len = thread_buf_len; } // Adjust the offset by the right amount, and wrap it if need be unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN; if(offset >= hdr.thread_buf_len) offset = CUPRINTF_MAX_LEN; // Write back the new offset for next time and return a pointer to it ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset; return myPrintfBuffer + offset; #else // Much easier with an atomic operation! size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer; offset %= printfBufferLength; return globalPrintfBuffer + offset; #endif } // // writePrintfHeader // // Inserts the header for containing our UID, fmt position and // block/thread number. We generate it dynamically to avoid // issues arising from requiring pre-initialisation. // __device__ static void writePrintfHeader(char *ptr, char *fmtptr) { if(ptr) { cuPrintfHeader header; header.magic = CUPRINTF_SM11_MAGIC; header.fmtoffset = (unsigned short)(fmtptr - ptr); header.blockid = blockIdx.x + gridDim.x*blockIdx.y; header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z; *(cuPrintfHeader *)(void *)ptr = header; } } // // cuPrintfStrncpy // // This special strncpy outputs an aligned length value, followed by the // string. It then zero-pads the rest of the string until a 64-aligned // boundary. The length *includes* the padding. A pointer to the byte // just after the \0 is returned. // // This function could overflow CUPRINTF_MAX_LEN characters in our buffer. // To avoid it, we must count as we output and truncate where necessary. // __device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end) { // Initialisation and overflow check if(!dest || !src || (dest >= end)) return NULL; // Prepare to write the length specifier. We're guaranteed to have // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE. int *lenptr = (int *)(void *)dest; int len = 0; dest += CUPRINTF_ALIGN_SIZE; // Now copy the string while(n--) { if(dest >= end) // Overflow check break; len++; *dest++ = *src; if(*src++ == '\0') break; } // Now write out the padding bytes, and we have our length. while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)) { len++; *dest++ = 0; } *lenptr = len; return (dest < end) ? dest : NULL; // Overflow means return NULL } // // copyArg // // This copies a length specifier and then the argument out to the // data buffer. Templates let the compiler figure all this out at // compile-time, making life much simpler from the programming // point of view. I'm assuimg all (const char *) is a string, and // everything else is the variable it points at. I'd love to see // a better way of doing it, but aside from parsing the format // string I can't think of one. // // The length of the data type is inserted at the beginning (so that // the display can distinguish between float and double), and the // pointer to the end of the entry is returned. 
// __device__ static char *copyArg(char *ptr, const char *arg, char *end) { // Initialisation check if(!ptr || !arg) return NULL; // strncpy does all our work. We just terminate. if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL) *ptr = 0; return ptr; } template <typename T> __device__ static char *copyArg(char *ptr, T &arg, char *end) { // Initisalisation and overflow check. Alignment rules mean that // we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need // to check that one offset. if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end)) return NULL; // Write the length and argument *(int *)(void *)ptr = sizeof(arg); ptr += CUPRINTF_ALIGN_SIZE; *(T *)(void *)ptr = arg; ptr += CUPRINTF_ALIGN_SIZE; *ptr = 0; return ptr; } // // cuPrintf // // Templated printf functions to handle multiple arguments. // Note we return the total amount of data copied, not the number // of characters output. But then again, who ever looks at the // return from printf() anyway? // // The format is to grab a block of circular buffer space, the // start of which will hold a header and a pointer to the format // string. We then write in all the arguments, and finally the // format string itself. This is to make it easy to prevent // overflow of our buffer (we support up to 10 arguments, each of // which can be 12 bytes in length - that means that only the // format string (or a %s) can actually overflow; so the overflow // check need only be in the strcpy function. // // The header is written at the very last because that's what // makes it look like we're done. // // Errors, which are basically lack-of-initialisation, are ignored // in the called functions because NULL pointers are passed around // // All printf variants basically do the same thing, setting up the // buffer, writing all arguments, then finalising the header. For // clarity, we'll pack the code into some big macros. #define CUPRINTF_PREAMBLE \ char *start, *end, *bufptr, *fmtstart; \ if((start = getNextPrintfBufPtr()) == NULL) return 0; \ end = start + CUPRINTF_MAX_LEN; \ bufptr = start + sizeof(cuPrintfHeader); // Posting an argument is easy #define CUPRINTF_ARG(argname) \ bufptr = copyArg(bufptr, argname, end); // After args are done, record start-of-fmt and write the fmt and header #define CUPRINTF_POSTAMBLE \ fmtstart = bufptr; \ end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \ writePrintfHeader(start, end ? fmtstart : NULL); \ return end ? 
(int)(end - start) : 0; __device__ int cuPrintf(const char *fmt) { CUPRINTF_PREAMBLE; CUPRINTF_POSTAMBLE; } template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_ARG(arg10); CUPRINTF_POSTAMBLE; } #undef CUPRINTF_PREAMBLE #undef CUPRINTF_ARG #undef CUPRINTF_POSTAMBLE // // cuPrintfRestrict // // Called to restrict output to a given thread/block. 
// We store the info in "restrictRules", which is set up at // init time by the host. It's not the cleanest way to do this // because it means restrictions will last between // invocations, but given the output-pointer continuity, // I feel this is reasonable. // __device__ void cuPrintfRestrict(int threadid, int blockid) { int thread_count = blockDim.x * blockDim.y * blockDim.z; if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED)) restrictRules.threadid = threadid; int block_count = gridDim.x * gridDim.y; if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED)) restrictRules.blockid = blockid; } /////////////////////////////////////////////////////////////////////////////// // HOST SIDE #include <stdio.h> static FILE *printf_fp; static char *printfbuf_start=NULL; static char *printfbuf_device=NULL; static int printfbuf_len=0; // // outputPrintfData // // Our own internal function, which takes a pointer to a data buffer // and passes it through libc's printf for output. // // We receive the formate string and a pointer to where the data is // held. We then run through and print it out. // // Returns 0 on failure, 1 on success // static int outputPrintfData(char *fmt, char *data) { // Format string is prefixed by a length that we don't need fmt += CUPRINTF_ALIGN_SIZE; // Now run through it, printing everything we can. We must // run to every % character, extract only that, and use printf // to format it. char *p = strchr(fmt, '%'); while(p != NULL) { // Print up to the % character *p = '\0'; fputs(fmt, printf_fp); *p = '%'; // Put back the % // Now handle the format specifier char *format = p++; // Points to the '%' p += strcspn(p, "%cdiouxXeEfgGaAnps"); if(*p == '\0') // If no format specifier, print the whole thing { fmt = format; break; } // Cut out the format bit and use printf to print it. It's prefixed // by its length. int arglen = *(int *)data; if(arglen > CUPRINTF_MAX_LEN) { fputs("Corrupt printf buffer data - aborting\n", printf_fp); return 0; } data += CUPRINTF_ALIGN_SIZE; char specifier = *p++; char c = *p; // Store for later *p = '\0'; switch(specifier) { // These all take integer arguments case 'c': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'p': fprintf(printf_fp, format, *((int *)data)); break; // These all take double arguments case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': if(arglen == 4) // Float vs. Double thing fprintf(printf_fp, format, *((float *)data)); else fprintf(printf_fp, format, *((double *)data)); break; // Strings are handled in a special way case 's': fprintf(printf_fp, format, (char *)data); break; // % is special case '%': fprintf(printf_fp, "%%"); break; // Everything else is just printed out as-is default: fprintf(printf_fp, "%s", format); break; } data += CUPRINTF_ALIGN_SIZE; // Move on to next argument *p = c; // Restore what we removed fmt = p; // Adjust fmt string to be past the specifier p = strchr(fmt, '%'); // and get the next specifier } // Print out the last of the string fputs(fmt, printf_fp); return 1; } // // doPrintfDisplay // // This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the // print function above to display them. We've got this separate from // cudaPrintfDisplay() below so we can handle the SM_10 architecture // partitioning. 
// static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr) { // Grab, piece-by-piece, each output element until we catch // up with the circular buffer end pointer int printf_count=0; char printfbuf_local[CUPRINTF_MAX_LEN+1]; printfbuf_local[CUPRINTF_MAX_LEN] = '\0'; while(bufptr != endptr) { // Wrap ourselves at the end-of-buffer if(bufptr == bufend) bufptr = bufstart; // Adjust our start pointer to within the circular buffer and copy a block. hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost); // If the magic number isn't valid, then this write hasn't gone through // yet and we'll wait until it does (or we're past the end for non-async printfs). cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local; if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN)) { //fprintf(printf_fp, "Bad magic number in printf header\n"); break; } // Extract all the info and get this printf done if(headings) fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid); if(hdr->fmtoffset == 0) fprintf(printf_fp, "printf buffer overflow\n"); else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader))) break; printf_count++; // Clear if asked if(clear) hipMemset(bufptr, 0, CUPRINTF_MAX_LEN); // Now advance our start location, because we're done, and keep copying bufptr += CUPRINTF_MAX_LEN; } return printf_count; } // // cudaPrintfInit // // Takes a buffer length to allocate, creates the memory on the device and // returns a pointer to it for when a kernel is called. It's up to the caller // to free it. // extern "C" hipError_t cudaPrintfInit(size_t bufferLen) { // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen; if((bufferLen % CUPRINTF_MAX_LEN) > 0) bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN)); printfbuf_len = (int)bufferLen; // Allocate a print buffer on the device and zero it if(hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess) return hipErrorInitializationError; hipMemset(printfbuf_device, 0, printfbuf_len); printfbuf_start = printfbuf_device; // Where we start reading from // No restrictions to begin with cuPrintfRestriction restrict; restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED; hipMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict)); // Initialise the buffer and the respective lengths/pointers. hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *)); hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *)); hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len)); return hipSuccess; } // // cudaPrintfEnd // // Frees up the memory which we allocated // extern "C" void cudaPrintfEnd() { if(!printfbuf_start || !printfbuf_device) return; hipFree(printfbuf_device); printfbuf_start = printfbuf_device = NULL; } // // cudaPrintfDisplay // // Each call to this function dumps the entire current contents // of the printf buffer to the pre-specified FILE pointer. The // circular "start" pointer is advanced so that subsequent calls // dumps only new stuff. // // In the case of async memory access (via streams), call this // repeatedly to keep trying to empty the buffer. If it's a sync // access, then the whole buffer should empty in one go. 
// // Arguments: // outputFP - File descriptor to output to (NULL => stdout) // showThreadID - If true, prints [block,thread] before each line // extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID) { printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP); // For now, we force "synchronous" mode which means we're not concurrent // with kernel execution. This also means we don't need clearOnPrint. // If you're patching it for async operation, here's where you want it. bool sync_printfs = true; bool clearOnPrint = false; // Initialisation check if(!printfbuf_start || !printfbuf_device || !printf_fp) return hipErrorMissingConfiguration; // To determine which architecture we're using, we read the // first short from the buffer - it'll be the magic number // relating to the version. unsigned short magic; hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost); // For SM_10 architecture, we've split our buffer into one-per-thread. // That means we must do each thread block separately. It'll require // extra reading. We also, for now, don't support async printfs because // that requires tracking one start pointer per thread. if(magic == CUPRINTF_SM10_MAGIC) { sync_printfs = true; clearOnPrint = false; int blocklen = 0; char *blockptr = printfbuf_device; while(blockptr < (printfbuf_device + printfbuf_len)) { cuPrintfHeaderSM10 hdr; hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost); // We get our block-size-step from the very first header if(hdr.thread_buf_len != 0) blocklen = hdr.thread_buf_len; // No magic number means no printfs from this thread if(hdr.magic != CUPRINTF_SM10_MAGIC) { if(blocklen == 0) { fprintf(printf_fp, "No printf headers found at all!\n"); break; // No valid headers! } blockptr += blocklen; continue; } // "offset" is non-zero then we can print the block contents if(hdr.offset > 0) { // For synchronous printfs, we must print from endptr->bufend, then from start->end if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len); doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN); } // Move on to the next block and loop again blockptr += hdr.thread_buf_len; } } // For SM_11 and up, everything is a single buffer and it's simple else if(magic == CUPRINTF_SM11_MAGIC) { // Grab the current "end of circular buffer" pointer. char *printfbuf_end = NULL; hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *)); // Adjust our starting and ending pointers to within the block char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device; char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device; // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular // buffer wrap carefully because we could miss those past "end". 
if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len); doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr); printfbuf_start = printfbuf_end; } else ;//printf("Bad magic number in cuPrintf buffer header\n"); // If we were synchronous, then we must ensure that the memory is cleared on exit // otherwise another kernel launch with a different grid size could conflict. if(sync_printfs) hipMemset(printfbuf_device, 0, printfbuf_len); return hipSuccess; } // Cleanup #undef CUPRINTF_MAX_LEN #undef CUPRINTF_ALIGN_SIZE #undef CUPRINTF_SM10_MAGIC #undef CUPRINTF_SM11_MAGIC #endif
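For reference, a minimal host-side usage sketch of the hipified printf above, mirroring the sample in the original header comment. The kernel name, launch configuration, and buffer size are illustrative only, and the implementation file is assumed to already be included in the translation unit.

__global__ void printfDemoKernel(int val)
{
    cuPrintf("Value is: %d\n", val);
}

int runPrintfDemo()
{
    cudaPrintfInit(8 * 1024);           // rounded internally to a multiple of CUPRINTF_MAX_LEN
    printfDemoKernel<<<2, 3>>>(10);
    hipDeviceSynchronize();             // make sure the kernel has finished writing
    cudaPrintfDisplay(stdout, true);    // true => prefix each line with [block, thread]
    cudaPrintfEnd();
    return 0;
}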
218d08db9cd9e607f7830cbe6b08079138e61104.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * cuPrintf.cu * * This is a printf command callable from within a kernel. It is set * up so that output is sent to a memory buffer, which is emptied from * the host side - but only after a cudaThreadSynchronize() on the host. * * Currently, there is a limitation of around 200 characters of output * and no more than 10 arguments to a single cuPrintf() call. Issue * multiple calls if longer format strings are required. * * It requires minimal setup, and is *NOT* optimised for performance. * For example, writes are not coalesced - this is because there is an * assumption that people will not want to printf from every single one * of thousands of threads, but only from individual threads at a time. * * Using this is simple - it requires one host-side call to initialise * everything, and then kernels can call cuPrintf at will. Sample code * is the easiest way to demonstrate: * #include "cuPrintf.cu" __global__ void testKernel(int val) { cuPrintf("Value is: %d\n", val); } int main() { cudaPrintfInit(); testKernel<<< 2, 3 >>>(10); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); return 0; } * * See the header file, "cuPrintf.cuh" for more info, especially * arguments to cudaPrintfInit() and cudaPrintfDisplay(); */ #ifndef CUPRINTF_CU #define CUPRINTF_CU #include "Printf_cuda.cuh" #if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture #include <sm_60_atomic_functions.h> #endif // This is the smallest amount of memory, per-thread, which is allowed. // It is also the largest amount of space a single printf() can take up const static int CUPRINTF_MAX_LEN = 256; // This structure is used internally to track block/thread output restrictions. typedef struct __align__(8) { int threadid; // CUPRINTF_UNRESTRICTED for unrestricted int blockid; // CUPRINTF_UNRESTRICTED for unrestricted } cuPrintfRestriction; // The main storage is in a global print buffer, which has a known // start/end/length. These are atomically updated so it works as a // circular buffer. // Since the only control primitive that can be used is atomicAdd(), // we cannot wrap the pointer as such. The actual address must be // calculated from printfBufferPtr by mod-ing with printfBufferLength. // For sm_10 architecture, we must subdivide the buffer per-thread // since we do not even have an atomic primitive. __constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host) __constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host) __device__ static cuPrintfRestriction restrictRules; // Output restrictions __device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset // This is the header preceeding all printf entries. 
// NOTE: It *must* be size-aligned to the maximum entity size (size_t) typedef struct __align__(8) { unsigned short magic; // Magic number says we're valid unsigned short fmtoffset; // Offset of fmt string into buffer unsigned short blockid; // Block ID of author unsigned short threadid; // Thread ID of author } cuPrintfHeader; // Special header for sm_10 architecture #define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character typedef struct __align__(16) { unsigned short magic; // sm_10 specific magic number unsigned short unused; unsigned int thread_index; // thread ID for this buffer unsigned int thread_buf_len; // per-thread buffer length unsigned int offset; // most recent printf's offset } cuPrintfHeaderSM10; // Because we can't write an element which is not aligned to its bit-size, // we have to align all sizes and variables on maximum-size boundaries. // That means sizeof(double) in this case, but we'll use (long long) for // better arch<1.3 support #define CUPRINTF_ALIGN_SIZE sizeof(long long) // All our headers are prefixed with a magic number so we know they're ready #define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character // // getNextPrintfBufPtr // // Grabs a block of space in the general circular buffer, using an // atomic function to ensure that it's ours. We handle wrapping // around the circular buffer and return a pointer to a place which // can be written to. // // Important notes: // 1. We always grab CUPRINTF_MAX_LEN bytes // 2. Because of 1, we never worry about wrapping around the end // 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN // // This returns a pointer to the place where we own. // __device__ static char *getNextPrintfBufPtr() { // Initialisation check if(!printfBufferPtr) return NULL; // Thread/block restriction check if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y))) return NULL; if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z))) return NULL; // Conditional section, dependent on architecture #if __CUDA_ARCH__ == 100 // For sm_10 architectures, we have no atomic add - this means we must split the // entire available buffer into per-thread blocks. Inefficient, but what can you do. int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z); int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z + (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z); // Find our own block of data and go to it. Make sure the per-thread length // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and // alignment issues! We must round down, of course. unsigned int thread_buf_len = printfBufferLength / thread_count; thread_buf_len &= ~(CUPRINTF_MAX_LEN-1); // We *must* have a thread buffer length able to fit at least two printfs (one header, one real) if(thread_buf_len < (CUPRINTF_MAX_LEN * 2)) return NULL; // Now address our section of the buffer. The first item is a header. char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index); cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer; if(hdr.magic != CUPRINTF_SM10_MAGIC) { // If our header is not set up, initialise it hdr.magic = CUPRINTF_SM10_MAGIC; hdr.thread_index = thread_index; hdr.thread_buf_len = thread_buf_len; hdr.offset = 0; // Note we start at 0! 
We pre-increment below. *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header // For initial setup purposes, we might need to init thread0's header too // (so that cudaPrintfDisplay() below will work). This is only run once. cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer; tophdr->thread_buf_len = thread_buf_len; } // Adjust the offset by the right amount, and wrap it if need be unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN; if(offset >= hdr.thread_buf_len) offset = CUPRINTF_MAX_LEN; // Write back the new offset for next time and return a pointer to it ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset; return myPrintfBuffer + offset; #else // Much easier with an atomic operation! size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer; offset %= printfBufferLength; return globalPrintfBuffer + offset; #endif } // // writePrintfHeader // // Inserts the header for containing our UID, fmt position and // block/thread number. We generate it dynamically to avoid // issues arising from requiring pre-initialisation. // __device__ static void writePrintfHeader(char *ptr, char *fmtptr) { if(ptr) { cuPrintfHeader header; header.magic = CUPRINTF_SM11_MAGIC; header.fmtoffset = (unsigned short)(fmtptr - ptr); header.blockid = blockIdx.x + gridDim.x*blockIdx.y; header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z; *(cuPrintfHeader *)(void *)ptr = header; } } // // cuPrintfStrncpy // // This special strncpy outputs an aligned length value, followed by the // string. It then zero-pads the rest of the string until a 64-aligned // boundary. The length *includes* the padding. A pointer to the byte // just after the \0 is returned. // // This function could overflow CUPRINTF_MAX_LEN characters in our buffer. // To avoid it, we must count as we output and truncate where necessary. // __device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end) { // Initialisation and overflow check if(!dest || !src || (dest >= end)) return NULL; // Prepare to write the length specifier. We're guaranteed to have // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE. int *lenptr = (int *)(void *)dest; int len = 0; dest += CUPRINTF_ALIGN_SIZE; // Now copy the string while(n--) { if(dest >= end) // Overflow check break; len++; *dest++ = *src; if(*src++ == '\0') break; } // Now write out the padding bytes, and we have our length. while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)) { len++; *dest++ = 0; } *lenptr = len; return (dest < end) ? dest : NULL; // Overflow means return NULL } // // copyArg // // This copies a length specifier and then the argument out to the // data buffer. Templates let the compiler figure all this out at // compile-time, making life much simpler from the programming // point of view. I'm assuimg all (const char *) is a string, and // everything else is the variable it points at. I'd love to see // a better way of doing it, but aside from parsing the format // string I can't think of one. // // The length of the data type is inserted at the beginning (so that // the display can distinguish between float and double), and the // pointer to the end of the entry is returned. 
// __device__ static char *copyArg(char *ptr, const char *arg, char *end) { // Initialisation check if(!ptr || !arg) return NULL; // strncpy does all our work. We just terminate. if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL) *ptr = 0; return ptr; } template <typename T> __device__ static char *copyArg(char *ptr, T &arg, char *end) { // Initisalisation and overflow check. Alignment rules mean that // we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need // to check that one offset. if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end)) return NULL; // Write the length and argument *(int *)(void *)ptr = sizeof(arg); ptr += CUPRINTF_ALIGN_SIZE; *(T *)(void *)ptr = arg; ptr += CUPRINTF_ALIGN_SIZE; *ptr = 0; return ptr; } // // cuPrintf // // Templated printf functions to handle multiple arguments. // Note we return the total amount of data copied, not the number // of characters output. But then again, who ever looks at the // return from printf() anyway? // // The format is to grab a block of circular buffer space, the // start of which will hold a header and a pointer to the format // string. We then write in all the arguments, and finally the // format string itself. This is to make it easy to prevent // overflow of our buffer (we support up to 10 arguments, each of // which can be 12 bytes in length - that means that only the // format string (or a %s) can actually overflow; so the overflow // check need only be in the strcpy function. // // The header is written at the very last because that's what // makes it look like we're done. // // Errors, which are basically lack-of-initialisation, are ignored // in the called functions because NULL pointers are passed around // // All printf variants basically do the same thing, setting up the // buffer, writing all arguments, then finalising the header. For // clarity, we'll pack the code into some big macros. #define CUPRINTF_PREAMBLE \ char *start, *end, *bufptr, *fmtstart; \ if((start = getNextPrintfBufPtr()) == NULL) return 0; \ end = start + CUPRINTF_MAX_LEN; \ bufptr = start + sizeof(cuPrintfHeader); // Posting an argument is easy #define CUPRINTF_ARG(argname) \ bufptr = copyArg(bufptr, argname, end); // After args are done, record start-of-fmt and write the fmt and header #define CUPRINTF_POSTAMBLE \ fmtstart = bufptr; \ end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \ writePrintfHeader(start, end ? fmtstart : NULL); \ return end ? 
(int)(end - start) : 0; __device__ int cuPrintf(const char *fmt) { CUPRINTF_PREAMBLE; CUPRINTF_POSTAMBLE; } template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_ARG(arg10); CUPRINTF_POSTAMBLE; } #undef CUPRINTF_PREAMBLE #undef CUPRINTF_ARG #undef CUPRINTF_POSTAMBLE // // cuPrintfRestrict // // Called to restrict output to a given thread/block. 
// We store the info in "restrictRules", which is set up at // init time by the host. It's not the cleanest way to do this // because it means restrictions will last between // invocations, but given the output-pointer continuity, // I feel this is reasonable. // __device__ void cuPrintfRestrict(int threadid, int blockid) { int thread_count = blockDim.x * blockDim.y * blockDim.z; if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED)) restrictRules.threadid = threadid; int block_count = gridDim.x * gridDim.y; if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED)) restrictRules.blockid = blockid; } /////////////////////////////////////////////////////////////////////////////// // HOST SIDE #include <stdio.h> static FILE *printf_fp; static char *printfbuf_start=NULL; static char *printfbuf_device=NULL; static int printfbuf_len=0; // // outputPrintfData // // Our own internal function, which takes a pointer to a data buffer // and passes it through libc's printf for output. // // We receive the formate string and a pointer to where the data is // held. We then run through and print it out. // // Returns 0 on failure, 1 on success // static int outputPrintfData(char *fmt, char *data) { // Format string is prefixed by a length that we don't need fmt += CUPRINTF_ALIGN_SIZE; // Now run through it, printing everything we can. We must // run to every % character, extract only that, and use printf // to format it. char *p = strchr(fmt, '%'); while(p != NULL) { // Print up to the % character *p = '\0'; fputs(fmt, printf_fp); *p = '%'; // Put back the % // Now handle the format specifier char *format = p++; // Points to the '%' p += strcspn(p, "%cdiouxXeEfgGaAnps"); if(*p == '\0') // If no format specifier, print the whole thing { fmt = format; break; } // Cut out the format bit and use printf to print it. It's prefixed // by its length. int arglen = *(int *)data; if(arglen > CUPRINTF_MAX_LEN) { fputs("Corrupt printf buffer data - aborting\n", printf_fp); return 0; } data += CUPRINTF_ALIGN_SIZE; char specifier = *p++; char c = *p; // Store for later *p = '\0'; switch(specifier) { // These all take integer arguments case 'c': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'p': fprintf(printf_fp, format, *((int *)data)); break; // These all take double arguments case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': if(arglen == 4) // Float vs. Double thing fprintf(printf_fp, format, *((float *)data)); else fprintf(printf_fp, format, *((double *)data)); break; // Strings are handled in a special way case 's': fprintf(printf_fp, format, (char *)data); break; // % is special case '%': fprintf(printf_fp, "%%"); break; // Everything else is just printed out as-is default: fprintf(printf_fp, "%s", format); break; } data += CUPRINTF_ALIGN_SIZE; // Move on to next argument *p = c; // Restore what we removed fmt = p; // Adjust fmt string to be past the specifier p = strchr(fmt, '%'); // and get the next specifier } // Print out the last of the string fputs(fmt, printf_fp); return 1; } // // doPrintfDisplay // // This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the // print function above to display them. We've got this separate from // cudaPrintfDisplay() below so we can handle the SM_10 architecture // partitioning. 
// static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr) { // Grab, piece-by-piece, each output element until we catch // up with the circular buffer end pointer int printf_count=0; char printfbuf_local[CUPRINTF_MAX_LEN+1]; printfbuf_local[CUPRINTF_MAX_LEN] = '\0'; while(bufptr != endptr) { // Wrap ourselves at the end-of-buffer if(bufptr == bufend) bufptr = bufstart; // Adjust our start pointer to within the circular buffer and copy a block. cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost); // If the magic number isn't valid, then this write hasn't gone through // yet and we'll wait until it does (or we're past the end for non-async printfs). cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local; if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN)) { //fprintf(printf_fp, "Bad magic number in printf header\n"); break; } // Extract all the info and get this printf done if(headings) fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid); if(hdr->fmtoffset == 0) fprintf(printf_fp, "printf buffer overflow\n"); else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader))) break; printf_count++; // Clear if asked if(clear) cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN); // Now advance our start location, because we're done, and keep copying bufptr += CUPRINTF_MAX_LEN; } return printf_count; } // // cudaPrintfInit // // Takes a buffer length to allocate, creates the memory on the device and // returns a pointer to it for when a kernel is called. It's up to the caller // to free it. // extern "C" cudaError_t cudaPrintfInit(size_t bufferLen) { // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen; if((bufferLen % CUPRINTF_MAX_LEN) > 0) bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN)); printfbuf_len = (int)bufferLen; // Allocate a print buffer on the device and zero it if(cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess) return cudaErrorInitializationError; cudaMemset(printfbuf_device, 0, printfbuf_len); printfbuf_start = printfbuf_device; // Where we start reading from // No restrictions to begin with cuPrintfRestriction restrict; restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED; cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict)); // Initialise the buffer and the respective lengths/pointers. cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len)); return cudaSuccess; } // // cudaPrintfEnd // // Frees up the memory which we allocated // extern "C" void cudaPrintfEnd() { if(!printfbuf_start || !printfbuf_device) return; cudaFree(printfbuf_device); printfbuf_start = printfbuf_device = NULL; } // // cudaPrintfDisplay // // Each call to this function dumps the entire current contents // of the printf buffer to the pre-specified FILE pointer. The // circular "start" pointer is advanced so that subsequent calls // dumps only new stuff. // // In the case of async memory access (via streams), call this // repeatedly to keep trying to empty the buffer. If it's a sync // access, then the whole buffer should empty in one go. 
// // Arguments: // outputFP - File descriptor to output to (NULL => stdout) // showThreadID - If true, prints [block,thread] before each line // extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID) { printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP); // For now, we force "synchronous" mode which means we're not concurrent // with kernel execution. This also means we don't need clearOnPrint. // If you're patching it for async operation, here's where you want it. bool sync_printfs = true; bool clearOnPrint = false; // Initialisation check if(!printfbuf_start || !printfbuf_device || !printf_fp) return cudaErrorMissingConfiguration; // To determine which architecture we're using, we read the // first short from the buffer - it'll be the magic number // relating to the version. unsigned short magic; cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost); // For SM_10 architecture, we've split our buffer into one-per-thread. // That means we must do each thread block separately. It'll require // extra reading. We also, for now, don't support async printfs because // that requires tracking one start pointer per thread. if(magic == CUPRINTF_SM10_MAGIC) { sync_printfs = true; clearOnPrint = false; int blocklen = 0; char *blockptr = printfbuf_device; while(blockptr < (printfbuf_device + printfbuf_len)) { cuPrintfHeaderSM10 hdr; cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost); // We get our block-size-step from the very first header if(hdr.thread_buf_len != 0) blocklen = hdr.thread_buf_len; // No magic number means no printfs from this thread if(hdr.magic != CUPRINTF_SM10_MAGIC) { if(blocklen == 0) { fprintf(printf_fp, "No printf headers found at all!\n"); break; // No valid headers! } blockptr += blocklen; continue; } // "offset" is non-zero then we can print the block contents if(hdr.offset > 0) { // For synchronous printfs, we must print from endptr->bufend, then from start->end if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len); doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN); } // Move on to the next block and loop again blockptr += hdr.thread_buf_len; } } // For SM_11 and up, everything is a single buffer and it's simple else if(magic == CUPRINTF_SM11_MAGIC) { // Grab the current "end of circular buffer" pointer. char *printfbuf_end = NULL; cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *)); // Adjust our starting and ending pointers to within the block char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device; char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device; // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular // buffer wrap carefully because we could miss those past "end". 
if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len); doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr); printfbuf_start = printfbuf_end; } else ;//printf("Bad magic number in cuPrintf buffer header\n"); // If we were synchronous, then we must ensure that the memory is cleared on exit // otherwise another kernel launch with a different grid size could conflict. if(sync_printfs) cudaMemset(printfbuf_device, 0, printfbuf_len); return cudaSuccess; } // Cleanup #undef CUPRINTF_MAX_LEN #undef CUPRINTF_ALIGN_SIZE #undef CUPRINTF_SM10_MAGIC #undef CUPRINTF_SM11_MAGIC #endif
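A short device-side sketch of the cuPrintfRestrict() hook defined above; restricting to thread 0 of block 0 is an arbitrary choice for illustration, and the host-side setup is the same as in the earlier usage sketch.

__global__ void restrictedPrintfKernel(int val)
{
    // Every thread writes the same rule into restrictRules; afterwards only
    // thread 0 of block 0 passes the check in getNextPrintfBufPtr(), so a
    // single line is emitted per launch.
    cuPrintfRestrict(0, 0);
    cuPrintf("printed once per launch: %d\n", val);
}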
480e68cf484a9720faac4693e6f2a4341589c5fc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> inline void checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { printf("Error: %s : %d", __FILE__, __LINE__); printf("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result)); exit(1); } #endif } /* * simpleDivergence demonstrates divergent code on the GPU and its impact on * performance and CUDA metrics. */ __global__ void mathKernel1(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; if (tid % 2 == 0) { ia = 100.0f; } else { ib = 200.0f; } c[tid] = ia + ib; } __global__ void mathKernel2(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; if ((tid / warpSize) % 2 == 0) { ia = 100.0f; } else { ib = 200.0f; } c[tid] = ia + ib; } __global__ void mathKernel3(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; bool ipred = (tid % 2 == 0); if (ipred) { ia = 100.0f; } if (!ipred) { ib = 200.0f; } c[tid] = ia + ib; } __global__ void mathKernel4(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; int itid = tid >> 5; if (itid & 0x01 == 0) { ia = 100.0f; } else { ib = 200.0f; } c[tid] = ia + ib; } __global__ void warmingup(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; if ((tid / warpSize) % 2 == 0) { ia = 100.0f; } else { ib = 200.0f; } c[tid] = ia + ib; } int main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; checkCuda(hipGetDeviceProperties(&deviceProp, dev)); printf("%s using Device %d: %s\n", argv[0], dev, deviceProp.name); // set up data size int size = 64; int blocksize = 64; if (argc > 1) blocksize = atoi(argv[1]); if (argc > 2) size = atoi(argv[2]); printf("Data size %d ", size); // set up execution configuration dim3 block(blocksize, 1); dim3 grid((size + block.x - 1) / block.x, 1); printf("Execution Configure (block %d grid %d)\n", block.x, grid.x); // allocate gpu memory float *d_C; size_t nBytes = size * sizeof(float); checkCuda(hipMalloc((float**)&d_C, nBytes)); // run a warmup kernel to remove overhead checkCuda(hipDeviceSynchronize()); warmingup << <grid, block >> >(d_C); checkCuda(hipDeviceSynchronize()); // run kernel 1 mathKernel1 << <grid, block >> >(d_C); checkCuda(hipDeviceSynchronize()); // run kernel 2 mathKernel2 << <grid, block >> >(d_C); checkCuda(hipDeviceSynchronize()); // run kernel 3 mathKernel3 << <grid, block >> >(d_C); checkCuda(hipDeviceSynchronize()); // run kernel 4 mathKernel4 << <grid, block >> >(d_C); checkCuda(hipDeviceSynchronize()); // free gpu memory and reset divece checkCuda(hipFree(d_C)); checkCuda(hipDeviceReset()); return 0; }
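One caveat with the checkCuda() helper in the file above: it only reports anything when DEBUG or _DEBUG is defined, and the __FILE__/__LINE__ it prints always point into the helper itself rather than at the failing call. A macro wrapper captures the call site instead; this is a sketch of my own (the CHECK_HIP name is hypothetical), not part of the original source.

#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define CHECK_HIP(call)                                                  \
    do {                                                                 \
        hipError_t err_ = (call);                                        \
        if (err_ != hipSuccess) {                                        \
            std::printf("HIP error %d (%s) at %s:%d\n",                  \
                        (int)err_, hipGetErrorString(err_),              \
                        __FILE__, __LINE__);                             \
            std::exit(1);                                                \
        }                                                                \
    } while (0)

// Usage, mirroring the calls in main():
//   CHECK_HIP(hipMalloc((float**)&d_C, nBytes));
//   CHECK_HIP(hipDeviceSynchronize());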
480e68cf484a9720faac4693e6f2a4341589c5fc.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> inline void checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { printf("Error: %s : %d", __FILE__, __LINE__); printf("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result)); exit(1); } #endif } /* * simpleDivergence demonstrates divergent code on the GPU and its impact on * performance and CUDA metrics. */ __global__ void mathKernel1(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; if (tid % 2 == 0) { ia = 100.0f; } else { ib = 200.0f; } c[tid] = ia + ib; } __global__ void mathKernel2(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; if ((tid / warpSize) % 2 == 0) { ia = 100.0f; } else { ib = 200.0f; } c[tid] = ia + ib; } __global__ void mathKernel3(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; bool ipred = (tid % 2 == 0); if (ipred) { ia = 100.0f; } if (!ipred) { ib = 200.0f; } c[tid] = ia + ib; } __global__ void mathKernel4(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; int itid = tid >> 5; if (itid & 0x01 == 0) { ia = 100.0f; } else { ib = 200.0f; } c[tid] = ia + ib; } __global__ void warmingup(float *c) { int tid = blockIdx.x * blockDim.x + threadIdx.x; float ia, ib; ia = ib = 0.0f; if ((tid / warpSize) % 2 == 0) { ia = 100.0f; } else { ib = 200.0f; } c[tid] = ia + ib; } int main(int argc, char **argv) { // set up device int dev = 0; cudaDeviceProp deviceProp; checkCuda(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s using Device %d: %s\n", argv[0], dev, deviceProp.name); // set up data size int size = 64; int blocksize = 64; if (argc > 1) blocksize = atoi(argv[1]); if (argc > 2) size = atoi(argv[2]); printf("Data size %d ", size); // set up execution configuration dim3 block(blocksize, 1); dim3 grid((size + block.x - 1) / block.x, 1); printf("Execution Configure (block %d grid %d)\n", block.x, grid.x); // allocate gpu memory float *d_C; size_t nBytes = size * sizeof(float); checkCuda(cudaMalloc((float**)&d_C, nBytes)); // run a warmup kernel to remove overhead checkCuda(cudaDeviceSynchronize()); warmingup << <grid, block >> >(d_C); checkCuda(cudaDeviceSynchronize()); // run kernel 1 mathKernel1 << <grid, block >> >(d_C); checkCuda(cudaDeviceSynchronize()); // run kernel 2 mathKernel2 << <grid, block >> >(d_C); checkCuda(cudaDeviceSynchronize()); // run kernel 3 mathKernel3 << <grid, block >> >(d_C); checkCuda(cudaDeviceSynchronize()); // run kernel 4 mathKernel4 << <grid, block >> >(d_C); checkCuda(cudaDeviceSynchronize()); // free gpu memory and reset divece checkCuda(cudaFree(d_C)); checkCuda(cudaDeviceReset()); return 0; }
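In both the HIP and CUDA versions of this file, the condition in mathKernel4 reads itid & 0x01 == 0; since == binds tighter than &, it evaluates as itid & (0x01 == 0), which is always 0, so every thread takes the else branch. Assuming the intent was to alternate per warp (as the name and the tid >> 5 shift suggest), a parenthesised version would be:

__global__ void mathKernel4_fixed(float *c)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float ia = 0.0f;
    float ib = 0.0f;

    int itid = tid >> 5;            // warp index
    if ((itid & 0x01) == 0)         // even-numbered warps
    {
        ia = 100.0f;
    }
    else                            // odd-numbered warps
    {
        ib = 200.0f;
    }

    c[tid] = ia + ib;
}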
496bf0b89739161bc84d5dd258b1c90b7522d03d.hip
// !!! This is a file automatically generated by hipify!!! #include "calculateLoss.h" double calculate_loss(hipblasHandle_t handle, int* indptr, int* indices, double* data, double* X, double* Y, double reg, int users, int items, int factors, int nnz) { int loss = 0; int total_confidence = 0; int item_norm = 0; int user_norm = 0; hipblasStatus_t stat; hipError_t err; // malloc this double* YtY; err = hipMallocManaged(&YtY, factors*factors*sizeof(double)); if (err != hipSuccess) { printf("%s\n", hipGetErrorString(err)); hipFree(YtY); return -1; } const double alpha = 1; const double beta = 0; // do transpose stat = hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, factors, factors, items, &alpha, Y, items, Y, items, &beta, YtY, factors); if (stat != HIPBLAS_STATUS_SUCCESS) { printf ("dgemm failed\n"); hipFree(YtY); return -1; } for (int u = 0; u < users; ++u) { if (u % 10000 == 0) { printf("loss iter %d\n", u); } hipDeviceSynchronize(); double temp = 1.0; double* r; err = hipMallocManaged(&r, items*sizeof(double)); if (err != hipSuccess) { printf("%s\n", hipGetErrorString(err)); hipFree(YtY); hipFree(r); return -1; } double* Xu = &X[u*factors]; stat = hipblasDgemv(handle, HIPBLAS_OP_N, items, factors, &alpha, Y, items, Xu, 1, &beta, r, 1); if (stat != HIPBLAS_STATUS_SUCCESS) { printf ("dgemv failed\n"); hipFree(YtY); hipFree(r); return -1; } hipDeviceSynchronize(); int rowStart = indptr[u]; int rowEnd = indptr[u+1]; int cols[rowEnd-rowStart]; memcpy(cols, &indices[rowStart], (rowEnd-rowStart)*sizeof(int)); //int* cols = Cui[1][rowStart:rowEnd]; double vals[rowEnd-rowStart]; memcpy(vals, &data[rowStart], (rowEnd-rowStart)*sizeof(double)); //double* vals = Cui[2][rowStart:rowEnd]; for (int index = 0; index < rowEnd-rowStart; ++index) { int i = cols[index]; double confidence = vals[index]; double* Yi = &Y[i*factors]; hipDeviceSynchronize(); double d; stat = hipblasDdot(handle, factors, Yi, 1, Xu, 1, &d); if (stat != HIPBLAS_STATUS_SUCCESS) { printf ("ddot 1 failed\n"); hipFree(YtY); hipFree(r); return -1; } temp = (confidence - 1)*d - (2*confidence); hipDeviceSynchronize(); stat = hipblasDaxpy(handle, factors, &temp, Yi, 1, r, 1); if (stat != HIPBLAS_STATUS_SUCCESS) { printf ("daxpy failed\n"); hipFree(YtY); hipFree(r); return -1; } total_confidence += confidence; loss += confidence; } double other_temp; hipDeviceSynchronize(); stat = hipblasDdot(handle, factors, r, 1, Xu, 1, &other_temp); if (stat != HIPBLAS_STATUS_SUCCESS) { printf ("ddot 2 failed\n"); hipFree(YtY); hipFree(r); return -1; } loss += other_temp; hipDeviceSynchronize(); stat = hipblasDdot(handle, factors, Xu, 1, Xu, 1, &other_temp); if (stat != HIPBLAS_STATUS_SUCCESS) { printf ("ddot 3 failed\n"); hipFree(YtY); hipFree(r); return -1; } user_norm += other_temp; hipFree(r); } for (int i = 0; i < items; ++i) { hipDeviceSynchronize(); double* Yi = &Y[i*factors]; double other_temp; stat = hipblasDdot(handle, factors, Yi, 1, Yi, 1, &other_temp); if (stat != HIPBLAS_STATUS_SUCCESS) { printf ("ddot 4 failed\n"); hipFree(YtY); return -1; } item_norm += other_temp; } loss += reg * (item_norm + user_norm); hipFree(YtY); return loss / ((double) (total_confidence + users * items - nnz)); }
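Two things in calculate_loss() above look unintended (this is my reading, not stated in the source): the accumulators loss, total_confidence, item_norm and user_norm are declared int yet accumulate double partial sums, so each addition is truncated, and the managed vector r is allocated and freed once per user when a single allocation hoisted outside the loop would do. The first point is easy to demonstrate on the host:

#include <cstdio>

int main()
{
    int    loss_int    = 0;
    double loss_double = 0.0;
    for (int i = 0; i < 10; ++i)
    {
        loss_int    += 0.4;   // converted back to int: each contribution truncates to 0
        loss_double += 0.4;
    }
    std::printf("int accumulator: %d, double accumulator: %.1f\n",
                loss_int, loss_double);   // prints 0 and 4.0
    return 0;
}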
496bf0b89739161bc84d5dd258b1c90b7522d03d.cu
#include "calculateLoss.h" double calculate_loss(cublasHandle_t handle, int* indptr, int* indices, double* data, double* X, double* Y, double reg, int users, int items, int factors, int nnz) { int loss = 0; int total_confidence = 0; int item_norm = 0; int user_norm = 0; cublasStatus_t stat; cudaError_t err; // malloc this double* YtY; err = cudaMallocManaged(&YtY, factors*factors*sizeof(double)); if (err != cudaSuccess) { printf("%s\n", cudaGetErrorString(err)); cudaFree(YtY); return -1; } const double alpha = 1; const double beta = 0; // do transpose stat = cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, factors, factors, items, &alpha, Y, items, Y, items, &beta, YtY, factors); if (stat != CUBLAS_STATUS_SUCCESS) { printf ("dgemm failed\n"); cudaFree(YtY); return -1; } for (int u = 0; u < users; ++u) { if (u % 10000 == 0) { printf("loss iter %d\n", u); } cudaDeviceSynchronize(); double temp = 1.0; double* r; err = cudaMallocManaged(&r, items*sizeof(double)); if (err != cudaSuccess) { printf("%s\n", cudaGetErrorString(err)); cudaFree(YtY); cudaFree(r); return -1; } double* Xu = &X[u*factors]; stat = cublasDgemv(handle, CUBLAS_OP_N, items, factors, &alpha, Y, items, Xu, 1, &beta, r, 1); if (stat != CUBLAS_STATUS_SUCCESS) { printf ("dgemv failed\n"); cudaFree(YtY); cudaFree(r); return -1; } cudaDeviceSynchronize(); int rowStart = indptr[u]; int rowEnd = indptr[u+1]; int cols[rowEnd-rowStart]; memcpy(cols, &indices[rowStart], (rowEnd-rowStart)*sizeof(int)); //int* cols = Cui[1][rowStart:rowEnd]; double vals[rowEnd-rowStart]; memcpy(vals, &data[rowStart], (rowEnd-rowStart)*sizeof(double)); //double* vals = Cui[2][rowStart:rowEnd]; for (int index = 0; index < rowEnd-rowStart; ++index) { int i = cols[index]; double confidence = vals[index]; double* Yi = &Y[i*factors]; cudaDeviceSynchronize(); double d; stat = cublasDdot(handle, factors, Yi, 1, Xu, 1, &d); if (stat != CUBLAS_STATUS_SUCCESS) { printf ("ddot 1 failed\n"); cudaFree(YtY); cudaFree(r); return -1; } temp = (confidence - 1)*d - (2*confidence); cudaDeviceSynchronize(); stat = cublasDaxpy(handle, factors, &temp, Yi, 1, r, 1); if (stat != CUBLAS_STATUS_SUCCESS) { printf ("daxpy failed\n"); cudaFree(YtY); cudaFree(r); return -1; } total_confidence += confidence; loss += confidence; } double other_temp; cudaDeviceSynchronize(); stat = cublasDdot(handle, factors, r, 1, Xu, 1, &other_temp); if (stat != CUBLAS_STATUS_SUCCESS) { printf ("ddot 2 failed\n"); cudaFree(YtY); cudaFree(r); return -1; } loss += other_temp; cudaDeviceSynchronize(); stat = cublasDdot(handle, factors, Xu, 1, Xu, 1, &other_temp); if (stat != CUBLAS_STATUS_SUCCESS) { printf ("ddot 3 failed\n"); cudaFree(YtY); cudaFree(r); return -1; } user_norm += other_temp; cudaFree(r); } for (int i = 0; i < items; ++i) { cudaDeviceSynchronize(); double* Yi = &Y[i*factors]; double other_temp; stat = cublasDdot(handle, factors, Yi, 1, Yi, 1, &other_temp); if (stat != CUBLAS_STATUS_SUCCESS) { printf ("ddot 4 failed\n"); cudaFree(YtY); return -1; } item_norm += other_temp; } loss += reg * (item_norm + user_norm); cudaFree(YtY); return loss / ((double) (total_confidence + users * items - nnz)); }
d0b5d4b78e5d1433add7e74ba776af44b63d5c51.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { template<typename scalar_t, typename accscalar_t> struct MulScalarFunctor { MulScalarFunctor(accscalar_t b_): b(b_) {} __device__ scalar_t operator() (scalar_t a) const { return a * b; } private: accscalar_t b; }; template<typename scalar_t> struct DivFunctor { __device__ scalar_t operator() (scalar_t a, scalar_t b) const { return a / b; } }; template<typename scalar_t> struct MulFunctor { __device__ scalar_t operator() (scalar_t a, scalar_t b) const { return a * b; } }; // Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context] template<> struct MulFunctor<bool> { __device__ bool operator() (bool a, bool b) const { return a && b; } }; void div_kernel_cuda(TensorIterator& iter) { if (!isIntegralType(iter.common_dtype(), /*includeBool*/ false) && iter.is_cpu_scalar(2)) { // optimization for floating-point types: if the second operand is a CPU // scalar, compute a * reciprocal(b). Note that this may lose one bit of // precision compared to computing the division. AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; auto inv_b = accscalar_t(1.0) / iter.scalar_value<accscalar_t>(2); iter.remove_operand(2); MulScalarFunctor<scalar_t, decltype(inv_b)> f(inv_b); gpu_kernel(iter, f); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() { DivFunctor<scalar_t> f; gpu_kernel_with_scalars(iter, f); }); } } void mul_kernel_cuda(TensorIterator& iter) { if (!isIntegralType(iter.common_dtype(), /*includeBool*/ false) && (iter.is_cpu_scalar(1) || iter.is_cpu_scalar(2))) { //if common dtype is half the scalar constant can overflow in half precision, and yet the result can //still be representable in the half dtype. Cast scalar to acc_type to have better accuracy AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "mul_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; int scalar_arg = iter.is_cpu_scalar(1) ? 1 : 2; auto b = iter.scalar_value<accscalar_t>(scalar_arg); iter.remove_operand(scalar_arg); const hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(iter.tensor(1))); MulScalarFunctor<scalar_t, decltype(b)> f(b); gpu_kernel(iter, f); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "mul_cuda", [&]() { MulFunctor<scalar_t> f; gpu_kernel_with_scalars(iter, f); }); } } REGISTER_DISPATCH(div_stub, &div_kernel_cuda); REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda); }} // namespace at::native
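The comment in div_kernel_cuda() notes that multiplying by a precomputed reciprocal "may lose one bit of precision" compared with a true division. A small host-side check (my own illustration, not part of the PyTorch source) makes that concrete by counting how often the two roundings disagree for a fixed divisor:

#include <cstdio>

int main()
{
    const float b     = 7.0f;
    const float inv_b = 1.0f / b;       // rounded once here...
    int mismatches = 0;
    for (int i = 1; i <= 1000; ++i)
    {
        float a = (float)i;
        if (a / b != a * inv_b)         // ...and once more in the multiply
            ++mismatches;
    }
    std::printf("%d of 1000 numerators give a different result\n", mismatches);
    return 0;
}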
d0b5d4b78e5d1433add7e74ba776af44b63d5c51.cu
#include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> #include <c10/cuda/CUDAGuard.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { template<typename scalar_t, typename accscalar_t> struct MulScalarFunctor { MulScalarFunctor(accscalar_t b_): b(b_) {} __device__ scalar_t operator() (scalar_t a) const { return a * b; } private: accscalar_t b; }; template<typename scalar_t> struct DivFunctor { __device__ scalar_t operator() (scalar_t a, scalar_t b) const { return a / b; } }; template<typename scalar_t> struct MulFunctor { __device__ scalar_t operator() (scalar_t a, scalar_t b) const { return a * b; } }; // Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context] template<> struct MulFunctor<bool> { __device__ bool operator() (bool a, bool b) const { return a && b; } }; void div_kernel_cuda(TensorIterator& iter) { if (!isIntegralType(iter.common_dtype(), /*includeBool*/ false) && iter.is_cpu_scalar(2)) { // optimization for floating-point types: if the second operand is a CPU // scalar, compute a * reciprocal(b). Note that this may lose one bit of // precision compared to computing the division. AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; auto inv_b = accscalar_t(1.0) / iter.scalar_value<accscalar_t>(2); iter.remove_operand(2); MulScalarFunctor<scalar_t, decltype(inv_b)> f(inv_b); gpu_kernel(iter, f); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() { DivFunctor<scalar_t> f; gpu_kernel_with_scalars(iter, f); }); } } void mul_kernel_cuda(TensorIterator& iter) { if (!isIntegralType(iter.common_dtype(), /*includeBool*/ false) && (iter.is_cpu_scalar(1) || iter.is_cpu_scalar(2))) { //if common dtype is half the scalar constant can overflow in half precision, and yet the result can //still be representable in the half dtype. Cast scalar to acc_type to have better accuracy AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "mul_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; int scalar_arg = iter.is_cpu_scalar(1) ? 1 : 2; auto b = iter.scalar_value<accscalar_t>(scalar_arg); iter.remove_operand(scalar_arg); const cuda::OptionalCUDAGuard device_guard(device_of(iter.tensor(1))); MulScalarFunctor<scalar_t, decltype(b)> f(b); gpu_kernel(iter, f); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "mul_cuda", [&]() { MulFunctor<scalar_t> f; gpu_kernel_with_scalars(iter, f); }); } } REGISTER_DISPATCH(div_stub, &div_kernel_cuda); REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda); }} // namespace at::native
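For readers unfamiliar with the pattern, MulScalarFunctor above is simply a callable that captures the scalar once (at acc_type precision) and is then mapped over every element by gpu_kernel(). A host-side analogue of the same idea using std::transform (my sketch, not PyTorch code):

#include <algorithm>
#include <cstdio>
#include <vector>

template <typename scalar_t, typename accscalar_t>
struct HostMulScalar {
    explicit HostMulScalar(accscalar_t b_) : b(b_) {}
    scalar_t operator()(scalar_t a) const { return a * b; }
private:
    accscalar_t b;
};

int main()
{
    std::vector<float> x = {1.f, 2.f, 3.f};
    HostMulScalar<float, double> f(0.5);   // scalar held at higher precision
    std::transform(x.begin(), x.end(), x.begin(), f);
    for (float v : x) std::printf("%g ", v);   // prints: 0.5 1 1.5
    std::printf("\n");
    return 0;
}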
73afbf2617bf086ba4644d41c147b147efef5b51.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <algorithm> #include <random> #include <chrono> #include <sstream> #include <exception> #include <string> #include <cstddef> #include <iomanip> #include <hip/hip_runtime.h> #include <thrust/scan.h> #include "../helpers.hpp" namespace cpu { template <class T> void sort(const T* RESTRICT input, T* RESTRICT output, int N) { std::copy(input, input + N, output); std::sort(output, output + N); } template <class T> void radix_sort(const T* RESTRICT input, T* RESTRICT output, int N) { std::copy(input, input + N, output); constexpr int NUM_ELEMENTS_PER_SEGMENT = 16; constexpr int BITS_PER_ITERATION = 4; for (int starting_bit = 0; starting_bit < sizeof(T) * 8; starting_bit += BITS_PER_ITERATION) { bool masks[NUM_ELEMENTS_PER_SEGMENT]; int pos[NUM_ELEMENTS_PER_SEGMENT + 1]; T sorted_nums[NUM_ELEMENTS_PER_SEGMENT]; for (int pattern = (1 << (BITS_PER_ITERATION - 1)) - 1; pattern >= 0; pattern--) { for (int i = 0; i < NUM_ELEMENTS_PER_SEGMENT; i++) { auto value = output[i]; auto bits = (value >> starting_bit) & ((1 << (BITS_PER_ITERATION - 1)) - 1); masks[i] = (bits == pattern); } pos[0] = 0; for (int i = 1; i < NUM_ELEMENTS_PER_SEGMENT; i++) pos[i] = pos[i - 1] + masks[i - 1]; auto total_matches = pos[NUM_ELEMENTS_PER_SEGMENT - 1] + masks[NUM_ELEMENTS_PER_SEGMENT - 1]; for (int i = 0; i < NUM_ELEMENTS_PER_SEGMENT; i++) { if (masks[i]) sorted_nums[pos[i]] = output[i]; } // invert masks for (int i = 0; i < NUM_ELEMENTS_PER_SEGMENT; i++) masks[i] = !masks[i]; pos[0] = total_matches; for (int i = 1; i <= NUM_ELEMENTS_PER_SEGMENT; i++) pos[i] = pos[i - 1] + masks[i - 1]; for (int i = 0; i < NUM_ELEMENTS_PER_SEGMENT; i++) { if (masks[i]) sorted_nums[pos[i]] = output[i]; } std::cout << "PASS1\n"; print_range(sorted_nums, sorted_nums + NUM_ELEMENTS_PER_SEGMENT); std::cout << '\n'; std::copy(sorted_nums, sorted_nums + NUM_ELEMENTS_PER_SEGMENT, output); } } } } namespace thrust_gpu { template <class T> void sort(const T* input, T* output, int N) { thrust::copy(thrust::device, input, input + N, output); thrust::sort(thrust::device, output, output + N); } } namespace gpu { } int main () { using T = int; const int N = 16; constexpr float THRESHOLD = 0.001; std::vector<T> input(N); random_fill_integers(std::begin(input), std::end(input)); T* d_input, *d_output; CHECK_CUDA(hipMalloc(&d_input, input.size() * sizeof(T))); CHECK_CUDA(hipMalloc(&d_output, input.size() * sizeof(T))); std::vector<T> output_cpu(N); auto cpu_time = benchmark([&input, &output_cpu, N] { cpu::radix_sort(input.data(), output_cpu.data(), N); }); std::cout << "[CPU] Running time: " << to_milliseconds(cpu_time).count() << "ms\n"; std::cout << std::endl; print_range(std::begin(output_cpu), std::end(output_cpu)); std::vector<T> output_thrust(N); auto thrust_gpu_time = benchmark([&] { CHECK_CUDA(hipMemcpy(d_input, input.data(), input.size() * sizeof(T), hipMemcpyHostToDevice)); thrust_gpu::sort(d_input, d_output, N); CHECK_CUDA(hipGetLastError()); CHECK_CUDA(hipMemcpy(output_thrust.data(), d_output, output_thrust.size() * sizeof(T), hipMemcpyDeviceToHost)); }); std::cout << "[thrust] Running time (incl. 
memory copy): " << to_milliseconds(thrust_gpu_time).count() << "ms\n"; print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_thrust), THRESHOLD); std::cout << std::endl; // erase previous results CHECK_CUDA(hipMemset(d_output, 0, input.size() * sizeof(T))); std::vector<T> output_gpu(N); auto gpu_time = benchmark([&] { CHECK_CUDA(hipMemcpy(d_input, input.data(), input.size() * sizeof(T), hipMemcpyHostToDevice)); // gpu::inclusive_scan_v2(d_input, d_output, N); CHECK_CUDA(hipMemcpy(output_gpu.data(), d_output, output_gpu.size() * sizeof(T), hipMemcpyDeviceToHost)); }); std::cout << "[GPU] Running time (incl. memory copy): " << to_milliseconds(gpu_time).count() << "ms" << std::endl; print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_gpu), THRESHOLD); CHECK_CUDA(hipFree(d_input)); CHECK_CUDA(hipFree(d_output)); return 0; }
73afbf2617bf086ba4644d41c147b147efef5b51.cu
#include <iostream> #include <vector> #include <algorithm> #include <random> #include <chrono> #include <sstream> #include <exception> #include <string> #include <cstddef> #include <iomanip> #include <cuda_runtime.h> #include <thrust/scan.h> #include "../helpers.hpp" namespace cpu { template <class T> void sort(const T* RESTRICT input, T* RESTRICT output, int N) { std::copy(input, input + N, output); std::sort(output, output + N); } template <class T> void radix_sort(const T* RESTRICT input, T* RESTRICT output, int N) { std::copy(input, input + N, output); constexpr int NUM_ELEMENTS_PER_SEGMENT = 16; constexpr int BITS_PER_ITERATION = 4; for (int starting_bit = 0; starting_bit < sizeof(T) * 8; starting_bit += BITS_PER_ITERATION) { bool masks[NUM_ELEMENTS_PER_SEGMENT]; int pos[NUM_ELEMENTS_PER_SEGMENT + 1]; T sorted_nums[NUM_ELEMENTS_PER_SEGMENT]; for (int pattern = (1 << (BITS_PER_ITERATION - 1)) - 1; pattern >= 0; pattern--) { for (int i = 0; i < NUM_ELEMENTS_PER_SEGMENT; i++) { auto value = output[i]; auto bits = (value >> starting_bit) & ((1 << (BITS_PER_ITERATION - 1)) - 1); masks[i] = (bits == pattern); } pos[0] = 0; for (int i = 1; i < NUM_ELEMENTS_PER_SEGMENT; i++) pos[i] = pos[i - 1] + masks[i - 1]; auto total_matches = pos[NUM_ELEMENTS_PER_SEGMENT - 1] + masks[NUM_ELEMENTS_PER_SEGMENT - 1]; for (int i = 0; i < NUM_ELEMENTS_PER_SEGMENT; i++) { if (masks[i]) sorted_nums[pos[i]] = output[i]; } // invert masks for (int i = 0; i < NUM_ELEMENTS_PER_SEGMENT; i++) masks[i] = !masks[i]; pos[0] = total_matches; for (int i = 1; i <= NUM_ELEMENTS_PER_SEGMENT; i++) pos[i] = pos[i - 1] + masks[i - 1]; for (int i = 0; i < NUM_ELEMENTS_PER_SEGMENT; i++) { if (masks[i]) sorted_nums[pos[i]] = output[i]; } std::cout << "PASS1\n"; print_range(sorted_nums, sorted_nums + NUM_ELEMENTS_PER_SEGMENT); std::cout << '\n'; std::copy(sorted_nums, sorted_nums + NUM_ELEMENTS_PER_SEGMENT, output); } } } } namespace thrust_gpu { template <class T> void sort(const T* input, T* output, int N) { thrust::copy(thrust::device, input, input + N, output); thrust::sort(thrust::device, output, output + N); } } namespace gpu { } int main () { using T = int; const int N = 16; constexpr float THRESHOLD = 0.001; std::vector<T> input(N); random_fill_integers(std::begin(input), std::end(input)); T* d_input, *d_output; CHECK_CUDA(cudaMalloc(&d_input, input.size() * sizeof(T))); CHECK_CUDA(cudaMalloc(&d_output, input.size() * sizeof(T))); std::vector<T> output_cpu(N); auto cpu_time = benchmark([&input, &output_cpu, N] { cpu::radix_sort(input.data(), output_cpu.data(), N); }); std::cout << "[CPU] Running time: " << to_milliseconds(cpu_time).count() << "ms\n"; std::cout << std::endl; print_range(std::begin(output_cpu), std::end(output_cpu)); std::vector<T> output_thrust(N); auto thrust_gpu_time = benchmark([&] { CHECK_CUDA(cudaMemcpy(d_input, input.data(), input.size() * sizeof(T), cudaMemcpyHostToDevice)); thrust_gpu::sort(d_input, d_output, N); CHECK_CUDA(cudaGetLastError()); CHECK_CUDA(cudaMemcpy(output_thrust.data(), d_output, output_thrust.size() * sizeof(T), cudaMemcpyDeviceToHost)); }); std::cout << "[thrust] Running time (incl. 
memory copy): " << to_milliseconds(thrust_gpu_time).count() << "ms\n"; print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_thrust), THRESHOLD); std::cout << std::endl; // erase previous results CHECK_CUDA(cudaMemset(d_output, 0, input.size() * sizeof(T))); std::vector<T> output_gpu(N); auto gpu_time = benchmark([&] { CHECK_CUDA(cudaMemcpy(d_input, input.data(), input.size() * sizeof(T), cudaMemcpyHostToDevice)); // gpu::inclusive_scan_v2(d_input, d_output, N); CHECK_CUDA(cudaMemcpy(output_gpu.data(), d_output, output_gpu.size() * sizeof(T), cudaMemcpyDeviceToHost)); }); std::cout << "[GPU] Running time (incl. memory copy): " << to_milliseconds(gpu_time).count() << "ms" << std::endl; print_result(std::begin(output_cpu), std::end(output_cpu), std::begin(output_gpu), THRESHOLD); CHECK_CUDA(cudaFree(d_input)); CHECK_CUDA(cudaFree(d_output)); return 0; }
f25562fba59189ff5c67a65ded4e2e6264fb2dc3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ #include <assert.h> #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 #define BLOCK_SIZE 256 #define CONFLICT_FREE_OFFSET(n) ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) #define MAX_ELEMS_PER_BLOCK 2 * BLOCK_SIZE #define SHARED_MEMORY_SIZE MAX_ELEMS_PER_BLOCK + 30 __global__ void prescan(float *g_odata, float * g_idata, float *sums, int n, int max_per_block) { extern __shared__ float temp[]; int thid = threadIdx.x; int offset = 1; int ai = thid; int bi = thid + blockDim.x; int bankOffsetA = CONFLICT_FREE_OFFSET(ai); int bankOffsetB = CONFLICT_FREE_OFFSET(bi); unsigned int global_index = max_per_block * blockIdx.x + threadIdx.x; //This deals with the last block where it may not be completely full if (global_index < n) { temp[ai + bankOffsetA] = g_idata[global_index]; if (global_index + blockDim.x < n) temp[bi + bankOffsetB] = g_idata[global_index + blockDim.x]; } //instead of going through every element, we are only looking at the max number of elements // possible for this block //build tree for (int d = max_per_block >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset * (2*thid+1) - 1; int bi = offset * (2*thid+ 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset <<= 1; } //save sum to external array if (thid == 0) { sums[blockIdx.x] = temp[max_per_block - 1 + CONFLICT_FREE_OFFSET(max_per_block - 1)]; temp[max_per_block - 1 + CONFLICT_FREE_OFFSET(max_per_block - 1)] = 0; } //back down the tree we go for (int d = 1; d < max_per_block; d <<= 1) { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (2*thid + 1) - 1; int bi = offset * (2*thid + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int tmp = temp[ai]; temp[ai] = temp[bi]; temp[bi] += tmp; } } __syncthreads(); if (global_index < n) { g_odata[global_index] = temp[ai + CONFLICT_FREE_OFFSET(ai)]; if (global_index + blockDim.x < n) g_odata[global_index + blockDim.x] = temp[bi + CONFLICT_FREE_OFFSET(bi)]; } } __global__ void gpu_add_block_sums(float *g_odata, float* g_idata, float* sums, int n) { float d_block_sum_val = sums[blockIdx.x]; int global_index = 2 * blockIdx.x * blockDim.x + threadIdx.x; //if the global index is valid if (global_index < n) { g_odata[global_index] = g_idata[global_index] + d_block_sum_val; if (global_index + blockDim.x < n) g_odata[global_index + blockDim.x] = g_idata[global_index + blockDim.x] + d_block_sum_val; } } void prescanArray_recursive(float *outArray, float *inArray, int numElements) { dim3 blockDim; blockDim.x = BLOCK_SIZE; dim3 gridDim; int max_per_block = MAX_ELEMS_PER_BLOCK; gridDim.x = numElements / max_per_block; //If the elements don't fit perfectly into the //blocks then add another block which // will not be filled up all of the way. if (numElements % max_per_block != 0) gridDim.x += 1; int grid_size = gridDim.x; float* sums; (hipMalloc(&sums, sizeof(float) * gridDim.x)); (hipMemset(sums, 0, sizeof(float) * gridDim.x)); hipLaunchKernelGGL(( prescan), dim3(gridDim), dim3(blockDim), sizeof(float) * SHARED_MEMORY_SIZE, 0, outArray, inArray, sums, numElements, max_per_block); //The sums block could be greater than the number of available threads in a block //use recursion to keep doing the prescan and adding the blocks together until // they are able to fit into a single block of computation. 
So keep doing the // prescan until they fit, then add all of the blocks together at the end if (gridDim.x <= max_per_block) { float* d_dummy_blocks_sums; (hipMalloc(&d_dummy_blocks_sums, sizeof(float))); (hipMemset(d_dummy_blocks_sums, 0, sizeof(float))); hipLaunchKernelGGL(( prescan), dim3(1), dim3(blockDim), sizeof(float) * SHARED_MEMORY_SIZE, 0, sums, sums, d_dummy_blocks_sums, grid_size, max_per_block); (hipFree(d_dummy_blocks_sums)); } else { //Do recursion to restart everything float* inArray_tmp; (hipMalloc(&inArray_tmp, sizeof(float) * grid_size)); (hipMemcpy(inArray_tmp, sums, sizeof(float) * grid_size, hipMemcpyDeviceToDevice)); prescanArray_recursive(sums, inArray_tmp, grid_size); (hipFree(inArray_tmp)); } hipLaunchKernelGGL(( gpu_add_block_sums), dim3(gridDim), dim3(blockDim), 0, 0, outArray, outArray, sums, numElements); (hipFree(sums)); } void prescanArray(float *outArray, float *inArray, int numElements) { prescanArray_recursive(outArray, inArray, numElements); } #endif // _PRESCAN_CU_
f25562fba59189ff5c67a65ded4e2e6264fb2dc3.cu
#ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ #include <assert.h> #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 #define BLOCK_SIZE 256 #define CONFLICT_FREE_OFFSET(n) ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) #define MAX_ELEMS_PER_BLOCK 2 * BLOCK_SIZE #define SHARED_MEMORY_SIZE MAX_ELEMS_PER_BLOCK + 30 __global__ void prescan(float *g_odata, float * g_idata, float *sums, int n, int max_per_block) { extern __shared__ float temp[]; int thid = threadIdx.x; int offset = 1; int ai = thid; int bi = thid + blockDim.x; int bankOffsetA = CONFLICT_FREE_OFFSET(ai); int bankOffsetB = CONFLICT_FREE_OFFSET(bi); unsigned int global_index = max_per_block * blockIdx.x + threadIdx.x; //This deals with the last block where it may not be completely full if (global_index < n) { temp[ai + bankOffsetA] = g_idata[global_index]; if (global_index + blockDim.x < n) temp[bi + bankOffsetB] = g_idata[global_index + blockDim.x]; } //instead of going through every element, we are only looking at the max number of elements // possible for this block //build tree for (int d = max_per_block >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset * (2*thid+1) - 1; int bi = offset * (2*thid+ 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); temp[bi] += temp[ai]; } offset <<= 1; } //save sum to external array if (thid == 0) { sums[blockIdx.x] = temp[max_per_block - 1 + CONFLICT_FREE_OFFSET(max_per_block - 1)]; temp[max_per_block - 1 + CONFLICT_FREE_OFFSET(max_per_block - 1)] = 0; } //back down the tree we go for (int d = 1; d < max_per_block; d <<= 1) { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (2*thid + 1) - 1; int bi = offset * (2*thid + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int tmp = temp[ai]; temp[ai] = temp[bi]; temp[bi] += tmp; } } __syncthreads(); if (global_index < n) { g_odata[global_index] = temp[ai + CONFLICT_FREE_OFFSET(ai)]; if (global_index + blockDim.x < n) g_odata[global_index + blockDim.x] = temp[bi + CONFLICT_FREE_OFFSET(bi)]; } } __global__ void gpu_add_block_sums(float *g_odata, float* g_idata, float* sums, int n) { float d_block_sum_val = sums[blockIdx.x]; int global_index = 2 * blockIdx.x * blockDim.x + threadIdx.x; //if the global index is valid if (global_index < n) { g_odata[global_index] = g_idata[global_index] + d_block_sum_val; if (global_index + blockDim.x < n) g_odata[global_index + blockDim.x] = g_idata[global_index + blockDim.x] + d_block_sum_val; } } void prescanArray_recursive(float *outArray, float *inArray, int numElements) { dim3 blockDim; blockDim.x = BLOCK_SIZE; dim3 gridDim; int max_per_block = MAX_ELEMS_PER_BLOCK; gridDim.x = numElements / max_per_block; //If the elements don't fit perfectly into the //blocks then add another block which // will not be filled up all of the way. if (numElements % max_per_block != 0) gridDim.x += 1; int grid_size = gridDim.x; float* sums; (cudaMalloc(&sums, sizeof(float) * gridDim.x)); (cudaMemset(sums, 0, sizeof(float) * gridDim.x)); prescan<<<gridDim, blockDim, sizeof(float) * SHARED_MEMORY_SIZE>>>(outArray, inArray, sums, numElements, max_per_block); //The sums block could be greater than the number of available threads in a block //use recursion to keep doing the prescan and adding the blocks together until // they are able to fit into a single block of computation. 
So keep doing the // prescan until they fit, then add all of the blocks together at the end if (gridDim.x <= max_per_block) { float* d_dummy_blocks_sums; (cudaMalloc(&d_dummy_blocks_sums, sizeof(float))); (cudaMemset(d_dummy_blocks_sums, 0, sizeof(float))); prescan<<<1, blockDim, sizeof(float) * SHARED_MEMORY_SIZE>>>(sums, sums, d_dummy_blocks_sums, grid_size, max_per_block); (cudaFree(d_dummy_blocks_sums)); } else { //Do recursion to restart everything float* inArray_tmp; (cudaMalloc(&inArray_tmp, sizeof(float) * grid_size)); (cudaMemcpy(inArray_tmp, sums, sizeof(float) * grid_size, cudaMemcpyDeviceToDevice)); prescanArray_recursive(sums, inArray_tmp, grid_size); (cudaFree(inArray_tmp)); } gpu_add_block_sums<<<gridDim, blockDim>>>(outArray, outArray, sums, numElements); (cudaFree(sums)); } void prescanArray(float *outArray, float *inArray, int numElements) { prescanArray_recursive(outArray, inArray, numElements); } #endif // _PRESCAN_CU_
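prescanArray above computes a block-recursive exclusive prefix sum (Blelloch scan). Two notes for anyone validating it: the CONFLICT_FREE_OFFSET macro follows the GPU Gems padding formula, and since + binds tighter than >> it parses as ((n) >> (NUM_BANKS + (n))) >> (2 * LOG_NUM_BANKS), which is worth comparing against the commonly used (n) >> LOG_NUM_BANKS padding; and a tiny host reference such as the sketch below (names illustrative, not part of the file) gives an easy ground truth for the GPU output.

#include <cstdio>
#include <vector>

// Host reference: exclusive prefix sum, the operation prescanArray() computes.
std::vector<float> exclusive_scan_ref(const std::vector<float>& in) {
  std::vector<float> out(in.size());
  float running = 0.0f;
  for (size_t i = 0; i < in.size(); ++i) {
    out[i] = running;        // element i gets the sum of elements [0, i)
    running += in[i];
  }
  return out;
}

int main() {
  std::vector<float> in = {3, 1, 7, 0, 4, 1, 6, 3};
  std::vector<float> ref = exclusive_scan_ref(in);
  for (float x : ref) printf("%g ", x);   // 0 3 4 11 11 15 16 22
  printf("\n");
  return 0;
}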
196c603c2b96e186b674474d388aa62b94014c3b.hip
// !!! This is a file automatically generated by hipify!!!
#include "kernel_hip.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__host__ void callKernel(unsigned int size, int *c, const int *a, const int *b) {
    hipLaunchKernelGGL((addKernel), dim3(1), dim3(size), 0, 0, c, a, b);
}

__global__ void addKernel(int *c, const int *a, const int *b) {
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
196c603c2b96e186b674474d388aa62b94014c3b.cu
#include "kernel.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __host__ void callKernel(unsigned int size, int *c, const int *a, const int *b) { addKernel <<< 1, size >>> (c, a, b); } __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; }
43da051faf978eda5351599f57cf0556f4d5d6e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "basic/GPUBasic.h" #include "model/GPUDevice.h" #include "math/GPUMath.h" #include "math/GPUUnaryElementWise.h" #include "math/Tanh.h" namespace Deep8 { namespace Math { template <typename T> struct TanhKernelOp { DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x) { return cudaTanh(x); } }; void TanhGPU(const Tensor &x, Tensor &y) { auto n = (int)x.shape.size(); int blockSize = DEEP8_GPU_BLOCK_SIZE; int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE; switch (x.elementType.id) { case DType::Float32: hipLaunchKernelGGL(( UnaryElementWiseKernel<float, TanhKernelOp<float>>) , dim3(grideSize), dim3(blockSize), 0, 0, x.data<float>(), y.data<float>(), TanhKernelOp<float>(), n ); break; case DType::Float64: hipLaunchKernelGGL(( UnaryElementWiseKernel<double, TanhKernelOp<double>>) , dim3(grideSize), dim3(blockSize), 0, 0, x.data<double>(), y.data<double>(), TanhKernelOp<double>(), n ); break; #ifdef HAVE_HALF case DType::Float16: hipLaunchKernelGGL(( UnaryElementWiseKernel<half, TanhKernelOp<half>>) , dim3(grideSize), dim3(blockSize), 0, 0, x.data<half>(), y.data<half>(), TanhKernelOp<half>(), n ); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } template <typename T> struct TanhGradKernelOp { DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x, const T &y, const T &dy) { return dy * (T(1.0) - y * y); } }; void TanhGradGPU(const Tensor &x, Tensor &dx, const Tensor &y, const Tensor &dy) { auto n = (int)dx.shape.size(); int blockSize = DEEP8_GPU_BLOCK_SIZE; int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE; switch (x.elementType.id) { case DType::Float32: hipLaunchKernelGGL(( UnaryElementWiseGradKernel<float, TanhGradKernelOp<float>>) , dim3(grideSize), dim3(blockSize), 0, 0, x.data<float>(), dx.data<float>(), y.data<float>(), dy.data<float>(), TanhGradKernelOp<float>(), n ); break; case DType::Float64: hipLaunchKernelGGL(( UnaryElementWiseGradKernel<double, TanhGradKernelOp<double>>) , dim3(grideSize), dim3(blockSize), 0, 0, x.data<double>(), dx.data<double>(), y.data<double>(), dy.data<double>(), TanhGradKernelOp<double>(), n ); break; #ifdef HAVE_HALF case DType::Float16: hipLaunchKernelGGL(( UnaryElementWiseGradKernel<half, TanhGradKernelOp<half>>) , dim3(grideSize), dim3(blockSize), 0, 0, x.data<half>(), dx.data<half>(), y.data<half>(), dy.data<half>(), TanhGradKernelOp<half>(), n ); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } } }
43da051faf978eda5351599f57cf0556f4d5d6e7.cu
#include "basic/GPUBasic.h" #include "model/GPUDevice.h" #include "math/GPUMath.h" #include "math/GPUUnaryElementWise.h" #include "math/Tanh.h" namespace Deep8 { namespace Math { template <typename T> struct TanhKernelOp { DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x) { return cudaTanh(x); } }; void TanhGPU(const Tensor &x, Tensor &y) { auto n = (int)x.shape.size(); int blockSize = DEEP8_GPU_BLOCK_SIZE; int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE; switch (x.elementType.id) { case DType::Float32: UnaryElementWiseKernel<float, TanhKernelOp<float>> <<<grideSize, blockSize>>>( x.data<float>(), y.data<float>(), TanhKernelOp<float>(), n ); break; case DType::Float64: UnaryElementWiseKernel<double, TanhKernelOp<double>> <<<grideSize, blockSize>>>( x.data<double>(), y.data<double>(), TanhKernelOp<double>(), n ); break; #ifdef HAVE_HALF case DType::Float16: UnaryElementWiseKernel<half, TanhKernelOp<half>> <<<grideSize, blockSize>>>( x.data<half>(), y.data<half>(), TanhKernelOp<half>(), n ); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } template <typename T> struct TanhGradKernelOp { DEEP8_CUDA_FUNC DEEP8_CUDA_INLINE T operator()(const T &x, const T &y, const T &dy) { return dy * (T(1.0) - y * y); } }; void TanhGradGPU(const Tensor &x, Tensor &dx, const Tensor &y, const Tensor &dy) { auto n = (int)dx.shape.size(); int blockSize = DEEP8_GPU_BLOCK_SIZE; int grideSize = (n + DEEP8_GPU_BLOCK_SIZE - 1) / DEEP8_GPU_BLOCK_SIZE; switch (x.elementType.id) { case DType::Float32: UnaryElementWiseGradKernel<float, TanhGradKernelOp<float>> <<<grideSize, blockSize>>> ( x.data<float>(), dx.data<float>(), y.data<float>(), dy.data<float>(), TanhGradKernelOp<float>(), n ); break; case DType::Float64: UnaryElementWiseGradKernel<double, TanhGradKernelOp<double>> <<<grideSize, blockSize>>> ( x.data<double>(), dx.data<double>(), y.data<double>(), dy.data<double>(), TanhGradKernelOp<double>(), n ); break; #ifdef HAVE_HALF case DType::Float16: UnaryElementWiseGradKernel<half, TanhGradKernelOp<half>> <<<grideSize, blockSize>>> ( x.data<half>(), dx.data<half>(), y.data<half>(), dy.data<half>(), TanhGradKernelOp<half>(), n ); break; #endif default: DEEP8_RUNTIME_ERROR("type " << x.elementType.name << " is not support"); break; } } } }
dbf254ec30b8548e60e7fada2932eac0c9a8faf1.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief * ragged * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey * Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <hipcub/hipcub.hpp> #include <vector> #include "k2/csrc/array_ops.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" namespace { // will be used in RaggedShape::MaxSize(int32_t axis) to call // hipcub::DeviceReduce::Max struct RowSplitsDiff { const int32_t *row_splits_data; explicit RowSplitsDiff(const int32_t *row_splits) : row_splits_data(row_splits) {} // operator[] and operator+ are required by hipcub::DeviceReduce::Max __device__ int32_t operator[](int32_t i) const { return row_splits_data[i + 1] - row_splits_data[i]; } __device__ RowSplitsDiff operator+(int32_t n) const { RowSplitsDiff tmp(*this); tmp.row_splits_data += n; return tmp; } }; /* A helper function used in RaggedShape3; if both first and second are non-NULL, it will check if the context of them is compatible or not and return that context if compatible; if one of them is NULL, returns the other one's context. */ static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first, const k2::Array1<int32_t> *second) { K2_CHECK(first != nullptr || second != nullptr) << "At least one of first and second must be non-NULL"; if (first == nullptr) return second->Context(); else if (second == nullptr) return first->Context(); else return k2::GetContext(*first, *second); } } // namespace namespace std { // vaule_type is required by hipcub::DeviceReduce::Max template <> struct iterator_traits<::RowSplitsDiff> { typedef int32_t value_type; }; } // namespace std namespace k2 { RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes, int32_t max_num_axes, int32_t min_num_elements, int32_t max_num_elements) { ContextPtr c = GetCpuContext(); K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes && min_num_elements >= 0 && max_num_elements >= min_num_elements); int32_t num_axes = RandInt(min_num_axes, max_num_axes); int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements); bool done_repeats = false; std::vector<RaggedShapeDim> axes(num_axes - 1); for (int32_t axis = num_axes - 2; axis >= 0; axis--) { // this axis will have row_ids of length num_elements and // row_splits of length to be determined. int32_t cur_row_split = 0; std::vector<int32_t> row_splits_vec; std::vector<int32_t> row_ids_vec; row_splits_vec.push_back(cur_row_split); // The reason for "|| RandInt(0, 2) == 0)" is so that even if there // are no elements we can still potentially generate empty row-splits. while (cur_row_split < num_elements || RandInt(0, 2) == 0) { int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split); cur_row_split += split_size; // sometimes we have a bunch of empty rows in a row (this will test out // more of the code), so here we generate a bunch of empty rows, but we // just do this only once (that's why we declare `done_repeats` here). if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) { int32_t num_repeats = RandIntGeometric(1, 128); row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split); // don't need to set `row_ids_vec` as there's no element. 
done_repeats = true; } row_splits_vec.push_back(cur_row_split); if (set_row_ids) { int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2; row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row); } } axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec); if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec); axes[axis].cached_tot_size = num_elements; num_elements = axes[axis].row_splits.Dim() - 1; } // RaggedShape(axes, true) will check the returned RaggedShape for // consistency. return RaggedShape(axes, true); } // Recursive function that prints (part of) a ragged shape. // 0 <= begin_pos <= end_pos < shape.TotSize(axis). void PrintRaggedShapePart(std::ostream &stream, RaggedShape &shape, int32_t axis, int32_t begin_pos, int32_t end_pos) { K2_CHECK(axis >= 0 && axis < shape.NumAxes() && begin_pos >= 0 && begin_pos <= end_pos && end_pos <= shape.TotSize(axis)); for (int32_t d = begin_pos; d < end_pos; ++d) { if (axis == shape.NumAxes() - 1) { stream << d << " "; } else { stream << "[ "; const int32_t *row_splits = shape.RowSplits(axis + 1).Data(); K2_DCHECK(d < shape.RowSplits(axis + 1).Dim()); int32_t row_start = row_splits[d], row_end = row_splits[d + 1]; PrintRaggedShapePart(stream, shape, axis + 1, row_start, row_end); stream << "] "; } } } // prints a RaggedShape as e.g. [ [ 0 1 ] [ 2 ] [] ]. Note, the 'values' // are just the positions in the array, this is for readability. std::ostream &operator<<(std::ostream &stream, RaggedShape &shape) { if (shape.Context()->GetDeviceType() != kCpu) { return stream << shape.To(GetCpuContext()); } else { stream << "[ "; PrintRaggedShapePart(stream, shape, 0, 0, shape.Dim0()); stream << "]"; return stream; } } Array1<int32_t> &RaggedShape::RowIds(int32_t axis) { K2_CHECK_GT(axis, 0); K2_CHECK_LT(axis, NumAxes()); RaggedShapeDim &rsd = axes_[axis - 1]; auto &row_splits = rsd.row_splits; auto &row_ids = rsd.row_ids; // there must be row_splits.Dim() >=1 according to the definition of // RaggedShapeDim. 
K2_CHECK_GE(row_splits.Dim(), 1); if (row_splits.Dim() != 1 && row_ids.Dim() == 0) { // create row_ids as it does not exist row_ids = Array1<int32_t>(Context(), row_splits[row_splits.Dim() - 1]); const int32_t *row_splits_data = row_splits.Data(); int32_t *row_ids_data = row_ids.Data(); RowSplitsToRowIds(Context(), row_splits.Dim() - 1, row_splits_data, row_ids.Dim(), row_ids_data); // set cached_tot_size rsd.cached_tot_size = row_ids.Dim(); } return row_ids; } int32_t RaggedShape::MaxSize(int32_t axis) { K2_CHECK_GT(axis, 0); K2_CHECK_LT(axis, NumAxes()); const auto &row_splits = axes_[axis - 1].row_splits; const int32_t num_rows = row_splits.Dim() - 1; if (num_rows == 0) return 0; const int32_t *row_splits_data = row_splits.Data(); ContextPtr c = Context(); if (c->GetDeviceType() == kCpu) { int32_t max_value = 0; for (int32_t i = 0; i < num_rows; ++i) { int32_t value = row_splits_data[i + 1] - row_splits_data[i]; if (value > max_value) max_value = value; } return max_value; } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); ::RowSplitsDiff row_splits_diff(row_splits_data); Array1<int32_t> max_array(Context(), 1, 0); int32_t *max_value = max_array.Data(); void *d_temp_storage = nullptr; size_t temp_storage_bytes = 0; // the first time is to determine temporary device storage requirements K2_CHECK_CUDA_ERROR(hipcub::DeviceReduce::Max( d_temp_storage, temp_storage_bytes, row_splits_diff, max_value, num_rows, c->GetCudaStream())); void *deleter_context; d_temp_storage = c->Allocate(temp_storage_bytes, &deleter_context); K2_CHECK_CUDA_ERROR(hipcub::DeviceReduce::Max( d_temp_storage, temp_storage_bytes, row_splits_diff, max_value, num_rows, c->GetCudaStream())); c->Deallocate(d_temp_storage, deleter_context); // this will convert to memory on CPU return max_array[0]; } } RaggedShape RaggedShape::Index(int32_t axis, int32_t i) { // only support `axis == 0` for now K2_CHECK_EQ(axis, 0); K2_CHECK_GE(i, 0); int32_t num_axes = NumAxes(); K2_CHECK_GE(num_axes, 2); const auto &src_axes = Axes(); K2_CHECK_LT(i + 1, src_axes[0].row_splits.Dim()); int32_t idx = src_axes[0].row_splits[i]; int32_t idx_next = src_axes[0].row_splits[i + 1]; std::vector<RaggedShapeDim> axes(src_axes.size() - 1); ContextPtr c = Context(); for (int32_t i = 2; i < num_axes; ++i) { const Array1<int32_t> &src_row_splits = src_axes[i - 1].row_splits; int32_t num_rows = idx_next - idx; int32_t offset = idx; idx = src_row_splits[idx]; idx_next = src_row_splits[idx_next]; // allocate new memory here as we need to change the values, // i.e. subtracts the offset. axes[i - 2].row_splits = Array1<int32_t>(c, num_rows + 1); int32_t *data = axes[i - 2].row_splits.Data(); const int32_t *src_data = src_row_splits.Data(); auto lambda_set_values = [=] __host__ __device__(int32_t i) -> void { data[i] = src_data[i + offset] - idx; }; Eval(c, num_rows + 1, lambda_set_values); // leave row_ids and cached_tot_size unset axes[i - 2].cached_tot_size = -1; } RaggedShape shape(axes, true); return shape; } void RaggedShape::Populate() { int32_t num_axes = NumAxes(); for (int32_t i = 1; i < num_axes; ++i) { // ignore return values of the following calls. 
this->TotSize(i); this->RowIds(i); } } RaggedShape RaggedShape::To(ContextPtr ctx) const { if (ctx->IsCompatible(*Context())) return *this; std::vector<RaggedShapeDim> axes(axes_.size()); int32_t num_axes = NumAxes(); for (int32_t i = 1; i < num_axes; ++i) { axes[i - 1].row_splits = axes_[i - 1].row_splits.To(ctx); // leave row_ids and cached_tot_size unset axes[i - 1].cached_tot_size = -1; } return RaggedShape(axes); } RaggedShapeIndexIterator RaggedShape::Iterator() { return RaggedShapeIndexIterator(*this); } int32_t RaggedShape::operator[](const std::vector<int32_t> &indexes) { K2_CHECK(indexes.size() == NumAxes()); K2_CHECK(Context()->GetDeviceType() == kCpu); int32_t cur_idx = indexes[0]; for (int32_t i = 1; i < NumAxes(); i++) { Array1<int32_t> &row_splits = axes_[i - 1].row_splits; K2_CHECK(cur_idx >= 0 && cur_idx + 1 < row_splits.Dim()); cur_idx = row_splits[cur_idx]; cur_idx += indexes[i]; } return cur_idx; } int32_t RaggedShape::TotSize(int32_t axis) const { K2_CHECK_GE(axis, 0); K2_CHECK_LT(axis, NumAxes()); if (axis == 0) return Dim0(); else { const RaggedShapeDim &rsd = axes_[axis - 1]; if (rsd.cached_tot_size >= 0) { return rsd.cached_tot_size; } else { // if we had row_ids set up, we should have set cached_tot_size. K2_CHECK_EQ(rsd.row_ids.Dim(), 0); K2_CHECK_GT(rsd.row_splits.Dim(), 0); const_cast<RaggedShapeDim &>(rsd).cached_tot_size = rsd.row_splits.Back(); return rsd.cached_tot_size; } } } void RaggedShape::Check() { ContextPtr c = Context(); int32_t num_axes = axes_.size(); for (int32_t axis = 0; axis < num_axes; ++axis) { RaggedShapeDim &rsd = axes_[axis]; K2_CHECK_GE(rsd.row_splits.Dim(), 0); if (rsd.cached_tot_size >= 0) { K2_CHECK(rsd.row_splits.Dim() == 0 || rsd.cached_tot_size == rsd.row_splits.Back()); K2_CHECK(rsd.row_ids.Dim() == 0 || rsd.cached_tot_size == rsd.row_ids.Dim()); } else { K2_CHECK_EQ(rsd.cached_tot_size, -1); K2_CHECK_EQ(rsd.row_ids.Dim(), 0); } int32_t num_elems; // Check row_splits. { // meta[0] is a bool, ok == 1, not-ok == 0. // meta[1] will contain the number of row_splits. Array1<int32_t> meta(c, 2, 1); int32_t *ok_data = meta.Data(), *num_elems_data = ok_data + 1; const int32_t *row_splits_data = rsd.row_splits.Data(); int32_t num_rows = rsd.row_splits.Dim() - 1; auto lambda_check_row_splits = [=] __host__ __device__(int32_t i) -> void { int32_t this_idx = row_splits_data[i]; if (i == 0 && this_idx != 0) *ok_data = 0; if (i < num_rows) { int32_t next_idx = row_splits_data[i + 1]; if (next_idx < this_idx) *ok_data = 0; } else { K2_CHECK(i == num_rows); *num_elems_data = this_idx; } }; Eval(c, num_rows + 1, lambda_check_row_splits); meta = meta.To(GetCpuContext()); num_elems = meta[1]; int32_t ok = meta[0]; if (!ok) { K2_LOG(FATAL) << "Problem validating row-splits: for axes_[" << axis << "], row_splits = " << rsd.row_splits; } if (rsd.cached_tot_size > 0 && rsd.cached_tot_size != num_elems) { K2_LOG(FATAL) << "Problem validating row-splits: for axes_[" << axis << "], row_splits[-1] = " << num_elems << " but cached_tot_size == " << rsd.cached_tot_size; } } if (axis + 1 < num_axes) { int32_t next_num_rows = axes_[axis + 1].row_splits.Dim() - 1; if (num_elems != next_num_rows) { K2_LOG(FATAL) << "Ragged shape has num_elems for axes_[" << axis << "] == " << num_elems << " and num-rows for axes_[" << (axis + 1) << "] == " << next_num_rows; } } if (rsd.row_ids.Dim() != 0) { // check row_ids. 
K2_CHECK(IsCompatible(rsd.row_ids, rsd.row_splits)); // 1st elem is `ok` (1 or 0); 2nd elem is location of bad index // into row_splits Array1<int32_t> meta(c, 2, 1); int32_t *ok_data = meta.Data(), *bad_index_data = ok_data + 1; const int32_t *row_splits_data = rsd.row_splits.Data(), *row_ids_data = rsd.row_ids.Data(); int32_t num_elems_from_row_ids = rsd.row_ids.Dim(), num_rows = rsd.row_splits.Dim() - 1; K2_CHECK_EQ(num_elems, num_elems_from_row_ids); auto lambda_check_row_ids = [=] __host__ __device__(int32_t i) -> void { int32_t this_row = row_ids_data[i]; if (this_row < 0 || this_row >= num_rows || i < row_splits_data[this_row] || i >= row_splits_data[this_row + 1]) { *ok_data = 0; *bad_index_data = i; } }; // TODO: could do this and the other one in separate streams. Eval(c, num_elems, lambda_check_row_ids); meta = meta.To(GetCpuContext()); // since we have 2 accesses, this should // be faster. int32_t ok = meta[0]; if (!ok) { K2_LOG(FATAL) << "Problem validating row-ids: for axes_[" << axis << "], row_splits = " << rsd.row_splits << ", row_ids = " << rsd.row_ids << ", see index " << meta[1] << " of row_ids, whose dim is " << rsd.row_ids.Dim(); } } if (axis + 1 < axes_.size()) { K2_CHECK(IsCompatible(rsd.row_splits, axes_[axis + 1].row_splits)); } } } RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids, int32_t cached_tot_size) { K2_CHECK(row_splits != nullptr || row_ids != nullptr) << "At least one of row_splits and row_ids must be defined"; ContextPtr ctx = ::GetContext(row_splits, row_ids); if (cached_tot_size != -1) { if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim()); if (row_splits != nullptr) { // may be slow as it may copy memory from device to host K2_CHECK_EQ(cached_tot_size, row_splits->Back()); } } std::vector<RaggedShapeDim> axes(1); if (row_splits != nullptr) { axes[0].row_splits = *row_splits; } else { // we need to work out row_splits as we always require row_splits is not // empty for RaggedShape. Note here we suppose the last element in row_ids // is num_rows - 1, i.e. there's no empty rows after row `row_ids[-1]`. int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1; Array1<int32_t> row_splits_array(ctx, num_rows + 1); RowIdsToRowSplits(*row_ids, row_splits_array); axes[0].row_splits = row_splits_array; } if (row_ids != nullptr) axes[0].row_ids = *row_ids; axes[0].cached_tot_size = cached_tot_size; // note below line will check if row_splits and row_ids are valid and agree // with each other. return RaggedShape(axes); } RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) { if (a.NumElements() != b.Dim0()) { K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements() << " vs. 
" << b.Dim0(); } const auto &a_axes = a.Axes(); const auto &b_axes = b.Axes(); std::vector<RaggedShapeDim> axes(a_axes.size() + b_axes.size()); std::size_t a_size = a_axes.size(), b_size = b_axes.size(); for (std::size_t i = 0; i < a_size; ++i) axes[i] = a_axes[i]; for (std::size_t i = 0; i < b_size; ++i) axes[i + a_size] = b_axes[i]; return RaggedShape(axes); } RaggedShape RaggedShape3(Array1<int32_t> *row_splits1, Array1<int32_t> *row_ids1, int32_t cached_tot_size1, Array1<int32_t> *row_splits2, Array1<int32_t> *row_ids2, int32_t cached_tot_size2) { K2_CHECK(row_splits1 != nullptr || row_ids1 != nullptr) << "At least one of row_splits1 and row_ids1 must be defined"; K2_CHECK(row_splits2 != nullptr || row_ids2 != nullptr) << "At least one of row_splits2 and row_ids2 must be defined"; // check context ContextPtr ctx1 = ::GetContext(row_splits1, row_ids1); ContextPtr ctx2 = ::GetContext(row_splits2, row_ids2); K2_CHECK(ctx1->IsCompatible(*ctx2)); // check row_splits and row_ids of axis-1 if (cached_tot_size1 != -1) { if (row_ids1 != nullptr) K2_CHECK_EQ(cached_tot_size1, row_ids1->Dim()); if (row_splits1 != nullptr) { // may be slow as it may copy memory from device to host K2_CHECK_EQ(cached_tot_size1, row_splits1->Back()); } } // check row_splits and row_ids of axis-2 if (cached_tot_size2 != -1) { if (row_ids2 != nullptr) K2_CHECK_EQ(cached_tot_size2, row_ids2->Dim()); if (row_splits2 != nullptr) { // may be slow as it may copy memory from device to host K2_CHECK_EQ(cached_tot_size2, row_splits2->Back()); } } std::vector<RaggedShapeDim> axes(2); // set row_splits and row_ids for axis 1 if (row_splits1 != nullptr) { axes[0].row_splits = *row_splits1; } else { // work out row_splits1, see code in RaggedShape2 above for the reason int32_t num_rows = row_ids1->Dim() == 0 ? 0 : row_ids1->Back() + 1; Array1<int32_t> row_splits_array(ctx1, num_rows + 1); RowIdsToRowSplits(*row_ids1, row_splits_array); axes[0].row_splits = row_splits_array; } if (row_ids1 != nullptr) axes[0].row_ids = *row_ids1; axes[0].cached_tot_size = cached_tot_size1; // set row_splits and row_ids for axis 2 if (row_splits2 != nullptr) { axes[1].row_splits = *row_splits1; } else { // work out row_splits1, see code in RaggedShape2 above for the reason int32_t num_rows = row_ids2->Dim() == 0 ? 0 : row_ids2->Back() + 1; Array1<int32_t> row_splits_array(ctx1, num_rows + 1); RowIdsToRowSplits(*row_ids2, row_splits_array); axes[1].row_splits = row_splits_array; } if (row_ids2 != nullptr) axes[1].row_ids = *row_ids2; axes[1].cached_tot_size = cached_tot_size2; // we don't check here if // row_splits1[row_splits1.Dim() - 1] == row_ids1.Dim() // == (row_splits2.Dim() - 1) // >= (row_ids2[row_ids2.Dim() - 1] + 1) // but RaggedShape(axes) below will check this. return RaggedShape(axes); } RaggedShape RaggedShapeFromTotSizes(ContextPtr &c, int32_t num_axes, int32_t *tot_sizes) { K2_CHECK_GE(num_axes, 2); std::vector<RaggedShapeDim> axes(num_axes - 1); // In future we might choose to allocate everything in one big array, to avoid // multiple allocations, but for now just do it the simple way. 
for (int32_t axis = 1; axis < num_axes; ++axis) { axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1); axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]); axes[axis - 1].cached_tot_size = tot_sizes[axis]; } return RaggedShape(axes); } Array1<int32_t *> GetRowSplitsPtr(RaggedShape &src) { int32_t axes = src.NumAxes(); K2_CHECK_GE(axes, 2); std::vector<int32_t *> row_splits_start(axes - 1); for (int32_t i = 1; i != axes; ++i) { Array1<int32_t> &cur_splits = src.RowSplits(i); row_splits_start[i - 1] = cur_splits.Data(); } return Array1<int32_t *>(src.Context(), row_splits_start); } // See declaration in ragged.h for documentation of its purpose and interface. RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) { // If axis == 0, initial row_splits and row_ids will look like the following, // if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes // would be pushed forward. // // If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would // look like the following, if for instance the src.TotSize(axis-1) = 8: // [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ]. // // The reason why the code is different for axis == 0, is that in that case we // are really making visible an "implicit" axis of the input `src`; we could // call it axis 0 of the original RaggedShape. Imagine that "implicit" axis's // row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from // an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 < // Dim0(). ContextPtr c = src.Context(); K2_CHECK(axis >= 0 && axis <= src.NumAxes()); const std::vector<RaggedShapeDim> &axes_in = src.Axes(); int32_t num_axes_in = src.NumAxes(); // Note: in RaggedShape, the vector of RaggedShapeDim is of length // num_axes - 1, so the output will have one more axis than the input. std::vector<RaggedShapeDim> axes_out(num_axes_in); int32_t row_splits_dim, row_ids_dim; Array1<int32_t> mem; if (axis == 0) { row_splits_dim = 2; // e.g. [ 0 5 ] row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ] mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); auto lambda_set_mem = [=] __host__ __device__(int32_t i) -> void { if (i == 1) mem_data[i] = row_ids_dim; else mem_data[i] = 0; }; Eval(c, mem.Dim(), lambda_set_mem); } else { int32_t tot_size = src.TotSize(axis - 1); row_splits_dim = tot_size + 1; row_ids_dim = tot_size; mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); auto lambda_set_mem2 = [=] __host__ __device__(int32_t i) -> void { mem_data[i] = i % (tot_size + 1); }; Eval(c, mem.Dim(), lambda_set_mem2); } axes_out[axis].row_splits = mem.Range(0, row_splits_dim); axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim); for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i]; // Note: the returned array has `num_axes_in + 1` axes, so its // array of RaggedShapeDim is of length `num_axes_in`. for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1]; return RaggedShape(axes_out); } RaggedShape Renumber(RaggedShape &src, const Array1<int32_t> &new2old) { ContextPtr c = src.Context(); K2_CHECK(IsCompatible(src, new2old)); int32_t num_axes = src.NumAxes(), dim0 = src.Dim0(); K2_CHECK_EQ(new2old.Dim(), dim0); std::vector<int32_t> tot_sizes_out(num_axes); for (int32_t axis = 0; axis < num_axes; axis++) tot_sizes_out[axis] = src.TotSize(axis); // the arrays in `ans` will be the same sizes as those in `src`. 
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.data()); src.Populate(); Array2<int32_t> old_offsets(c, num_axes, dim0 + 1), new_offsets(c, num_axes, dim0 + 1); auto old_offsets_acc = old_offsets.Accessor(), new_offsets_acc = new_offsets.Accessor(); Array1<int32_t *> row_splits_ptrs = GetRowSplitsPtr(src); int32_t **row_splits_ptrs_data = row_splits_ptrs.Data(); // Set old_offsets auto lambda_get_old_offsets = [=] __host__ __device__(int32_t i) { // 0 <= i <= dim0 int32_t cur_offset = i; for (int32_t axis = 0; axis < num_axes; axis++) { old_offsets_acc(0, i) = cur_offset; if (axis + 1 == num_axes) return; cur_offset = row_splits_ptrs_data[axis][cur_offset]; } }; Eval(c, dim0 + 1, lambda_get_old_offsets); const int32_t *new2old_data = new2old.Data(); auto lambda_get_new_offsets = [=] __host__ __device__(int32_t axis, int32_t new_i) { // 0 <= axis < num_axes; 0 <= new_i < dim0 int32_t old_i = new2old_data[new_i], this_old_offset = old_offsets_acc(axis, old_i), next_old_offset = old_offsets_acc(axis, old_i + 1), size = next_old_offset - this_old_offset; new_offsets_acc(axis, new_i) = size; }; Eval2(c, num_axes, dim0, lambda_get_new_offsets); ExclusiveSum(new_offsets, &new_offsets); // Now new_offsets contains the offsets, not the sizes. ParallelRunner pr(c); std::vector<hipStream_t> streams(num_axes); int32_t num_jobs = dim0 * 2; // note: this formula is not a heuristic; it's // how TaskRedirect works.. Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs); auto task_redirects_acc = task_redirects.Accessor(); for (int32_t axis = 0; axis < num_axes; axis++) { streams[axis] = pr.NewStream(); With w(streams[axis]); const int32_t *new_offsets_ptr = new_offsets_acc.Row(axis); TaskRedirect *task_redirect_ptr = task_redirects_acc.Row(axis); GetTaskRedirect(c, dim0, new_offsets_ptr, task_redirect_ptr); } for (int32_t axis = 0; axis < num_axes - 1; axis++) { { int32_t *this_new_row_splits = ans.RowSplits(axis).Data(); const int32_t *this_old_row_splits = src.RowSplits(axis).Data(); auto lambda_set_row_splits = [=] __host__ __device__( int32_t new_idx, int32_t num_threads, int32_t thread_idx) -> void { // 0 <= new_idx < dim0; and 0 <= thread_idx < num_threads, // num_threads may have any value > 0 as far as this code is concerned. // // Reminder of how row_splits work dimensionally: they are a map // from, e.g. an idx0 to an idx01. An offsets_acc(0,n) is // dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on. // The locations in the row_splits array are as given by // the `axis`'th row of `offsets`; the values in the array // are related to those in the `axis+1`'th row. int32_t old_idx = new2old_data[new_idx], this_old_offset = old_offsets_acc(axis, old_idx), next_old_offset = old_offsets_acc(axis, old_idx + 1), this_new_offset = new_offsets_acc(axis, old_idx), num_rows = next_old_offset - this_old_offset, value_offset = new_offsets_acc(axis + 1, new_idx) - old_offsets_acc(axis + 1, old_idx); // Using <= instead of < below causes threads for different src_idx to // write a single overlapping value, but also ensures that the // terminating value is written. This only works because row_splits // vectors always start with 0, which is not necessarily the case // for row-ids. for (; thread_idx <= num_rows; thread_idx += num_threads) { this_new_row_splits[this_new_offset + thread_idx] = value_offset + this_old_row_splits[thread_idx]; } }; int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis], target_num_loops = (tot_work > 1000000 ? 
4 : 2); // bool include_final_task = false; EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis), min_threads_per_job, tot_work, target_num_loops, lambda_set_row_splits); } { int32_t *this_new_row_ids = ans.RowIds(axis).Data(); const int32_t *this_old_row_ids = src.RowIds(axis).Data(); auto lambda_set_row_ids = [=] __host__ __device__( int32_t new_idx, int32_t num_threads, int32_t thread_idx) -> void { // 0 <= new_idx < dim0; and 0 <= thread_idx < num_threads, // num_threads may have any value > 0 as far as this code is concerned. // // Reminder of how row_ids work dimensionally: they are a map // from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is // dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on. // The locations in the row_ids array are as given by // the `axis+1`'th row of `offsets`; the values in the array // are related to those in the `axis`'th row. int32_t old_idx = new2old_data[new_idx], this_old_offset = old_offsets_acc(axis + 1, old_idx), next_old_offset = old_offsets_acc(axis + 1, old_idx + 1), this_new_offset = new_offsets_acc(axis + 1, old_idx), num_rows = next_old_offset - this_old_offset, value_offset = new_offsets_acc(axis, new_idx) - old_offsets_acc(axis, old_idx); // Using <= instead of < below causes threads for different src_idx to // write a single overlapping value, but also ensures that the // terminating value is written. This only works because row_splits // vectors always start with 0, which is not necessarily the case // for row-ids. for (; thread_idx < num_rows; thread_idx += num_threads) { this_new_row_ids[this_new_offset + thread_idx] = value_offset + this_old_row_ids[thread_idx]; } // TODO: maybe remove this if I decide last value is not needed. if (new_idx == dim0 - 1 && thread_idx == num_rows) { int32_t next_value_offset = new_offsets_acc(axis, new_idx + 1) - old_offsets_acc(axis, old_idx + 1); this_new_row_ids[this_new_offset + thread_idx] = next_value_offset; } }; int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis], target_num_loops = (tot_work > 1000000 ? 4 : 2); EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis), min_threads_per_job, tot_work, target_num_loops, lambda_set_row_ids); } } #ifndef NDEBUG ans.Check(); #endif return ans; } Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) { K2_CHECK_GT(num_srcs, 0); int32_t num_axes_in = src[0]->NumAxes(); ContextPtr ctx = src[0]->Context(); Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1); int32_t *src_offsets_data = src_offsets.Data(); int32_t src_offsets_stride0 = src_offsets.ElemStride0(); // Check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in); K2_CHECK(ctx->IsCompatible(*src[i]->Context())); } for (int32_t axis = 0; axis <= num_axes_in; ++axis) { int32_t sum = 0; for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column src_offsets_data[axis * src_offsets_stride0 + i] = sum; if (i < num_srcs) { sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1)); } } } return src_offsets; } /* Extract meta-info from the shape (this will include populating any row_ids and row_splits that were not already populated). This is used inside algorithms when we need to transfer meta-info to GPU. @param [in] src Ragged shape that we're extracting meta-info from @param [out] row_splits This will be set to an array of size src.NumAxes()-1, containing pointers to the row_splits' Data() vectors. 
The array will be allocated on the same device as `src`. @param [out] row_ids This will be set to an array of size src.NumAxes()-1, containing pointers to the row_ids' Data() vectors. The array will be allocated on the same device as `src`. */ void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits, Array1<int32_t *> *row_ids) { int32_t axes = src.NumAxes(); K2_CHECK_GE(axes, 2); src.Populate(); std::vector<int32_t *> row_splits_ptrs(axes - 1); std::vector<int32_t *> row_ids_ptrs(axes - 1); for (int32_t i = 1; i != axes; ++i) { row_splits_ptrs[i - 1] = src.RowSplits(i).Data(); row_ids_ptrs[i - 1] = src.RowIds(i).Data(); } ContextPtr ctx = src.Context(); *row_splits = Array1<int32_t *>(ctx, row_splits_ptrs); *row_ids = Array1<int32_t *>(ctx, row_ids_ptrs); } /* Get some meta-info for an array of RaggedShape, and transfer them to the device that `src` is located on. Just same with `GetRowInfo` above, but for multiple RaggedShapes. @param [in] num_srcs Number of source arrays to process. @param [in] src Source arrays. All of them must have same num_axes and on the same device, but we just check this in debug mode. @param [in] row_splits Output array of row_splits pointers, will be of dimension num_axes-1 by num_src @param [in] row_splits Output array of row_splits pointers, will be of dimension num_axes-1 by num_src */ void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src, Array2<int32_t *> *row_splits, Array2<int32_t *> *row_ids) { K2_CHECK_GT(num_srcs, 0); int32_t num_axes_in = src[0]->NumAxes(); K2_CHECK_GE(num_axes_in, 2); ContextPtr ctx = src[0]->Context(); // check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in); K2_CHECK(ctx->IsCompatible(*src[i]->Context())); } Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs); Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs); int32_t **splits_ptr_data = row_splits_ptrs.Data(); int32_t **ids_ptr_data = row_ids_ptrs.Data(); int32_t stride0 = row_splits_ptrs.ElemStride0(); K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0()); for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) { for (int32_t i = 0; i != num_srcs; ++i) { splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data(); ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data(); } } *row_splits = row_splits_ptrs.To(ctx); *row_ids = row_ids_ptrs.To(ctx); } RaggedShape Append(int32_t axis, int32_t num_srcs, RaggedShape **src) { K2_CHECK_EQ(axis, 0) << "Append() with axis > 0 not yet supported"; K2_CHECK_GT(num_srcs, 0); int32_t num_axes = src[0]->NumAxes(); ContextPtr c = src[0]->Context(); // Check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(num_axes, src[i]->NumAxes()); K2_CHECK(IsCompatible(*src[0], *src[i])); } // `offsets` will be on CPU for now. 
Array2<int32_t> offsets = GetOffsets(num_srcs, src); auto offsets_acc = offsets.Accessor(); std::vector<int32_t> tot_sizes_out(num_axes); for (int32_t axis = 0; axis < num_axes; ++axis) tot_sizes_out[axis] = offsets_acc(axis, num_srcs); RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.data()); Array1<int32_t *> dest_row_splits, dest_row_ids; GetRowInfo(ans, &dest_row_splits, &dest_row_ids); Array2<int32_t *> src_row_splits, src_row_ids; GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids); if (c->GetDeviceType() != kCpu) offsets = offsets.To(c); int32_t **dest_row_splits_data = dest_row_splits.Data(), **dest_row_ids_data = dest_row_ids.Data(); auto src_row_splits_acc = src_row_splits.Accessor(), src_row_ids_acc = src_row_ids.Accessor(); offsets_acc = offsets.Accessor(); // on GPU now (if we're using one) ParallelRunner pr(c); std::vector<hipStream_t> streams(num_axes + 1); int32_t num_jobs = num_srcs * 2; // task_redirects is a device array (if using GPU). // We have `num_axes - 1` different sets of row_splits/row_ids to // populate but they have different sizes; the total number of distinct // sizes is `num_axes`. Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs); auto task_redirects_acc = task_redirects.Accessor(); // populate task_redirects (these allocate blocks of threads roughly // proportionally to the amount of data to process from this source. for (int32_t axis = 0; axis < num_axes; axis++) { streams[axis] = pr.NewStream(); const int32_t *offsets = &(offsets_acc(axis, 0)); GetTaskRedirect(c, num_srcs, offsets, task_redirects_acc.Row(axis)); } for (int32_t axis = 0; axis < num_axes - 1; axis++) { // first set the row-splits. TaskRedirect *tr = &(task_redirects_acc(axis, 0)); int32_t **this_src_row_splits = &(src_row_splits_acc(axis, 0)), **this_src_row_ids = &(src_row_ids_acc(axis, 0)); int32_t *this_dest_row_splits = ans.RowSplits(axis + 1).Data(), *this_dest_row_ids = ans.RowIds(axis + 1).Data(); const int32_t *offsets_this_axis = &(offsets_acc(axis, 0)), *offsets_next_axis = &(offsets_acc(axis + 1, 0)); auto lambda_set_row_splits = [=] __host__ __device__( int32_t src_idx, int32_t num_threads, int32_t thread_idx) -> void { // Reminder of how row_splits work dimensionally: they are a map // from, e.g. an idx0 to an idx01. An offsets_acc(0,n) is // dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on. int32_t this_offset = offsets_this_axis[src_idx], next_offset = offsets_this_axis[src_idx + 1], this_value_offset = offsets_next_axis[src_idx], num_rows = next_offset - this_offset; int32_t *src_row_splits_ptr = this_src_row_splits[src_idx]; // Using <= instead of < below causes threads for different src_idx to // write a single overlapping value, but also ensures that the // terminating value is written. This only works because row_splits // vectors always start with 0, which is not necessarily the case // for row-ids. for (; thread_idx <= num_rows; thread_idx += num_threads) { this_dest_row_splits[this_offset + thread_idx] = this_value_offset + src_row_splits_ptr[thread_idx]; } }; int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis], target_num_loops = (tot_work > 1000000 ? 
4 : 2); EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis), min_threads_per_job, tot_work, target_num_loops, lambda_set_row_splits); { // set the row-ids auto lambda_set_row_ids = [=] __host__ __device__( int32_t src_idx, int32_t num_threads, int32_t thread_idx) -> void { // Reminder of how row_ids work dimensionally: they are a map // from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is // dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on. int32_t this_offset = offsets_next_axis[src_idx], next_offset = offsets_next_axis[src_idx + 1], this_value_offset = offsets_this_axis[src_idx], num_elems = next_offset - this_offset; int32_t *src_row_ids_ptr = this_src_row_ids[src_idx]; // We need to write the very last value at the end of all the // arrays; the last job (for src_idx == num_srcs - 1) does this // by adding 1 to num_srcs. We can't let them all write an // extra value, because unlike row_splits, row_ids vectors may not // start with 0 in general; so having 2 threads write that // value (the 1st of each; one past the last of each) would cause // indeterminacy. if (src_idx == num_srcs - 1) num_elems++; for (; thread_idx <= num_elems; thread_idx += num_threads) { this_dest_row_ids[this_offset + thread_idx] = this_value_offset + src_row_ids_ptr[thread_idx]; } }; int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis + 1], target_num_loops = (tot_work > 1000000 ? 4 : 2); // bool include_final_task = false; EvalWithRedirect(streams[axis + 1], num_jobs, task_redirects_acc.Row(axis + 1), min_threads_per_job, tot_work, target_num_loops, lambda_set_row_ids); } } return ans; } RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) { K2_CHECK_GT(src.NumAxes(), 2); K2_CHECK(axis >= 0 && axis < src.NumAxes()); // note, `axes` is of dim src.NumAxes() - 1. // Also note: axes_in[i] pertains to the relationship between // axes i and i+1 in the source. src.Populate(); const std::vector<RaggedShapeDim> &axes_in = src.Axes(); std::vector<RaggedShapeDim> axes_out(axes_in.size() - 1); int32_t axes_out_size = static_cast<int32_t>(axes_out.size()); for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i]; if (axis > 0 && axis + 1 < src.NumAxes()) { axes_out[axis - 1].row_ids = axes_in[axis - 1].row_ids[axes_in[axis].row_ids]; axes_out[axis - 1].row_splits = axes_in[axis].row_splits[axes_in[axis - 1].row_splits]; axes_out[axis - 1].cached_tot_size = axes_in[axis].cached_tot_size; } for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1]; return RaggedShape(axes_out); } // transpose axes 0 and 1. RaggedShape Transpose(RaggedShape &src) { K2_CHECK_GT(src.NumAxes(), 2); int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1); K2_CHECK_EQ(src_tot_size1 % src_dim0, 0) << "Transpose(): all dims on axis 0 must be the same."; int32_t src_dim1 = src_tot_size1 / src_dim0; RaggedShape src_no_axis0 = RemoveAxis(src, 0); K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1); ContextPtr c = src.Context(); // `renumbering` is a `new2old` map, that maps from the first index in // src_no_axis0_renumbered // to the first index into src_no_axis0. 
Array1<int32_t> renumbering(c, src_tot_size1); int32_t *renumbering_data = renumbering.Data(); auto lambda_set_renumbering = [=] __host__ __device__(int32_t i) { int32_t j = i % src_dim1, k = i / src_dim1, i_old = j * src_dim0 + k; renumbering_data[i] = i_old; }; Eval(c, src_tot_size1, lambda_set_renumbering); RaggedShape src_no_axis0_renumbered = Renumber(src_no_axis0, renumbering); int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1, row_ids_dim = src_tot_size1; std::vector<RaggedShapeDim> ans_axis0(1); Array1<int32_t> mem(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); auto lambda_set_row_info = [=] __host__ __device__(int32_t i) { int32_t val; if (i >= row_splits_dim) { // row_ids int32_t elem_idx = i - row_splits_dim; val = elem_idx / src_dim0; } else { // row_splits int32_t row_idx = i; val = row_idx * src_dim0; } mem_data[i] = val; }; Eval(c, row_splits_dim + row_ids_dim, lambda_set_row_info); ans_axis0[0].row_splits = mem.Range(0, row_splits_dim); ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim); ans_axis0[0].cached_tot_size = row_ids_dim; RaggedShape temp(ans_axis0); return ComposeRaggedShapes(temp, src_no_axis0_renumbered); } RaggedShape Stack(int32_t axis, int32_t num_srcs, const RaggedShape **src) { K2_CHECK_GT(num_srcs, 0); K2_CHECK(axis >= 0 && axis <= 1); ContextPtr c = src[0]->Context(); int32_t num_axes = src[0]->NumAxes(); // Check if they have the same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(num_axes, src[i]->NumAxes()); K2_CHECK(c->IsCompatible(*src[i]->Context())); } std::vector<RaggedShape> unsqueezed(num_srcs); std::vector<RaggedShape *> unsqueezed_ptrs(num_srcs); { ParallelRunner pr(c); for (int32_t i = 0; i < num_srcs; i++) { With w(pr.NewStream()); unsqueezed[i] = Unsqueeze(*src[i], 0); unsqueezed_ptrs[i] = &unsqueezed[i]; } // destructor will wait for work in those launched streams to finish. // (well it won't actually wait, but it will force the current stream to // wait.) } RaggedShape ans = Append(0, num_srcs, unsqueezed_ptrs.data()); // Transpose will check if all src->Dim0() has the same value. if (axis == 1) ans = Transpose(ans); return ans; } RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) { // row_splits= [ Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems); int32_t *row_splits_data = row_splits.Data(); Array1<int32_t> row_ids(c, num_elems, 0); return RaggedShape2(&row_splits, &row_ids, num_elems); } } // namespace k2
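/* Standalone host-side sketch (not part of the k2 file above): the row_splits / row_ids
 * duality that the comments above keep referring to. A row_splits vector [0, 2, 2, 5]
 * describes rows of sizes 2, 0, 3; the matching row_ids vector maps each element to its
 * row: [0, 0, 2, 2, 2]. This is only the idea behind RowSplitsToRowIds(), written as
 * plain CPU code. */
#include <cstdio>
#include <vector>

static std::vector<int> RowSplitsToRowIdsRef(const std::vector<int> &row_splits) {
  std::vector<int> row_ids;
  int num_rows = static_cast<int>(row_splits.size()) - 1;
  for (int r = 0; r < num_rows; ++r)
    for (int e = row_splits[r]; e < row_splits[r + 1]; ++e)
      row_ids.push_back(r);  // element e lives in row r
  return row_ids;
}

int main() {
  for (int id : RowSplitsToRowIdsRef({0, 2, 2, 5})) printf("%d ", id);
  printf("\n");  // prints: 0 0 2 2 2
  return 0;
}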
dbf254ec30b8548e60e7fada2932eac0c9a8faf1.cu
/** * @brief * ragged * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey * Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <cub/cub.cuh> #include <vector> #include "k2/csrc/array_ops.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" namespace { // will be used in RaggedShape::MaxSize(int32_t axis) to call // cub::DeviceReduce::Max struct RowSplitsDiff { const int32_t *row_splits_data; explicit RowSplitsDiff(const int32_t *row_splits) : row_splits_data(row_splits) {} // operator[] and operator+ are required by cub::DeviceReduce::Max __device__ int32_t operator[](int32_t i) const { return row_splits_data[i + 1] - row_splits_data[i]; } __device__ RowSplitsDiff operator+(int32_t n) const { RowSplitsDiff tmp(*this); tmp.row_splits_data += n; return tmp; } }; /* A helper function used in RaggedShape3; if both first and second are non-NULL, it will check if the context of them is compatible or not and return that context if compatible; if one of them is NULL, returns the other one's context. */ static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first, const k2::Array1<int32_t> *second) { K2_CHECK(first != nullptr || second != nullptr) << "At least one of first and second must be non-NULL"; if (first == nullptr) return second->Context(); else if (second == nullptr) return first->Context(); else return k2::GetContext(*first, *second); } } // namespace namespace std { // vaule_type is required by cub::DeviceReduce::Max template <> struct iterator_traits<::RowSplitsDiff> { typedef int32_t value_type; }; } // namespace std namespace k2 { RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes, int32_t max_num_axes, int32_t min_num_elements, int32_t max_num_elements) { ContextPtr c = GetCpuContext(); K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes && min_num_elements >= 0 && max_num_elements >= min_num_elements); int32_t num_axes = RandInt(min_num_axes, max_num_axes); int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements); bool done_repeats = false; std::vector<RaggedShapeDim> axes(num_axes - 1); for (int32_t axis = num_axes - 2; axis >= 0; axis--) { // this axis will have row_ids of length num_elements and // row_splits of length to be determined. int32_t cur_row_split = 0; std::vector<int32_t> row_splits_vec; std::vector<int32_t> row_ids_vec; row_splits_vec.push_back(cur_row_split); // The reason for "|| RandInt(0, 2) == 0)" is so that even if there // are no elements we can still potentially generate empty row-splits. while (cur_row_split < num_elements || RandInt(0, 2) == 0) { int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split); cur_row_split += split_size; // sometimes we have a bunch of empty rows in a row (this will test out // more of the code), so here we generate a bunch of empty rows, but we // just do this only once (that's why we declare `done_repeats` here). if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) { int32_t num_repeats = RandIntGeometric(1, 128); row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split); // don't need to set `row_ids_vec` as there's no element. 
done_repeats = true; } row_splits_vec.push_back(cur_row_split); if (set_row_ids) { int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2; row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row); } } axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec); if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec); axes[axis].cached_tot_size = num_elements; num_elements = axes[axis].row_splits.Dim() - 1; } // RaggedShape(axes, true) will check the returned RaggedShape for // consistency. return RaggedShape(axes, true); } // Recursive function that prints (part of) a ragged shape. // 0 <= begin_pos <= end_pos < shape.TotSize(axis). void PrintRaggedShapePart(std::ostream &stream, RaggedShape &shape, int32_t axis, int32_t begin_pos, int32_t end_pos) { K2_CHECK(axis >= 0 && axis < shape.NumAxes() && begin_pos >= 0 && begin_pos <= end_pos && end_pos <= shape.TotSize(axis)); for (int32_t d = begin_pos; d < end_pos; ++d) { if (axis == shape.NumAxes() - 1) { stream << d << " "; } else { stream << "[ "; const int32_t *row_splits = shape.RowSplits(axis + 1).Data(); K2_DCHECK(d < shape.RowSplits(axis + 1).Dim()); int32_t row_start = row_splits[d], row_end = row_splits[d + 1]; PrintRaggedShapePart(stream, shape, axis + 1, row_start, row_end); stream << "] "; } } } // prints a RaggedShape as e.g. [ [ 0 1 ] [ 2 ] [] ]. Note, the 'values' // are just the positions in the array, this is for readability. std::ostream &operator<<(std::ostream &stream, RaggedShape &shape) { if (shape.Context()->GetDeviceType() != kCpu) { return stream << shape.To(GetCpuContext()); } else { stream << "[ "; PrintRaggedShapePart(stream, shape, 0, 0, shape.Dim0()); stream << "]"; return stream; } } Array1<int32_t> &RaggedShape::RowIds(int32_t axis) { K2_CHECK_GT(axis, 0); K2_CHECK_LT(axis, NumAxes()); RaggedShapeDim &rsd = axes_[axis - 1]; auto &row_splits = rsd.row_splits; auto &row_ids = rsd.row_ids; // there must be row_splits.Dim() >=1 according to the definition of // RaggedShapeDim. 
K2_CHECK_GE(row_splits.Dim(), 1); if (row_splits.Dim() != 1 && row_ids.Dim() == 0) { // create row_ids as it does not exist row_ids = Array1<int32_t>(Context(), row_splits[row_splits.Dim() - 1]); const int32_t *row_splits_data = row_splits.Data(); int32_t *row_ids_data = row_ids.Data(); RowSplitsToRowIds(Context(), row_splits.Dim() - 1, row_splits_data, row_ids.Dim(), row_ids_data); // set cached_tot_size rsd.cached_tot_size = row_ids.Dim(); } return row_ids; } int32_t RaggedShape::MaxSize(int32_t axis) { K2_CHECK_GT(axis, 0); K2_CHECK_LT(axis, NumAxes()); const auto &row_splits = axes_[axis - 1].row_splits; const int32_t num_rows = row_splits.Dim() - 1; if (num_rows == 0) return 0; const int32_t *row_splits_data = row_splits.Data(); ContextPtr c = Context(); if (c->GetDeviceType() == kCpu) { int32_t max_value = 0; for (int32_t i = 0; i < num_rows; ++i) { int32_t value = row_splits_data[i + 1] - row_splits_data[i]; if (value > max_value) max_value = value; } return max_value; } else { K2_CHECK_EQ(c->GetDeviceType(), kCuda); ::RowSplitsDiff row_splits_diff(row_splits_data); Array1<int32_t> max_array(Context(), 1, 0); int32_t *max_value = max_array.Data(); void *d_temp_storage = nullptr; size_t temp_storage_bytes = 0; // the first time is to determine temporary device storage requirements K2_CHECK_CUDA_ERROR(cub::DeviceReduce::Max( d_temp_storage, temp_storage_bytes, row_splits_diff, max_value, num_rows, c->GetCudaStream())); void *deleter_context; d_temp_storage = c->Allocate(temp_storage_bytes, &deleter_context); K2_CHECK_CUDA_ERROR(cub::DeviceReduce::Max( d_temp_storage, temp_storage_bytes, row_splits_diff, max_value, num_rows, c->GetCudaStream())); c->Deallocate(d_temp_storage, deleter_context); // this will convert to memory on CPU return max_array[0]; } } RaggedShape RaggedShape::Index(int32_t axis, int32_t i) { // only support `axis == 0` for now K2_CHECK_EQ(axis, 0); K2_CHECK_GE(i, 0); int32_t num_axes = NumAxes(); K2_CHECK_GE(num_axes, 2); const auto &src_axes = Axes(); K2_CHECK_LT(i + 1, src_axes[0].row_splits.Dim()); int32_t idx = src_axes[0].row_splits[i]; int32_t idx_next = src_axes[0].row_splits[i + 1]; std::vector<RaggedShapeDim> axes(src_axes.size() - 1); ContextPtr c = Context(); for (int32_t i = 2; i < num_axes; ++i) { const Array1<int32_t> &src_row_splits = src_axes[i - 1].row_splits; int32_t num_rows = idx_next - idx; int32_t offset = idx; idx = src_row_splits[idx]; idx_next = src_row_splits[idx_next]; // allocate new memory here as we need to change the values, // i.e. subtracts the offset. axes[i - 2].row_splits = Array1<int32_t>(c, num_rows + 1); int32_t *data = axes[i - 2].row_splits.Data(); const int32_t *src_data = src_row_splits.Data(); auto lambda_set_values = [=] __host__ __device__(int32_t i) -> void { data[i] = src_data[i + offset] - idx; }; Eval(c, num_rows + 1, lambda_set_values); // leave row_ids and cached_tot_size unset axes[i - 2].cached_tot_size = -1; } RaggedShape shape(axes, true); return shape; } void RaggedShape::Populate() { int32_t num_axes = NumAxes(); for (int32_t i = 1; i < num_axes; ++i) { // ignore return values of the following calls. 
this->TotSize(i); this->RowIds(i); } } RaggedShape RaggedShape::To(ContextPtr ctx) const { if (ctx->IsCompatible(*Context())) return *this; std::vector<RaggedShapeDim> axes(axes_.size()); int32_t num_axes = NumAxes(); for (int32_t i = 1; i < num_axes; ++i) { axes[i - 1].row_splits = axes_[i - 1].row_splits.To(ctx); // leave row_ids and cached_tot_size unset axes[i - 1].cached_tot_size = -1; } return RaggedShape(axes); } RaggedShapeIndexIterator RaggedShape::Iterator() { return RaggedShapeIndexIterator(*this); } int32_t RaggedShape::operator[](const std::vector<int32_t> &indexes) { K2_CHECK(indexes.size() == NumAxes()); K2_CHECK(Context()->GetDeviceType() == kCpu); int32_t cur_idx = indexes[0]; for (int32_t i = 1; i < NumAxes(); i++) { Array1<int32_t> &row_splits = axes_[i - 1].row_splits; K2_CHECK(cur_idx >= 0 && cur_idx + 1 < row_splits.Dim()); cur_idx = row_splits[cur_idx]; cur_idx += indexes[i]; } return cur_idx; } int32_t RaggedShape::TotSize(int32_t axis) const { K2_CHECK_GE(axis, 0); K2_CHECK_LT(axis, NumAxes()); if (axis == 0) return Dim0(); else { const RaggedShapeDim &rsd = axes_[axis - 1]; if (rsd.cached_tot_size >= 0) { return rsd.cached_tot_size; } else { // if we had row_ids set up, we should have set cached_tot_size. K2_CHECK_EQ(rsd.row_ids.Dim(), 0); K2_CHECK_GT(rsd.row_splits.Dim(), 0); const_cast<RaggedShapeDim &>(rsd).cached_tot_size = rsd.row_splits.Back(); return rsd.cached_tot_size; } } } void RaggedShape::Check() { ContextPtr c = Context(); int32_t num_axes = axes_.size(); for (int32_t axis = 0; axis < num_axes; ++axis) { RaggedShapeDim &rsd = axes_[axis]; K2_CHECK_GE(rsd.row_splits.Dim(), 0); if (rsd.cached_tot_size >= 0) { K2_CHECK(rsd.row_splits.Dim() == 0 || rsd.cached_tot_size == rsd.row_splits.Back()); K2_CHECK(rsd.row_ids.Dim() == 0 || rsd.cached_tot_size == rsd.row_ids.Dim()); } else { K2_CHECK_EQ(rsd.cached_tot_size, -1); K2_CHECK_EQ(rsd.row_ids.Dim(), 0); } int32_t num_elems; // Check row_splits. { // meta[0] is a bool, ok == 1, not-ok == 0. // meta[1] will contain the number of row_splits. Array1<int32_t> meta(c, 2, 1); int32_t *ok_data = meta.Data(), *num_elems_data = ok_data + 1; const int32_t *row_splits_data = rsd.row_splits.Data(); int32_t num_rows = rsd.row_splits.Dim() - 1; auto lambda_check_row_splits = [=] __host__ __device__(int32_t i) -> void { int32_t this_idx = row_splits_data[i]; if (i == 0 && this_idx != 0) *ok_data = 0; if (i < num_rows) { int32_t next_idx = row_splits_data[i + 1]; if (next_idx < this_idx) *ok_data = 0; } else { K2_CHECK(i == num_rows); *num_elems_data = this_idx; } }; Eval(c, num_rows + 1, lambda_check_row_splits); meta = meta.To(GetCpuContext()); num_elems = meta[1]; int32_t ok = meta[0]; if (!ok) { K2_LOG(FATAL) << "Problem validating row-splits: for axes_[" << axis << "], row_splits = " << rsd.row_splits; } if (rsd.cached_tot_size > 0 && rsd.cached_tot_size != num_elems) { K2_LOG(FATAL) << "Problem validating row-splits: for axes_[" << axis << "], row_splits[-1] = " << num_elems << " but cached_tot_size == " << rsd.cached_tot_size; } } if (axis + 1 < num_axes) { int32_t next_num_rows = axes_[axis + 1].row_splits.Dim() - 1; if (num_elems != next_num_rows) { K2_LOG(FATAL) << "Ragged shape has num_elems for axes_[" << axis << "] == " << num_elems << " and num-rows for axes_[" << (axis + 1) << "] == " << next_num_rows; } } if (rsd.row_ids.Dim() != 0) { // check row_ids. 
K2_CHECK(IsCompatible(rsd.row_ids, rsd.row_splits)); // 1st elem is `ok` (1 or 0); 2nd elem is location of bad index // into row_splits Array1<int32_t> meta(c, 2, 1); int32_t *ok_data = meta.Data(), *bad_index_data = ok_data + 1; const int32_t *row_splits_data = rsd.row_splits.Data(), *row_ids_data = rsd.row_ids.Data(); int32_t num_elems_from_row_ids = rsd.row_ids.Dim(), num_rows = rsd.row_splits.Dim() - 1; K2_CHECK_EQ(num_elems, num_elems_from_row_ids); auto lambda_check_row_ids = [=] __host__ __device__(int32_t i) -> void { int32_t this_row = row_ids_data[i]; if (this_row < 0 || this_row >= num_rows || i < row_splits_data[this_row] || i >= row_splits_data[this_row + 1]) { *ok_data = 0; *bad_index_data = i; } }; // TODO: could do this and the other one in separate streams. Eval(c, num_elems, lambda_check_row_ids); meta = meta.To(GetCpuContext()); // since we have 2 accesses, this should // be faster. int32_t ok = meta[0]; if (!ok) { K2_LOG(FATAL) << "Problem validating row-ids: for axes_[" << axis << "], row_splits = " << rsd.row_splits << ", row_ids = " << rsd.row_ids << ", see index " << meta[1] << " of row_ids, whose dim is " << rsd.row_ids.Dim(); } } if (axis + 1 < axes_.size()) { K2_CHECK(IsCompatible(rsd.row_splits, axes_[axis + 1].row_splits)); } } } RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids, int32_t cached_tot_size) { K2_CHECK(row_splits != nullptr || row_ids != nullptr) << "At least one of row_splits and row_ids must be defined"; ContextPtr ctx = ::GetContext(row_splits, row_ids); if (cached_tot_size != -1) { if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim()); if (row_splits != nullptr) { // may be slow as it may copy memory from device to host K2_CHECK_EQ(cached_tot_size, row_splits->Back()); } } std::vector<RaggedShapeDim> axes(1); if (row_splits != nullptr) { axes[0].row_splits = *row_splits; } else { // we need to work out row_splits as we always require row_splits is not // empty for RaggedShape. Note here we suppose the last element in row_ids // is num_rows - 1, i.e. there's no empty rows after row `row_ids[-1]`. int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1; Array1<int32_t> row_splits_array(ctx, num_rows + 1); RowIdsToRowSplits(*row_ids, row_splits_array); axes[0].row_splits = row_splits_array; } if (row_ids != nullptr) axes[0].row_ids = *row_ids; axes[0].cached_tot_size = cached_tot_size; // note below line will check if row_splits and row_ids are valid and agree // with each other. return RaggedShape(axes); } RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) { if (a.NumElements() != b.Dim0()) { K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements() << " vs. 
" << b.Dim0(); } const auto &a_axes = a.Axes(); const auto &b_axes = b.Axes(); std::vector<RaggedShapeDim> axes(a_axes.size() + b_axes.size()); std::size_t a_size = a_axes.size(), b_size = b_axes.size(); for (std::size_t i = 0; i < a_size; ++i) axes[i] = a_axes[i]; for (std::size_t i = 0; i < b_size; ++i) axes[i + a_size] = b_axes[i]; return RaggedShape(axes); } RaggedShape RaggedShape3(Array1<int32_t> *row_splits1, Array1<int32_t> *row_ids1, int32_t cached_tot_size1, Array1<int32_t> *row_splits2, Array1<int32_t> *row_ids2, int32_t cached_tot_size2) { K2_CHECK(row_splits1 != nullptr || row_ids1 != nullptr) << "At least one of row_splits1 and row_ids1 must be defined"; K2_CHECK(row_splits2 != nullptr || row_ids2 != nullptr) << "At least one of row_splits2 and row_ids2 must be defined"; // check context ContextPtr ctx1 = ::GetContext(row_splits1, row_ids1); ContextPtr ctx2 = ::GetContext(row_splits2, row_ids2); K2_CHECK(ctx1->IsCompatible(*ctx2)); // check row_splits and row_ids of axis-1 if (cached_tot_size1 != -1) { if (row_ids1 != nullptr) K2_CHECK_EQ(cached_tot_size1, row_ids1->Dim()); if (row_splits1 != nullptr) { // may be slow as it may copy memory from device to host K2_CHECK_EQ(cached_tot_size1, row_splits1->Back()); } } // check row_splits and row_ids of axis-2 if (cached_tot_size2 != -1) { if (row_ids2 != nullptr) K2_CHECK_EQ(cached_tot_size2, row_ids2->Dim()); if (row_splits2 != nullptr) { // may be slow as it may copy memory from device to host K2_CHECK_EQ(cached_tot_size2, row_splits2->Back()); } } std::vector<RaggedShapeDim> axes(2); // set row_splits and row_ids for axis 1 if (row_splits1 != nullptr) { axes[0].row_splits = *row_splits1; } else { // work out row_splits1, see code in RaggedShape2 above for the reason int32_t num_rows = row_ids1->Dim() == 0 ? 0 : row_ids1->Back() + 1; Array1<int32_t> row_splits_array(ctx1, num_rows + 1); RowIdsToRowSplits(*row_ids1, row_splits_array); axes[0].row_splits = row_splits_array; } if (row_ids1 != nullptr) axes[0].row_ids = *row_ids1; axes[0].cached_tot_size = cached_tot_size1; // set row_splits and row_ids for axis 2 if (row_splits2 != nullptr) { axes[1].row_splits = *row_splits1; } else { // work out row_splits1, see code in RaggedShape2 above for the reason int32_t num_rows = row_ids2->Dim() == 0 ? 0 : row_ids2->Back() + 1; Array1<int32_t> row_splits_array(ctx1, num_rows + 1); RowIdsToRowSplits(*row_ids2, row_splits_array); axes[1].row_splits = row_splits_array; } if (row_ids2 != nullptr) axes[1].row_ids = *row_ids2; axes[1].cached_tot_size = cached_tot_size2; // we don't check here if // row_splits1[row_splits1.Dim() - 1] == row_ids1.Dim() // == (row_splits2.Dim() - 1) // >= (row_ids2[row_ids2.Dim() - 1] + 1) // but RaggedShape(axes) below will check this. return RaggedShape(axes); } RaggedShape RaggedShapeFromTotSizes(ContextPtr &c, int32_t num_axes, int32_t *tot_sizes) { K2_CHECK_GE(num_axes, 2); std::vector<RaggedShapeDim> axes(num_axes - 1); // In future we might choose to allocate everything in one big array, to avoid // multiple allocations, but for now just do it the simple way. 
for (int32_t axis = 1; axis < num_axes; ++axis) { axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1); axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]); axes[axis - 1].cached_tot_size = tot_sizes[axis]; } return RaggedShape(axes); } Array1<int32_t *> GetRowSplitsPtr(RaggedShape &src) { int32_t axes = src.NumAxes(); K2_CHECK_GE(axes, 2); std::vector<int32_t *> row_splits_start(axes - 1); for (int32_t i = 1; i != axes; ++i) { Array1<int32_t> &cur_splits = src.RowSplits(i); row_splits_start[i - 1] = cur_splits.Data(); } return Array1<int32_t *>(src.Context(), row_splits_start); } // See declaration in ragged.h for documentation of its purpose and interface. RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) { // If axis == 0, initial row_splits and row_ids will look like the following, // if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes // would be pushed forward. // // If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would // look like the following, if for instance the src.TotSize(axis-1) = 8: // [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ]. // // The reason why the code is different for axis == 0, is that in that case we // are really making visible an "implicit" axis of the input `src`; we could // call it axis 0 of the original RaggedShape. Imagine that "implicit" axis's // row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from // an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 < // Dim0(). ContextPtr c = src.Context(); K2_CHECK(axis >= 0 && axis <= src.NumAxes()); const std::vector<RaggedShapeDim> &axes_in = src.Axes(); int32_t num_axes_in = src.NumAxes(); // Note: in RaggedShape, the vector of RaggedShapeDim is of length // num_axes - 1, so the output will have one more axis than the input. std::vector<RaggedShapeDim> axes_out(num_axes_in); int32_t row_splits_dim, row_ids_dim; Array1<int32_t> mem; if (axis == 0) { row_splits_dim = 2; // e.g. [ 0 5 ] row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ] mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); auto lambda_set_mem = [=] __host__ __device__(int32_t i) -> void { if (i == 1) mem_data[i] = row_ids_dim; else mem_data[i] = 0; }; Eval(c, mem.Dim(), lambda_set_mem); } else { int32_t tot_size = src.TotSize(axis - 1); row_splits_dim = tot_size + 1; row_ids_dim = tot_size; mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); auto lambda_set_mem2 = [=] __host__ __device__(int32_t i) -> void { mem_data[i] = i % (tot_size + 1); }; Eval(c, mem.Dim(), lambda_set_mem2); } axes_out[axis].row_splits = mem.Range(0, row_splits_dim); axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim); for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i]; // Note: the returned array has `num_axes_in + 1` axes, so its // array of RaggedShapeDim is of length `num_axes_in`. for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1]; return RaggedShape(axes_out); } RaggedShape Renumber(RaggedShape &src, const Array1<int32_t> &new2old) { ContextPtr c = src.Context(); K2_CHECK(IsCompatible(src, new2old)); int32_t num_axes = src.NumAxes(), dim0 = src.Dim0(); K2_CHECK_EQ(new2old.Dim(), dim0); std::vector<int32_t> tot_sizes_out(num_axes); for (int32_t axis = 0; axis < num_axes; axis++) tot_sizes_out[axis] = src.TotSize(axis); // the arrays in `ans` will be the same sizes as those in `src`. 
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.data()); src.Populate(); Array2<int32_t> old_offsets(c, num_axes, dim0 + 1), new_offsets(c, num_axes, dim0 + 1); auto old_offsets_acc = old_offsets.Accessor(), new_offsets_acc = new_offsets.Accessor(); Array1<int32_t *> row_splits_ptrs = GetRowSplitsPtr(src); int32_t **row_splits_ptrs_data = row_splits_ptrs.Data(); // Set old_offsets auto lambda_get_old_offsets = [=] __host__ __device__(int32_t i) { // 0 <= i <= dim0 int32_t cur_offset = i; for (int32_t axis = 0; axis < num_axes; axis++) { old_offsets_acc(0, i) = cur_offset; if (axis + 1 == num_axes) return; cur_offset = row_splits_ptrs_data[axis][cur_offset]; } }; Eval(c, dim0 + 1, lambda_get_old_offsets); const int32_t *new2old_data = new2old.Data(); auto lambda_get_new_offsets = [=] __host__ __device__(int32_t axis, int32_t new_i) { // 0 <= axis < num_axes; 0 <= new_i < dim0 int32_t old_i = new2old_data[new_i], this_old_offset = old_offsets_acc(axis, old_i), next_old_offset = old_offsets_acc(axis, old_i + 1), size = next_old_offset - this_old_offset; new_offsets_acc(axis, new_i) = size; }; Eval2(c, num_axes, dim0, lambda_get_new_offsets); ExclusiveSum(new_offsets, &new_offsets); // Now new_offsets contains the offsets, not the sizes. ParallelRunner pr(c); std::vector<cudaStream_t> streams(num_axes); int32_t num_jobs = dim0 * 2; // note: this formula is not a heuristic; it's // how TaskRedirect works.. Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs); auto task_redirects_acc = task_redirects.Accessor(); for (int32_t axis = 0; axis < num_axes; axis++) { streams[axis] = pr.NewStream(); With w(streams[axis]); const int32_t *new_offsets_ptr = new_offsets_acc.Row(axis); TaskRedirect *task_redirect_ptr = task_redirects_acc.Row(axis); GetTaskRedirect(c, dim0, new_offsets_ptr, task_redirect_ptr); } for (int32_t axis = 0; axis < num_axes - 1; axis++) { { int32_t *this_new_row_splits = ans.RowSplits(axis).Data(); const int32_t *this_old_row_splits = src.RowSplits(axis).Data(); auto lambda_set_row_splits = [=] __host__ __device__( int32_t new_idx, int32_t num_threads, int32_t thread_idx) -> void { // 0 <= new_idx < dim0; and 0 <= thread_idx < num_threads, // num_threads may have any value > 0 as far as this code is concerned. // // Reminder of how row_splits work dimensionally: they are a map // from, e.g. an idx0 to an idx01. An offsets_acc(0,n) is // dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on. // The locations in the row_splits array are as given by // the `axis`'th row of `offsets`; the values in the array // are related to those in the `axis+1`'th row. int32_t old_idx = new2old_data[new_idx], this_old_offset = old_offsets_acc(axis, old_idx), next_old_offset = old_offsets_acc(axis, old_idx + 1), this_new_offset = new_offsets_acc(axis, old_idx), num_rows = next_old_offset - this_old_offset, value_offset = new_offsets_acc(axis + 1, new_idx) - old_offsets_acc(axis + 1, old_idx); // Using <= instead of < below causes threads for different src_idx to // write a single overlapping value, but also ensures that the // terminating value is written. This only works because row_splits // vectors always start with 0, which is not necessarily the case // for row-ids. for (; thread_idx <= num_rows; thread_idx += num_threads) { this_new_row_splits[this_new_offset + thread_idx] = value_offset + this_old_row_splits[thread_idx]; } }; int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis], target_num_loops = (tot_work > 1000000 ? 
4 : 2); // bool include_final_task = false; EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis), min_threads_per_job, tot_work, target_num_loops, lambda_set_row_splits); } { int32_t *this_new_row_ids = ans.RowIds(axis).Data(); const int32_t *this_old_row_ids = src.RowIds(axis).Data(); auto lambda_set_row_ids = [=] __host__ __device__( int32_t new_idx, int32_t num_threads, int32_t thread_idx) -> void { // 0 <= new_idx < dim0; and 0 <= thread_idx < num_threads, // num_threads may have any value > 0 as far as this code is concerned. // // Reminder of how row_ids work dimensionally: they are a map // from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is // dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on. // The locations in the row_ids array are as given by // the `axis+1`'th row of `offsets`; the values in the array // are related to those in the `axis`'th row. int32_t old_idx = new2old_data[new_idx], this_old_offset = old_offsets_acc(axis + 1, old_idx), next_old_offset = old_offsets_acc(axis + 1, old_idx + 1), this_new_offset = new_offsets_acc(axis + 1, old_idx), num_rows = next_old_offset - this_old_offset, value_offset = new_offsets_acc(axis, new_idx) - old_offsets_acc(axis, old_idx); // Using <= instead of < below causes threads for different src_idx to // write a single overlapping value, but also ensures that the // terminating value is written. This only works because row_splits // vectors always start with 0, which is not necessarily the case // for row-ids. for (; thread_idx < num_rows; thread_idx += num_threads) { this_new_row_ids[this_new_offset + thread_idx] = value_offset + this_old_row_ids[thread_idx]; } // TODO: maybe remove this if I decide last value is not needed. if (new_idx == dim0 - 1 && thread_idx == num_rows) { int32_t next_value_offset = new_offsets_acc(axis, new_idx + 1) - old_offsets_acc(axis, old_idx + 1); this_new_row_ids[this_new_offset + thread_idx] = next_value_offset; } }; int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis], target_num_loops = (tot_work > 1000000 ? 4 : 2); EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis), min_threads_per_job, tot_work, target_num_loops, lambda_set_row_ids); } } #ifndef NDEBUG ans.Check(); #endif return ans; } Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) { K2_CHECK_GT(num_srcs, 0); int32_t num_axes_in = src[0]->NumAxes(); ContextPtr ctx = src[0]->Context(); Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1); int32_t *src_offsets_data = src_offsets.Data(); int32_t src_offsets_stride0 = src_offsets.ElemStride0(); // Check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in); K2_CHECK(ctx->IsCompatible(*src[i]->Context())); } for (int32_t axis = 0; axis <= num_axes_in; ++axis) { int32_t sum = 0; for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column src_offsets_data[axis * src_offsets_stride0 + i] = sum; if (i < num_srcs) { sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1)); } } } return src_offsets; } /* Extract meta-info from the shape (this will include populating any row_ids and row_splits that were not already populated). This is used inside algorithms when we need to transfer meta-info to GPU. @param [in] src Ragged shape that we're extracting meta-info from @param [out] row_splits This will be set to an array of size src.NumAxes()-1, containing pointers to the row_splits' Data() vectors. 
The array will be allocated on the same device as `src`. @param [out] row_ids This will be set to an array of size src.NumAxes()-1, containing pointers to the row_ids' Data() vectors. The array will be allocated on the same device as `src`. */ void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits, Array1<int32_t *> *row_ids) { int32_t axes = src.NumAxes(); K2_CHECK_GE(axes, 2); src.Populate(); std::vector<int32_t *> row_splits_ptrs(axes - 1); std::vector<int32_t *> row_ids_ptrs(axes - 1); for (int32_t i = 1; i != axes; ++i) { row_splits_ptrs[i - 1] = src.RowSplits(i).Data(); row_ids_ptrs[i - 1] = src.RowIds(i).Data(); } ContextPtr ctx = src.Context(); *row_splits = Array1<int32_t *>(ctx, row_splits_ptrs); *row_ids = Array1<int32_t *>(ctx, row_ids_ptrs); } /* Get some meta-info for an array of RaggedShape, and transfer them to the device that `src` is located on. Just same with `GetRowInfo` above, but for multiple RaggedShapes. @param [in] num_srcs Number of source arrays to process. @param [in] src Source arrays. All of them must have same num_axes and on the same device, but we just check this in debug mode. @param [in] row_splits Output array of row_splits pointers, will be of dimension num_axes-1 by num_src @param [in] row_splits Output array of row_splits pointers, will be of dimension num_axes-1 by num_src */ void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src, Array2<int32_t *> *row_splits, Array2<int32_t *> *row_ids) { K2_CHECK_GT(num_srcs, 0); int32_t num_axes_in = src[0]->NumAxes(); K2_CHECK_GE(num_axes_in, 2); ContextPtr ctx = src[0]->Context(); // check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in); K2_CHECK(ctx->IsCompatible(*src[i]->Context())); } Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs); Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs); int32_t **splits_ptr_data = row_splits_ptrs.Data(); int32_t **ids_ptr_data = row_ids_ptrs.Data(); int32_t stride0 = row_splits_ptrs.ElemStride0(); K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0()); for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) { for (int32_t i = 0; i != num_srcs; ++i) { splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data(); ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data(); } } *row_splits = row_splits_ptrs.To(ctx); *row_ids = row_ids_ptrs.To(ctx); } RaggedShape Append(int32_t axis, int32_t num_srcs, RaggedShape **src) { K2_CHECK_EQ(axis, 0) << "Append() with axis > 0 not yet supported"; K2_CHECK_GT(num_srcs, 0); int32_t num_axes = src[0]->NumAxes(); ContextPtr c = src[0]->Context(); // Check if they have same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(num_axes, src[i]->NumAxes()); K2_CHECK(IsCompatible(*src[0], *src[i])); } // `offsets` will be on CPU for now. 
Array2<int32_t> offsets = GetOffsets(num_srcs, src); auto offsets_acc = offsets.Accessor(); std::vector<int32_t> tot_sizes_out(num_axes); for (int32_t axis = 0; axis < num_axes; ++axis) tot_sizes_out[axis] = offsets_acc(axis, num_srcs); RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.data()); Array1<int32_t *> dest_row_splits, dest_row_ids; GetRowInfo(ans, &dest_row_splits, &dest_row_ids); Array2<int32_t *> src_row_splits, src_row_ids; GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids); if (c->GetDeviceType() != kCpu) offsets = offsets.To(c); int32_t **dest_row_splits_data = dest_row_splits.Data(), **dest_row_ids_data = dest_row_ids.Data(); auto src_row_splits_acc = src_row_splits.Accessor(), src_row_ids_acc = src_row_ids.Accessor(); offsets_acc = offsets.Accessor(); // on GPU now (if we're using one) ParallelRunner pr(c); std::vector<cudaStream_t> streams(num_axes + 1); int32_t num_jobs = num_srcs * 2; // task_redirects is a device array (if using GPU). // We have `num_axes - 1` different sets of row_splits/row_ids to // populate but they have different sizes; the total number of distinct // sizes is `num_axes`. Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs); auto task_redirects_acc = task_redirects.Accessor(); // populate task_redirects (these allocate blocks of threads roughly // proportionally to the amount of data to process from this source. for (int32_t axis = 0; axis < num_axes; axis++) { streams[axis] = pr.NewStream(); const int32_t *offsets = &(offsets_acc(axis, 0)); GetTaskRedirect(c, num_srcs, offsets, task_redirects_acc.Row(axis)); } for (int32_t axis = 0; axis < num_axes - 1; axis++) { // first set the row-splits. TaskRedirect *tr = &(task_redirects_acc(axis, 0)); int32_t **this_src_row_splits = &(src_row_splits_acc(axis, 0)), **this_src_row_ids = &(src_row_ids_acc(axis, 0)); int32_t *this_dest_row_splits = ans.RowSplits(axis + 1).Data(), *this_dest_row_ids = ans.RowIds(axis + 1).Data(); const int32_t *offsets_this_axis = &(offsets_acc(axis, 0)), *offsets_next_axis = &(offsets_acc(axis + 1, 0)); auto lambda_set_row_splits = [=] __host__ __device__( int32_t src_idx, int32_t num_threads, int32_t thread_idx) -> void { // Reminder of how row_splits work dimensionally: they are a map // from, e.g. an idx0 to an idx01. An offsets_acc(0,n) is // dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on. int32_t this_offset = offsets_this_axis[src_idx], next_offset = offsets_this_axis[src_idx + 1], this_value_offset = offsets_next_axis[src_idx], num_rows = next_offset - this_offset; int32_t *src_row_splits_ptr = this_src_row_splits[src_idx]; // Using <= instead of < below causes threads for different src_idx to // write a single overlapping value, but also ensures that the // terminating value is written. This only works because row_splits // vectors always start with 0, which is not necessarily the case // for row-ids. for (; thread_idx <= num_rows; thread_idx += num_threads) { this_dest_row_splits[this_offset + thread_idx] = this_value_offset + src_row_splits_ptr[thread_idx]; } }; int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis], target_num_loops = (tot_work > 1000000 ? 
4 : 2); EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis), min_threads_per_job, tot_work, target_num_loops, lambda_set_row_splits); { // set the row-ids auto lambda_set_row_ids = [=] __host__ __device__( int32_t src_idx, int32_t num_threads, int32_t thread_idx) -> void { // Reminder of how row_ids work dimensionally: they are a map // from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is // dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on. int32_t this_offset = offsets_next_axis[src_idx], next_offset = offsets_next_axis[src_idx + 1], this_value_offset = offsets_this_axis[src_idx], num_elems = next_offset - this_offset; int32_t *src_row_ids_ptr = this_src_row_ids[src_idx]; // We need to write the very last value at the end of all the // arrays; the last job (for src_idx == num_srcs - 1) does this // by adding 1 to num_srcs. We can't let them all write an // extra value, because unlike row_splits, row_ids vectors may not // start with 0 in general; so having 2 threads write that // value (the 1st of each; one past the last of each) would cause // indeterminacy. if (src_idx == num_srcs - 1) num_elems++; for (; thread_idx <= num_elems; thread_idx += num_threads) { this_dest_row_ids[this_offset + thread_idx] = this_value_offset + src_row_ids_ptr[thread_idx]; } }; int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis + 1], target_num_loops = (tot_work > 1000000 ? 4 : 2); // bool include_final_task = false; EvalWithRedirect(streams[axis + 1], num_jobs, task_redirects_acc.Row(axis + 1), min_threads_per_job, tot_work, target_num_loops, lambda_set_row_ids); } } return ans; } RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) { K2_CHECK_GT(src.NumAxes(), 2); K2_CHECK(axis >= 0 && axis < src.NumAxes()); // note, `axes` is of dim src.NumAxes() - 1. // Also note: axes_in[i] pertains to the relationship between // axes i and i+1 in the source. src.Populate(); const std::vector<RaggedShapeDim> &axes_in = src.Axes(); std::vector<RaggedShapeDim> axes_out(axes_in.size() - 1); int32_t axes_out_size = static_cast<int32_t>(axes_out.size()); for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i]; if (axis > 0 && axis + 1 < src.NumAxes()) { axes_out[axis - 1].row_ids = axes_in[axis - 1].row_ids[axes_in[axis].row_ids]; axes_out[axis - 1].row_splits = axes_in[axis].row_splits[axes_in[axis - 1].row_splits]; axes_out[axis - 1].cached_tot_size = axes_in[axis].cached_tot_size; } for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1]; return RaggedShape(axes_out); } // transpose axes 0 and 1. RaggedShape Transpose(RaggedShape &src) { K2_CHECK_GT(src.NumAxes(), 2); int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1); K2_CHECK_EQ(src_tot_size1 % src_dim0, 0) << "Transpose(): all dims on axis 0 must be the same."; int32_t src_dim1 = src_tot_size1 / src_dim0; RaggedShape src_no_axis0 = RemoveAxis(src, 0); K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1); ContextPtr c = src.Context(); // `renumbering` is a `new2old` map, that maps from the first index in // src_no_axis0_renumbered // to the first index into src_no_axis0. 
Array1<int32_t> renumbering(c, src_tot_size1); int32_t *renumbering_data = renumbering.Data(); auto lambda_set_renumbering = [=] __host__ __device__(int32_t i) { int32_t j = i % src_dim1, k = i / src_dim1, i_old = j * src_dim0 + k; renumbering_data[i] = i_old; }; Eval(c, src_tot_size1, lambda_set_renumbering); RaggedShape src_no_axis0_renumbered = Renumber(src_no_axis0, renumbering); int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1, row_ids_dim = src_tot_size1; std::vector<RaggedShapeDim> ans_axis0(1); Array1<int32_t> mem(c, row_splits_dim + row_ids_dim); int32_t *mem_data = mem.Data(); auto lambda_set_row_info = [=] __host__ __device__(int32_t i) { int32_t val; if (i >= row_splits_dim) { // row_ids int32_t elem_idx = i - row_splits_dim; val = elem_idx / src_dim0; } else { // row_splits int32_t row_idx = i; val = row_idx * src_dim0; } mem_data[i] = val; }; Eval(c, row_splits_dim + row_ids_dim, lambda_set_row_info); ans_axis0[0].row_splits = mem.Range(0, row_splits_dim); ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim); ans_axis0[0].cached_tot_size = row_ids_dim; RaggedShape temp(ans_axis0); return ComposeRaggedShapes(temp, src_no_axis0_renumbered); } RaggedShape Stack(int32_t axis, int32_t num_srcs, const RaggedShape **src) { K2_CHECK_GT(num_srcs, 0); K2_CHECK(axis >= 0 && axis <= 1); ContextPtr c = src[0]->Context(); int32_t num_axes = src[0]->NumAxes(); // Check if they have the same num-axes and compatible context for (int32_t i = 1; i < num_srcs; ++i) { K2_CHECK_EQ(num_axes, src[i]->NumAxes()); K2_CHECK(c->IsCompatible(*src[i]->Context())); } std::vector<RaggedShape> unsqueezed(num_srcs); std::vector<RaggedShape *> unsqueezed_ptrs(num_srcs); { ParallelRunner pr(c); for (int32_t i = 0; i < num_srcs; i++) { With w(pr.NewStream()); unsqueezed[i] = Unsqueeze(*src[i], 0); unsqueezed_ptrs[i] = &unsqueezed[i]; } // destructor will wait for work in those launched streams to finish. // (well it won't actually wait, but it will force the current stream to // wait.) } RaggedShape ans = Append(0, num_srcs, unsqueezed_ptrs.data()); // Transpose will check if all src->Dim0() has the same value. if (axis == 1) ans = Transpose(ans); return ans; } RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) { // row_splits= [ Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems); int32_t *row_splits_data = row_splits.Data(); Array1<int32_t> row_ids(c, num_elems, 0); return RaggedShape2(&row_splits, &row_ids, num_elems); } } // namespace k2
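/* Standalone host-side sketch (not part of the k2 file above; toy sizes only):
 * lambda_set_renumbering in Transpose() maps each new position i to the old position
 * (i % src_dim1) * src_dim0 + (i / src_dim1). The loop below just prints that
 * permutation for src_dim0 = 2, src_dim1 = 3. */
#include <cstdio>

int main() {
  const int src_dim0 = 2, src_dim1 = 3;
  for (int i = 0; i < src_dim0 * src_dim1; ++i) {
    int j = i % src_dim1, k = i / src_dim1;
    int i_old = j * src_dim0 + k;            // same formula as the lambda
    printf("new %d -> old %d\n", i, i_old);  // 0->0 1->2 2->4 3->1 4->3 5->5
  }
  return 0;
}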
af45308fae08756cd366c0e9a4599c5f26b5e9db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "taso/ops.h" #include "taso/cuda_helper.h" using namespace taso; __global__ void fuse_conv_batchnorm_kernel(int c_out, int c_in_h_w, DATATYPE* dst_ptr, DATATYPE* conv_w, DATATYPE* scale, DATATYPE* var) { int volume = c_out * c_in_h_w; CUDA_KERNEL_LOOP(i, volume) { int c_out_idx = i / c_in_h_w; dst_ptr[i] = scale[c_out_idx] * conv_w[i] / sqrt(abs(var[c_out_idx]) + CUDNN_BN_MIN_EPSILON); } } void FuseConvBatchNorm::map(void) { assert(inputs[0].numDim == 4); size_t outputSize = sizeof(DATATYPE) * outputs[0].volume(); checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize)); } void FuseConvBatchNorm::unmap(void) { checkCUDA(hipFree(outputs[0].data_ptr)); } void FuseConvBatchNorm::forward(bool block) { int c_out = outputs[0].dim[0]; int c_in_h_w = outputs[0].volume() / c_out; DATATYPE* conv_w_ptr = (DATATYPE*) inputs[0].data_ptr; DATATYPE* scale_ptr = (DATATYPE*) inputs[1].data_ptr; DATATYPE* var_ptr = (DATATYPE*) inputs[4].data_ptr; hipLaunchKernelGGL(( fuse_conv_batchnorm_kernel), dim3(GET_BLOCKS(outputs[0].volume())), dim3(CUDA_NUM_THREADS), 0, 0, c_out, c_in_h_w, (DATATYPE*)outputs[0].data_ptr, conv_w_ptr, scale_ptr, var_ptr); if (block) checkCUDA(hipDeviceSynchronize()); }
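/* Minimal side-by-side sketch (toy kernel, not taken from either file) of the one
 * systematic difference between the .hip file above and the .cu file that follows:
 * the kernel-launch syntax. This compiles as CUDA; the equivalent HIP launch is
 * shown in a comment. */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(float *x, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= s;
}

int main() {
  const int n = 4;
  float h[n] = {1.f, 2.f, 3.f, 4.f}, *d = nullptr;
  cudaMalloc(&d, n * sizeof(float));                 // HIP: hipMalloc(&d, ...)
  cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
  scale_kernel<<<1, 32>>>(d, 2.f, n);
  // HIP: hipLaunchKernelGGL(scale_kernel, dim3(1), dim3(32), 0, 0, d, 2.f, n);
  cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d);
  for (float v : h) printf("%g ", v);  // prints: 2 4 6 8
  printf("\n");
  return 0;
}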
af45308fae08756cd366c0e9a4599c5f26b5e9db.cu
/* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "taso/ops.h" #include "taso/cuda_helper.h" using namespace taso; __global__ void fuse_conv_batchnorm_kernel(int c_out, int c_in_h_w, DATATYPE* dst_ptr, DATATYPE* conv_w, DATATYPE* scale, DATATYPE* var) { int volume = c_out * c_in_h_w; CUDA_KERNEL_LOOP(i, volume) { int c_out_idx = i / c_in_h_w; dst_ptr[i] = scale[c_out_idx] * conv_w[i] / sqrt(abs(var[c_out_idx]) + CUDNN_BN_MIN_EPSILON); } } void FuseConvBatchNorm::map(void) { assert(inputs[0].numDim == 4); size_t outputSize = sizeof(DATATYPE) * outputs[0].volume(); checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize)); } void FuseConvBatchNorm::unmap(void) { checkCUDA(cudaFree(outputs[0].data_ptr)); } void FuseConvBatchNorm::forward(bool block) { int c_out = outputs[0].dim[0]; int c_in_h_w = outputs[0].volume() / c_out; DATATYPE* conv_w_ptr = (DATATYPE*) inputs[0].data_ptr; DATATYPE* scale_ptr = (DATATYPE*) inputs[1].data_ptr; DATATYPE* var_ptr = (DATATYPE*) inputs[4].data_ptr; fuse_conv_batchnorm_kernel<<<GET_BLOCKS(outputs[0].volume()), CUDA_NUM_THREADS>>>( c_out, c_in_h_w, (DATATYPE*)outputs[0].data_ptr, conv_w_ptr, scale_ptr, var_ptr); if (block) checkCUDA(cudaDeviceSynchronize()); }
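/* CPU reference sketch (illustrative only; `eps` stands in for CUDNN_BN_MIN_EPSILON
 * and DATATYPE is assumed to be float): what fuse_conv_batchnorm_kernel computes per
 * weight element, i.e. dst[i] = scale[c] * w[i] / sqrt(|var[c]| + eps), where c is
 * the output channel that element belongs to. */
#include <cmath>
#include <cstdio>
#include <vector>

static void FuseConvBatchNormRef(int c_out, int c_in_h_w, const float *conv_w,
                                 const float *scale, const float *var,
                                 float eps, float *dst) {
  for (int i = 0; i < c_out * c_in_h_w; ++i) {
    int c = i / c_in_h_w;  // output-channel index of this weight element
    dst[i] = scale[c] * conv_w[i] / std::sqrt(std::fabs(var[c]) + eps);
  }
}

int main() {
  std::vector<float> w = {1.f, 2.f, 3.f, 4.f}, scale = {2.f, 0.5f},
                     var = {1.f, 3.f}, dst(4);
  FuseConvBatchNormRef(2, 2, w.data(), scale.data(), var.data(), 1e-5f,
                       dst.data());
  for (float v : dst) printf("%g ", v);
  printf("\n");
  return 0;
}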
b33f962d320ebcd8a350d6987c96dc29bd0fb2f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "eye_impl.cuh" #include <iostream> template <typename T> __global__ void EyeKernel(const size_t size, const size_t dim, T *output_addr) { for (size_t pointIdx = blockIdx.x * blockDim.x + threadIdx.x; pointIdx < (size); pointIdx += blockDim.x * gridDim.x) { size_t batchIdx = pointIdx / (dim * dim); size_t dst_x = (pointIdx - batchIdx * dim * dim) / dim; size_t dst_y = (pointIdx - batchIdx * dim * dim) % dim; if (dst_x == dst_y) { output_addr[pointIdx] = 1; } else { output_addr[pointIdx] = 0; } } } template <typename T> void Eye(const size_t size, const size_t dim, T *output_addr, hipStream_t cuda_stream) { hipLaunchKernelGGL(( EyeKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, dim, output_addr); return; } template void Eye<float>(const size_t size, const size_t dim, float *output_addr, hipStream_t cuda_stream);
b33f962d320ebcd8a350d6987c96dc29bd0fb2f3.cu
/** * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "eye_impl.cuh" #include <iostream> template <typename T> __global__ void EyeKernel(const size_t size, const size_t dim, T *output_addr) { for (size_t pointIdx = blockIdx.x * blockDim.x + threadIdx.x; pointIdx < (size); pointIdx += blockDim.x * gridDim.x) { size_t batchIdx = pointIdx / (dim * dim); size_t dst_x = (pointIdx - batchIdx * dim * dim) / dim; size_t dst_y = (pointIdx - batchIdx * dim * dim) % dim; if (dst_x == dst_y) { output_addr[pointIdx] = 1; } else { output_addr[pointIdx] = 0; } } } template <typename T> void Eye(const size_t size, const size_t dim, T *output_addr, cudaStream_t cuda_stream) { EyeKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, dim, output_addr); return; } template void Eye<float>(const size_t size, const size_t dim, float *output_addr, cudaStream_t cuda_stream);
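/* Host-side reference sketch (not part of the file above) of what EyeKernel writes:
 * `size` elements interpreted as consecutive dim x dim matrices, with 1 on each
 * matrix's diagonal and 0 elsewhere, using the same index arithmetic as the kernel's
 * grid-stride loop. */
#include <cstdio>
#include <vector>

int main() {
  const size_t dim = 3, batches = 2, size = batches * dim * dim;
  std::vector<float> out(size);
  for (size_t i = 0; i < size; ++i) {
    size_t in_mat = i % (dim * dim);            // offset inside one matrix
    size_t dst_x = in_mat / dim, dst_y = in_mat % dim;
    out[i] = (dst_x == dst_y) ? 1.0f : 0.0f;    // diagonal -> 1, else 0
  }
  for (size_t i = 0; i < size; ++i)
    printf("%g%c", out[i], ((i + 1) % dim == 0) ? '\n' : ' ');
  return 0;
}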
692c72e6bc6485a24fccc8693c3e1d8cd4b24ba6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../CUDA/CUDA_func.h" #include "GramLayer.h" using namespace std; namespace NN { namespace Layers { GramLayer::GramLayer(vector<int> dependencies, int vectors) { this->dependencies = dependencies; this->vectors = vectors; output_size = vectors * vectors; hipMallocManaged(&output, output_size * sizeof(float)); hipMallocManaged(&output_gradient, output_size * sizeof(float)); hipMemset(output, 0.0f, output_size * sizeof(float)); hipMemset(output_gradient, 0.0f, output_size * sizeof(float)); } void GramLayer::compute() { int output_size = vectors * vectors; int block_size = (output_size + 511) / 512; hipLaunchKernelGGL(( NN::CUDA::compute_gram_layer) , dim3(block_size), dim3(512) , 0, 0, input, output, input_size, output_size, vectors, vector_size); hipDeviceSynchronize(); } void GramLayer::backpropagate() { int output_size = vectors * vectors; int block_size = (input_size + 511) / 512; float denominator = float(vectors) * float(vector_size); denominator *= denominator; denominator = 1.0f; hipLaunchKernelGGL(( NN::CUDA::backprop_gram_layer), dim3(block_size), dim3(512) , 0, 0, input, input_gradient, output_gradient, input_size, output_size, vectors, vector_size, denominator); hipDeviceSynchronize(); } int GramLayer::get_parameters_size() { return 0; } void GramLayer::update_dependencies(vector<NN::Layers::Layer*> layer_dependencies) { input = layer_dependencies[0]->get_output_iterator(); input_gradient = layer_dependencies[0]->get_output_gradient_iterator(); input_size = layer_dependencies[0]->get_output_size(); vector_size = input_size / vectors; } void GramLayer::save(NN::File& file) { int id = 4; file.save(id); save_dependencies(file); file.save(vectors); } void GramLayer::load(NN::File& file) { load_dependencies(file); file.load(vectors); output_size = vectors * vectors; hipMallocManaged(&output, output_size * sizeof(float)); hipMallocManaged(&output_gradient, output_size * sizeof(float)); hipMemset(output, 0.0f, output_size * sizeof(float)); hipMemset(output_gradient, 0.0f, output_size * sizeof(float)); } GramLayer::~GramLayer() = default; } }
692c72e6bc6485a24fccc8693c3e1d8cd4b24ba6.cu
#include "../CUDA/CUDA_func.h" #include "GramLayer.h" using namespace std; namespace NN { namespace Layers { GramLayer::GramLayer(vector<int> dependencies, int vectors) { this->dependencies = dependencies; this->vectors = vectors; output_size = vectors * vectors; cudaMallocManaged(&output, output_size * sizeof(float)); cudaMallocManaged(&output_gradient, output_size * sizeof(float)); cudaMemset(output, 0.0f, output_size * sizeof(float)); cudaMemset(output_gradient, 0.0f, output_size * sizeof(float)); } void GramLayer::compute() { int output_size = vectors * vectors; int block_size = (output_size + 511) / 512; NN::CUDA::compute_gram_layer <<<block_size, 512 >>> (input, output, input_size, output_size, vectors, vector_size); cudaDeviceSynchronize(); } void GramLayer::backpropagate() { int output_size = vectors * vectors; int block_size = (input_size + 511) / 512; float denominator = float(vectors) * float(vector_size); denominator *= denominator; denominator = 1.0f; NN::CUDA::backprop_gram_layer<<<block_size, 512 >>> (input, input_gradient, output_gradient, input_size, output_size, vectors, vector_size, denominator); cudaDeviceSynchronize(); } int GramLayer::get_parameters_size() { return 0; } void GramLayer::update_dependencies(vector<NN::Layers::Layer*> layer_dependencies) { input = layer_dependencies[0]->get_output_iterator(); input_gradient = layer_dependencies[0]->get_output_gradient_iterator(); input_size = layer_dependencies[0]->get_output_size(); vector_size = input_size / vectors; } void GramLayer::save(NN::File& file) { int id = 4; file.save(id); save_dependencies(file); file.save(vectors); } void GramLayer::load(NN::File& file) { load_dependencies(file); file.load(vectors); output_size = vectors * vectors; cudaMallocManaged(&output, output_size * sizeof(float)); cudaMallocManaged(&output_gradient, output_size * sizeof(float)); cudaMemset(output, 0.0f, output_size * sizeof(float)); cudaMemset(output_gradient, 0.0f, output_size * sizeof(float)); } GramLayer::~GramLayer() = default; } }
b3b5804ba8a71137bb77f0d69f79a03b8b901f27.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** ********************** CUDA Factor Project ************************************ ********************* 15-618 Spring 2015 CMU ********************************** ******************************************************************************* * * Authors: Harshavardhan Pandit ([email protected]) * Ravi Chandra Bandlamudi ([email protected]) * * qr.cu - Performs a QR factorization on the given input matrix using a blocked * version of the Householder transformation algorithm * * Reference: ************ * Kerr, Andrew, Dan Campbell, and Mark Richards. "QR decomposition on * GPUs" Proceedings of 2nd Workshop on General Purpose Processing on Graphics * Processing Units. ACM, 2009 ******************************************************************************/ #include <stdio.h> #include <time.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include "qr.h" /****************************************************************************** * cudaFactorQR : Sets up device memory to perform QR factorization and * measures performance in terms of wall-clock time ******************************************************************************/ void cudaFactorQR(double *input) { /* Set up inputs */ double *inputDevice; cudacall(hipMalloc((void **)&inputDevice, sizeof(double) * M * N)); cudacall(hipMemcpy((void *)inputDevice, (const void *) input, (M * N) * sizeof(double), hipMemcpyHostToDevice)); clock_t tic = clock(); /* Perform QR factorization */ cudaFactorQRRoutine(inputDevice); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); clock_t toc = clock(); printf("\nCUDAFactor time: %f seconds\n\n", ((double)(toc - tic)) / CLOCKS_PER_SEC); /* Copy output to host memory */ cudacall(hipMemcpy((void *)input, (const void *)inputDevice, (M * N) * sizeof(double), hipMemcpyDeviceToHost)); } /****************************************************************************** * cudaFactorQRRoutine : Uses blocked version of the Householder * transformation algorithm to compute the QR * factorization ******************************************************************************/ void cudaFactorQRRoutine(double *input) { /* CUBLAS returns a single matrix as the QR Factorization R/input is the upper triangular matrix and U is the lower triangular matrix */ double *U; cudacall(hipMalloc((void **)&U, sizeof(double) * M * N)); cudacall(hipMemset(U, 0, sizeof(double) * M * N)); #ifdef DEBUG clock_t tic, toc; double timeHouseholder = 0.0f; double timePartialColumn = 0.0f; double timeLowerTriangular = 0.0f; double timeScalarMultipliedInput = 0.0f; double timeAllMatrixMultiplications = 0.0f; double timeUpdateInput = 0.0f; double timeConcatHouseholderVectors = 0.0f; double timeComputeYW = 0.0f; double timeCurrentWY = 0.0f; double timeExtractVnew = 0.0f; double timeComputezWY = 0.0f; double timeComputeWprime = 0.0f; double timePartialInput = 0.0f; double timeFinalInputUpdate = 0.0f; double timeMergeLowerUpper = 0.0f; double timeCudaMalloc = 0.0f; #endif /* N: Number of columns, tileSize: Number of columns in one block */ for (int tileIndex=0; tileIndex < N/tileSize; tileIndex++) { int tileStartColumn = tileIndex*tileSize; double *V, *W, *Y, *B; double *BHost; /* B and BHost store scalars */ BHost = (double *)malloc(sizeof(double) * tileSize); memset(BHost, 0, sizeof(double) * tileSize); #ifdef DEBUG tic = clock(); #endif 
cudacall(hipMalloc((void **)&V, sizeof(double) * (M-tileIndex*tileSize) * tileSize)); cudacall(hipMalloc((void **)&W, sizeof(double) * (M-tileIndex*tileSize) * tileSize)); cudacall(hipMalloc((void **)&Y, sizeof(double) * (M-tileIndex*tileSize) * tileSize)); cudacall(hipMalloc((void **)&B, sizeof(double) * tileSize)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif /* Loop within the columns inside a block */ for (int columnInTile=0; columnInTile<tileSize; columnInTile++) { int columnIndex = tileStartColumn + columnInTile; double *x, *v; double *vVprime, *vprime; #ifdef DEBUG tic = clock(); #endif cudacall(hipMalloc((void **)&x, sizeof(double) * (M-columnIndex))); cudacall(hipMalloc((void **)&v, sizeof(double) * (M-columnIndex))); cudacall(hipMalloc((void **)&vVprime, sizeof(double) * (M-columnIndex) * (M-columnIndex))); cudacall(hipMalloc((void **)&vprime, sizeof(double) * (M-columnIndex))); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif dim3 blockDim(BLOCK_SIZE, 1); dim3 gridDim((M - columnIndex + blockDim.x - 1) / blockDim.x, 1); #ifdef DEBUG tic = clock(); #endif /* Choose a part x of the current column within the input matrix */ hipLaunchKernelGGL(( kernelPartialColumn), dim3(gridDim), dim3(blockDim), 0, 0, input, x, columnIndex); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timePartialColumn += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* Perform a householder transformation on the part of the current column - this is done on the host side, can also be done on the device using parallel exclusive scan, the bottleneck however are the matrix multiplications */ /* v is a householder vector */ double *vHost = (double *)malloc(sizeof(double) * (M-columnIndex)); double *xHost = (double *)malloc(sizeof(double) * (M-columnIndex)); cudacall(hipMemcpy((void *)xHost, (const void *)x, (M-columnIndex) * sizeof(double), hipMemcpyDeviceToHost)); householder(xHost, vHost, M-columnIndex, BHost, columnInTile); cudacall(hipMemcpy((void *)v, (const void *)vHost, (M-columnIndex) * sizeof(double), hipMemcpyHostToDevice)); cudacall(hipMemcpy((void *)B, (const void *)BHost, tileSize * sizeof(double), hipMemcpyHostToDevice)); #ifdef DEBUG toc = clock(); timeHouseholder += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* Use output of householder to fill in the lower triangular part of the result matrix */ hipLaunchKernelGGL(( kernelLowerTriangular), dim3(gridDim), dim3(blockDim), 0, 0, U, v, vprime, columnIndex); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeLowerTriangular += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* Using output of householder to update input takes several intermediate steps */ double *scalarMultipliedInput, *productBetaVVprimeInput; cudacall(hipMalloc((void **)&scalarMultipliedInput, sizeof(double)* (M-columnIndex) * (tileStartColumn + tileSize - columnIndex))); cudacall(hipMalloc((void **)&productBetaVVprimeInput, sizeof(double) * (M-columnIndex) * (tileStartColumn+tileSize-columnIndex))); dim3 blockDim2D(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDim2D((M - columnIndex + blockDim2D.x - 1) / blockDim2D.x, (tileStartColumn+tileSize-columnIndex + blockDim2D.y)/ blockDim2D.y); /* Scalar multiplied input */ hipLaunchKernelGGL(( kernelScalarMultipliedInput), dim3(gridDim2D), dim3(blockDim2D), 0, 0, scalarMultipliedInput, input, B, columnInTile, 
columnIndex, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeScalarMultipliedInput += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* Perform vv' */ matrixMultiplyDevice(v,M-columnIndex,1, vprime, 1, M-columnIndex, vVprime); /* productBetaVVprimeInput = vv'scalarMultipliedInput */ matrixMultiplyDevice(vVprime, M-columnIndex, M-columnIndex, scalarMultipliedInput, M-columnIndex, tileStartColumn+tileSize-columnIndex, productBetaVVprimeInput); #ifdef DEBUG toc = clock(); timeAllMatrixMultiplications += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* input = input - productBetaVVprimeInput */ /* input = input - beta*vv'*input */ hipLaunchKernelGGL(( kernelUpdateInput), dim3(gridDim2D), dim3(blockDim2D), 0, 0, input, productBetaVVprimeInput, columnIndex, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeUpdateInput += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif dim3 gridDimConcat((M - tileIndex*tileSize + blockDim.x - 1) / blockDim.x, 1); #ifdef DEBUG tic = clock(); #endif /* Store v in accumulative V variable */ hipLaunchKernelGGL(( kernelConcatHouseholderVectors), dim3(gridDimConcat), dim3(blockDim), 0, 0, v, V, columnInTile, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeConcatHouseholderVectors += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif /* De-allocate memory on host */ free(vHost); free(xHost); #ifdef DEBUG tic = clock(); #endif /* De-allocate memory on device */ cudacall(hipFree(x)); cudacall(hipFree(v)); cudacall(hipFree(vprime)); cudacall(hipFree(vVprime)); cudacall(hipFree(scalarMultipliedInput)); cudacall(hipFree(productBetaVVprimeInput)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif } free(BHost); dim3 blockDim(BLOCK_SIZE, 1); dim3 gridDim((M - tileIndex*tileSize + blockDim.x - 1) / blockDim.x, 1); #ifdef DEBUG tic = clock(); #endif /* Y = V(:,0) W = -B(0)*V(:,0) */ hipLaunchKernelGGL(( kernelComputeYW), dim3(gridDim), dim3(blockDim), 0, 0, Y, W, V, B, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeComputeYW += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif double *vNew, *z; #ifdef DEBUG tic = clock(); #endif cudacall(hipMalloc((void **)&vNew, sizeof(double) * (M-tileIndex*tileSize))); cudacall(hipMalloc((void **)&z, sizeof(double) * (M-tileIndex*tileSize))); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif for (int columnInTile=1; columnInTile<tileSize; columnInTile++) { #ifdef DEBUG tic = clock(); #endif double *currentW, *currentYPrime; cudacall(hipMalloc((void **)&currentW, sizeof(double) * (M-tileIndex*tileSize) * columnInTile)); cudacall(hipMalloc((void **)&currentYPrime, sizeof(double) * (M-tileIndex*tileSize) * columnInTile)); dim3 blockDim2D(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDim2D(((M-tileIndex*tileSize)+ blockDim2D.x - 1)/blockDim2D.x, (columnInTile + blockDim2D.y)/ blockDim2D.y); /* Store W and Y in currentW & currentYPrime */ hipLaunchKernelGGL(( kernelCurrentWY), dim3(gridDim2D), dim3(blockDim2D), 0, 0, currentW, currentYPrime, Y, W, columnInTile, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeCurrentWY += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif dim3 blockDim(BLOCK_SIZE, 1); dim3 
gridDim((M - tileIndex*tileSize + blockDim.x - 1) / blockDim.x, 1); #ifdef DEBUG tic = clock(); #endif /* vNew = V(:,columnInTile) */ hipLaunchKernelGGL(( kernelExtractVnew), dim3(gridDim), dim3(blockDim), 0, 0, vNew, V, columnInTile, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeExtractVnew += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif double *productWYprime, *productWYprimeVnew; cudacall(hipMalloc((void **)&productWYprime, sizeof(double) * (M-tileIndex*tileSize) * (M-tileIndex*tileSize))); cudacall(hipMalloc((void **)&productWYprimeVnew, sizeof(double) * (M-tileIndex*tileSize) * 1 )); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* productWYprime = WY' */ matrixMultiplyDevice(currentW, M-tileIndex*tileSize, columnInTile, currentYPrime, columnInTile, M-tileIndex*tileSize, productWYprime); /* productWYprimeVnew = WY'vNew*/ matrixMultiplyDevice(productWYprime, (M-tileIndex*tileSize), (M-tileIndex*tileSize), vNew, (M-tileIndex*tileSize), 1, productWYprimeVnew); #ifdef DEBUG toc = clock(); timeAllMatrixMultiplications += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* z = -B(columnInTile)vNew - B(columnInTile)*WY'*vNew W = [W z] Y = [Y vNew] */ hipLaunchKernelGGL(( kernelComputezWY), dim3(gridDim), dim3(blockDim), 0, 0, z, W, Y, vNew, B, productWYprimeVnew, columnInTile, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeComputezWY += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif cudacall(hipFree(currentW)); cudacall(hipFree(currentYPrime)); cudacall(hipFree(productWYprime)); cudacall(hipFree(productWYprimeVnew)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif } #ifdef DEBUG tic = clock(); #endif cudacall(hipFree(vNew)); cudacall(hipFree(z)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif double *Wprime, *productYWprime; #ifdef DEBUG tic = clock(); #endif cudacall(hipMalloc((void **)&Wprime, sizeof(double) * (M-tileIndex*tileSize) * tileSize)); cudacall(hipMalloc((void **)&productYWprime, sizeof(double) * (M-tileIndex*tileSize) * (M-tileIndex*tileSize))); dim3 blockDim2D(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDim2D(( (M-tileIndex*tileSize)+ blockDim2D.x - 1) / blockDim2D.x, (tileSize + blockDim2D.y)/ blockDim2D.y); hipLaunchKernelGGL(( kernelComputeWprime), dim3(gridDim2D), dim3(blockDim2D), 0, 0, W, Wprime, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeComputeWprime += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* YW' */ matrixMultiplyDevice(Y, M-tileIndex*tileSize, tileSize, Wprime, tileSize, M-tileIndex*tileSize, productYWprime); #ifdef DEBUG toc = clock(); timeAllMatrixMultiplications += ((double)(toc - tic))/CLOCKS_PER_SEC; tic = clock(); #endif double *partialInput, *productYWprimePartialInput; cudacall(hipMalloc((void **)&partialInput, sizeof(double) * (M-tileIndex*tileSize) * (N - tileStartColumn - tileSize) )); cudacall(hipMalloc((void **)&productYWprimePartialInput, sizeof(double) * (M-tileIndex*tileSize) * (N - tileStartColumn - tileSize) )); dim3 blockDimInput(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDimInput(( (M-tileIndex*tileSize)+ blockDimInput.x - 1) / blockDimInput.x, ((N - tileStartColumn - tileSize) + blockDimInput.y)/ blockDimInput.y); /* A part of the 
input be used to update input matrix */ hipLaunchKernelGGL(( kernelPartialInput), dim3(gridDimInput), dim3(blockDimInput), 0, 0, partialInput, input, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timePartialInput += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* productYWprimePartialInput = YW'partialInput */ matrixMultiplyDevice(productYWprime, (M-tileIndex*tileSize), (M-tileIndex*tileSize), partialInput, (M-tileIndex*tileSize), (N-tileStartColumn-tileSize), productYWprimePartialInput); #ifdef DEBUG toc = clock(); timeAllMatrixMultiplications += ((double)(toc - tic))/CLOCKS_PER_SEC; #endif #ifdef DEBUG tic = clock(); #endif /* Update input matrix input = input + productYWprimePartialInput */ hipLaunchKernelGGL(( kernelFinalInputUpdate), dim3(gridDimInput), dim3(blockDimInput), 0, 0, productYWprimePartialInput, input, tileStartColumn); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeFinalInputUpdate += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif cudacall(hipFree(Wprime)); cudacall(hipFree(productYWprime)); cudacall(hipFree(V)); cudacall(hipFree(W)); cudacall(hipFree(Y)); cudacall(hipFree(B)); cudacall(hipFree(productYWprimePartialInput)); cudacall(hipFree(partialInput)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif } dim3 blockDimFinal(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDimFinal(( M+ blockDimFinal.x - 1) / blockDimFinal.x, (N + blockDimFinal.y)/ blockDimFinal.y); #ifdef DEBUG tic = clock(); #endif /* Combine upper and lower triangular matrices to get final result matrix */ hipLaunchKernelGGL(( kernelMergeLowerUpper), dim3(gridDimFinal), dim3(blockDimFinal), 0, 0, input, U); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeMergeLowerUpper += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif cudacall(hipFree(U)); /* Print timing information if DEBUG on */ #ifdef DEBUG printf("\n\n************** Timing Summary **************** \n\n"); printf("Module\t\tTime taken\n"); printf("House\t\t%lf\n", timeHouseholder); printf("compute x\t%lf\n", timePartialColumn); printf("compute U\t%lf\n", timeLowerTriangular); printf("computeRtmp\t%lf\n", timeScalarMultipliedInput); printf("modifyR\t\t%lf\n", timeUpdateInput); printf("productWYprime %lf\n", timeConcatHouseholderVectors); printf("YW\t\t%lf\n", timeComputeYW); printf("curr\t\t%lf\n", timeCurrentWY); printf("vnew\t\t%lf\n", timeExtractVnew); printf("zWY\t\t%lf\n", timeComputezWY); printf("W prime\t\t%lf\n", timeComputeWprime); printf("Rtmp1\t\t%lf\n", timePartialInput); printf("Radd\t\t%lf\n", timeFinalInputUpdate); printf("Merge RU\t%lf\n", timeMergeLowerUpper); printf("Matrix mults\t%lf\n", timeAllMatrixMultiplications); printf("Memory ops\t%lf\n", timeCudaMalloc); #endif } /****************************************************************************** * computeNorm : computes the l2 norm of a vector ******************************************************************************/ double computeNorm(double *x, int lengthX) { double norm = 0.0f; for (int i=0; i < lengthX; i++) { norm += x[i]*x[i]; } return(sqrt(norm)); } /****************************************************************************** * householder : performs the householder transformation on given input vector ******************************************************************************/ void householder(double *x, double *v, int lengthX, 
double *B, int columnInTile) { double beta; if (lengthX == 1) { v[0] = 1; beta = 0.0f; } else { double *e = (double *)malloc(sizeof(double) * lengthX); memset(e, 0, sizeof(double) * lengthX); e[0] = 1; double norm = computeNorm(x, lengthX); double sign = 0.0f; if (x[0] > 0) sign = 1.0f; if (x[0] < 0) sign = -1.0f; for (int i=0; i < lengthX; i++) { v[i] = x[i] + e[i] * norm * sign; } double normV = computeNorm(v, lengthX); double scalar = normV * normV; beta = (double) 2/scalar; } B[columnInTile] = beta; } /****************************************************************************** * matrixMultiplyDevice : Wrapper for the matrix multiplication kernel ******************************************************************************/ void matrixMultiplyDevice(double *a, int rowsA, int colsA, double *b, int rowsB, int colsB, double *c) { dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDim((colsB + blockDim.x - 1) / blockDim.x, (rowsA + blockDim.y - 1) / blockDim.y); hipLaunchKernelGGL(( kernelSharedMemMatMult), dim3(gridDim),dim3(blockDim), 0, 0, a, rowsA, colsA, b, rowsB, colsB, c); cudacall(hipDeviceSynchronize()); cudacall(hipDeviceSynchronize()); }
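Read together, the comments in the routine above spell out the blocked Householder/WY scheme it implements: each column gets a reflector and a rank-1 correction, each tile accumulates its reflectors into W and Y, and a single blocked product updates the trailing columns. As a compact summary of those same comment formulas (notation only; no algorithmic detail beyond what the comments already state):

\begin{align*}
v &= x + \operatorname{sign}(x_1)\,\lVert x \rVert_2\, e_1,
  & \beta &= \frac{2}{\lVert v \rVert_2^2}, \\
A &\leftarrow A - \beta\, v\,(v^{T} A)
  && \text{(per-column update)}, \\
z_j &= -\beta_j v_j - \beta_j\, W\,(Y^{T} v_j),
  & W &\leftarrow [\,W \;\; z_j\,], \quad Y \leftarrow [\,Y \;\; v_j\,], \\
A_{\mathrm{trail}} &\leftarrow A_{\mathrm{trail}} + Y\,(W^{T} A_{\mathrm{trail}})
  && \text{(per-tile update)}.
\end{align*}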
b3b5804ba8a71137bb77f0d69f79a03b8b901f27.cu
/****************************************************************************** ********************** CUDA Factor Project ************************************ ********************* 15-618 Spring 2015 CMU ********************************** ******************************************************************************* * * Authors: Harshavardhan Pandit ([email protected]) * Ravi Chandra Bandlamudi ([email protected]) * * qr.cu - Performs a QR factorization on the given input matrix using a blocked * version of the Householder transformation algorithm * * Reference: ************ * Kerr, Andrew, Dan Campbell, and Mark Richards. "QR decomposition on * GPUs" Proceedings of 2nd Workshop on General Purpose Processing on Graphics * Processing Units. ACM, 2009 ******************************************************************************/ #include <stdio.h> #include <time.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <cuda_runtime.h> #include "qr.h" /****************************************************************************** * cudaFactorQR : Sets up device memory to perform QR factorization and * measures performance in terms of wall-clock time ******************************************************************************/ void cudaFactorQR(double *input) { /* Set up inputs */ double *inputDevice; cudacall(cudaMalloc((void **)&inputDevice, sizeof(double) * M * N)); cudacall(cudaMemcpy((void *)inputDevice, (const void *) input, (M * N) * sizeof(double), cudaMemcpyHostToDevice)); clock_t tic = clock(); /* Perform QR factorization */ cudaFactorQRRoutine(inputDevice); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); clock_t toc = clock(); printf("\nCUDAFactor time: %f seconds\n\n", ((double)(toc - tic)) / CLOCKS_PER_SEC); /* Copy output to host memory */ cudacall(cudaMemcpy((void *)input, (const void *)inputDevice, (M * N) * sizeof(double), cudaMemcpyDeviceToHost)); } /****************************************************************************** * cudaFactorQRRoutine : Uses blocked version of the Householder * transformation algorithm to compute the QR * factorization ******************************************************************************/ void cudaFactorQRRoutine(double *input) { /* CUBLAS returns a single matrix as the QR Factorization R/input is the upper triangular matrix and U is the lower triangular matrix */ double *U; cudacall(cudaMalloc((void **)&U, sizeof(double) * M * N)); cudacall(cudaMemset(U, 0, sizeof(double) * M * N)); #ifdef DEBUG clock_t tic, toc; double timeHouseholder = 0.0f; double timePartialColumn = 0.0f; double timeLowerTriangular = 0.0f; double timeScalarMultipliedInput = 0.0f; double timeAllMatrixMultiplications = 0.0f; double timeUpdateInput = 0.0f; double timeConcatHouseholderVectors = 0.0f; double timeComputeYW = 0.0f; double timeCurrentWY = 0.0f; double timeExtractVnew = 0.0f; double timeComputezWY = 0.0f; double timeComputeWprime = 0.0f; double timePartialInput = 0.0f; double timeFinalInputUpdate = 0.0f; double timeMergeLowerUpper = 0.0f; double timeCudaMalloc = 0.0f; #endif /* N: Number of columns, tileSize: Number of columns in one block */ for (int tileIndex=0; tileIndex < N/tileSize; tileIndex++) { int tileStartColumn = tileIndex*tileSize; double *V, *W, *Y, *B; double *BHost; /* B and BHost store scalars */ BHost = (double *)malloc(sizeof(double) * tileSize); memset(BHost, 0, sizeof(double) * tileSize); #ifdef DEBUG tic = clock(); #endif cudacall(cudaMalloc((void **)&V, sizeof(double) * (M-tileIndex*tileSize) * 
tileSize)); cudacall(cudaMalloc((void **)&W, sizeof(double) * (M-tileIndex*tileSize) * tileSize)); cudacall(cudaMalloc((void **)&Y, sizeof(double) * (M-tileIndex*tileSize) * tileSize)); cudacall(cudaMalloc((void **)&B, sizeof(double) * tileSize)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif /* Loop within the columns inside a block */ for (int columnInTile=0; columnInTile<tileSize; columnInTile++) { int columnIndex = tileStartColumn + columnInTile; double *x, *v; double *vVprime, *vprime; #ifdef DEBUG tic = clock(); #endif cudacall(cudaMalloc((void **)&x, sizeof(double) * (M-columnIndex))); cudacall(cudaMalloc((void **)&v, sizeof(double) * (M-columnIndex))); cudacall(cudaMalloc((void **)&vVprime, sizeof(double) * (M-columnIndex) * (M-columnIndex))); cudacall(cudaMalloc((void **)&vprime, sizeof(double) * (M-columnIndex))); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif dim3 blockDim(BLOCK_SIZE, 1); dim3 gridDim((M - columnIndex + blockDim.x - 1) / blockDim.x, 1); #ifdef DEBUG tic = clock(); #endif /* Choose a part x of the current column within the input matrix */ kernelPartialColumn<<<gridDim, blockDim>>>(input, x, columnIndex); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timePartialColumn += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* Perform a householder transformation on the part of the current column - this is done on the host side, can also be done on the device using parallel exclusive scan, the bottleneck however are the matrix multiplications */ /* v is a householder vector */ double *vHost = (double *)malloc(sizeof(double) * (M-columnIndex)); double *xHost = (double *)malloc(sizeof(double) * (M-columnIndex)); cudacall(cudaMemcpy((void *)xHost, (const void *)x, (M-columnIndex) * sizeof(double), cudaMemcpyDeviceToHost)); householder(xHost, vHost, M-columnIndex, BHost, columnInTile); cudacall(cudaMemcpy((void *)v, (const void *)vHost, (M-columnIndex) * sizeof(double), cudaMemcpyHostToDevice)); cudacall(cudaMemcpy((void *)B, (const void *)BHost, tileSize * sizeof(double), cudaMemcpyHostToDevice)); #ifdef DEBUG toc = clock(); timeHouseholder += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* Use output of householder to fill in the lower triangular part of the result matrix */ kernelLowerTriangular<<<gridDim, blockDim>>>(U, v, vprime, columnIndex); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeLowerTriangular += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* Using output of householder to update input takes several intermediate steps */ double *scalarMultipliedInput, *productBetaVVprimeInput; cudacall(cudaMalloc((void **)&scalarMultipliedInput, sizeof(double)* (M-columnIndex) * (tileStartColumn + tileSize - columnIndex))); cudacall(cudaMalloc((void **)&productBetaVVprimeInput, sizeof(double) * (M-columnIndex) * (tileStartColumn+tileSize-columnIndex))); dim3 blockDim2D(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDim2D((M - columnIndex + blockDim2D.x - 1) / blockDim2D.x, (tileStartColumn+tileSize-columnIndex + blockDim2D.y)/ blockDim2D.y); /* Scalar multiplied input */ kernelScalarMultipliedInput<<<gridDim2D, blockDim2D>>> (scalarMultipliedInput, input, B, columnInTile, columnIndex, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeScalarMultipliedInput += ((double)(toc - 
tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* Perform vv' */ matrixMultiplyDevice(v,M-columnIndex,1, vprime, 1, M-columnIndex, vVprime); /* productBetaVVprimeInput = vv'scalarMultipliedInput */ matrixMultiplyDevice(vVprime, M-columnIndex, M-columnIndex, scalarMultipliedInput, M-columnIndex, tileStartColumn+tileSize-columnIndex, productBetaVVprimeInput); #ifdef DEBUG toc = clock(); timeAllMatrixMultiplications += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* input = input - productBetaVVprimeInput */ /* input = input - beta*vv'*input */ kernelUpdateInput<<<gridDim2D, blockDim2D>>> (input, productBetaVVprimeInput, columnIndex, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeUpdateInput += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif dim3 gridDimConcat((M - tileIndex*tileSize + blockDim.x - 1) / blockDim.x, 1); #ifdef DEBUG tic = clock(); #endif /* Store v in accumulative V variable */ kernelConcatHouseholderVectors<<<gridDimConcat, blockDim>>> (v, V, columnInTile, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeConcatHouseholderVectors += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif /* De-allocate memory on host */ free(vHost); free(xHost); #ifdef DEBUG tic = clock(); #endif /* De-allocate memory on device */ cudacall(cudaFree(x)); cudacall(cudaFree(v)); cudacall(cudaFree(vprime)); cudacall(cudaFree(vVprime)); cudacall(cudaFree(scalarMultipliedInput)); cudacall(cudaFree(productBetaVVprimeInput)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif } free(BHost); dim3 blockDim(BLOCK_SIZE, 1); dim3 gridDim((M - tileIndex*tileSize + blockDim.x - 1) / blockDim.x, 1); #ifdef DEBUG tic = clock(); #endif /* Y = V(:,0) W = -B(0)*V(:,0) */ kernelComputeYW<<<gridDim, blockDim>>>(Y, W, V, B, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeComputeYW += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif double *vNew, *z; #ifdef DEBUG tic = clock(); #endif cudacall(cudaMalloc((void **)&vNew, sizeof(double) * (M-tileIndex*tileSize))); cudacall(cudaMalloc((void **)&z, sizeof(double) * (M-tileIndex*tileSize))); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif for (int columnInTile=1; columnInTile<tileSize; columnInTile++) { #ifdef DEBUG tic = clock(); #endif double *currentW, *currentYPrime; cudacall(cudaMalloc((void **)&currentW, sizeof(double) * (M-tileIndex*tileSize) * columnInTile)); cudacall(cudaMalloc((void **)&currentYPrime, sizeof(double) * (M-tileIndex*tileSize) * columnInTile)); dim3 blockDim2D(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDim2D(((M-tileIndex*tileSize)+ blockDim2D.x - 1)/blockDim2D.x, (columnInTile + blockDim2D.y)/ blockDim2D.y); /* Store W and Y in currentW & currentYPrime */ kernelCurrentWY<<<gridDim2D, blockDim2D>>> (currentW, currentYPrime, Y, W, columnInTile, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeCurrentWY += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif dim3 blockDim(BLOCK_SIZE, 1); dim3 gridDim((M - tileIndex*tileSize + blockDim.x - 1) / blockDim.x, 1); #ifdef DEBUG tic = clock(); #endif /* vNew = V(:,columnInTile) */ kernelExtractVnew<<<gridDim, blockDim>>> (vNew, V, columnInTile, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = 
clock(); timeExtractVnew += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif double *productWYprime, *productWYprimeVnew; cudacall(cudaMalloc((void **)&productWYprime, sizeof(double) * (M-tileIndex*tileSize) * (M-tileIndex*tileSize))); cudacall(cudaMalloc((void **)&productWYprimeVnew, sizeof(double) * (M-tileIndex*tileSize) * 1 )); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* productWYprime = WY' */ matrixMultiplyDevice(currentW, M-tileIndex*tileSize, columnInTile, currentYPrime, columnInTile, M-tileIndex*tileSize, productWYprime); /* productWYprimeVnew = WY'vNew*/ matrixMultiplyDevice(productWYprime, (M-tileIndex*tileSize), (M-tileIndex*tileSize), vNew, (M-tileIndex*tileSize), 1, productWYprimeVnew); #ifdef DEBUG toc = clock(); timeAllMatrixMultiplications += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* z = -B(columnInTile)vNew - B(columnInTile)*WY'*vNew W = [W z] Y = [Y vNew] */ kernelComputezWY<<<gridDim, blockDim>>> (z, W, Y, vNew, B, productWYprimeVnew, columnInTile, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeComputezWY += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif cudacall(cudaFree(currentW)); cudacall(cudaFree(currentYPrime)); cudacall(cudaFree(productWYprime)); cudacall(cudaFree(productWYprimeVnew)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif } #ifdef DEBUG tic = clock(); #endif cudacall(cudaFree(vNew)); cudacall(cudaFree(z)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif double *Wprime, *productYWprime; #ifdef DEBUG tic = clock(); #endif cudacall(cudaMalloc((void **)&Wprime, sizeof(double) * (M-tileIndex*tileSize) * tileSize)); cudacall(cudaMalloc((void **)&productYWprime, sizeof(double) * (M-tileIndex*tileSize) * (M-tileIndex*tileSize))); dim3 blockDim2D(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDim2D(( (M-tileIndex*tileSize)+ blockDim2D.x - 1) / blockDim2D.x, (tileSize + blockDim2D.y)/ blockDim2D.y); kernelComputeWprime<<<gridDim2D, blockDim2D>>>(W, Wprime, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeComputeWprime += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* YW' */ matrixMultiplyDevice(Y, M-tileIndex*tileSize, tileSize, Wprime, tileSize, M-tileIndex*tileSize, productYWprime); #ifdef DEBUG toc = clock(); timeAllMatrixMultiplications += ((double)(toc - tic))/CLOCKS_PER_SEC; tic = clock(); #endif double *partialInput, *productYWprimePartialInput; cudacall(cudaMalloc((void **)&partialInput, sizeof(double) * (M-tileIndex*tileSize) * (N - tileStartColumn - tileSize) )); cudacall(cudaMalloc((void **)&productYWprimePartialInput, sizeof(double) * (M-tileIndex*tileSize) * (N - tileStartColumn - tileSize) )); dim3 blockDimInput(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDimInput(( (M-tileIndex*tileSize)+ blockDimInput.x - 1) / blockDimInput.x, ((N - tileStartColumn - tileSize) + blockDimInput.y)/ blockDimInput.y); /* A part of the input be used to update input matrix */ kernelPartialInput<<<gridDimInput, blockDimInput>>> (partialInput, input, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timePartialInput += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif /* productYWprimePartialInput = YW'partialInput */ matrixMultiplyDevice(productYWprime, 
(M-tileIndex*tileSize), (M-tileIndex*tileSize), partialInput, (M-tileIndex*tileSize), (N-tileStartColumn-tileSize), productYWprimePartialInput); #ifdef DEBUG toc = clock(); timeAllMatrixMultiplications += ((double)(toc - tic))/CLOCKS_PER_SEC; #endif #ifdef DEBUG tic = clock(); #endif /* Update input matrix input = input + productYWprimePartialInput */ kernelFinalInputUpdate<<<gridDimInput, blockDimInput>>> (productYWprimePartialInput, input, tileStartColumn); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeFinalInputUpdate += ((double)(toc - tic)) / CLOCKS_PER_SEC; tic = clock(); #endif cudacall(cudaFree(Wprime)); cudacall(cudaFree(productYWprime)); cudacall(cudaFree(V)); cudacall(cudaFree(W)); cudacall(cudaFree(Y)); cudacall(cudaFree(B)); cudacall(cudaFree(productYWprimePartialInput)); cudacall(cudaFree(partialInput)); #ifdef DEBUG toc = clock(); timeCudaMalloc += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif } dim3 blockDimFinal(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDimFinal(( M+ blockDimFinal.x - 1) / blockDimFinal.x, (N + blockDimFinal.y)/ blockDimFinal.y); #ifdef DEBUG tic = clock(); #endif /* Combine upper and lower triangular matrices to get final result matrix */ kernelMergeLowerUpper<<<gridDimFinal, blockDimFinal>>>(input, U); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); #ifdef DEBUG toc = clock(); timeMergeLowerUpper += ((double)(toc - tic)) / CLOCKS_PER_SEC; #endif cudacall(cudaFree(U)); /* Print timing information if DEBUG on */ #ifdef DEBUG printf("\n\n************** Timing Summary **************** \n\n"); printf("Module\t\tTime taken\n"); printf("House\t\t%lf\n", timeHouseholder); printf("compute x\t%lf\n", timePartialColumn); printf("compute U\t%lf\n", timeLowerTriangular); printf("computeRtmp\t%lf\n", timeScalarMultipliedInput); printf("modifyR\t\t%lf\n", timeUpdateInput); printf("productWYprime %lf\n", timeConcatHouseholderVectors); printf("YW\t\t%lf\n", timeComputeYW); printf("curr\t\t%lf\n", timeCurrentWY); printf("vnew\t\t%lf\n", timeExtractVnew); printf("zWY\t\t%lf\n", timeComputezWY); printf("W prime\t\t%lf\n", timeComputeWprime); printf("Rtmp1\t\t%lf\n", timePartialInput); printf("Radd\t\t%lf\n", timeFinalInputUpdate); printf("Merge RU\t%lf\n", timeMergeLowerUpper); printf("Matrix mults\t%lf\n", timeAllMatrixMultiplications); printf("Memory ops\t%lf\n", timeCudaMalloc); #endif } /****************************************************************************** * computeNorm : computes the l2 norm of a vector ******************************************************************************/ double computeNorm(double *x, int lengthX) { double norm = 0.0f; for (int i=0; i < lengthX; i++) { norm += x[i]*x[i]; } return(sqrt(norm)); } /****************************************************************************** * householder : performs the householder transformation on given input vector ******************************************************************************/ void householder(double *x, double *v, int lengthX, double *B, int columnInTile) { double beta; if (lengthX == 1) { v[0] = 1; beta = 0.0f; } else { double *e = (double *)malloc(sizeof(double) * lengthX); memset(e, 0, sizeof(double) * lengthX); e[0] = 1; double norm = computeNorm(x, lengthX); double sign = 0.0f; if (x[0] > 0) sign = 1.0f; if (x[0] < 0) sign = -1.0f; for (int i=0; i < lengthX; i++) { v[i] = x[i] + e[i] * norm * sign; } double normV = computeNorm(v, lengthX); double scalar = normV * normV; beta = (double) 2/scalar; } 
B[columnInTile] = beta; } /****************************************************************************** * matrixMultiplyDevice : Wrapper for the matrix multiplication kernel ******************************************************************************/ void matrixMultiplyDevice(double *a, int rowsA, int colsA, double *b, int rowsB, int colsB, double *c) { dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE); dim3 gridDim((colsB + blockDim.x - 1) / blockDim.x, (rowsA + blockDim.y - 1) / blockDim.y); kernelSharedMemMatMult<<<gridDim,blockDim>>>(a, rowsA, colsA, b, rowsB, colsB, c); cudacall(cudaThreadSynchronize()); cudacall(cudaDeviceSynchronize()); }
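Comparing the .cu listing above with its .hip counterpart earlier in the pair, the differences are mechanical: hipify rewrites every triple-chevron launch into a hipLaunchKernelGGL call and maps both cudaThreadSynchronize and cudaDeviceSynchronize to hipDeviceSynchronize, which is why the HIP version synchronizes twice after each launch. A minimal sketch of the two equivalent launch forms, using a hypothetical scaleKernel that is not part of this project:

#include <cuda_runtime.h>

/* Hypothetical kernel, only to illustrate the launch syntax. */
__global__ void scaleKernel(double *data, double s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

void launchScale(double *dData, double s, int n)
{
    dim3 blockDim(256, 1);
    dim3 gridDim((n + blockDim.x - 1) / blockDim.x, 1);

    /* CUDA form, as written in the .cu listing: */
    scaleKernel<<<gridDim, blockDim>>>(dData, s, n);
    cudaDeviceSynchronize();

    /* hipify emits the explicit-launch form instead (shared-memory bytes and
     * stream become explicit arguments), roughly:
     *   hipLaunchKernelGGL(scaleKernel, dim3(gridDim), dim3(blockDim), 0, 0,
     *                      dData, s, n);
     *   hipDeviceSynchronize();
     */
}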
c77acfa1dec9523d326861f706a57679b3e67efb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated c Wed Nov 14 22:53:50 2012 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, cuFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; cuFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; cuFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = cuConjf(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, cuFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; cuFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; cuFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = cuConjf(*dAT); // lower := upper dA += ldda; dAT += 1; } } } extern "C" void magmablas_csymmetrize_tiles( char uplo, magma_int_t m, cuFloatComplex *dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { /* Purpose ======= CSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix dA that is valid on input. = 'U': Upper triangular part = 'L': Lower triangular part M (input) INTEGER The number of rows of the matrix dA. M >= 0. dA (input/output) COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by m matrix dA. LDDA (input) INTEGER The leading dimension of the array dA. LDDA >= max(1,M). ===================================================================== */ if ( m == 0 || ntile == 0 ) return; assert( m >= 0 ); assert( ldda >= m ); assert( ldda >= (ntile - 1)*mstride + m ); assert( ntile >= 0 ); assert( mstride >= 0 ); assert( nstride >= 0 ); assert( mstride >= m || nstride >= m ); // prevent tile overlap dim3 threads( NB ); dim3 grid( ntile, (m + NB - 1)/NB ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( (uplo == 'U') || (uplo == 'u') ) { hipLaunchKernelGGL(( csymmetrize_tiles_upper), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda, mstride, nstride ); } else if ( (uplo == 'L') || (uplo == 'l') ) { hipLaunchKernelGGL(( csymmetrize_tiles_lower), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda, mstride, nstride ); } else { printf( "uplo has illegal value\n" ); exit(1); } }
c77acfa1dec9523d326861f706a57679b3e67efb.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated c Wed Nov 14 22:53:50 2012 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, cuFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; cuFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; cuFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = cuConjf(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, cuFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; cuFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; cuFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = cuConjf(*dAT); // lower := upper dA += ldda; dAT += 1; } } } extern "C" void magmablas_csymmetrize_tiles( char uplo, magma_int_t m, cuFloatComplex *dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { /* Purpose ======= CSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix dA that is valid on input. = 'U': Upper triangular part = 'L': Lower triangular part M (input) INTEGER The number of rows of the matrix dA. M >= 0. dA (input/output) COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by m matrix dA. LDDA (input) INTEGER The leading dimension of the array dA. LDDA >= max(1,M). ===================================================================== */ if ( m == 0 || ntile == 0 ) return; assert( m >= 0 ); assert( ldda >= m ); assert( ldda >= (ntile - 1)*mstride + m ); assert( ntile >= 0 ); assert( mstride >= 0 ); assert( nstride >= 0 ); assert( mstride >= m || nstride >= m ); // prevent tile overlap dim3 threads( NB ); dim3 grid( ntile, (m + NB - 1)/NB ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( (uplo == 'U') || (uplo == 'u') ) { csymmetrize_tiles_upper<<< grid, threads, 0, magma_stream >>>( m, dA, ldda, mstride, nstride ); } else if ( (uplo == 'L') || (uplo == 'l') ) { csymmetrize_tiles_lower<<< grid, threads, 0, magma_stream >>>( m, dA, ldda, mstride, nstride ); } else { printf( "uplo has illegal value\n" ); exit(1); } }
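Both kernels above assign one thread per matrix row: after the block pointer is shifted to its tile's top-left corner, thread i starts at the first element of row i and mirrors entries across the diagonal until it reaches column i. A simplified sketch of the same per-row indexing for a single real m-by-m column-major matrix (no tiling, no conjugation; symmetrizeLowerSketch is an illustrative name, not part of MAGMA):

#define NB 64

/* Copy the lower triangle of one m x m column-major matrix (leading
 * dimension ldda) into its upper triangle. One thread per row, as in
 * csymmetrize_tiles_lower above, minus the per-tile offset. */
__global__ void symmetrizeLowerSketch(int m, double *dA, int ldda)
{
    int i = blockIdx.x * NB + threadIdx.x;   /* row handled by this thread */
    if (i >= m) return;

    double *row    = dA + i;                 /* walks row i:    A(i,0), A(i,1), ... */
    double *col    = dA + i * ldda;          /* walks column i: A(0,i), A(1,i), ... */
    double *rowEnd = dA + i + i * ldda;      /* stops at the diagonal A(i,i)        */

    while (row < rowEnd) {
        *col = *row;                         /* upper := lower */
        row += ldda;
        col += 1;
    }
}
/* Launch, mirroring the grid shape above:
 *   symmetrizeLowerSketch<<<dim3((m + NB - 1)/NB), dim3(NB)>>>(m, dA, ldda);
 */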
f90cf42c52937462fa0e77197286055c08d7c185.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHTensorRandom.h" #include "THHGeneral.h" #include <thrust/functional.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_mtgp32_host.h> #include <rocrand/rocrand_mtgp32_11213.h> #define MAX_NUM_BLOCKS 64 #define BLOCK_SIZE 256 #ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif /* Sets up generator. Allocates but does not create the generator states. */ __host__ void initializeGenerator(Generator* gen) { THCudaCheck(hipMalloc((void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t))); THCudaCheck(hipMalloc((void**)&gen->kernel_params, sizeof(mtgp32_kernel_params_t))); if (hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != HIPRAND_STATUS_SUCCESS) { THError("Creating MTGP constants failed."); } } /* Frees memory allocated during setup. */ __host__ void destroyGenerator(Generator* gen) { if (gen->gen_states) { THCudaCheck(hipFree(gen->gen_states)); gen->gen_states = NULL; } if (gen->kernel_params) { THCudaCheck(hipFree(gen->kernel_params)); gen->kernel_params = NULL; } } /* Creates a new generator state given the seed. */ __host__ void createGeneratorState(Generator* gen, unsigned long seed) { if (hiprandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213, gen->kernel_params, MAX_NUM_BLOCKS, seed) != HIPRAND_STATUS_SUCCESS) { THError("Creating MTGP kernel state failed."); } } /* Initialize generator array (must be called before any other function) */ __host__ void THCRandom_init(THCudaRNGState* state, int devices, int current_device) { state->num_devices = devices; state->gen = (Generator*)malloc(state->num_devices * sizeof(Generator)); for (int i = 0; i < state->num_devices; ++i) { state->gen[i].initf = 0; state->gen[i].initial_seed = 0; state->gen[i].gen_states = NULL; state->gen[i].kernel_params = NULL; } state->current_gen = &state->gen[current_device]; // Initialize the generator for the current device. Other generators will be // initialized on-demand in THCRandom_setGenerator. 
initializeGenerator(state->current_gen); THCRandom_seed(state); } /* Destroy generators and free memory */ __host__ void THCRandom_shutdown(THCudaRNGState* state) { if (state->gen == NULL) return; for (int i = 0; i < state->num_devices; ++i) { destroyGenerator(&state->gen[i]); } free(state->gen); state->gen = NULL; state->current_gen = NULL; } /* Set the generator for the current device */ __host__ void THCRandom_setGenerator(THCudaRNGState* state, int device) { if (device >= state->num_devices) THError("Invalid device index."); state->current_gen = &state->gen[device]; if (state->current_gen->initf == 0) { initializeGenerator(state->current_gen); THCRandom_seed(state); } } /* Reset the generator for the current device after a device reset */ __host__ void THCRandom_resetGenerator(THCudaRNGState* state) { initializeGenerator(state->current_gen); THCRandom_manualSeed(state, state->current_gen->initial_seed); } /* Random seed */ __host__ unsigned long THCRandom_seed(THCudaRNGState* state) { unsigned long s = (unsigned long)time(0); THCRandom_manualSeed(state, s); return s; } __host__ unsigned long THCRandom_seedAll(THCudaRNGState* state) { unsigned long s = (unsigned long)time(0); THCRandom_manualSeedAll(state, s); return s; } /* Manually set the seed */ __host__ void THCRandom_manualSeed(THCudaRNGState* state, unsigned long seed) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } state->current_gen->initial_seed = seed; createGeneratorState(state->current_gen, seed); state->current_gen->initf = 1; } __host__ void THCRandom_manualSeedAll(THCudaRNGState* state, unsigned long seed) { int currentDevice; THCudaCheck(hipGetDevice(&currentDevice)); for (int i = 0; i < state->num_devices; ++i) { THCudaCheck(hipSetDevice(i)); THCRandom_setGenerator(state, i); THCRandom_manualSeed(state, seed); } THCudaCheck(hipSetDevice(currentDevice)); THCRandom_setGenerator(state, currentDevice); } /* Get the initial seed */ __host__ unsigned long THCRandom_initialSeed(THCudaRNGState* state) { return state->current_gen->initial_seed; } __host__ void THCRandom_getRNGState(THCudaRNGState* state, THByteTensor *rng_state) { // The RNG state comprises the MTPG32 states and the seed. 
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t); static const size_t seed_size = sizeof(unsigned long); static const size_t total_size = states_size + seed_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(hipMemcpy(THByteTensor_data(rng_state), state->current_gen->gen_states, states_size, hipMemcpyDeviceToHost)); memcpy(THByteTensor_data(rng_state) + states_size, &state->current_gen->initial_seed, seed_size); } __host__ void THCRandom_setRNGState(THCudaRNGState* state, THByteTensor *rng_state) { static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t); static const size_t seed_size = sizeof(unsigned long); static const size_t total_size = states_size + seed_size; THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(hipMemcpy(state->current_gen->gen_states, THByteTensor_data(rng_state), states_size, hipMemcpyHostToDevice)); memcpy(&state->current_gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size); } #define GENERATE_KERNEL1(NAME, ARG1, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(hiprandStateMtgp32_t *state, int size, float *result, ARG1) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ for (int i = idx; i < size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ float x = CURAND_FUNC(&state[blockIdx.x]); \ x = TRANSFORM; \ result[i] = x; \ } \ } #define GENERATE_KERNEL2(NAME, ARG1, ARG2, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(hiprandStateMtgp32_t *state, int size, float *result, ARG1, ARG2) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ for (int i = idx; i < size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ float x = CURAND_FUNC(&state[blockIdx.x]); \ x = TRANSFORM; \ result[i] = x; \ } \ } GENERATE_KERNEL2(generate_uniform, double a, double b, hiprand_uniform, x * (b-a) + a) GENERATE_KERNEL1(generate_bernoulli, double p, hiprand_uniform, (float)x <= p) GENERATE_KERNEL2(generate_normal, double mean, double stdv, hiprand_normal, (x * stdv) + mean) GENERATE_KERNEL1(generate_geometric, double p, hiprand_uniform, (log(1-x) / log(p)) + 1) GENERATE_KERNEL1(generate_exponential, double lambda, hiprand_uniform, (float)(-1. / lambda * log(1-x))) GENERATE_KERNEL2(generate_cauchy, double median, double sigma, hiprand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5)))) #undef GENERATE_KERNEL1 #undef GENERATE_KERNEL2 /* Separate kernel because hiprand_log_normal gets extra parameters. 
*/ __global__ void generate_log_normal(hiprandStateMtgp32_t *state, int size, float *result, float mean, float stddev) { int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; for (int i = idx; i < size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { result[i] = hiprand_log_normal(&state[blockIdx.x], mean, stddev); } } #define NUM_BLOCKS min((int)DIVUP(size, BLOCK_SIZE), MAX_NUM_BLOCKS) THC_API void THCudaTensor_uniform(THCudaRNGState* state, THCudaTensor *self_, double a, double b) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); hipLaunchKernelGGL(( generate_uniform), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->current_gen->gen_states, size, data, a, b); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_bernoulli(THCudaRNGState* state, THCudaTensor *self_, double p) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); hipLaunchKernelGGL(( generate_bernoulli), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->current_gen->gen_states, size, data, p); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_normal(THCudaRNGState* state, THCudaTensor *self_, double mean, double stdv) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); hipLaunchKernelGGL(( generate_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->current_gen->gen_states, size, data, mean, stdv); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_logNormal(THCudaRNGState* state, THCudaTensor *self_, double mean, double stdv) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); hipLaunchKernelGGL(( generate_log_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->current_gen->gen_states, size, data, mean, stdv); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_geometric(THCudaRNGState* state, THCudaTensor *self_, double p) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); hipLaunchKernelGGL(( generate_geometric), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->current_gen->gen_states, size, data, p); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_exponential(THCudaRNGState* state, THCudaTensor *self_, double lambda) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); hipLaunchKernelGGL(( generate_exponential), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->current_gen->gen_states, size, data, lambda); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_cauchy(THCudaRNGState* state, THCudaTensor *self_, double median, double sigma) { 
if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); hipLaunchKernelGGL(( generate_cauchy), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, state->current_gen->gen_states, size, data, median, sigma); THCudaTensor_freeCopyTo(self, self_); }; #undef NUM_BLOCKS
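Each sampling kernel above reads one MTGP32 state per block (state[blockIdx.x]), so a launch may never use more than MAX_NUM_BLOCKS blocks; the NUM_BLOCKS macro caps the grid, and the grid-stride loop inside every kernel covers whatever elements remain. A small host-side sketch of that sizing rule (numBlocksFor is an illustrative name, not part of this API):

#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256

/* Same rule as the DIVUP / NUM_BLOCKS macros above: enough blocks to cover
 * `size` elements, but never more blocks than there are MTGP32 states. */
static int numBlocksFor(long size)
{
    long needed = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;   /* DIVUP(size, BLOCK_SIZE) */
    return (needed < MAX_NUM_BLOCKS) ? (int)needed : MAX_NUM_BLOCKS;
}
/* e.g. numBlocksFor(1000)     -> 4
 *      numBlocksFor(1 << 20)  -> 64 (capped; each kernel's grid-stride loop
 *                                    processes the remaining elements)      */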
f90cf42c52937462fa0e77197286055c08d7c185.cu
#include "THCTensorRandom.h" #include "THCGeneral.h" #include <thrust/functional.h> #include <curand.h> #include <curand_kernel.h> #include <curand_mtgp32_host.h> #include <curand_mtgp32dc_p_11213.h> #define MAX_NUM_BLOCKS 64 #define BLOCK_SIZE 256 #ifndef DIVUP #define DIVUP(x, y) (((x) + (y) - 1) / (y)) #endif /* Sets up generator. Allocates but does not create the generator states. */ __host__ void initializeGenerator(Generator* gen) { THCudaCheck(cudaMalloc((void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(curandStateMtgp32))); THCudaCheck(cudaMalloc((void**)&gen->kernel_params, sizeof(mtgp32_kernel_params))); if (curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != CURAND_STATUS_SUCCESS) { THError("Creating MTGP constants failed."); } } /* Frees memory allocated during setup. */ __host__ void destroyGenerator(Generator* gen) { if (gen->gen_states) { THCudaCheck(cudaFree(gen->gen_states)); gen->gen_states = NULL; } if (gen->kernel_params) { THCudaCheck(cudaFree(gen->kernel_params)); gen->kernel_params = NULL; } } /* Creates a new generator state given the seed. */ __host__ void createGeneratorState(Generator* gen, unsigned long seed) { if (curandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213, gen->kernel_params, MAX_NUM_BLOCKS, seed) != CURAND_STATUS_SUCCESS) { THError("Creating MTGP kernel state failed."); } } /* Initialize generator array (must be called before any other function) */ __host__ void THCRandom_init(THCudaRNGState* state, int devices, int current_device) { state->num_devices = devices; state->gen = (Generator*)malloc(state->num_devices * sizeof(Generator)); for (int i = 0; i < state->num_devices; ++i) { state->gen[i].initf = 0; state->gen[i].initial_seed = 0; state->gen[i].gen_states = NULL; state->gen[i].kernel_params = NULL; } state->current_gen = &state->gen[current_device]; // Initialize the generator for the current device. Other generators will be // initialized on-demand in THCRandom_setGenerator. 
initializeGenerator(state->current_gen); THCRandom_seed(state); } /* Destroy generators and free memory */ __host__ void THCRandom_shutdown(THCudaRNGState* state) { if (state->gen == NULL) return; for (int i = 0; i < state->num_devices; ++i) { destroyGenerator(&state->gen[i]); } free(state->gen); state->gen = NULL; state->current_gen = NULL; } /* Set the generator for the current device */ __host__ void THCRandom_setGenerator(THCudaRNGState* state, int device) { if (device >= state->num_devices) THError("Invalid device index."); state->current_gen = &state->gen[device]; if (state->current_gen->initf == 0) { initializeGenerator(state->current_gen); THCRandom_seed(state); } } /* Reset the generator for the current device after a device reset */ __host__ void THCRandom_resetGenerator(THCudaRNGState* state) { initializeGenerator(state->current_gen); THCRandom_manualSeed(state, state->current_gen->initial_seed); } /* Random seed */ __host__ unsigned long THCRandom_seed(THCudaRNGState* state) { unsigned long s = (unsigned long)time(0); THCRandom_manualSeed(state, s); return s; } __host__ unsigned long THCRandom_seedAll(THCudaRNGState* state) { unsigned long s = (unsigned long)time(0); THCRandom_manualSeedAll(state, s); return s; } /* Manually set the seed */ __host__ void THCRandom_manualSeed(THCudaRNGState* state, unsigned long seed) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } state->current_gen->initial_seed = seed; createGeneratorState(state->current_gen, seed); state->current_gen->initf = 1; } __host__ void THCRandom_manualSeedAll(THCudaRNGState* state, unsigned long seed) { int currentDevice; THCudaCheck(cudaGetDevice(&currentDevice)); for (int i = 0; i < state->num_devices; ++i) { THCudaCheck(cudaSetDevice(i)); THCRandom_setGenerator(state, i); THCRandom_manualSeed(state, seed); } THCudaCheck(cudaSetDevice(currentDevice)); THCRandom_setGenerator(state, currentDevice); } /* Get the initial seed */ __host__ unsigned long THCRandom_initialSeed(THCudaRNGState* state) { return state->current_gen->initial_seed; } __host__ void THCRandom_getRNGState(THCudaRNGState* state, THByteTensor *rng_state) { // The RNG state comprises the MTPG32 states and the seed. 
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32); static const size_t seed_size = sizeof(unsigned long); static const size_t total_size = states_size + seed_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(cudaMemcpy(THByteTensor_data(rng_state), state->current_gen->gen_states, states_size, cudaMemcpyDeviceToHost)); memcpy(THByteTensor_data(rng_state) + states_size, &state->current_gen->initial_seed, seed_size); } __host__ void THCRandom_setRNGState(THCudaRNGState* state, THByteTensor *rng_state) { static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32); static const size_t seed_size = sizeof(unsigned long); static const size_t total_size = states_size + seed_size; THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); THCudaCheck(cudaMemcpy(state->current_gen->gen_states, THByteTensor_data(rng_state), states_size, cudaMemcpyHostToDevice)); memcpy(&state->current_gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size); } #define GENERATE_KERNEL1(NAME, ARG1, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(curandStateMtgp32 *state, int size, float *result, ARG1) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ for (int i = idx; i < size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ float x = CURAND_FUNC(&state[blockIdx.x]); \ x = TRANSFORM; \ result[i] = x; \ } \ } #define GENERATE_KERNEL2(NAME, ARG1, ARG2, CURAND_FUNC, TRANSFORM) \ __global__ void NAME(curandStateMtgp32 *state, int size, float *result, ARG1, ARG2) \ { \ int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \ for (int i = idx; i < size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \ float x = CURAND_FUNC(&state[blockIdx.x]); \ x = TRANSFORM; \ result[i] = x; \ } \ } GENERATE_KERNEL2(generate_uniform, double a, double b, curand_uniform, x * (b-a) + a) GENERATE_KERNEL1(generate_bernoulli, double p, curand_uniform, (float)x <= p) GENERATE_KERNEL2(generate_normal, double mean, double stdv, curand_normal, (x * stdv) + mean) GENERATE_KERNEL1(generate_geometric, double p, curand_uniform, (log(1-x) / log(p)) + 1) GENERATE_KERNEL1(generate_exponential, double lambda, curand_uniform, (float)(-1. / lambda * log(1-x))) GENERATE_KERNEL2(generate_cauchy, double median, double sigma, curand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5)))) #undef GENERATE_KERNEL1 #undef GENERATE_KERNEL2 /* Separate kernel because curand_log_normal gets extra parameters. 
*/ __global__ void generate_log_normal(curandStateMtgp32 *state, int size, float *result, float mean, float stddev) { int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; for (int i = idx; i < size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { result[i] = curand_log_normal(&state[blockIdx.x], mean, stddev); } } #define NUM_BLOCKS min((int)DIVUP(size, BLOCK_SIZE), MAX_NUM_BLOCKS) THC_API void THCudaTensor_uniform(THCudaRNGState* state, THCudaTensor *self_, double a, double b) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); generate_uniform<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->current_gen->gen_states, size, data, a, b); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_bernoulli(THCudaRNGState* state, THCudaTensor *self_, double p) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); generate_bernoulli<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->current_gen->gen_states, size, data, p); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_normal(THCudaRNGState* state, THCudaTensor *self_, double mean, double stdv) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); generate_normal<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->current_gen->gen_states, size, data, mean, stdv); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_logNormal(THCudaRNGState* state, THCudaTensor *self_, double mean, double stdv) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); generate_log_normal<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->current_gen->gen_states, size, data, mean, stdv); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_geometric(THCudaRNGState* state, THCudaTensor *self_, double p) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); generate_geometric<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->current_gen->gen_states, size, data, p); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_exponential(THCudaRNGState* state, THCudaTensor *self_, double lambda) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = THCudaTensor_data(self); generate_exponential<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->current_gen->gen_states, size, data, lambda); THCudaTensor_freeCopyTo(self, self_); }; THC_API void THCudaTensor_cauchy(THCudaRNGState* state, THCudaTensor *self_, double median, double sigma) { if (state->current_gen == NULL) { THError("Random number generators have not been initialized."); } THCudaTensor *self = THCudaTensor_newContiguous(self_); long size = THCudaTensor_nElement(self); float *data = 
THCudaTensor_data(self); generate_cauchy<<<NUM_BLOCKS, BLOCK_SIZE>>>( state->current_gen->gen_states, size, data, median, sigma); THCudaTensor_freeCopyTo(self, self_); }; #undef NUM_BLOCKS
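For readability, the following is a hand-expanded sketch of what the GENERATE_KERNEL2 macro above produces for the uniform case. Nothing here is new API: BLOCK_SIZE and MAX_NUM_BLOCKS are the constants the file already relies on, and the body is exactly the macro text with its arguments substituted.
// Hand expansion of GENERATE_KERNEL2(generate_uniform, double a, double b,
//                                    curand_uniform, x * (b-a) + a)
// Each block owns one MTGP32 state; threads stride over the output buffer.
__global__ void generate_uniform(curandStateMtgp32 *state, int size, float *result,
                                 double a, double b)
{
    int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    for (int i = idx; i < size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) {
        float x = curand_uniform(&state[blockIdx.x]);  // uniform sample in (0, 1]
        x = x * (b - a) + a;                           // rescale to the requested range
        result[i] = x;
    }
}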
867a35e7ad4760998ad3e499da6a0ca0755eed06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void oneMinusTanh(float* out, float* in, int size){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size) out[id] = 1 - in[id]; }
867a35e7ad4760998ad3e499da6a0ca0755eed06.cu
#include "includes.h" __global__ void oneMinusTanh(float* out, float* in, int size){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size) out[id] = 1 - in[id]; }
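A minimal host-side launch sketch for the kernel above, assuming the usual one-thread-per-element configuration; the wrapper name and the block size of 256 are illustrative choices, not part of the original file.
// Hypothetical helper (not in the original source) showing a typical launch.
void launchOneMinusTanh(float* d_out, float* d_in, int size)
{
    const int threads = 256;                           // assumed block size
    const int blocks  = (size + threads - 1) / threads; // enough blocks to cover size
    oneMinusTanh<<<blocks, threads>>>(d_out, d_in, size);
    cudaDeviceSynchronize();                           // wait so errors surface here
}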
c55ad7bcfea9f6f22111eb7ee4ff3b0d9d4ecbcd.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/cudafeatures2d.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" // SAUF in GPU #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; namespace { __device__ unsigned Find(const int *s_buf, unsigned n) { unsigned label = s_buf[n]; assert(label > 0); while (label - 1 != n) { n = label - 1; label = s_buf[n]; assert(label > 0); } return n; } __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a + 1); done = (old == b + 1); b = old - 1; } else if (b < a) { int old = atomicMin(s_buf + a, b + 1); done = (old == a + 1); a = old - 1; } else { done = true; } } while (!done); } __global__ void Initialization(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned img_index = row * (img.step / img.elem_size) + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { if (img[img_index] > 0) { labels[labels_index] = labels_index + 1; } else { labels[labels_index] = 0; } } } __global__ void Merge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { #define CONDITION_P col > 0 && row > 0 && img[img_index - img.step - 1] > 0 #define CONDITION_Q row > 0 && img[img_index - img.step] > 0 #define CONDITION_R col < img.cols - 1 && row > 0 && img[img_index - img.step + 1] > 0 #define CONDITION_S col > 0 && img[img_index - 1] > 0 #define CONDITION_X img[img_index] > 0 #define ACTION_1 // nothing to do #define ACTION_2 // LabelsSolver::NewLabel(); // new label #define ACTION_3 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size - 1); //img_labels_row_prev[c - 1]; // x <- p #define ACTION_4 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size); //img_labels_row_prev[c]; // x <- q #define ACTION_5 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //img_labels_row_prev[c + 1]; // x <- r #define ACTION_6 Union(labels.data, labels_index, labels_index - 1); //img_labels_row[c - 1]; // x <- s #define ACTION_7 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size - 1); \ Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //LabelsSolver::Merge(img_labels_row_prev[c - 1], img_labels_row_prev[c + 1]); // x <- p + r #define ACTION_8 Union(labels.data, labels_index, labels_index - 1); \ Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //LabelsSolver::Merge(img_labels_row[c - 1], img_labels_row_prev[c + 1]); // x <- s + r #include "labeling_wu_2009_tree.inc" #undef ACTION_1 #undef ACTION_2 #undef ACTION_3 #undef ACTION_4 #undef ACTION_5 #undef ACTION_6 #undef ACTION_7 #undef ACTION_8 #undef CONDITION_P #undef CONDITION_Q #undef CONDITION_R #undef CONDITION_S #undef CONDITION_X } } __global__ void Compression(cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; 
unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned int val = labels[labels_index]; if (val > 0) { labels[labels_index] = Find(labels.data, labels_index) + 1; } } } } class C_SAUF : public GpuLabeling2D<CONN_8> { private: dim3 grid_size_; dim3 block_size_; public: C_SAUF() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); Merge << <grid_size_, block_size_ >> >(d_img_, d_img_labels_); //Mat1i local_labels(img_.size()); //d_img_labels_.download(local_labels); //cuda::GpuMat d_global_labels; //d_img_labels_.copyTo(d_global_labels); //PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels); //// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels); //Mat1i global_labels(img_.size()); //d_global_labels.download(global_labels); Compression << <grid_size_, block_size_ >> > (d_img_labels_); //d_img_labels_.download(img_labels_); hipDeviceSynchronize(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); Merge << <grid_size_, block_size_ >> >(d_img_, d_img_labels_); Compression << <grid_size_, block_size_ >> > (d_img_labels_); hipDeviceSynchronize(); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(C_SAUF);
c55ad7bcfea9f6f22111eb7ee4ff3b0d9d4ecbcd.cu
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" // SAUF in GPU #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; namespace { __device__ unsigned Find(const int *s_buf, unsigned n) { unsigned label = s_buf[n]; assert(label > 0); while (label - 1 != n) { n = label - 1; label = s_buf[n]; assert(label > 0); } return n; } __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a + 1); done = (old == b + 1); b = old - 1; } else if (b < a) { int old = atomicMin(s_buf + a, b + 1); done = (old == a + 1); a = old - 1; } else { done = true; } } while (!done); } __global__ void Initialization(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned img_index = row * (img.step / img.elem_size) + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { if (img[img_index] > 0) { labels[labels_index] = labels_index + 1; } else { labels[labels_index] = 0; } } } __global__ void Merge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned img_index = row * img.step + col; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { #define CONDITION_P col > 0 && row > 0 && img[img_index - img.step - 1] > 0 #define CONDITION_Q row > 0 && img[img_index - img.step] > 0 #define CONDITION_R col < img.cols - 1 && row > 0 && img[img_index - img.step + 1] > 0 #define CONDITION_S col > 0 && img[img_index - 1] > 0 #define CONDITION_X img[img_index] > 0 #define ACTION_1 // nothing to do #define ACTION_2 // LabelsSolver::NewLabel(); // new label #define ACTION_3 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size - 1); //img_labels_row_prev[c - 1]; // x <- p #define ACTION_4 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size); //img_labels_row_prev[c]; // x <- q #define ACTION_5 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //img_labels_row_prev[c + 1]; // x <- r #define ACTION_6 Union(labels.data, labels_index, labels_index - 1); //img_labels_row[c - 1]; // x <- s #define ACTION_7 Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size - 1); \ Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //LabelsSolver::Merge(img_labels_row_prev[c - 1], img_labels_row_prev[c + 1]); // x <- p + r #define ACTION_8 Union(labels.data, labels_index, labels_index - 1); \ Union(labels.data, labels_index, labels_index - labels.step / labels.elem_size + 1); //LabelsSolver::Merge(img_labels_row[c - 1], img_labels_row_prev[c + 1]); // x <- s + r #include "labeling_wu_2009_tree.inc" #undef ACTION_1 #undef ACTION_2 #undef ACTION_3 #undef ACTION_4 #undef ACTION_5 #undef ACTION_6 #undef ACTION_7 #undef ACTION_8 #undef CONDITION_P #undef CONDITION_Q #undef CONDITION_R #undef CONDITION_S #undef CONDITION_X } } __global__ void Compression(cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) 
+ col; if (row < labels.rows && col < labels.cols) { unsigned int val = labels[labels_index]; if (val > 0) { labels[labels_index] = Find(labels.data, labels_index) + 1; } } } } class C_SAUF : public GpuLabeling2D<CONN_8> { private: dim3 grid_size_; dim3 block_size_; public: C_SAUF() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); Merge << <grid_size_, block_size_ >> >(d_img_, d_img_labels_); //Mat1i local_labels(img_.size()); //d_img_labels_.download(local_labels); //cuda::GpuMat d_global_labels; //d_img_labels_.copyTo(d_global_labels); //PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels); //// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels); //Mat1i global_labels(img_.size()); //d_global_labels.download(global_labels); Compression << <grid_size_, block_size_ >> > (d_img_labels_); //d_img_labels_.download(img_labels_); cudaDeviceSynchronize(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); Merge << <grid_size_, block_size_ >> >(d_img_, d_img_labels_); Compression << <grid_size_, block_size_ >> > (d_img_labels_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(C_SAUF);
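To make the Merge step above easier to follow, here is a sequential CPU analogue (a sketch, not code from the labeling library) of the 1-based union-find that Find and Union implement with atomicMin on the GPU.
// Sequential sketch of the 1-based union-find used by the kernels above.
// s_buf[n] == 0 means background; otherwise s_buf[n] is 1 + the parent index.
static unsigned FindHost(const int* s_buf, unsigned n) {
    while (s_buf[n] - 1 != (int)n)      // follow parent links until the root
        n = s_buf[n] - 1;
    return n;
}
static void UnionHost(int* s_buf, unsigned a, unsigned b) {
    a = FindHost(s_buf, a);
    b = FindHost(s_buf, b);
    if (a < b) s_buf[b] = a + 1;        // smaller root wins (atomicMin on the GPU)
    else if (b < a) s_buf[a] = b + 1;
    // The device version retries in a loop because other threads may be
    // merging the same trees concurrently; sequentially one link suffices.
}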
486d3f0bae50c9f472a34a837449aa717245a00e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "src/fastertransformer/kernels/calibrate_quantize_weight_kernels.h" #include "src/fastertransformer/kernels/reduce_kernel_utils.cuh" namespace fastertransformer { // src is [k, n] row-major // scale is [n] // grid(n) // block(k) // TODO : Improve for memory coalesing template<typename T> __global__ void ldn_calibrate_weight_per_channel(float* scale, const T* src, const int k, const int n) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; scale += bidx; float amax_val = 0.0f; for (int k_i = tidx; k_i < k; k_i += blockDim.x) { float val = fabs(static_cast<float>(src[k_i * n + bidx])); if (amax_val < val) { amax_val = val; } } const float block_amax_val = blockReduceMax(amax_val); if (tidx == 0) { scale[0] = block_amax_val / 127.0f; } } template<typename T> void invokeLdnCalibrateWeightPerChannel(float* scale, const T* src, const int k, const int n, hipStream_t stream) { dim3 grid(n); dim3 block((k + 31) / 32 * 32); if (block.x > 1024) { block.x = 1024; } hipLaunchKernelGGL(( ldn_calibrate_weight_per_channel), dim3(grid), dim3(block), 0, stream, scale, src, k, n); } template void invokeLdnCalibrateWeightPerChannel(float* scale, const float* src, const int k, const int n, hipStream_t stream); template void invokeLdnCalibrateWeightPerChannel(float* scale, const half* src, const int k, const int n, hipStream_t stream); #ifdef ENABLE_BF16 template void invokeLdnCalibrateWeightPerChannel( float* scale, const __nv_bfloat16* src, const int k, const int n, hipStream_t stream); #endif //--------------------------------------------------------------------------------- // src is [n, k] row-major // dst is [n, k] row-major // scale is [n] // grid(n) // block(k) template<typename T> __global__ void ldk_calibrate_quantize_weight_per_channel(int8_t* dst, float* scale, const T* src, const int k) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; scale += bidx; src += bidx * k; dst += bidx * k; T amax_val = 0.0f; const T zero = static_cast<T>(0.0f); for (int k_i = tidx; k_i < k; k_i += blockDim.x) { T val = src[k_i]; val = val > zero ? 
val : -val; if (amax_val < val) { amax_val = val; } } __shared__ float s_amax; const float block_amax_val = blockReduceMax(static_cast<float>(amax_val)); if (tidx == 0) { s_amax = block_amax_val; scale[0] = block_amax_val / 127.0f; } __syncthreads(); for (int k_i = tidx; k_i < k; k_i += blockDim.x) { T val = src[k_i]; dst[k_i] = float_to_int8_rn(127.0f * static_cast<float>(val) / s_amax); } } template<typename T> void invokeLdkCalibrateQuantizeWeightPerChannel( int8_t* dst, float* scale, const T* src, const int n, const int k, hipStream_t stream) { dim3 grid(n); dim3 block((k + 31) / 32 * 32); if (block.x > 1024) { block.x = 1024; } hipLaunchKernelGGL(( ldk_calibrate_quantize_weight_per_channel), dim3(grid), dim3(block), 0, stream, dst, scale, src, k); } template void invokeLdkCalibrateQuantizeWeightPerChannel( int8_t* dst, float* scale, const float* src, const int n, const int k, hipStream_t stream); template void invokeLdkCalibrateQuantizeWeightPerChannel( int8_t* dst, float* scale, const half* src, const int n, const int k, hipStream_t stream); //--------------------------------------------------------------- // src is [k, n] row-major // dst is [n, k] row-major template<typename T> __global__ void ldn_transpose_quantize_weight_per_channel(int8_t* dst, const float* scale, const T* src, const int k, const int n) { __shared__ T shm[32][33]; const int tidx = threadIdx.x; const int tidy = threadIdx.y; int n_idx = blockIdx.x * 32 + tidx; int k_idx = blockIdx.y * 32 + tidy; if (n_idx < n && k_idx < k) { shm[tidx][tidy] = src[k_idx * n + n_idx]; } __syncthreads(); n_idx = blockIdx.x * 32 + tidy; k_idx = blockIdx.y * 32 + tidx; if (n_idx < n && k_idx < k) { dst[n_idx * k + k_idx] = float_to_int8_rn(static_cast<float>(shm[tidy][tidx]) / scale[n_idx]); } } // src is [k, n] row-major // dst is [n, k] row-major template<typename T> void invokeLdnTransposeQuantizeWeightPerChannel( int8_t* dst, const float* scale, const T* src, const int k, const int n, hipStream_t stream) { dim3 grid(n / 32, k / 32); dim3 block(32, 32); hipLaunchKernelGGL(( ldn_transpose_quantize_weight_per_channel), dim3(grid), dim3(block), 0, stream, dst, scale, src, k, n); } template void invokeLdnTransposeQuantizeWeightPerChannel( int8_t* dst, const float* scale, const float* src, const int k, const int n, hipStream_t stream); template void invokeLdnTransposeQuantizeWeightPerChannel( int8_t* dst, const float* scale, const half* src, const int k, const int n, hipStream_t stream); #ifdef ENABLE_BF16 template void invokeLdnTransposeQuantizeWeightPerChannel( int8_t* dst, const float* scale, const __nv_bfloat16* src, const int k, const int n, hipStream_t stream); #endif } // namespace fastertransformer
486d3f0bae50c9f472a34a837449aa717245a00e.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "src/fastertransformer/kernels/calibrate_quantize_weight_kernels.h" #include "src/fastertransformer/kernels/reduce_kernel_utils.cuh" namespace fastertransformer { // src is [k, n] row-major // scale is [n] // grid(n) // block(k) // TODO : Improve for memory coalesing template<typename T> __global__ void ldn_calibrate_weight_per_channel(float* scale, const T* src, const int k, const int n) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; scale += bidx; float amax_val = 0.0f; for (int k_i = tidx; k_i < k; k_i += blockDim.x) { float val = fabs(static_cast<float>(src[k_i * n + bidx])); if (amax_val < val) { amax_val = val; } } const float block_amax_val = blockReduceMax(amax_val); if (tidx == 0) { scale[0] = block_amax_val / 127.0f; } } template<typename T> void invokeLdnCalibrateWeightPerChannel(float* scale, const T* src, const int k, const int n, cudaStream_t stream) { dim3 grid(n); dim3 block((k + 31) / 32 * 32); if (block.x > 1024) { block.x = 1024; } ldn_calibrate_weight_per_channel<<<grid, block, 0, stream>>>(scale, src, k, n); } template void invokeLdnCalibrateWeightPerChannel(float* scale, const float* src, const int k, const int n, cudaStream_t stream); template void invokeLdnCalibrateWeightPerChannel(float* scale, const half* src, const int k, const int n, cudaStream_t stream); #ifdef ENABLE_BF16 template void invokeLdnCalibrateWeightPerChannel( float* scale, const __nv_bfloat16* src, const int k, const int n, cudaStream_t stream); #endif //--------------------------------------------------------------------------------- // src is [n, k] row-major // dst is [n, k] row-major // scale is [n] // grid(n) // block(k) template<typename T> __global__ void ldk_calibrate_quantize_weight_per_channel(int8_t* dst, float* scale, const T* src, const int k) { const int tidx = threadIdx.x; const int bidx = blockIdx.x; scale += bidx; src += bidx * k; dst += bidx * k; T amax_val = 0.0f; const T zero = static_cast<T>(0.0f); for (int k_i = tidx; k_i < k; k_i += blockDim.x) { T val = src[k_i]; val = val > zero ? 
val : -val; if (amax_val < val) { amax_val = val; } } __shared__ float s_amax; const float block_amax_val = blockReduceMax(static_cast<float>(amax_val)); if (tidx == 0) { s_amax = block_amax_val; scale[0] = block_amax_val / 127.0f; } __syncthreads(); for (int k_i = tidx; k_i < k; k_i += blockDim.x) { T val = src[k_i]; dst[k_i] = float_to_int8_rn(127.0f * static_cast<float>(val) / s_amax); } } template<typename T> void invokeLdkCalibrateQuantizeWeightPerChannel( int8_t* dst, float* scale, const T* src, const int n, const int k, cudaStream_t stream) { dim3 grid(n); dim3 block((k + 31) / 32 * 32); if (block.x > 1024) { block.x = 1024; } ldk_calibrate_quantize_weight_per_channel<<<grid, block, 0, stream>>>(dst, scale, src, k); } template void invokeLdkCalibrateQuantizeWeightPerChannel( int8_t* dst, float* scale, const float* src, const int n, const int k, cudaStream_t stream); template void invokeLdkCalibrateQuantizeWeightPerChannel( int8_t* dst, float* scale, const half* src, const int n, const int k, cudaStream_t stream); //--------------------------------------------------------------- // src is [k, n] row-major // dst is [n, k] row-major template<typename T> __global__ void ldn_transpose_quantize_weight_per_channel(int8_t* dst, const float* scale, const T* src, const int k, const int n) { __shared__ T shm[32][33]; const int tidx = threadIdx.x; const int tidy = threadIdx.y; int n_idx = blockIdx.x * 32 + tidx; int k_idx = blockIdx.y * 32 + tidy; if (n_idx < n && k_idx < k) { shm[tidx][tidy] = src[k_idx * n + n_idx]; } __syncthreads(); n_idx = blockIdx.x * 32 + tidy; k_idx = blockIdx.y * 32 + tidx; if (n_idx < n && k_idx < k) { dst[n_idx * k + k_idx] = float_to_int8_rn(static_cast<float>(shm[tidy][tidx]) / scale[n_idx]); } } // src is [k, n] row-major // dst is [n, k] row-major template<typename T> void invokeLdnTransposeQuantizeWeightPerChannel( int8_t* dst, const float* scale, const T* src, const int k, const int n, cudaStream_t stream) { dim3 grid(n / 32, k / 32); dim3 block(32, 32); ldn_transpose_quantize_weight_per_channel<<<grid, block, 0, stream>>>(dst, scale, src, k, n); } template void invokeLdnTransposeQuantizeWeightPerChannel( int8_t* dst, const float* scale, const float* src, const int k, const int n, cudaStream_t stream); template void invokeLdnTransposeQuantizeWeightPerChannel( int8_t* dst, const float* scale, const half* src, const int k, const int n, cudaStream_t stream); #ifdef ENABLE_BF16 template void invokeLdnTransposeQuantizeWeightPerChannel( int8_t* dst, const float* scale, const __nv_bfloat16* src, const int k, const int n, cudaStream_t stream); #endif } // namespace fastertransformer
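As a cross-check of the per-channel calibration math above, here is a plain host-side sketch (not part of FasterTransformer) that computes the same scale the ldn_calibrate_weight_per_channel kernel produces for a row-major [k, n] weight.
// Host reference for ldn_calibrate_weight_per_channel:
//   scale[n_i] = max over k of |src[k_i * n + n_i]| / 127
#include <cmath>
void ldn_calibrate_weight_per_channel_host(float* scale, const float* src, int k, int n)
{
    for (int n_i = 0; n_i < n; ++n_i) {
        float amax = 0.0f;
        for (int k_i = 0; k_i < k; ++k_i) {
            float v = std::fabs(src[k_i * n + n_i]);
            if (v > amax) amax = v;
        }
        scale[n_i] = amax / 127.0f;   // symmetric int8 quantization scale per column
    }
}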
5b5f58f6fdbb2f9243caf781032b046772ab53e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_poisson_kernel_update; int xdim0_poisson_kernel_update_h = -1; int ydim0_poisson_kernel_update_h = -1; __constant__ int xdim1_poisson_kernel_update; int xdim1_poisson_kernel_update_h = -1; int ydim1_poisson_kernel_update_h = -1; #define OPS_ACC0(x,y) (x+xdim0_poisson_kernel_update*(y)) #define OPS_ACC1(x,y) (x+xdim1_poisson_kernel_update*(y)) //user function __device__ void poisson_kernel_update(const double *u2, double *u) { u[OPS_ACC1(0,0)] = u2[OPS_ACC0(0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_poisson_kernel_update( const double* __restrict arg0, double* __restrict arg1, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_poisson_kernel_update; arg1 += idx_x * 1 + idx_y * 1 * xdim1_poisson_kernel_update; if (idx_x < size0 && idx_y < size1) { poisson_kernel_update(arg0, arg1); } } // host stub function void ops_par_loop_poisson_kernel_update(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_arg args[2] = { arg0, arg1}; ops_timing_realloc(3,"poisson_kernel_update"); OPS_kernels[3].count++; //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_poisson_kernel_update_h || xdim1 != xdim1_poisson_kernel_update_h) { hipMemcpyToSymbol( xdim0_poisson_kernel_update, &xdim0, sizeof(int) ); xdim0_poisson_kernel_update_h = xdim0; hipMemcpyToSymbol( xdim1_poisson_kernel_update, &xdim1, sizeof(int) ); xdim1_poisson_kernel_update_h = xdim1; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[2]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for 
(int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); ops_timers_core(&c1,&t1); OPS_kernels[3].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_poisson_kernel_update), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],x_size, y_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[3].time += t2-t1; ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[3].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[3].transfer += ops_compute_transfer(dim, range, &arg1); }
5b5f58f6fdbb2f9243caf781032b046772ab53e9.cu
// // auto-generated by ops.py // __constant__ int xdim0_poisson_kernel_update; int xdim0_poisson_kernel_update_h = -1; int ydim0_poisson_kernel_update_h = -1; __constant__ int xdim1_poisson_kernel_update; int xdim1_poisson_kernel_update_h = -1; int ydim1_poisson_kernel_update_h = -1; #define OPS_ACC0(x,y) (x+xdim0_poisson_kernel_update*(y)) #define OPS_ACC1(x,y) (x+xdim1_poisson_kernel_update*(y)) //user function __device__ void poisson_kernel_update(const double *u2, double *u) { u[OPS_ACC1(0,0)] = u2[OPS_ACC0(0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_poisson_kernel_update( const double* __restrict arg0, double* __restrict arg1, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_poisson_kernel_update; arg1 += idx_x * 1 + idx_y * 1 * xdim1_poisson_kernel_update; if (idx_x < size0 && idx_y < size1) { poisson_kernel_update(arg0, arg1); } } // host stub function void ops_par_loop_poisson_kernel_update(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_arg args[2] = { arg0, arg1}; ops_timing_realloc(3,"poisson_kernel_update"); OPS_kernels[3].count++; //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_poisson_kernel_update_h || xdim1 != xdim1_poisson_kernel_update_h) { cudaMemcpyToSymbol( xdim0_poisson_kernel_update, &xdim0, sizeof(int) ); xdim0_poisson_kernel_update_h = xdim0; cudaMemcpyToSymbol( xdim1_poisson_kernel_update, &xdim1, sizeof(int) ); xdim1_poisson_kernel_update_h = xdim1; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[2]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = 
dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); ops_timers_core(&c1,&t1); OPS_kernels[3].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_poisson_kernel_update<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],x_size, y_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[3].time += t2-t1; ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[3].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[3].transfer += ops_compute_transfer(dim, range, &arg1); }
660203b198d10466d588e84da72a4ba248354eea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // LICENSE // -------------------------------------------------------- // Fast R-CNN // Copyright (c) Microsoft. All rights reserved. // Written by Ross Girshick, 2015. // Licensed under the BSD 2-clause "Simplified" license. // See LICENSE in the Fast R-CNN project root for license // information. // -------------------------------------------------------- // -------------------------------------------------------- // Roi pooling layer (only two CUDA kernels) // Authored by Ross Girshick // These Kernels were modified to run on Matlab // -------------------------------------------------------- __global__ void ROIPoolForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const float* bottom_rois, float* top_data, int* argmax_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x){ // (ph, pw, c, n) is an element in the pooled output int ph = index % pooled_height; int pw = (index / pooled_height) % pooled_width; int c = (index / pooled_height / pooled_width) % channels; int n = index / pooled_height / pooled_width / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_height = max(roi_end_h - roi_start_h + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1); float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(pooled_height); float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(pooled_width); int hstart = static_cast<int>(floor(static_cast<float>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<float>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<float>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<float>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero float maxval = is_empty ?
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int w = wstart; w < wend; ++w) { for (int h = hstart; h < hend; ++h) { int bottom_index = w * height + h; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } __global__ void ROIPoolBackward(const int nthreads, const float* top_diff, const int* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, float* bottom_diff, const float* bottom_rois) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x){ // (h, w, c, n) coords in bottom data int w = index % height; int h = (index / height) % width; int c = (index / height / width) % channels; int n = index / height / width / channels; float gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const float* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const float* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(pooled_height); float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(pooled_width); int phstart = floor(static_cast<float>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<float>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<float>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<float>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int pw = pwstart; pw < pwend; ++pw) { for (int ph = phstart; ph < phend; ++ph) { if (offset_argmax_data[pw * pooled_height + ph] == (w * height + h)) { gradient += offset_top_diff[pw * pooled_height + ph]; } } } } bottom_diff[index] = gradient; } }
660203b198d10466d588e84da72a4ba248354eea.cu
// LICENSE // -------------------------------------------------------- // Fast R-CNN // Copyright (c) Microsoft. All rights reserved. // Written by Ross Girshick, 2015. // Licensed under the BSD 2-clause "Simplified" license. // See LICENSE in the Fast R-CNN project root for license // information. // -------------------------------------------------------- // -------------------------------------------------------- // Roi pooling layer (only two CUDA kernels) // Authored by Ross Girshick // These Kernels were modified to run on Matlab // -------------------------------------------------------- __global__ void ROIPoolForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const float* bottom_rois, float* top_data, int* argmax_data) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x){ // (ph, pw, c, n) is an element in the pooled output int ph = index % pooled_height; int pw = (index / pooled_height) % pooled_width; int c = (index / pooled_height / pooled_width) % channels; int n = index / pooled_height / pooled_width / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_height = max(roi_end_h - roi_start_h + 1, 1); int roi_width = max(roi_end_w - roi_start_w + 1, 1); float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(pooled_height); float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(pooled_width); int hstart = static_cast<int>(floor(static_cast<float>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<float>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<float>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<float>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero float maxval = is_empty ?
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; for (int w = wstart; w < wend; ++w) { for (int h = hstart; h < hend; ++h) { int bottom_index = w * height + h; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } __global__ void ROIPoolBackward(const int nthreads, const float* top_diff, const int* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, float* bottom_diff, const float* bottom_rois) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x){ // (h, w, c, n) coords in bottom data int w = index % height; int h = (index / height) % width; int c = (index / height / width) % channels; int n = index / height / width / channels; float gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const float* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const float* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(pooled_height); float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(pooled_width); int phstart = floor(static_cast<float>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<float>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<float>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<float>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); for (int pw = pwstart; pw < pwend; ++pw) { for (int ph = phstart; ph < phend; ++ph) { if (offset_argmax_data[pw * pooled_height + ph] == (w * height + h)) { gradient += offset_top_diff[pw * pooled_height + ph]; } } } } bottom_diff[index] = gradient; } }
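A small worked example of the bin arithmetic in ROIPoolForward, using illustrative numbers that are not from the source: for roi_height = 12 and pooled_height = 6, bin_size_h = 12 / 6 = 2.0, so the bin at ph = 2 spans hstart = floor(2 * 2.0) = 4 to hend = ceil(3 * 2.0) = 6; the kernel then shifts this range by roi_start_h and clips it to [0, height] before taking the max over the bin.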
7e0775aa13c5ec4df74a8d815be5c2cb355d83a7.hip
// !!! This is a file automatically generated by hipify!!! // // Compile: // // $ nvcc list_gpus.cu -o list_gpus // // #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> int main() { int deviceCount; hipGetDeviceCount(&deviceCount); int device; for (device = 0; device < deviceCount; ++device) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, device); printf("Device Index %d, %s, Compute Capability %d.%d\n", device, deviceProp.name, deviceProp.major, deviceProp.minor); } }
7e0775aa13c5ec4df74a8d815be5c2cb355d83a7.cu
// // Compile: // // $ nvcc list_gpus.cu -o list_gpus // // #include <cuda.h> #include <curand_kernel.h> #include <stdio.h> int main() { int deviceCount; cudaGetDeviceCount(&deviceCount); int device; for (device = 0; device < deviceCount; ++device) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); printf("Device Index %d, %s, Compute Capability %d.%d\n", device, deviceProp.name, deviceProp.major, deviceProp.minor); } }
daffdecb1980470f30141d78914e3143dc865c47.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Add.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_a = NULL; hipMalloc(&d_a, XSIZE*YSIZE); float *d_b = NULL; hipMalloc(&d_b, XSIZE*YSIZE); float *d_c = NULL; hipMalloc(&d_c, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Add), dim3(gridBlock),dim3(threadBlock), 0, 0, d_a,d_b,d_c,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Add), dim3(gridBlock),dim3(threadBlock), 0, 0, d_a,d_b,d_c,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Add), dim3(gridBlock),dim3(threadBlock), 0, 0, d_a,d_b,d_c,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
daffdecb1980470f30141d78914e3143dc865c47.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Add.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_a = NULL; cudaMalloc(&d_a, XSIZE*YSIZE); float *d_b = NULL; cudaMalloc(&d_b, XSIZE*YSIZE); float *d_c = NULL; cudaMalloc(&d_c, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Add<<<gridBlock,threadBlock>>>(d_a,d_b,d_c,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Add<<<gridBlock,threadBlock>>>(d_a,d_b,d_c,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Add<<<gridBlock,threadBlock>>>(d_a,d_b,d_c,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
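The timed loop above records its end timestamp without a final device synchronization, so the steady_clock measurement largely reflects asynchronous launch overhead. A CUDA-event-based variant (a sketch, not part of the generated benchmark; it reuses the benchmark's existing gridBlock, threadBlock and device buffers) that times the kernels themselves could look like this.
// Sketch: timing the same 1000 launches with CUDA events so the measurement
// covers kernel execution on the device, not just the host-side launch calls.
cudaEvent_t start_ev, stop_ev;
cudaEventCreate(&start_ev);
cudaEventCreate(&stop_ev);
cudaEventRecord(start_ev);
for (int loop_counter = 0; loop_counter < 1000; ++loop_counter) {
    Add<<<gridBlock, threadBlock>>>(d_a, d_b, d_c, N);
}
cudaEventRecord(stop_ev);
cudaEventSynchronize(stop_ev);                      // wait until the last kernel finishes
float msecs = 0.0f;
cudaEventElapsedTime(&msecs, start_ev, stop_ev);    // elapsed milliseconds for all 1000 launches
cudaEventDestroy(start_ev);
cudaEventDestroy(stop_ev);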
503b143f5d5982920cf81f4e6df9ff3c32a4c6aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cuda_hello(){ printf("Hello World from GPU!\n"); }
503b143f5d5982920cf81f4e6df9ff3c32a4c6aa.cu
#include "includes.h" __global__ void cuda_hello(){ printf("Hello World from GPU!\n"); }
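A minimal host program (an assumed harness, not in the original file) that would launch the kernel above; the device-side printf output only becomes visible once the host synchronizes.
#include <cuda_runtime.h>
// Assumes cuda_hello from the file above is visible in this translation unit.
int main() {
    cuda_hello<<<1, 1>>>();        // one block, one thread
    cudaDeviceSynchronize();       // flush the device-side printf buffer
    return 0;
}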
386e92897a047f60e8bfe9e15e2bcd2bbea7b544.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <chrono> #include <functional> #include <random> #include "XLib.hpp" using namespace xlib; enum ThreadReduceOP { INCLUSIVE, INCLUSIVE_ILP, EXCLUSIVE }; __global__ void threadReduceTest(int* DataIN, int* DataOUT) { int Local_data[32]; for (int i = 0; i < 32; i++) Local_data[i] = DataIN[i]; ThreadReduce::Add(Local_data); DataOUT[SUM_OP] = Local_data[0]; for (int i = 0; i < 32; i++) Local_data[i] = DataIN[i]; ThreadReduce::Min(Local_data); DataOUT[MIN_OP] = Local_data[0]; for (int i = 0; i < 32; i++) Local_data[i] = DataIN[i]; ThreadReduce::Max(Local_data); DataOUT[MAX_OP] = Local_data[0]; for (int i = 0; i < 32; i++) Local_data[i] = DataIN[i]; ThreadReduce::LogicAnd(Local_data); DataOUT[LOGIC_AND_OP] = Local_data[0]; } int main() { unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); std::uniform_int_distribution<int> distribution(-50, 50); const int INPUT_SIZE = 32; const int N_OF_OPERATIONS = 4; int DataIN[INPUT_SIZE]; int DataOUT[N_OF_OPERATIONS]; int DataOUT_copy[N_OF_OPERATIONS]; int* devDataIN, *devDataOUT; __SAFE_CALL( hipMalloc(&devDataIN, sizeof(DataIN)) ); __SAFE_CALL( hipMalloc(&devDataOUT, sizeof(DataOUT)) ); for (int i = 0; i < INPUT_SIZE; i++) DataIN[i] = distribution(generator); xlib::printArray(DataIN, 32); __SAFE_CALL( hipMemcpy(devDataIN, DataIN, sizeof(DataIN), hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( threadReduceTest), dim3(1), dim3(1), 0, 0, devDataIN, devDataOUT); __SAFE_CALL( hipMemcpy(DataOUT_copy, devDataOUT, sizeof(DataOUT), hipMemcpyDeviceToHost) ); DataOUT[SUM_OP] = std::accumulate(DataIN, DataIN + INPUT_SIZE, 0); if (DataOUT[SUM_OP] != DataOUT_copy[SUM_OP]) { ERROR("ThreadReduce (SUM) : " << DataOUT[SUM_OP] << "\t" << DataOUT_copy[SUM_OP]); } DataOUT[MIN_OP] = *std::min_element(DataIN, DataIN + INPUT_SIZE); if (DataOUT[MIN_OP] != DataOUT_copy[MIN_OP]) { ERROR("ThreadReduce (Min) : " << DataOUT[MIN_OP] << "\t" << DataOUT_copy[MIN_OP]); } DataOUT[MAX_OP] = *std::max_element(DataIN, DataIN + INPUT_SIZE); if (DataOUT[MAX_OP] != DataOUT_copy[MAX_OP]) { ERROR("ThreadReduce (Max) : " << DataOUT[MAX_OP] << "\t" << DataOUT_copy[MAX_OP]); } DataOUT[LOGIC_AND_OP] = DataIN[0]; for (int i = 1; i < INPUT_SIZE; i++) DataOUT[LOGIC_AND_OP] = DataOUT[LOGIC_AND_OP] && DataIN[i]; if (DataOUT[LOGIC_AND_OP] != DataOUT_copy[LOGIC_AND_OP]) { ERROR("ThreadReduce (AND) : " << DataOUT[LOGIC_AND_OP] << "\t" << DataOUT_copy[LOGIC_AND_OP]); } }
386e92897a047f60e8bfe9e15e2bcd2bbea7b544.cu
#include <iostream> #include <chrono> #include <functional> #include <random> #include "XLib.hpp" using namespace xlib; enum ThreadReduceOP { INCLUSIVE, INCLUSIVE_ILP, EXCLUSIVE }; __global__ void threadReduceTest(int* DataIN, int* DataOUT) { int Local_data[32]; for (int i = 0; i < 32; i++) Local_data[i] = DataIN[i]; ThreadReduce::Add(Local_data); DataOUT[SUM_OP] = Local_data[0]; for (int i = 0; i < 32; i++) Local_data[i] = DataIN[i]; ThreadReduce::Min(Local_data); DataOUT[MIN_OP] = Local_data[0]; for (int i = 0; i < 32; i++) Local_data[i] = DataIN[i]; ThreadReduce::Max(Local_data); DataOUT[MAX_OP] = Local_data[0]; for (int i = 0; i < 32; i++) Local_data[i] = DataIN[i]; ThreadReduce::LogicAnd(Local_data); DataOUT[LOGIC_AND_OP] = Local_data[0]; } int main() { unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); std::uniform_int_distribution<int> distribution(-50, 50); const int INPUT_SIZE = 32; const int N_OF_OPERATIONS = 4; int DataIN[INPUT_SIZE]; int DataOUT[N_OF_OPERATIONS]; int DataOUT_copy[N_OF_OPERATIONS]; int* devDataIN, *devDataOUT; __SAFE_CALL( cudaMalloc(&devDataIN, sizeof(DataIN)) ); __SAFE_CALL( cudaMalloc(&devDataOUT, sizeof(DataOUT)) ); for (int i = 0; i < INPUT_SIZE; i++) DataIN[i] = distribution(generator); xlib::printArray(DataIN, 32); __SAFE_CALL( cudaMemcpy(devDataIN, DataIN, sizeof(DataIN), cudaMemcpyHostToDevice) ); threadReduceTest<<<1, 1>>>(devDataIN, devDataOUT); __SAFE_CALL( cudaMemcpy(DataOUT_copy, devDataOUT, sizeof(DataOUT), cudaMemcpyDeviceToHost) ); DataOUT[SUM_OP] = std::accumulate(DataIN, DataIN + INPUT_SIZE, 0); if (DataOUT[SUM_OP] != DataOUT_copy[SUM_OP]) { ERROR("ThreadReduce (SUM) : " << DataOUT[SUM_OP] << "\t" << DataOUT_copy[SUM_OP]); } DataOUT[MIN_OP] = *std::min_element(DataIN, DataIN + INPUT_SIZE); if (DataOUT[MIN_OP] != DataOUT_copy[MIN_OP]) { ERROR("ThreadReduce (Min) : " << DataOUT[MIN_OP] << "\t" << DataOUT_copy[MIN_OP]); } DataOUT[MAX_OP] = *std::max_element(DataIN, DataIN + INPUT_SIZE); if (DataOUT[MAX_OP] != DataOUT_copy[MAX_OP]) { ERROR("ThreadReduce (Max) : " << DataOUT[MAX_OP] << "\t" << DataOUT_copy[MAX_OP]); } DataOUT[LOGIC_AND_OP] = DataIN[0]; for (int i = 1; i < INPUT_SIZE; i++) DataOUT[LOGIC_AND_OP] = DataOUT[LOGIC_AND_OP] && DataIN[i]; if (DataOUT[LOGIC_AND_OP] != DataOUT_copy[LOGIC_AND_OP]) { ERROR("ThreadReduce (AND) : " << DataOUT[LOGIC_AND_OP] << "\t" << DataOUT_copy[LOGIC_AND_OP]); } }
7bd211dcaaf0db1af203febe13d25775dca56083.hip
// !!! This is a file automatically generated by hipify!!! #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_gemv_batched_strided_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl" using ThreadBlockShape = cutlass::gemm::GemmShape<1, 128, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 2, 1>; using GemvKernel = cutlass::gemm::kernel::DefaultGemv< ThreadBlockShape, ThreadShape, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor>; template void megdnn::cuda::cutlass_wrapper:: cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>( BatchedGemmCoord const& problem_size, const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a, const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b, typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c, hipStream_t stream); #pragma GCC diagnostic pop #endif
7bd211dcaaf0db1af203febe13d25775dca56083.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_gemv_batched_strided_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/matrix_mul/fp32_simt_gemv/matrix_mul_float_simt_gemv_batched_strided_cutlass_wrapper.cuinl" using ThreadBlockShape = cutlass::gemm::GemmShape<1, 128, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 2, 1>; using GemvKernel = cutlass::gemm::kernel::DefaultGemv< ThreadBlockShape, ThreadShape, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor, float, cutlass::layout::RowMajor>; template void megdnn::cuda::cutlass_wrapper:: cutlass_vector_matrix_mul_batched_strided_wrapper<GemvKernel>( BatchedGemmCoord const& problem_size, const typename GemvKernel::ElementA* d_A, size_t lda, size_t batch_stride_a, const typename GemvKernel::ElementB* d_B, size_t ldb, size_t batch_stride_b, typename GemvKernel::ElementCD* d_C, size_t ldc, size_t batch_stride_c, cudaStream_t stream); #pragma GCC diagnostic pop #endif
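The pair above only instantiates CUTLASS's DefaultGemv kernel for one shape combination (GemmShape<1, 128, 4> thread blocks, GemmShape<1, 2, 1> threads); the actual computation lives in the .cuinl wrapper it includes. As a conceptual reference only, and not the CUTLASS implementation, the sketch below spells out what a batched, strided, row-major vector-matrix product computes; all names and the launch shape are illustrative.

#include <cuda_runtime.h>

// Plain-CUDA reference for a batched, strided vector-matrix product:
//   for each batch b:  C[b][n] = sum_k A[b][k] * B[b][k][n]
// A is a length-K row vector per batch, B a K x N row-major matrix per batch.
// ldb and the batch strides play the same role as in the wrapper above; the
// leading dimensions of A and C collapse because each has a single row.
__global__ void gemvBatchedStridedRef(int batches, int N, int K,
                                      const float* A, long strideA,
                                      const float* B, int ldb, long strideB,
                                      float* C, long strideC) {
    int n = blockIdx.x * blockDim.x + threadIdx.x;   // output column
    int b = blockIdx.y;                              // batch index
    if (b >= batches || n >= N) return;

    const float* vec = A + b * strideA;
    const float* mat = B + b * strideB;
    float acc = 0.f;
    for (int k = 0; k < K; ++k)
        acc += vec[k] * mat[(long)k * ldb + n];
    C[b * strideC + n] = acc;
}

// Possible launch: one thread per output element, one grid row per batch.
//   dim3 block(128);
//   dim3 grid((N + block.x - 1) / block.x, batches);
//   gemvBatchedStridedRef<<<grid, block>>>(batches, N, K, dA, strideA,
//                                          dB, ldb, strideB, dC, strideC);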
86f5a65964839aab0e881ca82fb9cb44e828bf36.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <memory> #include <iostream> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int add(int argc, char** argv) { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
86f5a65964839aab0e881ca82fb9cb44e828bf36.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <memory> #include <iostream> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int add(int argc, char** argv) { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
01bfab1f214eab83e29fc902399a05e2ce21d21b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2012, Thomas Schutzmeier // FreeBSD License // See https://github.com/unvirtual/cukd/blob/master/LICENSE #include <cutil_inline.h> #include "../utils.h" #include "../kdtree.h" #include "../algorithms/reduction.h" #include "dev_structs.h" #include "../utils/intersection.h" namespace cukd { namespace device { class TreeStack { public: __device__ TreeStack() : ptr(0) {}; __inline__ __device__ void push(unsigned int nodeidx, float min, float max) { p_min[ptr] = min; p_max[ptr] = max; node[ptr] = nodeidx; ptr++; } __inline__ __device__ bool pop(unsigned int & nodeidx, float & min, float & max) { if(ptr > 0) { ptr--; min = p_min[ptr]; max = p_max[ptr]; nodeidx = node[ptr]; return true; } return false; } __inline__ __device__ bool empty() { return (ptr == 0); } private: int ptr; unsigned int node[STACK_SIZE]; float p_min[STACK_SIZE], p_max[STACK_SIZE]; }; // direct implementation of tree traversal algorithm taken from // "Heuristic cukdRay shooting Algorithms", Vlastimil Havran __device__ int ray_traverse_device(const cukdRay & ray, const UAABB & root, unsigned int *preorder, DevTriangleArray & tri_vertices, float & alpha, float & x1, float & x2, int & cost) { TreeStack stack; int ret_tri = -1; cost = 0; float p_min = 0; float p_max = 0; unsigned int current_node = 0; bool is_leaf, is_empty, left_empty, right_empty, ray_split_relative, tri_intersect; unsigned int node_first_value, left_node, right_node, split_axis, node_first, node_second, n_elements, index; float split_position, p_split, current_max, current_min, xalpha, xx1, xx2; if(!intersect_aabb(ray, root, p_min, p_max)) return -1; current_min = p_min; current_max = p_max; alpha = current_max; do { cost++; stack.pop(current_node, current_min, current_max); node_first_value = preorder[current_node]; is_leaf = (node_first_value & leaf_mask) != 0; is_empty = false; while(!is_leaf) { cost++; left_node = current_node + 2; right_node = node_first_value & right_node_mask; left_empty = (node_first_value & left_empty_mask) != 0; right_empty = (node_first_value & right_empty_mask) != 0; if(left_empty) left_node = 0; if(right_empty) right_node = 0; split_axis = (node_first_value & split_axis_mask) >> split_axis_shift; split_position = *(float*) & preorder[current_node + 1]; p_split = (split_position - ray.origin.component[split_axis]) * 1.f/ray.direction.component[split_axis]; ray_split_relative = ray.origin.component[split_axis] <= split_position; if(ray_split_relative) { node_first = left_node; node_second = right_node; } else { node_first = right_node; node_second = left_node; } if(fabsf(ray.origin.component[split_axis] - split_position) < 1e-8f) { if(1.f/ray.direction.component[split_axis] > 0) { current_node = node_second; } else { current_node = node_first; } } else if (p_split > current_max || p_split < 0.f) { current_node = node_first; } else if (p_split < current_min) { current_node = node_second; } else { if(((node_second == left_node) && !left_empty) || ((node_second == right_node) && !right_empty)) stack.push(node_second, p_split, current_max); current_node = node_first; current_max = p_split; } if( ((current_node == left_node) && left_empty) || ((current_node == right_node) && right_empty)) { is_empty = true; is_leaf = false; break; } node_first_value = preorder[current_node]; is_leaf = (node_first_value & leaf_mask) != 0; } if(current_max >= p_max) { break; } if (is_empty) { continue; }; n_elements = n_element_mask & 
preorder[current_node]; for(int i = 1; i <= n_elements; ++i){ cost++; index = preorder[current_node + i]; Triangle tri; tri.v[0] = tri_vertices.v[0][index]; tri.v[1] = tri_vertices.v[1][index]; tri.v[2] = tri_vertices.v[2][index]; tri_intersect = intersect_triangle(ray, tri, xalpha, xx1, xx2); if(tri_intersect && xalpha > 1e-8f && xalpha < alpha) { ret_tri = index; alpha = xalpha; x1 = xx1; x2 = xx2; } } if(alpha < current_max) { break; } } while (!stack.empty()); return ret_tri; } /********************************************************************************** * * KDTree Kernels * **********************************************************************************/ __global__ void ray_bunch_traverse_kernel(int width, int height, DevRayArray rays, UAABB root, unsigned int *preorder_tree, DevTriangleArray triangles, int* hits, int* costs, float *alphas, float* x1s, float* x2s) { int tid = blockIdx.x*blockDim.x + threadIdx.x; cukdRay ray; // TODO: return hit coordinates if(tid < width*height) { ray.origin = rays.origins[tid]; ray.direction = rays.directions[tid]; hits[tid] = ray_traverse_device(ray, root, preorder_tree, triangles, alphas[tid], x1s[tid], x2s[tid], costs[tid]); } }; __global__ void append_empty_nodes_kernel(int* cut_dir, int* offset, int* active_indices, device::NodeChunkArray active, int act_n_nodes, int n_tree_nodes, device::KDTreeNodeArray tree) { int tid = blockDim.x*blockIdx.x + threadIdx.x; if(tid >= act_n_nodes) return; float aabb_data[6]; #pragma unroll 3 for(int i = 0; i < 3; ++i) { aabb_data[2*i] = active.node_aabb.maxima[tid].component[i]; aabb_data[2*i+1] = active.node_aabb.minima[tid].component[i]; } float xdiff = aabb_data[0] - aabb_data[1]; float ydiff = aabb_data[2] - aabb_data[3]; float zdiff = aabb_data[4] - aabb_data[5]; // length = 4, 2 active -> first empty_space == 6 correct int current_node = (n_tree_nodes) + tid; int next_index = (n_tree_nodes) + act_n_nodes + offset[tid]; int next_split_index = (n_tree_nodes) + act_n_nodes + offset[act_n_nodes] + tid; int count = 0; int mask = 1; int right_cut = 0; int node_depth = active.na.depth[tid]; if(cut_dir[tid] != 0) { // generate empty splits for(; mask <= 0x20; mask <<= 1, ++count) { if(cut_dir[tid] & mask) { tree.na.split_position[current_node] = aabb_data[count]; tree.na.split_axis[current_node] = count / 2; // if non zero, right was cut right_cut = 0x15 & mask; tree.na.left_nodes[current_node] = 0; tree.na.right_nodes[current_node] = 0; if(right_cut != 0) tree.na.left_nodes[current_node] = next_index; else tree.na.right_nodes[current_node] = next_index; tree.na.depth[current_node] = node_depth++; current_node = next_index;; next_index++; } } } active_indices[tid] = current_node; if(xdiff > ydiff) { if(xdiff > zdiff) { tree.na.split_axis[current_node] = 0; tree.na.split_position[current_node] = 0.5f*xdiff + aabb_data[1]; } else { tree.na.split_axis[current_node] = 2; tree.na.split_position[current_node] = 0.5f*zdiff + aabb_data[5]; } } else { if(ydiff > zdiff) { tree.na.split_axis[current_node] = 1; tree.na.split_position[current_node] = 0.5f*ydiff + aabb_data[3]; } else { tree.na.split_axis[current_node] = 2; tree.na.split_position[current_node] = 0.5f*zdiff + aabb_data[5]; } } __syncthreads(); active.na.split_axis[tid] = tree.na.split_axis[current_node]; active.na.split_position[tid] = tree.na.split_position[current_node]; //set indices to median cut children in final list tree.na.left_nodes[current_node] = next_split_index; tree.na.right_nodes[current_node] = next_split_index + act_n_nodes; 
active.na.depth[tid] = node_depth; tree.na.depth[current_node] = node_depth; } __global__ void split_small_nodes_kernel(device::SplitCandidateArray sca, int* split_indices, int* leaf_tags, int* split_index_diff, int old_small_nodes, int old_tree_n_nodes, device::SmallNodeArray active, device::SmallNodeArray next, device::KDTreeNodeArray tree, int old_n_leaves ) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float split_position; int split_direction; int split_index; UFloat4 left_aabb_min, left_aabb_max, right_aabb_min, right_aabb_max; UInt64 node_elements, left_elements, right_elements; UInt64 root_idx; int tree_idx; int tree_left_child_idx = -100; int tree_right_child_idx = -200; int next_left_child_idx; int next_right_child_idx; int active_n_nodes; int new_left_n_nodes; if(tid < old_small_nodes) { active_n_nodes = old_small_nodes; new_left_n_nodes = active_n_nodes - split_index_diff[active_n_nodes-1]; node_elements = active.element_bits[tid]; root_idx = active.root_node_idx[tid]; tree_idx = old_tree_n_nodes + tid; tree_left_child_idx = active_n_nodes + tree_idx - split_index_diff[tid]; tree_right_child_idx = tree_left_child_idx + new_left_n_nodes; next_left_child_idx = tid - split_index_diff[tid]; next_right_child_idx = tid - split_index_diff[tid] + new_left_n_nodes; split_index = split_indices[tid]; split_position = sca.split_position[split_index]; split_direction = sca.split_direction[split_index]; if(leaf_tags[tid] == 1) { tree.na.left_nodes[tree_idx] = -1; tree.na.right_nodes[tree_idx] = -1; tree.na.split_position[tree_idx] = split_position; tree.na.split_axis[tree_idx] = split_direction; tree.na.depth[tree_idx] = active.na.na.depth[tid]; tree.leaf_idx[tree_idx] = split_index_diff[tid] - 1 + old_n_leaves; if(active.na.na.depth[tid] > *tree.max_depth) *tree.max_depth = active.na.na.depth[tid]; } else { tree.na.left_nodes[tree_idx] = tree_left_child_idx; tree.na.right_nodes[tree_idx] = tree_right_child_idx; tree.na.split_position[tree_idx] = split_position; tree.na.split_axis[tree_idx] = split_direction; tree.na.depth[tree_idx] = active.na.na.depth[tid]; left_elements = node_elements & sca.left_elements[split_index]; right_elements = node_elements & sca.right_elements[split_index]; left_aabb_min = active.na.node_aabb.minima[tid]; left_aabb_max = active.na.node_aabb.maxima[tid]; right_aabb_min = active.na.node_aabb.minima[tid]; right_aabb_max = active.na.node_aabb.maxima[tid]; left_aabb_max.component[split_direction] = split_position; right_aabb_min.component[split_direction] = split_position; next.na.node_aabb.minima[next_left_child_idx] = left_aabb_min; next.na.node_aabb.maxima[next_left_child_idx] = left_aabb_max; next.na.node_aabb.minima[next_right_child_idx] = right_aabb_min; next.na.node_aabb.maxima[next_right_child_idx] = right_aabb_max; next.element_bits[next_left_child_idx] = left_elements; next.element_bits[next_right_child_idx] = right_elements; next.root_node_idx[next_left_child_idx] = root_idx; next.root_node_idx[next_right_child_idx] = root_idx; next.na.na.depth[next_left_child_idx] = active.na.na.depth[tid] + 1; next.na.na.depth[next_right_child_idx] = active.na.na.depth[tid] + 1; } } } __global__ void preorder_bottom_up_kernel(device::KDTreeNodeArray tree, int tree_n_nodes, int level, unsigned int* preorder_node_sizes) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int left_node, right_node, result = 2; if(tid < tree_n_nodes && tree.na.depth[tid] == level) { left_node = tree.na.left_nodes[tid]; right_node = tree.na.right_nodes[tid]; if(left_node == -1 && right_node 
== -1) { result = tree.na.node_size[tree.leaf_idx[tid]] + 1; } else { if (left_node != 0) { result += preorder_node_sizes[left_node]; } if (right_node != 0) { result += preorder_node_sizes[right_node]; } } preorder_node_sizes[tid] = result; } }; __global__ void preorder_top_down_kernel(device::KDTreeNodeArray tree, int tree_n_nodes, int level, unsigned int* preorder_node_sizes, unsigned int* addresses, unsigned int* preorder_tree) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int left_node, right_node, address, right_index, left_index, leaf_idx; int first_elem_idx, i; if(tid < tree_n_nodes && tree.na.depth[tid] == level) { left_node = tree.na.left_nodes[tid]; right_node = tree.na.right_nodes[tid]; address = addresses[tid]; preorder_tree[address] = 0; if(left_node == -1 && right_node == -1) { leaf_idx = tree.leaf_idx[tid]; first_elem_idx = tree.na.node_element_first_idx[leaf_idx]; for(i = 0; i < tree.na.node_size[leaf_idx]; ++i) { preorder_tree[address + i + 1] = (unsigned int) tree.na.element_idx[first_elem_idx + i]; } preorder_tree[address] |= leaf_mask; preorder_tree[address] |= (unsigned int) tree.na.node_size[leaf_idx]; } if(left_node != right_node) { // just set the right address left_index = address + 2; right_index = address + preorder_node_sizes[left_node] + 2; if(left_node == 0) right_index = left_index; addresses[right_node] = right_index; addresses[left_node] = left_index; // if empty node, jump to the next free index preorder_tree[address + 1] = *(unsigned int*) &tree.na.split_position[tid]; preorder_tree[address] = (unsigned int) right_index; preorder_tree[address] |= ((unsigned int) tree.na.split_axis[tid] << split_axis_shift); } if(left_node == 0) { preorder_tree[address] |= (unsigned int) left_empty_mask; } if(right_node == 0) { preorder_tree[address] |= (unsigned int) right_empty_mask; } } }; /********************************************************************************** * * KDTree Kernel wrappers * **********************************************************************************/ void append_empty_nodes(cukd::NodeChunkArray & active_nca, cukd::KDTreeNodeArray & tree_nca, int old_tree_nodes, DevVector<int> & cut_dirs, DevVector<int> & offsets, DevVector<int> & active_indices) { dim3 grid(IntegerDivide(256)(active_nca.n_nodes()),1,1); dim3 blocks(256,1,1); hipLaunchKernelGGL(( append_empty_nodes_kernel), dim3(grid),dim3(blocks), 0, 0, cut_dirs.pointer(), offsets.pointer(), active_indices.pointer(), active_nca.dev_array(), active_nca.n_nodes(), old_tree_nodes, tree_nca.dev_array()); CUT_CHECK_ERROR("append_empty_nodes_kernel failed"); } void split_small_nodes(cukd::SplitCandidateArray & sca, DevVector<int> & split_indices, DevVector<int> & leaf_tags, DevVector<int> & split_index_diff, cukd::SmallNodeArray & active, int old_small_nodes, cukd::SmallNodeArray & next, cukd::KDTreeNodeArray & tree, int old_tree_nodes, int old_n_leaves) { dim3 grid(IntegerDivide(256)(old_small_nodes),1,1); dim3 blocks(256,1,1); hipLaunchKernelGGL(( split_small_nodes_kernel), dim3(grid), dim3(blocks), 0, 0, sca.dev_array(), split_indices.pointer(), leaf_tags.pointer(), split_index_diff.pointer(), old_small_nodes, old_tree_nodes, active.dev_array(), next.dev_array(), tree.dev_array(), old_n_leaves); CUT_CHECK_ERROR("split_small_nodes_kernel failed"); } } // namespace device } // namespace cukd
01bfab1f214eab83e29fc902399a05e2ce21d21b.cu
// Copyright (c) 2012, Thomas Schutzmeier // FreeBSD License // See https://github.com/unvirtual/cukd/blob/master/LICENSE #include <cutil_inline.h> #include "../utils.h" #include "../kdtree.h" #include "../algorithms/reduction.h" #include "dev_structs.h" #include "../utils/intersection.h" namespace cukd { namespace device { class TreeStack { public: __device__ TreeStack() : ptr(0) {}; __inline__ __device__ void push(unsigned int nodeidx, float min, float max) { p_min[ptr] = min; p_max[ptr] = max; node[ptr] = nodeidx; ptr++; } __inline__ __device__ bool pop(unsigned int & nodeidx, float & min, float & max) { if(ptr > 0) { ptr--; min = p_min[ptr]; max = p_max[ptr]; nodeidx = node[ptr]; return true; } return false; } __inline__ __device__ bool empty() { return (ptr == 0); } private: int ptr; unsigned int node[STACK_SIZE]; float p_min[STACK_SIZE], p_max[STACK_SIZE]; }; // direct implementation of tree traversal algorithm taken from // "Heuristic cukdRay shooting Algorithms", Vlastimil Havran __device__ int ray_traverse_device(const cukdRay & ray, const UAABB & root, unsigned int *preorder, DevTriangleArray & tri_vertices, float & alpha, float & x1, float & x2, int & cost) { TreeStack stack; int ret_tri = -1; cost = 0; float p_min = 0; float p_max = 0; unsigned int current_node = 0; bool is_leaf, is_empty, left_empty, right_empty, ray_split_relative, tri_intersect; unsigned int node_first_value, left_node, right_node, split_axis, node_first, node_second, n_elements, index; float split_position, p_split, current_max, current_min, xalpha, xx1, xx2; if(!intersect_aabb(ray, root, p_min, p_max)) return -1; current_min = p_min; current_max = p_max; alpha = current_max; do { cost++; stack.pop(current_node, current_min, current_max); node_first_value = preorder[current_node]; is_leaf = (node_first_value & leaf_mask) != 0; is_empty = false; while(!is_leaf) { cost++; left_node = current_node + 2; right_node = node_first_value & right_node_mask; left_empty = (node_first_value & left_empty_mask) != 0; right_empty = (node_first_value & right_empty_mask) != 0; if(left_empty) left_node = 0; if(right_empty) right_node = 0; split_axis = (node_first_value & split_axis_mask) >> split_axis_shift; split_position = *(float*) & preorder[current_node + 1]; p_split = (split_position - ray.origin.component[split_axis]) * 1.f/ray.direction.component[split_axis]; ray_split_relative = ray.origin.component[split_axis] <= split_position; if(ray_split_relative) { node_first = left_node; node_second = right_node; } else { node_first = right_node; node_second = left_node; } if(fabsf(ray.origin.component[split_axis] - split_position) < 1e-8f) { if(1.f/ray.direction.component[split_axis] > 0) { current_node = node_second; } else { current_node = node_first; } } else if (p_split > current_max || p_split < 0.f) { current_node = node_first; } else if (p_split < current_min) { current_node = node_second; } else { if(((node_second == left_node) && !left_empty) || ((node_second == right_node) && !right_empty)) stack.push(node_second, p_split, current_max); current_node = node_first; current_max = p_split; } if( ((current_node == left_node) && left_empty) || ((current_node == right_node) && right_empty)) { is_empty = true; is_leaf = false; break; } node_first_value = preorder[current_node]; is_leaf = (node_first_value & leaf_mask) != 0; } if(current_max >= p_max) { break; } if (is_empty) { continue; }; n_elements = n_element_mask & preorder[current_node]; for(int i = 1; i <= n_elements; ++i){ cost++; index = preorder[current_node + i]; 
Triangle tri; tri.v[0] = tri_vertices.v[0][index]; tri.v[1] = tri_vertices.v[1][index]; tri.v[2] = tri_vertices.v[2][index]; tri_intersect = intersect_triangle(ray, tri, xalpha, xx1, xx2); if(tri_intersect && xalpha > 1e-8f && xalpha < alpha) { ret_tri = index; alpha = xalpha; x1 = xx1; x2 = xx2; } } if(alpha < current_max) { break; } } while (!stack.empty()); return ret_tri; } /********************************************************************************** * * KDTree Kernels * **********************************************************************************/ __global__ void ray_bunch_traverse_kernel(int width, int height, DevRayArray rays, UAABB root, unsigned int *preorder_tree, DevTriangleArray triangles, int* hits, int* costs, float *alphas, float* x1s, float* x2s) { int tid = blockIdx.x*blockDim.x + threadIdx.x; cukdRay ray; // TODO: return hit coordinates if(tid < width*height) { ray.origin = rays.origins[tid]; ray.direction = rays.directions[tid]; hits[tid] = ray_traverse_device(ray, root, preorder_tree, triangles, alphas[tid], x1s[tid], x2s[tid], costs[tid]); } }; __global__ void append_empty_nodes_kernel(int* cut_dir, int* offset, int* active_indices, device::NodeChunkArray active, int act_n_nodes, int n_tree_nodes, device::KDTreeNodeArray tree) { int tid = blockDim.x*blockIdx.x + threadIdx.x; if(tid >= act_n_nodes) return; float aabb_data[6]; #pragma unroll 3 for(int i = 0; i < 3; ++i) { aabb_data[2*i] = active.node_aabb.maxima[tid].component[i]; aabb_data[2*i+1] = active.node_aabb.minima[tid].component[i]; } float xdiff = aabb_data[0] - aabb_data[1]; float ydiff = aabb_data[2] - aabb_data[3]; float zdiff = aabb_data[4] - aabb_data[5]; // length = 4, 2 active -> first empty_space == 6 correct int current_node = (n_tree_nodes) + tid; int next_index = (n_tree_nodes) + act_n_nodes + offset[tid]; int next_split_index = (n_tree_nodes) + act_n_nodes + offset[act_n_nodes] + tid; int count = 0; int mask = 1; int right_cut = 0; int node_depth = active.na.depth[tid]; if(cut_dir[tid] != 0) { // generate empty splits for(; mask <= 0x20; mask <<= 1, ++count) { if(cut_dir[tid] & mask) { tree.na.split_position[current_node] = aabb_data[count]; tree.na.split_axis[current_node] = count / 2; // if non zero, right was cut right_cut = 0x15 & mask; tree.na.left_nodes[current_node] = 0; tree.na.right_nodes[current_node] = 0; if(right_cut != 0) tree.na.left_nodes[current_node] = next_index; else tree.na.right_nodes[current_node] = next_index; tree.na.depth[current_node] = node_depth++; current_node = next_index;; next_index++; } } } active_indices[tid] = current_node; if(xdiff > ydiff) { if(xdiff > zdiff) { tree.na.split_axis[current_node] = 0; tree.na.split_position[current_node] = 0.5f*xdiff + aabb_data[1]; } else { tree.na.split_axis[current_node] = 2; tree.na.split_position[current_node] = 0.5f*zdiff + aabb_data[5]; } } else { if(ydiff > zdiff) { tree.na.split_axis[current_node] = 1; tree.na.split_position[current_node] = 0.5f*ydiff + aabb_data[3]; } else { tree.na.split_axis[current_node] = 2; tree.na.split_position[current_node] = 0.5f*zdiff + aabb_data[5]; } } __syncthreads(); active.na.split_axis[tid] = tree.na.split_axis[current_node]; active.na.split_position[tid] = tree.na.split_position[current_node]; //set indices to median cut children in final list tree.na.left_nodes[current_node] = next_split_index; tree.na.right_nodes[current_node] = next_split_index + act_n_nodes; active.na.depth[tid] = node_depth; tree.na.depth[current_node] = node_depth; } __global__ void 
split_small_nodes_kernel(device::SplitCandidateArray sca, int* split_indices, int* leaf_tags, int* split_index_diff, int old_small_nodes, int old_tree_n_nodes, device::SmallNodeArray active, device::SmallNodeArray next, device::KDTreeNodeArray tree, int old_n_leaves ) { int tid = blockIdx.x*blockDim.x + threadIdx.x; float split_position; int split_direction; int split_index; UFloat4 left_aabb_min, left_aabb_max, right_aabb_min, right_aabb_max; UInt64 node_elements, left_elements, right_elements; UInt64 root_idx; int tree_idx; int tree_left_child_idx = -100; int tree_right_child_idx = -200; int next_left_child_idx; int next_right_child_idx; int active_n_nodes; int new_left_n_nodes; if(tid < old_small_nodes) { active_n_nodes = old_small_nodes; new_left_n_nodes = active_n_nodes - split_index_diff[active_n_nodes-1]; node_elements = active.element_bits[tid]; root_idx = active.root_node_idx[tid]; tree_idx = old_tree_n_nodes + tid; tree_left_child_idx = active_n_nodes + tree_idx - split_index_diff[tid]; tree_right_child_idx = tree_left_child_idx + new_left_n_nodes; next_left_child_idx = tid - split_index_diff[tid]; next_right_child_idx = tid - split_index_diff[tid] + new_left_n_nodes; split_index = split_indices[tid]; split_position = sca.split_position[split_index]; split_direction = sca.split_direction[split_index]; if(leaf_tags[tid] == 1) { tree.na.left_nodes[tree_idx] = -1; tree.na.right_nodes[tree_idx] = -1; tree.na.split_position[tree_idx] = split_position; tree.na.split_axis[tree_idx] = split_direction; tree.na.depth[tree_idx] = active.na.na.depth[tid]; tree.leaf_idx[tree_idx] = split_index_diff[tid] - 1 + old_n_leaves; if(active.na.na.depth[tid] > *tree.max_depth) *tree.max_depth = active.na.na.depth[tid]; } else { tree.na.left_nodes[tree_idx] = tree_left_child_idx; tree.na.right_nodes[tree_idx] = tree_right_child_idx; tree.na.split_position[tree_idx] = split_position; tree.na.split_axis[tree_idx] = split_direction; tree.na.depth[tree_idx] = active.na.na.depth[tid]; left_elements = node_elements & sca.left_elements[split_index]; right_elements = node_elements & sca.right_elements[split_index]; left_aabb_min = active.na.node_aabb.minima[tid]; left_aabb_max = active.na.node_aabb.maxima[tid]; right_aabb_min = active.na.node_aabb.minima[tid]; right_aabb_max = active.na.node_aabb.maxima[tid]; left_aabb_max.component[split_direction] = split_position; right_aabb_min.component[split_direction] = split_position; next.na.node_aabb.minima[next_left_child_idx] = left_aabb_min; next.na.node_aabb.maxima[next_left_child_idx] = left_aabb_max; next.na.node_aabb.minima[next_right_child_idx] = right_aabb_min; next.na.node_aabb.maxima[next_right_child_idx] = right_aabb_max; next.element_bits[next_left_child_idx] = left_elements; next.element_bits[next_right_child_idx] = right_elements; next.root_node_idx[next_left_child_idx] = root_idx; next.root_node_idx[next_right_child_idx] = root_idx; next.na.na.depth[next_left_child_idx] = active.na.na.depth[tid] + 1; next.na.na.depth[next_right_child_idx] = active.na.na.depth[tid] + 1; } } } __global__ void preorder_bottom_up_kernel(device::KDTreeNodeArray tree, int tree_n_nodes, int level, unsigned int* preorder_node_sizes) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int left_node, right_node, result = 2; if(tid < tree_n_nodes && tree.na.depth[tid] == level) { left_node = tree.na.left_nodes[tid]; right_node = tree.na.right_nodes[tid]; if(left_node == -1 && right_node == -1) { result = tree.na.node_size[tree.leaf_idx[tid]] + 1; } else { if (left_node != 0) { 
result += preorder_node_sizes[left_node]; } if (right_node != 0) { result += preorder_node_sizes[right_node]; } } preorder_node_sizes[tid] = result; } }; __global__ void preorder_top_down_kernel(device::KDTreeNodeArray tree, int tree_n_nodes, int level, unsigned int* preorder_node_sizes, unsigned int* addresses, unsigned int* preorder_tree) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int left_node, right_node, address, right_index, left_index, leaf_idx; int first_elem_idx, i; if(tid < tree_n_nodes && tree.na.depth[tid] == level) { left_node = tree.na.left_nodes[tid]; right_node = tree.na.right_nodes[tid]; address = addresses[tid]; preorder_tree[address] = 0; if(left_node == -1 && right_node == -1) { leaf_idx = tree.leaf_idx[tid]; first_elem_idx = tree.na.node_element_first_idx[leaf_idx]; for(i = 0; i < tree.na.node_size[leaf_idx]; ++i) { preorder_tree[address + i + 1] = (unsigned int) tree.na.element_idx[first_elem_idx + i]; } preorder_tree[address] |= leaf_mask; preorder_tree[address] |= (unsigned int) tree.na.node_size[leaf_idx]; } if(left_node != right_node) { // just set the right address left_index = address + 2; right_index = address + preorder_node_sizes[left_node] + 2; if(left_node == 0) right_index = left_index; addresses[right_node] = right_index; addresses[left_node] = left_index; // if empty node, jump to the next free index preorder_tree[address + 1] = *(unsigned int*) &tree.na.split_position[tid]; preorder_tree[address] = (unsigned int) right_index; preorder_tree[address] |= ((unsigned int) tree.na.split_axis[tid] << split_axis_shift); } if(left_node == 0) { preorder_tree[address] |= (unsigned int) left_empty_mask; } if(right_node == 0) { preorder_tree[address] |= (unsigned int) right_empty_mask; } } }; /********************************************************************************** * * KDTree Kernel wrappers * **********************************************************************************/ void append_empty_nodes(cukd::NodeChunkArray & active_nca, cukd::KDTreeNodeArray & tree_nca, int old_tree_nodes, DevVector<int> & cut_dirs, DevVector<int> & offsets, DevVector<int> & active_indices) { dim3 grid(IntegerDivide(256)(active_nca.n_nodes()),1,1); dim3 blocks(256,1,1); append_empty_nodes_kernel<<<grid,blocks>>>(cut_dirs.pointer(), offsets.pointer(), active_indices.pointer(), active_nca.dev_array(), active_nca.n_nodes(), old_tree_nodes, tree_nca.dev_array()); CUT_CHECK_ERROR("append_empty_nodes_kernel failed"); } void split_small_nodes(cukd::SplitCandidateArray & sca, DevVector<int> & split_indices, DevVector<int> & leaf_tags, DevVector<int> & split_index_diff, cukd::SmallNodeArray & active, int old_small_nodes, cukd::SmallNodeArray & next, cukd::KDTreeNodeArray & tree, int old_tree_nodes, int old_n_leaves) { dim3 grid(IntegerDivide(256)(old_small_nodes),1,1); dim3 blocks(256,1,1); split_small_nodes_kernel<<<grid, blocks>>>(sca.dev_array(), split_indices.pointer(), leaf_tags.pointer(), split_index_diff.pointer(), old_small_nodes, old_tree_nodes, active.dev_array(), next.dev_array(), tree.dev_array(), old_n_leaves); CUT_CHECK_ERROR("split_small_nodes_kernel failed"); } } // namespace device } // namespace cukd
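ray_traverse_device above first clips the ray against the root bounding box with intersect_aabb, which lives in ../utils/intersection.h and is not shown in this file. The sketch below is a standalone slab-test version of that clip; Ray and AABB are illustrative stand-ins for the project's cukdRay and UAABB types, and the [t_min, t_max] interval returned here corresponds to the [p_min, p_max] interval the traversal works with.

#include <cuda_runtime.h>

struct Ray  { float3 origin, direction; };   // stand-in for cukdRay
struct AABB { float3 minimum, maximum; };    // stand-in for UAABB

// Classic slab test: clip the ray against the three pairs of axis-aligned
// planes and keep the overlap [t_min, t_max]. Returns false when the overlap
// is empty or lies entirely behind the ray origin.
__host__ __device__ bool intersectAABB(const Ray& ray, const AABB& box,
                                       float& t_min, float& t_max) {
    float o[3]  = {ray.origin.x,    ray.origin.y,    ray.origin.z};
    float d[3]  = {ray.direction.x, ray.direction.y, ray.direction.z};
    float lo[3] = {box.minimum.x,   box.minimum.y,   box.minimum.z};
    float hi[3] = {box.maximum.x,   box.maximum.y,   box.maximum.z};

    t_min = 0.0f;            // only count hits in front of the origin
    t_max = 1e30f;
    for (int axis = 0; axis < 3; ++axis) {
        float inv = 1.0f / d[axis];          // +/-inf for axis-parallel rays
        float t0 = (lo[axis] - o[axis]) * inv;
        float t1 = (hi[axis] - o[axis]) * inv;
        if (t0 > t1) { float tmp = t0; t0 = t1; t1 = tmp; }
        if (t0 > t_min) t_min = t0;
        if (t1 < t_max) t_max = t1;
        if (t_min > t_max) return false;     // slabs no longer overlap
    }
    return true;
}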
0ee4ab0525d1e0336c2ba191471636f055570ae5.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <iostream> #include "hip/hip_runtime.h" #include "kernel.h" #include <stdlib.h> using namespace std; __global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) { int ROW = blockIdx.y*blockDim.y+threadIdx.y; int COL = blockIdx.x*blockDim.x+threadIdx.x; float tmpSum = 0; if (ROW < N && COL < N) { // each thread computes one element of the block sub-matrix for (int i = 0; i < N; i++) { tmpSum += A[ROW * N + i] * B[i * N + COL]; } } C[ROW * N + COL] = tmpSum; } void matrixMultiplication(float *A, float *B, float *C, int N){ // declare the number of blocks per grid and the number of threads per block // use 1 to 512 threads per block dim3 threadsPerBlock(N, N); dim3 blocksPerGrid(1, 1); if (N*N > 512){ threadsPerBlock.x = 512; threadsPerBlock.y = 512; blocksPerGrid.x = ceil(double(N)/double(threadsPerBlock.x)); blocksPerGrid.y = ceil(double(N)/double(threadsPerBlock.y)); } hipLaunchKernelGGL(( matrixMultiplicationKernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, A, B, C, N); }
0ee4ab0525d1e0336c2ba191471636f055570ae5.cu
#include <math.h> #include <iostream> #include "cuda_runtime.h" #include "kernel.h" #include <stdlib.h> using namespace std; __global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) { int ROW = blockIdx.y*blockDim.y+threadIdx.y; int COL = blockIdx.x*blockDim.x+threadIdx.x; float tmpSum = 0; if (ROW < N && COL < N) { // each thread computes one element of the block sub-matrix for (int i = 0; i < N; i++) { tmpSum += A[ROW * N + i] * B[i * N + COL]; } } C[ROW * N + COL] = tmpSum; } void matrixMultiplication(float *A, float *B, float *C, int N){ // declare the number of blocks per grid and the number of threads per block // use 1 to 512 threads per block dim3 threadsPerBlock(N, N); dim3 blocksPerGrid(1, 1); if (N*N > 512){ threadsPerBlock.x = 512; threadsPerBlock.y = 512; blocksPerGrid.x = ceil(double(N)/double(threadsPerBlock.x)); blocksPerGrid.y = ceil(double(N)/double(threadsPerBlock.y)); } matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(A, B, C, N); }
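One caveat in the wrapper above: when N*N > 512 it sets both threadsPerBlock.x and threadsPerBlock.y to 512, i.e. 262,144 threads per block, well past CUDA's 1,024-threads-per-block limit, so the launch fails for larger N. The kernel also stores C[ROW * N + COL] outside the bounds check, which writes out of range for threads beyond the matrix edge. A corrected sketch using a fixed 16x16 tile (function names here are illustrative, not the original API):

#include <cuda_runtime.h>

__global__ void matMulKernel(const float* A, const float* B, float* C, int N) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < N && col < N) {            // keep the store inside the guard
        float sum = 0.f;
        for (int k = 0; k < N; ++k)
            sum += A[row * N + k] * B[k * N + col];
        C[row * N + col] = sum;
    }
}

void matMul(const float* A, const float* B, float* C, int N) {
    // 16 x 16 = 256 threads per block, comfortably under the 1,024-thread cap.
    dim3 threads(16, 16);
    dim3 blocks((N + threads.x - 1) / threads.x,
                (N + threads.y - 1) / threads.y);
    matMulKernel<<<blocks, threads>>>(A, B, C, N);
}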
4a9cb6787c0fb77f81d571e171f53056a3798ee9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kernel_cudaPrepareProjectionIndexes(char *d_v_is_projection, int *d_nearest_neighbour_indexes, int number_of_points) { int ind=blockIdx.x*blockDim.x+threadIdx.x; if(ind<number_of_points) { if(d_v_is_projection[ind] == 0) { d_nearest_neighbour_indexes[ind] = -1; }else { d_nearest_neighbour_indexes[ind] = ind; } } }
4a9cb6787c0fb77f81d571e171f53056a3798ee9.cu
#include "includes.h" __global__ void kernel_cudaPrepareProjectionIndexes(char *d_v_is_projection, int *d_nearest_neighbour_indexes, int number_of_points) { int ind=blockIdx.x*blockDim.x+threadIdx.x; if(ind<number_of_points) { if(d_v_is_projection[ind] == 0) { d_nearest_neighbour_indexes[ind] = -1; }else { d_nearest_neighbour_indexes[ind] = ind; } } }
3f1bce3e4d52c126524971385096916a0449ab55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> typedef unsigned long long ul; typedef unsigned int uint; int banyakdata = 1024; int dimensigrid = 8; int dimensiblok = 128; typedef struct { char size; uint* value; }big; typedef struct { short size; char* value; }stringnumber; __host__ __device__ short ukuranbit(big *a); __host__ __device__ char getbit(big* a, short count); __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser); __host__ __device__ void kali(big *a, big *b, big* res); __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff); __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff); __device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); __device__ void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff); __global__ void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); __global__ void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff); hipError_t CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res); hipError_t CUDAdek(big *c, big *e, big* p, big *res); void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y); void maindekripsi(big* c,big* x,big* p,big* res2); void tambah(big* a, char b, big* res); void kurang(big* a, big *b, big* res); void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff); void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff); void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2); void copybig(big* a, big* res); void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten); void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff); void printsn(stringnumber* sn); void teskonversi(); __device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff) { // BLok 1 Cipher modexp(g,k,p,res,minbuff->value,mulbuff); // Blok 2 Cipher modexp(y, k, p, res + 1,minbuff->value,mulbuff); kali(res + 1, m, mulbuff); modulo(mulbuff, p, res+1, minbuff->value); } __device__ void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff) { modexp(c1,e,p,res,minbuff->value,mulbuff); kali(res, c2, mulbuff); modulo(mulbuff, p, res, minbuff->value); } __global__ void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff){ int i = threadIdx.x + blockIdx.x * blockDim.x; enkripsi(m + i, k + i, g, p, y, res + 2 * i, minbuff+i, mulbuff+i); } __global__ void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff){ int i = threadIdx.x + blockIdx.x * blockDim.x; dekripsi(c + 2*i, c + 2*i+1, e, p, res+i, minbuff+i, mulbuff+i); } hipError_t CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res) { hipError_t cudaStatus; hipSetDevice(0); //=====================BAGIAN G, P, DAN Y ====================================// big *devg, *devp, *devy; hipMalloc((void**)&devg, sizeof(big)); hipMalloc((void**)&devp, sizeof(big)); hipMalloc((void**)&devy, sizeof(big)); uint *darrg, *darrp, *darry; hipMalloc((void**)&darrg, g->size * sizeof(uint)); hipMalloc((void**)&darrp, p->size * sizeof(uint)); hipMalloc((void**)&darry, y->size * sizeof(uint)); big tempg; hipMemcpy(darrg, g->value, 
(sizeof(uint) * g->size), hipMemcpyHostToDevice); tempg.size = g->size; tempg.value = darrg; hipMemcpy((devg), &tempg, (sizeof(big)), hipMemcpyHostToDevice); big tempp; hipMemcpy(darrp, p->value, (sizeof(uint) * p->size), hipMemcpyHostToDevice); tempp.size = p->size; tempp.value = darrp; hipMemcpy((devp), &tempp, (sizeof(big)), hipMemcpyHostToDevice); big tempy; hipMemcpy(darry, y->value, (sizeof(uint) * y->size), hipMemcpyHostToDevice); tempy.size = y->size; tempy.value = darry; hipMemcpy((devy), &tempy, (sizeof(big)), hipMemcpyHostToDevice); //=====================BAGIAN M[] DAN K[] ====================================// big *devm, *devk, *devres, *minbuff, *mulbuff; hipMalloc((void**)&devm, banyakdata * sizeof(big)); hipMalloc((void**)&devk, banyakdata * sizeof(big)); hipMalloc((void**)&devres, banyakdata * 2 *sizeof(big)); hipMalloc((void**)&minbuff, banyakdata * sizeof(big)); hipMalloc((void**)&mulbuff, banyakdata * sizeof(big)); uint **tempvalue = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue2 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3a = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3b = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue4 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue5 = (uint**)malloc(sizeof(uint*)*banyakdata); // Alokasi Memori untuk blok m dan k for (int i = 0; i < banyakdata; i++) { big temp; hipMalloc((void**)&tempvalue[i], (sizeof(uint) * m[0].size)); hipMemcpy(tempvalue[i], m[0].value, (sizeof(uint) * m[0].size), hipMemcpyHostToDevice); temp.size = m[0].size; temp.value = tempvalue[i]; hipMemcpy((devm + i), &temp, (sizeof(big)), hipMemcpyHostToDevice); big temp2; hipMalloc((void**)&tempvalue2[i], (sizeof(uint) * k[0].size)); hipMemcpy(tempvalue2[i], k[0].value, (sizeof(uint) * k[0].size), hipMemcpyHostToDevice); temp2.size = k[0].size; temp2.value = tempvalue2[i]; hipMemcpy((devk + i), &temp2, (sizeof(big)), hipMemcpyHostToDevice); big temp3a; hipMalloc((void**)&tempvalue3a[i], (sizeof(uint) * p->size * 2)); temp3a.value = tempvalue3a[i]; hipMemcpy((devres + 2 * i), &temp3a, (sizeof(big)), hipMemcpyHostToDevice); big temp3b; hipMalloc((void**)&tempvalue3b[i], (sizeof(uint) * p->size * 2)); temp3b.value = tempvalue3b[i]; hipMemcpy((devres + 2 * i + 1), &temp3b, (sizeof(big)), hipMemcpyHostToDevice); big temp4; hipMalloc((void**)&tempvalue4[i], (sizeof(uint) * p->size * 2)); temp4.value = tempvalue4[i]; hipMemcpy((minbuff + i), &temp4, (sizeof(big)), hipMemcpyHostToDevice); big temp5; hipMalloc((void**)&tempvalue5[i], (sizeof(uint) * p->size * 2)); temp5.value = tempvalue5[i]; hipMemcpy((mulbuff + i), &temp5, (sizeof(big)), hipMemcpyHostToDevice); } // size_t free_byte ; // size_t total_byte ; // hipMemGetInfo( &free_byte, &total_byte ) ; // double free_db = (double)free_byte ; // double total_db = (double)total_byte ; // double used_db = total_db - free_db ; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); kernelenk << <dimensigrid, dimensiblok >> >(devm, devk, devg, devp, devy, devres, minbuff, mulbuff); cudaStatus = hipGetLastError(); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Durasi = %f milidetik\n", milliseconds); // printf("GPU Memory: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (cudaStatus != hipSuccess) { fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } 
else { // printf("Success\n"); } hipDeviceSynchronize(); // COPY FROM DEVICE TO HOST HERE big* tempres = (big*) malloc(banyakdata * 2 * sizeof(big)); for (int i = 0; i < banyakdata*2; i++){ tempres[i].value = (uint*) malloc(sizeof(uint) * p->size); } hipMemcpy(tempres, devres, (sizeof(big) * 2 * banyakdata), hipMemcpyDeviceToHost); for (int i = 0; i < banyakdata*2; i++){ res[i].size = tempres[i].size; hipMemcpy(res[i].value, tempres[i].value, sizeof(uint) * p->size, hipMemcpyDeviceToHost); } hipFree(darrg); hipFree(darrp); hipFree(darry); hipFree(devg); hipFree(devp); hipFree(devy); for (int i = 0; i < banyakdata; i++) { hipFree(tempvalue[i]); hipFree(tempvalue2[i]); hipFree(tempvalue3a[i]); hipFree(tempvalue3b[i]); hipFree(tempvalue4[i]); hipFree(tempvalue5[i]); } free(tempvalue); free(tempvalue2); free(tempvalue3a); free(tempvalue3b); free(tempvalue4); free(tempvalue5); hipFree(devm); hipFree(devk); hipFree(devres); hipFree(minbuff); hipFree(mulbuff); free(tempres); //hipProfilerStop(); //free(med); return cudaStatus; } hipError_t CUDAdek(big *c, big *e, big* p, big *res) { hipError_t cudaStatus; hipSetDevice(0); //=====================BAGIAN p dan e ( eksponen) ====================================// big *devp, *deve; hipMalloc((void**)&devp, sizeof(big)); hipMalloc((void**)&deve, sizeof(big)); uint *darrp, *darre; hipMalloc((void**)&darrp, p->size * sizeof(uint)); hipMalloc((void**)&darre, e->size * sizeof(uint)); big tempp; hipMemcpy(darrp, p->value, (sizeof(uint) * p->size), hipMemcpyHostToDevice); tempp.size = p->size; tempp.value = darrp; hipMemcpy((devp), &tempp, (sizeof(big)), hipMemcpyHostToDevice); big tempe; hipMemcpy(darre, e->value, (sizeof(uint) * e->size), hipMemcpyHostToDevice); tempe.size = e->size; tempe.value = darre; hipMemcpy((deve), &tempe, (sizeof(big)), hipMemcpyHostToDevice); //======================================BAGIAN C[] ====================================// big *devc, *devres, *minbuff, *mulbuff; hipMalloc((void**)&devc, banyakdata * 2 * sizeof(big)); hipMalloc((void**)&devres, banyakdata * sizeof(big)); hipMalloc((void**)&minbuff, banyakdata * sizeof(big)); hipMalloc((void**)&mulbuff, banyakdata * sizeof(big)); uint **tempvalue = (uint**)malloc(sizeof(uint*)*banyakdata*2); uint **tempvalue2 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue4 = (uint**)malloc(sizeof(uint*)*banyakdata); // Alokasi Memori untuk blok m dan k for (int i = 0; i < banyakdata; i++) { big temp11; hipMalloc((void**)&tempvalue[2*i], (sizeof(uint) * c[0].size)); hipMemcpy(tempvalue[2*i], c[0].value, (sizeof(uint) * c[0].size), hipMemcpyHostToDevice); temp11.size = c[0].size; temp11.value = tempvalue[2*i]; hipMemcpy((devc + 2*i), &temp11, (sizeof(big)), hipMemcpyHostToDevice); big temp12; hipMalloc((void**)&tempvalue[2*i+1], (sizeof(uint) * c[1].size)); hipMemcpy(tempvalue[2*i+1], c[1].value, (sizeof(uint) * c[1].size), hipMemcpyHostToDevice); temp12.size = c[1].size; temp12.value = tempvalue[2*i+1]; hipMemcpy((devc + 2*i+1), &temp12, (sizeof(big)), hipMemcpyHostToDevice); big temp2; hipMalloc((void**)&tempvalue2[i], (sizeof(uint) * p->size * 2)); temp2.value = tempvalue2[i]; hipMemcpy((devres + i), &temp2, (sizeof(big)), hipMemcpyHostToDevice); big temp3; hipMalloc((void**)&tempvalue3[i], (sizeof(uint) * p->size * 2)); temp3.value = tempvalue3[i]; hipMemcpy((minbuff + i), &temp3, (sizeof(big)), hipMemcpyHostToDevice); big temp4; hipMalloc((void**)&tempvalue4[i], (sizeof(uint) * p->size * 2)); temp4.value = 
tempvalue4[i]; hipMemcpy((mulbuff + i), &temp4, (sizeof(big)), hipMemcpyHostToDevice); } // size_t free_byte ; // size_t total_byte ; // hipMemGetInfo( &free_byte, &total_byte ) ; // double free_db = (double)free_byte ; // double total_db = (double)total_byte ; // double used_db = total_db - free_db ; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); kerneldek << <dimensigrid, dimensiblok >> >(devc, deve, devp, devres, minbuff, mulbuff); cudaStatus = hipGetLastError(); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Durasi = %f milidetik\n", milliseconds); // printf("GPU Memory: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (cudaStatus != hipSuccess) { fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } else { // printf("Success\n"); } hipDeviceSynchronize(); // COPY FROM DEVICE TO HOST HERE big* tempres = (big*) malloc(banyakdata * sizeof(big)); for (int i = 0; i < banyakdata; i++){ tempres[i].value = (uint*) malloc(sizeof(uint) * p->size); } hipMemcpy(tempres, devres, (sizeof(big) * banyakdata), hipMemcpyDeviceToHost); for (int i = 0; i < banyakdata; i++){ res[i].size = tempres[i].size; hipMemcpy(res[i].value, tempres[i].value, sizeof(uint) * p->size, hipMemcpyDeviceToHost); } hipFree(darrp); hipFree(darre); hipFree(devp); hipFree(deve); for (int i = 0; i < 2 * banyakdata; i++) { hipFree(tempvalue[i]); } for (int i = 0; i < banyakdata; i++) { hipFree(tempvalue2[i]); hipFree(tempvalue3[i]); hipFree(tempvalue4[i]); } free(tempvalue); free(tempvalue2); free(tempvalue3); free(tempvalue4); hipFree(devc); hipFree(devres); hipFree(minbuff); hipFree(mulbuff); free(tempres); return cudaStatus; } void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y){ printf("Encrypting...\n"); //========================================================// hipError_t cudaStatus = CUDAenk(m, k, g, p, y, res); if (cudaStatus != hipSuccess) { fprintf(stderr, "\nenkripsiCUDA failed!"); } cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); } // for (int i = 0; i < 5; i++) // { // printf("Cipher %d size %d : %u\n",i, res[i].size, res[i].value[0]); // } // printf("Cipher ... : ...\n"); // printf("Cipher %d size %d : %u\n",banyakdata*2-2, res[banyakdata*2-2].size, res[banyakdata*2-2].value[0]); // printf("Cipher %d size %d : %u\n",banyakdata*2-1, res[banyakdata*2-2].size, res[banyakdata*2-1].value[0]); } void maindekripsi(big* c,big* e,big* p,big* res2){ printf("Decrypting...\n"); //========================================================// hipError_t cudaStatus = CUDAdek(c, e, p, res2); if (cudaStatus != hipSuccess) { fprintf(stderr, "\ndekripsiCUDA failed!"); } cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); } // for (int i = 0; i < 5; i++) // { // printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[0]); // printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[1]); // } // printf("Plain ... 
: ...\n"); // printf("Plain %d size %d : %u\n",banyakdata-1, res2[banyakdata-1].size, res2[banyakdata-1].value[0]); } void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){ modexp(g,x,p,y,minbuff,mulbuff); } void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2){ // Kunci publik p p->size = 8; p->value = (uint*) malloc(p->size * sizeof(uint)); for (int i = 0; i < p->size; i++) { // p->value[i] = 2357; p->value[i] = UINT_MAX-82; } // p->value[0] = UINT_MAX-4; // p->value[0] = 2387; // p->value[1] = 2357; // Kunci publik g g->size = 8; g->value = (uint*) malloc(g->size * sizeof(uint)); for (int i = 0; i < g->size; i++) { // g->value[i] = 2; g->value[i] = UINT_MAX-902; } // Kunci privat x x->size = 8; x->value = (uint*) malloc(x->size * sizeof(uint)); for (int i = 0; i < x->size; i++) { // x->value[i] = 1751; x->value[i] = UINT_MAX-86262; } // Cari nilai eksponen e = (p-x-1) untuk dekripsi big *xplus1 = (big*) malloc(sizeof(big)); xplus1->value = (uint*) malloc(p->size * sizeof(uint)); e->value = (uint*) malloc(p->size * sizeof(uint)); tambah(x, 1, xplus1); kurang(p,xplus1,e); free(xplus1->value); free(xplus1); // Cari nilai kunci publik y = (g^x) mod p big* mulbuff = (big*) malloc(sizeof(big)); mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2); uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2); y->value = (uint*) malloc(p->size * 2 * sizeof(uint)); carikunciy(g,x,p,y,minbuff,mulbuff); // printf("y 0 : %u\n", y->value[0]); // printf("y 0 : %u\n", y->value[1]); //========================================================// // Blok plainteks m->size = 8; m->value = (uint*) malloc(m->size * sizeof(uint)); for (int i = 0; i < m->size; i++) { // m->value[i] = 1001; m->value[i] = UINT_MAX-5522; } // Nilai k masing-masing blok k->size = 8; k->value = (uint*) malloc(k->size * sizeof(uint)); for (int i = 0; i < k->size; i++) { // k->value[i] = 77; k->value[i] = UINT_MAX-38227; } // Alokasi memori untuk result for (int i = 0; i < banyakdata*2; i++) { res[i].value = (uint*) malloc(sizeof(uint) * p->size); } // Alokasi memori untuk result 2 for (int i = 0; i < banyakdata; i++) { res2[i].value = (uint*) malloc(sizeof(uint) * p->size); } } int main(){ big *p, *g, *x, *e, *y, *m, *k, *res, *res2; p = (big*)malloc(sizeof(big)); g = (big*)malloc(sizeof(big)); x = (big*)malloc(sizeof(big)); e = (big*)malloc(sizeof(big)); y = (big*)malloc(sizeof(big)); m = (big*)malloc(banyakdata * sizeof(big)); k = (big*)malloc(banyakdata * sizeof(big)); res = (big*)malloc(banyakdata * 2 * sizeof(big)); res2 = (big*)malloc(banyakdata * sizeof(big)); init(p,g,x,e,y,m,k,res,res2); mainenkripsi(m,k,res,g,p,y); printf(" ========================= \n"); maindekripsi(res,e,p,res2); free(p->value); free(p); free(g->value); free(g); free(x->value); free(x); free(e->value); free(e); free(y->value); free(y); free(m->value); free(m); free(k->value); free(k); free(res->value); free(res); free(res2->value); free(res2); //teskonversi(); return 0; } __host__ __device__ short ukuranbit(big *a) { uint lastval = a->value[a->size-1]; short res = 0; while (lastval != 0) { lastval >>= 1; res++; } return res + (a->size - 1) * 32; } __host__ __device__ char getbit(big* a, short count) { return (a->value[count / 32] & ((uint) 1 << (count % 32))) != 0; } __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) { uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser)); uint part2 = (noblok == num->size) ? 
0 : (num->value[noblok] << geser); return part1 | part2; } __host__ __device__ void kali(big *a, big *b, big* res) { if (a->size == 0 || b->size == 0) { res->size = 0; return ; } char ukurana = a->size; char ukuranb = b->size; char ukuranres = ukurana + ukuranb; res->size = ukuranres; for (char i = 0; i < ukuranres; i++) { res->value[i] = 0; } for (char i = 0; i < ukurana; i++) { uint aval = a->value[i]; if (aval==0){ continue; } uint lebih = 0; for (char j = 0, lebih = 0; j < ukuranb; j++) { uint bval = b->value[j]; ul temp = res->value[i+j] + aval * bval + lebih; res->value[i+j] = temp % UINT_MAX; lebih = temp / UINT_MAX; } res->value[i+ukuranb] = lebih; } if (res->value[res->size - 1] == 0){ res->size--; } } __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){ res->size = 1; res->value[0] = 1; short i = ukuranbit(b); while (i > 0) { i--; kali(res,res,mulbuff); modulo(mulbuff,c,res,minbuff); if (getbit(b,i)) { kali(res, a, mulbuff); modulo(mulbuff, c, res, minbuff); } } } __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) { res->size = a->size; for(char i = 0 ; i < res->size ;i++){ res->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; res->value[res->size] = 0; res->size++; i = ukurana - ukuranb + 1; while (i > 0) { i--; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = res->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > res->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (res->value[k] == 0); minbuff[k] = res->value[k] - 1; } if (!borrowIn) { while (k > i) { k--; res->value[k] = minbuff[k]; } } } } while (res->size > 0 && res->value[res->size - 1] == 0) res->size--; } void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff) { modres->size = a->size; for(char i = 0 ; i < modres->size ;i++){ modres->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; modres->value[modres->size] = 0; modres->size++; divres->size = ukurana - ukuranb + 1; for (i = 0; i < divres->size; i++) divres->value[i] = 0; i = ukurana - ukuranb + 1; while (i > 0) { i--; divres->value[i] = 0; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = modres->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > modres->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (modres->value[k] == 0); minbuff[k] = modres->value[k] - 1; } if (!borrowIn) { divres->value[i] |= ((uint) 1 << i2); while (k > i) { k--; modres->value[k] = minbuff[k]; } } } } if (divres->value[divres->size - 1] == 0) divres->size--; while (modres->size > 0 && modres->value[modres->size - 1] == 0) modres->size--; } void tambah(big* a, char b, big* res) { if (a->size == 0) { res->size = 1; res->value[0] = uint(b); return; } char carryIn = 0; uint temp; res->size = a->size + 1; res->value[0] = a->value[0] + (uint)b; carryIn = (res->value[0] < a->value[0]); char i = 1; for (; i < a->size && carryIn; i++) { temp = a->value[i] + (uint)1; carryIn = (temp == 0); res->value[i] = temp; } for (; i < a->size; i++) res->value[i] = 
a->value[i]; if (carryIn) res->value[i] = 1; else res->size--; } void kurang(big* a, big *b, big* res) { res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = 0; } if (b->size == 0) { return; } char borrowIn, borrowOut; uint temp; char i; for (i = 0, borrowIn = 0; i < b->size; i++) { temp = a->value[i] - b->value[i]; borrowOut = (temp > a->value[i]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } res->value[i] = temp; borrowIn = borrowOut; } for (; i < a->size && borrowIn; i++) { borrowIn = (a->value[i] == 0); res->value[i] = a->value[i] - 1; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (res->value[res->size - 1] == 0){ res->size--; } } void copybig(big* a, big* res){ res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = a->value[i]; } } void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten){ res->size = 0; for (int i = sn->size-1; i >= 0; i--){ kali(res, ten, mulbuff); tambah(mulbuff, sn->value[i], res); } } void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff) { copybig(x,xbuff); short snlength = 0; while (xbuff->size != 0 ) { divandmod(xbuff,ten,divbuff,modbuff,minbuff); sn->value[snlength] = (char) modbuff->value[0]; snlength++; copybig(divbuff,xbuff); } sn->size = snlength; } void printsn(stringnumber* sn){ for (int i = 0; i < sn->size; ++i){ printf("%d", sn->value[sn->size-i-1]); } printf("\n"); } void teskonversi(){ int seed = time(NULL); srand(seed); stringnumber *sn = (stringnumber*) malloc(sizeof(stringnumber)); sn->size = 25; sn->value = (char *) malloc(sn->size); for (int i = 0; i < sn->size; i++) { sn->value[i] = rand() % 10; } big* konversi = (big*) malloc(sizeof(big)); big* mulbuff = (big*) malloc(sizeof(big)); big* ten = (big*) malloc(sizeof(big)); konversi->value = (uint*) malloc(sizeof(10)); mulbuff->value = (uint*) malloc(sizeof(10)); ten->value = (uint*) malloc(sizeof(1)); ten->size = 1; ten->value[0] = 10; printf("Stringnumber awal : "); printsn(sn); stringtobig(sn, konversi, mulbuff, ten); printf("konversi size %d\n", konversi->size); printf("konversi value 0 %u\n", konversi->value[0]); printf("konversi value 0 %u\n", konversi->value[1]); stringnumber *sn2 = (stringnumber*) malloc(sizeof(stringnumber)); big* xbuff = (big*) malloc(sizeof(big)); big* divbuff = (big*) malloc(sizeof(big)); big* modbuff = (big*) malloc(sizeof(big)); sn2->value = (char *) malloc(100); xbuff->value = (uint *) malloc(sizeof(uint) * 10); divbuff->value = (uint *) malloc(sizeof(uint) * 10); modbuff->value = (uint *) malloc(sizeof(uint) * 10); uint* minbuff = (uint*) malloc(sizeof(uint) * 10); bigtostring(konversi,sn2,ten,xbuff,divbuff,modbuff,minbuff); printf("Stringnumber akhir : "); printsn(sn2); }
3f1bce3e4d52c126524971385096916a0449ab55.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> typedef unsigned long long ul; typedef unsigned int uint; int banyakdata = 1024; int dimensigrid = 8; int dimensiblok = 128; typedef struct { char size; uint* value; }big; typedef struct { short size; char* value; }stringnumber; __host__ __device__ short ukuranbit(big *a); __host__ __device__ char getbit(big* a, short count); __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser); __host__ __device__ void kali(big *a, big *b, big* res); __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff); __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff); __device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); __device__ void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff); __global__ void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); __global__ void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff); cudaError_t CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res); cudaError_t CUDAdek(big *c, big *e, big* p, big *res); void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y); void maindekripsi(big* c,big* x,big* p,big* res2); void tambah(big* a, char b, big* res); void kurang(big* a, big *b, big* res); void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff); void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff); void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2); void copybig(big* a, big* res); void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten); void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff); void printsn(stringnumber* sn); void teskonversi(); __device__ void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff) { // BLok 1 Cipher modexp(g,k,p,res,minbuff->value,mulbuff); // Blok 2 Cipher modexp(y, k, p, res + 1,minbuff->value,mulbuff); kali(res + 1, m, mulbuff); modulo(mulbuff, p, res+1, minbuff->value); } __device__ void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff) { modexp(c1,e,p,res,minbuff->value,mulbuff); kali(res, c2, mulbuff); modulo(mulbuff, p, res, minbuff->value); } __global__ void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff){ int i = threadIdx.x + blockIdx.x * blockDim.x; enkripsi(m + i, k + i, g, p, y, res + 2 * i, minbuff+i, mulbuff+i); } __global__ void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff){ int i = threadIdx.x + blockIdx.x * blockDim.x; dekripsi(c + 2*i, c + 2*i+1, e, p, res+i, minbuff+i, mulbuff+i); } cudaError_t CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res) { cudaError_t cudaStatus; cudaSetDevice(0); //=====================BAGIAN G, P, DAN Y ====================================// big *devg, *devp, *devy; cudaMalloc((void**)&devg, sizeof(big)); cudaMalloc((void**)&devp, sizeof(big)); cudaMalloc((void**)&devy, sizeof(big)); uint *darrg, *darrp, *darry; cudaMalloc((void**)&darrg, g->size * sizeof(uint)); cudaMalloc((void**)&darrp, p->size * sizeof(uint)); cudaMalloc((void**)&darry, y->size * sizeof(uint)); big tempg; cudaMemcpy(darrg, g->value, (sizeof(uint) * g->size), cudaMemcpyHostToDevice); 
tempg.size = g->size; tempg.value = darrg; cudaMemcpy((devg), &tempg, (sizeof(big)), cudaMemcpyHostToDevice); big tempp; cudaMemcpy(darrp, p->value, (sizeof(uint) * p->size), cudaMemcpyHostToDevice); tempp.size = p->size; tempp.value = darrp; cudaMemcpy((devp), &tempp, (sizeof(big)), cudaMemcpyHostToDevice); big tempy; cudaMemcpy(darry, y->value, (sizeof(uint) * y->size), cudaMemcpyHostToDevice); tempy.size = y->size; tempy.value = darry; cudaMemcpy((devy), &tempy, (sizeof(big)), cudaMemcpyHostToDevice); //=====================BAGIAN M[] DAN K[] ====================================// big *devm, *devk, *devres, *minbuff, *mulbuff; cudaMalloc((void**)&devm, banyakdata * sizeof(big)); cudaMalloc((void**)&devk, banyakdata * sizeof(big)); cudaMalloc((void**)&devres, banyakdata * 2 *sizeof(big)); cudaMalloc((void**)&minbuff, banyakdata * sizeof(big)); cudaMalloc((void**)&mulbuff, banyakdata * sizeof(big)); uint **tempvalue = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue2 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3a = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3b = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue4 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue5 = (uint**)malloc(sizeof(uint*)*banyakdata); // Alokasi Memori untuk blok m dan k for (int i = 0; i < banyakdata; i++) { big temp; cudaMalloc((void**)&tempvalue[i], (sizeof(uint) * m[0].size)); cudaMemcpy(tempvalue[i], m[0].value, (sizeof(uint) * m[0].size), cudaMemcpyHostToDevice); temp.size = m[0].size; temp.value = tempvalue[i]; cudaMemcpy((devm + i), &temp, (sizeof(big)), cudaMemcpyHostToDevice); big temp2; cudaMalloc((void**)&tempvalue2[i], (sizeof(uint) * k[0].size)); cudaMemcpy(tempvalue2[i], k[0].value, (sizeof(uint) * k[0].size), cudaMemcpyHostToDevice); temp2.size = k[0].size; temp2.value = tempvalue2[i]; cudaMemcpy((devk + i), &temp2, (sizeof(big)), cudaMemcpyHostToDevice); big temp3a; cudaMalloc((void**)&tempvalue3a[i], (sizeof(uint) * p->size * 2)); temp3a.value = tempvalue3a[i]; cudaMemcpy((devres + 2 * i), &temp3a, (sizeof(big)), cudaMemcpyHostToDevice); big temp3b; cudaMalloc((void**)&tempvalue3b[i], (sizeof(uint) * p->size * 2)); temp3b.value = tempvalue3b[i]; cudaMemcpy((devres + 2 * i + 1), &temp3b, (sizeof(big)), cudaMemcpyHostToDevice); big temp4; cudaMalloc((void**)&tempvalue4[i], (sizeof(uint) * p->size * 2)); temp4.value = tempvalue4[i]; cudaMemcpy((minbuff + i), &temp4, (sizeof(big)), cudaMemcpyHostToDevice); big temp5; cudaMalloc((void**)&tempvalue5[i], (sizeof(uint) * p->size * 2)); temp5.value = tempvalue5[i]; cudaMemcpy((mulbuff + i), &temp5, (sizeof(big)), cudaMemcpyHostToDevice); } // size_t free_byte ; // size_t total_byte ; // cudaMemGetInfo( &free_byte, &total_byte ) ; // double free_db = (double)free_byte ; // double total_db = (double)total_byte ; // double used_db = total_db - free_db ; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); kernelenk << <dimensigrid, dimensiblok >> >(devm, devk, devg, devp, devy, devres, minbuff, mulbuff); cudaStatus = cudaGetLastError(); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Durasi = %f milidetik\n", milliseconds); // printf("GPU Memory: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } else 
{ // printf("Success\n"); } cudaDeviceSynchronize(); // COPY FROM DEVICE TO HOST HERE big* tempres = (big*) malloc(banyakdata * 2 * sizeof(big)); for (int i = 0; i < banyakdata*2; i++){ tempres[i].value = (uint*) malloc(sizeof(uint) * p->size); } cudaMemcpy(tempres, devres, (sizeof(big) * 2 * banyakdata), cudaMemcpyDeviceToHost); for (int i = 0; i < banyakdata*2; i++){ res[i].size = tempres[i].size; cudaMemcpy(res[i].value, tempres[i].value, sizeof(uint) * p->size, cudaMemcpyDeviceToHost); } cudaFree(darrg); cudaFree(darrp); cudaFree(darry); cudaFree(devg); cudaFree(devp); cudaFree(devy); for (int i = 0; i < banyakdata; i++) { cudaFree(tempvalue[i]); cudaFree(tempvalue2[i]); cudaFree(tempvalue3a[i]); cudaFree(tempvalue3b[i]); cudaFree(tempvalue4[i]); cudaFree(tempvalue5[i]); } free(tempvalue); free(tempvalue2); free(tempvalue3a); free(tempvalue3b); free(tempvalue4); free(tempvalue5); cudaFree(devm); cudaFree(devk); cudaFree(devres); cudaFree(minbuff); cudaFree(mulbuff); free(tempres); //cudaProfilerStop(); //free(med); return cudaStatus; } cudaError_t CUDAdek(big *c, big *e, big* p, big *res) { cudaError_t cudaStatus; cudaSetDevice(0); //=====================BAGIAN p dan e ( eksponen) ====================================// big *devp, *deve; cudaMalloc((void**)&devp, sizeof(big)); cudaMalloc((void**)&deve, sizeof(big)); uint *darrp, *darre; cudaMalloc((void**)&darrp, p->size * sizeof(uint)); cudaMalloc((void**)&darre, e->size * sizeof(uint)); big tempp; cudaMemcpy(darrp, p->value, (sizeof(uint) * p->size), cudaMemcpyHostToDevice); tempp.size = p->size; tempp.value = darrp; cudaMemcpy((devp), &tempp, (sizeof(big)), cudaMemcpyHostToDevice); big tempe; cudaMemcpy(darre, e->value, (sizeof(uint) * e->size), cudaMemcpyHostToDevice); tempe.size = e->size; tempe.value = darre; cudaMemcpy((deve), &tempe, (sizeof(big)), cudaMemcpyHostToDevice); //======================================BAGIAN C[] ====================================// big *devc, *devres, *minbuff, *mulbuff; cudaMalloc((void**)&devc, banyakdata * 2 * sizeof(big)); cudaMalloc((void**)&devres, banyakdata * sizeof(big)); cudaMalloc((void**)&minbuff, banyakdata * sizeof(big)); cudaMalloc((void**)&mulbuff, banyakdata * sizeof(big)); uint **tempvalue = (uint**)malloc(sizeof(uint*)*banyakdata*2); uint **tempvalue2 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue3 = (uint**)malloc(sizeof(uint*)*banyakdata); uint **tempvalue4 = (uint**)malloc(sizeof(uint*)*banyakdata); // Alokasi Memori untuk blok m dan k for (int i = 0; i < banyakdata; i++) { big temp11; cudaMalloc((void**)&tempvalue[2*i], (sizeof(uint) * c[0].size)); cudaMemcpy(tempvalue[2*i], c[0].value, (sizeof(uint) * c[0].size), cudaMemcpyHostToDevice); temp11.size = c[0].size; temp11.value = tempvalue[2*i]; cudaMemcpy((devc + 2*i), &temp11, (sizeof(big)), cudaMemcpyHostToDevice); big temp12; cudaMalloc((void**)&tempvalue[2*i+1], (sizeof(uint) * c[1].size)); cudaMemcpy(tempvalue[2*i+1], c[1].value, (sizeof(uint) * c[1].size), cudaMemcpyHostToDevice); temp12.size = c[1].size; temp12.value = tempvalue[2*i+1]; cudaMemcpy((devc + 2*i+1), &temp12, (sizeof(big)), cudaMemcpyHostToDevice); big temp2; cudaMalloc((void**)&tempvalue2[i], (sizeof(uint) * p->size * 2)); temp2.value = tempvalue2[i]; cudaMemcpy((devres + i), &temp2, (sizeof(big)), cudaMemcpyHostToDevice); big temp3; cudaMalloc((void**)&tempvalue3[i], (sizeof(uint) * p->size * 2)); temp3.value = tempvalue3[i]; cudaMemcpy((minbuff + i), &temp3, (sizeof(big)), cudaMemcpyHostToDevice); big temp4; 
cudaMalloc((void**)&tempvalue4[i], (sizeof(uint) * p->size * 2)); temp4.value = tempvalue4[i]; cudaMemcpy((mulbuff + i), &temp4, (sizeof(big)), cudaMemcpyHostToDevice); } // size_t free_byte ; // size_t total_byte ; // cudaMemGetInfo( &free_byte, &total_byte ) ; // double free_db = (double)free_byte ; // double total_db = (double)total_byte ; // double used_db = total_db - free_db ; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); kerneldek << <dimensigrid, dimensiblok >> >(devc, deve, devp, devres, minbuff, mulbuff); cudaStatus = cudaGetLastError(); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Durasi = %f milidetik\n", milliseconds); // printf("GPU Memory: used = %f, free = %f MB, total = %f MB\n",used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } else { // printf("Success\n"); } cudaDeviceSynchronize(); // COPY FROM DEVICE TO HOST HERE big* tempres = (big*) malloc(banyakdata * sizeof(big)); for (int i = 0; i < banyakdata; i++){ tempres[i].value = (uint*) malloc(sizeof(uint) * p->size); } cudaMemcpy(tempres, devres, (sizeof(big) * banyakdata), cudaMemcpyDeviceToHost); for (int i = 0; i < banyakdata; i++){ res[i].size = tempres[i].size; cudaMemcpy(res[i].value, tempres[i].value, sizeof(uint) * p->size, cudaMemcpyDeviceToHost); } cudaFree(darrp); cudaFree(darre); cudaFree(devp); cudaFree(deve); for (int i = 0; i < 2 * banyakdata; i++) { cudaFree(tempvalue[i]); } for (int i = 0; i < banyakdata; i++) { cudaFree(tempvalue2[i]); cudaFree(tempvalue3[i]); cudaFree(tempvalue4[i]); } free(tempvalue); free(tempvalue2); free(tempvalue3); free(tempvalue4); cudaFree(devc); cudaFree(devres); cudaFree(minbuff); cudaFree(mulbuff); free(tempres); return cudaStatus; } void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y){ printf("Encrypting...\n"); //========================================================// cudaError_t cudaStatus = CUDAenk(m, k, g, p, y, res); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\nenkripsiCUDA failed!"); } cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); } // for (int i = 0; i < 5; i++) // { // printf("Cipher %d size %d : %u\n",i, res[i].size, res[i].value[0]); // } // printf("Cipher ... : ...\n"); // printf("Cipher %d size %d : %u\n",banyakdata*2-2, res[banyakdata*2-2].size, res[banyakdata*2-2].value[0]); // printf("Cipher %d size %d : %u\n",banyakdata*2-1, res[banyakdata*2-2].size, res[banyakdata*2-1].value[0]); } void maindekripsi(big* c,big* e,big* p,big* res2){ printf("Decrypting...\n"); //========================================================// cudaError_t cudaStatus = CUDAdek(c, e, p, res2); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\ndekripsiCUDA failed!"); } cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); } // for (int i = 0; i < 5; i++) // { // printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[0]); // printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[1]); // } // printf("Plain ... 
: ...\n"); // printf("Plain %d size %d : %u\n",banyakdata-1, res2[banyakdata-1].size, res2[banyakdata-1].value[0]); } void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){ modexp(g,x,p,y,minbuff,mulbuff); } void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2){ // Kunci publik p p->size = 8; p->value = (uint*) malloc(p->size * sizeof(uint)); for (int i = 0; i < p->size; i++) { // p->value[i] = 2357; p->value[i] = UINT_MAX-82; } // p->value[0] = UINT_MAX-4; // p->value[0] = 2387; // p->value[1] = 2357; // Kunci publik g g->size = 8; g->value = (uint*) malloc(g->size * sizeof(uint)); for (int i = 0; i < g->size; i++) { // g->value[i] = 2; g->value[i] = UINT_MAX-902; } // Kunci privat x x->size = 8; x->value = (uint*) malloc(x->size * sizeof(uint)); for (int i = 0; i < x->size; i++) { // x->value[i] = 1751; x->value[i] = UINT_MAX-86262; } // Cari nilai eksponen e = (p-x-1) untuk dekripsi big *xplus1 = (big*) malloc(sizeof(big)); xplus1->value = (uint*) malloc(p->size * sizeof(uint)); e->value = (uint*) malloc(p->size * sizeof(uint)); tambah(x, 1, xplus1); kurang(p,xplus1,e); free(xplus1->value); free(xplus1); // Cari nilai kunci publik y = (g^x) mod p big* mulbuff = (big*) malloc(sizeof(big)); mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2); uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2); y->value = (uint*) malloc(p->size * 2 * sizeof(uint)); carikunciy(g,x,p,y,minbuff,mulbuff); // printf("y 0 : %u\n", y->value[0]); // printf("y 0 : %u\n", y->value[1]); //========================================================// // Blok plainteks m->size = 8; m->value = (uint*) malloc(m->size * sizeof(uint)); for (int i = 0; i < m->size; i++) { // m->value[i] = 1001; m->value[i] = UINT_MAX-5522; } // Nilai k masing-masing blok k->size = 8; k->value = (uint*) malloc(k->size * sizeof(uint)); for (int i = 0; i < k->size; i++) { // k->value[i] = 77; k->value[i] = UINT_MAX-38227; } // Alokasi memori untuk result for (int i = 0; i < banyakdata*2; i++) { res[i].value = (uint*) malloc(sizeof(uint) * p->size); } // Alokasi memori untuk result 2 for (int i = 0; i < banyakdata; i++) { res2[i].value = (uint*) malloc(sizeof(uint) * p->size); } } int main(){ big *p, *g, *x, *e, *y, *m, *k, *res, *res2; p = (big*)malloc(sizeof(big)); g = (big*)malloc(sizeof(big)); x = (big*)malloc(sizeof(big)); e = (big*)malloc(sizeof(big)); y = (big*)malloc(sizeof(big)); m = (big*)malloc(banyakdata * sizeof(big)); k = (big*)malloc(banyakdata * sizeof(big)); res = (big*)malloc(banyakdata * 2 * sizeof(big)); res2 = (big*)malloc(banyakdata * sizeof(big)); init(p,g,x,e,y,m,k,res,res2); mainenkripsi(m,k,res,g,p,y); printf(" ========================= \n"); maindekripsi(res,e,p,res2); free(p->value); free(p); free(g->value); free(g); free(x->value); free(x); free(e->value); free(e); free(y->value); free(y); free(m->value); free(m); free(k->value); free(k); free(res->value); free(res); free(res2->value); free(res2); //teskonversi(); return 0; } __host__ __device__ short ukuranbit(big *a) { uint lastval = a->value[a->size-1]; short res = 0; while (lastval != 0) { lastval >>= 1; res++; } return res + (a->size - 1) * 32; } __host__ __device__ char getbit(big* a, short count) { return (a->value[count / 32] & ((uint) 1 << (count % 32))) != 0; } __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) { uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser)); uint part2 = (noblok == num->size) ? 
0 : (num->value[noblok] << geser); return part1 | part2; } __host__ __device__ void kali(big *a, big *b, big* res) { if (a->size == 0 || b->size == 0) { res->size = 0; return ; } char ukurana = a->size; char ukuranb = b->size; char ukuranres = ukurana + ukuranb; res->size = ukuranres; for (char i = 0; i < ukuranres; i++) { res->value[i] = 0; } for (char i = 0; i < ukurana; i++) { uint aval = a->value[i]; if (aval==0){ continue; } uint lebih = 0; for (char j = 0, lebih = 0; j < ukuranb; j++) { uint bval = b->value[j]; ul temp = res->value[i+j] + aval * bval + lebih; res->value[i+j] = temp % UINT_MAX; lebih = temp / UINT_MAX; } res->value[i+ukuranb] = lebih; } if (res->value[res->size - 1] == 0){ res->size--; } } __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){ res->size = 1; res->value[0] = 1; short i = ukuranbit(b); while (i > 0) { i--; kali(res,res,mulbuff); modulo(mulbuff,c,res,minbuff); if (getbit(b,i)) { kali(res, a, mulbuff); modulo(mulbuff, c, res, minbuff); } } } __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) { res->size = a->size; for(char i = 0 ; i < res->size ;i++){ res->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; res->value[res->size] = 0; res->size++; i = ukurana - ukuranb + 1; while (i > 0) { i--; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = res->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > res->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (res->value[k] == 0); minbuff[k] = res->value[k] - 1; } if (!borrowIn) { while (k > i) { k--; res->value[k] = minbuff[k]; } } } } while (res->size > 0 && res->value[res->size - 1] == 0) res->size--; } void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff) { modres->size = a->size; for(char i = 0 ; i < modres->size ;i++){ modres->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; modres->value[modres->size] = 0; modres->size++; divres->size = ukurana - ukuranb + 1; for (i = 0; i < divres->size; i++) divres->value[i] = 0; i = ukurana - ukuranb + 1; while (i > 0) { i--; divres->value[i] = 0; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = modres->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > modres->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (modres->value[k] == 0); minbuff[k] = modres->value[k] - 1; } if (!borrowIn) { divres->value[i] |= ((uint) 1 << i2); while (k > i) { k--; modres->value[k] = minbuff[k]; } } } } if (divres->value[divres->size - 1] == 0) divres->size--; while (modres->size > 0 && modres->value[modres->size - 1] == 0) modres->size--; } void tambah(big* a, char b, big* res) { if (a->size == 0) { res->size = 1; res->value[0] = uint(b); return; } char carryIn = 0; uint temp; res->size = a->size + 1; res->value[0] = a->value[0] + (uint)b; carryIn = (res->value[0] < a->value[0]); char i = 1; for (; i < a->size && carryIn; i++) { temp = a->value[i] + (uint)1; carryIn = (temp == 0); res->value[i] = temp; } for (; i < a->size; i++) res->value[i] = 
a->value[i]; if (carryIn) res->value[i] = 1; else res->size--; } void kurang(big* a, big *b, big* res) { res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = 0; } if (b->size == 0) { return; } char borrowIn, borrowOut; uint temp; char i; for (i = 0, borrowIn = 0; i < b->size; i++) { temp = a->value[i] - b->value[i]; borrowOut = (temp > a->value[i]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } res->value[i] = temp; borrowIn = borrowOut; } for (; i < a->size && borrowIn; i++) { borrowIn = (a->value[i] == 0); res->value[i] = a->value[i] - 1; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (res->value[res->size - 1] == 0){ res->size--; } } void copybig(big* a, big* res){ res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = a->value[i]; } } void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten){ res->size = 0; for (int i = sn->size-1; i >= 0; i--){ kali(res, ten, mulbuff); tambah(mulbuff, sn->value[i], res); } } void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff) { copybig(x,xbuff); short snlength = 0; while (xbuff->size != 0 ) { divandmod(xbuff,ten,divbuff,modbuff,minbuff); sn->value[snlength] = (char) modbuff->value[0]; snlength++; copybig(divbuff,xbuff); } sn->size = snlength; } void printsn(stringnumber* sn){ for (int i = 0; i < sn->size; ++i){ printf("%d", sn->value[sn->size-i-1]); } printf("\n"); } void teskonversi(){ int seed = time(NULL); srand(seed); stringnumber *sn = (stringnumber*) malloc(sizeof(stringnumber)); sn->size = 25; sn->value = (char *) malloc(sn->size); for (int i = 0; i < sn->size; i++) { sn->value[i] = rand() % 10; } big* konversi = (big*) malloc(sizeof(big)); big* mulbuff = (big*) malloc(sizeof(big)); big* ten = (big*) malloc(sizeof(big)); konversi->value = (uint*) malloc(sizeof(10)); mulbuff->value = (uint*) malloc(sizeof(10)); ten->value = (uint*) malloc(sizeof(1)); ten->size = 1; ten->value[0] = 10; printf("Stringnumber awal : "); printsn(sn); stringtobig(sn, konversi, mulbuff, ten); printf("konversi size %d\n", konversi->size); printf("konversi value 0 %u\n", konversi->value[0]); printf("konversi value 0 %u\n", konversi->value[1]); stringnumber *sn2 = (stringnumber*) malloc(sizeof(stringnumber)); big* xbuff = (big*) malloc(sizeof(big)); big* divbuff = (big*) malloc(sizeof(big)); big* modbuff = (big*) malloc(sizeof(big)); sn2->value = (char *) malloc(100); xbuff->value = (uint *) malloc(sizeof(uint) * 10); divbuff->value = (uint *) malloc(sizeof(uint) * 10); modbuff->value = (uint *) malloc(sizeof(uint) * 10); uint* minbuff = (uint*) malloc(sizeof(uint) * 10); bigtostring(konversi,sn2,ten,xbuff,divbuff,modbuff,minbuff); printf("Stringnumber akhir : "); printsn(sn2); }
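// Illustrative sketch, not part of the original file: a minimal host-side round trip of the
// ElGamal relations that enkripsi()/dekripsi() implement on the device, reusing this file's own
// big-number helpers. Encryption of block m with ephemeral key k is c1 = g^k mod p and
// c2 = m * y^k mod p; decryption uses e = p - 1 - x (as computed in init()), since
// c1^e * c2 = m * g^(k(p-1)) mod p, and g^(p-1) = 1 mod p for prime p by Fermat's little theorem.
// The function name is hypothetical and the buffer sizes simply mirror the allocation pattern
// used in init()/CUDAenk(); nothing in the original calls this.
void elgamal_host_roundtrip_sketch(big *m, big *k, big *g, big *p, big *y, big *e) {
    big c1, c2, rec, mulbuff;
    uint *minbuff  = (uint*) malloc(sizeof(uint) * p->size * 2);
    c1.value       = (uint*) malloc(sizeof(uint) * p->size * 2);
    c2.value       = (uint*) malloc(sizeof(uint) * p->size * 2);
    rec.value      = (uint*) malloc(sizeof(uint) * p->size * 2);
    mulbuff.value  = (uint*) malloc(sizeof(uint) * p->size * 2);
    modexp(g, k, p, &c1, minbuff, &mulbuff);     // c1 = g^k mod p
    modexp(y, k, p, &c2, minbuff, &mulbuff);     // c2 = y^k mod p ...
    kali(&c2, m, &mulbuff);                      // ... times m ...
    modulo(&mulbuff, p, &c2, minbuff);           // ... reduced mod p
    modexp(&c1, e, p, &rec, minbuff, &mulbuff);  // rec = c1^(p-1-x) mod p
    kali(&rec, &c2, &mulbuff);
    modulo(&mulbuff, p, &rec, minbuff);          // rec should now equal m mod p
    // (comparison against m and freeing of the buffers omitted for brevity)
}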
e215e7366177cbb11b242323eafeebc5e546ee18.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <device_launch_parameters.h>
#include<time.h>
#include<stdlib.h>

#define N 128
#define BLOCK_SIZE 8

typedef struct {
    int width;
    int height;
    float *elements;
} Matrix;

clock_t start_cpu, stop_cpu, start_gpu, stop_gpu;
double czas_cpu, czas_gpu;

__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // each thread computes one element of matrix C,
    // accumulating the result in the variable Cvalue
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}

void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // copy matrices A and B to global device memory
    // A first
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    hipMalloc((void **)&d_A.elements, size);
    hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
    // then B
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    hipMalloc((void **)&d_B.elements, size);
    hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
    // allocate matrix C in global device memory
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    hipMalloc((void**)&d_C.elements, size);
    // prepare the launch configuration and invoke the kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    start_gpu = clock();
    MatMulKernel << <dimGrid, dimBlock >> >(d_A, d_B, d_C);
    hipDeviceSynchronize();
    stop_gpu = clock();
    czas_gpu = (double)1000 * (stop_gpu - start_gpu) / CLOCKS_PER_SEC;
    // read the computed matrix C back from global device memory
    hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
    printf("CZAS GPU %f\n", czas_gpu);
    printf("Macierz GPU \n");
    /*
    for (int i=0;i<C.height;i++){
        for (int j=0;j<C.width;j++){
            printf("%.2f ", C.elements[i*C.height+j]);
        }
    }
    //*/
    printf("\n");
    // free device memory
    hipFree(d_A.elements);
    hipFree(d_B.elements);
    hipFree(d_C.elements);
}

int main(int argc, char** argv)
{
    int devCnt;
    Matrix A, B, C;
    hipGetDeviceCount(&devCnt);
    if (devCnt == 0) {
        perror("No CUDA devices available -- exiting.");
        return 1;
    }
    A.width = N; A.height = N;
    A.elements = (float*)malloc(A.width*A.height * sizeof(float));
    for (int i = 0; i<A.width*A.height; i++) {
        A.elements[i] = ((float)(rand() % 100) / 100) + (rand() % 50);
    }
    B.width = N; B.height = N;
    B.elements = (float*)malloc(B.width*B.height * sizeof(float));
    for (int i = 0; i<B.width*B.height; i++) {
        B.elements[i] = ((float)(rand() % 100) / 100) + (rand() % 50);
    }
    C.width = B.width; C.height = A.height;
    C.elements = (float*)malloc(C.width*C.height * sizeof(float));
    float **A2D = new float*[A.width];
    for (int i = 0; i < A.height; i++) A2D[i] = new float[A.width];
    float **B2D = new float*[A.width];
    for (int i = 0; i < A.height; i++) B2D[i] = new float[A.width];
    float **C2D = new float*[A.width];
    for (int i = 0; i < A.height; i++) C2D[i] = new float[A.width];
    for (int i = 0; i<A.width; i++) {
        for (int j = 0; j<A.height; j++) {
            A2D[i][j] = A.elements[i*A.width + j];
        }
    }
    for (int i = 0; i<B.width; i++) {
        for (int j = 0; j<B.height; j++) {
            B2D[i][j] = B.elements[i*B.width + j];
        }
    }
    for (int i = 0; i<C.width; i++) {
        for (int j = 0; j<C.height; j++) {
            C2D[i][j] = 0;
        }
    }
    start_cpu = clock();
    for (int i = 0; i<A.height; i++) {
        for (int j = 0; j<A.width; j++) {
            for (int k = 0; k<B.width; k++) {
                C2D[i][k] += A2D[i][j] * B2D[j][k];
            }
        }
    }
    stop_cpu = clock();
    czas_cpu = (double)1000 * (stop_cpu - start_cpu) / CLOCKS_PER_SEC;
    printf("Czas dla CPU %.2f \n", czas_cpu);
    printf("Macierz CPU \n");
    /*
    for (int i=0;i<C.height;i++){
        for (int j=0;j<C.width;j++){
            printf("%.2f ", C2D[i][j]);
        }
    }
    //*/
    printf("\n");
    MatMul(A, B, C);
    //printf("N = 8\n");
    //printf("Block SIZE = 4\n");
    for (int i = 0; i < A.width; i++) {
        delete [] A2D[i];
        delete [] B2D[i];
        delete [] C2D[i];
    }
    delete[] A2D;
    delete[] B2D;
    delete[] C2D;
    free(A.elements);
    free(B.elements);
    free(C.elements);
}
e215e7366177cbb11b242323eafeebc5e546ee18.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include <math.h>
#include <device_launch_parameters.h>
#include<time.h>
#include<stdlib.h>

#define N 128
#define BLOCK_SIZE 8

typedef struct {
    int width;
    int height;
    float *elements;
} Matrix;

clock_t start_cpu, stop_cpu, start_gpu, stop_gpu;
double czas_cpu, czas_gpu;

__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // each thread computes one element of matrix C,
    // accumulating the result in the variable Cvalue
    float Cvalue = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}

void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // copy matrices A and B to global device memory
    // A first
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc((void **)&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    // then B
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc((void **)&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // allocate matrix C in global device memory
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc((void**)&d_C.elements, size);
    // prepare the launch configuration and invoke the kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    start_gpu = clock();
    MatMulKernel << <dimGrid, dimBlock >> >(d_A, d_B, d_C);
    cudaThreadSynchronize();
    stop_gpu = clock();
    czas_gpu = (double)1000 * (stop_gpu - start_gpu) / CLOCKS_PER_SEC;
    // read the computed matrix C back from global device memory
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    printf("CZAS GPU %f\n", czas_gpu);
    printf("Macierz GPU \n");
    /*
    for (int i=0;i<C.height;i++){
        for (int j=0;j<C.width;j++){
            printf("%.2f ", C.elements[i*C.height+j]);
        }
    }
    //*/
    printf("\n");
    // free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

int main(int argc, char** argv)
{
    int devCnt;
    Matrix A, B, C;
    cudaGetDeviceCount(&devCnt);
    if (devCnt == 0) {
        perror("No CUDA devices available -- exiting.");
        return 1;
    }
    A.width = N; A.height = N;
    A.elements = (float*)malloc(A.width*A.height * sizeof(float));
    for (int i = 0; i<A.width*A.height; i++) {
        A.elements[i] = ((float)(rand() % 100) / 100) + (rand() % 50);
    }
    B.width = N; B.height = N;
    B.elements = (float*)malloc(B.width*B.height * sizeof(float));
    for (int i = 0; i<B.width*B.height; i++) {
        B.elements[i] = ((float)(rand() % 100) / 100) + (rand() % 50);
    }
    C.width = B.width; C.height = A.height;
    C.elements = (float*)malloc(C.width*C.height * sizeof(float));
    float **A2D = new float*[A.width];
    for (int i = 0; i < A.height; i++) A2D[i] = new float[A.width];
    float **B2D = new float*[A.width];
    for (int i = 0; i < A.height; i++) B2D[i] = new float[A.width];
    float **C2D = new float*[A.width];
    for (int i = 0; i < A.height; i++) C2D[i] = new float[A.width];
    for (int i = 0; i<A.width; i++) {
        for (int j = 0; j<A.height; j++) {
            A2D[i][j] = A.elements[i*A.width + j];
        }
    }
    for (int i = 0; i<B.width; i++) {
        for (int j = 0; j<B.height; j++) {
            B2D[i][j] = B.elements[i*B.width + j];
        }
    }
    for (int i = 0; i<C.width; i++) {
        for (int j = 0; j<C.height; j++) {
            C2D[i][j] = 0;
        }
    }
    start_cpu = clock();
    for (int i = 0; i<A.height; i++) {
        for (int j = 0; j<A.width; j++) {
            for (int k = 0; k<B.width; k++) {
                C2D[i][k] += A2D[i][j] * B2D[j][k];
            }
        }
    }
    stop_cpu = clock();
    czas_cpu = (double)1000 * (stop_cpu - start_cpu) / CLOCKS_PER_SEC;
    printf("Czas dla CPU %.2f \n", czas_cpu);
    printf("Macierz CPU \n");
    /*
    for (int i=0;i<C.height;i++){
        for (int j=0;j<C.width;j++){
            printf("%.2f ", C2D[i][j]);
        }
    }
    //*/
    printf("\n");
    MatMul(A, B, C);
    //printf("N = 8\n");
    //printf("Block SIZE = 4\n");
    for (int i = 0; i < A.width; i++) {
        delete [] A2D[i];
        delete [] B2D[i];
        delete [] C2D[i];
    }
    delete[] A2D;
    delete[] B2D;
    delete[] C2D;
    free(A.elements);
    free(B.elements);
    free(C.elements);
}
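// Illustrative sketch, not part of the original file: MatMulKernel above relies on N being an
// exact multiple of BLOCK_SIZE (the grid is sized as B.width / dimBlock.x with no remainder
// handling, and the kernel performs no bounds check). A guarded variant such as the one below,
// launched with a rounded-up grid, would also cover sizes that do not divide evenly; the kernel
// name is hypothetical and nothing in the original calls it.
__global__ void MatMulKernelGuarded(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= C.height || col >= C.width) return;  // skip threads outside the output matrix
    float Cvalue = 0;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}
// A matching launch would round the grid up, e.g.:
//   dim3 dimGrid((C.width + BLOCK_SIZE - 1) / BLOCK_SIZE, (C.height + BLOCK_SIZE - 1) / BLOCK_SIZE);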
ba3ce3c3a74d748667926022a4b57975cdbe80a1.hip
// !!! This is a file automatically generated by hipify!!! #include "gsl\gsl_cblas.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <chrono> #include <iostream> #include <hip/hip_runtime.h> #include <cusolverDn.h> using namespace std; using namespace chrono; double Cholesky(int N, bool debug) { steady_clock::time_point timeStart, timeEnd;// variables for timing double accum; // elapsed time variable double *A, *B, *B1; // declare arrays on the host // prepare memory on the host A = (double *)malloc(N*N * sizeof(double)); // NxN coeff . matrix B = (double *)malloc(N * sizeof(double)); // N- vector rhs B=A*B1 B1 = (double *)malloc(N * sizeof(double)); // auxiliary N- vect . for (int i = 0; i < N*N; i++) A[i] = rand() / (double)RAND_MAX; for (int i = 0; i < N; i++) B[i] = 0.0; for (int i = 0; i < N; i++) B1[i] = 1.0; // N- vector of ones for (int i = 0; i < N; i++) { A[i*N + i] = A[i*N + i] + (double)N; // make A positive definite for (int j = 0; j < i; j++) A[i*N + j] = A[j*N + i]; // and symmetric } double al = 1.0, bet = 0.0; // constants for dgemv int incx = 1, incy = 1; cblas_dgemv(CblasColMajor, CblasNoTrans, N, N, al, A, N, B1, incx, bet, B, incy); // B=A*B1 // GPU hipError_t cudaStatus; cusolverStatus_t cusolverStatus; hipsolverDnHandle_t handle; // device versions of double *d_A, *d_B, *Work; // matrix A, rhs B and worksp . int *d_info, Lwork; // device version of info , worksp . size int info_gpu = 0; // device info copied to host cudaStatus = hipGetDevice(0); cusolverStatus = hipsolverDnCreate(&handle); // create handle hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER; // DEVICE housekeeping timeStart = steady_clock::now(); cudaStatus = hipMalloc((void **)& d_A, N*N * sizeof(double)); cudaStatus = hipMalloc((void **)& d_B, N * sizeof(double)); cudaStatus = hipMalloc((void **)& d_info, sizeof(int)); cudaStatus = hipMemcpy(d_A, A, N*N * sizeof(double), hipMemcpyHostToDevice); // copy A->d_A cudaStatus = hipMemcpy(d_B, B, N * sizeof(double), hipMemcpyHostToDevice); // copy B->d_B // COMPUTE workspace size and prepare workspace cusolverStatus = hipsolverDnDpotrf_bufferSize(handle, uplo, N, d_A, N, &Lwork); cudaStatus = hipMalloc((void **)& Work, Lwork * sizeof(double)); cusolverStatus = hipsolverDnDpotrf(handle, uplo, N, d_A, N, Work, Lwork, d_info); // solve d_A *X=d_B , where d_A is factorized by potrf function // d_B is overwritten by the solution cusolverStatus = hipsolverDnDpotrs(handle, uplo, N, 1, d_A, N, d_B, N, d_info); cudaStatus = hipDeviceSynchronize(); auto CholeskyRUN = steady_clock::now() - timeStart; if (debug) { printf(" solution : "); printf(" Dpotrf + Dpotrs time : %lf sec .\n", CholeskyRUN); // pr.el. time cudaStatus = hipMemcpy(&info_gpu, d_info, sizeof(int), hipMemcpyDeviceToHost); // copy d_info -> info_gpu printf(" after Dpotrf + Dpotrs : info_gpu = %d\n", info_gpu); cudaStatus = hipMemcpy(B, d_B, N * sizeof(double), hipMemcpyDeviceToHost); // copy solution to host d_B ->B for (int i = 0; i < 5; i++) printf("%g, ", B[i]); // print printf(" ... "); // first components of the solution printf("\n"); } // Housekeeping cudaStatus = hipFree(d_A); cudaStatus = hipFree(d_B); cudaStatus = hipFree(d_info); cudaStatus = hipFree(Work); cusolverStatus = hipsolverDnDestroy(handle); cudaStatus = hipDeviceReset(); double returnVal = duration_cast<milliseconds>(CholeskyRUN).count(); return returnVal; }
ba3ce3c3a74d748667926022a4b57975cdbe80a1.cu
#include "gsl\gsl_cblas.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <chrono> #include <iostream> #include <cuda_runtime.h> #include <cusolverDn.h> using namespace std; using namespace chrono; double Cholesky(int N, bool debug) { steady_clock::time_point timeStart, timeEnd;// variables for timing double accum; // elapsed time variable double *A, *B, *B1; // declare arrays on the host // prepare memory on the host A = (double *)malloc(N*N * sizeof(double)); // NxN coeff . matrix B = (double *)malloc(N * sizeof(double)); // N- vector rhs B=A*B1 B1 = (double *)malloc(N * sizeof(double)); // auxiliary N- vect . for (int i = 0; i < N*N; i++) A[i] = rand() / (double)RAND_MAX; for (int i = 0; i < N; i++) B[i] = 0.0; for (int i = 0; i < N; i++) B1[i] = 1.0; // N- vector of ones for (int i = 0; i < N; i++) { A[i*N + i] = A[i*N + i] + (double)N; // make A positive definite for (int j = 0; j < i; j++) A[i*N + j] = A[j*N + i]; // and symmetric } double al = 1.0, bet = 0.0; // constants for dgemv int incx = 1, incy = 1; cblas_dgemv(CblasColMajor, CblasNoTrans, N, N, al, A, N, B1, incx, bet, B, incy); // B=A*B1 // GPU cudaError cudaStatus; cusolverStatus_t cusolverStatus; cusolverDnHandle_t handle; // device versions of double *d_A, *d_B, *Work; // matrix A, rhs B and worksp . int *d_info, Lwork; // device version of info , worksp . size int info_gpu = 0; // device info copied to host cudaStatus = cudaGetDevice(0); cusolverStatus = cusolverDnCreate(&handle); // create handle cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER; // DEVICE housekeeping timeStart = steady_clock::now(); cudaStatus = cudaMalloc((void **)& d_A, N*N * sizeof(double)); cudaStatus = cudaMalloc((void **)& d_B, N * sizeof(double)); cudaStatus = cudaMalloc((void **)& d_info, sizeof(int)); cudaStatus = cudaMemcpy(d_A, A, N*N * sizeof(double), cudaMemcpyHostToDevice); // copy A->d_A cudaStatus = cudaMemcpy(d_B, B, N * sizeof(double), cudaMemcpyHostToDevice); // copy B->d_B // COMPUTE workspace size and prepare workspace cusolverStatus = cusolverDnDpotrf_bufferSize(handle, uplo, N, d_A, N, &Lwork); cudaStatus = cudaMalloc((void **)& Work, Lwork * sizeof(double)); cusolverStatus = cusolverDnDpotrf(handle, uplo, N, d_A, N, Work, Lwork, d_info); // solve d_A *X=d_B , where d_A is factorized by potrf function // d_B is overwritten by the solution cusolverStatus = cusolverDnDpotrs(handle, uplo, N, 1, d_A, N, d_B, N, d_info); cudaStatus = cudaDeviceSynchronize(); auto CholeskyRUN = steady_clock::now() - timeStart; if (debug) { printf(" solution : "); printf(" Dpotrf + Dpotrs time : %lf sec .\n", CholeskyRUN); // pr.el. time cudaStatus = cudaMemcpy(&info_gpu, d_info, sizeof(int), cudaMemcpyDeviceToHost); // copy d_info -> info_gpu printf(" after Dpotrf + Dpotrs : info_gpu = %d\n", info_gpu); cudaStatus = cudaMemcpy(B, d_B, N * sizeof(double), cudaMemcpyDeviceToHost); // copy solution to host d_B ->B for (int i = 0; i < 5; i++) printf("%g, ", B[i]); // print printf(" ... "); // first components of the solution printf("\n"); } // Housekeeping cudaStatus = cudaFree(d_A); cudaStatus = cudaFree(d_B); cudaStatus = cudaFree(d_info); cudaStatus = cudaFree(Work); cusolverStatus = cusolverDnDestroy(handle); cudaStatus = cudaDeviceReset(); double returnVal = duration_cast<milliseconds>(CholeskyRUN).count(); return returnVal; }
c6995320699340a7c287dc3313fdd439ebd68701.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2012 by Jörn Dinkla, www.dinkla.com, All rights reserved.
 */

#include "smooth2.h"

__device__ inline void add(int4& a, const uchar4& b) {
    a.x += b.x;
    a.y += b.y;
    a.z += b.z;
}

__global__ void smooth_kernel2(const uchar4* d_input, uchar4* d_output,
    const CExtent extent, const int windowSize)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (extent.inBounds(x, y)) {
        int4 a = make_int4(0, 0, 0, 0);
        int count = 0;
        for (int dy = -windowSize; dy <= windowSize; dy ++) {
            for (int dx = -windowSize; dx <= windowSize; dx ++) {
                const int nx = x + dx;
                const int ny = y + dy;
                if (extent.inBoundsStrict(nx, ny)) {
                    add(a, d_input[extent.index(nx, ny)]);
                    count++;
                }
            }
        }
        d_output[extent.index(x, y)] = make_uchar4(a.x / count, a.y / count, a.z / count, 255);
    }
}

void smooth2(const CExecConfig& config, const uchar4* d_input, uchar4* d_output,
    const CExtent& extent, const int windowSize)
{
    hipLaunchKernelGGL(( smooth_kernel2), dim3(config.grid),dim3(config.threads), 0, 0, d_input, d_output, extent, windowSize);
}
c6995320699340a7c287dc3313fdd439ebd68701.cu
/*
 * Copyright (c) 2012 by Jörn Dinkla, www.dinkla.com, All rights reserved.
 */

#include "smooth2.h"

__device__ inline void add(int4& a, const uchar4& b) {
    a.x += b.x;
    a.y += b.y;
    a.z += b.z;
}

__global__ void smooth_kernel2(const uchar4* d_input, uchar4* d_output,
    const CExtent extent, const int windowSize)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (extent.inBounds(x, y)) {
        int4 a = make_int4(0, 0, 0, 0);
        int count = 0;
        for (int dy = -windowSize; dy <= windowSize; dy ++) {
            for (int dx = -windowSize; dx <= windowSize; dx ++) {
                const int nx = x + dx;
                const int ny = y + dy;
                if (extent.inBoundsStrict(nx, ny)) {
                    add(a, d_input[extent.index(nx, ny)]);
                    count++;
                }
            }
        }
        d_output[extent.index(x, y)] = make_uchar4(a.x / count, a.y / count, a.z / count, 255);
    }
}

void smooth2(const CExecConfig& config, const uchar4* d_input, uchar4* d_output,
    const CExtent& extent, const int windowSize)
{
    smooth_kernel2<<<config.grid,config.threads>>>(d_input, d_output, extent, windowSize);
}
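// Illustrative sketch, not part of the original file: smooth_kernel2 is a box filter -- for each
// pixel it averages the uchar4 values in a (2*windowSize+1)^2 window, counting only neighbours
// accepted by inBoundsStrict, and writes alpha = 255. A plain-array CPU reference of the same
// computation can be useful for checking results; the explicit width/height parameters and the
// row-major indexing below stand in for the CExtent bookkeeping and are assumptions of this
// sketch, not part of the project's API.
void smooth2_cpu_reference(const uchar4* input, uchar4* output,
    int width, int height, int windowSize)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int sx = 0, sy = 0, sz = 0, count = 0;
            for (int dy = -windowSize; dy <= windowSize; dy++) {
                for (int dx = -windowSize; dx <= windowSize; dx++) {
                    const int nx = x + dx;
                    const int ny = y + dy;
                    if (nx >= 0 && nx < width && ny >= 0 && ny < height) {
                        const uchar4 v = input[ny * width + nx];
                        sx += v.x; sy += v.y; sz += v.z;
                        count++;
                    }
                }
            }
            output[y * width + x] = make_uchar4(sx / count, sy / count, sz / count, 255);
        }
    }
}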
78fde7e386c827cac44ae31b1a9de43bdcaeea7e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_CUDNN #include <vector> #include <sys/time.h> #include "caffe/layers/cudnn_conv_layer.hpp" #include <sys/time.h> namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //Hush //clock_gettime(CLOCK_MONOTONIC_RAW, &ts); //timespec ts; //clock_gettime(CLOCK_REALTIME, &ts); //uint64_t timestamp = ts.tv_sec*1000000+ts.tv_nsec; //struct timeval tv; //gettimeofday(&tv, NULL); //unsigned long timestamp = 1000000 * tv.tv_sec + tv.tv_usec; //std::cout << "[" << timestamp << "] conv layer" << std::endl; //usleep(10000); const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
78fde7e386c827cac44ae31b1a9de43bdcaeea7e.cu
#ifdef USE_CUDNN #include <vector> #include <sys/time.h> #include "caffe/layers/cudnn_conv_layer.hpp" #include <sys/time.h> namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //Hush //clock_gettime(CLOCK_MONOTONIC_RAW, &ts); //timespec ts; //clock_gettime(CLOCK_REALTIME, &ts); //uint64_t timestamp = ts.tv_sec*1000000+ts.tv_nsec; //struct timeval tv; //gettimeofday(&tv, NULL); //unsigned long timestamp = 1000000 * tv.tv_sec + tv.tv_usec; //std::cout << "[" << timestamp << "] conv layer" << std::endl; //usleep(10000); const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
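// Illustrative sketch, not part of the original file: sync_conv_groups<<<1, 1>>>() above relies
// on the legacy default-stream rule that a launch into the null stream does not begin until prior
// work in other (blocking) streams has finished, and that later work waits for it -- the empty
// kernel therefore acts as a cross-stream barrier without a full cudaDeviceSynchronize(). A
// standalone sketch of the same trick (all names hypothetical; it assumes the code is not built
// with --default-stream per-thread, which would remove the implicit synchronization):
//
//   __global__ void empty_kernel() { }
//
//   // work_a / work_b launched into two blocking streams created with cudaStreamCreate()
//   work_a<<<grid, block, 0, stream_a>>>(...);
//   work_b<<<grid, block, 0, stream_b>>>(...);
//   empty_kernel<<<1, 1>>>();            // null-stream launch: waits for both streams
//   follow_up<<<grid, block>>>(...);     // ordered after work_a and work_b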
82c097fb3cfea4b6fd1e2ee9a9bb0a1269edfce6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <book.h> #define N (1024 * 1024) #define FULL_DATA_SIZE (N * 20) __global__ void kernel(int *a, int *b, int *c) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main(void) { hipEvent_t start, stop; float elapsedTime; hipStream_t stream0, stream1; int *h_a, *h_b, *h_c; int *d_a0, *d_b0, *d_c0; int *d_a1, *d_b1, *d_c1; // start the timers HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); // initialize the streams HANDLE_ERROR(hipStreamCreate(&stream0)); HANDLE_ERROR(hipStreamCreate(&stream1)); // allocate the memory on the GPU HANDLE_ERROR(hipMalloc((void**)&d_a0, N * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_b0, N * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_c0, N * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_a1, N * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_b1, N * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_c1, N * sizeof(int))); // allocate host locked memory, used to stream HANDLE_ERROR(hipHostMalloc((void**)&h_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault)); HANDLE_ERROR(hipHostMalloc((void**)&h_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault)); HANDLE_ERROR(hipHostMalloc((void**)&h_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault)); for (int i = 0; i<FULL_DATA_SIZE; i++) { h_a[i] = rand(); h_b[i] = rand(); } HANDLE_ERROR(hipEventRecord(start, 0)); // now loop over full data, in bite-sized chunks for (int i = 0; i<FULL_DATA_SIZE; i += N * 2) { // copy the locked memory to the device, async HANDLE_ERROR(hipMemcpyAsync(d_a0, h_a + i, N * sizeof(int), hipMemcpyHostToDevice, stream0)); HANDLE_ERROR(hipMemcpyAsync(d_b0, h_b + i, N * sizeof(int), hipMemcpyHostToDevice, stream0)); hipLaunchKernelGGL(( kernel), dim3(N / 256), dim3(256), 0, stream0 , d_a0, d_b0, d_c0); // copy the data from device to locked memory HANDLE_ERROR(hipMemcpyAsync(h_c + i, d_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0)); // copy the locked memory to the device, async HANDLE_ERROR(hipMemcpyAsync(d_a1, h_a + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1)); HANDLE_ERROR(hipMemcpyAsync(d_b1, h_b + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1)); hipLaunchKernelGGL(( kernel), dim3(N / 256), dim3(256), 0, stream1 , d_a1, d_b1, d_c1); // copy the data from device to locked memory HANDLE_ERROR(hipMemcpyAsync(h_c + i + N, d_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream1)); } HANDLE_ERROR(hipStreamSynchronize(stream0)); HANDLE_ERROR(hipStreamSynchronize(stream1)); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop)); printf("Time taken: %3.2f ms\n", elapsedTime); // cleanup the streams and memory HANDLE_ERROR(hipHostFree(h_a)); HANDLE_ERROR(hipHostFree(h_b)); HANDLE_ERROR(hipHostFree(h_c)); HANDLE_ERROR(hipFree(d_a0)); HANDLE_ERROR(hipFree(d_b0)); HANDLE_ERROR(hipFree(d_c0)); HANDLE_ERROR(hipFree(d_a1)); HANDLE_ERROR(hipFree(d_b1)); HANDLE_ERROR(hipFree(d_c1)); HANDLE_ERROR(hipStreamDestroy(stream0)); HANDLE_ERROR(hipStreamDestroy(stream1)); return 0; }
82c097fb3cfea4b6fd1e2ee9a9bb0a1269edfce6.cu
#include <book.h> #define N (1024 * 1024) #define FULL_DATA_SIZE (N * 20) __global__ void kernel(int *a, int *b, int *c) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { int idx1 = (idx + 1) % 256; int idx2 = (idx + 2) % 256; float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f; float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f; c[idx] = (as + bs) / 2; } } int main(void) { cudaEvent_t start, stop; float elapsedTime; cudaStream_t stream0, stream1; int *h_a, *h_b, *h_c; int *d_a0, *d_b0, *d_c0; int *d_a1, *d_b1, *d_c1; // start the timers HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); // initialize the streams HANDLE_ERROR(cudaStreamCreate(&stream0)); HANDLE_ERROR(cudaStreamCreate(&stream1)); // allocate the memory on the GPU HANDLE_ERROR(cudaMalloc((void**)&d_a0, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_b0, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_c0, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_a1, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_b1, N * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_c1, N * sizeof(int))); // allocate host locked memory, used to stream HANDLE_ERROR(cudaHostAlloc((void**)&h_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void**)&h_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault)); HANDLE_ERROR(cudaHostAlloc((void**)&h_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault)); for (int i = 0; i<FULL_DATA_SIZE; i++) { h_a[i] = rand(); h_b[i] = rand(); } HANDLE_ERROR(cudaEventRecord(start, 0)); // now loop over full data, in bite-sized chunks for (int i = 0; i<FULL_DATA_SIZE; i += N * 2) { // copy the locked memory to the device, async HANDLE_ERROR(cudaMemcpyAsync(d_a0, h_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0)); HANDLE_ERROR(cudaMemcpyAsync(d_b0, h_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0)); kernel<<<N / 256, 256, 0, stream0 >>>(d_a0, d_b0, d_c0); // copy the data from device to locked memory HANDLE_ERROR(cudaMemcpyAsync(h_c + i, d_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0)); // copy the locked memory to the device, async HANDLE_ERROR(cudaMemcpyAsync(d_a1, h_a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1)); HANDLE_ERROR(cudaMemcpyAsync(d_b1, h_b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1)); kernel<<<N / 256, 256, 0, stream1 >>>(d_a1, d_b1, d_c1); // copy the data from device to locked memory HANDLE_ERROR(cudaMemcpyAsync(h_c + i + N, d_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1)); } HANDLE_ERROR(cudaStreamSynchronize(stream0)); HANDLE_ERROR(cudaStreamSynchronize(stream1)); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("Time taken: %3.2f ms\n", elapsedTime); // cleanup the streams and memory HANDLE_ERROR(cudaFreeHost(h_a)); HANDLE_ERROR(cudaFreeHost(h_b)); HANDLE_ERROR(cudaFreeHost(h_c)); HANDLE_ERROR(cudaFree(d_a0)); HANDLE_ERROR(cudaFree(d_b0)); HANDLE_ERROR(cudaFree(d_c0)); HANDLE_ERROR(cudaFree(d_a1)); HANDLE_ERROR(cudaFree(d_b1)); HANDLE_ERROR(cudaFree(d_c1)); HANDLE_ERROR(cudaStreamDestroy(stream0)); HANDLE_ERROR(cudaStreamDestroy(stream1)); return 0; }
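Both versions of this listing depend on HANDLE_ERROR from book.h, the support header that ships with "CUDA by Example", and on the device actually being able to overlap copies with kernel execution. A rough stand-in for that helper plus a capability check, offered only as a sketch; HandleError, device_supports_overlap, and the message format are assumptions, not code from this repository:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical stand-in for book.h's HANDLE_ERROR: abort with file/line on failure.
static void HandleError(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        fprintf(stderr, "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR(err) (HandleError((err), __FILE__, __LINE__))

// The two-stream pipeline only pays off when the device can copy and compute
// concurrently; deviceOverlap (or asyncEngineCount > 0) reports that.
static bool device_supports_overlap(int device = 0) {
    cudaDeviceProp prop;
    HANDLE_ERROR(cudaGetDeviceProperties(&prop, device));
    return prop.deviceOverlap != 0;
}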
6e3882fd034b6902bd9d60ed63a7be5367d0a1fa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C"
__global__ void multiply(int n, float *a, float *b, float *sum)
{
    int ind = threadIdx.x + blockDim.x * blockIdx.x;
    int i = ind;
    int j = ind % n;
    if (j < n) {
        sum[i] = a[i] * b[j];
    }
}
6e3882fd034b6902bd9d60ed63a7be5367d0a1fa.cu
extern "C"
__global__ void multiply(int n, float *a, float *b, float *sum)
{
    int ind = threadIdx.x + blockDim.x * blockIdx.x;
    int i = ind;
    int j = ind % n;
    if (j < n) {
        sum[i] = a[i] * b[j];
    }
}
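Note that the kernel's j < n guard is always satisfied (j has already been reduced modulo n), so bounds safety rests entirely on the launch configuration. A hypothetical host-side launch, assuming a and sum hold rows * n floats and b holds n floats; rows, launch_multiply, and the buffer names are illustrative, not part of the original source:

#include <cuda_runtime.h>

extern "C" __global__ void multiply(int n, float *a, float *b, float *sum);

// One thread per output element. Because the kernel has no upper bound check
// on i, rows * n should be a multiple of the block size (or the buffers padded).
void launch_multiply(int rows, int n, float *d_a, float *d_b, float *d_sum)
{
    const int total = rows * n;
    const int block = 256;
    const int grid  = (total + block - 1) / block;   // ceil(total / block)
    multiply<<<grid, block>>>(n, d_a, d_b, d_sum);
}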
7102eda7aa505d9cbb2561bdae3b7c93f5ef916d.hip
// !!! This is a file automatically generated by hipify!!!
#include "../../include/layers/softmax_layer.h"

#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>

#include "../../include/util/common.h"

template<typename dtype>
__global__ void gpu_find_max_value(const dtype* input, const int num, const int single_size, dtype* output) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < num) {
    const int pos = single_size * index;
    dtype max = input[pos];
    for (int i = 1; i < single_size; ++i) {
      if (input[pos + i] > max) {
        max = input[pos + i];
      }
    }
    output[index] = max;
  }
}

template<typename dtype>
__global__ void gpu_exp(const dtype* a, const int size, const int num, const dtype* max, dtype* b) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < size) {
    const int pos = index / num;
    b[index] = exp(a[index] - max[pos]);
  }
}

template<typename dtype>
__global__ void gpu_exp_sum(const dtype* a, const int num, const int row, dtype* b) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < num) {
    for (int i = 0; i < row; ++i) {
      b[index] += a[index*row + i];
    }
  }
}

template<typename dtype>
__global__ void gpu_exp_div(const dtype* a, const int size, const int num, dtype* b) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < size) {
    const int pos = index / num;
    b[index] /= a[pos];
  }
}

namespace BigBang {

template<typename dtype>
void SoftmaxLayer<dtype>::Forward_GPU(const Tensor<dtype>* bottom, Tensor<dtype>* top) {
  const dtype* bottom_data = bottom->gpu_data();
  const int nums = bottom->shape(0);
  const int size = bottom->size();
  const int per_data_size = size / nums;
  softmax_sum_->Reset();
  dtype* softmax_sum_data = softmax_sum_->mutable_gpu_data();
  dtype* top_data = top->mutable_gpu_data();
  dtype* mutable_max_num = max_num_->mutable_gpu_data();
  gpu_find_max_value<<<BigBangGetBlocks(nums), THREAD_MAX_NUMS>>>(bottom_data, nums, per_data_size, mutable_max_num);
  gpu_exp<<<BigBangGetBlocks(size), THREAD_MAX_NUMS>>>(bottom_data, size, per_data_size, mutable_max_num, top_data);
  gpu_exp_sum<<<BigBangGetBlocks(nums), THREAD_MAX_NUMS>>>(top_data, nums, per_data_size, softmax_sum_data);
  gpu_exp_div<<<BigBangGetBlocks(size), THREAD_MAX_NUMS>>>(softmax_sum_data, size, per_data_size, top_data);
}

INSTANTIATE_CLASS_GPU_FUNCTION(SoftmaxLayer);
}
7102eda7aa505d9cbb2561bdae3b7c93f5ef916d.cu
#include "../../include/layers/softmax_layer.h"

#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#include "../../include/util/common.h"

template<typename dtype>
__global__ void gpu_find_max_value(const dtype* input, const int num, const int single_size, dtype* output) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < num) {
    const int pos = single_size * index;
    dtype max = input[pos];
    for (int i = 1; i < single_size; ++i) {
      if (input[pos + i] > max) {
        max = input[pos + i];
      }
    }
    output[index] = max;
  }
}

template<typename dtype>
__global__ void gpu_exp(const dtype* a, const int size, const int num, const dtype* max, dtype* b) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < size) {
    const int pos = index / num;
    b[index] = exp(a[index] - max[pos]);
  }
}

template<typename dtype>
__global__ void gpu_exp_sum(const dtype* a, const int num, const int row, dtype* b) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < num) {
    for (int i = 0; i < row; ++i) {
      b[index] += a[index*row + i];
    }
  }
}

template<typename dtype>
__global__ void gpu_exp_div(const dtype* a, const int size, const int num, dtype* b) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < size) {
    const int pos = index / num;
    b[index] /= a[pos];
  }
}

namespace BigBang {

template<typename dtype>
void SoftmaxLayer<dtype>::Forward_GPU(const Tensor<dtype>* bottom, Tensor<dtype>* top) {
  const dtype* bottom_data = bottom->gpu_data();
  const int nums = bottom->shape(0);
  const int size = bottom->size();
  const int per_data_size = size / nums;
  softmax_sum_->Reset();
  dtype* softmax_sum_data = softmax_sum_->mutable_gpu_data();
  dtype* top_data = top->mutable_gpu_data();
  dtype* mutable_max_num = max_num_->mutable_gpu_data();
  gpu_find_max_value<<<BigBangGetBlocks(nums), THREAD_MAX_NUMS>>>(bottom_data, nums, per_data_size, mutable_max_num);
  gpu_exp<<<BigBangGetBlocks(size), THREAD_MAX_NUMS>>>(bottom_data, size, per_data_size, mutable_max_num, top_data);
  gpu_exp_sum<<<BigBangGetBlocks(nums), THREAD_MAX_NUMS>>>(top_data, nums, per_data_size, softmax_sum_data);
  gpu_exp_div<<<BigBangGetBlocks(size), THREAD_MAX_NUMS>>>(softmax_sum_data, size, per_data_size, top_data);
}

INSTANTIATE_CLASS_GPU_FUNCTION(SoftmaxLayer);
}
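The forward pass above is the standard max-subtracted softmax split across four kernels. A host-side reference of the same stages can be handy when checking the kernels; softmax_reference and its layout assumption (sample-major, per_data_size values per sample) are illustrative and not part of the layer:

#include <algorithm>
#include <cmath>
#include <vector>

// Reference for the GPU stages above: per-sample max, exp(x - max), normalize.
std::vector<float> softmax_reference(const std::vector<float>& in,
                                     int num, int per_data_size)
{
    std::vector<float> out(in.size());
    for (int s = 0; s < num; ++s) {
        const float* x = &in[s * per_data_size];
        float* y = &out[s * per_data_size];
        float m = x[0];                                          // gpu_find_max_value
        for (int i = 1; i < per_data_size; ++i) m = std::max(m, x[i]);
        float sum = 0.0f;                                        // gpu_exp + gpu_exp_sum
        for (int i = 0; i < per_data_size; ++i) { y[i] = std::exp(x[i] - m); sum += y[i]; }
        for (int i = 0; i < per_data_size; ++i) y[i] /= sum;     // gpu_exp_div
    }
    return out;
}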
bcaf105a54465c8a154fc5061f0b55e831fd42bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHTensorSort.cuh> #include <THH/THHThrustAllocator.cuh> #include <THH/THHAtomics.cuh> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { // The maximum block size in CUDA constexpr int MAX_BLOCK_SIZE = 1024; /* This code computes the sum of the weights in two-steps: 1) Each GPU warp sums `NROWS_PER_THREAD` number of row given by `indeces` 2) Each partial-sum from 1) are summed and scatter into `grad_weight` Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the kernel execution. If it is high, the size of the thread blocks will be too small to achieve good occupancy. Similarly, a very low value will make the size of the thread blocks in the final sum in step 2) too small. */ constexpr int NROWS_PER_THREAD = 10; // Fast ceil division (no overflow checking) __host__ __device__ __forceinline__ int64_t ceil_div(int64_t x, int64_t y) { return (x + y - 1) / y; } template <typename index_t> __global__ void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets, int64_t num_of_segments, int64_t numel) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { const int64_t idx_start = segment_offsets[id]; const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; const int64_t size = idx_end - idx_start; ret[id] = ceil_div(size, NROWS_PER_THREAD); } } template <typename index_t> __global__ void krn_partial_segment_offset( index_t *ret, const index_t *partials_per_segment, const index_t *partials_per_segment_offset, const index_t *segment_offsets, int64_t num_of_segments) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { index_t idx = partials_per_segment_offset[id]; const index_t num_partials = partials_per_segment[id]; const index_t segment_offset = segment_offsets[id]; for (int64_t i=0; i<num_partials; ++i) { ret[idx++] = segment_offset + i * NROWS_PER_THREAD; } } } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight_bags( index_t *indices, scalar_t *gradOutput, index_t *offset2bag, index_t *count, ptrdiff_t numel, int64_t stride, int mode_mean, const index_t *bag_size, scalar_t* per_sample_weights, int64_t per_sample_weights_stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const int origRow = indices[idx]; const int seq_number = offset2bag[origRow]; const int gradOutputRow = seq_number * stride; acc_type<scalar_t, true> scale = count ? 
1.0 / count[idx] : 1.0; if (per_sample_weights) { scale *= per_sample_weights[origRow * per_sample_weights_stride]; } acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature]; if (mode_mean) { gradient /= bag_size[seq_number]; } weight += gradient * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight( index_t *indices, scalar_t *gradOutput, index_t *count, ptrdiff_t numel, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { using accscalar_t = acc_type<scalar_t, true>; const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; accscalar_t weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const index_t target_row = indices[idx]; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; weight += gradOutput[target_row * stride + startFeature] * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } // This kernel assumes that all input tensors are contiguous. template <typename scalar_t, typename index_t> __global__ void sum_and_scatter( index_t *input, scalar_t *gradWeight, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, const acc_type<scalar_t, true> *grad_weight_per_segment, const index_t *segment_sizes_offsets, int64_t num_of_partial_segments, const int64_t padding_idx, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_sizes_offsets[id]; const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { weight += grad_weight_per_segment[idx*stride + startFeature]; } int64_t target_row = input[segment_offsets[id]]; if (target_row != padding_idx) { gradWeight[target_row * stride + startFeature] = weight; } } } // anon namespace Tensor embedding_backward_cuda_kernel( const Tensor &grad, const Tensor &orig_indices, const Tensor &sorted_indices, const Tensor &count, int64_t num_weights, int padding_idx, bool scale_grad_by_freq, bool mode_mean, const Tensor &offset2bag, const Tensor &bag_size, const Tensor &per_sample_weights) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); const ptrdiff_t numel = sorted_indices.numel(); auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options()); const int64_t stride = grad_weight.stride(0); // Compute the number of segments and their start position so that we do not have to // spawn a warp per index. In this context, a segment is a number of rows that should // be summarized. 
// Unit: index in `sorted_indices` and `orig_indices` AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () { auto segment_offsets = at::empty({numel}, orig_indices.options()); int64_t num_of_segments; { auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>()); auto dummy = at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto dummy_dev = thrust::device_ptr<index_t>(dummy.data_ptr<index_t>()); auto ends = thrust::unique_by_key_copy( policy, sorted_indices_dev, sorted_indices_dev + numel, thrust::make_counting_iterator(0), dummy_dev, thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>())); num_of_segments = thrust::get<0>(ends) - dummy_dev; } // We split the segments up into sizes of `NROWS_PER_THREAD` // Compute the number partial-segments per segment (some partial-segments // may not be the full `NROWS_PER_THREAD` number of rows) auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options()); { hipLaunchKernelGGL(( krn_partials_per_segment), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream, partials_per_segment.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments, numel); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } // In order to compute `partial_segment_offset`, which is the start index // of each partial-segment in `sorted_indices`, we need to compute the // start position of each _segment_ in `partial_segment_offset`. // Unit: index in `partial_segment_offset` auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options()); thrust::exclusive_scan( policy, thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()), thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()+num_of_segments), thrust::device_ptr<index_t>(partials_per_segment_offset.data_ptr<index_t>())); // The total number of partial-segments is the sum of `partials_per_segment_offset` const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() + partials_per_segment_offset[num_of_segments-1].item<index_t>(); // Now we can compute the start position of each partial-segment // Unit: index in `sorted_indices` and `orig_indices` auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options()); { hipLaunchKernelGGL(( krn_partial_segment_offset), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream, partial_segment_offset.data_ptr<index_t>(), partials_per_segment.data_ptr<index_t>(), partials_per_segment_offset.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE; const int block = ::min(stride_warped, MAX_BLOCK_SIZE); const int grid = ceil_div(num_of_partial_segments*stride_warped, block); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_bag_backward_cuda_compute_grad_weight", [&] { // For numerical stability, the dtype of `grad_weight_per_segment` // should match `acc_type` using partial_weight_t = acc_type<scalar_t, true>; TensorOptions op; if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) { op = grad.options().dtype(at::kFloat); } else { op = grad.options(); } auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op); // Compute the sum of each partial-segment and 
handle bags if (offset2bag.defined()) { hipLaunchKernelGGL(( compute_grad_weight_bags<scalar_t>), dim3(grid), dim3(block), 0, stream, orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, mode_mean, bag_size.data_ptr<index_t>(), per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.stride(0) : 0, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( compute_grad_weight<scalar_t>), dim3(grid), dim3(block), 0, stream, orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } // Finally, we sum all the partial-sums and scatter them // into `grad_weight`. const int grid2 = ceil_div(num_of_segments*stride_warped, block); hipLaunchKernelGGL(( sum_and_scatter<scalar_t>), dim3(grid2), dim3(block), 0, stream, sorted_indices.data_ptr<index_t>(), grad_weight.data_ptr<scalar_t>(), stride, segment_offsets.data_ptr<index_t>(), num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), partials_per_segment_offset.data_ptr<index_t>(), num_of_partial_segments, padding_idx, stride_warped); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); }); }); }); return grad_weight; } }}
bcaf105a54465c8a154fc5061f0b55e831fd42bf.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/AccumulateType.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCTensorSort.cuh> #include <THC/THCThrustAllocator.cuh> #include <THC/THCAtomics.cuh> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { // The maximum block size in CUDA constexpr int MAX_BLOCK_SIZE = 1024; /* This code computes the sum of the weights in two-steps: 1) Each GPU warp sums `NROWS_PER_THREAD` number of row given by `indeces` 2) Each partial-sum from 1) are summed and scatter into `grad_weight` Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the kernel execution. If it is high, the size of the thread blocks will be too small to achieve good occupancy. Similarly, a very low value will make the size of the thread blocks in the final sum in step 2) too small. */ constexpr int NROWS_PER_THREAD = 10; // Fast ceil division (no overflow checking) __host__ __device__ __forceinline__ int64_t ceil_div(int64_t x, int64_t y) { return (x + y - 1) / y; } template <typename index_t> __global__ void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets, int64_t num_of_segments, int64_t numel) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { const int64_t idx_start = segment_offsets[id]; const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; const int64_t size = idx_end - idx_start; ret[id] = ceil_div(size, NROWS_PER_THREAD); } } template <typename index_t> __global__ void krn_partial_segment_offset( index_t *ret, const index_t *partials_per_segment, const index_t *partials_per_segment_offset, const index_t *segment_offsets, int64_t num_of_segments) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { index_t idx = partials_per_segment_offset[id]; const index_t num_partials = partials_per_segment[id]; const index_t segment_offset = segment_offsets[id]; for (int64_t i=0; i<num_partials; ++i) { ret[idx++] = segment_offset + i * NROWS_PER_THREAD; } } } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight_bags( index_t *indices, scalar_t *gradOutput, index_t *offset2bag, index_t *count, ptrdiff_t numel, int64_t stride, int mode_mean, const index_t *bag_size, scalar_t* per_sample_weights, int64_t per_sample_weights_stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const int origRow = indices[idx]; const int seq_number = offset2bag[origRow]; const int gradOutputRow = seq_number * stride; acc_type<scalar_t, true> scale = count ? 
1.0 / count[idx] : 1.0; if (per_sample_weights) { scale *= per_sample_weights[origRow * per_sample_weights_stride]; } acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature]; if (mode_mean) { gradient /= bag_size[seq_number]; } weight += gradient * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight( index_t *indices, scalar_t *gradOutput, index_t *count, ptrdiff_t numel, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { using accscalar_t = acc_type<scalar_t, true>; const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; accscalar_t weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const index_t target_row = indices[idx]; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; weight += gradOutput[target_row * stride + startFeature] * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } // This kernel assumes that all input tensors are contiguous. template <typename scalar_t, typename index_t> __global__ void sum_and_scatter( index_t *input, scalar_t *gradWeight, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, const acc_type<scalar_t, true> *grad_weight_per_segment, const index_t *segment_sizes_offsets, int64_t num_of_partial_segments, const int64_t padding_idx, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_sizes_offsets[id]; const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { weight += grad_weight_per_segment[idx*stride + startFeature]; } int64_t target_row = input[segment_offsets[id]]; if (target_row != padding_idx) { gradWeight[target_row * stride + startFeature] = weight; } } } // anon namespace Tensor embedding_backward_cuda_kernel( const Tensor &grad, const Tensor &orig_indices, const Tensor &sorted_indices, const Tensor &count, int64_t num_weights, int padding_idx, bool scale_grad_by_freq, bool mode_mean, const Tensor &offset2bag, const Tensor &bag_size, const Tensor &per_sample_weights) { auto stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); const ptrdiff_t numel = sorted_indices.numel(); auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options()); const int64_t stride = grad_weight.stride(0); // Compute the number of segments and their start position so that we do not have to // spawn a warp per index. In this context, a segment is a number of rows that should // be summarized. 
// Unit: index in `sorted_indices` and `orig_indices` AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () { auto segment_offsets = at::empty({numel}, orig_indices.options()); int64_t num_of_segments; { auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>()); auto dummy = at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto dummy_dev = thrust::device_ptr<index_t>(dummy.data_ptr<index_t>()); auto ends = thrust::unique_by_key_copy( policy, sorted_indices_dev, sorted_indices_dev + numel, thrust::make_counting_iterator(0), dummy_dev, thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>())); num_of_segments = thrust::get<0>(ends) - dummy_dev; } // We split the segments up into sizes of `NROWS_PER_THREAD` // Compute the number partial-segments per segment (some partial-segments // may not be the full `NROWS_PER_THREAD` number of rows) auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options()); { krn_partials_per_segment<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> ( partials_per_segment.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments, numel); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } // In order to compute `partial_segment_offset`, which is the start index // of each partial-segment in `sorted_indices`, we need to compute the // start position of each _segment_ in `partial_segment_offset`. // Unit: index in `partial_segment_offset` auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options()); thrust::exclusive_scan( policy, thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()), thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()+num_of_segments), thrust::device_ptr<index_t>(partials_per_segment_offset.data_ptr<index_t>())); // The total number of partial-segments is the sum of `partials_per_segment_offset` const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() + partials_per_segment_offset[num_of_segments-1].item<index_t>(); // Now we can compute the start position of each partial-segment // Unit: index in `sorted_indices` and `orig_indices` auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options()); { krn_partial_segment_offset<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> ( partial_segment_offset.data_ptr<index_t>(), partials_per_segment.data_ptr<index_t>(), partials_per_segment_offset.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE; const int block = std::min(stride_warped, MAX_BLOCK_SIZE); const int grid = ceil_div(num_of_partial_segments*stride_warped, block); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_bag_backward_cuda_compute_grad_weight", [&] { // For numerical stability, the dtype of `grad_weight_per_segment` // should match `acc_type` using partial_weight_t = acc_type<scalar_t, true>; TensorOptions op; if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) { op = grad.options().dtype(at::kFloat); } else { op = grad.options(); } auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op); // Compute the sum of each partial-segment and handle bags if (offset2bag.defined()) { 
compute_grad_weight_bags<scalar_t><<<grid, block, 0, stream>>>( orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, mode_mean, bag_size.data_ptr<index_t>(), per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.stride(0) : 0, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } else { compute_grad_weight<scalar_t><<<grid, block, 0, stream>>>( orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } // Finally, we sum all the partial-sums and scatter them // into `grad_weight`. const int grid2 = ceil_div(num_of_segments*stride_warped, block); sum_and_scatter<scalar_t><<<grid2, block, 0, stream>>>( sorted_indices.data_ptr<index_t>(), grad_weight.data_ptr<scalar_t>(), stride, segment_offsets.data_ptr<index_t>(), num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), partials_per_segment_offset.data_ptr<index_t>(), num_of_partial_segments, padding_idx, stride_warped); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); }); }); }); return grad_weight; } }}
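The backward path above first splits every run of identical indices (a segment) into partial segments of at most NROWS_PER_THREAD rows, reduces each partial segment in parallel, and then scatters the per-segment sums into grad_weight. A small host-side sketch of that decomposition, mirroring krn_partials_per_segment and krn_partial_segment_offset, may make the bookkeeping easier to follow; partial_segment_offsets and kRowsPerThread are illustrative names, not code used by the kernels:

#include <cstdint>
#include <vector>

constexpr int kRowsPerThread = 10;   // mirrors NROWS_PER_THREAD

// For each segment [begin, end), emit one offset per chunk of at most
// kRowsPerThread rows; these are the start indices of the partial segments.
std::vector<int64_t> partial_segment_offsets(const std::vector<int64_t>& segment_offsets,
                                             int64_t numel)
{
    std::vector<int64_t> out;
    const int64_t num_segments = static_cast<int64_t>(segment_offsets.size());
    for (int64_t s = 0; s < num_segments; ++s) {
        const int64_t begin = segment_offsets[s];
        const int64_t end = (s == num_segments - 1) ? numel : segment_offsets[s + 1];
        for (int64_t p = begin; p < end; p += kRowsPerThread) out.push_back(p);
    }
    return out;   // e.g. segment_offsets {0, 25}, numel 30 -> {0, 10, 20, 25}
}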
c4b7ac17de8900bf541cefb9ea6a249e261ad512.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define BLOCK_SIZE 16 // CUDA tutorial: http://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf // http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory // A is shape (m,n), B is shape (n,k) and C is shape (m,k) __global__ void gemm(float* A, float* B, float* C, int m, int n, int k) { // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; // Each thread block computes one sub-matrix Csub of C float* Csub = &C[BLOCK_SIZE * k * blockRow + BLOCK_SIZE * blockCol]; // Shared memory used to store Asub and Bsub respectively __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Each thread computes one element of Csub // by accumulating results into Cvalue // block_size = 16 -> 256 threads, one per Csub element float Cvalue = 0.0; // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int i = 0; i < (n / BLOCK_SIZE); ++i) { // Get sub-matrix Asub of A float* Asub = &A[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i]; // Get sub-matrix Bsub of B float* Bsub = &B[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix As[row][col] = Asub[row*n+col]; Bs[row][col] = Bsub[row*k+col]; // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together for (int j = 0; j < BLOCK_SIZE; ++j) Cvalue += As[row][j] * Bs[j][col]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element if(col + blockCol* BLOCK_SIZE< k && row + blockRow* BLOCK_SIZE< m) Csub[row*k+col] = Cvalue; } // 32 single float array -> 32 bits unsigned int __device__ unsigned int concatenate(float* array) { unsigned int rvalue=0; unsigned int sign; for (int i = 0; i < 32; i++) { sign = (array[i]>=0); rvalue = rvalue | (sign<<i); } return rvalue; } __global__ void concatenate_rows_kernel(float *a, unsigned int *b, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i<size) b[i] = concatenate(&a[i*32]); } __global__ void concatenate_cols_kernel(float *a, unsigned int *b, int m, int n) { int j = blockIdx.x * blockDim.x + threadIdx.x; if(j<n){ float * array = new float[32]; for(int i=0; i<m; i+=32){ for(int k=0; k<32;k++) array[k] = a[j + n*(i+k)]; b[j+n*i/32]=concatenate(array); } delete[] array; } } // A is shape (m,n), B is shape (n,k) and C is shape (m,k) __global__ void xnor_add_gemm(unsigned int* A_a, unsigned int* A_b, unsigned int* B_a,unsigned int* B_b,float* C, int m, int n, int k) { // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; // Each thread block computes one sub-matrix Csub of C float* Csub = &C[BLOCK_SIZE * k * blockRow + BLOCK_SIZE * blockCol]; // Shared memory used to store Asub and Bsub respectively __shared__ unsigned int A_as[BLOCK_SIZE][BLOCK_SIZE]; __shared__ unsigned int A_bs[BLOCK_SIZE][BLOCK_SIZE]; __shared__ unsigned int B_as[BLOCK_SIZE][BLOCK_SIZE]; __shared__ unsigned int 
B_bs[BLOCK_SIZE][BLOCK_SIZE]; // Each thread computes one element of Csub // by accumulating results into Cvalue // block_size = 16 -> 256 threads, one per Csub element unsigned int C_avalue = 0; unsigned int C_bvalue = 0; unsigned int C_cvalue = 0; unsigned int C_dvalue = 0; // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int i = 0; i < (n / BLOCK_SIZE); ++i) { // Get sub-matrix Asub of A_a unsigned int* A_asub = &A_a[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i]; // Get sub-matrix Asub of A_b unsigned int* A_bsub = &A_b[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i]; // Get sub-matrix Bsub of B_a unsigned int* B_asub = &B_a[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol]; // Get sub-matrix Bsub of B_b unsigned int* B_bsub = &B_b[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix A_as[row][col] = A_asub[row*n+col]; A_bs[row][col] = A_bsub[row*n+col]; B_as[row][col] = B_asub[row*k+col]; B_bs[row][col] = B_bsub[row*k+col]; // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together // THIS IS THE MOST INTERESTING PART for (int j = 0; j < BLOCK_SIZE; ++j) { C_avalue += __popc(A_as[row][j]^B_as[j][col]); C_bvalue += __popc(A_bs[row][j]^B_bs[j][col]); C_cvalue += __popc(A_as[row][j]^B_bs[j][col]); C_dvalue += __popc(A_bs[row][j]^B_as[j][col]); } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element if(col + blockCol* BLOCK_SIZE< k && row + blockRow* BLOCK_SIZE< m) Csub[row*k+col] = (-4*(2*(float)C_avalue-32*n)-(2*(float)C_bvalue-32*n)-2*(2*(float)C_cvalue-32*n)-2*(2*(float)C_dvalue-32*n)); }
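The concatenate helpers above pack 32 floats into one word by storing the sign (x >= 0) of each value in the corresponding bit. A host-side equivalent can be useful for preparing or checking inputs to these kernels; pack_signs is an illustrative name, not taken from the source:

#include <cstdint>

// Pack 32 floats into sign bits: bit i is 1 when array[i] >= 0
// (mirrors the device-side concatenate above).
uint32_t pack_signs(const float* array)
{
    uint32_t rvalue = 0;
    for (int i = 0; i < 32; ++i) {
        uint32_t sign = (array[i] >= 0.0f);
        rvalue |= (sign << i);
    }
    return rvalue;
}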
c4b7ac17de8900bf541cefb9ea6a249e261ad512.cu
#include <stdio.h> #define BLOCK_SIZE 16 // CUDA tutorial: http://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf // http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory // A is shape (m,n), B is shape (n,k) and C is shape (m,k) __global__ void gemm(float* A, float* B, float* C, int m, int n, int k) { // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; // Each thread block computes one sub-matrix Csub of C float* Csub = &C[BLOCK_SIZE * k * blockRow + BLOCK_SIZE * blockCol]; // Shared memory used to store Asub and Bsub respectively __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Each thread computes one element of Csub // by accumulating results into Cvalue // block_size = 16 -> 256 threads, one per Csub element float Cvalue = 0.0; // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int i = 0; i < (n / BLOCK_SIZE); ++i) { // Get sub-matrix Asub of A float* Asub = &A[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i]; // Get sub-matrix Bsub of B float* Bsub = &B[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix As[row][col] = Asub[row*n+col]; Bs[row][col] = Bsub[row*k+col]; // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together for (int j = 0; j < BLOCK_SIZE; ++j) Cvalue += As[row][j] * Bs[j][col]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element if(col + blockCol* BLOCK_SIZE< k && row + blockRow* BLOCK_SIZE< m) Csub[row*k+col] = Cvalue; } // 32 single float array -> 32 bits unsigned int __device__ unsigned int concatenate(float* array) { unsigned int rvalue=0; unsigned int sign; for (int i = 0; i < 32; i++) { sign = (array[i]>=0); rvalue = rvalue | (sign<<i); } return rvalue; } __global__ void concatenate_rows_kernel(float *a, unsigned int *b, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i<size) b[i] = concatenate(&a[i*32]); } __global__ void concatenate_cols_kernel(float *a, unsigned int *b, int m, int n) { int j = blockIdx.x * blockDim.x + threadIdx.x; if(j<n){ float * array = new float[32]; for(int i=0; i<m; i+=32){ for(int k=0; k<32;k++) array[k] = a[j + n*(i+k)]; b[j+n*i/32]=concatenate(array); } delete[] array; } } // A is shape (m,n), B is shape (n,k) and C is shape (m,k) __global__ void xnor_add_gemm(unsigned int* A_a, unsigned int* A_b, unsigned int* B_a,unsigned int* B_b,float* C, int m, int n, int k) { // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; // Each thread block computes one sub-matrix Csub of C float* Csub = &C[BLOCK_SIZE * k * blockRow + BLOCK_SIZE * blockCol]; // Shared memory used to store Asub and Bsub respectively __shared__ unsigned int A_as[BLOCK_SIZE][BLOCK_SIZE]; __shared__ unsigned int A_bs[BLOCK_SIZE][BLOCK_SIZE]; __shared__ unsigned int B_as[BLOCK_SIZE][BLOCK_SIZE]; __shared__ unsigned int B_bs[BLOCK_SIZE][BLOCK_SIZE]; // Each thread computes one element of Csub // by accumulating 
results into Cvalue // block_size = 16 -> 256 threads, one per Csub element unsigned int C_avalue = 0; unsigned int C_bvalue = 0; unsigned int C_cvalue = 0; unsigned int C_dvalue = 0; // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int i = 0; i < (n / BLOCK_SIZE); ++i) { // Get sub-matrix Asub of A_a unsigned int* A_asub = &A_a[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i]; // Get sub-matrix Asub of A_b unsigned int* A_bsub = &A_b[BLOCK_SIZE * blockRow * n + BLOCK_SIZE * i]; // Get sub-matrix Bsub of B_a unsigned int* B_asub = &B_a[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol]; // Get sub-matrix Bsub of B_b unsigned int* B_bsub = &B_b[BLOCK_SIZE * k * i + BLOCK_SIZE * blockCol]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix A_as[row][col] = A_asub[row*n+col]; A_bs[row][col] = A_bsub[row*n+col]; B_as[row][col] = B_asub[row*k+col]; B_bs[row][col] = B_bsub[row*k+col]; // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together // THIS IS THE MOST INTERESTING PART for (int j = 0; j < BLOCK_SIZE; ++j) { C_avalue += __popc(A_as[row][j]^B_as[j][col]); C_bvalue += __popc(A_bs[row][j]^B_bs[j][col]); C_cvalue += __popc(A_as[row][j]^B_bs[j][col]); C_dvalue += __popc(A_bs[row][j]^B_as[j][col]); } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element if(col + blockCol* BLOCK_SIZE< k && row + blockRow* BLOCK_SIZE< m) Csub[row*k+col] = (-4*(2*(float)C_avalue-32*n)-(2*(float)C_bvalue-32*n)-2*(2*(float)C_cvalue-32*n)-2*(2*(float)C_dvalue-32*n)); }
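xnor_add_gemm operates on those packed sign bits: __popc(a ^ b) counts the positions where two 32-value chunks disagree, so each accumulated term of the form (2*C - 32*n) is the negated +/-1 dot product over 32*n values, which the negative coefficients in the final write turn back into a weighted sum of dot products. A tiny host-side check of that identity, using __builtin_popcount (GCC/Clang) in place of the device __popc; binary_dot32 is an illustrative helper, not from the source:

#include <cstdint>

// For +/-1 values encoded as sign bits (32 per word), the real-valued dot
// product of two 32-value chunks is 32 - 2 * popcount(a XOR b).
int binary_dot32(uint32_t a, uint32_t b)
{
    int disagreements = __builtin_popcount(a ^ b);   // __popc(a ^ b) on the device
    return 32 - 2 * disagreements;
}
// Summing this over n words gives 32*n - 2*C, i.e. the negation of the
// (2*C - 32*n) terms accumulated in xnor_add_gemm.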
f9047dec3df0d4be18ff9df4d89bd305e15172d8.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/structs/utilities.hpp> #include <cudf/lists/detail/sorting.hpp> #include <cudf/lists/drop_list_duplicates.hpp> #include <cudf/structs/struct_view.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/table/table_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/transform.h> namespace cudf { namespace lists { namespace detail { namespace { template <typename Type> struct has_negative_nans_fn { column_device_view const d_entries; bool const has_nulls; has_negative_nans_fn(column_device_view const d_entries, bool const has_nulls) : d_entries(d_entries), has_nulls(has_nulls) { } __device__ Type operator()(size_type idx) const noexcept { if (has_nulls && d_entries.is_null_nocheck(idx)) { return false; } auto const val = d_entries.element<Type>(idx); return std::isnan(val) && std::signbit(val); // std::signbit(x) == true if x is negative } }; /** * @brief A structure to be used along with type_dispatcher to check if a column has any * negative NaN value. * * This functor is used to check for replacing negative NaN if there exists one. It is neccessary * because when calling to `lists::detail::sort_lists`, the negative NaN and positive NaN values (if * both exist) are separated to the two ends of the output column. This is due to the API * `lists::detail::sort_lists` internally calls `hipcub::DeviceSegmentedRadixSort`, which performs * sorting by comparing bits of the input numbers. Since negative and positive NaN have * different bits representation, they may not be moved to be close to each other after sorted. */ struct has_negative_nans_dispatch { template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr> bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { auto const d_entries = column_device_view::create(lists_entries, stream); return thrust::count_if( rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.size()), detail::has_negative_nans_fn<Type>{*d_entries, lists_entries.has_nulls()}); } template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const { // Recursively check negative NaN on the children columns. 
return std::any_of( thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.num_children()), [structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) { auto const col = structs_view.get_sliced_child(child_idx); return type_dispatcher(col.type(), detail::has_negative_nans_dispatch{}, col, stream); }); } template <typename Type, std::enable_if_t<!cuda::std::is_floating_point_v<Type> && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> bool operator()(column_view const&, rmm::cuda_stream_view) const { // Columns of non floating-point data will never contain NaN. return false; } }; template <typename Type> struct replace_negative_nans_fn { __device__ Type operator()(Type val) const noexcept { return std::isnan(val) ? std::numeric_limits<Type>::quiet_NaN() : val; } }; /** * @brief A structure to be used along with type_dispatcher to replace -NaN by NaN for all rows * in a floating-point data column. */ struct replace_negative_nans_dispatch { template <typename Type, std::enable_if_t<!cuda::std::is_floating_point_v<Type> && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view) const noexcept { // For non floating point type and non struct, just return a copy of the input. return std::make_unique<column>(lists_entries); } template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { auto new_entries = cudf::detail::allocate_like( lists_entries, lists_entries.size(), cudf::mask_allocation_policy::NEVER, stream); new_entries->set_null_mask(cudf::detail::copy_bitmask(lists_entries, stream), lists_entries.null_count()); // Replace all negative NaN values. thrust::transform(rmm::exec_policy(stream), lists_entries.template begin<Type>(), lists_entries.template end<Type>(), new_entries->mutable_view().template begin<Type>(), detail::replace_negative_nans_fn<Type>{}); return new_entries; } template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { std::vector<std::unique_ptr<cudf::column>> output_struct_members; std::transform( thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.num_children()), std::back_inserter(output_struct_members), [structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) { auto const col = structs_view.get_sliced_child(child_idx); return type_dispatcher(col.type(), detail::replace_negative_nans_dispatch{}, col, stream); }); return cudf::make_structs_column(lists_entries.size(), std::move(output_struct_members), lists_entries.null_count(), cudf::detail::copy_bitmask(lists_entries, stream), stream); } }; /** * @brief Generate a 0-based offset column for a lists column. * * Given a lists_column_view, which may have a non-zero offset, generate a new column containing * 0-based list offsets. This is done by subtracting each of the input list offset by the first * offset. * * @code{.pseudo} * Given a list column having offsets = { 3, 7, 9, 13 }, * then output_offsets = { 0, 4, 6, 10 } * @endcode * * @param lists_column The input lists column. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. 
* @return A column containing 0-based list offsets. */ std::unique_ptr<column> generate_clean_offsets(lists_column_view const& lists_column, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto output_offsets = make_numeric_column(data_type{type_to_id<offset_type>()}, lists_column.size() + 1, mask_state::UNALLOCATED, stream, mr); thrust::transform( rmm::exec_policy(stream), lists_column.offsets_begin(), lists_column.offsets_end(), output_offsets->mutable_view().begin<offset_type>(), [first = lists_column.offsets_begin()] __device__(auto offset) { return offset - *first; }); return output_offsets; } /** * @brief Transform a given lists column to a new lists column in which all the list entries holding * -NaN value are replaced by (positive) NaN. * * Replacing -NaN by NaN is necessary before sorting (individual) lists because the sorting API is * using radix sort, which compares bits of the number thus it may separate -NaN by NaN to the two * ends of the result column. */ std::unique_ptr<column> replace_negative_nans_entries(column_view const& lists_entries, lists_column_view const& lists_column, rmm::cuda_stream_view stream) { // We need to copy the offsets column of the input lists_column. Since the input lists_column may // be sliced, we need to generate clean offsets (i.e., offsets starting from zero). auto new_offsets = generate_clean_offsets(lists_column, stream, rmm::mr::get_current_device_resource()); auto new_entries = type_dispatcher( lists_entries.type(), detail::replace_negative_nans_dispatch{}, lists_entries, stream); return make_lists_column( lists_column.size(), std::move(new_offsets), std::move(new_entries), lists_column.null_count(), cudf::detail::copy_bitmask( lists_column.parent(), stream, rmm::mr::get_current_device_resource())); } /** * @brief Populate list offsets for all list entries. * * Given an `offsets` column_view containing offsets of a lists column and a number of all list * entries in the column, generate an array that maps from each list entry to the offset of the list * containing that entry. * * @code{.pseudo} * num_entries = 10, offsets = { 0, 4, 6, 10 } * output = { 1, 1, 1, 1, 2, 2, 3, 3, 3, 3 } * @endcode * * @param num_entries The number of list entries. * @param offsets Column view to the list offsets. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. * @return A column containing entry list offsets. */ std::unique_ptr<column> generate_entry_list_offsets(size_type num_entries, column_view const& offsets, rmm::cuda_stream_view stream) { auto entry_list_offsets = make_numeric_column(offsets.type(), num_entries, mask_state::UNALLOCATED, stream, rmm::mr::get_current_device_resource()); thrust::upper_bound(rmm::exec_policy(stream), offsets.begin<offset_type>(), offsets.end<offset_type>(), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(num_entries), entry_list_offsets->mutable_view().begin<offset_type>()); return entry_list_offsets; } /** * @brief Performs an equality comparison between two entries in a lists column. * * For the two elements that are NOT in the same list in the lists column, they will always be * considered as different. If they are from the same list and their type is not floating point, * this functor will return the same comparison result as `cudf::element_equality_comparator`. 
* * For floating-point types, entries holding NaN value can be considered as different values or the * same value depending on the `nans_equal` parameter. * * @tparam Type The data type of entries * @tparam nans_equal Flag to specify whether NaN entries should be considered as equal value (only * applicable for floating-point data column) */ template <class Type> struct column_row_comparator_fn { offset_type const* const list_offsets; column_device_view const lhs; column_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; __host__ __device__ column_row_comparator_fn(offset_type const* const list_offsets, column_device_view const& lhs, column_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } template <typename T, std::enable_if_t<!cuda::std::is_floating_point_v<T>>* = nullptr> bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept { return lhs_val == rhs_val; } template <typename T, std::enable_if_t<cuda::std::is_floating_point_v<T>>* = nullptr> bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept { // If both element(i) and element(j) are NaNs and nans are considered as equal value then this // comparison will return `true`. This is the desired behavior in Pandas. if (nans_equal && std::isnan(lhs_val) && std::isnan(rhs_val)) { return true; } // If nans are considered as NOT equal, even both element(i) and element(j) are NaNs this // comparison will still return `false`. This is the desired behavior in Apache Spark. return lhs_val == rhs_val; } bool __device__ operator()(size_type i, size_type j) const noexcept { // Two entries are not considered for equality if they belong to different lists. if (list_offsets[i] != list_offsets[j]) { return false; } if (has_nulls) { bool const lhs_is_null{lhs.nullable() && lhs.is_null_nocheck(i)}; bool const rhs_is_null{rhs.nullable() && rhs.is_null_nocheck(j)}; if (lhs_is_null && rhs_is_null) { return nulls_equal == null_equality::EQUAL; } else if (lhs_is_null != rhs_is_null) { return false; } } return compare<Type>(lhs.element<Type>(i), lhs.element<Type>(j)); } }; /** * @brief Struct used in type_dispatcher for comparing two entries in a lists column. 
*/ struct column_row_comparator_dispatch { offset_type const* const list_offsets; column_device_view const lhs; column_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; __device__ column_row_comparator_dispatch(offset_type const* const list_offsets, column_device_view const& lhs, column_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr> bool __device__ operator()(size_type i, size_type j) const noexcept { return column_row_comparator_fn<Type>{ list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal}(i, j); } template <class Type, std::enable_if_t<!cudf::is_equality_comparable<Type, Type>()>* = nullptr> bool operator()(size_type, size_type) const { CUDF_FAIL( "column_row_comparator_dispatch cannot operate on types that are not equally comparable."); } }; /** * @brief Performs an equality comparison between rows of two tables using `column_row_comparator` * to compare rows of their corresponding columns. */ struct table_row_comparator_fn { offset_type const* const list_offsets; table_device_view const lhs; table_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; table_row_comparator_fn(offset_type const* const list_offsets, table_device_view const& lhs, table_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } bool __device__ operator()(size_type i, size_type j) const noexcept { auto column_comp = [=](column_device_view const& lhs, column_device_view const& rhs) { return type_dispatcher( lhs.type(), column_row_comparator_dispatch{list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal}, i, j); }; return thrust::equal(thrust::seq, lhs.begin(), lhs.end(), rhs.begin(), column_comp); } }; /** * @brief Struct used in type_dispatcher for copying indices of the list entries ignoring * duplicates. 
*/ struct get_unique_entries_dispatch { template <class Type, std::enable_if_t<!cudf::is_equality_comparable<Type, Type>() && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> offset_type* operator()(offset_type const*, column_view const&, size_type, offset_type*, null_equality, nan_equality, bool, rmm::cuda_stream_view) const { CUDF_FAIL( "`get_unique_entries_dispatch` cannot operate on types that are not equally comparable."); } template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr> offset_type* operator()(offset_type const* list_offsets, column_view const& all_lists_entries, size_type num_entries, offset_type* output_begin, null_equality nulls_equal, nan_equality nans_equal, bool has_nulls, rmm::cuda_stream_view stream) const noexcept { auto const d_view = column_device_view::create(all_lists_entries, stream); auto const comp = column_row_comparator_fn<Type>{list_offsets, *d_view, *d_view, nulls_equal, has_nulls, nans_equal == nan_equality::ALL_EQUAL}; return thrust::unique_copy(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_entries), output_begin, comp); } template <class Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> offset_type* operator()(offset_type const* list_offsets, column_view const& all_lists_entries, size_type num_entries, offset_type* output_begin, null_equality nulls_equal, nan_equality nans_equal, bool has_nulls, rmm::cuda_stream_view stream) const noexcept { auto const entries_tview = table_view{{all_lists_entries}}; auto const flatten_nullability = has_nested_nulls(entries_tview) ? structs::detail::column_nullability::FORCE : structs::detail::column_nullability::MATCH_INCOMING; auto const entries_flattened = cudf::structs::detail::flatten_nested_columns( entries_tview, {order::ASCENDING}, {null_order::AFTER}, flatten_nullability); auto const d_view = table_device_view::create(entries_flattened, stream); auto const comp = table_row_comparator_fn{list_offsets, *d_view, *d_view, nulls_equal, has_nulls, nans_equal == nan_equality::ALL_EQUAL}; return thrust::unique_copy(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_entries), output_begin, comp); } }; /** * @brief Copy list entries and entry list offsets ignoring duplicates. * * Given an array of all entries flattened from a list column and an array that maps each entry to * the offset of the list containing that entry, those entries and list offsets are copied into * new arrays such that the duplicated entries within each list will be ignored. * * @param all_lists_entries The input array containing all list entries. * @param entries_list_offsets A map from list entries to their corresponding list offsets. * @param nulls_equal Flag to specify whether null entries should be considered equal. * @param nans_equal Flag to specify whether NaN entries should be considered equal * (only applicable for floating-point data column). * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. * @return A pair of columns, the first one contains unique list entries and the second one * contains their corresponding list offsets. 
*/ std::vector<std::unique_ptr<column>> get_unique_entries_and_list_offsets( column_view const& all_lists_entries, column_view const& entries_list_offsets, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const num_entries = all_lists_entries.size(); // Allocate memory to store the indices of the unique entries. auto unique_indices = rmm::device_uvector<offset_type>(num_entries, stream); auto const output_begin = unique_indices.begin(); auto const output_end = type_dispatcher(all_lists_entries.type(), get_unique_entries_dispatch{}, entries_list_offsets.begin<offset_type>(), all_lists_entries, num_entries, output_begin, nulls_equal, nans_equal, all_lists_entries.has_nulls(), stream); auto gather_map = column_view(data_type{type_to_id<offset_type>()}, static_cast<size_type>(thrust::distance(output_begin, output_end)), unique_indices.data()); // Collect unique entries and entry list offsets. // The new null_count and bitmask of the unique entries will also be generated // by the gather function. return cudf::detail::gather(table_view{{all_lists_entries, entries_list_offsets}}, gather_map, cudf::out_of_bounds_policy::DONT_CHECK, cudf::detail::negative_index_policy::NOT_ALLOWED, stream, mr) ->release(); } /** * @brief Generate list offsets from entry offsets. * * Generate an array of list offsets for the final result lists column. The list offsets of the * original lists column are also taken into account to make sure the result lists column will have * the same empty list rows (if any) as in the original lists column. * * @param num_entries The number of unique entries after removing duplicates. * @param entries_list_offsets The mapping from list entries to their list offsets. * @param original_offsets The list offsets of the original lists column, which will also be used to * store the new list offsets. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. */ void generate_offsets(size_type num_entries, column_view const& entries_list_offsets, mutable_column_view const& original_offsets, rmm::cuda_stream_view stream) { // Firstly, generate temporary list offsets for the unique entries, ignoring empty lists (if any). // If entries_list_offsets = {1, 1, 1, 1, 2, 3, 3, 3, 4, 4 }, num_entries = 10, // then new_offsets = { 0, 4, 5, 8, 10 }. auto const new_offsets = allocate_like( original_offsets, mask_allocation_policy::NEVER, rmm::mr::get_current_device_resource()); thrust::copy_if(rmm::exec_policy(stream), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(num_entries + 1), new_offsets->mutable_view().begin<offset_type>(), [num_entries, offsets_ptr = entries_list_offsets.begin<offset_type>()] __device__( auto i) -> bool { return i == 0 || i == num_entries || offsets_ptr[i] != offsets_ptr[i - 1]; }); // Generate a prefix sum of number of empty lists, storing inplace to the original lists // offsets. // If the original list offsets is { 0, 0, 5, 5, 6, 6 } (there are 2 empty lists), // and new_offsets = { 0, 4, 6 }, then output = { 0, 1, 1, 2, 2, 3}. auto const iter_trans_begin = cudf::detail::make_counting_transform_iterator( 0, [offsets = original_offsets.begin<offset_type>()] __device__(auto i) { return (i > 0 && offsets[i] == offsets[i - 1]) ? 
1 : 0; }); thrust::inclusive_scan(rmm::exec_policy(stream), iter_trans_begin, iter_trans_begin + original_offsets.size(), original_offsets.begin<offset_type>()); // Generate the final list offsets. // If the original list offsets are { 0, 0, 5, 5, 6, 6 }, the new offsets are { 0, 4, 6 }, // and the prefix sums of empty lists are { 0, 1, 1, 2, 2, 3 }, // then output = { 0, 0, 4, 4, 5, 5 }. thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(original_offsets.size()), original_offsets.begin<offset_type>(), [prefix_sum_empty_lists = original_offsets.begin<offset_type>(), offsets = new_offsets->view().begin<offset_type>()] __device__(auto i) { return offsets[i - prefix_sum_empty_lists[i]]; }); } } // anonymous namespace /** * @copydoc cudf::lists::drop_list_duplicates * * @param stream CUDA stream used for device memory operations and kernel launches. */ std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (lists_column.is_empty()) return cudf::empty_like(lists_column.parent()); if (auto const child_type = lists_column.child().type(); cudf::is_nested(child_type) && child_type.id() != type_id::STRUCT) { CUDF_FAIL("Nested types other than STRUCT are not supported in `drop_list_duplicates`."); } // Flatten all entries (depth = 1) of the lists column. auto const lists_entries = lists_column.get_sliced_child(stream); // sorted_lists will store the results of the original lists after calling segmented_sort. auto const sorted_lists = [&]() { // If nans_equal == ALL_EQUAL and the column contains lists of floating-point data type, // we need to replace -NaN by NaN before sorting. auto const replace_negative_nan = nans_equal == nan_equality::ALL_EQUAL && type_dispatcher( lists_entries.type(), detail::has_negative_nans_dispatch{}, lists_entries, stream); if (replace_negative_nan) { auto const new_lists_column = detail::replace_negative_nans_entries(lists_entries, lists_column, stream); return detail::sort_lists( lists_column_view(new_lists_column->view()), order::ASCENDING, null_order::AFTER, stream); } else { return detail::sort_lists(lists_column, order::ASCENDING, null_order::AFTER, stream); } }(); auto const sorted_lists_entries = lists_column_view(sorted_lists->view()).get_sliced_child(stream); // Generate a 0-based offset column. auto lists_offsets = detail::generate_clean_offsets(lists_column, stream, mr); // Generate a mapping from list entries to offsets of the lists containing those entries. auto const entries_list_offsets = detail::generate_entry_list_offsets(sorted_lists_entries.size(), lists_offsets->view(), stream); // Copy non-duplicated entries (along with their list offsets) to new arrays. auto unique_entries_and_list_offsets = detail::get_unique_entries_and_list_offsets( sorted_lists_entries, entries_list_offsets->view(), nulls_equal, nans_equal, stream, mr); // Generate offsets for the new lists column. detail::generate_offsets(unique_entries_and_list_offsets.front()->size(), unique_entries_and_list_offsets.back()->view(), lists_offsets->mutable_view(), stream); // Construct a new lists column without duplicated entries. // Reuse the null_count and bitmask of the lists_column: those are the null information for // the list elements (rows). 
// For the entries of those lists (rows), their null_count and bitmask were generated separately // during the step `get_unique_entries_and_list_offsets` above. return make_lists_column(lists_column.size(), std::move(lists_offsets), std::move(unique_entries_and_list_offsets.front()), lists_column.null_count(), cudf::detail::copy_bitmask(lists_column.parent(), stream, mr)); } } // namespace detail /** * @copydoc cudf::lists::drop_list_duplicates */ std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column, null_equality nulls_equal, nan_equality nans_equal, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::drop_list_duplicates( lists_column, nulls_equal, nans_equal, rmm::cuda_stream_default, mr); } } // namespace lists } // namespace cudf
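The core of the duplicate removal above is thrust::unique_copy over entry indices with a comparator that treats two entries as equal only when they carry the same list offset, so uniqueness is enforced per list rather than across the whole flattened column. The following is a minimal standalone sketch of that idea on plain integers; the names (segmented_equal, labels, values) are illustrative and not taken from the file.

#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/unique.h>
#include <cstdio>
#include <vector>

// Two entry indices compare equal only when they share a list label AND hold
// the same value, so unique_copy collapses duplicates per list.
struct segmented_equal {
  const int* labels;
  const int* values;
  __device__ bool operator()(int i, int j) const {
    return labels[i] == labels[j] && values[i] == values[j];
  }
};

int main() {
  // Three lists flattened and already sorted within each list: {1,1,2}, {2,2}, {3}.
  std::vector<int> h_values{1, 1, 2, 2, 2, 3};
  std::vector<int> h_labels{0, 0, 0, 1, 1, 2};
  thrust::device_vector<int> values(h_values), labels(h_labels);
  thrust::device_vector<int> unique_idx(values.size());
  auto end = thrust::unique_copy(
      thrust::device, thrust::make_counting_iterator(0),
      thrust::make_counting_iterator(static_cast<int>(values.size())),
      unique_idx.begin(),
      segmented_equal{thrust::raw_pointer_cast(labels.data()),
                      thrust::raw_pointer_cast(values.data())});
  for (auto it = unique_idx.begin(); it != end; ++it)
    printf("%d ", static_cast<int>(*it));  // expected surviving indices: 0 2 3 5
  printf("\n");
  return 0;
}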
f9047dec3df0d4be18ff9df4d89bd305e15172d8.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_factories.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/structs/utilities.hpp> #include <cudf/lists/detail/sorting.hpp> #include <cudf/lists/drop_list_duplicates.hpp> #include <cudf/structs/struct_view.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/table/table_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/transform.h> namespace cudf { namespace lists { namespace detail { namespace { template <typename Type> struct has_negative_nans_fn { column_device_view const d_entries; bool const has_nulls; has_negative_nans_fn(column_device_view const d_entries, bool const has_nulls) : d_entries(d_entries), has_nulls(has_nulls) { } __device__ Type operator()(size_type idx) const noexcept { if (has_nulls && d_entries.is_null_nocheck(idx)) { return false; } auto const val = d_entries.element<Type>(idx); return std::isnan(val) && std::signbit(val); // std::signbit(x) == true if x is negative } }; /** * @brief A structure to be used along with type_dispatcher to check if a column has any * negative NaN value. * * This functor is used to check for replacing negative NaN if there exists one. It is neccessary * because when calling to `lists::detail::sort_lists`, the negative NaN and positive NaN values (if * both exist) are separated to the two ends of the output column. This is due to the API * `lists::detail::sort_lists` internally calls `cub::DeviceSegmentedRadixSort`, which performs * sorting by comparing bits of the input numbers. Since negative and positive NaN have * different bits representation, they may not be moved to be close to each other after sorted. */ struct has_negative_nans_dispatch { template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr> bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { auto const d_entries = column_device_view::create(lists_entries, stream); return thrust::count_if( rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.size()), detail::has_negative_nans_fn<Type>{*d_entries, lists_entries.has_nulls()}); } template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> bool operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const { // Recursively check negative NaN on the children columns. 
return std::any_of( thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.num_children()), [structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) { auto const col = structs_view.get_sliced_child(child_idx); return type_dispatcher(col.type(), detail::has_negative_nans_dispatch{}, col, stream); }); } template <typename Type, std::enable_if_t<!cuda::std::is_floating_point_v<Type> && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> bool operator()(column_view const&, rmm::cuda_stream_view) const { // Columns of non floating-point data will never contain NaN. return false; } }; template <typename Type> struct replace_negative_nans_fn { __device__ Type operator()(Type val) const noexcept { return std::isnan(val) ? std::numeric_limits<Type>::quiet_NaN() : val; } }; /** * @brief A structure to be used along with type_dispatcher to replace -NaN by NaN for all rows * in a floating-point data column. */ struct replace_negative_nans_dispatch { template <typename Type, std::enable_if_t<!cuda::std::is_floating_point_v<Type> && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view) const noexcept { // For non floating point type and non struct, just return a copy of the input. return std::make_unique<column>(lists_entries); } template <typename Type, std::enable_if_t<cuda::std::is_floating_point_v<Type>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { auto new_entries = cudf::detail::allocate_like( lists_entries, lists_entries.size(), cudf::mask_allocation_policy::NEVER, stream); new_entries->set_null_mask(cudf::detail::copy_bitmask(lists_entries, stream), lists_entries.null_count()); // Replace all negative NaN values. thrust::transform(rmm::exec_policy(stream), lists_entries.template begin<Type>(), lists_entries.template end<Type>(), new_entries->mutable_view().template begin<Type>(), detail::replace_negative_nans_fn<Type>{}); return new_entries; } template <typename Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> std::unique_ptr<column> operator()(column_view const& lists_entries, rmm::cuda_stream_view stream) const noexcept { std::vector<std::unique_ptr<cudf::column>> output_struct_members; std::transform( thrust::make_counting_iterator(0), thrust::make_counting_iterator(lists_entries.num_children()), std::back_inserter(output_struct_members), [structs_view = structs_column_view{lists_entries}, stream](auto const child_idx) { auto const col = structs_view.get_sliced_child(child_idx); return type_dispatcher(col.type(), detail::replace_negative_nans_dispatch{}, col, stream); }); return cudf::make_structs_column(lists_entries.size(), std::move(output_struct_members), lists_entries.null_count(), cudf::detail::copy_bitmask(lists_entries, stream), stream); } }; /** * @brief Generate a 0-based offset column for a lists column. * * Given a lists_column_view, which may have a non-zero offset, generate a new column containing * 0-based list offsets. This is done by subtracting each of the input list offset by the first * offset. * * @code{.pseudo} * Given a list column having offsets = { 3, 7, 9, 13 }, * then output_offsets = { 0, 4, 6, 10 } * @endcode * * @param lists_column The input lists column. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. 
* @return A column containing 0-based list offsets. */ std::unique_ptr<column> generate_clean_offsets(lists_column_view const& lists_column, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto output_offsets = make_numeric_column(data_type{type_to_id<offset_type>()}, lists_column.size() + 1, mask_state::UNALLOCATED, stream, mr); thrust::transform( rmm::exec_policy(stream), lists_column.offsets_begin(), lists_column.offsets_end(), output_offsets->mutable_view().begin<offset_type>(), [first = lists_column.offsets_begin()] __device__(auto offset) { return offset - *first; }); return output_offsets; } /** * @brief Transform a given lists column to a new lists column in which all the list entries holding * -NaN value are replaced by (positive) NaN. * * Replacing -NaN by NaN is necessary before sorting (individual) lists because the sorting API is * using radix sort, which compares bits of the number thus it may separate -NaN by NaN to the two * ends of the result column. */ std::unique_ptr<column> replace_negative_nans_entries(column_view const& lists_entries, lists_column_view const& lists_column, rmm::cuda_stream_view stream) { // We need to copy the offsets column of the input lists_column. Since the input lists_column may // be sliced, we need to generate clean offsets (i.e., offsets starting from zero). auto new_offsets = generate_clean_offsets(lists_column, stream, rmm::mr::get_current_device_resource()); auto new_entries = type_dispatcher( lists_entries.type(), detail::replace_negative_nans_dispatch{}, lists_entries, stream); return make_lists_column( lists_column.size(), std::move(new_offsets), std::move(new_entries), lists_column.null_count(), cudf::detail::copy_bitmask( lists_column.parent(), stream, rmm::mr::get_current_device_resource())); } /** * @brief Populate list offsets for all list entries. * * Given an `offsets` column_view containing offsets of a lists column and a number of all list * entries in the column, generate an array that maps from each list entry to the offset of the list * containing that entry. * * @code{.pseudo} * num_entries = 10, offsets = { 0, 4, 6, 10 } * output = { 1, 1, 1, 1, 2, 2, 3, 3, 3, 3 } * @endcode * * @param num_entries The number of list entries. * @param offsets Column view to the list offsets. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. * @return A column containing entry list offsets. */ std::unique_ptr<column> generate_entry_list_offsets(size_type num_entries, column_view const& offsets, rmm::cuda_stream_view stream) { auto entry_list_offsets = make_numeric_column(offsets.type(), num_entries, mask_state::UNALLOCATED, stream, rmm::mr::get_current_device_resource()); thrust::upper_bound(rmm::exec_policy(stream), offsets.begin<offset_type>(), offsets.end<offset_type>(), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(num_entries), entry_list_offsets->mutable_view().begin<offset_type>()); return entry_list_offsets; } /** * @brief Performs an equality comparison between two entries in a lists column. * * For the two elements that are NOT in the same list in the lists column, they will always be * considered as different. If they are from the same list and their type is not floating point, * this functor will return the same comparison result as `cudf::element_equality_comparator`. 
* * For floating-point types, entries holding NaN value can be considered as different values or the * same value depending on the `nans_equal` parameter. * * @tparam Type The data type of entries * @tparam nans_equal Flag to specify whether NaN entries should be considered as equal value (only * applicable for floating-point data column) */ template <class Type> struct column_row_comparator_fn { offset_type const* const list_offsets; column_device_view const lhs; column_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; __host__ __device__ column_row_comparator_fn(offset_type const* const list_offsets, column_device_view const& lhs, column_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } template <typename T, std::enable_if_t<!cuda::std::is_floating_point_v<T>>* = nullptr> bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept { return lhs_val == rhs_val; } template <typename T, std::enable_if_t<cuda::std::is_floating_point_v<T>>* = nullptr> bool __device__ compare(T const& lhs_val, T const& rhs_val) const noexcept { // If both element(i) and element(j) are NaNs and nans are considered as equal value then this // comparison will return `true`. This is the desired behavior in Pandas. if (nans_equal && std::isnan(lhs_val) && std::isnan(rhs_val)) { return true; } // If nans are considered as NOT equal, even both element(i) and element(j) are NaNs this // comparison will still return `false`. This is the desired behavior in Apache Spark. return lhs_val == rhs_val; } bool __device__ operator()(size_type i, size_type j) const noexcept { // Two entries are not considered for equality if they belong to different lists. if (list_offsets[i] != list_offsets[j]) { return false; } if (has_nulls) { bool const lhs_is_null{lhs.nullable() && lhs.is_null_nocheck(i)}; bool const rhs_is_null{rhs.nullable() && rhs.is_null_nocheck(j)}; if (lhs_is_null && rhs_is_null) { return nulls_equal == null_equality::EQUAL; } else if (lhs_is_null != rhs_is_null) { return false; } } return compare<Type>(lhs.element<Type>(i), lhs.element<Type>(j)); } }; /** * @brief Struct used in type_dispatcher for comparing two entries in a lists column. 
*/ struct column_row_comparator_dispatch { offset_type const* const list_offsets; column_device_view const lhs; column_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; __device__ column_row_comparator_dispatch(offset_type const* const list_offsets, column_device_view const& lhs, column_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr> bool __device__ operator()(size_type i, size_type j) const noexcept { return column_row_comparator_fn<Type>{ list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal}(i, j); } template <class Type, std::enable_if_t<!cudf::is_equality_comparable<Type, Type>()>* = nullptr> bool operator()(size_type, size_type) const { CUDF_FAIL( "column_row_comparator_dispatch cannot operate on types that are not equally comparable."); } }; /** * @brief Performs an equality comparison between rows of two tables using `column_row_comparator` * to compare rows of their corresponding columns. */ struct table_row_comparator_fn { offset_type const* const list_offsets; table_device_view const lhs; table_device_view const rhs; null_equality const nulls_equal; bool const has_nulls; bool const nans_equal; table_row_comparator_fn(offset_type const* const list_offsets, table_device_view const& lhs, table_device_view const& rhs, null_equality const nulls_equal, bool const has_nulls, bool const nans_equal) : list_offsets(list_offsets), lhs(lhs), rhs(rhs), nulls_equal(nulls_equal), has_nulls(has_nulls), nans_equal(nans_equal) { } bool __device__ operator()(size_type i, size_type j) const noexcept { auto column_comp = [=](column_device_view const& lhs, column_device_view const& rhs) { return type_dispatcher( lhs.type(), column_row_comparator_dispatch{list_offsets, lhs, rhs, nulls_equal, has_nulls, nans_equal}, i, j); }; return thrust::equal(thrust::seq, lhs.begin(), lhs.end(), rhs.begin(), column_comp); } }; /** * @brief Struct used in type_dispatcher for copying indices of the list entries ignoring * duplicates. 
*/ struct get_unique_entries_dispatch { template <class Type, std::enable_if_t<!cudf::is_equality_comparable<Type, Type>() && !std::is_same_v<Type, cudf::struct_view>>* = nullptr> offset_type* operator()(offset_type const*, column_view const&, size_type, offset_type*, null_equality, nan_equality, bool, rmm::cuda_stream_view) const { CUDF_FAIL( "`get_unique_entries_dispatch` cannot operate on types that are not equally comparable."); } template <class Type, std::enable_if_t<cudf::is_equality_comparable<Type, Type>()>* = nullptr> offset_type* operator()(offset_type const* list_offsets, column_view const& all_lists_entries, size_type num_entries, offset_type* output_begin, null_equality nulls_equal, nan_equality nans_equal, bool has_nulls, rmm::cuda_stream_view stream) const noexcept { auto const d_view = column_device_view::create(all_lists_entries, stream); auto const comp = column_row_comparator_fn<Type>{list_offsets, *d_view, *d_view, nulls_equal, has_nulls, nans_equal == nan_equality::ALL_EQUAL}; return thrust::unique_copy(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_entries), output_begin, comp); } template <class Type, std::enable_if_t<std::is_same_v<Type, cudf::struct_view>>* = nullptr> offset_type* operator()(offset_type const* list_offsets, column_view const& all_lists_entries, size_type num_entries, offset_type* output_begin, null_equality nulls_equal, nan_equality nans_equal, bool has_nulls, rmm::cuda_stream_view stream) const noexcept { auto const entries_tview = table_view{{all_lists_entries}}; auto const flatten_nullability = has_nested_nulls(entries_tview) ? structs::detail::column_nullability::FORCE : structs::detail::column_nullability::MATCH_INCOMING; auto const entries_flattened = cudf::structs::detail::flatten_nested_columns( entries_tview, {order::ASCENDING}, {null_order::AFTER}, flatten_nullability); auto const d_view = table_device_view::create(entries_flattened, stream); auto const comp = table_row_comparator_fn{list_offsets, *d_view, *d_view, nulls_equal, has_nulls, nans_equal == nan_equality::ALL_EQUAL}; return thrust::unique_copy(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(num_entries), output_begin, comp); } }; /** * @brief Copy list entries and entry list offsets ignoring duplicates. * * Given an array of all entries flattened from a list column and an array that maps each entry to * the offset of the list containing that entry, those entries and list offsets are copied into * new arrays such that the duplicated entries within each list will be ignored. * * @param all_lists_entries The input array containing all list entries. * @param entries_list_offsets A map from list entries to their corresponding list offsets. * @param nulls_equal Flag to specify whether null entries should be considered equal. * @param nans_equal Flag to specify whether NaN entries should be considered equal * (only applicable for floating-point data column). * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. * @return A pair of columns, the first one contains unique list entries and the second one * contains their corresponding list offsets. 
*/ std::vector<std::unique_ptr<column>> get_unique_entries_and_list_offsets( column_view const& all_lists_entries, column_view const& entries_list_offsets, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const num_entries = all_lists_entries.size(); // Allocate memory to store the indices of the unique entries. auto unique_indices = rmm::device_uvector<offset_type>(num_entries, stream); auto const output_begin = unique_indices.begin(); auto const output_end = type_dispatcher(all_lists_entries.type(), get_unique_entries_dispatch{}, entries_list_offsets.begin<offset_type>(), all_lists_entries, num_entries, output_begin, nulls_equal, nans_equal, all_lists_entries.has_nulls(), stream); auto gather_map = column_view(data_type{type_to_id<offset_type>()}, static_cast<size_type>(thrust::distance(output_begin, output_end)), unique_indices.data()); // Collect unique entries and entry list offsets. // The new null_count and bitmask of the unique entries will also be generated // by the gather function. return cudf::detail::gather(table_view{{all_lists_entries, entries_list_offsets}}, gather_map, cudf::out_of_bounds_policy::DONT_CHECK, cudf::detail::negative_index_policy::NOT_ALLOWED, stream, mr) ->release(); } /** * @brief Generate list offsets from entry offsets. * * Generate an array of list offsets for the final result lists column. The list offsets of the * original lists column are also taken into account to make sure the result lists column will have * the same empty list rows (if any) as in the original lists column. * * @param num_entries The number of unique entries after removing duplicates. * @param entries_list_offsets The mapping from list entries to their list offsets. * @param original_offsets The list offsets of the original lists column, which will also be used to * store the new list offsets. * @param stream CUDA stream used for device memory operations and kernel launches. * @param mr Device resource used to allocate memory. */ void generate_offsets(size_type num_entries, column_view const& entries_list_offsets, mutable_column_view const& original_offsets, rmm::cuda_stream_view stream) { // Firstly, generate temporary list offsets for the unique entries, ignoring empty lists (if any). // If entries_list_offsets = {1, 1, 1, 1, 2, 3, 3, 3, 4, 4 }, num_entries = 10, // then new_offsets = { 0, 4, 5, 8, 10 }. auto const new_offsets = allocate_like( original_offsets, mask_allocation_policy::NEVER, rmm::mr::get_current_device_resource()); thrust::copy_if(rmm::exec_policy(stream), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(num_entries + 1), new_offsets->mutable_view().begin<offset_type>(), [num_entries, offsets_ptr = entries_list_offsets.begin<offset_type>()] __device__( auto i) -> bool { return i == 0 || i == num_entries || offsets_ptr[i] != offsets_ptr[i - 1]; }); // Generate a prefix sum of number of empty lists, storing inplace to the original lists // offsets. // If the original list offsets is { 0, 0, 5, 5, 6, 6 } (there are 2 empty lists), // and new_offsets = { 0, 4, 6 }, then output = { 0, 1, 1, 2, 2, 3}. auto const iter_trans_begin = cudf::detail::make_counting_transform_iterator( 0, [offsets = original_offsets.begin<offset_type>()] __device__(auto i) { return (i > 0 && offsets[i] == offsets[i - 1]) ? 
1 : 0; }); thrust::inclusive_scan(rmm::exec_policy(stream), iter_trans_begin, iter_trans_begin + original_offsets.size(), original_offsets.begin<offset_type>()); // Generate the final list offsets. // If the original list offsets are { 0, 0, 5, 5, 6, 6 }, the new offsets are { 0, 4, 6 }, // and the prefix sums of empty lists are { 0, 1, 1, 2, 2, 3 }, // then output = { 0, 0, 4, 4, 5, 5 }. thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<offset_type>(0), thrust::make_counting_iterator<offset_type>(original_offsets.size()), original_offsets.begin<offset_type>(), [prefix_sum_empty_lists = original_offsets.begin<offset_type>(), offsets = new_offsets->view().begin<offset_type>()] __device__(auto i) { return offsets[i - prefix_sum_empty_lists[i]]; }); } } // anonymous namespace /** * @copydoc cudf::lists::drop_list_duplicates * * @param stream CUDA stream used for device memory operations and kernel launches. */ std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column, null_equality nulls_equal, nan_equality nans_equal, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (lists_column.is_empty()) return cudf::empty_like(lists_column.parent()); if (auto const child_type = lists_column.child().type(); cudf::is_nested(child_type) && child_type.id() != type_id::STRUCT) { CUDF_FAIL("Nested types other than STRUCT are not supported in `drop_list_duplicates`."); } // Flatten all entries (depth = 1) of the lists column. auto const lists_entries = lists_column.get_sliced_child(stream); // sorted_lists will store the results of the original lists after calling segmented_sort. auto const sorted_lists = [&]() { // If nans_equal == ALL_EQUAL and the column contains lists of floating-point data type, // we need to replace -NaN by NaN before sorting. auto const replace_negative_nan = nans_equal == nan_equality::ALL_EQUAL && type_dispatcher( lists_entries.type(), detail::has_negative_nans_dispatch{}, lists_entries, stream); if (replace_negative_nan) { auto const new_lists_column = detail::replace_negative_nans_entries(lists_entries, lists_column, stream); return detail::sort_lists( lists_column_view(new_lists_column->view()), order::ASCENDING, null_order::AFTER, stream); } else { return detail::sort_lists(lists_column, order::ASCENDING, null_order::AFTER, stream); } }(); auto const sorted_lists_entries = lists_column_view(sorted_lists->view()).get_sliced_child(stream); // Generate a 0-based offset column. auto lists_offsets = detail::generate_clean_offsets(lists_column, stream, mr); // Generate a mapping from list entries to offsets of the lists containing those entries. auto const entries_list_offsets = detail::generate_entry_list_offsets(sorted_lists_entries.size(), lists_offsets->view(), stream); // Copy non-duplicated entries (along with their list offsets) to new arrays. auto unique_entries_and_list_offsets = detail::get_unique_entries_and_list_offsets( sorted_lists_entries, entries_list_offsets->view(), nulls_equal, nans_equal, stream, mr); // Generate offsets for the new lists column. detail::generate_offsets(unique_entries_and_list_offsets.front()->size(), unique_entries_and_list_offsets.back()->view(), lists_offsets->mutable_view(), stream); // Construct a new lists column without duplicated entries. // Reuse the null_count and bitmask of the lists_column: those are the null information for // the list elements (rows). 
// For the entries of those lists (rows), their null_count and bitmask were generated separately // during the step `get_unique_entries_and_list_offsets` above. return make_lists_column(lists_column.size(), std::move(lists_offsets), std::move(unique_entries_and_list_offsets.front()), lists_column.null_count(), cudf::detail::copy_bitmask(lists_column.parent(), stream, mr)); } } // namespace detail /** * @copydoc cudf::lists::drop_list_duplicates */ std::unique_ptr<column> drop_list_duplicates(lists_column_view const& lists_column, null_equality nulls_equal, nan_equality nans_equal, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::drop_list_duplicates( lists_column, nulls_equal, nans_equal, rmm::cuda_stream_default, mr); } } // namespace lists } // namespace cudf
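generate_offsets above rebuilds per-list offsets by keeping only the indices that start a new list (plus 0 and num_entries). A small self-contained sketch of that copy_if step, using the exact example values from the code comment, is shown below; is_boundary and the variable names are illustrative, not part of the library.

#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>
#include <vector>

// Index i is kept when it is 0, num_entries, or starts a new list label.
struct is_boundary {
  const int* offsets;
  int num_entries;
  __device__ bool operator()(int i) const {
    return i == 0 || i == num_entries || offsets[i] != offsets[i - 1];
  }
};

int main() {
  std::vector<int> h{1, 1, 1, 1, 2, 3, 3, 3, 4, 4};  // entry -> list offset map
  thrust::device_vector<int> entries_list_offsets(h);
  int num_entries = static_cast<int>(entries_list_offsets.size());
  thrust::device_vector<int> new_offsets(num_entries + 1);
  auto end = thrust::copy_if(
      thrust::device, thrust::make_counting_iterator(0),
      thrust::make_counting_iterator(num_entries + 1), new_offsets.begin(),
      is_boundary{thrust::raw_pointer_cast(entries_list_offsets.data()), num_entries});
  for (auto it = new_offsets.begin(); it != end; ++it)
    printf("%d ", static_cast<int>(*it));  // expected: 0 4 5 8 10
  printf("\n");
  return 0;
}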
3941acd8c6e74588b58433abcde356e060705ed8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* file : parallel_search.cu * author : Tiane Zhu * date : Mar 23, 2017 * * this program is an implementation of the parallel search algorithm * ALGORITHM 4.1 in * "An Introduction to Parallel Algorithms" - by Joseph Jaja * p146 - ISBN 9-789201-548563 */ #include "parallel_search.h" __global__ void search_main(number * X, int n, number target, int num_threads, ull * dev_ret) { __shared__ ull record; int tid = threadIdx.x + blockIdx.x * blockDim.x; for(int i=0; i<ITER; i++){ if(tid == 0){ //printf("%d\n", i); *((int *) dev_ret) = 0; *(((int *) dev_ret) + 1) = n+1; } search(X, n, target, num_threads, dev_ret, &record); } } __device__ void search(number * X, int n, number target, int num_threads, ull * dev_ret, ull * record){ int l, r, *ptr; ull *ptr_u; ptr_u = record; ptr = (int *) ptr_u; if(threadIdx.x == 0){ if(target > X[n-1]){ *ptr = n; *(ptr+1) = n; if(blockIdx.x == 0) atomicExch(dev_ret, *record); }else{ *record = atomicCAS(dev_ret, (ull) -2L, 0); //record = *dev_ret; } } __syncthreads(); l = *ptr; r = *(ptr+1); //printf("%llx %u %d %d\n", (ull) -2L, threadIdx.x, l, r); int block_n, start, s, idx; while(r - l > 1){ /* if(threadIdx.x == 0 && blockIdx.x == 0){ printf("%llx\n", record); printf("%d %d\n", l, r); }*/ block_n = (r - l) / gridDim.x; s = block_n / blockDim.x; s = s > 0 ? s : 1; start = l + (blockIdx.x * block_n); /* if(threadIdx.x == 0) printf("blockIdx.x : %d; block_n : %d; s : %d; start : %d\n", blockIdx.x, block_n, s, start); */ idx = start + threadIdx.x * s; if(idx < r){ //printf("threadIdx.x : %u\nblock_n : %d\ns : %d\nstart : %d\nidx : %d\n", threadIdx.x, block_n, s, start, idx); if(X[idx] <= target && X[idx + s] >= target){ *ptr = idx; *(ptr+1) = idx+s; atomicExch(dev_ret, *ptr_u); //printf("threadIdx : %d from block %d : %d %d\n", threadIdx.x, blockIdx.x, *ptr, *(ptr+1)); } /* if((threadIdx.x == blockDim.x - 1)){ //if(threadIdx.x + blockIdx.x * blockDim.x == num_threads - 1) if(X[idx+s] <= target){ *ptr = idx+s; if(atomicCAS(dev_ret, (ull) -2L, 0) == record){ atomicExch(dev_ret, *ptr_u); printf("threadIdx : %d from block %d : %d %d\n", threadIdx.x, blockIdx.x, *ptr, *(ptr+1)); } } } */ } if(threadIdx.x == 0){ *record = atomicCAS(dev_ret, (ull) -2L, 0); //record = *dev_ret; } __syncthreads(); l = *ptr; r = *(ptr+1); } /* if(threadIdx.x == 0 && blockIdx.x == 0){ printf("%llx\n", record); printf("%d %d %d\n", l, r, r - l); }*/ } // main int main(int argc, char * argv[]) { setbuf(stdout, NULL); _init(argc, argv); if(verbose) printf("finding target : %d in array of length %d\n", target, X_len); hipError_t err_code[10]; float gputime, cputime; int ret_idx_dev, ret_idx_host; ull ret_ull, * dev_ret; hipSetDevice(0); hipDeviceReset(); unsigned int num_blocks = (1023 + num_threads) / 1024; unsigned int threads_per_block = num_threads > 1024 ? 
1024 : num_threads; // X_len + 2 for the algorithm element at idx 0 and n + 1 (originally 1, 2, ..., n) err_code[0] = hipMalloc( &dev_X , X_size ); err_code[1] = hipMalloc( &dev_ret , sizeof(ull)); for(int i=0; i<2; i++){ gerror(err_code[i]); } int _dev_ret[2]; _dev_ret[0] = 0; _dev_ret[1] = X_len + 1; gerror(hipMemcpy(dev_ret, _dev_ret, sizeof(ull), hipMemcpyHostToDevice)); gerror(hipMemcpy(dev_X, host_X, X_size, hipMemcpyHostToDevice)); hipDeviceSynchronize(); ret_idx_dev = 10086; //printf("launching %u blocks, %u threads per block.\n", num_blocks, threads_per_block); d->Dg = {num_blocks, 1, 1}; d->Db = {threads_per_block, 1, 1}; d->Ns = sizeof(ull); //printf("Ns : %lu\n", d->Ns); gstart(); hipLaunchKernelGGL(( search_main), dim3(d->Dg), dim3(d->Db), d->Ns, 0, dev_X, X_len, target, num_threads, dev_ret); gend(&gputime); //printf("gputime : %f ms\n", gputime); gerror( hipGetLastError() ); gerror( hipDeviceSynchronize() ); gerror(hipMemcpy(&ret_ull, dev_ret, sizeof(ull), hipMemcpyDeviceToHost)); ret_idx_dev = *((int *) &ret_ull); //printf("%llx\n", ret_ull); //printf("%d %d\n", *((int *) &ret_ull), *(((int *) &ret_ull)+1)); //printf("device idx = %d;\n", ret_idx_dev); ret_idx_host = 10086; cstart(); for(int i=0; i<ITER; i++){ ret_idx_host = cpu_search(host_X + 1, X_len, target); } cend(&cputime); //printf("cputime : %f ms\n", cputime); //printf("host idx = %d;\n", ret_idx_host); if(ret_idx_host - ret_idx_dev <= 1){ printf("N %f %f\n", gputime, cputime); }else{ printf("E %d %d\n", ret_idx_dev, ret_idx_host); } gerror(hipFree(dev_X)); gerror(hipFree(dev_ret)); free(host_X); } char fname[80]; void _init(int argc, char ** argv) { X_len = DEFAULT_ARRAY_LEN; num_threads = DEFAULT_NUM_THREADS; target = DEFAULT_TARGET; fname[0] = 0; int len_spec = 0; for(int i=1; i<argc; i++){ switch(*argv[i]){ case '-': switch(argv[i][1]){ case 'v': verbose = 1; break; case 'f': if(!len_spec){ strcpy(fname, argv[++i]); len_spec = 1; } break; case 't': sscanf(argv[++i], "%d", &num_threads); break; case 'l': if(!len_spec){ sscanf(argv[++i], "%d", &X_len); len_spec = 1; } break; default: sscanf(argv[i], "%d", &target); } break; default: sscanf(argv[i], FMT, &target); } } X_size = (X_len + 2) * sizeof(number); _init_array(fname[0] != 0); prep_kernel(); } void _init_array(int with_file) { host_X = (number *) malloc(X_size); host_X[0] = INT_MIN; host_X[X_len+1] = INT_MAX; //not use file if(!with_file){ for(number i=1; i<X_len+1; i++){ host_X[i] = 2 * i; } return; } //use file FILE * fp; printf("array file : \"%s\"", fname); if(!(fp = fopen(fname, "r"))){ printf(" does not exist.\n"); exit(1); } if(fscanf(fp, "%d", &X_len) < 1){ printf(" stats broken.\n"); exit(1); } printf("\n"); for(int i=0; i<X_len; i++){ if(fscanf(fp, FMT, host_X + i) != 1){ printf(" missing the %dth number.\n", i); exit(1); } if(verbose) printf(FMT, host_X[i]); } if(verbose) printf("\n"); }
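The kernel above follows Jaja's Algorithm 4.1: in every round the threads slice the current bracket [l, r] into equal strides, the thread whose stride still contains the target publishes the narrowed bracket, and the loop repeats until the bracket has width 1. Below is a deliberately simplified single-block sketch of that round structure that uses a shared-memory vote instead of the packed 64-bit atomics; it assumes X is sorted and X[0] <= target <= X[n-1] (the file handles the out-of-range case separately), and all names are illustrative.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void block_parallel_search(const int* X, int n, int target, int* result) {
  __shared__ int lo, hi;
  __shared__ int hit[128];                 // one vote slot per thread (blockDim.x <= 128)
  if (threadIdx.x == 0) { lo = 0; hi = n - 1; }
  __syncthreads();
  while (hi - lo > 1) {
    int s = max((hi - lo) / (int)blockDim.x, 1);
    int idx = lo + (int)threadIdx.x * s;
    // The last thread always covers up to hi so the strides tile [lo, hi].
    int top = (threadIdx.x == blockDim.x - 1) ? hi : min(idx + s, hi);
    hit[threadIdx.x] = (idx < hi && X[idx] <= target && X[top] >= target) ? idx : -1;
    __syncthreads();
    if (threadIdx.x == 0) {
      for (int t = 0; t < (int)blockDim.x; ++t) {
        if (hit[t] >= 0) {                 // first bracketing stride wins
          lo = hit[t];
          hi = (t == (int)blockDim.x - 1) ? hi : min(hit[t] + s, hi);
          break;
        }
      }
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) *result = lo;      // X[lo] <= target <= X[hi]
}

int main() {
  const int n = 1024;
  int h_X[n];
  for (int i = 0; i < n; ++i) h_X[i] = 2 * i;   // sorted, same pattern as _init_array above
  int *d_X, *d_res, h_res = -1;
  cudaMalloc(&d_X, n * sizeof(int));
  cudaMalloc(&d_res, sizeof(int));
  cudaMemcpy(d_X, h_X, n * sizeof(int), cudaMemcpyHostToDevice);
  block_parallel_search<<<1, 128>>>(d_X, n, 501, d_res);
  cudaMemcpy(&h_res, d_res, sizeof(int), cudaMemcpyDeviceToHost);
  if (h_res >= 0) printf("bracketing index: %d (X[%d] = %d)\n", h_res, h_res, h_X[h_res]);
  cudaFree(d_X);
  cudaFree(d_res);
  return 0;
}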
3941acd8c6e74588b58433abcde356e060705ed8.cu
/* file : parallel_search.cu * author : Tiane Zhu * date : Mar 23, 2017 * * this program is an implementation of the parallel search algorithm * ALGORITHM 4.1 in * "An Introduction to Parallel Algorithms" - by Joseph Jaja * p146 - ISBN 9-789201-548563 */ #include "parallel_search.h" __global__ void search_main(number * X, int n, number target, int num_threads, ull * dev_ret) { __shared__ ull record; int tid = threadIdx.x + blockIdx.x * blockDim.x; for(int i=0; i<ITER; i++){ if(tid == 0){ //printf("%d\n", i); *((int *) dev_ret) = 0; *(((int *) dev_ret) + 1) = n+1; } search(X, n, target, num_threads, dev_ret, &record); } } __device__ void search(number * X, int n, number target, int num_threads, ull * dev_ret, ull * record){ int l, r, *ptr; ull *ptr_u; ptr_u = record; ptr = (int *) ptr_u; if(threadIdx.x == 0){ if(target > X[n-1]){ *ptr = n; *(ptr+1) = n; if(blockIdx.x == 0) atomicExch(dev_ret, *record); }else{ *record = atomicCAS(dev_ret, (ull) -2L, 0); //record = *dev_ret; } } __syncthreads(); l = *ptr; r = *(ptr+1); //printf("%llx %u %d %d\n", (ull) -2L, threadIdx.x, l, r); int block_n, start, s, idx; while(r - l > 1){ /* if(threadIdx.x == 0 && blockIdx.x == 0){ printf("%llx\n", record); printf("%d %d\n", l, r); }*/ block_n = (r - l) / gridDim.x; s = block_n / blockDim.x; s = s > 0 ? s : 1; start = l + (blockIdx.x * block_n); /* if(threadIdx.x == 0) printf("blockIdx.x : %d; block_n : %d; s : %d; start : %d\n", blockIdx.x, block_n, s, start); */ idx = start + threadIdx.x * s; if(idx < r){ //printf("threadIdx.x : %u\nblock_n : %d\ns : %d\nstart : %d\nidx : %d\n", threadIdx.x, block_n, s, start, idx); if(X[idx] <= target && X[idx + s] >= target){ *ptr = idx; *(ptr+1) = idx+s; atomicExch(dev_ret, *ptr_u); //printf("threadIdx : %d from block %d : %d %d\n", threadIdx.x, blockIdx.x, *ptr, *(ptr+1)); } /* if((threadIdx.x == blockDim.x - 1)){ //if(threadIdx.x + blockIdx.x * blockDim.x == num_threads - 1) if(X[idx+s] <= target){ *ptr = idx+s; if(atomicCAS(dev_ret, (ull) -2L, 0) == record){ atomicExch(dev_ret, *ptr_u); printf("threadIdx : %d from block %d : %d %d\n", threadIdx.x, blockIdx.x, *ptr, *(ptr+1)); } } } */ } if(threadIdx.x == 0){ *record = atomicCAS(dev_ret, (ull) -2L, 0); //record = *dev_ret; } __syncthreads(); l = *ptr; r = *(ptr+1); } /* if(threadIdx.x == 0 && blockIdx.x == 0){ printf("%llx\n", record); printf("%d %d %d\n", l, r, r - l); }*/ } // main int main(int argc, char * argv[]) { setbuf(stdout, NULL); _init(argc, argv); if(verbose) printf("finding target : %d in array of length %d\n", target, X_len); cudaError_t err_code[10]; float gputime, cputime; int ret_idx_dev, ret_idx_host; ull ret_ull, * dev_ret; cudaSetDevice(0); cudaDeviceReset(); unsigned int num_blocks = (1023 + num_threads) / 1024; unsigned int threads_per_block = num_threads > 1024 ? 
1024 : num_threads; // X_len + 2 for the algorithm element at idx 0 and n + 1 (originally 1, 2, ..., n) err_code[0] = cudaMalloc( &dev_X , X_size ); err_code[1] = cudaMalloc( &dev_ret , sizeof(ull)); for(int i=0; i<2; i++){ gerror(err_code[i]); } int _dev_ret[2]; _dev_ret[0] = 0; _dev_ret[1] = X_len + 1; gerror(cudaMemcpy(dev_ret, _dev_ret, sizeof(ull), cudaMemcpyHostToDevice)); gerror(cudaMemcpy(dev_X, host_X, X_size, cudaMemcpyHostToDevice)); cudaDeviceSynchronize(); ret_idx_dev = 10086; //printf("launching %u blocks, %u threads per block.\n", num_blocks, threads_per_block); d->Dg = {num_blocks, 1, 1}; d->Db = {threads_per_block, 1, 1}; d->Ns = sizeof(ull); //printf("Ns : %lu\n", d->Ns); gstart(); search_main<<<d->Dg, d->Db, d->Ns>>>(dev_X, X_len, target, num_threads, dev_ret); gend(&gputime); //printf("gputime : %f ms\n", gputime); gerror( cudaGetLastError() ); gerror( cudaDeviceSynchronize() ); gerror(cudaMemcpy(&ret_ull, dev_ret, sizeof(ull), cudaMemcpyDeviceToHost)); ret_idx_dev = *((int *) &ret_ull); //printf("%llx\n", ret_ull); //printf("%d %d\n", *((int *) &ret_ull), *(((int *) &ret_ull)+1)); //printf("device idx = %d;\n", ret_idx_dev); ret_idx_host = 10086; cstart(); for(int i=0; i<ITER; i++){ ret_idx_host = cpu_search(host_X + 1, X_len, target); } cend(&cputime); //printf("cputime : %f ms\n", cputime); //printf("host idx = %d;\n", ret_idx_host); if(ret_idx_host - ret_idx_dev <= 1){ printf("N %f %f\n", gputime, cputime); }else{ printf("E %d %d\n", ret_idx_dev, ret_idx_host); } gerror(cudaFree(dev_X)); gerror(cudaFree(dev_ret)); free(host_X); } char fname[80]; void _init(int argc, char ** argv) { X_len = DEFAULT_ARRAY_LEN; num_threads = DEFAULT_NUM_THREADS; target = DEFAULT_TARGET; fname[0] = 0; int len_spec = 0; for(int i=1; i<argc; i++){ switch(*argv[i]){ case '-': switch(argv[i][1]){ case 'v': verbose = 1; break; case 'f': if(!len_spec){ strcpy(fname, argv[++i]); len_spec = 1; } break; case 't': sscanf(argv[++i], "%d", &num_threads); break; case 'l': if(!len_spec){ sscanf(argv[++i], "%d", &X_len); len_spec = 1; } break; default: sscanf(argv[i], "%d", &target); } break; default: sscanf(argv[i], FMT, &target); } } X_size = (X_len + 2) * sizeof(number); _init_array(fname[0] != 0); prep_kernel(); } void _init_array(int with_file) { host_X = (number *) malloc(X_size); host_X[0] = INT_MIN; host_X[X_len+1] = INT_MAX; //not use file if(!with_file){ for(number i=1; i<X_len+1; i++){ host_X[i] = 2 * i; } return; } //use file FILE * fp; printf("array file : \"%s\"", fname); if(!(fp = fopen(fname, "r"))){ printf(" does not exist.\n"); exit(1); } if(fscanf(fp, "%d", &X_len) < 1){ printf(" stats broken.\n"); exit(1); } printf("\n"); for(int i=0; i<X_len; i++){ if(fscanf(fp, FMT, host_X + i) != 1){ printf(" missing the %dth number.\n", i); exit(1); } if(verbose) printf(FMT, host_X[i]); } if(verbose) printf("\n"); }
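A detail worth isolating from the code above is why dev_ret and record are 64-bit: the pair (l, r) is packed into one unsigned long long so a single atomicExch publishes both bounds consistently, and readers reinterpret the word as two ints. The sketch below shows the same packing trick with explicit shifts instead of pointer casts; the helper names are illustrative, not taken from the file.

#include <cstdio>
#include <cuda_runtime.h>

typedef unsigned long long ull;

// Pack (l, r) into one 64-bit word: l in the low half, r in the high half.
__host__ __device__ inline ull pack_bounds(int l, int r) {
  return (ull)(unsigned int)l | ((ull)(unsigned int)r << 32);
}

__host__ __device__ inline void unpack_bounds(ull packed, int* l, int* r) {
  *l = (int)(unsigned int)(packed & 0xffffffffULL);
  *r = (int)(unsigned int)(packed >> 32);
}

__global__ void publish_bounds(ull* slot, int l, int r) {
  // A single 64-bit atomic makes both halves visible together.
  if (threadIdx.x == 0) atomicExch(slot, pack_bounds(l, r));
}

int main() {
  ull* d_slot;
  cudaMalloc(&d_slot, sizeof(ull));
  cudaMemset(d_slot, 0, sizeof(ull));
  publish_bounds<<<1, 32>>>(d_slot, 17, 42);
  ull h_slot = 0;
  cudaMemcpy(&h_slot, d_slot, sizeof(ull), cudaMemcpyDeviceToHost);
  int l, r;
  unpack_bounds(h_slot, &l, &r);
  printf("l = %d, r = %d\n", l, r);  // expected: l = 17, r = 42
  cudaFree(d_slot);
  return 0;
}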
d6665d295fe72f47319645752e71415168838e01.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include "cupoch/geometry/laserscanbuffer.h" #include "cupoch/geometry/boundingvolume.h" #include "cupoch/utility/platform.h" #include "cupoch/utility/console.h" namespace cupoch { namespace geometry { namespace { std::pair<float, float> TangentMinMax(float min_angle, float max_angle) { float min_angle_tan = tan(min_angle); float max_angle_tan = tan(max_angle); // Correct sign of tan around singularity points if (min_angle_tan < 0.0) min_angle_tan = -min_angle_tan; if (max_angle_tan > 0.0) max_angle_tan = -max_angle_tan; return std::make_pair(min_angle_tan, max_angle_tan); } __device__ bool IsShadow(float r1, float r2, float included_angle, float min_angle_tan, float max_angle_tan) { const float perpendicular_y = r2 * sin(included_angle); const float perpendicular_x = r1 - r2 * cos(included_angle); const float perpendicular_tan = fabs(perpendicular_y) / perpendicular_x; if (perpendicular_tan > 0) { if (perpendicular_tan < min_angle_tan) return true; } else { if (perpendicular_tan > max_angle_tan) return true; } return false; } struct apply_scan_shadow_filter_functor { apply_scan_shadow_filter_functor(const float* ranges, float min_angle_tan, float max_angle_tan, float angle_increment, int num_steps, int window, int neighbors, bool remove_shadow_start_point, float* out) : ranges_(ranges), min_angle_tan_(min_angle_tan), max_angle_tan_(max_angle_tan), angle_increment_(angle_increment), num_steps_(num_steps), window_(window), neighbors_(neighbors), remove_shadow_start_point_(remove_shadow_start_point), out_(out) {}; const float* ranges_; const float min_angle_tan_; const float max_angle_tan_; const float angle_increment_; const int num_steps_; const int window_; const int neighbors_; const bool remove_shadow_start_point_; float* out_; __device__ void operator() (size_t idx) { int n = idx / num_steps_; int i = idx % num_steps_; for (int y = -window_; y < window_ + 1; y++) { int j = i + y; if (j < 0 || j >= num_steps_ || i == j) continue; if (IsShadow(ranges_[n * num_steps_ + i], ranges_[n * num_steps_ + j], y * angle_increment_, min_angle_tan_, max_angle_tan_)) { for (int index = max(i - neighbors_, 0); index <= min(i + neighbors_, num_steps_ - 1); index++) { if (ranges_[i] < ranges_[index]) { out_[n * num_steps_ + index] = std::numeric_limits<float>::quiet_NaN(); } } if (remove_shadow_start_point_) { out_[n * num_steps_ + i] = 
std::numeric_limits<float>::quiet_NaN(); } } } } }; } LaserScanBuffer::LaserScanBuffer(int num_steps, int num_max_scans, float min_angle, float max_angle) : GeometryBase3D(Geometry::GeometryType::LaserScanBuffer), num_steps_(num_steps), num_max_scans_(num_max_scans), min_angle_(min_angle), max_angle_(max_angle) {} LaserScanBuffer::~LaserScanBuffer() {}; LaserScanBuffer::LaserScanBuffer(const LaserScanBuffer& other) : GeometryBase3D(Geometry::GeometryType::LaserScanBuffer), ranges_(other.ranges_), intensities_(other.intensities_), top_(other.top_), bottom_(other.bottom_), num_steps_(other.num_steps_), num_max_scans_(other.num_max_scans_), min_angle_(other.min_angle_), max_angle_(other.max_angle_), origins_(other.origins_) {} thrust::host_vector<float> LaserScanBuffer::GetRanges() const { thrust::host_vector<float> ranges; if (top_ == bottom_) { return ranges; } int start = top_ % num_max_scans_; int end = bottom_ % num_max_scans_; if (start < end) { int n = end - start; ranges.resize(n * num_steps_); thrust::copy_n(ranges_.begin() + start * num_steps_, n * num_steps_, ranges.begin()); return ranges; } else { ranges.resize(num_max_scans_ * num_steps_); int offset = (num_max_scans_ - start) * num_steps_; thrust::copy_n(ranges_.begin() + start * num_steps_, offset, ranges.begin()); thrust::copy_n(ranges_.begin(), end * num_steps_, ranges.begin() + offset); return ranges; } } thrust::host_vector<float> LaserScanBuffer::GetIntensities() const { thrust::host_vector<float> intensities; if (top_ == bottom_) { return intensities; } int start = top_ % num_max_scans_; int end = bottom_ % num_max_scans_; if (start < end) { int n = start - end; intensities.resize(n * num_steps_); thrust::copy_n(intensities_.begin() + start * num_steps_, n * num_steps_, intensities.begin()); return intensities; } else { intensities.resize(num_max_scans_ * num_steps_); int offset = (num_max_scans_ - start) * num_steps_; thrust::copy_n(intensities_.begin() + start * num_steps_, offset, intensities.begin()); thrust::copy_n(intensities_.begin(), end * num_steps_, intensities.begin() + offset); return intensities; } } LaserScanBuffer &LaserScanBuffer::Clear() { top_ = 0; bottom_ = 0; ranges_.clear(); intensities_.clear(); return *this; } bool LaserScanBuffer::IsEmpty() const { return ranges_.empty(); } Eigen::Vector3f LaserScanBuffer::GetMinBound() const { utility::LogError("LaserScanBuffer::GetMinBound is not supported"); return Eigen::Vector3f::Zero(); } Eigen::Vector3f LaserScanBuffer::GetMaxBound() const { utility::LogError("LaserScanBuffer::GetMaxBound is not supported"); return Eigen::Vector3f::Zero(); } Eigen::Vector3f LaserScanBuffer::GetCenter() const { utility::LogError("LaserScanBuffer::GetCenter is not supported"); return Eigen::Vector3f::Zero(); } AxisAlignedBoundingBox LaserScanBuffer::GetAxisAlignedBoundingBox() const { utility::LogError("LaserScanBuffer::GetAxisAlignedBoundingBox is not supported"); return AxisAlignedBoundingBox(); } LaserScanBuffer &LaserScanBuffer::Transform(const Eigen::Matrix4f &transformation) { thrust::for_each(origins_.begin(), origins_.end(), [transformation] __device__ (Eigen::Matrix4f_u& trans) { trans = trans * transformation; }); return *this; } LaserScanBuffer &LaserScanBuffer::Translate(const Eigen::Vector3f &translation, bool relative) { thrust::for_each(origins_.begin(), origins_.end(), [translation] __device__ (Eigen::Matrix4f_u& trans) { trans.block<3, 1>(0, 3) = trans.block<3, 1>(0, 3) + translation; }); return *this; } LaserScanBuffer &LaserScanBuffer::Scale(const float 
scale, bool center) { thrust::for_each(ranges_.begin(), ranges_.end(), [scale] __device__ (float &r) { r *= scale; }); return *this; } LaserScanBuffer &LaserScanBuffer::Rotate(const Eigen::Matrix3f &R, bool center) { thrust::for_each(origins_.begin(), origins_.end(), [R] __device__ (Eigen::Matrix4f_u& trans) { trans.block<3, 3>(0, 0) = trans.block<3, 3>(0, 0) * R; }); return *this; } LaserScanBuffer &LaserScanBuffer::AddRanges(const utility::device_vector<float>& ranges, const Eigen::Matrix4f& transformation, const utility::device_vector<float>& intensities) { if (ranges.size() != num_steps_) { utility::LogError("[AddRanges] Invalid size of input ranges."); return *this; } if (HasIntensities() && ranges.size() != intensities.size()) { utility::LogError("[AddRanges] Invalid size of intensities."); return *this; } bool add_intensities = !intensities.empty() && ranges.size() == intensities.size(); int end = bottom_ % num_max_scans_; if (bottom_ + 1 <= num_max_scans_) { ranges_.insert(ranges_.end(), ranges.begin(), ranges.end()); if (add_intensities) intensities_.insert(intensities_.end(), intensities.begin(), intensities.end()); origins_.push_back(transformation); bottom_++; } else { thrust::copy_n(ranges.begin(), num_steps_, ranges_.begin() + end * num_steps_); if (add_intensities) thrust::copy_n(intensities.begin(), num_steps_, intensities_.begin() + end * num_steps_); origins_[end] = transformation; top_++; bottom_++; } return *this; } LaserScanBuffer &LaserScanBuffer::AddRanges(const utility::pinned_host_vector<float>& ranges, const Eigen::Matrix4f& transformation, const utility::pinned_host_vector<float>& intensities) { utility::device_vector<float> d_ranges(ranges.size()); cudaSafeCall(hipMemcpy(thrust::raw_pointer_cast(d_ranges.data()), ranges.data(), ranges.size() * sizeof(float), hipMemcpyHostToDevice)); utility::device_vector<float> d_intensities(intensities.size()); cudaSafeCall(hipMemcpy(thrust::raw_pointer_cast(d_intensities.data()), intensities.data(), intensities.size() * sizeof(float), hipMemcpyHostToDevice)); return AddRanges(d_ranges, transformation, d_intensities); } std::shared_ptr<LaserScanBuffer> LaserScanBuffer::RangeFilter(float min_range, float max_range) const { auto out = std::make_shared<LaserScanBuffer>(num_steps_, num_max_scans_, min_angle_, max_angle_); if (max_range <= min_range) { utility::LogError("[RangeFilter] Invalid parameter with min_range greater than max_range."); } out->ranges_.resize(ranges_.size()); out->top_ = top_; out->bottom_ = bottom_; thrust::transform(ranges_.begin(), ranges_.end(), out->ranges_.begin(), [min_range, max_range] __device__ (float r) { return (r < min_range || r > max_range) ? std::numeric_limits<float>::quiet_NaN() : r; }); return out; } std::shared_ptr<LaserScanBuffer> LaserScanBuffer::ScanShadowsFilter(float min_angle, float max_angle, int window, int neighbors, bool remove_shadow_start_point) const { auto out = std::make_shared<LaserScanBuffer>(*this); auto minmax_tan = TangentMinMax(min_angle, max_angle); apply_scan_shadow_filter_functor func(thrust::raw_pointer_cast(ranges_.data()), minmax_tan.first, minmax_tan.second, GetAngleIncrement(), num_steps_, window, neighbors, remove_shadow_start_point, thrust::raw_pointer_cast(out->ranges_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(ranges_.size()), func); return out; } } }
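AddRanges and GetRanges above implement a fixed-capacity ring of scans: top_ and bottom_ grow monotonically, each scan lives at (counter % num_max_scans_), and reading the live window back may require two copies when it wraps around the end of the storage. The host-side sketch below mirrors that bookkeeping with plain std::vector; ScanRing and its members are illustrative names, not the class API.

#include <algorithm>
#include <cstdio>
#include <vector>

struct ScanRing {
  int capacity, steps, top = 0, bottom = 0;
  std::vector<float> data;
  ScanRing(int cap, int n) : capacity(cap), steps(n), data(cap * n, 0.f) {}

  void add(const std::vector<float>& scan) {
    int slot = bottom % capacity;
    std::copy(scan.begin(), scan.end(), data.begin() + slot * steps);
    ++bottom;
    if (bottom - top > capacity) ++top;   // the oldest scan was overwritten
  }

  std::vector<float> snapshot() const {
    std::vector<float> out;
    if (top == bottom) return out;
    int start = top % capacity, end = bottom % capacity;
    if (start < end) {                    // contiguous window
      out.assign(data.begin() + start * steps, data.begin() + end * steps);
    } else {                              // wrapped: tail piece, then head piece
      out.assign(data.begin() + start * steps, data.end());
      out.insert(out.end(), data.begin(), data.begin() + end * steps);
    }
    return out;
  }
};

int main() {
  ScanRing ring(3, 2);                    // keep at most 3 scans of 2 steps each
  for (int i = 0; i < 5; ++i) ring.add({float(i), float(i) + 0.5f});
  for (float v : ring.snapshot()) printf("%.1f ", v);  // scans 2, 3, 4 in order
  printf("\n");
  return 0;
}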
d6665d295fe72f47319645752e71415168838e01.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include "cupoch/geometry/laserscanbuffer.h" #include "cupoch/geometry/boundingvolume.h" #include "cupoch/utility/platform.h" #include "cupoch/utility/console.h" namespace cupoch { namespace geometry { namespace { std::pair<float, float> TangentMinMax(float min_angle, float max_angle) { float min_angle_tan = tan(min_angle); float max_angle_tan = tan(max_angle); // Correct sign of tan around singularity points if (min_angle_tan < 0.0) min_angle_tan = -min_angle_tan; if (max_angle_tan > 0.0) max_angle_tan = -max_angle_tan; return std::make_pair(min_angle_tan, max_angle_tan); } __device__ bool IsShadow(float r1, float r2, float included_angle, float min_angle_tan, float max_angle_tan) { const float perpendicular_y = r2 * sin(included_angle); const float perpendicular_x = r1 - r2 * cos(included_angle); const float perpendicular_tan = fabs(perpendicular_y) / perpendicular_x; if (perpendicular_tan > 0) { if (perpendicular_tan < min_angle_tan) return true; } else { if (perpendicular_tan > max_angle_tan) return true; } return false; } struct apply_scan_shadow_filter_functor { apply_scan_shadow_filter_functor(const float* ranges, float min_angle_tan, float max_angle_tan, float angle_increment, int num_steps, int window, int neighbors, bool remove_shadow_start_point, float* out) : ranges_(ranges), min_angle_tan_(min_angle_tan), max_angle_tan_(max_angle_tan), angle_increment_(angle_increment), num_steps_(num_steps), window_(window), neighbors_(neighbors), remove_shadow_start_point_(remove_shadow_start_point), out_(out) {}; const float* ranges_; const float min_angle_tan_; const float max_angle_tan_; const float angle_increment_; const int num_steps_; const int window_; const int neighbors_; const bool remove_shadow_start_point_; float* out_; __device__ void operator() (size_t idx) { int n = idx / num_steps_; int i = idx % num_steps_; for (int y = -window_; y < window_ + 1; y++) { int j = i + y; if (j < 0 || j >= num_steps_ || i == j) continue; if (IsShadow(ranges_[n * num_steps_ + i], ranges_[n * num_steps_ + j], y * angle_increment_, min_angle_tan_, max_angle_tan_)) { for (int index = max(i - neighbors_, 0); index <= min(i + neighbors_, num_steps_ - 1); index++) { if (ranges_[i] < ranges_[index]) { out_[n * num_steps_ + index] = std::numeric_limits<float>::quiet_NaN(); } } if (remove_shadow_start_point_) { out_[n * num_steps_ + i] = std::numeric_limits<float>::quiet_NaN(); } } } } }; } LaserScanBuffer::LaserScanBuffer(int 
num_steps, int num_max_scans, float min_angle, float max_angle) : GeometryBase3D(Geometry::GeometryType::LaserScanBuffer), num_steps_(num_steps), num_max_scans_(num_max_scans), min_angle_(min_angle), max_angle_(max_angle) {} LaserScanBuffer::~LaserScanBuffer() {}; LaserScanBuffer::LaserScanBuffer(const LaserScanBuffer& other) : GeometryBase3D(Geometry::GeometryType::LaserScanBuffer), ranges_(other.ranges_), intensities_(other.intensities_), top_(other.top_), bottom_(other.bottom_), num_steps_(other.num_steps_), num_max_scans_(other.num_max_scans_), min_angle_(other.min_angle_), max_angle_(other.max_angle_), origins_(other.origins_) {} thrust::host_vector<float> LaserScanBuffer::GetRanges() const { thrust::host_vector<float> ranges; if (top_ == bottom_) { return ranges; } int start = top_ % num_max_scans_; int end = bottom_ % num_max_scans_; if (start < end) { int n = end - start; ranges.resize(n * num_steps_); thrust::copy_n(ranges_.begin() + start * num_steps_, n * num_steps_, ranges.begin()); return ranges; } else { ranges.resize(num_max_scans_ * num_steps_); int offset = (num_max_scans_ - start) * num_steps_; thrust::copy_n(ranges_.begin() + start * num_steps_, offset, ranges.begin()); thrust::copy_n(ranges_.begin(), end * num_steps_, ranges.begin() + offset); return ranges; } } thrust::host_vector<float> LaserScanBuffer::GetIntensities() const { thrust::host_vector<float> intensities; if (top_ == bottom_) { return intensities; } int start = top_ % num_max_scans_; int end = bottom_ % num_max_scans_; if (start < end) { int n = start - end; intensities.resize(n * num_steps_); thrust::copy_n(intensities_.begin() + start * num_steps_, n * num_steps_, intensities.begin()); return intensities; } else { intensities.resize(num_max_scans_ * num_steps_); int offset = (num_max_scans_ - start) * num_steps_; thrust::copy_n(intensities_.begin() + start * num_steps_, offset, intensities.begin()); thrust::copy_n(intensities_.begin(), end * num_steps_, intensities.begin() + offset); return intensities; } } LaserScanBuffer &LaserScanBuffer::Clear() { top_ = 0; bottom_ = 0; ranges_.clear(); intensities_.clear(); return *this; } bool LaserScanBuffer::IsEmpty() const { return ranges_.empty(); } Eigen::Vector3f LaserScanBuffer::GetMinBound() const { utility::LogError("LaserScanBuffer::GetMinBound is not supported"); return Eigen::Vector3f::Zero(); } Eigen::Vector3f LaserScanBuffer::GetMaxBound() const { utility::LogError("LaserScanBuffer::GetMaxBound is not supported"); return Eigen::Vector3f::Zero(); } Eigen::Vector3f LaserScanBuffer::GetCenter() const { utility::LogError("LaserScanBuffer::GetCenter is not supported"); return Eigen::Vector3f::Zero(); } AxisAlignedBoundingBox LaserScanBuffer::GetAxisAlignedBoundingBox() const { utility::LogError("LaserScanBuffer::GetAxisAlignedBoundingBox is not supported"); return AxisAlignedBoundingBox(); } LaserScanBuffer &LaserScanBuffer::Transform(const Eigen::Matrix4f &transformation) { thrust::for_each(origins_.begin(), origins_.end(), [transformation] __device__ (Eigen::Matrix4f_u& trans) { trans = trans * transformation; }); return *this; } LaserScanBuffer &LaserScanBuffer::Translate(const Eigen::Vector3f &translation, bool relative) { thrust::for_each(origins_.begin(), origins_.end(), [translation] __device__ (Eigen::Matrix4f_u& trans) { trans.block<3, 1>(0, 3) = trans.block<3, 1>(0, 3) + translation; }); return *this; } LaserScanBuffer &LaserScanBuffer::Scale(const float scale, bool center) { thrust::for_each(ranges_.begin(), ranges_.end(), [scale] __device__ 
(float &r) { r *= scale; }); return *this; } LaserScanBuffer &LaserScanBuffer::Rotate(const Eigen::Matrix3f &R, bool center) { thrust::for_each(origins_.begin(), origins_.end(), [R] __device__ (Eigen::Matrix4f_u& trans) { trans.block<3, 3>(0, 0) = trans.block<3, 3>(0, 0) * R; }); return *this; } LaserScanBuffer &LaserScanBuffer::AddRanges(const utility::device_vector<float>& ranges, const Eigen::Matrix4f& transformation, const utility::device_vector<float>& intensities) { if (ranges.size() != num_steps_) { utility::LogError("[AddRanges] Invalid size of input ranges."); return *this; } if (HasIntensities() && ranges.size() != intensities.size()) { utility::LogError("[AddRanges] Invalid size of intensities."); return *this; } bool add_intensities = !intensities.empty() && ranges.size() == intensities.size(); int end = bottom_ % num_max_scans_; if (bottom_ + 1 <= num_max_scans_) { ranges_.insert(ranges_.end(), ranges.begin(), ranges.end()); if (add_intensities) intensities_.insert(intensities_.end(), intensities.begin(), intensities.end()); origins_.push_back(transformation); bottom_++; } else { thrust::copy_n(ranges.begin(), num_steps_, ranges_.begin() + end * num_steps_); if (add_intensities) thrust::copy_n(intensities.begin(), num_steps_, intensities_.begin() + end * num_steps_); origins_[end] = transformation; top_++; bottom_++; } return *this; } LaserScanBuffer &LaserScanBuffer::AddRanges(const utility::pinned_host_vector<float>& ranges, const Eigen::Matrix4f& transformation, const utility::pinned_host_vector<float>& intensities) { utility::device_vector<float> d_ranges(ranges.size()); cudaSafeCall(cudaMemcpy(thrust::raw_pointer_cast(d_ranges.data()), ranges.data(), ranges.size() * sizeof(float), cudaMemcpyHostToDevice)); utility::device_vector<float> d_intensities(intensities.size()); cudaSafeCall(cudaMemcpy(thrust::raw_pointer_cast(d_intensities.data()), intensities.data(), intensities.size() * sizeof(float), cudaMemcpyHostToDevice)); return AddRanges(d_ranges, transformation, d_intensities); } std::shared_ptr<LaserScanBuffer> LaserScanBuffer::RangeFilter(float min_range, float max_range) const { auto out = std::make_shared<LaserScanBuffer>(num_steps_, num_max_scans_, min_angle_, max_angle_); if (max_range <= min_range) { utility::LogError("[RangeFilter] Invalid parameter with min_range greater than max_range."); } out->ranges_.resize(ranges_.size()); out->top_ = top_; out->bottom_ = bottom_; thrust::transform(ranges_.begin(), ranges_.end(), out->ranges_.begin(), [min_range, max_range] __device__ (float r) { return (r < min_range || r > max_range) ? std::numeric_limits<float>::quiet_NaN() : r; }); return out; } std::shared_ptr<LaserScanBuffer> LaserScanBuffer::ScanShadowsFilter(float min_angle, float max_angle, int window, int neighbors, bool remove_shadow_start_point) const { auto out = std::make_shared<LaserScanBuffer>(*this); auto minmax_tan = TangentMinMax(min_angle, max_angle); apply_scan_shadow_filter_functor func(thrust::raw_pointer_cast(ranges_.data()), minmax_tan.first, minmax_tan.second, GetAngleIncrement(), num_steps_, window, neighbors, remove_shadow_start_point, thrust::raw_pointer_cast(out->ranges_.data())); thrust::for_each(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator(ranges_.size()), func); return out; } } }
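Both GetRanges and GetIntensities in the pair above read the scan history out of a fixed-size ring buffer, indexing it with top_ and bottom_ modulo num_max_scans_ and splitting the copy in two pieces when the window wraps past the end of the storage (note that GetIntensities computes n = start - end for the contiguous case, where the otherwise identical GetRanges uses end - start). A minimal host-side sketch of that wrap-around copy, assuming std::vector in place of the thrust containers; copy_ring_scans is a made-up name and buf is assumed to hold num_max_scans * num_steps floats:

#include <algorithm>
#include <vector>

// Sketch of the wrap-around copy performed by GetRanges above.
std::vector<float> copy_ring_scans(const std::vector<float>& buf,
                                   int top, int bottom,
                                   int num_max_scans, int num_steps) {
    std::vector<float> out;
    if (top == bottom) return out;                       // no scans stored yet
    int start = top % num_max_scans;
    int end = bottom % num_max_scans;
    if (start < end) {                                   // window is contiguous
        out.resize((end - start) * num_steps);
        std::copy_n(buf.begin() + start * num_steps, out.size(), out.begin());
    } else {                                             // window wraps: tail first, then head
        out.resize(num_max_scans * num_steps);
        int tail = (num_max_scans - start) * num_steps;
        std::copy_n(buf.begin() + start * num_steps, tail, out.begin());
        std::copy_n(buf.begin(), end * num_steps, out.begin() + tail);
    }
    return out;
}

AddRanges above fills the same storage, overwriting slot bottom_ % num_max_scans_ once the buffer has reached num_max_scans_ scans.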
647c7f9bc3104354bf68b56c8234e5c0b4582081.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # define NUM_THREADS 6 # define X_DIRECTION 0 # define Y_DIRECTION 1 # define Z_DIRECTION 2 # define NUM_HALO 2 __global__ void calc_grad_shared_3d(int nx, int ny, int nz, float dx, float *arr_grad, float *arr){ __shared__ float arr_s[NUM_THREADS+NUM_HALO][NUM_THREADS+NUM_HALO][NUM_THREADS+NUM_HALO]; const int x = threadIdx.x + blockDim.x * blockIdx.x; const int y = threadIdx.y + blockDim.y * blockIdx.y; const int z = threadIdx.z + blockDim.z * blockIdx.z; const int tx = threadIdx.x + 1; const int ty = threadIdx.y + 1; const int tz = threadIdx.z + 1; const int nxyz = nx * ny * nz; if (x < nx && y < ny && z < nz){ int ijk = nx * ny * z + nx * y + x; // // copy global memory to shared memory // int ijk_f; int ijk_b; arr_s[tz][ty][tx] = arr[ijk]; // halo area if (!(x == 0) && (tx == 1)){ ijk_b = nx * ny * z + nx * y + (x - 1); arr_s[tz][ty][tx-1] = arr[ijk_b]; } else if (!(x == 0) && (tx == NUM_THREADS)){ ijk_f = nx * ny * z + nx * y + (x + 1); arr_s[tz][ty][tx+1] = arr[ijk_f]; } // halo area if (!(y == 0) && (ty == 1)){ ijk_b = nx * ny * z + nx * (y - 1) + x; arr_s[tz][ty-1][tx] = arr[ijk_b]; } else if (!(y == 0) && (ty == NUM_THREADS)){ ijk_f = nx * ny * z + nx * (y + 1) + x; arr_s[tz][ty+1][tx] = arr[ijk_f]; } // halo area if (!(z == 0) && (tz == 1)){ ijk_b = nx * ny * (z - 1) + nx * y + x; arr_s[tz-1][ty][tx] = arr[ijk_b]; } else if (!(z == 0) && (tz == NUM_THREADS)){ ijk_f = nx * ny * (z + 1) + nx * y + x; arr_s[tz+1][ty][tx] = arr[ijk_f]; } __syncthreads(); // // x direction // // calc gradient of x direction if (x == 0){ arr_grad[nxyz * X_DIRECTION + ijk] = (arr_s[tz][ty][tx+1] - arr_s[tz][ty][tx]) / dx; } else if (x == (nx - 1)){ arr_grad[nxyz * X_DIRECTION + ijk] = (arr_s[tz][ty][tx] - arr_s[tz][ty][tx-1]) / dx; } else { arr_grad[nxyz * X_DIRECTION + ijk] = (arr_s[tz][ty][tx+1] - arr_s[tz][ty][tx-1]) / (2.0 * dx); } // // y direction // // calc gradient of y direction if (y == 0){ arr_grad[nxyz * Y_DIRECTION + ijk] = (arr_s[tz][ty+1][tx] - arr_s[tz][ty][tx]) / dx; } else if (y == (ny - 1)){ arr_grad[nxyz * Y_DIRECTION + ijk] = (arr_s[tz][ty][tx] - arr_s[tz][ty-1][tx]) / dx; } else { arr_grad[nxyz * Y_DIRECTION + ijk] = (arr_s[tz][ty+1][tx] - arr_s[tz][ty-1][tx]) / (2.0 * dx); } // // z direction // // calc gradient of z direction if (z == 0){ arr_grad[nxyz * Z_DIRECTION + ijk] = (arr_s[tz+1][ty][tx] - arr_s[tz][ty][tx]) / dx; } else if (z == (nz - 1)){ arr_grad[nxyz * Z_DIRECTION + ijk] = (arr_s[tz][ty][tx] - arr_s[tz-1][ty][tx]) / dx; } else { arr_grad[nxyz * Z_DIRECTION + ijk] = (arr_s[tz+1][ty][tx] - arr_s[tz-1][ty][tx]) / (2.0 * dx); } } } __global__ void calc_grad_global_3d(int nx, int ny, int nz, float dx, float *arr_grad, float *arr){ const int x = threadIdx.x + blockDim.x * blockIdx.x; const int y = threadIdx.y + blockDim.y * blockIdx.y; const int z = threadIdx.z + blockDim.z * blockIdx.z; const int nxyz = nx * ny * nz; int ijk = nx * ny * z + nx * y + x; if (x < nx && y < ny && z < nz){ int ijk_f; int ijk_b; // // x direction // // calc gradient of x direction ijk_f = nx * ny * z + nx * y + (x + 1); ijk_b = nx * ny * z + nx * y + (x - 1); if (x == 0){ arr_grad[nxyz * X_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk]) / dx; } else if (x == (nx - 1)){ arr_grad[nxyz * X_DIRECTION + ijk] = (arr[ijk] - arr[ijk_b]) / dx; } else { arr_grad[nxyz * X_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk_b]) / (2.0 * dx); } // // y direction // // calc gradient of y direction ijk_f = nx * ny * z + nx * (y + 1) + x; 
ijk_b = nx * ny * z + nx * (y - 1) + x; if (y == 0){ arr_grad[nxyz * Y_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk]) / dx; } else if (y == (ny - 1)){ arr_grad[nxyz * Y_DIRECTION + ijk] = (arr[ijk] - arr[ijk_b]) / dx; } else { arr_grad[nxyz * Y_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk_b]) / (2.0 * dx); } // // z direction // // calc gradient of z direction ijk_f = nx * ny * (z + 1) + nx * y + x; ijk_b = nx * ny * (z - 1) + nx * y + x; if (z == 0){ arr_grad[nxyz * Z_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk]) / dx; } else if (z == (nz - 1)){ arr_grad[nxyz * Z_DIRECTION + ijk] = (arr[ijk] - arr[ijk_b]) / dx; } else { arr_grad[nxyz * Z_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk_b]) / (2.0 * dx); } } }
647c7f9bc3104354bf68b56c8234e5c0b4582081.cu
# define NUM_THREADS 6 # define X_DIRECTION 0 # define Y_DIRECTION 1 # define Z_DIRECTION 2 # define NUM_HALO 2 __global__ void calc_grad_shared_3d(int nx, int ny, int nz, float dx, float *arr_grad, float *arr){ __shared__ float arr_s[NUM_THREADS+NUM_HALO][NUM_THREADS+NUM_HALO][NUM_THREADS+NUM_HALO]; const int x = threadIdx.x + blockDim.x * blockIdx.x; const int y = threadIdx.y + blockDim.y * blockIdx.y; const int z = threadIdx.z + blockDim.z * blockIdx.z; const int tx = threadIdx.x + 1; const int ty = threadIdx.y + 1; const int tz = threadIdx.z + 1; const int nxyz = nx * ny * nz; if (x < nx && y < ny && z < nz){ int ijk = nx * ny * z + nx * y + x; // // copy global memory to shared memory // int ijk_f; int ijk_b; arr_s[tz][ty][tx] = arr[ijk]; // halo area if (!(x == 0) && (tx == 1)){ ijk_b = nx * ny * z + nx * y + (x - 1); arr_s[tz][ty][tx-1] = arr[ijk_b]; } else if (!(x == 0) && (tx == NUM_THREADS)){ ijk_f = nx * ny * z + nx * y + (x + 1); arr_s[tz][ty][tx+1] = arr[ijk_f]; } // halo area if (!(y == 0) && (ty == 1)){ ijk_b = nx * ny * z + nx * (y - 1) + x; arr_s[tz][ty-1][tx] = arr[ijk_b]; } else if (!(y == 0) && (ty == NUM_THREADS)){ ijk_f = nx * ny * z + nx * (y + 1) + x; arr_s[tz][ty+1][tx] = arr[ijk_f]; } // halo area if (!(z == 0) && (tz == 1)){ ijk_b = nx * ny * (z - 1) + nx * y + x; arr_s[tz-1][ty][tx] = arr[ijk_b]; } else if (!(z == 0) && (tz == NUM_THREADS)){ ijk_f = nx * ny * (z + 1) + nx * y + x; arr_s[tz+1][ty][tx] = arr[ijk_f]; } __syncthreads(); // // x direction // // calc gradient of x direction if (x == 0){ arr_grad[nxyz * X_DIRECTION + ijk] = (arr_s[tz][ty][tx+1] - arr_s[tz][ty][tx]) / dx; } else if (x == (nx - 1)){ arr_grad[nxyz * X_DIRECTION + ijk] = (arr_s[tz][ty][tx] - arr_s[tz][ty][tx-1]) / dx; } else { arr_grad[nxyz * X_DIRECTION + ijk] = (arr_s[tz][ty][tx+1] - arr_s[tz][ty][tx-1]) / (2.0 * dx); } // // y direction // // calc gradient of y direction if (y == 0){ arr_grad[nxyz * Y_DIRECTION + ijk] = (arr_s[tz][ty+1][tx] - arr_s[tz][ty][tx]) / dx; } else if (y == (ny - 1)){ arr_grad[nxyz * Y_DIRECTION + ijk] = (arr_s[tz][ty][tx] - arr_s[tz][ty-1][tx]) / dx; } else { arr_grad[nxyz * Y_DIRECTION + ijk] = (arr_s[tz][ty+1][tx] - arr_s[tz][ty-1][tx]) / (2.0 * dx); } // // z direction // // calc gradient of z direction if (z == 0){ arr_grad[nxyz * Z_DIRECTION + ijk] = (arr_s[tz+1][ty][tx] - arr_s[tz][ty][tx]) / dx; } else if (z == (nz - 1)){ arr_grad[nxyz * Z_DIRECTION + ijk] = (arr_s[tz][ty][tx] - arr_s[tz-1][ty][tx]) / dx; } else { arr_grad[nxyz * Z_DIRECTION + ijk] = (arr_s[tz+1][ty][tx] - arr_s[tz-1][ty][tx]) / (2.0 * dx); } } } __global__ void calc_grad_global_3d(int nx, int ny, int nz, float dx, float *arr_grad, float *arr){ const int x = threadIdx.x + blockDim.x * blockIdx.x; const int y = threadIdx.y + blockDim.y * blockIdx.y; const int z = threadIdx.z + blockDim.z * blockIdx.z; const int nxyz = nx * ny * nz; int ijk = nx * ny * z + nx * y + x; if (x < nx && y < ny && z < nz){ int ijk_f; int ijk_b; // // x direction // // calc gradient of x direction ijk_f = nx * ny * z + nx * y + (x + 1); ijk_b = nx * ny * z + nx * y + (x - 1); if (x == 0){ arr_grad[nxyz * X_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk]) / dx; } else if (x == (nx - 1)){ arr_grad[nxyz * X_DIRECTION + ijk] = (arr[ijk] - arr[ijk_b]) / dx; } else { arr_grad[nxyz * X_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk_b]) / (2.0 * dx); } // // y direction // // calc gradient of y direction ijk_f = nx * ny * z + nx * (y + 1) + x; ijk_b = nx * ny * z + nx * (y - 1) + x; if (y == 0){ arr_grad[nxyz * Y_DIRECTION + ijk] = 
(arr[ijk_f] - arr[ijk]) / dx; } else if (y == (ny - 1)){ arr_grad[nxyz * Y_DIRECTION + ijk] = (arr[ijk] - arr[ijk_b]) / dx; } else { arr_grad[nxyz * Y_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk_b]) / (2.0 * dx); } // // z direction // // calc gradient of z direction ijk_f = nx * ny * (z + 1) + nx * y + x; ijk_b = nx * ny * (z - 1) + nx * y + x; if (z == 0){ arr_grad[nxyz * Z_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk]) / dx; } else if (z == (nz - 1)){ arr_grad[nxyz * Z_DIRECTION + ijk] = (arr[ijk] - arr[ijk_b]) / dx; } else { arr_grad[nxyz * Z_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk_b]) / (2.0 * dx); } } }
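The gradient file above only defines the two kernels; below is a minimal launch sketch for calc_grad_global_3d, assuming it is compiled in the same translation unit as the kernels (so NUM_THREADS and the kernel are visible) and that d_arr and d_arr_grad already hold nx*ny*nz and 3*nx*ny*nz floats in device memory. The grid dimensions use ceiling division so domain edges that do not fill a whole 6x6x6 block are still covered:

#include <cuda_runtime.h>

// Sketch of a host-side launcher for calc_grad_global_3d (assumed sizes nx, ny, nz).
void launch_grad_global_3d(int nx, int ny, int nz, float dx,
                           float *d_arr_grad, float *d_arr) {
    dim3 block(NUM_THREADS, NUM_THREADS, NUM_THREADS);     // 6 x 6 x 6 threads per block
    dim3 grid((nx + block.x - 1) / block.x,                // ceiling division per axis
              (ny + block.y - 1) / block.y,
              (nz + block.z - 1) / block.z);
    calc_grad_global_3d<<<grid, block>>>(nx, ny, nz, dx, d_arr_grad, d_arr);
    cudaDeviceSynchronize();                               // wait so launch errors surface here
}

calc_grad_shared_3d takes the same argument list, but its halo loads assume the block edge is exactly NUM_THREADS, so the same block shape should be kept if that variant is launched instead.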
1d4013d18b71cc049d6ac2d78f0bb8ae6e8bd8be.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <math.h> #include <time.h> #include <hiprand/hiprand_kernel.h> #include <omp.h> #define TRIALS_PER_THREAD 4096 #define BLOCKS 256 #define THREADS 256 //Help code for switching between Single Precision and Double Precision #ifdef DP typedef double Real; #define PI 3.14159265358979323846 // known value of pi #else typedef float Real; #define PI 3.1415926535 // known value of pi #endif __global__ void gpu_monte_carlo(Real *estimate, hiprandState_t *states, int trials) { unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; int points_in_circle = 0; Real x, y; hiprand_init(1234, tid, 0, &states[tid]); // Initialize CURAND for(int i = 0; i < trials; i++) { x = hiprand_uniform (&states[tid]); y = hiprand_uniform (&states[tid]); points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle. } estimate[tid] = 4.0f * points_in_circle / (Real) trials; // return estimate of pi } Real host_monte_carlo(long trials) { Real x, y; long points_in_circle = 0; for(long i = 0; i < trials; i++) { x = rand() / (Real) RAND_MAX; y = rand() / (Real) RAND_MAX; points_in_circle += (x*x + y*y <= 1.0f); } // printf("Serial- points_in_circle : %ld\n", points_in_circle); // printf("Serial- trials: %ld\n",trials ); return 4.0f * points_in_circle / trials; } int main (int argc, char *argv[]) { clock_t start, stop; //get the total number of pthreads int total_threads=atoi(argv[1]); long total_tasks=pow(2,28); int trials_per_thread= total_tasks/(BLOCKS*THREADS); Real host[BLOCKS * THREADS]; Real *dev; hiprandState_t *devStates; printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n", trials_per_thread, BLOCKS, THREADS); start = clock(); hipMalloc((void **) &dev, BLOCKS * THREADS * sizeof(Real)); // allocate device mem. 
for counts hipMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(hiprandState_t) ); hipLaunchKernelGGL(( gpu_monte_carlo), dim3(BLOCKS), dim3(THREADS), 0, 0, dev, devStates,trials_per_thread); hipMemcpy(host, dev, BLOCKS * THREADS * sizeof(Real), hipMemcpyDeviceToHost); // return results Real pi_gpu = 0; for(int i = 0; i < BLOCKS * THREADS; i++) { pi_gpu += host[i]; } pi_gpu /= (BLOCKS * THREADS); stop = clock(); printf("GPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC); start = clock(); Real x,y,z; //sample point and its squared distance from the origin long points_in_circle = 0; //Count holds all the number of how many good coordinates Real omp_pi = 0.f; //holds approx value of pi #pragma omp parallel for firstprivate(x, y, z) reduction(+:points_in_circle) num_threads(total_threads) for (long i = 0; i < total_tasks; ++i) //main loop { x = rand() / (Real) RAND_MAX; y = rand() / (Real) RAND_MAX; z = (x*x + y*y); if(z <= 1) points_in_circle++; } omp_pi = 4.0f * points_in_circle / total_tasks; stop = clock(); printf("OpenMP pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC); start = clock(); Real pi_cpu = host_monte_carlo(total_tasks); stop = clock(); printf("CPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC); #ifdef DP printf("CUDA estimate of PI = %20.18f [error of %20.18f]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %20.18f [error of %20.18f]\n", pi_cpu, pi_cpu - PI); printf("OpenMP estimate of PI = %20.18f [error of %20.18f]\n",omp_pi,omp_pi - PI); #else printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %f [error of %f]\n", pi_cpu, pi_cpu - PI); printf("OpenMP estimate of PI = %f [error of %f]\n",omp_pi,omp_pi - PI); #endif return 0; }
1d4013d18b71cc049d6ac2d78f0bb8ae6e8bd8be.cu
#include <stdlib.h> #include <stdio.h> #include <cuda.h> #include <math.h> #include <time.h> #include <curand_kernel.h> #include <omp.h> #define TRIALS_PER_THREAD 4096 #define BLOCKS 256 #define THREADS 256 //Help code for switching between Single Precision and Double Precision #ifdef DP typedef double Real; #define PI 3.14159265358979323846 // known value of pi #else typedef float Real; #define PI 3.1415926535 // known value of pi #endif __global__ void gpu_monte_carlo(Real *estimate, curandState *states, int trials) { unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x; int points_in_circle = 0; Real x, y; curand_init(1234, tid, 0, &states[tid]); // Initialize CURAND for(int i = 0; i < trials; i++) { x = curand_uniform (&states[tid]); y = curand_uniform (&states[tid]); points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle. } estimate[tid] = 4.0f * points_in_circle / (Real) trials; // return estimate of pi } Real host_monte_carlo(long trials) { Real x, y; long points_in_circle = 0; for(long i = 0; i < trials; i++) { x = rand() / (Real) RAND_MAX; y = rand() / (Real) RAND_MAX; points_in_circle += (x*x + y*y <= 1.0f); } // printf("Serial- points_in_circle : %ld\n", points_in_circle); // printf("Serial- trials: %ld\n",trials ); return 4.0f * points_in_circle / trials; } int main (int argc, char *argv[]) { clock_t start, stop; //get the total number of pthreads int total_threads=atoi(argv[1]); long total_tasks=pow(2,28); int trials_per_thread= total_tasks/(BLOCKS*THREADS); Real host[BLOCKS * THREADS]; Real *dev; curandState *devStates; printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n", trials_per_thread, BLOCKS, THREADS); start = clock(); cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(Real)); // allocate device mem. for counts cudaMalloc( (void **)&devStates, THREADS * BLOCKS * sizeof(curandState) ); gpu_monte_carlo<<<BLOCKS, THREADS>>>(dev, devStates,trials_per_thread); cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(Real), cudaMemcpyDeviceToHost); // return results Real pi_gpu = 0; for(int i = 0; i < BLOCKS * THREADS; i++) { pi_gpu += host[i]; } pi_gpu /= (BLOCKS * THREADS); stop = clock(); printf("GPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC); start = clock(); Real x,y,z; //sample point and its squared distance from the origin long points_in_circle = 0; //Count holds all the number of how many good coordinates Real omp_pi = 0.f; //holds approx value of pi #pragma omp parallel for firstprivate(x, y, z) reduction(+:points_in_circle) num_threads(total_threads) for (long i = 0; i < total_tasks; ++i) //main loop { x = rand() / (Real) RAND_MAX; y = rand() / (Real) RAND_MAX; z = (x*x + y*y); if(z <= 1) points_in_circle++; } omp_pi = 4.0f * points_in_circle / total_tasks; stop = clock(); printf("OpenMP pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC); start = clock(); Real pi_cpu = host_monte_carlo(total_tasks); stop = clock(); printf("CPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC); #ifdef DP printf("CUDA estimate of PI = %20.18f [error of %20.18f]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %20.18f [error of %20.18f]\n", pi_cpu, pi_cpu - PI); printf("OpenMP estimate of PI = %20.18f [error of %20.18f]\n",omp_pi,omp_pi - PI); #else printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI); printf("CPU estimate of PI = %f [error of %f]\n", pi_cpu, pi_cpu - PI); printf("OpenMP estimate of PI = %f [error of %f]\n",omp_pi,omp_pi - PI); #endif return 0; }
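In the OpenMP section of the pair above, every thread draws its samples through rand(), whose single hidden state is not required to be thread-safe and typically serializes threads on an internal lock. A sketch of the same estimate with one generator state per thread, assuming POSIX rand_r is available; omp_pi_estimate is a made-up name for illustration:

#include <omp.h>
#include <stdlib.h>

// Sketch: per-thread RNG state so the sampling loop scales across threads.
double omp_pi_estimate(long total_tasks, int total_threads) {
    long points_in_circle = 0;
    #pragma omp parallel reduction(+:points_in_circle) num_threads(total_threads)
    {
        unsigned int seed = 1234u + omp_get_thread_num();   // private seed per thread
        #pragma omp for
        for (long i = 0; i < total_tasks; ++i) {
            double x = rand_r(&seed) / (double)RAND_MAX;
            double y = rand_r(&seed) / (double)RAND_MAX;
            if (x * x + y * y <= 1.0) points_in_circle++;    // sample landed inside the quarter circle
        }
    }
    return 4.0 * points_in_circle / total_tasks;
}

Seeding with the thread number keeps the per-thread streams distinct while the run as a whole stays deterministic for a fixed thread count.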
117adf0d1841c1f479393c4b17c72f07921c625a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_cooperative_groups.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define SIZE 100 #include <iostream> using namespace std; __global__ void sum(int* input) // kernel function definition { const int tid = threadIdx.x; // thread id int step_size = 1; int number_of_threads = blockDim.x; // no of threads while (number_of_threads > 0) { if (tid < number_of_threads) // still alive? { const int fst = tid * step_size * 2; //get the index in array const int snd = fst + step_size; //get the index in array if (input[fst] > input[snd]) input[fst] = input[snd]; //calculate sum } step_size <<= 1; // increment step_size by 1 number_of_threads >>= 1; //decrement number of threads by 2 } } int main() { int count = SIZE; cout << "Enter the number of elements:\n" << endl; const int size = count * sizeof(int); int h[SIZE]; cout << "Enter the elements:\n" << endl; for (int i = 0; i<count; i++) { h[i] = rand()%500; } h[2] = -2; for (int i = 0; i<count; i++) { printf("%d ",h[i]); } int* d; //GPU parameter hipMalloc(&d, size); //assign memory to parameters on GPU hipMemcpy(d, h, size, hipMemcpyHostToDevice); //copy the array from CPU to GPU float elapsed=0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( sum) , dim3(1), dim3(32) , 0, 0, d); // call kernel function <<<number of blocks, number of threads= number of elements/2 hipEventRecord(stop, 0); hipEventSynchronize (stop); hipEventElapsedTime(&elapsed, start, stop); hipEventDestroy(start); hipEventDestroy(stop); cout<<"\nThe elapsed time in gpu was : "<<elapsed<<"\n"; int result; hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost); // copy the result back from GPU to CPU cout << "Min is " << result << endl; getchar(); hipFree(d); // Free the allocated memory return 0; } /* nvcc 3_2.cu -o a a.exe Thread: This is just an execution of a kernel with a given index. Each thread uses its index to access elements in array (see the kernel in my first CUDA program) such that the collection of all threads cooperatively processes the entire data set. Block: This is a group of threads. Theres not much you can say about the execution of threads within a block they could execute concurrently or serially and in no particular order. You can coordinate the threads, somewhat, using the _syncthreads() function that makes a thread stop at a certain point in the kernel until all the other threads in its block reach the same point. Grid: This is a group of blocks. Theres no synchronization at all between the blocks may allow up to 8 thread blocks to be assigned to an SM. After a block of threads is assigned to a SM, it is divided into sets of 32 threads, each called a warp. However, the size of a warp depends upon the implementation. https://www.tutorialspoint.com/cuda/index.htm */
117adf0d1841c1f479393c4b17c72f07921c625a.cu
#include "cuda_runtime.h" #include "cooperative_groups.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define SIZE 100 #include <iostream> using namespace std; __global__ void sum(int* input) // kernel function definition { const int tid = threadIdx.x; // thread id int step_size = 1; int number_of_threads = blockDim.x; // no of threads while (number_of_threads > 0) { if (tid < number_of_threads) // still alive? { const int fst = tid * step_size * 2; //get the index in array const int snd = fst + step_size; //get the index in array if (input[fst] > input[snd]) input[fst] = input[snd]; //calculate sum } step_size <<= 1; // increment step_size by 1 number_of_threads >>= 1; //decrement number of threads by 2 } } int main() { int count = SIZE; cout << "Enter the number of elements:\n" << endl; const int size = count * sizeof(int); int h[SIZE]; cout << "Enter the elements:\n" << endl; for (int i = 0; i<count; i++) { h[i] = rand()%500; } h[2] = -2; for (int i = 0; i<count; i++) { printf("%d ",h[i]); } int* d; //GPU parameter cudaMalloc(&d, size); //assign memory to parameters on GPU cudaMemcpy(d, h, size, cudaMemcpyHostToDevice); //copy the array from CPU to GPU float elapsed=0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); sum <<< 1, 32 >>>(d); // call kernel function <<<number of blocks, number of threads= number of elements/2 cudaEventRecord(stop, 0); cudaEventSynchronize (stop); cudaEventElapsedTime(&elapsed, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); cout<<"\nThe elapsed time in gpu was : "<<elapsed<<"\n"; int result; cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost); // copy the result back from GPU to CPU cout << "Min is " << result << endl; getchar(); cudaFree(d); // Free the allocated memory return 0; } /* nvcc 3_2.cu -o a a.exe Thread: This is just an execution of a kernel with a given index. Each thread uses its index to access elements in array (see the kernel in my first CUDA program) such that the collection of all threads cooperatively processes the entire data set. Block: This is a group of threads. There’s not much you can say about the execution of threads within a block – they could execute concurrently or serially and in no particular order. You can coordinate the threads, somewhat, using the _syncthreads() function that makes a thread stop at a certain point in the kernel until all the other threads in its block reach the same point. Grid: This is a group of blocks. There’s no synchronization at all between the blocks may allow up to 8 thread blocks to be assigned to an SM. After a block of threads is assigned to a SM, it is divided into sets of 32 threads, each called a warp. However, the size of a warp depends upon the implementation. https://www.tutorialspoint.com/cuda/index.htm */
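The reduction kernel above (named sum, with comments that still describe a sum) folds pairs in place in global memory with no __syncthreads() between strides, and its launch of 32 threads only ever touches the first 64 of the 100 elements. A common shared-memory formulation of a block-level minimum, as a sketch: it assumes blockDim.x is a power of two, and block_min is a made-up name:

#include <limits.h>

// Sketch: each block reduces its slice to one minimum in shared memory, with an
// explicit barrier after every stride; the per-block results are reduced afterwards
// (by a second kernel pass or a small host loop).
__global__ void block_min(const int *input, int *block_results, int n) {
    extern __shared__ int sdata[];                    // blockDim.x ints of dynamic shared memory
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? input[i] : INT_MAX;        // pad lanes that fall past the end
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride && sdata[tid + stride] < sdata[tid])
            sdata[tid] = sdata[tid + stride];
        __syncthreads();                              // every lane sees this stride's result
    }
    if (tid == 0) block_results[blockIdx.x] = sdata[0];
}

// Example launch for the 100-element array above (one block of 128 threads):
//   block_min<<<1, 128, 128 * sizeof(int)>>>(d, d_result, 100);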
ebe086610bed5b9af0841c745e1cd74cc14123bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * * Matrix Multiplication - CUDA for GPUs * * CS3210 * **/ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <assert.h> int size; typedef struct { float ** element; } matrix; long long wall_clock_time() { #ifdef __linux__ struct timespec tp; clock_gettime(CLOCK_REALTIME, &tp); return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll); #else struct timeval tv; gettimeofday(&tv, NULL); return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll); #endif } /** * Allocates memory for a matrix of size SIZE * The memory is allocated row-major order, i.e. * elements from the same row are allocated at contiguous * memory addresses. **/ void allocate_matrix(matrix* m) { int i; hipError_t rc; // allocate array for all the rows rc = hipMallocManaged((void**)&(m->element), sizeof(float*) * size); if (rc != hipSuccess) { fprintf(stdout, "CUDA error: %s\n", hipGetErrorString(rc)); exit(1); } // allocate an array for each row of the matrix for (i = 0; i < size; i++) { rc = hipMallocManaged((void**)&(m->element[i]), sizeof(float) * size); if (rc != hipSuccess) { fprintf(stdout, "CUDA error: %s\n", hipGetErrorString(rc)); exit(1); } } } /** * Free the memory allocated for a matrix. **/ void free_matrix(matrix* m) { int i; for (i = 0; i < size; i++) hipFree(m->element[i]); hipFree(m->element); } /** * Initializes the elements of the matrix with * random values between 0 and 9 **/ void init_matrix(matrix m) { int i, j; for (i = 0; i < size; i++) for (j = 0; j < size; j++) { m.element[i][j] = rand() % 10; } } /** * Initializes the elements of the matrix with * element 0. **/ void init_matrix_zero(matrix m) { int i, j; for (i = 0; i < size; i++) for (j = 0; j < size; j++) { m.element[i][j] = 0.0; } } /** * Multiplies matrix @a with matrix @b storing * the result in matrix @result * * The multiplication algorithm is the O(n^3) * algorithm */ void mm(matrix a, matrix b, matrix result) { int i, j, k; // Do the multiplication for (i = 0; i < size; i++) for (j = 0; j < size; j++) for(k = 0; k < size; k++) result.element[i][j] += a.element[i][k] * b.element[k][j]; } /** * Each kernel computes the result element (i,j). */ __global__ void mm_kernel(matrix a, matrix b, matrix result, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k; if (i >= size || j >= size) return; for(k = 0; k < size; k++) result.element[i][j] += b.element[k][j] * a.element[i][k]; } void print_matrix(matrix m) { int i, j; for (i = 0; i < size; i++) { printf("row %4d: ", i); for (j = 0; j < size; j++) printf("%6.2f ", m.element[i][j]); printf("\n"); } } void work() { matrix a, b, result1, result2; long long before, after; int correct, i, j, dim; hipError_t rc; // Allocate memory for matrices allocate_matrix(&a); allocate_matrix(&b); allocate_matrix(&result1); allocate_matrix(&result2); // Initialize matrix elements init_matrix(a); init_matrix(b); // Perform sequential matrix multiplication before = wall_clock_time(); mm(a, b, result1); after = wall_clock_time(); fprintf(stdout, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000); // Perform CUDA matrix multiplication dim3 block(32, 32); // a block of 32 x 32 CUDA threads dim = (size % 32 == 0) ? 
size / 32 : size / 32 + 1; dim3 grid(dim, dim); // a grid of CUDA thread blocks before = wall_clock_time(); hipLaunchKernelGGL(( mm_kernel), dim3(grid), dim3(block), 0, 0, a, b, result2, size); hipDeviceSynchronize(); after = wall_clock_time(); fprintf(stdout, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000); // was there any error? rc = hipGetLastError(); if (rc != hipSuccess) printf("Last CUDA error %s\n", hipGetErrorString(rc)); // Compare the results correct = 1; for (i = 0; correct && i < size; i++) for (j = 0; j < size; j++) if (result1.element[i][j] != result2.element[i][j]) { correct = 0; break; } if (correct) printf("The result matrices are identical!\n"); else printf("Difference in result matrices at element (%d, %d)!\n", i, j); free_matrix(&a); free_matrix(&b); free_matrix(&result1); free_matrix(&result2); } int main(int argc, char ** argv) { srand(0); printf("Usage: %s <size>\n", argv[0]); if (argc >= 2) size = atoi(argv[1]); else size = 1024; fprintf(stdout,"Sequential matrix multiplication of size %d\n", size); // Multiply the matrices work(); return 0; }
ebe086610bed5b9af0841c745e1cd74cc14123bf.cu
/** * * Matrix Multiplication - CUDA for GPUs * * CS3210 * **/ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <assert.h> int size; typedef struct { float ** element; } matrix; long long wall_clock_time() { #ifdef __linux__ struct timespec tp; clock_gettime(CLOCK_REALTIME, &tp); return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll); #else struct timeval tv; gettimeofday(&tv, NULL); return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll); #endif } /** * Allocates memory for a matrix of size SIZE * The memory is allocated row-major order, i.e. * elements from the same row are allocated at contiguous * memory addresses. **/ void allocate_matrix(matrix* m) { int i; cudaError_t rc; // allocate array for all the rows rc = cudaMallocManaged((void**)&(m->element), sizeof(float*) * size); if (rc != cudaSuccess) { fprintf(stdout, "CUDA error: %s\n", cudaGetErrorString(rc)); exit(1); } // allocate an array for each row of the matrix for (i = 0; i < size; i++) { rc = cudaMallocManaged((void**)&(m->element[i]), sizeof(float) * size); if (rc != cudaSuccess) { fprintf(stdout, "CUDA error: %s\n", cudaGetErrorString(rc)); exit(1); } } } /** * Free the memory allocated for a matrix. **/ void free_matrix(matrix* m) { int i; for (i = 0; i < size; i++) cudaFree(m->element[i]); cudaFree(m->element); } /** * Initializes the elements of the matrix with * random values between 0 and 9 **/ void init_matrix(matrix m) { int i, j; for (i = 0; i < size; i++) for (j = 0; j < size; j++) { m.element[i][j] = rand() % 10; } } /** * Initializes the elements of the matrix with * element 0. **/ void init_matrix_zero(matrix m) { int i, j; for (i = 0; i < size; i++) for (j = 0; j < size; j++) { m.element[i][j] = 0.0; } } /** * Multiplies matrix @a with matrix @b storing * the result in matrix @result * * The multiplication algorithm is the O(n^3) * algorithm */ void mm(matrix a, matrix b, matrix result) { int i, j, k; // Do the multiplication for (i = 0; i < size; i++) for (j = 0; j < size; j++) for(k = 0; k < size; k++) result.element[i][j] += a.element[i][k] * b.element[k][j]; } /** * Each kernel computes the result element (i,j). */ __global__ void mm_kernel(matrix a, matrix b, matrix result, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k; if (i >= size || j >= size) return; for(k = 0; k < size; k++) result.element[i][j] += b.element[k][j] * a.element[i][k]; } void print_matrix(matrix m) { int i, j; for (i = 0; i < size; i++) { printf("row %4d: ", i); for (j = 0; j < size; j++) printf("%6.2f ", m.element[i][j]); printf("\n"); } } void work() { matrix a, b, result1, result2; long long before, after; int correct, i, j, dim; cudaError_t rc; // Allocate memory for matrices allocate_matrix(&a); allocate_matrix(&b); allocate_matrix(&result1); allocate_matrix(&result2); // Initialize matrix elements init_matrix(a); init_matrix(b); // Perform sequential matrix multiplication before = wall_clock_time(); mm(a, b, result1); after = wall_clock_time(); fprintf(stdout, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000); // Perform CUDA matrix multiplication dim3 block(32, 32); // a block of 32 x 32 CUDA threads dim = (size % 32 == 0) ? 
size / 32 : size / 32 + 1; dim3 grid(dim, dim); // a grid of CUDA thread blocks before = wall_clock_time(); mm_kernel<<<grid, block>>>(a, b, result2, size); cudaDeviceSynchronize(); after = wall_clock_time(); fprintf(stdout, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000); // was there any error? rc = cudaGetLastError(); if (rc != cudaSuccess) printf("Last CUDA error %s\n", cudaGetErrorString(rc)); // Compare the results correct = 1; for (i = 0; correct && i < size; i++) for (j = 0; j < size; j++) if (result1.element[i][j] != result2.element[i][j]) { correct = 0; break; } if (correct) printf("The result matrices are identical!\n"); else printf("Difference in result matrices at element (%d, %d)!\n", i, j); free_matrix(&a); free_matrix(&b); free_matrix(&result1); free_matrix(&result2); } int main(int argc, char ** argv) { srand(0); printf("Usage: %s <size>\n", argv[0]); if (argc >= 2) size = atoi(argv[1]); else size = 1024; fprintf(stdout,"Sequential matrix multiplication of size %d\n", size); // Multiply the matrices work(); return 0; }
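The matrix-multiplication pair above is a compact illustration of how hipify rewrites a triple-chevron launch: mm_kernel<<<grid, block>>>(a, b, result2, size) becomes hipLaunchKernelGGL(( mm_kernel), dim3(grid), dim3(block), 0, 0, a, b, result2, size), where the two extra arguments are the dynamic shared-memory size and the stream that the chevron form leaves implicit. A toy sketch of that mapping under a single-source guard; scale and launch_scale are made-up names, and the HIP branch assumes the usual hip/hip_runtime.h include:

__global__ void scale(float *v, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

void launch_scale(float *d_v, float s, int n) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
#ifdef __HIPCC__
    // HIP form: kernel, grid, block, dynamic shared memory, stream, then the kernel arguments
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_v, s, n);
#else
    // CUDA form: the same five pieces of information, four of them inside the chevrons
    scale<<<grid, block, 0, 0>>>(d_v, s, n);
#endif
}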
9b77d359efd3abe04e21b7e39e6495c08b6f6f26.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <fstream> #include "hip/hip_runtime.h" #include "../common/book.h" #include "../common/cpu_anim.h" #include "nvmlpower.hpp" #include "parameters.h" // these exist on the GPU side // 2 dimension float type texture array texture<float,2> texConstSrc; texture<float,2> texIn; texture<float,2> texOut; // dstOut: a flag to swap Input and Output arrays __global__ void blend_kernel( float *dst, bool dstOut ) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; // retrieve 5 point stencil float t, l, c, r, b; if (dstOut) { t = tex2D(texIn,x,y-1); l = tex2D(texIn,x-1,y); c = tex2D(texIn,x,y); r = tex2D(texIn,x+1,y); b = tex2D(texIn,x,y+1); } else { t = tex2D(texOut,x,y-1); l = tex2D(texOut,x-1,y); c = tex2D(texOut,x,y); r = tex2D(texOut,x+1,y); b = tex2D(texOut,x,y+1); } // stencil calculation dst[offset] = c + SPEED * (t + b + r + l - 4 * c); } __global__ void copy_const_kernel( float *iptr ) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float c = tex2D(texConstSrc,x,y); if (c != 0) iptr[offset] = c; } // globals needed by the update routine struct DataBlock { unsigned char *output_bitmap; float *dev_inSrc; float *dev_outSrc; float *dev_constSrc; CPUAnimBitmap *bitmap; hipEvent_t start, stop; float totalTime; float frames; }; void anim_gpu( DataBlock *d, int ticks ) { HANDLE_ERROR( hipEventRecord( d->start, 0 ) ); dim3 blocks(DIM/BLOCKSIZE,DIM/BLOCKSIZE); dim3 threads(BLOCKSIZE,BLOCKSIZE); CPUAnimBitmap *bitmap = d->bitmap; /* * @Abdullah, used this to remove the side effect of * overlapping computation and communication in data placement efficency */ HANDLE_ERROR( hipDeviceSynchronize() ); nvmlAPIRun(); // since tex is global and bound, we have to use a flag to // select which is in/out per iteration volatile bool dstOut = true; for (int i=0; i<REPEAT; i++) { float *in, *out; if (dstOut) { in = d->dev_inSrc; out = d->dev_outSrc; } else { out = d->dev_inSrc; in = d->dev_outSrc; } hipLaunchKernelGGL(( copy_const_kernel), dim3(blocks),dim3(threads), 0, 0, in ); hipLaunchKernelGGL(( blend_kernel), dim3(blocks),dim3(threads), 0, 0, out, dstOut ); dstOut = !dstOut; } hipLaunchKernelGGL(( float_to_color), dim3(blocks),dim3(threads), 0, 0, d->output_bitmap, d->dev_inSrc ); /* @Abdullah */ HANDLE_ERROR( hipDeviceSynchronize() ); /* @Abdullah, Moved this part before the cudaMemCPY to make sure we only consider GPU time*/ HANDLE_ERROR( hipEventRecord( d->stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( d->stop ) ); /* @Abdullah */ nvmlAPIEnd(); printf ("DEBUG: bitmap->get_ptr() =%p\n", bitmap->get_ptr()); printf ("DEBUG: d->output_bitmap =%p\n", d->output_bitmap); printf ("DEBUG: 
bitmap->image_size() =%d\n", bitmap->image_size()); HANDLE_ERROR( hipMemcpy( bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), hipMemcpyDeviceToHost ) ); float elapsedTime; HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, d->start, d->stop ) ); d->totalTime += elapsedTime; ++d->frames; /*printf( "Average Time per frame: %3.1f ms\n", d->totalTime/d->frames );*/ printf( "%3.1f\n", d->totalTime/d->frames ); } // clean up memory allocated on the GPU void anim_exit( DataBlock *d ) { hipUnbindTexture( texIn ); hipUnbindTexture( texOut ); hipUnbindTexture( texConstSrc ); HANDLE_ERROR( hipFree( d->output_bitmap ) ); HANDLE_ERROR( hipFree( d->dev_inSrc ) ); HANDLE_ERROR( hipFree( d->dev_outSrc ) ); HANDLE_ERROR( hipFree( d->dev_constSrc ) ); HANDLE_ERROR( hipEventDestroy( d->start ) ); HANDLE_ERROR( hipEventDestroy( d->stop ) ); } int main(int argc, char* argv[]) { printf ("%s Execution parameters:\n", argv[0]); printf ("\tDIM:%d\n", DIM); printf ("\tBLOCKSIZE:%d\n", BLOCKSIZE); printf ("\tREPEAT:%d\n", REPEAT); DataBlock data; CPUAnimBitmap bitmap( DIM, DIM, &data ); data.bitmap = &bitmap; data.totalTime = 0; data.frames = 0; /* @NVML-power */ setUpTuningParams(argc, argv); HANDLE_ERROR( hipEventCreate( &data.start ) ); HANDLE_ERROR( hipEventCreate( &data.stop ) ); int imageSize = bitmap.image_size(); HANDLE_ERROR( hipMalloc( (void**)&data.output_bitmap, imageSize ) ); // assume float == 4 chars in size (ie rgba) HANDLE_ERROR( hipMalloc( (void**)&data.dev_inSrc, imageSize ) ); HANDLE_ERROR( hipMalloc( (void**)&data.dev_outSrc, imageSize ) ); HANDLE_ERROR( hipMalloc( (void**)&data.dev_constSrc, imageSize ) ); hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); HANDLE_ERROR( hipBindTexture2D( NULL, texConstSrc, data.dev_constSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); HANDLE_ERROR( hipBindTexture2D( NULL, texIn, data.dev_inSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); HANDLE_ERROR( hipBindTexture2D( NULL, texOut, data.dev_outSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); // initialize the constant data float *temp = (float*)malloc( imageSize ); for (int i=0; i<DIM*DIM; i++) { temp[i] = 0; int x = i % DIM; int y = i / DIM; if ((x>300) && (x<600) && (y>310) && (y<601)) temp[i] = MAX_TEMP; } temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2; temp[DIM*700+100] = MIN_TEMP; temp[DIM*300+300] = MIN_TEMP; temp[DIM*200+700] = MIN_TEMP; for (int y=800; y<900; y++) { for (int x=400; x<500; x++) { temp[x+y*DIM] = MIN_TEMP; } } HANDLE_ERROR( hipMemcpy( data.dev_constSrc, temp, imageSize, hipMemcpyHostToDevice ) ); // initialize the input data for (int y=800; y<DIM; y++) { for (int x=0; x<200; x++) { temp[x+y*DIM] = MAX_TEMP; } } HANDLE_ERROR( hipMemcpy( data.dev_inSrc, temp, imageSize, hipMemcpyHostToDevice ) ); free( temp ); // a version without depending on OpenCL library #ifdef NO_OPENCL static int ticks = 1; anim_gpu( &data, ticks++ ) ; anim_exit(&data ); #else // not interested in graphics display bitmap.anim_and_exit( (void (*)(void*,int))anim_gpu, (void (*)(void*))anim_exit ); #endif /* @Abdullah */ std::ofstream ofs ("exec-time.txt", std::ofstream::out | std::ofstream::app); ofs << argv[0] << "," << argv[1] << "," << argv[2] << "," << data.totalTime/data.frames << std::endl; ofs.close(); return 0; }
9b77d359efd3abe04e21b7e39e6495c08b6f6f26.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <fstream> #include "cuda.h" #include "../common/book.h" #include "../common/cpu_anim.h" #include "nvmlpower.hpp" #include "parameters.h" // these exist on the GPU side // 2 dimension float type texture array texture<float,2> texConstSrc; texture<float,2> texIn; texture<float,2> texOut; // dstOut: a flag to swap Input and Output arrays __global__ void blend_kernel( float *dst, bool dstOut ) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; // retrieve 5 point stencil float t, l, c, r, b; if (dstOut) { t = tex2D(texIn,x,y-1); l = tex2D(texIn,x-1,y); c = tex2D(texIn,x,y); r = tex2D(texIn,x+1,y); b = tex2D(texIn,x,y+1); } else { t = tex2D(texOut,x,y-1); l = tex2D(texOut,x-1,y); c = tex2D(texOut,x,y); r = tex2D(texOut,x+1,y); b = tex2D(texOut,x,y+1); } // stencil calculation dst[offset] = c + SPEED * (t + b + r + l - 4 * c); } __global__ void copy_const_kernel( float *iptr ) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float c = tex2D(texConstSrc,x,y); if (c != 0) iptr[offset] = c; } // globals needed by the update routine struct DataBlock { unsigned char *output_bitmap; float *dev_inSrc; float *dev_outSrc; float *dev_constSrc; CPUAnimBitmap *bitmap; cudaEvent_t start, stop; float totalTime; float frames; }; void anim_gpu( DataBlock *d, int ticks ) { HANDLE_ERROR( cudaEventRecord( d->start, 0 ) ); dim3 blocks(DIM/BLOCKSIZE,DIM/BLOCKSIZE); dim3 threads(BLOCKSIZE,BLOCKSIZE); CPUAnimBitmap *bitmap = d->bitmap; /* * @Abdullah, used this to remove the side effect of * overlapping computation and communication in data placement efficency */ HANDLE_ERROR( cudaDeviceSynchronize() ); nvmlAPIRun(); // since tex is global and bound, we have to use a flag to // select which is in/out per iteration volatile bool dstOut = true; for (int i=0; i<REPEAT; i++) { float *in, *out; if (dstOut) { in = d->dev_inSrc; out = d->dev_outSrc; } else { out = d->dev_inSrc; in = d->dev_outSrc; } copy_const_kernel<<<blocks,threads>>>( in ); blend_kernel<<<blocks,threads>>>( out, dstOut ); dstOut = !dstOut; } float_to_color<<<blocks,threads>>>( d->output_bitmap, d->dev_inSrc ); /* @Abdullah */ HANDLE_ERROR( cudaDeviceSynchronize() ); /* @Abdullah, Moved this part before the cudaMemCPY to make sure we only consider GPU time*/ HANDLE_ERROR( cudaEventRecord( d->stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( d->stop ) ); /* @Abdullah */ nvmlAPIEnd(); printf ("DEBUG: bitmap->get_ptr() =%p\n", bitmap->get_ptr()); printf ("DEBUG: d->output_bitmap =%p\n", d->output_bitmap); printf ("DEBUG: bitmap->image_size() =%d\n", bitmap->image_size()); HANDLE_ERROR( cudaMemcpy( bitmap->get_ptr(), d->output_bitmap, bitmap->image_size(), cudaMemcpyDeviceToHost ) ); float 
elapsedTime; HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, d->start, d->stop ) ); d->totalTime += elapsedTime; ++d->frames; /*printf( "Average Time per frame: %3.1f ms\n", d->totalTime/d->frames );*/ printf( "%3.1f\n", d->totalTime/d->frames ); } // clean up memory allocated on the GPU void anim_exit( DataBlock *d ) { cudaUnbindTexture( texIn ); cudaUnbindTexture( texOut ); cudaUnbindTexture( texConstSrc ); HANDLE_ERROR( cudaFree( d->output_bitmap ) ); HANDLE_ERROR( cudaFree( d->dev_inSrc ) ); HANDLE_ERROR( cudaFree( d->dev_outSrc ) ); HANDLE_ERROR( cudaFree( d->dev_constSrc ) ); HANDLE_ERROR( cudaEventDestroy( d->start ) ); HANDLE_ERROR( cudaEventDestroy( d->stop ) ); } int main(int argc, char* argv[]) { printf ("%s Execution parameters:\n", argv[0]); printf ("\tDIM:%d\n", DIM); printf ("\tBLOCKSIZE:%d\n", BLOCKSIZE); printf ("\tREPEAT:%d\n", REPEAT); DataBlock data; CPUAnimBitmap bitmap( DIM, DIM, &data ); data.bitmap = &bitmap; data.totalTime = 0; data.frames = 0; /* @NVML-power */ setUpTuningParams(argc, argv); HANDLE_ERROR( cudaEventCreate( &data.start ) ); HANDLE_ERROR( cudaEventCreate( &data.stop ) ); int imageSize = bitmap.image_size(); HANDLE_ERROR( cudaMalloc( (void**)&data.output_bitmap, imageSize ) ); // assume float == 4 chars in size (ie rgba) HANDLE_ERROR( cudaMalloc( (void**)&data.dev_inSrc, imageSize ) ); HANDLE_ERROR( cudaMalloc( (void**)&data.dev_outSrc, imageSize ) ); HANDLE_ERROR( cudaMalloc( (void**)&data.dev_constSrc, imageSize ) ); cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); HANDLE_ERROR( cudaBindTexture2D( NULL, texConstSrc, data.dev_constSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); HANDLE_ERROR( cudaBindTexture2D( NULL, texIn, data.dev_inSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); HANDLE_ERROR( cudaBindTexture2D( NULL, texOut, data.dev_outSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); // initialize the constant data float *temp = (float*)malloc( imageSize ); for (int i=0; i<DIM*DIM; i++) { temp[i] = 0; int x = i % DIM; int y = i / DIM; if ((x>300) && (x<600) && (y>310) && (y<601)) temp[i] = MAX_TEMP; } temp[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2; temp[DIM*700+100] = MIN_TEMP; temp[DIM*300+300] = MIN_TEMP; temp[DIM*200+700] = MIN_TEMP; for (int y=800; y<900; y++) { for (int x=400; x<500; x++) { temp[x+y*DIM] = MIN_TEMP; } } HANDLE_ERROR( cudaMemcpy( data.dev_constSrc, temp, imageSize, cudaMemcpyHostToDevice ) ); // initialize the input data for (int y=800; y<DIM; y++) { for (int x=0; x<200; x++) { temp[x+y*DIM] = MAX_TEMP; } } HANDLE_ERROR( cudaMemcpy( data.dev_inSrc, temp, imageSize, cudaMemcpyHostToDevice ) ); free( temp ); // a version without depending on OpenCL library #ifdef NO_OPENCL static int ticks = 1; anim_gpu( &data, ticks++ ) ; anim_exit(&data ); #else // not interested in graphics display bitmap.anim_and_exit( (void (*)(void*,int))anim_gpu, (void (*)(void*))anim_exit ); #endif /* @Abdullah */ std::ofstream ofs ("exec-time.txt", std::ofstream::out | std::ofstream::app); ofs << argv[0] << "," << argv[1] << "," << argv[2] << "," << data.totalTime/data.frames << std::endl; ofs.close(); return 0; }
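blend_kernel in the pair above fetches its 5-point stencil through texture references bound to the in/out buffers, so out-of-range neighbour reads at the borders are handled by the texture unit's addressing mode rather than by explicit checks. A global-memory sketch of the same update, assuming row-major DIM x DIM buffers and explicit clamping in place of the texture fetch; blend_kernel_plain is a made-up name:

// Sketch: the same heat update as blend_kernel, reading plain device pointers.
__global__ void blend_kernel_plain(float *dst, const float *src, int dim, float speed) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= dim || y >= dim) return;
    // clamp neighbour indices at the borders (the texture path above relies on
    // the addressing mode to do this for it)
    int left   = (x > 0)       ? x - 1 : x;
    int right  = (x < dim - 1) ? x + 1 : x;
    int top    = (y > 0)       ? y - 1 : y;
    int bottom = (y < dim - 1) ? y + 1 : y;
    float c = src[x + y * dim];
    float t = src[x + top * dim];
    float b = src[x + bottom * dim];
    float l = src[left + y * dim];
    float r = src[right + y * dim];
    dst[x + y * dim] = c + speed * (t + b + l + r - 4.0f * c);
}

The texture-reference API used above has been deprecated in recent CUDA releases, which is one reason a plain-pointer or texture-object formulation is often preferred for new code.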
6aa658c68ee6037a1ff25f3f6bd88436a19252fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "catch2/catch.hpp" #include "stencil/local_domain.cuh" TEST_CASE("case1", "[cuda]") { Dim3 arrSz(3, 4, 5); Dim3 origin(0, 0, 0); LocalDomain ld(arrSz, origin, 0); Radius radius = Radius::constant(0); radius.dir(1, 0, 0) = 2; radius.dir(-1, 0, 0) = 1; ld.set_radius(radius); ld.realize(); // +x send is size of -x side halo REQUIRE(ld.halo_extent(Dim3(1, 0, 0) * -1) == Dim3(1, 4, 5)); } TEST_CASE("curr!=next", "[cuda]") { Dim3 arrSz(3, 4, 5); Dim3 origin(0, 0, 0); LocalDomain ld(arrSz, origin, 0); auto h = ld.add_data<float>(); Radius radius = Radius::constant(0); radius.dir(1, 0, 0) = 2; radius.dir(-1, 0, 0) = 1; ld.set_radius(radius); ld.realize(); // cur and next pointers should be different REQUIRE(ld.get_curr(h).ptr != ld.get_next(h).ptr); } TEMPLATE_TEST_CASE("symmetric radius", "[cuda][template]", int, double) { const Dim3 sz(30, 40, 50); const Dim3 origin(0, 0, 0); const int gpu = 0; const size_t radius = 4; LocalDomain d0(sz, origin, gpu); d0.set_radius(radius); auto handle = d0.add_data<TestType>(); d0.realize(); PitchedPtr<TestType> p = d0.get_curr(handle); REQUIRE(p != PitchedPtr<TestType>()); SECTION("face position in halo") { bool isHalo = true; REQUIRE(Dim3(0, 4, 4) == d0.halo_pos(Dim3(-1, 0, 0), isHalo)); // -x REQUIRE(Dim3(34, 4, 4) == d0.halo_pos(Dim3(1, 0, 0), isHalo)); // +x REQUIRE(Dim3(4, 0, 4) == d0.halo_pos(Dim3(0, -1, 0), isHalo)); // -y REQUIRE(Dim3(4, 44, 4) == d0.halo_pos(Dim3(0, 1, 0), isHalo)); // +y REQUIRE(Dim3(4, 4, 0) == d0.halo_pos(Dim3(0, 0, -1), isHalo)); // -z REQUIRE(Dim3(4, 4, 54) == d0.halo_pos(Dim3(0, 0, 1), isHalo)); // +z } SECTION("face position in compute") { bool isHalo = false; REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, 0, 0), isHalo)); // -x REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, 0, 0), isHalo)); // +x REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(0, -1, 0), isHalo)); // -y REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(0, 1, 0), isHalo)); // +y REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(0, 0, -1), isHalo)); // -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(0, 0, 1), isHalo)); // +z } SECTION("face extent") { REQUIRE(Dim3(4, 40, 50) == d0.halo_extent(Dim3(-1, 0, 0))); // x face REQUIRE(Dim3(30, 4, 50) == d0.halo_extent(Dim3(0, -1, 0))); // y face REQUIRE(Dim3(30, 40, 4) == d0.halo_extent(Dim3(0, 0, -1))); // z face } SECTION("edge position in halo") { bool isHalo = true; REQUIRE(Dim3(0, 0, 4) == d0.halo_pos(Dim3(-1, -1, 0), isHalo)); // -x -y REQUIRE(Dim3(34, 0, 4) == d0.halo_pos(Dim3(1, -1, 0), isHalo)); // +x -y REQUIRE(Dim3(0, 44, 4) == d0.halo_pos(Dim3(-1, 1, 0), isHalo)); // -x +y REQUIRE(Dim3(34, 44, 4) == d0.halo_pos(Dim3(1, 1, 0), isHalo)); // +x +y REQUIRE(Dim3(0, 4, 0) == d0.halo_pos(Dim3(-1, 0, -1), isHalo)); // -x -z REQUIRE(Dim3(34, 4, 0) == d0.halo_pos(Dim3(1, 0, -1), isHalo)); // +x -z REQUIRE(Dim3(0, 4, 54) == d0.halo_pos(Dim3(-1, 0, 1), isHalo)); // -x +z REQUIRE(Dim3(34, 4, 54) == d0.halo_pos(Dim3(1, 0, 1), isHalo)); // +x +z REQUIRE(Dim3(4, 0, 0) == d0.halo_pos(Dim3(0, -1, -1), isHalo)); // -y -z REQUIRE(Dim3(4, 44, 0) == d0.halo_pos(Dim3(0, 1, -1), isHalo)); // +y -z REQUIRE(Dim3(4, 0, 54) == d0.halo_pos(Dim3(0, -1, 1), isHalo)); // -y +z REQUIRE(Dim3(4, 44, 54) == d0.halo_pos(Dim3(0, 1, 1), isHalo)); // +y +z REQUIRE(Dim3(0, 0, 4) == d0.halo_pos(Dim3(-1, -1, 0), isHalo)); // -x -y REQUIRE(Dim3(34, 0, 4) == d0.halo_pos(Dim3(1, -1, 0), isHalo)); // +x -y REQUIRE(Dim3(0, 44, 4) == d0.halo_pos(Dim3(-1, 1, 0), isHalo)); // -x +y 
REQUIRE(Dim3(34, 44, 4) == d0.halo_pos(Dim3(1, 1, 0), isHalo)); // +x +y REQUIRE(Dim3(0, 4, 0) == d0.halo_pos(Dim3(-1, 0, -1), isHalo)); // -x -z REQUIRE(Dim3(34, 4, 0) == d0.halo_pos(Dim3(1, 0, -1), isHalo)); // +x -z REQUIRE(Dim3(0, 4, 54) == d0.halo_pos(Dim3(-1, 0, 1), isHalo)); // -x +z REQUIRE(Dim3(34, 4, 54) == d0.halo_pos(Dim3(1, 0, 1), isHalo)); // +x +z REQUIRE(Dim3(4, 0, 0) == d0.halo_pos(Dim3(0, -1, -1), isHalo)); // -y -z REQUIRE(Dim3(4, 44, 0) == d0.halo_pos(Dim3(0, 1, -1), isHalo)); // +y -z REQUIRE(Dim3(4, 0, 54) == d0.halo_pos(Dim3(0, -1, 1), isHalo)); // -y +z REQUIRE(Dim3(4, 44, 54) == d0.halo_pos(Dim3(0, 1, 1), isHalo)); // +y +z } SECTION("edge position in compute") { bool isHalo = false; REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, -1, 0), isHalo)); // -x -y REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, -1, 0), isHalo)); // +x -y REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(-1, 1, 0), isHalo)); // -x +y REQUIRE(Dim3(30, 40, 4) == d0.halo_pos(Dim3(1, 1, 0), isHalo)); // +x +y REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, 0, -1), isHalo)); // -x -z REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, 0, -1), isHalo)); // +x -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(-1, 0, 1), isHalo)); // -x +z REQUIRE(Dim3(30, 4, 50) == d0.halo_pos(Dim3(1, 0, 1), isHalo)); // +x +z REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(0, -1, -1), isHalo)); // -y -z REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(0, 1, -1), isHalo)); // +y -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(0, -1, 1), isHalo)); // -y +z REQUIRE(Dim3(4, 40, 50) == d0.halo_pos(Dim3(0, 1, 1), isHalo)); // +y +z REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, -1, 0), isHalo)); // -x -y REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, -1, 0), isHalo)); // +x -y REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(-1, 1, 0), isHalo)); // -x +y REQUIRE(Dim3(30, 40, 4) == d0.halo_pos(Dim3(1, 1, 0), isHalo)); // +x +y REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, 0, -1), isHalo)); // -x -z REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, 0, -1), isHalo)); // +x -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(-1, 0, 1), isHalo)); // -x +z REQUIRE(Dim3(30, 4, 50) == d0.halo_pos(Dim3(1, 0, 1), isHalo)); // +x +z REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(0, -1, -1), isHalo)); // -y -z REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(0, 1, -1), isHalo)); // +y -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(0, -1, 1), isHalo)); // -y +z REQUIRE(Dim3(4, 40, 50) == d0.halo_pos(Dim3(0, 1, 1), isHalo)); // +y +z } SECTION("edge extent") { REQUIRE(Dim3(4, 4, 50) == d0.halo_extent(Dim3(1, 1, 0))); // x y edge REQUIRE(Dim3(4, 40, 4) == d0.halo_extent(Dim3(1, 0, 1))); // x z edge REQUIRE(Dim3(30, 4, 4) == d0.halo_extent(Dim3(0, 1, 1))); // y z edge REQUIRE(Dim3(4, 4, 50) == d0.halo_extent(Dim3(1, 1, 0))); // x y edge REQUIRE(Dim3(4, 40, 4) == d0.halo_extent(Dim3(1, 0, 1))); // x z edge REQUIRE(Dim3(30, 4, 4) == d0.halo_extent(Dim3(0, 1, 1))); // y z edge } SECTION("corner position in halo") { const bool isHalo = true; REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(-1, -1, -1), isHalo)); // -x -y -z REQUIRE(Dim3(34, 0, 0) == d0.halo_pos(Dim3(1, -1, -1), isHalo)); // +x -y -z REQUIRE(Dim3(0, 44, 0) == d0.halo_pos(Dim3(-1, 1, -1), isHalo)); // -x +y -z REQUIRE(Dim3(34, 44, 0) == d0.halo_pos(Dim3(1, 1, -1), isHalo)); // +x +y -z REQUIRE(Dim3(0, 0, 54) == d0.halo_pos(Dim3(-1, -1, 1), isHalo)); // -x -y +z REQUIRE(Dim3(34, 0, 54) == d0.halo_pos(Dim3(1, -1, 1), isHalo)); // +x -y +z REQUIRE(Dim3(0, 44, 54) == d0.halo_pos(Dim3(-1, 1, 1), isHalo)); // -x +y +z REQUIRE(Dim3(34, 44, 54) == 
d0.halo_pos(Dim3(1, 1, 1), isHalo)); // +x +y +z REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(-1, -1, -1), isHalo)); // -x -y -z REQUIRE(Dim3(34, 0, 0) == d0.halo_pos(Dim3(1, -1, -1), isHalo)); // +x -y -z REQUIRE(Dim3(0, 44, 0) == d0.halo_pos(Dim3(-1, 1, -1), isHalo)); // -x +y -z REQUIRE(Dim3(34, 44, 0) == d0.halo_pos(Dim3(1, 1, -1), isHalo)); // +x +y -z REQUIRE(Dim3(0, 0, 54) == d0.halo_pos(Dim3(-1, -1, 1), isHalo)); // -x -y +z REQUIRE(Dim3(34, 0, 54) == d0.halo_pos(Dim3(1, -1, 1), isHalo)); // +x -y +z REQUIRE(Dim3(0, 44, 54) == d0.halo_pos(Dim3(-1, 1, 1), isHalo)); // -x +y +z REQUIRE(Dim3(34, 44, 54) == d0.halo_pos(Dim3(1, 1, 1), isHalo)); // +x +y +z } SECTION("corner position in compute") { REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, -1, -1), false)); // -x -y -z REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, -1, -1), false)); // +x -y -z REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(-1, 1, -1), false)); // -x +y -z REQUIRE(Dim3(30, 40, 4) == d0.halo_pos(Dim3(1, 1, -1), false)); // +x +y -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(-1, -1, 1), false)); // -x -y +z REQUIRE(Dim3(30, 4, 50) == d0.halo_pos(Dim3(1, -1, 1), false)); // +x -y +z REQUIRE(Dim3(4, 40, 50) == d0.halo_pos(Dim3(-1, 1, 1), false)); // -x +y +z REQUIRE(Dim3(30, 40, 50) == d0.halo_pos(Dim3(1, 1, 1), false)); // +x +y +z REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, -1, -1), false)); // -x -y -z REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, -1, -1), false)); // +x -y -z REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(-1, 1, -1), false)); // -x +y -z REQUIRE(Dim3(30, 40, 4) == d0.halo_pos(Dim3(1, 1, -1), false)); // +x +y -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(-1, -1, 1), false)); // -x -y +z REQUIRE(Dim3(30, 4, 50) == d0.halo_pos(Dim3(1, -1, 1), false)); // +x -y +z REQUIRE(Dim3(4, 40, 50) == d0.halo_pos(Dim3(-1, 1, 1), false)); // -x +y +z REQUIRE(Dim3(30, 40, 50) == d0.halo_pos(Dim3(1, 1, 1), false)); // +x +y +z } SECTION("corner extent") { REQUIRE(Dim3(4, 4, 4) == d0.halo_extent(Dim3(1, 1, 1))); } } TEMPLATE_TEST_CASE("x-leaning radius", "[cuda][template]", int, double) { const Dim3 sz(30, 40, 50); const Dim3 origin(0, 0, 0); const int gpu = 0; Radius radius = Radius::constant(0); radius.dir(1, 0, 0) = 3; // +x LocalDomain d0(sz, origin, gpu); d0.set_radius(radius); auto handle = d0.add_data<TestType>(); d0.realize(); PitchedPtr<TestType> p = d0.get_curr(handle); REQUIRE(p != PitchedPtr<TestType>()); SECTION("face position in halo") { bool isHalo = true; REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(-1, 0, 0), isHalo)); // -x REQUIRE(Dim3(30, 0, 0) == d0.halo_pos(Dim3(1, 0, 0), isHalo)); // +x REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(0, -1, 0), isHalo)); // -y REQUIRE(Dim3(0, 40, 0) == d0.halo_pos(Dim3(0, 1, 0), isHalo)); // +y REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(0, 0, -1), isHalo)); // -z REQUIRE(Dim3(0, 0, 50) == d0.halo_pos(Dim3(0, 0, 1), isHalo)); // +z } SECTION("face position in compute") { bool isHalo = true; REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(-1, 0, 0), isHalo)); // -x // no interior halo region on this side since it would send to -x REQUIRE(Dim3(30, 0, 0) == d0.halo_pos(Dim3(1, 0, 0), isHalo)); // +x REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(0, -1, 0), isHalo)); // -y REQUIRE(Dim3(0, 40, 0) == d0.halo_pos(Dim3(0, 1, 0), isHalo)); // +y REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(0, 0, -1), isHalo)); // -z REQUIRE(Dim3(0, 0, 50) == d0.halo_pos(Dim3(0, 0, 1), isHalo)); // +z } SECTION("face extent") { REQUIRE(Dim3(3, 40, 50) == d0.halo_extent(Dim3(1, 0, 0))); // +x face REQUIRE(Dim3(0, 40, 50) == 
d0.halo_extent(Dim3(-1, 0, 0))); // -x face REQUIRE(Dim3(30, 0, 50) == d0.halo_extent(Dim3(0, 1, 0))); // +y face REQUIRE(Dim3(30, 0, 50) == d0.halo_extent(Dim3(0, -1, 0))); // -y face REQUIRE(Dim3(30, 40, 0) == d0.halo_extent(Dim3(0, 0, 1))); // +z face REQUIRE(Dim3(30, 40, 0) == d0.halo_extent(Dim3(0, 0, -1))); // -z face } } template <typename T> __global__ void init_kernel(PitchedPtr<T> dst, //<! [out] pointer to beginning of dst allocation const Dim3 rawSz //<! [in] logical extent of the dst allocation ) { constexpr size_t radius = 1; const Dim3 domSz = rawSz - Dim3(2 * radius, 2 * radius, 2 * radius); const size_t gdz = gridDim.z; const size_t biz = blockIdx.z; const size_t bdz = blockDim.z; const size_t tiz = threadIdx.z; const size_t gdy = gridDim.y; const size_t biy = blockIdx.y; const size_t bdy = blockDim.y; const size_t tiy = threadIdx.y; const size_t gdx = gridDim.x; const size_t bix = blockIdx.x; const size_t bdx = blockDim.x; const size_t tix = threadIdx.x; // initialize the compute domain for (size_t z = biz * bdz + tiz; z < rawSz.z; z += gdz * bdz) { for (size_t y = biy * bdy + tiy; y < rawSz.y; y += gdy * bdy) { for (size_t x = bix * bdx + tix; x < rawSz.x; x += gdx * bdx) { if (z >= radius && x >= radius && y >= radius && z < rawSz.z - radius && y < rawSz.y - radius && x < rawSz.x - radius) { dst.at(x, y, z) = 1.0; } else { dst.at(x, y, z) = 0.0; } } } } } template <typename T> __global__ void stencil_kernel(PitchedPtr<T> dst, //<! [out] beginning of dst allocation const PitchedPtr<T> src, //<! [in] beginning of src allooation const Dim3 rawSz //<! [in] 3D size of the dst allocations ) { constexpr size_t radius = 1; const Dim3 domSz = rawSz - Dim3(2 * radius, 2 * radius, 2 * radius); // assume arr is the beginning of the allocation, not the beginning of the compute domain for (int64_t z = blockIdx.z * blockDim.z + threadIdx.z; z < domSz.z; z += gridDim.z * blockDim.z) { for (int64_t y = blockIdx.y * blockDim.y + threadIdx.y; y < domSz.y; y += gridDim.y * blockDim.y) { for (int64_t x = blockIdx.x * blockDim.x + threadIdx.x; x < domSz.x; x += gridDim.x * blockDim.x) { T acc = 0; for (int dz = -1; dz <= 1; dz += 1) { for (int dy = -1; dy <= 1; dy += 1) { for (int dx = -1; dx <= 1; dx += 1) { int64_t srcX = x + dx; int64_t srcY = y + dy; int64_t srcZ = z + dz; T inc = src.at(srcX + radius, srcY + radius, srcZ + radius); acc += inc; } } } dst.at(x + radius, y + radius, z + radius) = acc; } } } } TEMPLATE_TEST_CASE("local domain", "[cuda][template]", int, double) { std::cerr << "TEST: \"local domain\"\n"; // create a domain INFO("ctor"); const Dim3 origin(0, 0, 0); LocalDomain ld(Dim3(10, 10, 10), origin, /*gpu*/ 0); ld.set_radius(1); auto h = ld.add_data<TestType>(); INFO("realize"); ld.realize(); SECTION("interior_to_host") { for (size_t qi = 0; qi < ld.num_data(); ++qi) { auto vec = ld.interior_to_host(qi); } } SECTION("quantity_to_host") { for (size_t qi = 0; qi < ld.num_data(); ++qi) { auto vec = ld.quantity_to_host(qi); REQUIRE(vec.size() == 12*12*12*sizeof(TestType)); } } } TEMPLATE_TEST_CASE("local domain stencil", "[cuda][template]", int) { std::cerr << "TEST: \"local domain stencil*\"\n"; // TODO: why does this test fail without this // test passes if run alone CUDA_RUNTIME(hipDeviceReset()); // create a domain INFO("ctor"); const Dim3 origin(0, 0, 0); LocalDomain ld(Dim3(10, 10, 10), origin, /*gpu*/ 0); ld.set_radius(1); auto h = ld.add_data<TestType>(); INFO("realize"); ld.realize(); // initialize the domain INFO("init"); dim3 dimGrid(2, 16, 32); dim3 dimBlock(1, 
1, 1); hipLaunchKernelGGL(( init_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ld.get_curr(h), ld.raw_size()); CUDA_RUNTIME(hipDeviceSynchronize()); // check the initialization INFO("d2h"); auto vec = ld.quantity_to_host(0); REQUIRE(vec.size() == 12 * 12 * 12 * sizeof(TestType)); TestType *host = reinterpret_cast<TestType *>(vec.data()); INFO("check initialization"); #define at_host(_x, _y, _z) host[(_z + 1) * 12 * 12 + (_y + 1) * 12 + (_x + 1)] REQUIRE(at_host(-1, -1, -1) == 0); REQUIRE(at_host(0, 0, 0) == 1); REQUIRE(at_host(0, 0, 9) == 1); REQUIRE(at_host(0, 9, 0) == 1); REQUIRE(at_host(0, 9, 9) == 1); REQUIRE(at_host(9, 0, 0) == 1); REQUIRE(at_host(9, 0, 9) == 1); REQUIRE(at_host(9, 9, 0) == 1); REQUIRE(at_host(9, 9, 9) == 1); REQUIRE(at_host(10, 10, 10) == 0); #undef at_host INFO("apply stencil"); hipLaunchKernelGGL(( stencil_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ld.get_next(h), ld.get_curr(h), ld.raw_size()); CUDA_RUNTIME(hipDeviceSynchronize()); /* swap so we can copy the stencil results to the host */ INFO("swap"); ld.swap(); INFO("d2h"); vec.clear(); vec = ld.quantity_to_host(0); REQUIRE(vec.size() == 12 * 12 * 12 * sizeof(TestType)); host = reinterpret_cast<TestType *>(vec.data()); // // check the results // CUDA_RUNTIME( // hipMemcpy(host, ld.get_next(h).ptr, ld.raw_size().flatten() * sizeof(TestType), hipMemcpyDeviceToHost)); INFO("check results"); #define at_host(_x, _y, _z) host[(_z + 1) * 12 * 12 + (_y + 1) * 12 + (_x + 1)] INFO("halo unchanged"); REQUIRE(0 == at_host(-1, -1, -1)); REQUIRE(0 == at_host(-1, -1, 0)); REQUIRE(0 == at_host(-1, 0, 0)); REQUIRE(0 == at_host(-1, 6, 3)); REQUIRE(0 == at_host(10, 10, 10)); INFO("corners have 8 nbrs"); #if 0 for (int y = -1; y < 11; ++y) { for (int x = -1; x < 11; ++x) { std::cerr << at_host(x, y, 1) << " "; } std::cerr << "\n"; } #endif REQUIRE(at_host(0, 0, 0) == 8); REQUIRE(at_host(0, 0, 9) == 8); REQUIRE(at_host(0, 9, 0) == 8); REQUIRE(at_host(0, 9, 9) == 8); REQUIRE(at_host(9, 0, 0) == 8); REQUIRE(at_host(9, 0, 9) == 8); REQUIRE(at_host(9, 9, 0) == 8); REQUIRE(at_host(9, 9, 9) == 8); INFO("edges have 12 nbrs"); REQUIRE(at_host(0, 0, 4) == 12); INFO("faces have 18 nbrs"); REQUIRE(at_host(0, 4, 4) == 18); INFO("center has 27 nbrs"); REQUIRE(at_host(1, 1, 1) == 27); #undef at_host }
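The "symmetric radius" REQUIREs in the test file above all follow one per-axis rule. Below is a minimal host-side sketch of that rule, inferred only from the expected values in the test; halo_pos_1d is an illustrative name, not part of the stencil library.

#include <cassert>

// For one axis with compute extent n and radius r on both sides, the values the
// test expects are consistent with:
//   exterior halo (isHalo=true):  dir=-1 -> 0,  dir=0 -> r,  dir=+1 -> r + n
//   compute side  (isHalo=false): dir=-1 -> r,  dir=0 -> r,  dir=+1 -> n  (the last r-wide slab)
int halo_pos_1d(int dir, int n, int r, bool isHalo) {
    if (dir < 0) return isHalo ? 0 : r;
    if (dir > 0) return isHalo ? r + n : n;
    return r;
}

int main() {
    // sz = (30, 40, 50), radius = 4, as in the "symmetric radius" test above
    assert(halo_pos_1d(-1, 30, 4, true)  == 0);   // -x halo
    assert(halo_pos_1d(+1, 30, 4, true)  == 34);  // +x halo
    assert(halo_pos_1d(+1, 40, 4, true)  == 44);  // +y halo
    assert(halo_pos_1d(+1, 50, 4, false) == 50);  // +z compute-side send region
    assert(halo_pos_1d( 0, 40, 4, true)  == 4);   // untouched axis sits at the radius offset
    return 0;
}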
6aa658c68ee6037a1ff25f3f6bd88436a19252fe.cu
#include "catch2/catch.hpp" #include "stencil/local_domain.cuh" TEST_CASE("case1", "[cuda]") { Dim3 arrSz(3, 4, 5); Dim3 origin(0, 0, 0); LocalDomain ld(arrSz, origin, 0); Radius radius = Radius::constant(0); radius.dir(1, 0, 0) = 2; radius.dir(-1, 0, 0) = 1; ld.set_radius(radius); ld.realize(); // +x send is size of -x side halo REQUIRE(ld.halo_extent(Dim3(1, 0, 0) * -1) == Dim3(1, 4, 5)); } TEST_CASE("curr!=next", "[cuda]") { Dim3 arrSz(3, 4, 5); Dim3 origin(0, 0, 0); LocalDomain ld(arrSz, origin, 0); auto h = ld.add_data<float>(); Radius radius = Radius::constant(0); radius.dir(1, 0, 0) = 2; radius.dir(-1, 0, 0) = 1; ld.set_radius(radius); ld.realize(); // cur and next pointers should be different REQUIRE(ld.get_curr(h).ptr != ld.get_next(h).ptr); } TEMPLATE_TEST_CASE("symmetric radius", "[cuda][template]", int, double) { const Dim3 sz(30, 40, 50); const Dim3 origin(0, 0, 0); const int gpu = 0; const size_t radius = 4; LocalDomain d0(sz, origin, gpu); d0.set_radius(radius); auto handle = d0.add_data<TestType>(); d0.realize(); PitchedPtr<TestType> p = d0.get_curr(handle); REQUIRE(p != PitchedPtr<TestType>()); SECTION("face position in halo") { bool isHalo = true; REQUIRE(Dim3(0, 4, 4) == d0.halo_pos(Dim3(-1, 0, 0), isHalo)); // -x REQUIRE(Dim3(34, 4, 4) == d0.halo_pos(Dim3(1, 0, 0), isHalo)); // +x REQUIRE(Dim3(4, 0, 4) == d0.halo_pos(Dim3(0, -1, 0), isHalo)); // -y REQUIRE(Dim3(4, 44, 4) == d0.halo_pos(Dim3(0, 1, 0), isHalo)); // +y REQUIRE(Dim3(4, 4, 0) == d0.halo_pos(Dim3(0, 0, -1), isHalo)); // -z REQUIRE(Dim3(4, 4, 54) == d0.halo_pos(Dim3(0, 0, 1), isHalo)); // +z } SECTION("face position in compute") { bool isHalo = false; REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, 0, 0), isHalo)); // -x REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, 0, 0), isHalo)); // +x REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(0, -1, 0), isHalo)); // -y REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(0, 1, 0), isHalo)); // +y REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(0, 0, -1), isHalo)); // -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(0, 0, 1), isHalo)); // +z } SECTION("face extent") { REQUIRE(Dim3(4, 40, 50) == d0.halo_extent(Dim3(-1, 0, 0))); // x face REQUIRE(Dim3(30, 4, 50) == d0.halo_extent(Dim3(0, -1, 0))); // y face REQUIRE(Dim3(30, 40, 4) == d0.halo_extent(Dim3(0, 0, -1))); // z face } SECTION("edge position in halo") { bool isHalo = true; REQUIRE(Dim3(0, 0, 4) == d0.halo_pos(Dim3(-1, -1, 0), isHalo)); // -x -y REQUIRE(Dim3(34, 0, 4) == d0.halo_pos(Dim3(1, -1, 0), isHalo)); // +x -y REQUIRE(Dim3(0, 44, 4) == d0.halo_pos(Dim3(-1, 1, 0), isHalo)); // -x +y REQUIRE(Dim3(34, 44, 4) == d0.halo_pos(Dim3(1, 1, 0), isHalo)); // +x +y REQUIRE(Dim3(0, 4, 0) == d0.halo_pos(Dim3(-1, 0, -1), isHalo)); // -x -z REQUIRE(Dim3(34, 4, 0) == d0.halo_pos(Dim3(1, 0, -1), isHalo)); // +x -z REQUIRE(Dim3(0, 4, 54) == d0.halo_pos(Dim3(-1, 0, 1), isHalo)); // -x +z REQUIRE(Dim3(34, 4, 54) == d0.halo_pos(Dim3(1, 0, 1), isHalo)); // +x +z REQUIRE(Dim3(4, 0, 0) == d0.halo_pos(Dim3(0, -1, -1), isHalo)); // -y -z REQUIRE(Dim3(4, 44, 0) == d0.halo_pos(Dim3(0, 1, -1), isHalo)); // +y -z REQUIRE(Dim3(4, 0, 54) == d0.halo_pos(Dim3(0, -1, 1), isHalo)); // -y +z REQUIRE(Dim3(4, 44, 54) == d0.halo_pos(Dim3(0, 1, 1), isHalo)); // +y +z REQUIRE(Dim3(0, 0, 4) == d0.halo_pos(Dim3(-1, -1, 0), isHalo)); // -x -y REQUIRE(Dim3(34, 0, 4) == d0.halo_pos(Dim3(1, -1, 0), isHalo)); // +x -y REQUIRE(Dim3(0, 44, 4) == d0.halo_pos(Dim3(-1, 1, 0), isHalo)); // -x +y REQUIRE(Dim3(34, 44, 4) == d0.halo_pos(Dim3(1, 1, 0), isHalo)); // +x +y REQUIRE(Dim3(0, 4, 
0) == d0.halo_pos(Dim3(-1, 0, -1), isHalo)); // -x -z REQUIRE(Dim3(34, 4, 0) == d0.halo_pos(Dim3(1, 0, -1), isHalo)); // +x -z REQUIRE(Dim3(0, 4, 54) == d0.halo_pos(Dim3(-1, 0, 1), isHalo)); // -x +z REQUIRE(Dim3(34, 4, 54) == d0.halo_pos(Dim3(1, 0, 1), isHalo)); // +x +z REQUIRE(Dim3(4, 0, 0) == d0.halo_pos(Dim3(0, -1, -1), isHalo)); // -y -z REQUIRE(Dim3(4, 44, 0) == d0.halo_pos(Dim3(0, 1, -1), isHalo)); // +y -z REQUIRE(Dim3(4, 0, 54) == d0.halo_pos(Dim3(0, -1, 1), isHalo)); // -y +z REQUIRE(Dim3(4, 44, 54) == d0.halo_pos(Dim3(0, 1, 1), isHalo)); // +y +z } SECTION("edge position in compute") { bool isHalo = false; REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, -1, 0), isHalo)); // -x -y REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, -1, 0), isHalo)); // +x -y REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(-1, 1, 0), isHalo)); // -x +y REQUIRE(Dim3(30, 40, 4) == d0.halo_pos(Dim3(1, 1, 0), isHalo)); // +x +y REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, 0, -1), isHalo)); // -x -z REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, 0, -1), isHalo)); // +x -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(-1, 0, 1), isHalo)); // -x +z REQUIRE(Dim3(30, 4, 50) == d0.halo_pos(Dim3(1, 0, 1), isHalo)); // +x +z REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(0, -1, -1), isHalo)); // -y -z REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(0, 1, -1), isHalo)); // +y -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(0, -1, 1), isHalo)); // -y +z REQUIRE(Dim3(4, 40, 50) == d0.halo_pos(Dim3(0, 1, 1), isHalo)); // +y +z REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, -1, 0), isHalo)); // -x -y REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, -1, 0), isHalo)); // +x -y REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(-1, 1, 0), isHalo)); // -x +y REQUIRE(Dim3(30, 40, 4) == d0.halo_pos(Dim3(1, 1, 0), isHalo)); // +x +y REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, 0, -1), isHalo)); // -x -z REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, 0, -1), isHalo)); // +x -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(-1, 0, 1), isHalo)); // -x +z REQUIRE(Dim3(30, 4, 50) == d0.halo_pos(Dim3(1, 0, 1), isHalo)); // +x +z REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(0, -1, -1), isHalo)); // -y -z REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(0, 1, -1), isHalo)); // +y -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(0, -1, 1), isHalo)); // -y +z REQUIRE(Dim3(4, 40, 50) == d0.halo_pos(Dim3(0, 1, 1), isHalo)); // +y +z } SECTION("edge extent") { REQUIRE(Dim3(4, 4, 50) == d0.halo_extent(Dim3(1, 1, 0))); // x y edge REQUIRE(Dim3(4, 40, 4) == d0.halo_extent(Dim3(1, 0, 1))); // x z edge REQUIRE(Dim3(30, 4, 4) == d0.halo_extent(Dim3(0, 1, 1))); // y z edge REQUIRE(Dim3(4, 4, 50) == d0.halo_extent(Dim3(1, 1, 0))); // x y edge REQUIRE(Dim3(4, 40, 4) == d0.halo_extent(Dim3(1, 0, 1))); // x z edge REQUIRE(Dim3(30, 4, 4) == d0.halo_extent(Dim3(0, 1, 1))); // y z edge } SECTION("corner position in halo") { const bool isHalo = true; REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(-1, -1, -1), isHalo)); // -x -y -z REQUIRE(Dim3(34, 0, 0) == d0.halo_pos(Dim3(1, -1, -1), isHalo)); // +x -y -z REQUIRE(Dim3(0, 44, 0) == d0.halo_pos(Dim3(-1, 1, -1), isHalo)); // -x +y -z REQUIRE(Dim3(34, 44, 0) == d0.halo_pos(Dim3(1, 1, -1), isHalo)); // +x +y -z REQUIRE(Dim3(0, 0, 54) == d0.halo_pos(Dim3(-1, -1, 1), isHalo)); // -x -y +z REQUIRE(Dim3(34, 0, 54) == d0.halo_pos(Dim3(1, -1, 1), isHalo)); // +x -y +z REQUIRE(Dim3(0, 44, 54) == d0.halo_pos(Dim3(-1, 1, 1), isHalo)); // -x +y +z REQUIRE(Dim3(34, 44, 54) == d0.halo_pos(Dim3(1, 1, 1), isHalo)); // +x +y +z REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(-1, 
-1, -1), isHalo)); // -x -y -z REQUIRE(Dim3(34, 0, 0) == d0.halo_pos(Dim3(1, -1, -1), isHalo)); // +x -y -z REQUIRE(Dim3(0, 44, 0) == d0.halo_pos(Dim3(-1, 1, -1), isHalo)); // -x +y -z REQUIRE(Dim3(34, 44, 0) == d0.halo_pos(Dim3(1, 1, -1), isHalo)); // +x +y -z REQUIRE(Dim3(0, 0, 54) == d0.halo_pos(Dim3(-1, -1, 1), isHalo)); // -x -y +z REQUIRE(Dim3(34, 0, 54) == d0.halo_pos(Dim3(1, -1, 1), isHalo)); // +x -y +z REQUIRE(Dim3(0, 44, 54) == d0.halo_pos(Dim3(-1, 1, 1), isHalo)); // -x +y +z REQUIRE(Dim3(34, 44, 54) == d0.halo_pos(Dim3(1, 1, 1), isHalo)); // +x +y +z } SECTION("corner position in compute") { REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, -1, -1), false)); // -x -y -z REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, -1, -1), false)); // +x -y -z REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(-1, 1, -1), false)); // -x +y -z REQUIRE(Dim3(30, 40, 4) == d0.halo_pos(Dim3(1, 1, -1), false)); // +x +y -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(-1, -1, 1), false)); // -x -y +z REQUIRE(Dim3(30, 4, 50) == d0.halo_pos(Dim3(1, -1, 1), false)); // +x -y +z REQUIRE(Dim3(4, 40, 50) == d0.halo_pos(Dim3(-1, 1, 1), false)); // -x +y +z REQUIRE(Dim3(30, 40, 50) == d0.halo_pos(Dim3(1, 1, 1), false)); // +x +y +z REQUIRE(Dim3(4, 4, 4) == d0.halo_pos(Dim3(-1, -1, -1), false)); // -x -y -z REQUIRE(Dim3(30, 4, 4) == d0.halo_pos(Dim3(1, -1, -1), false)); // +x -y -z REQUIRE(Dim3(4, 40, 4) == d0.halo_pos(Dim3(-1, 1, -1), false)); // -x +y -z REQUIRE(Dim3(30, 40, 4) == d0.halo_pos(Dim3(1, 1, -1), false)); // +x +y -z REQUIRE(Dim3(4, 4, 50) == d0.halo_pos(Dim3(-1, -1, 1), false)); // -x -y +z REQUIRE(Dim3(30, 4, 50) == d0.halo_pos(Dim3(1, -1, 1), false)); // +x -y +z REQUIRE(Dim3(4, 40, 50) == d0.halo_pos(Dim3(-1, 1, 1), false)); // -x +y +z REQUIRE(Dim3(30, 40, 50) == d0.halo_pos(Dim3(1, 1, 1), false)); // +x +y +z } SECTION("corner extent") { REQUIRE(Dim3(4, 4, 4) == d0.halo_extent(Dim3(1, 1, 1))); } } TEMPLATE_TEST_CASE("x-leaning radius", "[cuda][template]", int, double) { const Dim3 sz(30, 40, 50); const Dim3 origin(0, 0, 0); const int gpu = 0; Radius radius = Radius::constant(0); radius.dir(1, 0, 0) = 3; // +x LocalDomain d0(sz, origin, gpu); d0.set_radius(radius); auto handle = d0.add_data<TestType>(); d0.realize(); PitchedPtr<TestType> p = d0.get_curr(handle); REQUIRE(p != PitchedPtr<TestType>()); SECTION("face position in halo") { bool isHalo = true; REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(-1, 0, 0), isHalo)); // -x REQUIRE(Dim3(30, 0, 0) == d0.halo_pos(Dim3(1, 0, 0), isHalo)); // +x REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(0, -1, 0), isHalo)); // -y REQUIRE(Dim3(0, 40, 0) == d0.halo_pos(Dim3(0, 1, 0), isHalo)); // +y REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(0, 0, -1), isHalo)); // -z REQUIRE(Dim3(0, 0, 50) == d0.halo_pos(Dim3(0, 0, 1), isHalo)); // +z } SECTION("face position in compute") { bool isHalo = true; REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(-1, 0, 0), isHalo)); // -x // no interior halo region on this side since it would send to -x REQUIRE(Dim3(30, 0, 0) == d0.halo_pos(Dim3(1, 0, 0), isHalo)); // +x REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(0, -1, 0), isHalo)); // -y REQUIRE(Dim3(0, 40, 0) == d0.halo_pos(Dim3(0, 1, 0), isHalo)); // +y REQUIRE(Dim3(0, 0, 0) == d0.halo_pos(Dim3(0, 0, -1), isHalo)); // -z REQUIRE(Dim3(0, 0, 50) == d0.halo_pos(Dim3(0, 0, 1), isHalo)); // +z } SECTION("face extent") { REQUIRE(Dim3(3, 40, 50) == d0.halo_extent(Dim3(1, 0, 0))); // +x face REQUIRE(Dim3(0, 40, 50) == d0.halo_extent(Dim3(-1, 0, 0))); // -x face REQUIRE(Dim3(30, 0, 50) == d0.halo_extent(Dim3(0, 
1, 0))); // +y face REQUIRE(Dim3(30, 0, 50) == d0.halo_extent(Dim3(0, -1, 0))); // -y face REQUIRE(Dim3(30, 40, 0) == d0.halo_extent(Dim3(0, 0, 1))); // +z face REQUIRE(Dim3(30, 40, 0) == d0.halo_extent(Dim3(0, 0, -1))); // -z face } } template <typename T> __global__ void init_kernel(PitchedPtr<T> dst, //<! [out] pointer to beginning of dst allocation const Dim3 rawSz //<! [in] logical extent of the dst allocation ) { constexpr size_t radius = 1; const Dim3 domSz = rawSz - Dim3(2 * radius, 2 * radius, 2 * radius); const size_t gdz = gridDim.z; const size_t biz = blockIdx.z; const size_t bdz = blockDim.z; const size_t tiz = threadIdx.z; const size_t gdy = gridDim.y; const size_t biy = blockIdx.y; const size_t bdy = blockDim.y; const size_t tiy = threadIdx.y; const size_t gdx = gridDim.x; const size_t bix = blockIdx.x; const size_t bdx = blockDim.x; const size_t tix = threadIdx.x; // initialize the compute domain for (size_t z = biz * bdz + tiz; z < rawSz.z; z += gdz * bdz) { for (size_t y = biy * bdy + tiy; y < rawSz.y; y += gdy * bdy) { for (size_t x = bix * bdx + tix; x < rawSz.x; x += gdx * bdx) { if (z >= radius && x >= radius && y >= radius && z < rawSz.z - radius && y < rawSz.y - radius && x < rawSz.x - radius) { dst.at(x, y, z) = 1.0; } else { dst.at(x, y, z) = 0.0; } } } } } template <typename T> __global__ void stencil_kernel(PitchedPtr<T> dst, //<! [out] beginning of dst allocation const PitchedPtr<T> src, //<! [in] beginning of src allooation const Dim3 rawSz //<! [in] 3D size of the dst allocations ) { constexpr size_t radius = 1; const Dim3 domSz = rawSz - Dim3(2 * radius, 2 * radius, 2 * radius); // assume arr is the beginning of the allocation, not the beginning of the compute domain for (int64_t z = blockIdx.z * blockDim.z + threadIdx.z; z < domSz.z; z += gridDim.z * blockDim.z) { for (int64_t y = blockIdx.y * blockDim.y + threadIdx.y; y < domSz.y; y += gridDim.y * blockDim.y) { for (int64_t x = blockIdx.x * blockDim.x + threadIdx.x; x < domSz.x; x += gridDim.x * blockDim.x) { T acc = 0; for (int dz = -1; dz <= 1; dz += 1) { for (int dy = -1; dy <= 1; dy += 1) { for (int dx = -1; dx <= 1; dx += 1) { int64_t srcX = x + dx; int64_t srcY = y + dy; int64_t srcZ = z + dz; T inc = src.at(srcX + radius, srcY + radius, srcZ + radius); acc += inc; } } } dst.at(x + radius, y + radius, z + radius) = acc; } } } } TEMPLATE_TEST_CASE("local domain", "[cuda][template]", int, double) { std::cerr << "TEST: \"local domain\"\n"; // create a domain INFO("ctor"); const Dim3 origin(0, 0, 0); LocalDomain ld(Dim3(10, 10, 10), origin, /*gpu*/ 0); ld.set_radius(1); auto h = ld.add_data<TestType>(); INFO("realize"); ld.realize(); SECTION("interior_to_host") { for (size_t qi = 0; qi < ld.num_data(); ++qi) { auto vec = ld.interior_to_host(qi); } } SECTION("quantity_to_host") { for (size_t qi = 0; qi < ld.num_data(); ++qi) { auto vec = ld.quantity_to_host(qi); REQUIRE(vec.size() == 12*12*12*sizeof(TestType)); } } } TEMPLATE_TEST_CASE("local domain stencil", "[cuda][template]", int) { std::cerr << "TEST: \"local domain stencil*\"\n"; // TODO: why does this test fail without this // test passes if run alone CUDA_RUNTIME(cudaDeviceReset()); // create a domain INFO("ctor"); const Dim3 origin(0, 0, 0); LocalDomain ld(Dim3(10, 10, 10), origin, /*gpu*/ 0); ld.set_radius(1); auto h = ld.add_data<TestType>(); INFO("realize"); ld.realize(); // initialize the domain INFO("init"); dim3 dimGrid(2, 16, 32); dim3 dimBlock(1, 1, 1); init_kernel<<<dimGrid, dimBlock>>>(ld.get_curr(h), ld.raw_size()); 
CUDA_RUNTIME(cudaDeviceSynchronize()); // check the initialization INFO("d2h"); auto vec = ld.quantity_to_host(0); REQUIRE(vec.size() == 12 * 12 * 12 * sizeof(TestType)); TestType *host = reinterpret_cast<TestType *>(vec.data()); INFO("check initialization"); #define at_host(_x, _y, _z) host[(_z + 1) * 12 * 12 + (_y + 1) * 12 + (_x + 1)] REQUIRE(at_host(-1, -1, -1) == 0); REQUIRE(at_host(0, 0, 0) == 1); REQUIRE(at_host(0, 0, 9) == 1); REQUIRE(at_host(0, 9, 0) == 1); REQUIRE(at_host(0, 9, 9) == 1); REQUIRE(at_host(9, 0, 0) == 1); REQUIRE(at_host(9, 0, 9) == 1); REQUIRE(at_host(9, 9, 0) == 1); REQUIRE(at_host(9, 9, 9) == 1); REQUIRE(at_host(10, 10, 10) == 0); #undef at_host INFO("apply stencil"); stencil_kernel<<<dimGrid, dimBlock>>>(ld.get_next(h), ld.get_curr(h), ld.raw_size()); CUDA_RUNTIME(cudaDeviceSynchronize()); /* swap so we can copy the stencil results to the host */ INFO("swap"); ld.swap(); INFO("d2h"); vec.clear(); vec = ld.quantity_to_host(0); REQUIRE(vec.size() == 12 * 12 * 12 * sizeof(TestType)); host = reinterpret_cast<TestType *>(vec.data()); // // check the results // CUDA_RUNTIME( // cudaMemcpy(host, ld.get_next(h).ptr, ld.raw_size().flatten() * sizeof(TestType), cudaMemcpyDeviceToHost)); INFO("check results"); #define at_host(_x, _y, _z) host[(_z + 1) * 12 * 12 + (_y + 1) * 12 + (_x + 1)] INFO("halo unchanged"); REQUIRE(0 == at_host(-1, -1, -1)); REQUIRE(0 == at_host(-1, -1, 0)); REQUIRE(0 == at_host(-1, 0, 0)); REQUIRE(0 == at_host(-1, 6, 3)); REQUIRE(0 == at_host(10, 10, 10)); INFO("corners have 8 nbrs"); #if 0 for (int y = -1; y < 11; ++y) { for (int x = -1; x < 11; ++x) { std::cerr << at_host(x, y, 1) << " "; } std::cerr << "\n"; } #endif REQUIRE(at_host(0, 0, 0) == 8); REQUIRE(at_host(0, 0, 9) == 8); REQUIRE(at_host(0, 9, 0) == 8); REQUIRE(at_host(0, 9, 9) == 8); REQUIRE(at_host(9, 0, 0) == 8); REQUIRE(at_host(9, 0, 9) == 8); REQUIRE(at_host(9, 9, 0) == 8); REQUIRE(at_host(9, 9, 9) == 8); INFO("edges have 12 nbrs"); REQUIRE(at_host(0, 0, 4) == 12); INFO("faces have 18 nbrs"); REQUIRE(at_host(0, 4, 4) == 18); INFO("center has 27 nbrs"); REQUIRE(at_host(1, 1, 1) == 27); #undef at_host }
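Comparing this .cu file with its .hip counterpart earlier in the listing, the conversion is mechanical: a hipify banner and hip/hip_runtime.h include are added, runtime calls are renamed, and the triple-chevron launches become hipLaunchKernelGGL while the test logic stays identical. A stand-alone sketch of the launch-site mapping, using a throwaway kernel rather than the test's own:

#include <cuda_runtime.h>

__global__ void dummy_kernel(int* out) { out[threadIdx.x] = threadIdx.x; }

int main() {
    int* d = nullptr;
    cudaMalloc(&d, 32 * sizeof(int));
    // CUDA launch (as written in the .cu test above):
    dummy_kernel<<<dim3(1), dim3(32)>>>(d);
    // hipify rewrites the launch to (as in the .hip counterpart):
    //   hipLaunchKernelGGL((dummy_kernel), dim3(1), dim3(32), 0 /*dyn. shared mem*/, 0 /*stream*/, d);
    cudaDeviceSynchronize();   // hipify: hipDeviceSynchronize(); cudaDeviceReset -> hipDeviceReset likewise
    cudaFree(d);
    return 0;
}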
64dfa5523b74b309918c531bf3e88219636bbf46.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
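The generated file above relies on the explicit-template-instantiation idiom: the heavy Convolution wrapper is compiled once in this translation unit (its body comes from the included .cuinl) and only declared elsewhere. A generic, hedged C++ sketch of the pattern with placeholder names, not MegDNN's:

#include <cstdint>

// Placeholder template; in the real file the body comes from
// conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl.
template <typename Conv>
void convolution_wrapper(const typename Conv::ElementSrc* src, typename Conv::ElementDst* dst) {
    // (real body launches the CUTLASS kernel; omitted here)
    (void)src; (void)dst;
}

// Each generated file pins down exactly one configuration, so the template is
// instantiated once for it and other translation units only need a declaration.
struct MyConvConfig {
    using ElementSrc = int8_t;
    using ElementDst = int8_t;
};
template void convolution_wrapper<MyConvConfig>(const int8_t*, int8_t*);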
64dfa5523b74b309918c531bf3e88219636bbf46.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>; using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
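Within this pair, the only difference between the .hip and .cu files is the stream type in the instantiated signature (hipStream_t vs cudaStream_t). A hedged sketch of the portability alias some single-source projects use instead of emitting both variants; the gpu* names here are made up, not MegDNN's:

// Hypothetical single-source alias; MegDNN instead generates one .cu and one .hip
// variant of each wrapper, as in the pair above.
#if defined(__HIP_PLATFORM_AMD__)
    #include <hip/hip_runtime.h>
    using gpuStream_t = hipStream_t;
#else
    #include <cuda_runtime.h>
    using gpuStream_t = cudaStream_t;
#endif

// A wrapper declared as
//   template <typename Conv> void wrapper(..., gpuStream_t stream);
// could then keep an identical signature on both backends.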
d8a9518a1113dd442389de1a162506f017a3b1d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define BLOCK_SIZE 32 #define N 3200 __global__ void matMult(float* a, float* b, int n, float* c) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0.0f; int ia = n * BLOCK_SIZE * by + n * ty; int ib = BLOCK_SIZE * bx + tx; for (int k = 0; k < n; k++) sum += a[ia + k] * b[ib + k * n]; int ic = n * BLOCK_SIZE * by + BLOCK_SIZE * bx; c[ic + n * ty + tx] = sum; } int main(int argc, char* argv[]) { printf("START\n"); int numBytes = N * N * sizeof(float); float* a = new float[N * N]; float* b = new float[N * N]; float* c = new float[N * N]; for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) { int k = N * i + j; a[k] = 1.0f; b[k] = 1.0f; } float* adev = NULL; float* bdev = NULL; float* cdev = NULL; hipMalloc((void**)&adev, numBytes); hipMalloc((void**)&bdev, numBytes); hipMalloc((void**)&cdev, numBytes); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 blocks(N / threads.x, N / threads.y); hipMemcpy(adev, a, numBytes, hipMemcpyHostToDevice); hipMemcpy(bdev, b, numBytes, hipMemcpyHostToDevice); matMult << <blocks, threads >> > (adev, bdev, N, cdev); hipMemcpy(c, cdev, numBytes, hipMemcpyDeviceToHost); printf("END %d", c[1]); hipFree(adev); hipFree(bdev); hipFree(cdev); delete a; delete b; delete c; return 0; }
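Besides the launch syntax, the other systematic rewrite visible in this .hip file is the runtime-API rename (header, allocation, copy, enum constants, free). A minimal runnable sketch in the CUDA spelling, with the corresponding hipify output noted in comments:

#include <cuda_runtime.h>   // hipify: #include <hip/hip_runtime.h>

int main() {
    const int n = 16;
    float host[n] = {0};
    float* dev = nullptr;
    cudaMalloc((void**)&dev, n * sizeof(float));                        // hipify: hipMalloc
    cudaMemcpy(dev, host, n * sizeof(float), cudaMemcpyHostToDevice);   // hipify: hipMemcpy + hipMemcpyHostToDevice
    cudaMemcpy(host, dev, n * sizeof(float), cudaMemcpyDeviceToHost);   // hipify: hipMemcpy + hipMemcpyDeviceToHost
    cudaFree(dev);                                                      // hipify: hipFree
    return 0;
}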
d8a9518a1113dd442389de1a162506f017a3b1d8.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define BLOCK_SIZE 32 #define N 3200 __global__ void matMult(float* a, float* b, int n, float* c) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0.0f; int ia = n * BLOCK_SIZE * by + n * ty; int ib = BLOCK_SIZE * bx + tx; for (int k = 0; k < n; k++) sum += a[ia + k] * b[ib + k * n]; int ic = n * BLOCK_SIZE * by + BLOCK_SIZE * bx; c[ic + n * ty + tx] = sum; } int main(int argc, char* argv[]) { printf("START\n"); int numBytes = N * N * sizeof(float); float* a = new float[N * N]; float* b = new float[N * N]; float* c = new float[N * N]; for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) { int k = N * i + j; a[k] = 1.0f; b[k] = 1.0f; } float* adev = NULL; float* bdev = NULL; float* cdev = NULL; cudaMalloc((void**)&adev, numBytes); cudaMalloc((void**)&bdev, numBytes); cudaMalloc((void**)&cdev, numBytes); dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 blocks(N / threads.x, N / threads.y); cudaMemcpy(adev, a, numBytes, cudaMemcpyHostToDevice); cudaMemcpy(bdev, b, numBytes, cudaMemcpyHostToDevice); matMult << <blocks, threads >> > (adev, bdev, N, cdev); cudaMemcpy(c, cdev, numBytes, cudaMemcpyDeviceToHost); printf("END %d", c[1]); cudaFree(adev); cudaFree(bdev); cudaFree(cdev); delete a; delete b; delete c; return 0; }
cf7d80afe109387756ab0b065031f104d1467ba2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sortMatrix ( const int nd, const float *a, float *sm ) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; int ij = i + j * nd; if ( i < nd && j < nd ) { sm[ij] = ( a[i] > a[j] ); } }
cf7d80afe109387756ab0b065031f104d1467ba2.cu
#include "includes.h" __global__ void sortMatrix ( const int nd, const float *a, float *sm ) { int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; int ij = i + j * nd; if ( i < nd && j < nd ) { sm[ij] = ( a[i] > a[j] ); } }
2f595a226e1cac681b582da0a517df920e627117.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <iostream> #include <vector> #include <algorithm> #include <cassert> #include <numeric> using std::accumulate; using std::generate; using std::cout; using std::vector; #define SHMEM_SIZE 256 // For last iteration ( save useless work) // Use volatile to prevent caching in registers (compiler optimization) // No __syncthreads() necessary __device__ void warpReduce(volatile int * shmem_ptr, int t) { shmem_ptr[t] += shmem_ptr[t + 32]; shmem_ptr[t] += shmem_ptr[t + 16]; shmem_ptr[t] += shmem_ptr[t + 8]; shmem_ptr[t] += shmem_ptr[t + 4]; shmem_ptr[t] += shmem_ptr[t + 2]; shmem_ptr[t] += shmem_ptr[t + 1]; } __global__ void sum_reduction(int *v, int *v_r) { // Allocate shared memory __shared__ int partial_sum[SHMEM_SIZE]; // Calculate thread ID int tid = blockIdx.x * blockDim.x + threadIdx.x; // Load elements AND do first add of reduction // Vector now 2x as long as number of threads, so scale i int i = blockIdx.x * ( 2 * blockDim.x) + threadIdx.x; // Store first partial result instead of just the elements partial_sum[threadIdx.x] = v[i] + v[i + blockDim.x]; __syncthreads(); // Iterate of logbase 2 the block dimension // Stop early (call device function instead) for(int s = blockDim.x/2; s > 32 ; s>>=1 ) { if(threadIdx.x < s) // each thread does work unless it goes off the block { partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s ]; } __syncthreads(); } if(threadIdx.x < 32) { warpReduce(partial_sum, threadIdx.x); } // Let the thread 0 for this block write it's results to main memory // Result indexed by this block if(threadIdx.x == 0 ) { v_r[blockIdx.x] = partial_sum[0]; } } int main() { // vector size int N = 1<<16; size_t bytes = N * sizeof(int); // host data vector<int> h_v(N); vector<int> h_v_r(N); // initialize vector generate(begin(h_v), end(h_v), [](){return rand() % 10; }); // device memory int *d_v, *d_v_r; hipMalloc(&d_v, bytes); hipMalloc(&d_v_r, bytes); // copy from host ( CPU ) to device ( GPU ) hipMemcpy(d_v, h_v.data(), bytes, hipMemcpyHostToDevice); // Thread block size const int TB_SIZE = 256; // The Grid size int GRID_SIZE = N/TB_SIZE/2; // call the kernels hipLaunchKernelGGL(( sum_reduction), dim3(GRID_SIZE), dim3(TB_SIZE), 0, 0, d_v, d_v_r); // first convert the 65536 vector into a 256 sized vector of partial sums hipLaunchKernelGGL(( sum_reduction), dim3(1), dim3(TB_SIZE), 0, 0, d_v_r, d_v_r); // use the 256 sized vector of partial sums to calculate the final sum hipMemcpy(h_v_r.data(), d_v_r, bytes, hipMemcpyDeviceToHost); // check the result assert(h_v_r[0] == std::accumulate(begin(h_v), end(h_v), 0)); cout<<"COMPLETED SUCCESSFULLY\n"; return 0; }
2f595a226e1cac681b582da0a517df920e627117.cu
#include <cstdlib> #include <iostream> #include <vector> #include <algorithm> #include <cassert> #include <numeric> using std::accumulate; using std::generate; using std::cout; using std::vector; #define SHMEM_SIZE 256 // For last iteration ( save useless work) // Use volatile to prevent caching in registers (compiler optimization) // No __syncthreads() necessary __device__ void warpReduce(volatile int * shmem_ptr, int t) { shmem_ptr[t] += shmem_ptr[t + 32]; shmem_ptr[t] += shmem_ptr[t + 16]; shmem_ptr[t] += shmem_ptr[t + 8]; shmem_ptr[t] += shmem_ptr[t + 4]; shmem_ptr[t] += shmem_ptr[t + 2]; shmem_ptr[t] += shmem_ptr[t + 1]; } __global__ void sum_reduction(int *v, int *v_r) { // Allocate shared memory __shared__ int partial_sum[SHMEM_SIZE]; // Calculate thread ID int tid = blockIdx.x * blockDim.x + threadIdx.x; // Load elements AND do first add of reduction // Vector now 2x as long as number of threads, so scale i int i = blockIdx.x * ( 2 * blockDim.x) + threadIdx.x; // Store first partial result instead of just the elements partial_sum[threadIdx.x] = v[i] + v[i + blockDim.x]; __syncthreads(); // Iterate of logbase 2 the block dimension // Stop early (call device function instead) for(int s = blockDim.x/2; s > 32 ; s>>=1 ) { if(threadIdx.x < s) // each thread does work unless it goes off the block { partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s ]; } __syncthreads(); } if(threadIdx.x < 32) { warpReduce(partial_sum, threadIdx.x); } // Let the thread 0 for this block write it's results to main memory // Result indexed by this block if(threadIdx.x == 0 ) { v_r[blockIdx.x] = partial_sum[0]; } } int main() { // vector size int N = 1<<16; size_t bytes = N * sizeof(int); // host data vector<int> h_v(N); vector<int> h_v_r(N); // initialize vector generate(begin(h_v), end(h_v), [](){return rand() % 10; }); // device memory int *d_v, *d_v_r; cudaMalloc(&d_v, bytes); cudaMalloc(&d_v_r, bytes); // copy from host ( CPU ) to device ( GPU ) cudaMemcpy(d_v, h_v.data(), bytes, cudaMemcpyHostToDevice); // Thread block size const int TB_SIZE = 256; // The Grid size int GRID_SIZE = N/TB_SIZE/2; // call the kernels sum_reduction<<<GRID_SIZE, TB_SIZE>>>(d_v, d_v_r); // first convert the 65536 vector into a 256 sized vector of partial sums sum_reduction<<<1, TB_SIZE>>>(d_v_r, d_v_r); // use the 256 sized vector of partial sums to calculate the final sum cudaMemcpy(h_v_r.data(), d_v_r, bytes, cudaMemcpyDeviceToHost); // check the result assert(h_v_r[0] == std::accumulate(begin(h_v), end(h_v), 0)); cout<<"COMPLETED SUCCESSFULLY\n"; return 0; }
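Two notes on this reduction pair. First, as far as I can tell the second launch (one block of TB_SIZE threads) reads d_v_r up to index 511, while the first pass (GRID_SIZE = 65536/256/2 = 128 blocks) only wrote indices 0..127, so the final assert leans on the freshly allocated buffer happening to contain zeros; a cudaMemset of d_v_r (or a 64-thread second launch) would make it robust. Second, the volatile shared-memory warpReduce has a common warp-shuffle alternative; a sketch, not part of the file above:

// Reduce one warp's partial sums register-to-register; no shared memory or volatile needed.
__device__ int warpReduceShfl(int val) {
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;   // lane 0 ends up holding the warp's sum
}

// Inside sum_reduction the tail would then look roughly like:
//   if (threadIdx.x < 32) {
//       int v = partial_sum[threadIdx.x] + partial_sum[threadIdx.x + 32];  // fold 64 -> 32 first
//       v = warpReduceShfl(v);
//       if (threadIdx.x == 0) v_r[blockIdx.x] = v;
//   }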
0b66f7d63892c8d65fd11a9251b84e0488021ea1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> void MatrixAddC(float* A, float* B, float* S, int Width, int Height, int offset) { int col = 0; int row = 0; int DestIndex = 0; int N = Width * Height; for (col = 0; col < Width; col++) { for (row = 0; row < Height; row++) { DestIndex = col * Width + row; S[DestIndex] = A[DestIndex + offset] + B[DestIndex + offset]; } } } __global__ void MatrixAddGlobalMem(float* A, float* B, float* S, int Width, int Height, int offset) { int tid, tx, ty, N; tx = threadIdx.x + blockIdx.x * blockDim.x; ty = threadIdx.y + blockIdx.y * blockDim.y; tid = Width * ty + tx; N = Width * Height; int k = tid + offset; if (k < N) S[tid] = A[k] + B[k]; } int main() { int nx = 1600; int ny = 1600; int dimx = 32; int dimy = 32; int offset = 0; dim3 block(dimx, dimy); dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y); const int MatrixSize = nx * ny; const int BufferSize = MatrixSize * sizeof(float); float* A; float* B; float* Sum; float* S_C; // Host memory allocation A = (float*)malloc(BufferSize); B = (float*)malloc(BufferSize); Sum = (float*)malloc(BufferSize); S_C = (float*)malloc(BufferSize); // Data input for (int i = 0; i < MatrixSize; i++) { A[i] = i; B[i] = i; Sum[i] = 0; } float* dev_A; float* dev_B; float* dev_S; // Device memory allocation hipMalloc((float**)&dev_A, BufferSize); hipMalloc((float**)&dev_B, BufferSize); hipMalloc((float**)&dev_S, BufferSize); // Copy host to device hipMemcpy(dev_A, A, BufferSize, hipMemcpyHostToDevice); hipMemcpy(dev_B, B, BufferSize, hipMemcpyHostToDevice); // Kernel function hipLaunchKernelGGL(( MatrixAddGlobalMem) , dim3(grid), dim3(block), 0, 0, dev_A, dev_B, dev_S, nx, ny, offset); // Copy result from Device to Host hipMemcpy(Sum, dev_S, BufferSize, hipMemcpyDeviceToHost); MatrixAddC(A, B, S_C, nx, ny, offset); bool ResultFlag = true; for (int i = 0; i < MatrixSize; i++) { if (Sum[i] != S_C[i]) { ResultFlag = false; printf("Error Matrix Add at element %d\n", i); break; } } if (ResultFlag == true) printf("Matrix Add is OK\n"); else printf("Error Matrix Add\n"); // Free memory hipFree(dev_A); hipFree(dev_B); hipFree(dev_S); free(A); free(B); free(Sum); free(S_C); }
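The grid sizing in this file rounds up so that every element of the 1600 x 1600 matrix gets a thread: grid.x = (nx + block.x - 1) / block.x. A tiny helper some codebases factor this into; the div_up name is illustrative, not from the file:

__host__ __device__ inline unsigned int div_up(unsigned int a, unsigned int b) {
    return (a + b - 1) / b;   // ceiling division for the launch grid
}

// dim3 block(dimx, dimy);
// dim3 grid(div_up(nx, block.x), div_up(ny, block.y));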
0b66f7d63892c8d65fd11a9251b84e0488021ea1.cu
#include <stdio.h> #include <cuda.h> void MatrixAddC(float* A, float* B, float* S, int Width, int Height, int offset) { int col = 0; int row = 0; int DestIndex = 0; int N = Width * Height; for (col = 0; col < Width; col++) { for (row = 0; row < Height; row++) { DestIndex = col * Width + row; S[DestIndex] = A[DestIndex + offset] + B[DestIndex + offset]; } } } __global__ void MatrixAddGlobalMem(float* A, float* B, float* S, int Width, int Height, int offset) { int tid, tx, ty, N; tx = threadIdx.x + blockIdx.x * blockDim.x; ty = threadIdx.y + blockIdx.y * blockDim.y; tid = Width * ty + tx; N = Width * Height; int k = tid + offset; if (k < N) S[tid] = A[k] + B[k]; } int main() { int nx = 1600; int ny = 1600; int dimx = 32; int dimy = 32; int offset = 0; dim3 block(dimx, dimy); dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y); const int MatrixSize = nx * ny; const int BufferSize = MatrixSize * sizeof(float); float* A; float* B; float* Sum; float* S_C; // Host memory allocation A = (float*)malloc(BufferSize); B = (float*)malloc(BufferSize); Sum = (float*)malloc(BufferSize); S_C = (float*)malloc(BufferSize); // Data input for (int i = 0; i < MatrixSize; i++) { A[i] = i; B[i] = i; Sum[i] = 0; } float* dev_A; float* dev_B; float* dev_S; // Device memory allocation cudaMalloc((float**)&dev_A, BufferSize); cudaMalloc((float**)&dev_B, BufferSize); cudaMalloc((float**)&dev_S, BufferSize); // Copy host to device cudaMemcpy(dev_A, A, BufferSize, cudaMemcpyHostToDevice); cudaMemcpy(dev_B, B, BufferSize, cudaMemcpyHostToDevice); // Kernel function MatrixAddGlobalMem <<<grid, block>>> (dev_A, dev_B, dev_S, nx, ny, offset); // Copy result from Device to Host cudaMemcpy(Sum, dev_S, BufferSize, cudaMemcpyDeviceToHost); MatrixAddC(A, B, S_C, nx, ny, offset); bool ResultFlag = true; for (int i = 0; i < MatrixSize; i++) { if (Sum[i] != S_C[i]) { ResultFlag = false; printf("Error Matrix Add at element %d\n", i); break; } } if (ResultFlag == true) printf("Matrix Add is OK\n"); else printf("Error Matrix Add\n"); // Free memory cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_S); free(A); free(B); free(Sum); free(S_C); }
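The host code in this pair calls cudaMalloc, cudaMemcpy, and the kernel launch without checking return codes. A small hedged sketch of the usual checking wrapper; the CUDA_CHECK name is illustrative, not from the file:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Illustrative helper, not part of the file above.
#define CUDA_CHECK(call)                                                       \
    do {                                                                       \
        cudaError_t err_ = (call);                                             \
        if (err_ != cudaSuccess) {                                             \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                        \
                    cudaGetErrorString(err_), __FILE__, __LINE__);             \
            exit(EXIT_FAILURE);                                                \
        }                                                                      \
    } while (0)

// Typical use around the calls seen above:
//   CUDA_CHECK(cudaMalloc((void**)&dev_A, BufferSize));
//   MatrixAddGlobalMem<<<grid, block>>>(dev_A, dev_B, dev_S, nx, ny, offset);
//   CUDA_CHECK(cudaGetLastError());        // catches launch-configuration errors
//   CUDA_CHECK(cudaDeviceSynchronize());   // catches errors raised during execution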
dcf394662cb9fd605bfeae3df9190c27413bda48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *//* */ /** @file cutlass_resnet.cu * @author Thomas Müller, NVIDIA * @brief CUTLASS implementation of an optimized fully connected network with residual connections. Supports online training and simultaneous inference. 
*/ #include <tiny-cuda-nn/networks/cutlass_resnet.h> #include <tiny-cuda-nn/cutlass_matmul.h> TCNN_NAMESPACE_BEGIN template <typename T, Activation input_activation, Activation output_activation> CutlassResNet<T, input_activation, output_activation>::CutlassResNet(uint32_t input_width, uint32_t network_width, uint32_t output_width, uint32_t n_blocks, uint32_t n_matrices_per_block) : m_input_width{input_width}, m_network_width{network_width}, m_output_width{output_width}, m_n_blocks{n_blocks}, m_n_matrices_per_block{n_matrices_per_block} { m_padded_output_width = next_multiple(m_output_width, tensorcore_width); // Create matrices related to weights m_weight_matrices.emplace_back(nullptr, network_width, input_width); m_weight_matrices_inference.emplace_back(nullptr, network_width, input_width); m_weight_matrices_full_precision.emplace_back(nullptr, network_width, input_width); m_gradient_matrices.emplace_back(nullptr, network_width, input_width); for (uint32_t i = 0; i < n_blocks * n_matrices_per_block; ++i) { m_weight_matrices.emplace_back(nullptr, network_width, network_width); m_weight_matrices_inference.emplace_back(nullptr, network_width, network_width); m_weight_matrices_full_precision.emplace_back(nullptr, network_width, network_width); m_gradient_matrices.emplace_back(nullptr, network_width, network_width); } m_weight_matrices.emplace_back(nullptr, m_padded_output_width, network_width); m_weight_matrices_inference.emplace_back(nullptr, m_padded_output_width, network_width); m_weight_matrices_full_precision.emplace_back(nullptr, m_padded_output_width, network_width); m_gradient_matrices.emplace_back(nullptr, m_padded_output_width, network_width); // Determine total number of memory entries and set it m_total_n_params = 0; for (const auto& m : m_weight_matrices) { m_total_n_params += m.n_elements(); } // Buffers to keep data from the forward pass m_forward_tmp.resize(m_n_blocks * n_matrices_per_block + 1); m_backward_tmp.resize(m_n_blocks * n_matrices_per_block + 1); // Streams & events. Null for now to avoid clashes with external cuda calls // 1 fewer stream and event than the number of matrices, because the last // split-k matmul can use the regular training stream. 
m_training_splitk_streams.resize(m_weight_matrices.size()); m_training_splitk_events.resize(m_weight_matrices.size()); for (size_t i = 0; i < m_training_splitk_streams.size(); ++i) { CUDA_CHECK_THROW(hipStreamCreate(&m_training_splitk_streams[i])); CUDA_CHECK_THROW(hipEventCreate(&m_training_splitk_events[i])); } } template <typename T, Activation input_activation, Activation output_activation> CutlassResNet<T, input_activation, output_activation>::~CutlassResNet() { for (size_t i = 0; i < m_training_splitk_streams.size(); ++i) { free_workspace(m_training_splitk_streams[i]); CUDA_CHECK_PRINT(hipEventDestroy(m_training_splitk_events[i])); CUDA_CHECK_PRINT(hipStreamDestroy(m_training_splitk_streams[i])); } } template <typename T, typename arch> std::enable_if_t<std::is_same<arch, cutlass::arch::Sm75>::value && std::is_same<__half, T>::value> residual_block_2_inference( hipStream_t stream, const GPUMatrix<T>& input, const GPUMatrix<T, RM>& weights1, const GPUMatrix<T, RM>& weights2, GPUMatrix<T>& output ) { auto transposed_output = output.transposed(); switch (weights1.n()) { case 64: fc_multiply_b2b<Activation::None, FullLayerB2bPreReLU64, FullLayerB2bPreReLU64>( stream, input.transposed(), weights1.transposed(), transposed_output, weights2.transposed(), input.transposed(), transposed_output, (T)0, (T)1 ); break; case 128: fc_multiply_b2b<Activation::None, FullLayerB2bPreReLU128, FullLayerB2bPreReLU128>( stream, input.transposed(), weights1.transposed(), transposed_output, weights2.transposed(), input.transposed(), transposed_output, (T)0, (T)1 ); break; default: throw std::runtime_error{"Invalid layer size (must be 64, 128, or 256)."}; } } template <typename T, typename arch> std::enable_if_t<!(std::is_same<arch, cutlass::arch::Sm75>::value && std::is_same<__half, T>::value)> residual_block_2_inference( hipStream_t, const GPUMatrix<T>&, const GPUMatrix<T, RM>&, const GPUMatrix<T, RM>&, GPUMatrix<T>& ) { // Dummy implementation for successful compilation when Sm75 is not available } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::inference(hipStream_t stream, const GPUMatrix<T>& input, GPUMatrix<float>& output) { inference_mixed_precision(stream, input, m_inference_output_tmp); const uint32_t n_elements = (uint32_t)output.n_elements(); hipLaunchKernelGGL(( trim_and_cast<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_padded_output_width, m_output_width, m_inference_output_tmp.data(), output.data()); } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::inference_mixed_precision(hipStream_t stream, const GPUMatrix<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_matrices) { // Various error checks if (input.m() != m_input_width) { throw std::runtime_error(std::string("Input has incorrect width: ") + std::to_string(input.m()) + "!=" + std::to_string(m_input_width)); } if (&output != &m_inference_output_tmp && output.m() != m_output_width) { throw std::runtime_error(std::string("Output has incorrect width: ") + std::to_string(output.m()) + "!=" + std::to_string(m_output_width)); } if (&output != &m_inference_output_tmp && input.n() != output.n()) { throw std::runtime_error(std::string("Input and output don't have matching batch size: ") + std::to_string(input.n()) + "!=" + std::to_string(output.n())); } // Make sure our teporary buffers have the correct size for the 
given batch size uint32_t batch_size = input.n(); if (m_inference_linear_tmp.n() != batch_size) { allocate_inference_buffers(batch_size); } const bool can_fuse_residual_block = std::is_same<SmArch, cutlass::arch::Sm75>::value && std::is_same<__half, T>::value && m_n_matrices_per_block == 2 && (m_network_width == 128 || m_network_width == 64); // Run the actual network { // Input fc_multiply<input_activation, FullLayer>(stream, input_weight_matrix(use_inference_matrices), input, m_inference_linear_tmp, m_inference_linear_tmp); // Res blocks for (uint32_t i = 0; i < m_n_blocks; ++i) { // Compute a residual block using a _single_ fused back-to-back matrix multiplication when applicable. if (can_fuse_residual_block) { residual_block_2_inference<T, SmArch>( stream, i == 0 ? m_inference_linear_tmp : m_inference_residual_tmp[i % 2], weight_matrix_at(use_inference_matrices, i, 0), weight_matrix_at(use_inference_matrices, i, 1), m_inference_residual_tmp[(i + 1) % 2] );; continue; } fc_multiply<Activation::None, FullLayerPreReLU>(stream, weight_matrix_at(use_inference_matrices, i, 0), m_inference_linear_tmp, m_inference_residual_tmp[0]); for (uint32_t matrix_idx = 1; matrix_idx < m_n_matrices_per_block - 1; ++matrix_idx) { fc_multiply<Activation::None, FullLayerPreReLU>(stream, weight_matrix_at(use_inference_matrices, i, matrix_idx), m_inference_residual_tmp[(matrix_idx+1) % 2], m_inference_residual_tmp[matrix_idx % 2]); } // In case there's just 1 matrix per block, the remaining addition must be done manually if (m_n_matrices_per_block == 1) { const uint32_t n_elements = (uint32_t)m_inference_residual_tmp.front().n_elements(); hipLaunchKernelGGL(( add<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_inference_residual_tmp.front().data(), m_inference_linear_tmp.data()); } else { uint32_t matrix_idx = m_n_matrices_per_block - 1; fc_multiply<Activation::None, FullLayerPreReLU>( stream, weight_matrix_at(use_inference_matrices, i, matrix_idx), m_inference_residual_tmp[(matrix_idx+1) % 2], m_inference_linear_tmp, m_inference_linear_tmp, (T)1 ); // beta==1 sums up the residual and linear parts } } auto& output_matrix = can_fuse_residual_block ? 
m_inference_residual_tmp[m_n_blocks % 2] : m_inference_linear_tmp; // Output if (output.layout() == CM) { auto tmp = GPUMatrix<T>{output}; fc_multiply<output_activation, LastLayer>(stream, output_weight_matrix(use_inference_matrices), output_matrix, tmp, (T)m_output_activation_param); } else { auto tmp = GPUMatrix<T, RM>{output}; fc_multiply<output_activation, LastLayer>(stream, output_weight_matrix(use_inference_matrices), output_matrix, tmp, (T)m_output_activation_param); } } } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::forward(hipStream_t stream, const GPUMatrix<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_matrices, bool prepare_input_gradients) { // Various error checks if (input.m() != m_input_width) { throw std::runtime_error(std::string("Input has incorrect width: ") + std::to_string(input.m()) + "!=" + std::to_string(m_input_width)); } if (output.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output has incorrect width (must be padded): ") + std::to_string(output.m()) + "!=" + std::to_string(m_padded_output_width)); } if (input.n() != output.n()) { throw std::runtime_error(std::string("Input and output don't have matching batch size: ") + std::to_string(input.n()) + "!=" + std::to_string(output.n())); } // Make sure our teporary buffers have the correct size for the given batch size uint32_t batch_size = input.n(); if (m_forward_tmp.front().n() != batch_size) { allocate_forward_buffers(batch_size); } const uint32_t n_elements = (uint32_t)m_forward_tmp.front().n_elements(); // Run the actual network { auto& input_target = input_activation_value == Activation::None ? m_forward_tmp.front() : m_forward_input_tmp; fc_multiply<Activation::None, FullLayer>(stream, input_weight_matrix(use_inference_matrices), input, input_target); switch (input_activation_value) { case Activation::None: break; case Activation::Exponential:hipLaunchKernelGGL(( exp<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_forward_input_tmp.data(), m_forward_tmp.front().data()); break; case Activation::ReLU:hipLaunchKernelGGL(( relu<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_forward_input_tmp.data(), m_forward_tmp.front().data()); break; case Activation::Sine:hipLaunchKernelGGL(( sin<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_forward_input_tmp.data(), m_forward_tmp.front().data()); break; default: throw std::runtime_error{"Unsupported input activation."}; } // Res blocks for (uint32_t i = 0; i < m_n_blocks; ++i) { uint32_t idx = i * m_n_matrices_per_block + 1; if (m_n_matrices_per_block == 1) { fc_multiply<Activation::None, FullLayerPreReLU>(stream, weight_matrix_at(use_inference_matrices, i, 0), m_forward_tmp.at(idx-1), m_forward_tmp.at(idx)); hipLaunchKernelGGL(( add<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_forward_tmp.at(idx-1).data(), m_forward_tmp.at(idx).data()); } else { fc_multiply<Activation::ReLU, FullLayerPreReLU>(stream, weight_matrix_at(use_inference_matrices, i, 0), m_forward_tmp.at(idx-1), m_forward_tmp.at(idx)); for (uint32_t matrix_idx = 1; matrix_idx < m_n_matrices_per_block - 1; ++matrix_idx) { uint32_t fwd_idx = idx + matrix_idx; fc_multiply<Activation::ReLU, FullLayer>(stream, weight_matrix_at(use_inference_matrices, i, matrix_idx), m_forward_tmp.at(fwd_idx-1), m_forward_tmp.at(fwd_idx)); 
} uint32_t matrix_idx = m_n_matrices_per_block - 1; uint32_t fwd_idx = idx + matrix_idx; fc_multiply<Activation::None, FullLayer>( stream, weight_matrix_at(use_inference_matrices, i, matrix_idx), m_forward_tmp.at(fwd_idx-1), m_forward_tmp.at(idx-1), m_forward_tmp.at(fwd_idx), (T)1 ); // beta==1 sums up the residual and linear parts } // Retroactively apply ReLU to input. It's needed for backprop later. // We schedule it to the appropriate splitk stream, because only the later splitk operation depends on // the ReLU'd values to be present hipLaunchKernelGGL(( relu<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, m_training_splitk_streams.at(idx-1), n_elements, m_forward_tmp.at(idx-1).data()); } // Output if (output.layout() == CM) { auto tmp = GPUMatrix<T>{output}; fc_multiply<output_activation, LastLayer>(stream, output_weight_matrix(use_inference_matrices), m_forward_tmp.back(), tmp, (T)m_output_activation_param); } else { auto tmp = GPUMatrix<T, RM>{output}; fc_multiply<output_activation, LastLayer>(stream, output_weight_matrix(use_inference_matrices), m_forward_tmp.back(), tmp, (T)m_output_activation_param); } } } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::backward( hipStream_t stream, const GPUMatrix<T>& input, const GPUMatrixDynamic<T>& output, const GPUMatrixDynamic<T>& dL_doutput, GPUMatrix<T>* dL_dinput, bool use_inference_matrices, bool compute_param_gradients ) { if (dL_doutput.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output gradients have incorrect width (must be padded): ") + std::to_string(dL_doutput.m()) + "!=" + std::to_string(m_padded_output_width)); } // Make sure our teporary buffers have the correct size for the given batch size uint32_t batch_size = dL_doutput.n(); if (m_backward_tmp.front().n() != batch_size) { allocate_backward_buffers(batch_size); } // Compute transfer of output activation in-place... it's treated specially for performance reasons { const uint32_t n_elements = (uint32_t)dL_doutput.n_elements(); switch (output_activation_value) { case Activation::None: break; case Activation::Exponential:hipLaunchKernelGGL(( exp_transfer_output<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; case Activation::ReLU:hipLaunchKernelGGL(( relu_transfer_output<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; default: throw std::runtime_error{"Unsupported output activation."}; } } // Backprop // - weight_gradient.T = input_activation * output_gradient.T // - input_gradient = weights.T * output_gradient // - RELU: pre_activation_gradinet = post_activation_gradient if val > 0 else 0 { // T normalization = (T)(1.0f / batch_size); T normalization = (T)(1.0f); const uint32_t n_elements = (uint32_t)m_backward_tmp.front().n_elements(); int split_k_factor = batch_size / ::min((uint32_t)(1 << 12), batch_size); m_backward_output_tmp.set_layout(dL_doutput.layout()); const GPUMatrixDynamic<T>& tmp_dL_doutput = output_activation_value == Activation::None ? 
dL_doutput : m_backward_output_tmp; if (compute_param_gradients) { // Output layer hipEventRecord(m_training_splitk_events.back(), stream); hipStreamWaitEvent(m_training_splitk_streams.back(), m_training_splitk_events.back(), 0); if (output.layout() == CM) { fc_multiply_split_k<Activation::None, LastLayerK>(m_training_splitk_streams.back(), GPUMatrix<T>{tmp_dL_doutput}, m_forward_tmp.back().transposed(), output_gradient_matrix(), split_k_factor, normalization); } else { fc_multiply_split_k<Activation::None, LastLayerK>(m_training_splitk_streams.back(), GPUMatrix<T, RM>{tmp_dL_doutput}, m_forward_tmp.back().transposed(), output_gradient_matrix(), split_k_factor, normalization); } hipEventRecord(m_training_splitk_events.back(), m_training_splitk_streams.back()); } if (output.layout() == CM) { fc_multiply<Activation::None, FullLayer>(stream, output_weight_matrix(use_inference_matrices).transposed(), GPUMatrix<T>{tmp_dL_doutput}, m_backward_tmp.back()); } else { fc_multiply<Activation::None, FullLayer>(stream, output_weight_matrix(use_inference_matrices).transposed(), GPUMatrix<T, RM>{tmp_dL_doutput}, m_backward_tmp.back()); } // Res blocks for (uint32_t i = 0; i < m_n_blocks; ++i) { uint32_t block_idx = m_n_blocks - i - 1; uint32_t idx = block_idx * m_n_matrices_per_block + 1; for (uint32_t j = 0; j < m_n_matrices_per_block; ++j) { uint32_t matrix_idx = m_n_matrices_per_block - 1 - j; uint32_t fwd_idx = idx + matrix_idx; if (compute_param_gradients) { hipEventRecord(m_training_splitk_events.at(fwd_idx), stream); hipStreamWaitEvent(m_training_splitk_streams.at(fwd_idx), m_training_splitk_events.at(fwd_idx), 0); fc_multiply_split_k<Activation::None, FullLayerK>(m_training_splitk_streams.at(fwd_idx), m_backward_tmp.at(fwd_idx), m_forward_tmp.at(fwd_idx-1).transposed(), gradient_matrix_at(block_idx, matrix_idx), split_k_factor, normalization); hipEventRecord(m_training_splitk_events.at(fwd_idx), m_training_splitk_streams.at(fwd_idx)); } fc_multiply<Activation::ReLUTransfer, FullLayer>(stream, weight_matrix_at(use_inference_matrices, block_idx, matrix_idx).transposed(), m_backward_tmp.at(fwd_idx), m_forward_tmp.at(fwd_idx-1), m_backward_tmp.at(fwd_idx-1)); } hipLaunchKernelGGL(( add<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_backward_tmp.at(idx+m_n_matrices_per_block-1).data(), m_backward_tmp.at(idx-1).data()); } switch (input_activation_value) { case Activation::None: break; case Activation::ReLU:hipLaunchKernelGGL(( relu_transfer<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_forward_input_tmp.data(), m_backward_tmp.front().data()); break; case Activation::Exponential:hipLaunchKernelGGL(( exp_transfer<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_forward_input_tmp.data(), m_backward_tmp.front().data()); break; case Activation::Sine:hipLaunchKernelGGL(( sin_transfer<T>), dim3(n_blocks_linear(n_elements)), dim3(n_threads_linear), 0, stream, n_elements, m_forward_input_tmp.data(), m_backward_tmp.front().data()); break; default: throw std::runtime_error{"Invalid input activation"}; }; if (compute_param_gradients) { hipEventRecord(m_training_splitk_events.front(), stream); hipStreamWaitEvent(m_training_splitk_streams.front(), m_training_splitk_events.front(), 0); fc_multiply_split_k<Activation::None, FullLayerK>(m_training_splitk_streams.front(), m_backward_tmp.front(), input.transposed(), input_gradient_matrix(), split_k_factor, normalization); 
hipEventRecord(m_training_splitk_events.front(), m_training_splitk_streams.front()); } // If requested, compute sensitivity of loss w.r.t. inputs if (dL_dinput) { // TODO: optimization opportunity to only compute sensitivity w.r.t selected SUBSET of inputs. Useful for NFs, where conditional dims stay the same. fc_multiply<Activation::None, FullLayer>(stream, input_weight_matrix(use_inference_matrices).transposed(), m_backward_tmp.front(), *dL_dinput); } } if (compute_param_gradients) { // All the per-layer split-k matrix multiplications summing over // the batch are computed in parallel streams to the actual // backpropagation. Here, we need to wait for all of these to complete. for (auto& event : m_training_splitk_events) { hipStreamWaitEvent(stream, event, 0); } } } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::allocate_inference_buffers(uint32_t batch_size) { m_inference_linear_tmp.set_size(m_network_width, batch_size); m_inference_residual_tmp[0].set_size(m_network_width, batch_size); m_inference_residual_tmp[1].set_size(m_network_width, batch_size); m_inference_output_tmp.set_size(m_padded_output_width, batch_size); GPUMatrixBase::allocate_shared_memory( m_inference_buffer, { &m_inference_linear_tmp, &m_inference_residual_tmp[0], &m_inference_residual_tmp[1], &m_inference_output_tmp, } ); } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::allocate_forward_buffers(uint32_t batch_size) { std::vector<GPUMatrixBase*> matrix_pointers = {&m_forward_input_tmp}; m_forward_input_tmp.set_size(m_network_width, batch_size); for (uint32_t i = 0; i < (uint32_t)m_forward_tmp.size(); ++i) { m_forward_tmp[i].set_size(m_network_width, batch_size); matrix_pointers.emplace_back(&m_forward_tmp[i]); } GPUMatrixBase::allocate_shared_memory(m_forward_buffer, matrix_pointers); } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::allocate_backward_buffers(uint32_t batch_size) { std::vector<GPUMatrixBase*> matrix_pointers = {&m_backward_output_tmp}; m_backward_output_tmp.set_size(m_padded_output_width, batch_size); for (uint32_t i = 0; i < (uint32_t)m_backward_tmp.size(); ++i) { m_backward_tmp[i].set_size(m_network_width, batch_size); matrix_pointers.emplace_back(&m_backward_tmp[i]); } GPUMatrixBase::allocate_shared_memory(m_backward_buffer, matrix_pointers); } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::initialize_params(std::mt19937& rnd, float* params_full_precision, T* params, T* inference_params, T* backward_params, T* gradients, float scale) { size_t current_pos = 0; for (size_t i = 0; i < m_weight_matrices.size(); ++i) { m_weight_matrices[i].set_data(params + current_pos); m_weight_matrices_inference[i].set_data(inference_params + current_pos); m_weight_matrices_full_precision[i].set_data(params_full_precision + current_pos); m_gradient_matrices[i].set_data(gradients + current_pos); current_pos += m_weight_matrices[i].n_elements(); } // Initialize the params for (size_t i = 0; i < m_weight_matrices_full_precision.size(); ++i) { if (i == 0 && input_activation_value == Activation::Sine) { m_weight_matrices_full_precision[i].initialize_siren_uniform_first(rnd, scale); } else { 
m_weight_matrices_full_precision[i].initialize_xavier_uniform(rnd, scale); } } } // Explicitly instantiate resnet classes. template class CutlassResNet<network_precision_t, Activation::None, Activation::Exponential>; template class CutlassResNet<network_precision_t, Activation::None, Activation::None>; TCNN_NAMESPACE_END
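The inference and forward passes above fold each block's skip connection into the block's last matrix product by calling fc_multiply with beta set to 1, so the GEMM epilogue adds the residual in place instead of running a separate elementwise kernel. Below is a minimal CPU sketch of that accumulation idea; the sizes are made up and gemm_accumulate is a hypothetical helper, not part of the tiny-cuda-nn API.

#include <cstdio>

// C = A * B + beta * C for row-major matrices: A is n x k, B is k x m, C is n x m.
// With beta == 1 the previous contents of C (the residual) are added in place,
// mirroring the role of the beta argument in the fused fc_multiply calls above.
void gemm_accumulate(const float* A, const float* B, float* C,
                     int n, int k, int m, float beta) {
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < m; ++j) {
            float acc = 0.0f;
            for (int l = 0; l < k; ++l) {
                acc += A[i * k + l] * B[l * m + j];
            }
            C[i * m + j] = acc + beta * C[i * m + j];
        }
    }
}

int main() {
    // 2x2 example: C initially holds the residual (the block's input).
    float A[4] = {1, 0, 0, 1};     // weights (identity, for readability)
    float B[4] = {2, 3, 4, 5};     // activations of the previous layer
    float C[4] = {10, 10, 10, 10}; // residual to be accumulated

    gemm_accumulate(A, B, C, 2, 2, 2, 1.0f);                  // beta == 1 adds the residual
    printf("%.0f %.0f\n%.0f %.0f\n", C[0], C[1], C[2], C[3]); // 12 13 / 14 15
    return 0;
}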
dcf394662cb9fd605bfeae3df9190c27413bda48.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *//* */ /** @file cutlass_resnet.cu * @author Thomas Müller, NVIDIA * @brief CUTLASS implementation of an optimized fully connected network with residual connections. Supports online training and simultaneous inference. */ #include <tiny-cuda-nn/networks/cutlass_resnet.h> #include <tiny-cuda-nn/cutlass_matmul.h> TCNN_NAMESPACE_BEGIN template <typename T, Activation input_activation, Activation output_activation> CutlassResNet<T, input_activation, output_activation>::CutlassResNet(uint32_t input_width, uint32_t network_width, uint32_t output_width, uint32_t n_blocks, uint32_t n_matrices_per_block) : m_input_width{input_width}, m_network_width{network_width}, m_output_width{output_width}, m_n_blocks{n_blocks}, m_n_matrices_per_block{n_matrices_per_block} { m_padded_output_width = next_multiple(m_output_width, tensorcore_width); // Create matrices related to weights m_weight_matrices.emplace_back(nullptr, network_width, input_width); m_weight_matrices_inference.emplace_back(nullptr, network_width, input_width); m_weight_matrices_full_precision.emplace_back(nullptr, network_width, input_width); m_gradient_matrices.emplace_back(nullptr, network_width, input_width); for (uint32_t i = 0; i < n_blocks * n_matrices_per_block; ++i) { m_weight_matrices.emplace_back(nullptr, network_width, network_width); m_weight_matrices_inference.emplace_back(nullptr, network_width, network_width); m_weight_matrices_full_precision.emplace_back(nullptr, network_width, network_width); m_gradient_matrices.emplace_back(nullptr, network_width, network_width); } m_weight_matrices.emplace_back(nullptr, m_padded_output_width, network_width); m_weight_matrices_inference.emplace_back(nullptr, m_padded_output_width, network_width); m_weight_matrices_full_precision.emplace_back(nullptr, m_padded_output_width, network_width); m_gradient_matrices.emplace_back(nullptr, m_padded_output_width, network_width); // Determine total number of memory entries and set it m_total_n_params = 0; for (const auto& m : m_weight_matrices) { m_total_n_params += 
m.n_elements(); } // Buffers to keep data from the forward pass m_forward_tmp.resize(m_n_blocks * n_matrices_per_block + 1); m_backward_tmp.resize(m_n_blocks * n_matrices_per_block + 1); // Streams & events. Null for now to avoid clashes with external cuda calls // 1 fewer stream and event than the number of matrices, because the last // split-k matmul can use the regular training stream. m_training_splitk_streams.resize(m_weight_matrices.size()); m_training_splitk_events.resize(m_weight_matrices.size()); for (size_t i = 0; i < m_training_splitk_streams.size(); ++i) { CUDA_CHECK_THROW(cudaStreamCreate(&m_training_splitk_streams[i])); CUDA_CHECK_THROW(cudaEventCreate(&m_training_splitk_events[i])); } } template <typename T, Activation input_activation, Activation output_activation> CutlassResNet<T, input_activation, output_activation>::~CutlassResNet() { for (size_t i = 0; i < m_training_splitk_streams.size(); ++i) { free_workspace(m_training_splitk_streams[i]); CUDA_CHECK_PRINT(cudaEventDestroy(m_training_splitk_events[i])); CUDA_CHECK_PRINT(cudaStreamDestroy(m_training_splitk_streams[i])); } } template <typename T, typename arch> std::enable_if_t<std::is_same<arch, cutlass::arch::Sm75>::value && std::is_same<__half, T>::value> residual_block_2_inference( cudaStream_t stream, const GPUMatrix<T>& input, const GPUMatrix<T, RM>& weights1, const GPUMatrix<T, RM>& weights2, GPUMatrix<T>& output ) { auto transposed_output = output.transposed(); switch (weights1.n()) { case 64: fc_multiply_b2b<Activation::None, FullLayerB2bPreReLU64, FullLayerB2bPreReLU64>( stream, input.transposed(), weights1.transposed(), transposed_output, weights2.transposed(), input.transposed(), transposed_output, (T)0, (T)1 ); break; case 128: fc_multiply_b2b<Activation::None, FullLayerB2bPreReLU128, FullLayerB2bPreReLU128>( stream, input.transposed(), weights1.transposed(), transposed_output, weights2.transposed(), input.transposed(), transposed_output, (T)0, (T)1 ); break; default: throw std::runtime_error{"Invalid layer size (must be 64, 128, or 256)."}; } } template <typename T, typename arch> std::enable_if_t<!(std::is_same<arch, cutlass::arch::Sm75>::value && std::is_same<__half, T>::value)> residual_block_2_inference( cudaStream_t, const GPUMatrix<T>&, const GPUMatrix<T, RM>&, const GPUMatrix<T, RM>&, GPUMatrix<T>& ) { // Dummy implementation for successful compilation when Sm75 is not available } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::inference(cudaStream_t stream, const GPUMatrix<T>& input, GPUMatrix<float>& output) { inference_mixed_precision(stream, input, m_inference_output_tmp); const uint32_t n_elements = (uint32_t)output.n_elements(); trim_and_cast<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_padded_output_width, m_output_width, m_inference_output_tmp.data(), output.data()); } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::inference_mixed_precision(cudaStream_t stream, const GPUMatrix<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_matrices) { // Various error checks if (input.m() != m_input_width) { throw std::runtime_error(std::string("Input has incorrect width: ") + std::to_string(input.m()) + "!=" + std::to_string(m_input_width)); } if (&output != &m_inference_output_tmp && output.m() != m_output_width) { throw std::runtime_error(std::string("Output has incorrect 
width: ") + std::to_string(output.m()) + "!=" + std::to_string(m_output_width)); } if (&output != &m_inference_output_tmp && input.n() != output.n()) { throw std::runtime_error(std::string("Input and output don't have matching batch size: ") + std::to_string(input.n()) + "!=" + std::to_string(output.n())); } // Make sure our teporary buffers have the correct size for the given batch size uint32_t batch_size = input.n(); if (m_inference_linear_tmp.n() != batch_size) { allocate_inference_buffers(batch_size); } const bool can_fuse_residual_block = std::is_same<SmArch, cutlass::arch::Sm75>::value && std::is_same<__half, T>::value && m_n_matrices_per_block == 2 && (m_network_width == 128 || m_network_width == 64); // Run the actual network { // Input fc_multiply<input_activation, FullLayer>(stream, input_weight_matrix(use_inference_matrices), input, m_inference_linear_tmp, m_inference_linear_tmp); // Res blocks for (uint32_t i = 0; i < m_n_blocks; ++i) { // Compute a residual block using a _single_ fused back-to-back matrix multiplication when applicable. if (can_fuse_residual_block) { residual_block_2_inference<T, SmArch>( stream, i == 0 ? m_inference_linear_tmp : m_inference_residual_tmp[i % 2], weight_matrix_at(use_inference_matrices, i, 0), weight_matrix_at(use_inference_matrices, i, 1), m_inference_residual_tmp[(i + 1) % 2] );; continue; } fc_multiply<Activation::None, FullLayerPreReLU>(stream, weight_matrix_at(use_inference_matrices, i, 0), m_inference_linear_tmp, m_inference_residual_tmp[0]); for (uint32_t matrix_idx = 1; matrix_idx < m_n_matrices_per_block - 1; ++matrix_idx) { fc_multiply<Activation::None, FullLayerPreReLU>(stream, weight_matrix_at(use_inference_matrices, i, matrix_idx), m_inference_residual_tmp[(matrix_idx+1) % 2], m_inference_residual_tmp[matrix_idx % 2]); } // In case there's just 1 matrix per block, the remaining addition must be done manually if (m_n_matrices_per_block == 1) { const uint32_t n_elements = (uint32_t)m_inference_residual_tmp.front().n_elements(); add<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_inference_residual_tmp.front().data(), m_inference_linear_tmp.data()); } else { uint32_t matrix_idx = m_n_matrices_per_block - 1; fc_multiply<Activation::None, FullLayerPreReLU>( stream, weight_matrix_at(use_inference_matrices, i, matrix_idx), m_inference_residual_tmp[(matrix_idx+1) % 2], m_inference_linear_tmp, m_inference_linear_tmp, (T)1 ); // beta==1 sums up the residual and linear parts } } auto& output_matrix = can_fuse_residual_block ? 
m_inference_residual_tmp[m_n_blocks % 2] : m_inference_linear_tmp; // Output if (output.layout() == CM) { auto tmp = GPUMatrix<T>{output}; fc_multiply<output_activation, LastLayer>(stream, output_weight_matrix(use_inference_matrices), output_matrix, tmp, (T)m_output_activation_param); } else { auto tmp = GPUMatrix<T, RM>{output}; fc_multiply<output_activation, LastLayer>(stream, output_weight_matrix(use_inference_matrices), output_matrix, tmp, (T)m_output_activation_param); } } } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::forward(cudaStream_t stream, const GPUMatrix<T>& input, GPUMatrixDynamic<T>& output, bool use_inference_matrices, bool prepare_input_gradients) { // Various error checks if (input.m() != m_input_width) { throw std::runtime_error(std::string("Input has incorrect width: ") + std::to_string(input.m()) + "!=" + std::to_string(m_input_width)); } if (output.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output has incorrect width (must be padded): ") + std::to_string(output.m()) + "!=" + std::to_string(m_padded_output_width)); } if (input.n() != output.n()) { throw std::runtime_error(std::string("Input and output don't have matching batch size: ") + std::to_string(input.n()) + "!=" + std::to_string(output.n())); } // Make sure our teporary buffers have the correct size for the given batch size uint32_t batch_size = input.n(); if (m_forward_tmp.front().n() != batch_size) { allocate_forward_buffers(batch_size); } const uint32_t n_elements = (uint32_t)m_forward_tmp.front().n_elements(); // Run the actual network { auto& input_target = input_activation_value == Activation::None ? m_forward_tmp.front() : m_forward_input_tmp; fc_multiply<Activation::None, FullLayer>(stream, input_weight_matrix(use_inference_matrices), input, input_target); switch (input_activation_value) { case Activation::None: break; case Activation::Exponential: exp<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_forward_input_tmp.data(), m_forward_tmp.front().data()); break; case Activation::ReLU: relu<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_forward_input_tmp.data(), m_forward_tmp.front().data()); break; case Activation::Sine: sin<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_forward_input_tmp.data(), m_forward_tmp.front().data()); break; default: throw std::runtime_error{"Unsupported input activation."}; } // Res blocks for (uint32_t i = 0; i < m_n_blocks; ++i) { uint32_t idx = i * m_n_matrices_per_block + 1; if (m_n_matrices_per_block == 1) { fc_multiply<Activation::None, FullLayerPreReLU>(stream, weight_matrix_at(use_inference_matrices, i, 0), m_forward_tmp.at(idx-1), m_forward_tmp.at(idx)); add<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_forward_tmp.at(idx-1).data(), m_forward_tmp.at(idx).data()); } else { fc_multiply<Activation::ReLU, FullLayerPreReLU>(stream, weight_matrix_at(use_inference_matrices, i, 0), m_forward_tmp.at(idx-1), m_forward_tmp.at(idx)); for (uint32_t matrix_idx = 1; matrix_idx < m_n_matrices_per_block - 1; ++matrix_idx) { uint32_t fwd_idx = idx + matrix_idx; fc_multiply<Activation::ReLU, FullLayer>(stream, weight_matrix_at(use_inference_matrices, i, matrix_idx), m_forward_tmp.at(fwd_idx-1), m_forward_tmp.at(fwd_idx)); } uint32_t matrix_idx = m_n_matrices_per_block - 1; uint32_t fwd_idx = idx + matrix_idx; fc_multiply<Activation::None, 
FullLayer>( stream, weight_matrix_at(use_inference_matrices, i, matrix_idx), m_forward_tmp.at(fwd_idx-1), m_forward_tmp.at(idx-1), m_forward_tmp.at(fwd_idx), (T)1 ); // beta==1 sums up the residual and linear parts } // Retroactively apply ReLU to input. It's needed for backprop later. // We schedule it to the appropriate splitk stream, because only the later splitk operation depends on // the ReLU'd values to be present relu<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, m_training_splitk_streams.at(idx-1)>>>(n_elements, m_forward_tmp.at(idx-1).data()); } // Output if (output.layout() == CM) { auto tmp = GPUMatrix<T>{output}; fc_multiply<output_activation, LastLayer>(stream, output_weight_matrix(use_inference_matrices), m_forward_tmp.back(), tmp, (T)m_output_activation_param); } else { auto tmp = GPUMatrix<T, RM>{output}; fc_multiply<output_activation, LastLayer>(stream, output_weight_matrix(use_inference_matrices), m_forward_tmp.back(), tmp, (T)m_output_activation_param); } } } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::backward( cudaStream_t stream, const GPUMatrix<T>& input, const GPUMatrixDynamic<T>& output, const GPUMatrixDynamic<T>& dL_doutput, GPUMatrix<T>* dL_dinput, bool use_inference_matrices, bool compute_param_gradients ) { if (dL_doutput.m() != m_padded_output_width) { throw std::runtime_error(std::string("Output gradients have incorrect width (must be padded): ") + std::to_string(dL_doutput.m()) + "!=" + std::to_string(m_padded_output_width)); } // Make sure our teporary buffers have the correct size for the given batch size uint32_t batch_size = dL_doutput.n(); if (m_backward_tmp.front().n() != batch_size) { allocate_backward_buffers(batch_size); } // Compute transfer of output activation in-place... it's treated specially for performance reasons { const uint32_t n_elements = (uint32_t)dL_doutput.n_elements(); switch (output_activation_value) { case Activation::None: break; case Activation::Exponential: exp_transfer_output<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; case Activation::ReLU: relu_transfer_output<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, output.data(), dL_doutput.data(), m_backward_output_tmp.data()); break; default: throw std::runtime_error{"Unsupported output activation."}; } } // Backprop // - weight_gradient.T = input_activation * output_gradient.T // - input_gradient = weights.T * output_gradient // - RELU: pre_activation_gradinet = post_activation_gradient if val > 0 else 0 { // T normalization = (T)(1.0f / batch_size); T normalization = (T)(1.0f); const uint32_t n_elements = (uint32_t)m_backward_tmp.front().n_elements(); int split_k_factor = batch_size / std::min((uint32_t)(1 << 12), batch_size); m_backward_output_tmp.set_layout(dL_doutput.layout()); const GPUMatrixDynamic<T>& tmp_dL_doutput = output_activation_value == Activation::None ? 
dL_doutput : m_backward_output_tmp; if (compute_param_gradients) { // Output layer cudaEventRecord(m_training_splitk_events.back(), stream); cudaStreamWaitEvent(m_training_splitk_streams.back(), m_training_splitk_events.back(), 0); if (output.layout() == CM) { fc_multiply_split_k<Activation::None, LastLayerK>(m_training_splitk_streams.back(), GPUMatrix<T>{tmp_dL_doutput}, m_forward_tmp.back().transposed(), output_gradient_matrix(), split_k_factor, normalization); } else { fc_multiply_split_k<Activation::None, LastLayerK>(m_training_splitk_streams.back(), GPUMatrix<T, RM>{tmp_dL_doutput}, m_forward_tmp.back().transposed(), output_gradient_matrix(), split_k_factor, normalization); } cudaEventRecord(m_training_splitk_events.back(), m_training_splitk_streams.back()); } if (output.layout() == CM) { fc_multiply<Activation::None, FullLayer>(stream, output_weight_matrix(use_inference_matrices).transposed(), GPUMatrix<T>{tmp_dL_doutput}, m_backward_tmp.back()); } else { fc_multiply<Activation::None, FullLayer>(stream, output_weight_matrix(use_inference_matrices).transposed(), GPUMatrix<T, RM>{tmp_dL_doutput}, m_backward_tmp.back()); } // Res blocks for (uint32_t i = 0; i < m_n_blocks; ++i) { uint32_t block_idx = m_n_blocks - i - 1; uint32_t idx = block_idx * m_n_matrices_per_block + 1; for (uint32_t j = 0; j < m_n_matrices_per_block; ++j) { uint32_t matrix_idx = m_n_matrices_per_block - 1 - j; uint32_t fwd_idx = idx + matrix_idx; if (compute_param_gradients) { cudaEventRecord(m_training_splitk_events.at(fwd_idx), stream); cudaStreamWaitEvent(m_training_splitk_streams.at(fwd_idx), m_training_splitk_events.at(fwd_idx), 0); fc_multiply_split_k<Activation::None, FullLayerK>(m_training_splitk_streams.at(fwd_idx), m_backward_tmp.at(fwd_idx), m_forward_tmp.at(fwd_idx-1).transposed(), gradient_matrix_at(block_idx, matrix_idx), split_k_factor, normalization); cudaEventRecord(m_training_splitk_events.at(fwd_idx), m_training_splitk_streams.at(fwd_idx)); } fc_multiply<Activation::ReLUTransfer, FullLayer>(stream, weight_matrix_at(use_inference_matrices, block_idx, matrix_idx).transposed(), m_backward_tmp.at(fwd_idx), m_forward_tmp.at(fwd_idx-1), m_backward_tmp.at(fwd_idx-1)); } add<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_backward_tmp.at(idx+m_n_matrices_per_block-1).data(), m_backward_tmp.at(idx-1).data()); } switch (input_activation_value) { case Activation::None: break; case Activation::ReLU: relu_transfer<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_forward_input_tmp.data(), m_backward_tmp.front().data()); break; case Activation::Exponential: exp_transfer<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_forward_input_tmp.data(), m_backward_tmp.front().data()); break; case Activation::Sine: sin_transfer<T><<<n_blocks_linear(n_elements), n_threads_linear, 0, stream>>>(n_elements, m_forward_input_tmp.data(), m_backward_tmp.front().data()); break; default: throw std::runtime_error{"Invalid input activation"}; }; if (compute_param_gradients) { cudaEventRecord(m_training_splitk_events.front(), stream); cudaStreamWaitEvent(m_training_splitk_streams.front(), m_training_splitk_events.front(), 0); fc_multiply_split_k<Activation::None, FullLayerK>(m_training_splitk_streams.front(), m_backward_tmp.front(), input.transposed(), input_gradient_matrix(), split_k_factor, normalization); cudaEventRecord(m_training_splitk_events.front(), m_training_splitk_streams.front()); } // If requested, compute sensitivity of loss w.r.t. 
inputs if (dL_dinput) { // TODO: optimization opportunity to only compute sensitivity w.r.t selected SUBSET of inputs. Useful for NFs, where conditional dims stay the same. fc_multiply<Activation::None, FullLayer>(stream, input_weight_matrix(use_inference_matrices).transposed(), m_backward_tmp.front(), *dL_dinput); } } if (compute_param_gradients) { // All the per-layer split-k matrix multiplications summing over // the batch are computed in parallel streams to the actual // backpropagation. Here, we need to wait for all of these to complete. for (auto& event : m_training_splitk_events) { cudaStreamWaitEvent(stream, event, 0); } } } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::allocate_inference_buffers(uint32_t batch_size) { m_inference_linear_tmp.set_size(m_network_width, batch_size); m_inference_residual_tmp[0].set_size(m_network_width, batch_size); m_inference_residual_tmp[1].set_size(m_network_width, batch_size); m_inference_output_tmp.set_size(m_padded_output_width, batch_size); GPUMatrixBase::allocate_shared_memory( m_inference_buffer, { &m_inference_linear_tmp, &m_inference_residual_tmp[0], &m_inference_residual_tmp[1], &m_inference_output_tmp, } ); } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::allocate_forward_buffers(uint32_t batch_size) { std::vector<GPUMatrixBase*> matrix_pointers = {&m_forward_input_tmp}; m_forward_input_tmp.set_size(m_network_width, batch_size); for (uint32_t i = 0; i < (uint32_t)m_forward_tmp.size(); ++i) { m_forward_tmp[i].set_size(m_network_width, batch_size); matrix_pointers.emplace_back(&m_forward_tmp[i]); } GPUMatrixBase::allocate_shared_memory(m_forward_buffer, matrix_pointers); } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::allocate_backward_buffers(uint32_t batch_size) { std::vector<GPUMatrixBase*> matrix_pointers = {&m_backward_output_tmp}; m_backward_output_tmp.set_size(m_padded_output_width, batch_size); for (uint32_t i = 0; i < (uint32_t)m_backward_tmp.size(); ++i) { m_backward_tmp[i].set_size(m_network_width, batch_size); matrix_pointers.emplace_back(&m_backward_tmp[i]); } GPUMatrixBase::allocate_shared_memory(m_backward_buffer, matrix_pointers); } template <typename T, Activation input_activation, Activation output_activation> void CutlassResNet<T, input_activation, output_activation>::initialize_params(std::mt19937& rnd, float* params_full_precision, T* params, T* inference_params, T* backward_params, T* gradients, float scale) { size_t current_pos = 0; for (size_t i = 0; i < m_weight_matrices.size(); ++i) { m_weight_matrices[i].set_data(params + current_pos); m_weight_matrices_inference[i].set_data(inference_params + current_pos); m_weight_matrices_full_precision[i].set_data(params_full_precision + current_pos); m_gradient_matrices[i].set_data(gradients + current_pos); current_pos += m_weight_matrices[i].n_elements(); } // Initialize the params for (size_t i = 0; i < m_weight_matrices_full_precision.size(); ++i) { if (i == 0 && input_activation_value == Activation::Sine) { m_weight_matrices_full_precision[i].initialize_siren_uniform_first(rnd, scale); } else { m_weight_matrices_full_precision[i].initialize_xavier_uniform(rnd, scale); } } } // Explicitly instantiate resnet classes. 
template class CutlassResNet<network_precision_t, Activation::None, Activation::Exponential>; template class CutlassResNet<network_precision_t, Activation::None, Activation::None>; TCNN_NAMESPACE_END
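backward() above overlaps the per-layer split-k weight-gradient GEMMs with the rest of backpropagation: it records an event on the main stream, makes the layer's split-k stream wait on that event, runs the gradient product on the side stream, and finally has the main stream wait on every split-k event before returning. The following stripped-down sketch shows that record/wait choreography with a dummy kernel and placeholder stream names, not the tiny-cuda-nn objects.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_gradient_kernel(float* grad, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) grad[i] += 1.0f; // stands in for a split-k weight-gradient GEMM
}

int main() {
    const int n = 1 << 20;
    float* grad;
    cudaMalloc(&grad, n * sizeof(float));
    cudaMemset(grad, 0, n * sizeof(float));

    cudaStream_t main_stream, splitk_stream;
    cudaEvent_t event;
    cudaStreamCreate(&main_stream);
    cudaStreamCreate(&splitk_stream);
    cudaEventCreate(&event);

    // 1. The main stream reaches the point where the layer's activations are ready.
    cudaEventRecord(event, main_stream);
    // 2. The split-k stream must not start its gradient work before that point.
    cudaStreamWaitEvent(splitk_stream, event, 0);
    dummy_gradient_kernel<<<(n + 255) / 256, 256, 0, splitk_stream>>>(grad, n);
    cudaEventRecord(event, splitk_stream);
    // 3. Before anything consumes the gradients, the main stream waits for the side stream.
    cudaStreamWaitEvent(main_stream, event, 0);

    cudaStreamSynchronize(main_stream);
    printf("done\n");

    cudaEventDestroy(event);
    cudaStreamDestroy(splitk_stream);
    cudaStreamDestroy(main_stream);
    cudaFree(grad);
    return 0;
}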
dcc094a2caf45d17452868984b1158099f6ed2c1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/device_functions.h>
#include <device_launch_parameters.h>
#include "matrixmul.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#define WIDTH 10
#define HEIGHT 10
#define DELTA_T 0.075
#define PARA_A 0.15

// Swaps the two front/back rows and the two left/right columns to impose periodic boundaries.
__global__ void periodicalize_row(Matrix in) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float frontRow1 = in.elements[0 + i];
    float frontRow2 = in.elements[in.width + i];
    float backRow1 = in.elements[in.width * (in.height - 2) + i];
    float backRow2 = in.elements[in.width * (in.height - 1) + i];
    float leftCol1 = in.elements[in.width * i + 0];
    float leftCol2 = in.elements[in.width * i + 1];
    float rightCol1 = in.elements[in.width * i + in.width - 2];
    float rightCol2 = in.elements[in.width * i + in.width - 1];

    in.elements[0 + i] = backRow1;
    in.elements[in.width + i] = backRow2;
    in.elements[in.width * (in.height - 2) + i] = frontRow1;
    in.elements[in.width * (in.height - 1) + i] = frontRow2;
    in.elements[in.width * i + 0] = rightCol1;
    in.elements[in.width * i + 1] = rightCol2;
    in.elements[in.width * i + in.width - 2] = leftCol1;
    in.elements[in.width * i + in.width - 1] = leftCol2;
}

// Combines the y- and x-direction stencil sums, scaled by 1/dx^num and 1/dy^num.
__device__ float laplaceCal(float front, float back, float deltaX, float deltaY, float num) {
    float res = (1 / powf(deltaX, num)) * front + (1 / powf(deltaY, num)) * back;
    return res;
}

// Stencil sum in the y direction over a 3x3 neighbourhood stored row-major (in[r * 3 + c]).
__device__ float frontCal(float *in) {
    float res = 0.125 * (in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0])
              + 0.75 * (in[2 * 3 + 1] + in[0 * 3 + 1])
              - 0.25 * (in[2 * 3 + 1] + in[0 * 3 + 1])
              - 1.5 * in[1 * 3 + 1];
    return res;
}

// Stencil sum in the x direction over the same 3x3 neighbourhood.
__device__ float backCal(float *in) {
    float res = 0.125 * (in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0])
              + 0.75 * (in[1 * 3 + 2] + in[1 * 3 + 0])
              - 0.25 * (in[1 * 3 + 2] + in[1 * 3 + 0])
              - 1.5 * in[1 * 3 + 1];
    return res;
}

// Loads the 3x3 neighbourhood around element i into the caller-provided foi[9],
// stored row-major as foi[r * 3 + c].
//how about using shared mem?
__device__ void getFOI(Matrix in, int i, float *foi) {
    foi[0 * 3 + 0] = in.elements[i - WIDTH - 1];
    foi[0 * 3 + 1] = in.elements[i - WIDTH];
    foi[0 * 3 + 2] = in.elements[i - WIDTH + 1];
    foi[1 * 3 + 0] = in.elements[i - 1];
    foi[1 * 3 + 1] = in.elements[i];
    foi[1 * 3 + 2] = in.elements[i + 1];
    foi[2 * 3 + 0] = in.elements[i + WIDTH - 1];
    foi[2 * 3 + 1] = in.elements[i + WIDTH];
    foi[2 * 3 + 2] = in.elements[i + WIDTH + 1];
}

// Element-wise cube of the 3x3 neighbourhood, written into the caller-provided foi3[9].
__device__ void foiPowOf3(const float *foi, float *foi3) {
    for (int j = 0; j < 9; ++j) {
        foi3[j] = foi[j] * foi[j] * foi[j];
    }
}

__device__ void getNowOi(Matrix out, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32, int i) {
    out.elements[i] = oi.elements[i] + DELTA_T * ((1 - PARA_A) * tempRecordOi2.elements[i] + 2 * tempRecordOi4.elements[i] + tempRecordOi6.elements[i] + tempRecordOi32.elements[i]);
}

__constant__ float deltaX;
__constant__ float deltaY;

__global__ void firstCal(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32) {
    //consider using tile?
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    //use col and row represent i?
    int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;

    // Note: the barriers below assume every thread of the block takes the same branch.
    if (col > 0 && col < WIDTH - 1 && row > 0 && row < HEIGHT - 1) {
        float foi[9], foi3[9];
        getFOI(oi, i, foi);
        foiPowOf3(foi, foi3);

        float tempOi2 = laplaceCal(frontCal(foi), backCal(foi), deltaX, deltaY, 2.0);
        float tempOi32 = laplaceCal(frontCal(foi3), backCal(foi3), deltaX, deltaY, 2.0);
        tempRecordOi2.elements[i] = tempOi2;
        tempRecordOi32.elements[i] = tempOi32;
        __syncthreads();

        float foi2[9];
        getFOI(tempRecordOi2, i, foi2);
        float tempOi4 = laplaceCal(frontCal(foi2), backCal(foi2), deltaX, deltaY, 2);
        tempRecordOi4.elements[i] = tempOi4;
        __syncthreads();

        float foi4[9];
        getFOI(tempRecordOi4, i, foi4);
        float tempOi6 = laplaceCal(frontCal(foi4), backCal(foi4), deltaX, deltaY, 2);
        tempRecordOi6.elements[i] = tempOi6;

        newOi.elements[i] = oi.elements[i] + DELTA_T * ((1 - PARA_A) * tempOi2 + 2 * tempOi4 + tempOi6 + tempOi32);
    }
}
dcc094a2caf45d17452868984b1158099f6ed2c1.cu
#include "cuda_runtime.h"
#include <device_functions.h>
#include <device_launch_parameters.h>
#include "matrixmul.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#define WIDTH 10
#define HEIGHT 10
#define DELTA_T 0.075
#define PARA_A 0.15

// Swaps the two front/back rows and the two left/right columns to impose periodic boundaries.
__global__ void periodicalize_row(Matrix in) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float frontRow1 = in.elements[0 + i];
    float frontRow2 = in.elements[in.width + i];
    float backRow1 = in.elements[in.width * (in.height - 2) + i];
    float backRow2 = in.elements[in.width * (in.height - 1) + i];
    float leftCol1 = in.elements[in.width * i + 0];
    float leftCol2 = in.elements[in.width * i + 1];
    float rightCol1 = in.elements[in.width * i + in.width - 2];
    float rightCol2 = in.elements[in.width * i + in.width - 1];

    in.elements[0 + i] = backRow1;
    in.elements[in.width + i] = backRow2;
    in.elements[in.width * (in.height - 2) + i] = frontRow1;
    in.elements[in.width * (in.height - 1) + i] = frontRow2;
    in.elements[in.width * i + 0] = rightCol1;
    in.elements[in.width * i + 1] = rightCol2;
    in.elements[in.width * i + in.width - 2] = leftCol1;
    in.elements[in.width * i + in.width - 1] = leftCol2;
}

// Combines the y- and x-direction stencil sums, scaled by 1/dx^num and 1/dy^num.
__device__ float laplaceCal(float front, float back, float deltaX, float deltaY, float num) {
    float res = (1 / powf(deltaX, num)) * front + (1 / powf(deltaY, num)) * back;
    return res;
}

// Stencil sum in the y direction over a 3x3 neighbourhood stored row-major (in[r * 3 + c]).
__device__ float frontCal(float *in) {
    float res = 0.125 * (in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0])
              + 0.75 * (in[2 * 3 + 1] + in[0 * 3 + 1])
              - 0.25 * (in[2 * 3 + 1] + in[0 * 3 + 1])
              - 1.5 * in[1 * 3 + 1];
    return res;
}

// Stencil sum in the x direction over the same 3x3 neighbourhood.
__device__ float backCal(float *in) {
    float res = 0.125 * (in[2 * 3 + 2] + in[0 * 3 + 2] + in[2 * 3 + 0])
              + 0.75 * (in[1 * 3 + 2] + in[1 * 3 + 0])
              - 0.25 * (in[1 * 3 + 2] + in[1 * 3 + 0])
              - 1.5 * in[1 * 3 + 1];
    return res;
}

// Loads the 3x3 neighbourhood around element i into the caller-provided foi[9],
// stored row-major as foi[r * 3 + c].
//how about using shared mem?
__device__ void getFOI(Matrix in, int i, float *foi) {
    foi[0 * 3 + 0] = in.elements[i - WIDTH - 1];
    foi[0 * 3 + 1] = in.elements[i - WIDTH];
    foi[0 * 3 + 2] = in.elements[i - WIDTH + 1];
    foi[1 * 3 + 0] = in.elements[i - 1];
    foi[1 * 3 + 1] = in.elements[i];
    foi[1 * 3 + 2] = in.elements[i + 1];
    foi[2 * 3 + 0] = in.elements[i + WIDTH - 1];
    foi[2 * 3 + 1] = in.elements[i + WIDTH];
    foi[2 * 3 + 2] = in.elements[i + WIDTH + 1];
}

// Element-wise cube of the 3x3 neighbourhood, written into the caller-provided foi3[9].
__device__ void foiPowOf3(const float *foi, float *foi3) {
    for (int j = 0; j < 9; ++j) {
        foi3[j] = foi[j] * foi[j] * foi[j];
    }
}

__device__ void getNowOi(Matrix out, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32, int i) {
    out.elements[i] = oi.elements[i] + DELTA_T * ((1 - PARA_A) * tempRecordOi2.elements[i] + 2 * tempRecordOi4.elements[i] + tempRecordOi6.elements[i] + tempRecordOi32.elements[i]);
}

__constant__ float deltaX;
__constant__ float deltaY;

__global__ void firstCal(Matrix newOi, Matrix oi, Matrix tempRecordOi2, Matrix tempRecordOi4, Matrix tempRecordOi6, Matrix tempRecordOi32) {
    //consider using tile?
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    //use col and row represent i?
    int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;

    // Note: the barriers below assume every thread of the block takes the same branch.
    if (col > 0 && col < WIDTH - 1 && row > 0 && row < HEIGHT - 1) {
        float foi[9], foi3[9];
        getFOI(oi, i, foi);
        foiPowOf3(foi, foi3);

        float tempOi2 = laplaceCal(frontCal(foi), backCal(foi), deltaX, deltaY, 2.0);
        float tempOi32 = laplaceCal(frontCal(foi3), backCal(foi3), deltaX, deltaY, 2.0);
        tempRecordOi2.elements[i] = tempOi2;
        tempRecordOi32.elements[i] = tempOi32;
        __syncthreads();

        float foi2[9];
        getFOI(tempRecordOi2, i, foi2);
        float tempOi4 = laplaceCal(frontCal(foi2), backCal(foi2), deltaX, deltaY, 2);
        tempRecordOi4.elements[i] = tempOi4;
        __syncthreads();

        float foi4[9];
        getFOI(tempRecordOi4, i, foi4);
        float tempOi6 = laplaceCal(frontCal(foi4), backCal(foi4), deltaX, deltaY, 2);
        tempRecordOi6.elements[i] = tempOi6;

        newOi.elements[i] = oi.elements[i] + DELTA_T * ((1 - PARA_A) * tempOi2 + 2 * tempOi4 + tempOi6 + tempOi32);
    }
}
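firstCal above reads deltaX and deltaY from __constant__ memory, so a host driver has to set them through cudaMemcpyToSymbol before launching; a plain cudaMemcpy does not reach __constant__ symbols. The sketch below is a possible driver, assuming it is compiled in the same .cu file as the kernels above and that Matrix from the unshown matrixmul.h is a plain struct carrying width, height and a device pointer in elements.

// Hypothetical driver; Matrix layout is assumed, not taken from matrixmul.h.
static Matrix make_device_matrix(int w, int h) {
    Matrix m{};
    m.width = w;
    m.height = h;
    cudaMalloc(&m.elements, w * h * sizeof(float));
    cudaMemset(m.elements, 0, w * h * sizeof(float));
    return m;
}

int main() {
    Matrix oi    = make_device_matrix(WIDTH, HEIGHT);
    Matrix newOi = make_device_matrix(WIDTH, HEIGHT);
    Matrix t2    = make_device_matrix(WIDTH, HEIGHT);
    Matrix t4    = make_device_matrix(WIDTH, HEIGHT);
    Matrix t6    = make_device_matrix(WIDTH, HEIGHT);
    Matrix t32   = make_device_matrix(WIDTH, HEIGHT);

    // __constant__ symbols are written with cudaMemcpyToSymbol, not cudaMemcpy.
    float dx = 1.0f, dy = 1.0f;
    cudaMemcpyToSymbol(deltaX, &dx, sizeof(float));
    cudaMemcpyToSymbol(deltaY, &dy, sizeof(float));

    // Wrap the boundary rows/columns, then advance the field by one DELTA_T step.
    // grid (1, HEIGHT) with block (WIDTH, 1) makes i == row * WIDTH + col in firstCal.
    periodicalize_row<<<1, WIDTH>>>(oi);
    firstCal<<<dim3(1, HEIGHT), dim3(WIDTH, 1)>>>(newOi, oi, t2, t4, t6, t32);
    cudaDeviceSynchronize();

    cudaFree(oi.elements); cudaFree(newOi.elements); cudaFree(t2.elements);
    cudaFree(t4.elements); cudaFree(t6.elements);    cudaFree(t32.elements);
    return 0;
}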
76e34be3f939032f01860b1c6da4caa4e2b4f825.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <iostream> #include <hip/hip_runtime.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.stride + col) typedef struct { int width; int height; int stride; float* elements; } Matrix; // Thread block size #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif // Get a matrix element __device__ float GetElement(const Matrix A, int row, int col) { return A.elements[row * A.stride + col]; } // Set a matrix element __device__ void SetElement(Matrix A, int row, int col, float value) { A.elements[row * A.stride + col] = value; } // Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is // located col sub-matrices to the right and row sub-matrices down // from the upper-left corner of A __device__ Matrix GetSubMatrix(Matrix A, int row, int col) { Matrix Asub; Asub.width = BLOCK_SIZE; Asub.height = BLOCK_SIZE; Asub.stride = A.stride; Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col]; return Asub; } // Matrix multiplication kernel called by MatMul() __global__ void MatMulGPU(Matrix A, Matrix B, Matrix C) { // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Each thread block computes one sub-matrix Csub of C Matrix Csub = GetSubMatrix(C, blockRow, blockCol); // Each thread computes one element of Csub // by accumulating results into Cvalue float Cvalue = 0; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; //if ((blockRow * blockDim.y + row < C.height) && (blockCol * blockDim.x + col < C.width)) { // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) { // Get sub-matrix Asub of A Matrix Asub = GetSubMatrix(A, blockRow, m); // Get sub-matrix Bsub of B Matrix Bsub = GetSubMatrix(B, m, blockCol); // Shared memory used to store Asub and Bsub respectively __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix As[row][col] = GetElement(Asub, row, col); Bs[row][col] = GetElement(Bsub, row, col); // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together for (int e = 0; e < BLOCK_SIZE; ++e) Cvalue += As[row][e] * Bs[e][col]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element SetElement(Csub, row, col, Cvalue); //} } // Matrix multiplication kernel called by MatMul() void MatMulCPU(Matrix A, Matrix B, Matrix C) { std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); float Cvalue = 0; for (int i = 0; i < A.height; i++) for (int j = 0; j < B.width; j++) for (int e = 0; e < A.width; ++e) C.elements[i * C.width + j] += 
A.elements[i * A.width + e] * B.elements[e * B.width + j]; //printf("C[%d][%d] = %f", row, col, Cvalue); std::chrono::high_resolution_clock::time_point stop = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_span = std::chrono::duration_cast< std::chrono::duration<double>>(stop - start); if (ELAPSED_TIME == 1) { std::cout << time_span.count() * 1000 << "\n"; } else { print(C); } } void print(Matrix X) { for (int i = 0; i < X.height; i++) { for (int j = 0; j < X.width; j++) { std::cout << X.elements[i * X.width + j] << " "; } std::cout << "\n"; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = d_A.stride = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); hipMalloc(&d_A.elements, size); hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = d_B.stride = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); hipMalloc(&d_B.elements, size); hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = d_C.stride = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); hipMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); //dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); dim3 dimGrid((B.width - 1) / dimBlock.x + 1, (A.height - 1) / dimBlock.y + 1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( MatMulGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); hipEventRecord(stop); hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) printf("4: Sync kernel error: %s\n", hipGetErrorString(errSync)); if (errAsync != hipSuccess) printf("4: Async kernel error: %s\n", hipGetErrorString(errAsync)); // Read C from device memory hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); if (ELAPSED_TIME == 1) { hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } else { print(C); } // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } int main() { int n, m, q; scanf("%d", &n); m = q = n; Matrix A; Matrix B; Matrix C; int sizeA = n * m * sizeof(float); A.height = n; A.width = m; A.elements = new float[sizeA]; int sizeB = m * q * sizeof(float); B.height = m; B.width = q; B.elements = new float[sizeB]; int sizeC = n * q * sizeof(float); C.height = n; C.width = q; C.elements = new float[sizeC]; srand (time(NULL)); for(int i = 0; i < n*m; i++) scanf("%f", &A.elements[i]); for (int i = 0; i < m * q; i++) scanf("%f", &B.elements[i]); //print(A); //printf("\n"); //print(B); //printf("\n"); MatMul(A, B, C); free(A.elements); free(B.elements); free(C.elements); return 0; }
76e34be3f939032f01860b1c6da4caa4e2b4f825.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <iostream> #include <cuda.h> // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.stride + col) typedef struct { int width; int height; int stride; float* elements; } Matrix; // Thread block size #ifndef BLOCK_SIZE #define BLOCK_SIZE 16 #endif // Get a matrix element __device__ float GetElement(const Matrix A, int row, int col) { return A.elements[row * A.stride + col]; } // Set a matrix element __device__ void SetElement(Matrix A, int row, int col, float value) { A.elements[row * A.stride + col] = value; } // Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is // located col sub-matrices to the right and row sub-matrices down // from the upper-left corner of A __device__ Matrix GetSubMatrix(Matrix A, int row, int col) { Matrix Asub; Asub.width = BLOCK_SIZE; Asub.height = BLOCK_SIZE; Asub.stride = A.stride; Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col]; return Asub; } // Matrix multiplication kernel called by MatMul() __global__ void MatMulGPU(Matrix A, Matrix B, Matrix C) { // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Each thread block computes one sub-matrix Csub of C Matrix Csub = GetSubMatrix(C, blockRow, blockCol); // Each thread computes one element of Csub // by accumulating results into Cvalue float Cvalue = 0; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; //if ((blockRow * blockDim.y + row < C.height) && (blockCol * blockDim.x + col < C.width)) { // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) { // Get sub-matrix Asub of A Matrix Asub = GetSubMatrix(A, blockRow, m); // Get sub-matrix Bsub of B Matrix Bsub = GetSubMatrix(B, m, blockCol); // Shared memory used to store Asub and Bsub respectively __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix As[row][col] = GetElement(Asub, row, col); Bs[row][col] = GetElement(Bsub, row, col); // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together for (int e = 0; e < BLOCK_SIZE; ++e) Cvalue += As[row][e] * Bs[e][col]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element SetElement(Csub, row, col, Cvalue); //} } // Matrix multiplication kernel called by MatMul() void MatMulCPU(Matrix A, Matrix B, Matrix C) { std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now(); float Cvalue = 0; for (int i = 0; i < A.height; i++) for (int j = 0; j < B.width; j++) for (int e = 0; e < A.width; ++e) C.elements[i * C.width + j] += A.elements[i * A.width + e] * B.elements[e * B.width + j]; 
//printf("C[%d][%d] = %f", row, col, Cvalue); std::chrono::high_resolution_clock::time_point stop = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_span = std::chrono::duration_cast< std::chrono::duration<double>>(stop - start); if (ELAPSED_TIME == 1) { std::cout << time_span.count() * 1000 << "\n"; } else { print(C); } } void print(Matrix X) { for (int i = 0; i < X.height; i++) { for (int j = 0; j < X.width; j++) { std::cout << X.elements[i * X.width + j] << " "; } std::cout << "\n"; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = d_A.stride = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); cudaMalloc(&d_A.elements, size); cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = d_B.stride = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); cudaMalloc(&d_B.elements, size); cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = d_C.stride = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); cudaMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); //dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); dim3 dimGrid((B.width - 1) / dimBlock.x + 1, (A.height - 1) / dimBlock.y + 1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); MatMulGPU<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); cudaEventRecord(stop); cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) printf("4: Sync kernel error: %s\n", cudaGetErrorString(errSync)); if (errAsync != cudaSuccess) printf("4: Async kernel error: %s\n", cudaGetErrorString(errAsync)); // Read C from device memory cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); if (ELAPSED_TIME == 1) { cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } else { print(C); } // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } int main() { int n, m, q; scanf("%d", &n); m = q = n; Matrix A; Matrix B; Matrix C; int sizeA = n * m * sizeof(float); A.height = n; A.width = m; A.elements = new float[sizeA]; int sizeB = m * q * sizeof(float); B.height = m; B.width = q; B.elements = new float[sizeB]; int sizeC = n * q * sizeof(float); C.height = n; C.width = q; C.elements = new float[sizeC]; srand (time(NULL)); for(int i = 0; i < n*m; i++) scanf("%f", &A.elements[i]); for (int i = 0; i < m * q; i++) scanf("%f", &B.elements[i]); //print(A); //printf("\n"); //print(B); //printf("\n"); MatMul(A, B, C); free(A.elements); free(B.elements); free(C.elements); return 0; }
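The grid above is sized with the ceiling division (B.width - 1) / dimBlock.x + 1 so that matrices whose dimensions are not exact multiples of BLOCK_SIZE still receive enough blocks; the tiled kernel itself still assumes exact multiples unless its commented-out bounds check is restored. A small worked example of that arithmetic:

#include <cstdio>

// Smallest number of blocks of size `block` that covers `n` elements.
// Same arithmetic as dim3 dimGrid((B.width - 1) / dimBlock.x + 1, ...) above.
int blocks_needed(int n, int block) { return (n - 1) / block + 1; }

int main() {
    // 128 columns with 16-wide blocks: exact multiple, 8 blocks cover exactly 128.
    // 100 columns with 16-wide blocks: 100 / 16 == 6 blocks cover only 96 columns,
    // while (100 - 1) / 16 + 1 == 7 blocks cover 112 >= 100.
    printf("exact multiple : %d\n", blocks_needed(128, 16)); // 8
    printf("non-multiple   : %d\n", blocks_needed(100, 16)); // 7
    return 0;
}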
6ca2079a16ecce648524372773e5547f6df18a19.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Each of the len / 2 threads swaps one pair of characters: position i with position len - i - 1.
__global__ void revStr(char *str, int *length) {
    int i = threadIdx.x;
    int len = *length;

    char temp = str[i];
    str[i] = str[len - i - 1];
    str[len - i - 1] = temp;
}

int main() {
    char *str = (char *) calloc(BUFSIZ, sizeof(char)), *dStr;
    printf("Enter the string\n");
    scanf("%[^\n]%*c", str);

    int len = strlen(str), *dLen;
    hipMalloc(&dStr, len);
    hipMalloc(&dLen, sizeof(int));
    hipMemcpy(dStr, str, len, hipMemcpyHostToDevice);
    hipMemcpy(dLen, &len, sizeof(int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(revStr, dim3(1), dim3(len / 2), 0, 0, dStr, dLen);

    hipMemcpy(str, dStr, len, hipMemcpyDeviceToHost);
    printf("The reversed string:\n%s", str);

    hipFree(dStr);
    hipFree(dLen);
    free(str);
    return 0;
}
6ca2079a16ecce648524372773e5547f6df18a19.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void revStr(char *str, int *length) { int i = threadIdx.x; int len = *length; int temp = str[i]; str[i] = str[len - i - 1]; str[len - i - 1] = temp; } int main() { char *str = (char *) calloc(BUFSIZ, sizeof(char)), *dStr; printf("Enter the string\n"); scanf("%[^\n]%*c", str); int len = strlen(str), *dLen; cudaMalloc(&dStr, len); cudaMalloc(&dLen, sizeof(int)); cudaMemcpy(dStr, str, len, cudaMemcpyHostToDevice); cudaMemcpy(dLen, &len, sizeof(int), cudaMemcpyHostToDevice); revStr<<<1, len / 2>>>(dStr, dLen); cudaMemcpy(str, dStr, len, cudaMemcpyDeviceToHost); printf("The reversed string:\n%s", str); cudaFree(dStr); }
908d5096f1828bf4cc988114241b5b8092f14c64.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zmergebicgstab.cu normal z -> d, Wed Sep 17 15:08:43 2014 @author Hartwig Anzt */ #include "common_magma.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from dmergebicgstab into one // The difference to dmergedbicgstab2 is that the SpMV is not merged into the // kernes. This results in higher flexibility at the price of lower performance. /* -------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge1_kernel( int n, double *skp, double *v, double *r, double *p ){ int i = blockIdx.x * blockDim.x + threadIdx.x; double beta=skp[1]; double omega=skp[2]; if( i<n ){ p[i] = r[i] + beta * ( p[i] - omega * v[i] ); } } /** Purpose ------- Mergels multiple operations into one kernel: p = beta*p p = p-omega*beta*v p = p+r -> p = r + beta * ( p - omega * v ) Arguments --------- @param n int dimension n @param skp double* set of scalar parameters @param v double* input v @param r double* input r @param p double* input/output p @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" int magma_dbicgmerge1( int n, double *skp, double *v, double *r, double *p ){ dim3 Bs( BLOCK_SIZE ); dim3 Gs( (n+BLOCK_SIZE-1)/BLOCK_SIZE ); hipLaunchKernelGGL(( magma_dbicgmerge1_kernel), dim3(Gs), dim3(Bs), 0, 0, n, skp, v, r, p ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge2_kernel( int n, double *skp, double *r, double *v, double *s ){ int i = blockIdx.x * blockDim.x + threadIdx.x; double alpha=skp[0]; if( i<n ){ s[i] = r[i] - alpha * v[i] ; } } /** Purpose ------- Mergels multiple operations into one kernel: s=r s=s-alpha*v -> s = r - alpha * v Arguments --------- @param n int dimension n @param skp double* set of scalar parameters @param r double* input r @param v double* input v @param s double* input/output s @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" int magma_dbicgmerge2( int n, double *skp, double *r, double *v, double *s ){ dim3 Bs( BLOCK_SIZE ); dim3 Gs( (n+BLOCK_SIZE-1)/BLOCK_SIZE ); hipLaunchKernelGGL(( magma_dbicgmerge2_kernel), dim3(Gs), dim3(Bs), 0, 0, n, skp, r, v, s ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge3_kernel( int n, double *skp, double *p, double *se, double *t, double *x, double *r ){ int i = blockIdx.x * blockDim.x + threadIdx.x; double alpha=skp[0]; double omega=skp[2]; if( i<n ){ double s; s = se[i]; x[i] = x[i] + alpha * p[i] + omega * s; r[i] = s - omega * t[i]; } } /** Purpose ------- Mergels multiple operations into one kernel: x=x+alpha*p x=x+omega*s r=s r=r-omega*t -> x = x + alpha * p + omega * s -> r = s - omega * t Arguments --------- @param n int dimension n @param skp double* set of scalar parameters @param p double* input p @param s double* input s @param t double* input t @param x double* input/output x @param r double* input/output r @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" int magma_dbicgmerge3( int n, double *skp, double *p, double *s, double *t, double *x, double *r ){ dim3 Bs( 
BLOCK_SIZE ); dim3 Gs( (n+BLOCK_SIZE-1)/BLOCK_SIZE ); hipLaunchKernelGGL(( magma_dbicgmerge3_kernel), dim3(Gs), dim3(Bs), 0, 0, n, skp, p, s, t, x, r ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge4_kernel_1( double *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp = skp[0]; skp[0] = skp[4]/tmp; } } __global__ void magma_dbicgmerge4_kernel_2( double *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ skp[2] = skp[6]/skp[7]; skp[3] = skp[4]; } } __global__ void magma_dbicgmerge4_kernel_3( double *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp1 = skp[4]/skp[3]; double tmp2 = skp[0] / skp[2]; skp[1] = tmp1*tmp2; //skp[1] = skp[4]/skp[3] * skp[0] / skp[2]; } } /** Purpose ------- Performs some parameter operations for the BiCGSTAB with scalars on GPU. Arguments --------- @param type int kernel type @param skp double* vector with parameters @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" int magma_dbicgmerge4( int type, double *skp ){ dim3 Bs( 1 ); dim3 Gs( 1 ); if( type == 1 ) hipLaunchKernelGGL(( magma_dbicgmerge4_kernel_1), dim3(Gs), dim3(Bs), 0, 0, skp ); else if( type == 2 ) hipLaunchKernelGGL(( magma_dbicgmerge4_kernel_2), dim3(Gs), dim3(Bs), 0, 0, skp ); else if( type == 3 ) hipLaunchKernelGGL(( magma_dbicgmerge4_kernel_3), dim3(Gs), dim3(Bs), 0, 0, skp ); else printf("error: no kernel called\n"); return MAGMA_SUCCESS; }
908d5096f1828bf4cc988114241b5b8092f14c64.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zmergebicgstab.cu normal z -> d, Wed Sep 17 15:08:43 2014 @author Hartwig Anzt */ #include "common_magma.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from dmergebicgstab into one // The difference to dmergedbicgstab2 is that the SpMV is not merged into the // kernes. This results in higher flexibility at the price of lower performance. /* -------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge1_kernel( int n, double *skp, double *v, double *r, double *p ){ int i = blockIdx.x * blockDim.x + threadIdx.x; double beta=skp[1]; double omega=skp[2]; if( i<n ){ p[i] = r[i] + beta * ( p[i] - omega * v[i] ); } } /** Purpose ------- Mergels multiple operations into one kernel: p = beta*p p = p-omega*beta*v p = p+r -> p = r + beta * ( p - omega * v ) Arguments --------- @param n int dimension n @param skp double* set of scalar parameters @param v double* input v @param r double* input r @param p double* input/output p @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" int magma_dbicgmerge1( int n, double *skp, double *v, double *r, double *p ){ dim3 Bs( BLOCK_SIZE ); dim3 Gs( (n+BLOCK_SIZE-1)/BLOCK_SIZE ); magma_dbicgmerge1_kernel<<<Gs, Bs, 0>>>( n, skp, v, r, p ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge2_kernel( int n, double *skp, double *r, double *v, double *s ){ int i = blockIdx.x * blockDim.x + threadIdx.x; double alpha=skp[0]; if( i<n ){ s[i] = r[i] - alpha * v[i] ; } } /** Purpose ------- Mergels multiple operations into one kernel: s=r s=s-alpha*v -> s = r - alpha * v Arguments --------- @param n int dimension n @param skp double* set of scalar parameters @param r double* input r @param v double* input v @param s double* input/output s @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" int magma_dbicgmerge2( int n, double *skp, double *r, double *v, double *s ){ dim3 Bs( BLOCK_SIZE ); dim3 Gs( (n+BLOCK_SIZE-1)/BLOCK_SIZE ); magma_dbicgmerge2_kernel<<<Gs, Bs, 0>>>( n, skp, r, v, s ); return MAGMA_SUCCESS; } /* -------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge3_kernel( int n, double *skp, double *p, double *se, double *t, double *x, double *r ){ int i = blockIdx.x * blockDim.x + threadIdx.x; double alpha=skp[0]; double omega=skp[2]; if( i<n ){ double s; s = se[i]; x[i] = x[i] + alpha * p[i] + omega * s; r[i] = s - omega * t[i]; } } /** Purpose ------- Mergels multiple operations into one kernel: x=x+alpha*p x=x+omega*s r=s r=r-omega*t -> x = x + alpha * p + omega * s -> r = s - omega * t Arguments --------- @param n int dimension n @param skp double* set of scalar parameters @param p double* input p @param s double* input s @param t double* input t @param x double* input/output x @param r double* input/output r @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" int magma_dbicgmerge3( int n, double *skp, double *p, double *s, double *t, double *x, double *r ){ dim3 Bs( BLOCK_SIZE ); dim3 Gs( (n+BLOCK_SIZE-1)/BLOCK_SIZE ); magma_dbicgmerge3_kernel<<<Gs, Bs, 0>>>( n, skp, p, s, t, x, r ); return MAGMA_SUCCESS; } /* 
-------------------------------------------------------------------------- */ __global__ void magma_dbicgmerge4_kernel_1( double *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp = skp[0]; skp[0] = skp[4]/tmp; } } __global__ void magma_dbicgmerge4_kernel_2( double *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ skp[2] = skp[6]/skp[7]; skp[3] = skp[4]; } } __global__ void magma_dbicgmerge4_kernel_3( double *skp ){ int i = blockIdx.x * blockDim.x + threadIdx.x; if( i==0 ){ double tmp1 = skp[4]/skp[3]; double tmp2 = skp[0] / skp[2]; skp[1] = tmp1*tmp2; //skp[1] = skp[4]/skp[3] * skp[0] / skp[2]; } } /** Purpose ------- Performs some parameter operations for the BiCGSTAB with scalars on GPU. Arguments --------- @param type int kernel type @param skp double* vector with parameters @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" int magma_dbicgmerge4( int type, double *skp ){ dim3 Bs( 1 ); dim3 Gs( 1 ); if( type == 1 ) magma_dbicgmerge4_kernel_1<<<Gs, Bs, 0>>>( skp ); else if( type == 2 ) magma_dbicgmerge4_kernel_2<<<Gs, Bs, 0>>>( skp ); else if( type == 3 ) magma_dbicgmerge4_kernel_3<<<Gs, Bs, 0>>>( skp ); else printf("error: no kernel called\n"); return MAGMA_SUCCESS; }
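// --- Illustrative note, not part of the original MAGMA source ---------------
// Rough sketch (assumption, based only on the docstrings above) of where these
// merged kernels sit inside one BiCGSTAB iteration; the surrounding SpMV and
// dot products live elsewhere in MAGMA-sparse, and the vector names mirror the
// kernel arguments:
//
//     magma_dbicgmerge1( n, skp, v, r, p );       // p = r + beta * ( p - omega * v )
//     // SpMV: v = A * p, then dot products refresh entries of skp
//     magma_dbicgmerge2( n, skp, r, v, s );       // s = r - alpha * v
//     // SpMV: t = A * s, then dot products refresh entries of skp
//     magma_dbicgmerge3( n, skp, p, s, t, x, r ); // x += alpha*p + omega*s;  r = s - omega*t
//     magma_dbicgmerge4( type, skp );             // scalar bookkeeping on the skp array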
9d6118aff764e3da3d5e34365235f6bbf0b46b65.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <helper_cuda.h> #include <iostream> #include <set> #include "../../cudaconv3/include/cudaconv2.cuh" #include "../../util/include/matrix.h" #include "../include/layer_kernels.cuh" #include "../include/layer.cuh" #include "../include/data.cuh" #include "../include/util.cuh" #include "../include/weights.cuh" using namespace std; /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : _convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _numOutputs = pyDictGetInt(paramsDict, "outputs"); _numReplicas = pyDictGetInt(paramsDict, "numReplicas"); _numReplicasPrev = 1; _rcvdBInputMsgs = 0; _actBroadcaster = NULL; _gradReducer = NULL; _initialized = false; } Layer::~Layer() { if (_actBroadcaster != NULL) { _actBroadcaster->stop(); delete _actBroadcaster; } if (_gradReducer != NULL) { _gradReducer->stop(); delete _gradReducer; } // For now, gradReducer doesn't have a destructor // delete _gradReducer; for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } } hipStream_t Layer::getStream() { assert(getDeviceID() >= 0); return NVMatrix::getDefaultStream(getDeviceID()); } void Layer::syncStream() { NVMatrix::syncStream(getStream()); } void Layer::fpropNext(PASS_TYPE passType, int passIdx) { if (_next.size() > 0) { if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) { syncStream(); // Make sure I've finished computing before broadcasting } getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue)); } if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) { _broadcastFinishQueue.dequeue(); assert(_broadcastFinishQueue.getNumElements() == 0); } } for (int i = 0; i < _next.size(); i++) { _next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx)); } } bool Layer::fprop(PASS_TYPE passType, int passIdx) { _rcvdFInputMsgs++; // I require messages from *all* input replicas because it makes the propagation easier to think about. 
// Without this requirement, when all fprop terminal msgs arrive to ConvNet, the forward propagation // might not actually be finished yet. if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) { // printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID()); int ridx = getFwdActiveInputReplicaIdx(passIdx); assert(getDeviceID() == NVMatrix::getDeviceID()); map<int, NVMatrix*> v; if (ridx >= 0) { for (int i = 0; i < getNumLayersPrev(); i++) { v[i] = &_prev[ridx][i]->getActs(getDeviceID()); } } fprop(v, passType, passIdx); return true; } return false; } void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) { if (getFwdActiveInputReplicaIdx(passIdx) >= 0) { assert(v.size() == getNumLayersPrev()); _inputs.clear(); _inputs.insert(v.begin(), v.end()); int numCases = _inputs[0]->getLeadingDim(); for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { it->second->getMemory(numCases); } if (numCases > 0) { //printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases); _rcvdFInputMsgs = getNumExpectedFwdMsgs(); for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) { it->second->transpose(_trans); } getActs().transpose(_trans); fpropCommon(passType); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType, passIdx); } // Then add the rest of the inputs to that for (int i = 0; i < getNumLayersPrev(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx); } } } } fpropNext(passType, passIdx); } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_actsGradTarget < 0) { for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { it->second->getMemorySource().truncate(getName()); } } if (_actsTarget < 0) { for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { it->second->getMemorySource().truncate(getName()); } } } int Layer::getNumGradProducersNext() { return _numGradProducersNext; } int Layer::getNumExpectedBwdMsgs() { return _numGradProducersNext * getNumSiblingReplicas(); } int Layer::getNumExpectedFwdMsgs() { return getNumLayersPrev() * getNumInputReplicas(); } void Layer::bprop(PASS_TYPE passType, int passIdx) { if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) { // printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID()); if (_gradReducer != NULL) { _gradReducer->waitForFinish(); } // This does sync, but only if it has grad consumers below! so we must sync again before sending bprop terminal messages bprop(getActsGrad(), passType, passIdx); if (_bwdTerminal[passIdx]) { syncStream(); getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL)); } } } void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) { Layer& prev = *_prev[replicaIdx][inputIdx]; if (prev.isGradConsumer() && isGradProducer(prev.getName())) { if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0 bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType); } prev.getNumComputedActsGrads(getDeviceID())++; // Synchronize if the previous layer is going to actually do a reduction. // If the previous layer is on the same GPU as us and has no next layers // on other GPUs then it won't need to do a reduction. 
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) { syncStream(); } prev.getGradReducer().enqueueReduction(getDeviceID()); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) { v.transpose(_trans); assert(getDeviceID() == NVMatrix::getDeviceID()); int ridx = getBwdActiveInputReplicaIdx(passIdx); LayerV& prev = _prev[ridx]; map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx]; for (int i = 0; i < prev.size(); i++) { _inputs[i]->transpose(_trans); prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); // NOTE: this should be here (before the bpropActs) because if you have a layer // that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite // v which is used in bpropCommon. So bpropCommon must come first. bpropCommon(v, ridx, passType); if (isGradProducer()) { // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) { const set<Layer*>& deviceLayers = it->second; for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) { if (_actsGradTarget != (*it2)->getInputIdx(_name)) { bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name)); } } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0) { bpropActsCall(v, passType, ridx, _actsGradTarget); } } // Synchronization is necessary because the kernel calls that compute my backward acts // execute asynchronously. Therefore I don't want to tell other threads that I've // computed bprop activities for them when in fact I've only called a function which // will eventually compute them. if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) { syncStream(); } if (getConvNet().isConserveMemory()) { truncBwdActs(); } if (isGradProducer()) { /*for (int i = 0; i < prev.size(); i++) { if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) { prev[i]->getGradReducer().enqueueReduction(getDeviceID()); } }*/ // Send backward messages to *all* replicas. // Note that the messages will be dismissed unless the passIdx indicates // that the previous layer should do some work. for (int r = 0; r < getNumInputReplicas(); r++) { for (int i = 0; i < _prev[r].size(); i++) { if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) { _prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx)); } } } } } IActGradReducer& Layer::getGradReducer() { return *_gradReducer; } // This is called between minibatches void Layer::reset() { _rcvdFInputMsgs = 0; _rcvdBInputMsgs = 0; for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) { it->second = 0; } } // This is called between microbatches void Layer::resetPassIdx() { _rcvdFInputMsgs = 0; if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) { reset(); } } /* * Returns number of cases in given matrix. 
*/ int Layer::getNumCases(NVMatrix& v) { return v.getLeadingDim(); } int Layer::incRcvdBInputMsgs() { return ++_rcvdBInputMsgs; } std::string& Layer::getName() { return _name; } std::string& Layer::getType() { return _type; } int& Layer::getNumComputedActsGrads(int deviceID) { return _numComputedActsGrads[deviceID]; } void Layer::addNext(Layer& l) { _next.push_back(&l); _numReplicasNext = l.getNumReplicas(); if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) { int pos = rand() % (_nextDeviceIDs.size() + 1); _nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID()); } } void Layer::addPrev(Layer& l, int replicaIdx) { _prev[replicaIdx].push_back(&l); _numReplicasPrev = l.getNumReplicas(); l.setInputIdx(getName(), _prev[replicaIdx].size() - 1); if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) { int pos = rand() % (_prevDeviceIDs.size() + 1); _prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID()); } } void Layer::addReplica(Layer& l) { assert(_replicas.count(l.getReplicaID()) == 0); _replicas[l.getReplicaID()] = &l; } bool Layer::hasGradProducerNext(std::string& layerName) { bool b = _next.size() == 0; for (int i = 0; i < _next.size(); i++) { b |= _next[i]->hasGradProducerNext(_name); } return b && isGradProducer(layerName); } bool Layer::postInit() { // We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop(). // In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating // it from _prev->getActs() // _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs(); if (!_initialized) { _initialized = true; map<int,int> numGradProducersNext; _numGradProducersNext = 0; for (int r = 0; r < getNumInputReplicas(); ++r) { for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) { (*it)->postInit(); } } _memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName()) : &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name); // _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0] _memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName()) : &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name); for (int i = 0; i < _next.size(); ++i) { int d = _next[i]->getDeviceID(); _numComputedActsGrads[d] = 0; if (_next[i]->hasGradProducerNext(_name)) { if (numGradProducersNext.count(d) == 0) { numGradProducersNext[d] = 0; } numGradProducersNext[d]++; _numGradProducersNext++; if (_memSrcActsGrad.count(d) == 0) { _memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName()); } } if (_memSrcActs.count(d) == 0) { _memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName()); } } if (_next.size() == 0) { _numReplicasNext = getNumReplicas(); } /* * Initialize forward broadcaster. First sibling owns it. */ if (getReplicaIdx() == 0 && _convNetThread != NULL) { _actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID())); _actBroadcaster->start(); } /* * Initialize backward reducer. 
*/ if (isGradConsumer() && _numGradProducersNext > 0) { _gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext); _gradReducer->start(); } /* * Initialize specially sorted previous array */ for (int r = 0; r < _prev.size(); ++r) { for (int i = 0; i < _prev[r].size(); ++i) { // Previous devices in reverse order of processing by (sequential) GradReducer _prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID() + 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]); } } return true; } return false; } ActBroadcaster& Layer::getActBroadcaster() { return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster(); } // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { if (!_foundGradConsumers && _prev.size() > 0) { for (int i = 0; i < _prev[0].size(); i++) { _gradConsumer |= _prev[0][i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? bool Layer::isGradProducer() { return true; } bool Layer::isGradProducer(std::string& layerName) { return isGradProducer(); } map<int,vector<Layer*> >& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { return getActs(getDeviceID()); } NVMatrix& Layer::getActs(int deviceID) { assert(_memSrcActs.count(deviceID) > 0); return _memSrcActs[deviceID]->getMemory(); } NVMatrix& Layer::getActs(int deviceID, int numCases) { assert(_memSrcActs.count(deviceID) > 0); return _memSrcActs[deviceID]->getMemory(numCases); } NVMatrix& Layer::getActsGrad(int deviceID) { assert(_memSrcActsGrad.count(deviceID) > 0); return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim()); } NVMatrix& Layer::getActsGrad() { return getActsGrad(NVMatrix::getDeviceID()); } map<int, NVMatrix*> Layer::getAllActs() { map<int, NVMatrix*> m; for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { m[it->first] = &it->second->getMemory(); } return m; } map<int, NVMatrix*> Layer::getAllActsGrads() { map<int, NVMatrix*> m; for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { m[it->first] = &it->second->getMemory(); } return m; } int Layer::getDeviceID() { return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID(); } ConvNetThread& Layer::getConvNetThread() { assert(_convNetThread != NULL); return *_convNetThread; } ConvNet& Layer::getConvNet() { return getConvNetThread().getConvNet(); } void Layer::setBwdTerminal(int passIdx) { _bwdTerminal[passIdx] = true; } int Layer::getReplicaID() { return _replicaID; } int Layer::getActivePassPeriod() { return getNumReplicas() / getConvNet().getNumReplicasMin(); } int Layer::getFwdActiveInputReplicaIdx(int passIdx) { const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas(); return passIdx % getActivePassPeriod() == 0 ? edge : -1; } int Layer::getBwdActiveInputReplicaIdx(int passIdx) { const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas(); return (passIdx + 1) % getActivePassPeriod() == 0 ? 
edge : -1; } int Layer::getFwdActiveReplicaIdx(int passIdx) { assert(_next.size() > 0); return _next[0]->getFwdActiveInputReplicaIdx(passIdx); } int Layer::getNumReplicas() { return _replicas.size(); } int Layer::getNumSiblingReplicas() { return getNumReplicas() / getNumReplicasNext(); } int Layer::getNumReplicasPrev() { return _numReplicasPrev; } int Layer::getNumReplicasNext() { return _numReplicasNext; } int Layer::getNumInputReplicas() { return _numReplicasPrev / getNumReplicas(); } int Layer::getReplicaIdx() { return getReplicaID() % getNumSiblingReplicas(); } int Layer::getNumLayersPrev() { return _prev.size() > 0 ? _prev[0].size() : 0; } void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) { assert(_memSrcActs[deviceID]->isParent()); delete _memSrcActs[deviceID]; _memSrcActs[deviceID] = &mem; if (_actsTarget >= 0 && deviceID == getDeviceID()) { assert(getNumInputReplicas() == 1); _prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName())); } } void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) { assert(_memSrcActsGrad[deviceID]->isParent()); delete _memSrcActsGrad[deviceID]; _memSrcActsGrad[deviceID] = &mem; if (_actsGradTarget >= 0 && deviceID == getDeviceID()) { assert(getNumInputReplicas() == 1); _prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName())); } } MemoryView& Layer::getMemorySourceActs(int deviceID) { return *_memSrcActs[deviceID]; } MemoryView& Layer::getMemorySourceActsGrad(int deviceID) { return *_memSrcActsGrad[deviceID]; } int Layer::getNumOutputs() { return _numOutputs; } void Layer::setInputIdx(std::string& parentName, int idx) { _inputIndices[parentName] = idx; } int Layer::getInputIdx(std::string& parentName) { return _inputIndices[parentName]; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true) { PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron"); _neuronType = pyDictGetString(neuronDict, "type"); _neuron = &Neuron::makeNeuron(neuronDict); } NeuronLayer::~NeuronLayer() { delete _neuron; } void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) { _neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0); } } bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { // Special optimization for cross-entropy objective with logistic units. // Better to just compute the input gradient in one go to avoid division by small numbers. 
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1 && (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce") && _next[0]->getDeviceID() == getDeviceID() && _next[0]->getNumReplicas() == getNumReplicas(); LayerV& prev = _prev[replicaIdx]; if (doCrossEntGrad) { NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]); float gradCoeff = cost.getCoeff(); labels.transpose(_trans); if (cost.getPosWeight() == 1) { if (scaleTargets == 0) { getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad()); } else { getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)), labels, prev[0]->getActsGrad(), prev[0]->getActsGrad()); } } else { if (scaleTargets == 0) { getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad()); } else { getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())), labels, prev[0]->getActsGrad(), prev[0]->getActsGrad()); } } } return doCrossEntGrad; } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { _neuron->activate(*_inputs[0], getActs()); } std::string& NeuronLayer::getNeuronType() { return _neuronType; } /* * ======================= * WeightLayer * ======================= * * The useGrad parameter here merely expresses a preference by the subclass. It may * be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes. * So when computing gradient updates, the subclass must always first check weights.isUseGrad(). * * Note: biases always useGrad. */ WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) : Layer(convNetThread, paramsDict, replicaID, trans) { _weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod"); MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW"); PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed"); /* * When there are multiple replicas, the present implementation * requires that useGrad is true. This is because weights.update() * performs a simultaneous write to both replicas' weightsInc matrix, * which means that the read should come from somewhere else (i.e. a * grads matrix). 
*/ useGrad |= _numReplicas > 1; // Source layers for shared weights stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); _weights = new WeightList(); for (int i = 0; i < weightSourceLayers.size(); i++) { std::string& srcLayerName = weightSourceLayers[i]; int matrixIdx = weightSourceMatrixIndices[i]; PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i); ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule if (srcLayerName == _name) { // Current layer _weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this)); } else if (srcLayerName != "") { WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights->addWeights(*new Weights(*srcWeights, lrs, *this)); } else { _weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true); delete &weightSourceLayers; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &wc; delete &wball; _wStep = 0.02; _bStep = 0.005; } WeightLayer::~WeightLayer() { delete _weights; delete _biases; } bool WeightLayer::postInit() { if (Layer::postInit()) { _weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod()); assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0); return true; } return false; } void WeightLayer::fpropCommon(PASS_TYPE passType) { } void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) { if (_biases->getLearningRateSchedule().getBaseValue() > 0) { if (v.getNumElements() > 0) { bpropBiases(v, passType); } else { _biases->getGrad().resize(_biases->getW()); _biases->getGrad().scale(getBIncScale()); } _biases->incNumUpdates(); } for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { if (v.getNumElements() > 0) { bpropWeights(v, replicaIdx, i, passType); } else { _weights->at(i).getGrad().resize(_weights->at(i).getW()); // This will cause it to forget momentum when shown 0 training cases // and _useGrad = false but it's not too important. 
_weights->at(i).getGrad().scale(getIncScale(i, passType)); } // Increment its number of updates _weights->at(i).incNumUpdates(); } } } bool WeightLayer::updateWeights() { if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) { _weights->update(getConvNet().getTrainingProgress()); _biases->update(getConvNet().getTrainingProgress()); // constrainWeights(); return true; } return false; } bool WeightLayer::constrainWeights() { if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) { _constrainWeights(); return true; } return false; } void WeightLayer::_constrainWeights() { } void WeightLayer::copyToCPU() { _weights->copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { _weights->copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradient() { for (int i = 0; i < _weights->getSize(); i++) { getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i)); } getConvNet().checkGradient(_name + " biases", _bStep, *_biases); } void WeightLayer::addReplica(Layer& l) { Layer::addReplica(l); _weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights); _biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases); } Weights& WeightLayer::getWeights(int idx) { return _weights->at(idx); } float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) { // weight update period must be multiple of activation period // TODO: simply accumulate # of cases seen between weight updates. simpler and more accurate. double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses())); if (_weights->at(inpIdx).isUseGrad()) { return passType == PASS_GC ? 1.0f : 1.0f / numCases; } return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases; } float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) { if (_weights->at(inpIdx).isUseGrad()) { return _weights->at(inpIdx).getNumUpdates() > 0; } return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0 : (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f)); } NVMatrix& WeightLayer::getGradTarget(int inpIdx) { return _weights->at(inpIdx).getGrad(); } float WeightLayer::getBGradScale(PASS_TYPE passType) { int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses()); return passType == PASS_GC ? 
1.0f : 1.0f / numCases; } float WeightLayer::getBIncScale() { return _biases->getNumUpdates() > 0; } NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) { return _weights->at(inpIdx).getW(); } NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) { return _biases->getW(); } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) { _wStep = 0.01; _bStep = 0.01; } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType), 1, getActs()); } } void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose(); _prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { _biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType)); } void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose(); float scaleGrad = getGradScale(inpIdx, passType); float scaleInc = getIncScale(inpIdx, passType); getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad); delete &prevActs_T; } void FCLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { // NVMatrix norm2; // Unfortunate extra weight matrix... 
_weights->at(i).getW().sumOfSquares(0, _norm2); // norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall())); _norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall())); _weights->at(i).getW().eltwiseMultByVector(_norm2); } } } /* * ======================= * SplitFCLayer * ======================= */ SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : FCLayer(convNetThread, paramsDict, replicaID, useGrad) { _numParts = pyDictGetInt(paramsDict, "parts"); } void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true); NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts); NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts); NVMatrixV& splitTarget = getActs().splitCols(_numParts); NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType), 1, getActs()); } deleteElements(splitInput, true); deleteElements(splitWeights, true); deleteElements(splitTarget, true); } void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose(); _prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]); NVMatrixV& splitV = v.splitCols(_numParts); NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts); NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts); NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1); delete &weights_T; deleteElements(splitV, true); deleteElements(splitWeights_T, true); deleteElements(splitTarget, true); } void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose(); NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts); NVMatrixV& splitV = v.splitCols(_numParts); NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts); NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType)); delete &prevActs_T; deleteElements(splitPrevActs_T, true); deleteElements(splitV, true); deleteElements(splitGradTarget, true); } /* * ======================= * TwoDLayerInterface * ======================= */ TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _imgPixels = _imgSize * _imgSize; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) { _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = 
pyDictGetInt(paramsDict, "modules"); } LocalLayer::~LocalLayer() { delete _padding; delete _stride; delete _filterSize; delete _channels; delete _imgSize; delete _groups; delete _filterChannels; delete _filterPixels; delete _imgPixels; } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LocalLayer(convNetThread, paramsDict, replicaID, true) { _sumWidth = pyDictGetInt(paramsDict, "sumWidth"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); _weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin"); _weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax"); } ConvLayer::~ConvLayer() { delete _weightContrastNormMin; delete _weightContrastNormMax; } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(getBiasMatrix(passType)); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(getBiasMatrix(passType)); } } } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { float scaleBGrad = getBGradScale(passType); float scaleInc = getBIncScale(); if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); } else { _biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { assert(_weights->at(inpIdx).isUseGrad()); bool doPartialSum = _sumWidth < _modulesX; NVMatrix& tgt = doPartialSum ? 
_weightGradTmp : _weights->at(inpIdx).getGrad(); float scaleWGrad = getGradScale(inpIdx, passType); float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum; convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad); if (doPartialSum) { scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0; int outWidth = DIVUP(_modulesX, _sumWidth); _weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); } } void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); _weightGradTmp.truncate(); } void ConvLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { float fz = _weights->at(i).getW().getNumRows(); NVMatrix tmp; _weights->at(i).getW().sum(0, tmp); _weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad()); // Now _weights->at(i).getGrad() contains zero-mean filters _weights->at(i).getGrad().apply(NVMatrixOps::Square()); _weights->at(i).getGrad().sum(0, tmp); tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz)); // Now tmp has the stdev _weights->at(i).getW().eltwiseMultByVector(tmp); } // It's pretty silly to do both these things but whatever if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { // NVMatrix norm2; _weights->at(i).getW().sumOfSquares(0, _norm2); // norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall())); _norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall())); _weights->at(i).getW().eltwiseMultByVector(_norm2); } } } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LocalLayer(convNetThread, paramsDict, replicaID, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType)); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { _biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType)); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { float scaleWGrad = getGradScale(inpIdx, passType); float scaleInc = getIncScale(inpIdx, passType); localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), 
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } void LocalUnsharedLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall()); } } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { NVMatrix& input = *_inputs[0]; input.max(1, _max); input.addVector(_max, -1, getActs()); getActs().apply(NVMatrixOps::Exp()); getActs().sum(1, _sum); getActs().eltwiseDivideByVector(_sum); } void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); LayerV& prev = _prev[replicaIdx]; if (_doUpperGrad) { // Todo: rethink replica IDs or idxes... this here doesn't make a huge amount of sense for (int i = 0; i < _next.size(); ++i) { if (_next[i]->isGradProducer(getName())) { NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); break; } } } else { computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1); } } void SoftmaxLayer::setDoUpperGrad(bool b) { _doUpperGrad = b; } /* * ======================= * ConcatenationLayer * ======================= */ ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets"); _copyOffsets->push_back(_numOutputs); } ConcatenationLayer::~ConcatenationLayer() { delete _copyOffsets; } void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols()); _inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0); } void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view _prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1); delete &copySrc; } /* * ======================= * PassThroughLayer * ======================= */ PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // No-op } void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, 
float scaleTargets, PASS_TYPE passType) { // No-op } bool PassThroughLayer::postInit() { if (Layer::postInit()) { assert(getNumInputReplicas() == 1); for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) { MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs())); MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs())); _prev[0][i]->setMemorySourceActs(getDeviceID(), vActs); _prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad); } return true; } return false; } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } EltwiseSumLayer::~EltwiseSumLayer() { delete _coeffs; } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx)); } void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (inpIdx == 1) { // First input, do nothing _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * DropoutLayer * ======================= * * TODO: optimize away the case when using dopout over relus. Don't need the keepmask. 
*/ DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _enable = pyDictGetInt(paramsDict, "enable"); _keep = pyDictGetFloat(paramsDict, "keep"); } void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_enable && passType == PASS_TRAIN) { _keepMask.resize(*_inputs[inpIdx]); _keepMask.randomizeUniform(); _keepMask.apply(DropoutSmallerThanOperator(_keep)); _inputs[inpIdx]->eltwiseMult(_keepMask, getActs()); } else { _inputs[inpIdx]->copy(getActs()); } } void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { LayerV& prev = _prev[replicaIdx]; if (_enable && passType == PASS_TRAIN) { if (scaleTargets != 0) { v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()), _keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad()); } } else { prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1); } } void DropoutLayer::truncBwdActs() { Layer::truncBwdActs(); _keepMask.truncate(); } /* * ======================= * Dropout2Layer * ======================= * * TODO: optimize away the case when using dopout over relus. Don't need the keepmask. */ Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) { } void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_enable && passType == PASS_TRAIN) { _keepMask.resize(*_inputs[inpIdx]); _keepMask.randomizeUniform(); _keepMask.smallerThanScalar(_keep); _inputs[inpIdx]->eltwiseMult(_keepMask, getActs()); } else { _inputs[inpIdx]->scale(_keep, getActs()); } } void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { LayerV& prev = _prev[replicaIdx]; if (_enable && passType == PASS_TRAIN) { if (scaleTargets != 0) { v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()), _keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad()); } } else { if (scaleTargets != 0) { v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)), prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.scale(_keep, prev[inpIdx]->getActsGrad()); } } } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); _start = pyDictGetInt(paramsDict, "start"); _end = pyDictGetInt(paramsDict, "end"); _useBuffer = false; _outstandingCopyRequest = false; _convNet = convNet; } DataLayer::~DataLayer() { for (map<int,hipStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) { checkCudaErrors(hipStreamDestroy(it->second)); } for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } _copier->stop(); delete _copier; } void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) { waitForCopyFinish(); if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) { _useBuffer = !_useBuffer; 
} for (int i = 0; i < _next.size(); i++) { _next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx)); } } void DataLayer::waitForCopyFinish() { if (_outstandingCopyRequest) { _copyFinishQueue.dequeue(); assert(_copyFinishQueue.getNumElements() == 0); _outstandingCopyRequest = false; } } hipStream_t DataLayer::getCopyStream(int deviceID) { if (_copyStreams.count(deviceID) == 0) { NVMatrix::setDeviceID(deviceID); checkCudaErrors(hipStreamCreateWithFlags(&_copyStreams[deviceID], hipStreamNonBlocking)); } return _copyStreams[deviceID]; } void DataLayer::copyData(CPUData& data, bool other, int passIdx) { assert(!_outstandingCopyRequest); assert(_copyFinishQueue.getNumElements() == 0); _copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx)); _outstandingCopyRequest = true; } int DataLayer::getNumInputReplicas() { return _convNet->getNumReplicasMax() / getNumReplicas(); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { } NVMatrix& DataLayer::getActs(int deviceID) { return getActs(deviceID, false, -1); } NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) { // printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases); assert(_memSrcActs.count(deviceID) > 0); assert(_memSrcActs2.count(deviceID) > 0); return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases)); } ConvNet& DataLayer::getConvNet() { return *_convNet; } bool DataLayer::postInit() { if (Layer::postInit()) { for (int i = 0; i < _next.size(); ++i) { int d = _next[i]->getDeviceID(); if (_memSrcActs2.count(d) == 0) { _memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName()); } } intv cpus = getDeviceCPUs(_next[0]->getDeviceID()); _copier = new DataCopyThread(*this, cpus); _copier->start(); return true; } return false; } bool DataLayer::isGradProducer() { return false; } /* * ======================= * DataCopyThread * ======================= */ DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) { } Queue<DataCopyMessage*>& DataCopyThread::getQueue() { return _queue; } void DataCopyThread::stop() { getQueue().enqueue(new DataCopyExitMessage()); join(); } void* DataCopyThread::run() { NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin()); bool exit = false; while(!exit) { DataCopyMessage& msg = *_queue.dequeue(); exit = msg.getType() == DataCopyMessage::EXIT; if (!exit) { CPUData& data = msg.getData(); int passIdx = msg.getPassIdx(); bool other = msg.isOther(); Matrix& dataMatrix = data.getData(_parent->getDataIdx()); // How many times is this layer going to process microbatches from this minibatch? assert(_parent->getNumReplicasNext() == _parent->getNumReplicas()); int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx); if (microIdx >= 0) { if (_requestTimer.isStarted()) { double requestIntervalMsec = _requestTimer.stop(); // Sleep for up to 1/20th the average request interval _sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0)); } _requestTimer.start(); if (other) { // Sleeping a bit is helpful because in typical nets, copying input data // as soon as it's available will produce contention with other communications // that are happening at the time. 
This is very much a hack, so in the future // it might be good to replace it with something smarter which schedules access // to communication links. usleep(_sleepUsec); } microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas(); // Safer to divup because this way you won't get a minibatch size of 0 int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax()); int microStart = microIdx * microbatchSize; int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize); // Check that this replica has some data. This can be false when, for example, // there are only 7 examples in the minibatch but 8 replicas. if (microStart < microEnd) { assert(dataMatrix.isView() == dataMatrix.isTrans()); int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2); if (dataMatrix.isTrans()) { Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd); // In this case, dataMatrix is a view on memory allocated by Python. _hostMemFwd.copyFromHost(replicaDataMatrix, true); delete &replicaDataMatrix; // view NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd()); for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; // Copy my output to this guy's GPU NVMatrix::setDeviceID(deviceID); // Note to self: this is the path that gets executed in practice // in my models. It does a transpose & copy simultaneously. hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID)); } delete &hostMemFwdSlice; } else { // Hacky way to copy a slice to _hostMemFwd _hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart); Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans()); dataMatrix.sliceCols(microStart, microEnd, tmp); NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd()); for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; // Copy my output to this guy's GPU NVMatrix::setDeviceID(deviceID); hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID)); } delete &hostMemFwdSlice; } for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; NVMatrix::setDeviceID(deviceID); NVMatrix::syncStream(_parent->getCopyStream(deviceID)); } _parent->getConvNet().getDataCopyPD().freePipe(pipe); } else { for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; _parent->getActs(deviceID, other, 0); } } } _parent->getCopyFinishQueue().enqueue(1); } delete &msg; } return NULL; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) { _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _pool = pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) { std::string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") 
{ return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false); } else if(_pool == "maxabs") { return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true); } else if(_pool == "avg") { return *new AvgPoolLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_abs) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler()); } else { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler()); } } void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); } /* * ===================== * CrossMapPoolLayer * ===================== */ CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) { _size = pyDictGetInt(paramsDict, "size"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputs = pyDictGetInt(paramsDict, "outputChannels"); _pool = pyDictGetString(paramsDict, "pool"); } CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) { std::string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown pooling layer type ") + _pool; } /* * ===================== * CrossMapMaxPoolLayer * ===================== */ CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) { } void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler()); } void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1); } /* * ===================== * RandomScaleLayer * ===================== */ RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* 
paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _maxScale = pyDictGetFloat(paramsDict, "maxScale"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); // The smallest size the image could be after rescaling _minScaledSize = _imgSize / _maxScale; // The number of discrete scales we're considering int numScales = _imgSize - _minScaledSize + 1; // The total number of squares of size _tgtSize that we can extract // from all these scales double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6; // For each scale, record the fraction of the squares that it has. // This will be the probability of sampling this scale. _scaleProbs.push_back(1.0 / numCrops); for (int s = 1; s < numScales; ++s) { _scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops); } } void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (IS_TRAIN(passType)) { // _maxScale is in the range [1, 2) float r = randf; int rescaledSize = _tgtSize; float scaleFactor = _maxScale; // Find which scale we have sampled for (int s = 0; s < _scaleProbs.size(); ++s) { if (r <= _scaleProbs[s]) { rescaledSize += s; float scaleFactorEnd = _imgSize / float(rescaledSize); float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize)); scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart); break; } } assert(rescaledSize >= _tgtSize); int maxStart = rescaledSize - _tgtSize; int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart); if (rescaledSize == _imgSize) { convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX); } else { convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor); convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX); } _rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it. } else if (IS_MULTIVIEW_TEST(passType)) { // for now... 
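        // (i.e. a multi-view test pass currently just forwards the input image unchanged; no rescaling
        // or cropping happens here. Only the single-view IS_TEST branch below applies the deterministic
        // center rescale down to _tgtSize.)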
_inputs[0]->copy(getActs()); } else if (IS_TEST(passType)) { // Test on center patch convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale); } } void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * CropLayer * ===================== */ CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _startX = pyDictGetInt(paramsDict, "startX"); _startY = pyDictGetInt(paramsDict, "startY"); _tgtSize = pyDictGetInt(paramsDict, "sizeX"); } void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX); } void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _hFilter = pyDictGetMatrix(paramsDict, "filter"); } GaussianBlurLayer::~GaussianBlurLayer() { delete _hFilter; } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? 
_actGradsTmp : _prev[replicaIdx][0]->getActsGrad(); convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1); convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { _filter.copyFromHost(*_hFilter, true); } /* * ===================== * HorizontalReflectionLayer * ===================== */ HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { assert(_channels >= 1 && _channels <= 3); } void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convReflectHorizontal(*_inputs[0], getActs(), _imgSize); } void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _size = pyDictGetInt(paramsDict, "size"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); _minDiv = pyDictGetFloat(paramsDict, "minDiv"); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv); } void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), 
_prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() { Layer::truncBwdActs(); _denoms.truncate(); } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) { _blocked = pyDictGetInt(paramsDict, "blocked"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { assert(inpIdx == 0); convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) { } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv); } void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); _meanDiffs.truncate(); } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); _numCases = 0; _aggregated = pyDictGetInt(paramsDict, "aggregated") != 0; } float CostLayer::getCoeff() { return _coeff; } void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) { if (_coeff != 0) { Layer::bprop(v, passType, passIdx); } } bool CostLayer::fprop(PASS_TYPE passType, int passIdx) { if (Layer::fprop(passType, passIdx)) { syncStream(); getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL)); return true; } return false; } void CostLayer::fpropCommon(PASS_TYPE passType) { _numCases = Layer::getNumCases(*_inputs[0]); } int CostLayer::getNumCases() { return _numCases; } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { return *new doublev(_costv); } // This is called between microbatches void CostLayer::resetPassIdx() { Layer::resetPassIdx(); _costv.clear(); } CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) { if (type == "cost.crossent") { return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.bce") { return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.dce") { return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID); } else if (type == 
"cost.logreg") { return *new LogregCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown cost layer type ") + type; } /* * ===================== * CrossEntCostLayer * ===================== */ CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { } void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeCrossEntCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); } } void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID(); if (doWork) { computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * BinomialCrossEntropyCostLayer * ===================== */ BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { _computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate"); _posWeight = pyDictGetFloat(paramsDict, "posWeight"); } void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs); _costv.clear(); // Cross-entropy cost _costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim()); // If aggregated, we don't produce these outputs because they're not additive. // They have no meaning if this is just a partial cost. if (!_aggregated) { // "Correct" classifications. To compute these we threshold probs // and just count the number of entries that agree with labels. 
probs.biggerThanScalar(0.5, _tmpProbs); _tmpProbs.equals(labels); _costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim())); if (_computeSoftmaxErrorRate) { // Also compute top-1 error as if this is softmax and there's only one correct class probs.max(0, _tmpVec); assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis probs.equalsVector(_tmpVec, _correctProbs); _correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present float m = _tmpVec.max(); _correctProbs.eltwiseDivideByVector(_tmpVec); _correctProbs.eltwiseMult(labels); _costv.push_back(numCases - _correctProbs.sum(_tmpbuf)); } } } } void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "neuron" || static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic" || prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas(); if (doWork) { printf("Computing cross-entropy gradient the stupid way\n"); if (scaleTargets == 0) { labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target); } else { labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target); } } } float BinomialCrossEntropyCostLayer::getPosWeight() { return _posWeight; } /* * ===================== * DetectionCrossEntropyCostLayer * ===================== */ DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) { assert(!_aggregated); } void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx); // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); /* * Add information sufficient to compute precision and recall for each class. 
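 * Per class we record numDeclaredPositive = #(probs > 0.5), numPositive = #(label == 1) and
 * numTruePositive = #(label == 1 and probs > 0.5), from which precision = TP / declaredPositive
 * and recall = TP / positive can be computed downstream.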
*/ // NOTE: _tmpProbs contains ((probs > 0.5) == labels) labels.sum(1, _numPositive); // sum(labels, 1) _tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels) _tmpProbs.sum(1, _numTruePositive); probs.biggerThanScalar(0.5, _tmpProbs); _tmpProbs.sum(1, _numDeclaredPositive); _numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true); _numPositive.copyToHost(_hNumPositive, true); _numTruePositive.copyToHost(_hNumTruePositive, true); for (int i = 0; i < labels.getFollowingDim(); ++i) { _costv.push_back(_hNumDeclaredPositive(i, 0)); // 2 _costv.push_back(_hNumPositive(i, 0)); // 3 _costv.push_back(_hNumTruePositive(i, 0)); // 4 } } } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { _topk = pyDictGetInt(paramsDict, "topk"); // _numAccumed = 0; } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix* probs = _inputs[1]; _doCompute = !IS_MULTIVIEW_TEST(passType); if (!_doCompute) { if (IS_MULTIVIEW_TEST_START(passType)) { if (_probsAccum.count(passIdx) == 0) { _probsAccum[passIdx] = new NVMatrix(*probs); } probs->copy(*_probsAccum[passIdx]); _numAccumed[passIdx] = 1; } else { _probsAccum[passIdx]->add(*probs); _numAccumed[passIdx] += 1; } if (IS_MULTIVIEW_TEST_END(passType)) { probs = _probsAccum[passIdx]; probs->scale(1.0 / _numAccumed[passIdx]); _doCompute = true; } } if (_doCompute) { int numCases = labels.getNumElements(); probs->max(0,_maxProbs); if (_topk == 1) { computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs); } else { computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk); } _costv.clear(); double top1 = _correctProbs.sum(_tmpbuf); _costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf)); _costv.push_back(numCases - top1); _costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf))); } } } NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) { return *_probsAccum[replicaIdx]; } void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. 
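        // (For softmax outputs p and correct class c, d(log p_c)/d(softmax input) = onehot(c) - p, so the
        // softmax layer can form the fused gradient directly instead of being fed d(log p_c)/dp = 1/p_c,
        // which blows up when p_c is tiny. doWork is false exactly in that fusable case: the layer below
        // is a softmax on the same device, with the same replica count, whose only consumer is this cost.)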
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas(); if (prev[1]->getType() == "softmax") { static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork); } if (doWork) { computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } } /* * ===================== * SumOfSquaresCostLayer * ===================== */ SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { } void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { _inputs[0]->apply(NVMatrixOps::Square(), getActs()); _costv.clear(); _costv.push_back(getActs().sum()); } void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff); }
9d6118aff764e3da3d5e34365235f6bbf0b46b65.cu
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <helper_cuda.h> #include <iostream> #include <set> #include "../../cudaconv3/include/cudaconv2.cuh" #include "../../util/include/matrix.h" #include "../include/layer_kernels.cuh" #include "../include/layer.cuh" #include "../include/data.cuh" #include "../include/util.cuh" #include "../include/weights.cuh" using namespace std; /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : _convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _numOutputs = pyDictGetInt(paramsDict, "outputs"); _numReplicas = pyDictGetInt(paramsDict, "numReplicas"); _numReplicasPrev = 1; _rcvdBInputMsgs = 0; _actBroadcaster = NULL; _gradReducer = NULL; _initialized = false; } Layer::~Layer() { if (_actBroadcaster != NULL) { _actBroadcaster->stop(); delete _actBroadcaster; } if (_gradReducer != NULL) { _gradReducer->stop(); delete _gradReducer; } // For now, gradReducer doesn't have a destructor // delete _gradReducer; for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } } cudaStream_t Layer::getStream() { assert(getDeviceID() >= 0); return NVMatrix::getDefaultStream(getDeviceID()); } void Layer::syncStream() { NVMatrix::syncStream(getStream()); } void Layer::fpropNext(PASS_TYPE passType, int passIdx) { if (_next.size() > 0) { if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) { syncStream(); // Make sure I've finished computing before broadcasting } getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue)); } if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) { _broadcastFinishQueue.dequeue(); assert(_broadcastFinishQueue.getNumElements() == 0); } } for (int i = 0; i < _next.size(); i++) { _next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx)); } } bool Layer::fprop(PASS_TYPE passType, int passIdx) { _rcvdFInputMsgs++; // I require messages from *all* input replicas because it makes the propagation easier to think about. 
// Without this requirement, when all fprop terminal msgs arrive to ConvNet, the forward propagation // might not actually be finished yet. if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) { // printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID()); int ridx = getFwdActiveInputReplicaIdx(passIdx); assert(getDeviceID() == NVMatrix::getDeviceID()); map<int, NVMatrix*> v; if (ridx >= 0) { for (int i = 0; i < getNumLayersPrev(); i++) { v[i] = &_prev[ridx][i]->getActs(getDeviceID()); } } fprop(v, passType, passIdx); return true; } return false; } void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) { if (getFwdActiveInputReplicaIdx(passIdx) >= 0) { assert(v.size() == getNumLayersPrev()); _inputs.clear(); _inputs.insert(v.begin(), v.end()); int numCases = _inputs[0]->getLeadingDim(); for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { it->second->getMemory(numCases); } if (numCases > 0) { //printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases); _rcvdFInputMsgs = getNumExpectedFwdMsgs(); for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) { it->second->transpose(_trans); } getActs().transpose(_trans); fpropCommon(passType); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType, passIdx); } // Then add the rest of the inputs to that for (int i = 0; i < getNumLayersPrev(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx); } } } } fpropNext(passType, passIdx); } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_actsGradTarget < 0) { for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { it->second->getMemorySource().truncate(getName()); } } if (_actsTarget < 0) { for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { it->second->getMemorySource().truncate(getName()); } } } int Layer::getNumGradProducersNext() { return _numGradProducersNext; } int Layer::getNumExpectedBwdMsgs() { return _numGradProducersNext * getNumSiblingReplicas(); } int Layer::getNumExpectedFwdMsgs() { return getNumLayersPrev() * getNumInputReplicas(); } void Layer::bprop(PASS_TYPE passType, int passIdx) { if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) { // printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID()); if (_gradReducer != NULL) { _gradReducer->waitForFinish(); } // This does sync, but only if it has grad consumers below! so we must sync again before sending bprop terminal messages bprop(getActsGrad(), passType, passIdx); if (_bwdTerminal[passIdx]) { syncStream(); getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL)); } } } void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) { Layer& prev = *_prev[replicaIdx][inputIdx]; if (prev.isGradConsumer() && isGradProducer(prev.getName())) { if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0 bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType); } prev.getNumComputedActsGrads(getDeviceID())++; // Synchronize if the previous layer is going to actually do a reduction. // If the previous layer is on the same GPU as us and has no next layers // on other GPUs then it won't need to do a reduction. 
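        // Concretely: prev.getNextDeviceIDs() lists every GPU that consumes prev's activations, so the
        // sync before enqueueReduction is needed when those consumers span more than one device, or when
        // prev lives on a different GPU than this layer; otherwise in-stream ordering already suffices.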
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) { syncStream(); } prev.getGradReducer().enqueueReduction(getDeviceID()); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) { v.transpose(_trans); assert(getDeviceID() == NVMatrix::getDeviceID()); int ridx = getBwdActiveInputReplicaIdx(passIdx); LayerV& prev = _prev[ridx]; map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx]; for (int i = 0; i < prev.size(); i++) { _inputs[i]->transpose(_trans); prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); // NOTE: this should be here (before the bpropActs) because if you have a layer // that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite // v which is used in bpropCommon. So bpropCommon must come first. bpropCommon(v, ridx, passType); if (isGradProducer()) { // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) { const set<Layer*>& deviceLayers = it->second; for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) { if (_actsGradTarget != (*it2)->getInputIdx(_name)) { bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name)); } } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0) { bpropActsCall(v, passType, ridx, _actsGradTarget); } } // Synchronization is necessary because the kernel calls that compute my backward acts // execute asynchronously. Therefore I don't want to tell other threads that I've // computed bprop activities for them when in fact I've only called a function which // will eventually compute them. if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) { syncStream(); } if (getConvNet().isConserveMemory()) { truncBwdActs(); } if (isGradProducer()) { /*for (int i = 0; i < prev.size(); i++) { if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) { prev[i]->getGradReducer().enqueueReduction(getDeviceID()); } }*/ // Send backward messages to *all* replicas. // Note that the messages will be dismissed unless the passIdx indicates // that the previous layer should do some work. for (int r = 0; r < getNumInputReplicas(); r++) { for (int i = 0; i < _prev[r].size(); i++) { if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) { _prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx)); } } } } } IActGradReducer& Layer::getGradReducer() { return *_gradReducer; } // This is called between minibatches void Layer::reset() { _rcvdFInputMsgs = 0; _rcvdBInputMsgs = 0; for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) { it->second = 0; } } // This is called between microbatches void Layer::resetPassIdx() { _rcvdFInputMsgs = 0; if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) { reset(); } } /* * Returns number of cases in given matrix. 
*/ int Layer::getNumCases(NVMatrix& v) { return v.getLeadingDim(); } int Layer::incRcvdBInputMsgs() { return ++_rcvdBInputMsgs; } std::string& Layer::getName() { return _name; } std::string& Layer::getType() { return _type; } int& Layer::getNumComputedActsGrads(int deviceID) { return _numComputedActsGrads[deviceID]; } void Layer::addNext(Layer& l) { _next.push_back(&l); _numReplicasNext = l.getNumReplicas(); if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) { int pos = rand() % (_nextDeviceIDs.size() + 1); _nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID()); } } void Layer::addPrev(Layer& l, int replicaIdx) { _prev[replicaIdx].push_back(&l); _numReplicasPrev = l.getNumReplicas(); l.setInputIdx(getName(), _prev[replicaIdx].size() - 1); if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) { int pos = rand() % (_prevDeviceIDs.size() + 1); _prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID()); } } void Layer::addReplica(Layer& l) { assert(_replicas.count(l.getReplicaID()) == 0); _replicas[l.getReplicaID()] = &l; } bool Layer::hasGradProducerNext(std::string& layerName) { bool b = _next.size() == 0; for (int i = 0; i < _next.size(); i++) { b |= _next[i]->hasGradProducerNext(_name); } return b && isGradProducer(layerName); } bool Layer::postInit() { // We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop(). // In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating // it from _prev->getActs() // _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs(); if (!_initialized) { _initialized = true; map<int,int> numGradProducersNext; _numGradProducersNext = 0; for (int r = 0; r < getNumInputReplicas(); ++r) { for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) { (*it)->postInit(); } } _memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName()) : &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name); // _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0] _memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName()) : &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name); for (int i = 0; i < _next.size(); ++i) { int d = _next[i]->getDeviceID(); _numComputedActsGrads[d] = 0; if (_next[i]->hasGradProducerNext(_name)) { if (numGradProducersNext.count(d) == 0) { numGradProducersNext[d] = 0; } numGradProducersNext[d]++; _numGradProducersNext++; if (_memSrcActsGrad.count(d) == 0) { _memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName()); } } if (_memSrcActs.count(d) == 0) { _memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName()); } } if (_next.size() == 0) { _numReplicasNext = getNumReplicas(); } /* * Initialize forward broadcaster. First sibling owns it. */ if (getReplicaIdx() == 0 && _convNetThread != NULL) { _actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID())); _actBroadcaster->start(); } /* * Initialize backward reducer. 
*/ if (isGradConsumer() && _numGradProducersNext > 0) { _gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext); _gradReducer->start(); } /* * Initialize specially sorted previous array */ for (int r = 0; r < _prev.size(); ++r) { for (int i = 0; i < _prev[r].size(); ++i) { // Previous devices in reverse order of processing by (sequential) GradReducer _prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID() + 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]); } } return true; } return false; } ActBroadcaster& Layer::getActBroadcaster() { return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster(); } // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { if (!_foundGradConsumers && _prev.size() > 0) { for (int i = 0; i < _prev[0].size(); i++) { _gradConsumer |= _prev[0][i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? bool Layer::isGradProducer() { return true; } bool Layer::isGradProducer(std::string& layerName) { return isGradProducer(); } map<int,vector<Layer*> >& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { return getActs(getDeviceID()); } NVMatrix& Layer::getActs(int deviceID) { assert(_memSrcActs.count(deviceID) > 0); return _memSrcActs[deviceID]->getMemory(); } NVMatrix& Layer::getActs(int deviceID, int numCases) { assert(_memSrcActs.count(deviceID) > 0); return _memSrcActs[deviceID]->getMemory(numCases); } NVMatrix& Layer::getActsGrad(int deviceID) { assert(_memSrcActsGrad.count(deviceID) > 0); return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim()); } NVMatrix& Layer::getActsGrad() { return getActsGrad(NVMatrix::getDeviceID()); } map<int, NVMatrix*> Layer::getAllActs() { map<int, NVMatrix*> m; for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) { m[it->first] = &it->second->getMemory(); } return m; } map<int, NVMatrix*> Layer::getAllActsGrads() { map<int, NVMatrix*> m; for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) { m[it->first] = &it->second->getMemory(); } return m; } int Layer::getDeviceID() { return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID(); } ConvNetThread& Layer::getConvNetThread() { assert(_convNetThread != NULL); return *_convNetThread; } ConvNet& Layer::getConvNet() { return getConvNetThread().getConvNet(); } void Layer::setBwdTerminal(int passIdx) { _bwdTerminal[passIdx] = true; } int Layer::getReplicaID() { return _replicaID; } int Layer::getActivePassPeriod() { return getNumReplicas() / getConvNet().getNumReplicasMin(); } int Layer::getFwdActiveInputReplicaIdx(int passIdx) { const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas(); return passIdx % getActivePassPeriod() == 0 ? edge : -1; } int Layer::getBwdActiveInputReplicaIdx(int passIdx) { const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas(); return (passIdx + 1) % getActivePassPeriod() == 0 ? 
edge : -1; } int Layer::getFwdActiveReplicaIdx(int passIdx) { assert(_next.size() > 0); return _next[0]->getFwdActiveInputReplicaIdx(passIdx); } int Layer::getNumReplicas() { return _replicas.size(); } int Layer::getNumSiblingReplicas() { return getNumReplicas() / getNumReplicasNext(); } int Layer::getNumReplicasPrev() { return _numReplicasPrev; } int Layer::getNumReplicasNext() { return _numReplicasNext; } int Layer::getNumInputReplicas() { return _numReplicasPrev / getNumReplicas(); } int Layer::getReplicaIdx() { return getReplicaID() % getNumSiblingReplicas(); } int Layer::getNumLayersPrev() { return _prev.size() > 0 ? _prev[0].size() : 0; } void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) { assert(_memSrcActs[deviceID]->isParent()); delete _memSrcActs[deviceID]; _memSrcActs[deviceID] = &mem; if (_actsTarget >= 0 && deviceID == getDeviceID()) { assert(getNumInputReplicas() == 1); _prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName())); } } void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) { assert(_memSrcActsGrad[deviceID]->isParent()); delete _memSrcActsGrad[deviceID]; _memSrcActsGrad[deviceID] = &mem; if (_actsGradTarget >= 0 && deviceID == getDeviceID()) { assert(getNumInputReplicas() == 1); _prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName())); } } MemoryView& Layer::getMemorySourceActs(int deviceID) { return *_memSrcActs[deviceID]; } MemoryView& Layer::getMemorySourceActsGrad(int deviceID) { return *_memSrcActsGrad[deviceID]; } int Layer::getNumOutputs() { return _numOutputs; } void Layer::setInputIdx(std::string& parentName, int idx) { _inputIndices[parentName] = idx; } int Layer::getInputIdx(std::string& parentName) { return _inputIndices[parentName]; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true) { PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron"); _neuronType = pyDictGetString(neuronDict, "type"); _neuron = &Neuron::makeNeuron(neuronDict); } NeuronLayer::~NeuronLayer() { delete _neuron; } void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) { _neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0); } } bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { // Special optimization for cross-entropy objective with logistic units. // Better to just compute the input gradient in one go to avoid division by small numbers. 
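    // (With logistic outputs a = 1/(1 + exp(-z)) and 0/1 labels y, the gradient of the binomial
    // cross-entropy objective with respect to z is proportional to (y - a), so the code below writes a
    // coeff-scaled (labels - acts) expression, or its posWeight-weighted analogue, straight into the
    // previous layer's actsGrad instead of first dividing by a or (1 - a), either of which can be nearly zero.)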
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1 && (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce") && _next[0]->getDeviceID() == getDeviceID() && _next[0]->getNumReplicas() == getNumReplicas(); LayerV& prev = _prev[replicaIdx]; if (doCrossEntGrad) { NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]); float gradCoeff = cost.getCoeff(); labels.transpose(_trans); if (cost.getPosWeight() == 1) { if (scaleTargets == 0) { getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad()); } else { getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)), labels, prev[0]->getActsGrad(), prev[0]->getActsGrad()); } } else { if (scaleTargets == 0) { getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad()); } else { getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())), labels, prev[0]->getActsGrad(), prev[0]->getActsGrad()); } } } return doCrossEntGrad; } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { _neuron->activate(*_inputs[0], getActs()); } std::string& NeuronLayer::getNeuronType() { return _neuronType; } /* * ======================= * WeightLayer * ======================= * * The useGrad parameter here merely expresses a preference by the subclass. It may * be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes. * So when computing gradient updates, the subclass must always first check weights.isUseGrad(). * * Note: biases always useGrad. */ WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) : Layer(convNetThread, paramsDict, replicaID, trans) { _weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod"); MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW"); PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed"); /* * When there are multiple replicas, the present implementation * requires that useGrad is true. This is because weights.update() * performs a simultaneous write to both replicas' weightsInc matrix, * which means that the read should come from somewhere else (i.e. a * grads matrix). 
*/ useGrad |= _numReplicas > 1; // Source layers for shared weights stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); _weights = new WeightList(); for (int i = 0; i < weightSourceLayers.size(); i++) { std::string& srcLayerName = weightSourceLayers[i]; int matrixIdx = weightSourceMatrixIndices[i]; PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i); ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule if (srcLayerName == _name) { // Current layer _weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this)); } else if (srcLayerName != "") { WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights->addWeights(*new Weights(*srcWeights, lrs, *this)); } else { _weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true); delete &weightSourceLayers; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &wc; delete &wball; _wStep = 0.02; _bStep = 0.005; } WeightLayer::~WeightLayer() { delete _weights; delete _biases; } bool WeightLayer::postInit() { if (Layer::postInit()) { _weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod()); assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0); return true; } return false; } void WeightLayer::fpropCommon(PASS_TYPE passType) { } void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) { if (_biases->getLearningRateSchedule().getBaseValue() > 0) { if (v.getNumElements() > 0) { bpropBiases(v, passType); } else { _biases->getGrad().resize(_biases->getW()); _biases->getGrad().scale(getBIncScale()); } _biases->incNumUpdates(); } for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { if (v.getNumElements() > 0) { bpropWeights(v, replicaIdx, i, passType); } else { _weights->at(i).getGrad().resize(_weights->at(i).getW()); // This will cause it to forget momentum when shown 0 training cases // and _useGrad = false but it's not too important. 
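                // (With useGrad set, getIncScale() is 0 before the first accumulated update and 1 afterwards,
                // so scaling here just preserves, or on the very first update zeroes, the accumulated gradient
                // without letting an empty microbatch contribute anything.)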
_weights->at(i).getGrad().scale(getIncScale(i, passType)); } // Increment its number of updates _weights->at(i).incNumUpdates(); } } } bool WeightLayer::updateWeights() { if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) { _weights->update(getConvNet().getTrainingProgress()); _biases->update(getConvNet().getTrainingProgress()); // constrainWeights(); return true; } return false; } bool WeightLayer::constrainWeights() { if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) { _constrainWeights(); return true; } return false; } void WeightLayer::_constrainWeights() { } void WeightLayer::copyToCPU() { _weights->copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { _weights->copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradient() { for (int i = 0; i < _weights->getSize(); i++) { getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i)); } getConvNet().checkGradient(_name + " biases", _bStep, *_biases); } void WeightLayer::addReplica(Layer& l) { Layer::addReplica(l); _weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights); _biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases); } Weights& WeightLayer::getWeights(int idx) { return _weights->at(idx); } float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) { // weight update period must be multiple of activation period // TODO: simply accumulate # of cases seen between weight updates. simpler and more accurate. double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses())); if (_weights->at(inpIdx).isUseGrad()) { return passType == PASS_GC ? 1.0f : 1.0f / numCases; } return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases; } float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) { if (_weights->at(inpIdx).isUseGrad()) { return _weights->at(inpIdx).getNumUpdates() > 0; } return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0 : (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f)); } NVMatrix& WeightLayer::getGradTarget(int inpIdx) { return _weights->at(inpIdx).getGrad(); } float WeightLayer::getBGradScale(PASS_TYPE passType) { int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses()); return passType == PASS_GC ? 
1.0f : 1.0f / numCases; } float WeightLayer::getBIncScale() { return _biases->getNumUpdates() > 0; } NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) { return _weights->at(inpIdx).getW(); } NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) { return _biases->getW(); } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) { _wStep = 0.01; _bStep = 0.01; } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType), 1, getActs()); } } void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose(); _prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { _biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType)); } void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose(); float scaleGrad = getGradScale(inpIdx, passType); float scaleInc = getIncScale(inpIdx, passType); getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad); delete &prevActs_T; } void FCLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { // NVMatrix norm2; // Unfortunate extra weight matrix... 
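// Added note (interpretation, not in the original): this is the max-norm ("weight ball") constraint; sumOfSquares along axis 0 gives the squared L2 norm of each weight column (one column per output unit), and HardWeightConstraintOperator presumably maps that to a per-column rescaling factor capping the norm at getWBall().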
_weights->at(i).getW().sumOfSquares(0, _norm2); // norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall())); _norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall())); _weights->at(i).getW().eltwiseMultByVector(_norm2); } } } /* * ======================= * SplitFCLayer * ======================= */ SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : FCLayer(convNetThread, paramsDict, replicaID, useGrad) { _numParts = pyDictGetInt(paramsDict, "parts"); } void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true); NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts); NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts); NVMatrixV& splitTarget = getActs().splitCols(_numParts); NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType), 1, getActs()); } deleteElements(splitInput, true); deleteElements(splitWeights, true); deleteElements(splitTarget, true); } void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose(); _prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]); NVMatrixV& splitV = v.splitCols(_numParts); NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts); NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts); NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1); delete &weights_T; deleteElements(splitV, true); deleteElements(splitWeights_T, true); deleteElements(splitTarget, true); } void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose(); NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts); NVMatrixV& splitV = v.splitCols(_numParts); NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts); NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType)); delete &prevActs_T; deleteElements(splitPrevActs_T, true); deleteElements(splitV, true); deleteElements(splitGradTarget, true); } /* * ======================= * TwoDLayerInterface * ======================= */ TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _imgPixels = _imgSize * _imgSize; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad) : WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) { _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = 
pyDictGetInt(paramsDict, "modules"); } LocalLayer::~LocalLayer() { delete _padding; delete _stride; delete _filterSize; delete _channels; delete _imgSize; delete _groups; delete _filterChannels; delete _filterPixels; delete _imgPixels; } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LocalLayer(convNetThread, paramsDict, replicaID, true) { _sumWidth = pyDictGetInt(paramsDict, "sumWidth"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); _weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin"); _weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax"); } ConvLayer::~ConvLayer() { delete _weightContrastNormMin; delete _weightContrastNormMax; } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(getBiasMatrix(passType)); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(getBiasMatrix(passType)); } } } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { float scaleBGrad = getBGradScale(passType); float scaleInc = getBIncScale(); if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); } else { _biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { assert(_weights->at(inpIdx).isUseGrad()); bool doPartialSum = _sumWidth < _modulesX; NVMatrix& tgt = doPartialSum ? 
_weightGradTmp : _weights->at(inpIdx).getGrad(); float scaleWGrad = getGradScale(inpIdx, passType); float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum; convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad); if (doPartialSum) { scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0; int outWidth = DIVUP(_modulesX, _sumWidth); _weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); } } void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); _weightGradTmp.truncate(); } void ConvLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { float fz = _weights->at(i).getW().getNumRows(); NVMatrix tmp; _weights->at(i).getW().sum(0, tmp); _weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad()); // Now _weights->at(i).getGrad() contains zero-mean filters _weights->at(i).getGrad().apply(NVMatrixOps::Square()); _weights->at(i).getGrad().sum(0, tmp); tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz)); // Now tmp has the stdev _weights->at(i).getW().eltwiseMultByVector(tmp); } // It's pretty silly to do both these things but whatever if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { // NVMatrix norm2; _weights->at(i).getW().sumOfSquares(0, _norm2); // norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall())); _norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall())); _weights->at(i).getW().eltwiseMultByVector(_norm2); } } } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LocalLayer(convNetThread, paramsDict, replicaID, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(getBiasMatrix(passType)); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { _biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType)); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) { float scaleWGrad = getGradScale(inpIdx, passType); float scaleInc = getIncScale(inpIdx, passType); localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), 
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } void LocalUnsharedLayer::_constrainWeights() { for (int i = 0; i < _weights->getSize(); i++) { if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) { normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall()); } } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { NVMatrix& input = *_inputs[0]; input.max(1, _max); input.addVector(_max, -1, getActs()); getActs().apply(NVMatrixOps::Exp()); getActs().sum(1, _sum); getActs().eltwiseDivideByVector(_sum); } void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); LayerV& prev = _prev[replicaIdx]; if (_doUpperGrad) { // Todo: rethink replica IDs or idxes... this here doesn't make a huge amount of sense for (int i = 0; i < _next.size(); ++i) { if (_next[i]->isGradProducer(getName())) { NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); break; } } } else { computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1); } } void SoftmaxLayer::setDoUpperGrad(bool b) { _doUpperGrad = b; } /* * ======================= * ConcatenationLayer * ======================= */ ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets"); _copyOffsets->push_back(_numOutputs); } ConcatenationLayer::~ConcatenationLayer() { delete _copyOffsets; } void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols()); _inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0); } void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view _prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1); delete &copySrc; } /* * ======================= * PassThroughLayer * ======================= */ PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // No-op } void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, 
float scaleTargets, PASS_TYPE passType) { // No-op } bool PassThroughLayer::postInit() { if (Layer::postInit()) { assert(getNumInputReplicas() == 1); for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) { MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs())); MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs())); _prev[0][i]->setMemorySourceActs(getDeviceID(), vActs); _prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad); } return true; } return false; } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } EltwiseSumLayer::~EltwiseSumLayer() { delete _coeffs; } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx)); } void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (inpIdx == 1) { // Nothing is done for inpIdx 0; the first two inputs are combined here _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * DropoutLayer * ======================= * * TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/ DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _enable = pyDictGetInt(paramsDict, "enable"); _keep = pyDictGetFloat(paramsDict, "keep"); } void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_enable && passType == PASS_TRAIN) { _keepMask.resize(*_inputs[inpIdx]); _keepMask.randomizeUniform(); _keepMask.apply(DropoutSmallerThanOperator(_keep)); _inputs[inpIdx]->eltwiseMult(_keepMask, getActs()); } else { _inputs[inpIdx]->copy(getActs()); } } void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { LayerV& prev = _prev[replicaIdx]; if (_enable && passType == PASS_TRAIN) { if (scaleTargets != 0) { v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()), _keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad()); } } else { prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1); } } void DropoutLayer::truncBwdActs() { Layer::truncBwdActs(); _keepMask.truncate(); } /* * ======================= * Dropout2Layer * ======================= * * TODO: optimize away the case when using dropout over relus. Don't need the keepmask. */ Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) { } void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_enable && passType == PASS_TRAIN) { _keepMask.resize(*_inputs[inpIdx]); _keepMask.randomizeUniform(); _keepMask.smallerThanScalar(_keep); _inputs[inpIdx]->eltwiseMult(_keepMask, getActs()); } else { _inputs[inpIdx]->scale(_keep, getActs()); } } void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { LayerV& prev = _prev[replicaIdx]; if (_enable && passType == PASS_TRAIN) { if (scaleTargets != 0) { v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()), _keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad()); } } else { if (scaleTargets != 0) { v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)), prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad()); } else { v.scale(_keep, prev[inpIdx]->getActsGrad()); } } } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); _start = pyDictGetInt(paramsDict, "start"); _end = pyDictGetInt(paramsDict, "end"); _useBuffer = false; _outstandingCopyRequest = false; _convNet = convNet; } DataLayer::~DataLayer() { for (map<int,cudaStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) { checkCudaErrors(cudaStreamDestroy(it->second)); } for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) { if (it->second->getMemorySource().truncate(_name)) { delete &it->second->getMemorySource(); } } _copier->stop(); delete _copier; } void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) { waitForCopyFinish(); if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) { _useBuffer =
!_useBuffer; } for (int i = 0; i < _next.size(); i++) { _next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx)); } } void DataLayer::waitForCopyFinish() { if (_outstandingCopyRequest) { _copyFinishQueue.dequeue(); assert(_copyFinishQueue.getNumElements() == 0); _outstandingCopyRequest = false; } } cudaStream_t DataLayer::getCopyStream(int deviceID) { if (_copyStreams.count(deviceID) == 0) { NVMatrix::setDeviceID(deviceID); checkCudaErrors(cudaStreamCreateWithFlags(&_copyStreams[deviceID], cudaStreamNonBlocking)); } return _copyStreams[deviceID]; } void DataLayer::copyData(CPUData& data, bool other, int passIdx) { assert(!_outstandingCopyRequest); assert(_copyFinishQueue.getNumElements() == 0); _copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx)); _outstandingCopyRequest = true; } int DataLayer::getNumInputReplicas() { return _convNet->getNumReplicasMax() / getNumReplicas(); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { } NVMatrix& DataLayer::getActs(int deviceID) { return getActs(deviceID, false, -1); } NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) { // printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases); assert(_memSrcActs.count(deviceID) > 0); assert(_memSrcActs2.count(deviceID) > 0); return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases)); } ConvNet& DataLayer::getConvNet() { return *_convNet; } bool DataLayer::postInit() { if (Layer::postInit()) { for (int i = 0; i < _next.size(); ++i) { int d = _next[i]->getDeviceID(); if (_memSrcActs2.count(d) == 0) { _memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName()); } } intv cpus = getDeviceCPUs(_next[0]->getDeviceID()); _copier = new DataCopyThread(*this, cpus); _copier->start(); return true; } return false; } bool DataLayer::isGradProducer() { return false; } /* * ======================= * DataCopyThread * ======================= */ DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) { } Queue<DataCopyMessage*>& DataCopyThread::getQueue() { return _queue; } void DataCopyThread::stop() { getQueue().enqueue(new DataCopyExitMessage()); join(); } void* DataCopyThread::run() { NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin()); bool exit = false; while(!exit) { DataCopyMessage& msg = *_queue.dequeue(); exit = msg.getType() == DataCopyMessage::EXIT; if (!exit) { CPUData& data = msg.getData(); int passIdx = msg.getPassIdx(); bool other = msg.isOther(); Matrix& dataMatrix = data.getData(_parent->getDataIdx()); // How many times is this layer going to process microbatches from this minibatch? assert(_parent->getNumReplicasNext() == _parent->getNumReplicas()); int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx); if (microIdx >= 0) { if (_requestTimer.isStarted()) { double requestIntervalMsec = _requestTimer.stop(); // Sleep for up to 1/20th the average request interval _sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0)); } _requestTimer.start(); if (other) { // Sleeping a bit is helpful because in typical nets, copying input data // as soon as it's available will produce contention with other communications // that are happening at the time. 
This is very much a hack, so in the future // it might be good to replace it with something smarter which schedules access // to communication links. usleep(_sleepUsec); } microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas(); // Safer to divup because this way you won't get a minibatch size of 0 int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax()); int microStart = microIdx * microbatchSize; int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize); // Check that this replica has some data. This can be false when, for example, // there are only 7 examples in the minibatch but 8 replicas. if (microStart < microEnd) { assert(dataMatrix.isView() == dataMatrix.isTrans()); int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2); if (dataMatrix.isTrans()) { Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd); // In this case, dataMatrix is a view on memory allocated by Python. _hostMemFwd.copyFromHost(replicaDataMatrix, true); delete &replicaDataMatrix; // view NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd()); for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; // Copy my output to this guy's GPU NVMatrix::setDeviceID(deviceID); // Note to self: this is the path that gets executed in practice // in my models. It does a transpose & copy simultaneously. hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID)); } delete &hostMemFwdSlice; } else { // Hacky way to copy a slice to _hostMemFwd _hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart); Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans()); dataMatrix.sliceCols(microStart, microEnd, tmp); NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd()); for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; // Copy my output to this guy's GPU NVMatrix::setDeviceID(deviceID); hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID)); } delete &hostMemFwdSlice; } for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; NVMatrix::setDeviceID(deviceID); NVMatrix::syncStream(_parent->getCopyStream(deviceID)); } _parent->getConvNet().getDataCopyPD().freePipe(pipe); } else { for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) { int deviceID = *it; _parent->getActs(deviceID, other, 0); } } } _parent->getCopyFinishQueue().enqueue(1); } delete &msg; } return NULL; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) { _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _pool = pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) { std::string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") 
{ return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false); } else if(_pool == "maxabs") { return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true); } else if(_pool == "avg") { return *new AvgPoolLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (_abs) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler()); } else { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler()); } } void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); } /* * ===================== * CrossMapPoolLayer * ===================== */ CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) { _size = pyDictGetInt(paramsDict, "size"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputs = pyDictGetInt(paramsDict, "outputChannels"); _pool = pyDictGetString(paramsDict, "pool"); } CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) { std::string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown pooling layer type ") + _pool; } /* * ===================== * CrossMapMaxPoolLayer * ===================== */ CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) { } void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler()); } void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1); } /* * ===================== * RandomScaleLayer * ===================== */ RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* 
paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _maxScale = pyDictGetFloat(paramsDict, "maxScale"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); // The smallest size the image could be after rescaling _minScaledSize = _imgSize / _maxScale; // The number of discrete scales we're considering int numScales = _imgSize - _minScaledSize + 1; // The total number of squares of size _tgtSize that we can extract // from all these scales double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6; // For each scale, record the fraction of the squares that it has. // This will be the probability of sampling this scale. _scaleProbs.push_back(1.0 / numCrops); for (int s = 1; s < numScales; ++s) { _scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops); } } void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { if (IS_TRAIN(passType)) { // _maxScale is in the range [1, 2) float r = randf; int rescaledSize = _tgtSize; float scaleFactor = _maxScale; // Find which scale we have sampled for (int s = 0; s < _scaleProbs.size(); ++s) { if (r <= _scaleProbs[s]) { rescaledSize += s; float scaleFactorEnd = _imgSize / float(rescaledSize); float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize)); scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart); break; } } assert(rescaledSize >= _tgtSize); int maxStart = rescaledSize - _tgtSize; int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart); if (rescaledSize == _imgSize) { convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX); } else { convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor); convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX); } _rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it. } else if (IS_MULTIVIEW_TEST(passType)) { // for now... 
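// Added note (description of the surrounding branches, not in the original): multi-view test passes are simply forwarded unchanged here; only the plain test branch below performs the deterministic center rescale.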
_inputs[0]->copy(getActs()); } else if (IS_TEST(passType)) { // Test on center patch convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale); } } void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * CropLayer * ===================== */ CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _startX = pyDictGetInt(paramsDict, "startX"); _startY = pyDictGetInt(paramsDict, "startY"); _tgtSize = pyDictGetInt(paramsDict, "sizeX"); } void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX); } void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _hFilter = pyDictGetMatrix(paramsDict, "filter"); } GaussianBlurLayer::~GaussianBlurLayer() { delete _hFilter; } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? 
_actGradsTmp : _prev[replicaIdx][0]->getActsGrad(); convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1); convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { _filter.copyFromHost(*_hFilter, true); } /* * ===================== * HorizontalReflectionLayer * ===================== */ HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { assert(_channels >= 1 && _channels <= 3); } void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convReflectHorizontal(*_inputs[0], getActs(), _imgSize); } void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) { _size = pyDictGetInt(paramsDict, "size"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); _minDiv = pyDictGetFloat(paramsDict, "minDiv"); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv); } void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), 
_prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() { Layer::truncBwdActs(); _denoms.truncate(); } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) { _blocked = pyDictGetInt(paramsDict, "blocked"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { assert(inpIdx == 0); convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) { } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv); } void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); _meanDiffs.truncate(); } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) : Layer(convNetThread, paramsDict, replicaID, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); _numCases = 0; _aggregated = pyDictGetInt(paramsDict, "aggregated") != 0; } float CostLayer::getCoeff() { return _coeff; } void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) { if (_coeff != 0) { Layer::bprop(v, passType, passIdx); } } bool CostLayer::fprop(PASS_TYPE passType, int passIdx) { if (Layer::fprop(passType, passIdx)) { syncStream(); getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL)); return true; } return false; } void CostLayer::fpropCommon(PASS_TYPE passType) { _numCases = Layer::getNumCases(*_inputs[0]); } int CostLayer::getNumCases() { return _numCases; } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { return *new doublev(_costv); } // This is called between microbatches void CostLayer::resetPassIdx() { Layer::resetPassIdx(); _costv.clear(); } CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) { if (type == "cost.crossent") { return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.bce") { return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.dce") { return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID); } else if (type == 
"cost.logreg") { return *new LogregCostLayer(convNetThread, paramsDict, replicaID); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID); } throw std::string("Unknown cost layer type ") + type; } /* * ===================== * CrossEntCostLayer * ===================== */ CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { } void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeCrossEntCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); } } void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID(); if (doWork) { computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * BinomialCrossEntropyCostLayer * ===================== */ BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { _computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate"); _posWeight = pyDictGetFloat(paramsDict, "posWeight"); } void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs); _costv.clear(); // Cross-entropy cost _costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim()); // If aggregated, we don't produce these outputs because they're not additive. // They have no meaning if this is just a partial cost. if (!_aggregated) { // "Correct" classifications. To compute these we threshold probs // and just count the number of entries that agree with labels. 
probs.biggerThanScalar(0.5, _tmpProbs); _tmpProbs.equals(labels); _costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim())); if (_computeSoftmaxErrorRate) { // Also compute top-1 error as if this is softmax and there's only one correct class probs.max(0, _tmpVec); assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis probs.equalsVector(_tmpVec, _correctProbs); _correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present float m = _tmpVec.max(); _correctProbs.eltwiseDivideByVector(_tmpVec); _correctProbs.eltwiseMult(labels); _costv.push_back(numCases - _correctProbs.sum(_tmpbuf)); } } } } void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "neuron" || static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic" || prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas(); if (doWork) { printf("Computing cross-entropy gradient the stupid way\n"); if (scaleTargets == 0) { labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target); } else { labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target); } } } float BinomialCrossEntropyCostLayer::getPosWeight() { return _posWeight; } /* * ===================== * DetectionCrossEntropyCostLayer * ===================== */ DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) { assert(!_aggregated); } void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx); // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getLeadingDim(); /* * Add information sufficient to compute precision and recall for each class. 
*/ // NOTE: _tmpProbs contains ((probs > 0.5) == labels) labels.sum(1, _numPositive); // sum(labels, 1) _tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels) _tmpProbs.sum(1, _numTruePositive); probs.biggerThanScalar(0.5, _tmpProbs); _tmpProbs.sum(1, _numDeclaredPositive); _numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true); _numPositive.copyToHost(_hNumPositive, true); _numTruePositive.copyToHost(_hNumTruePositive, true); for (int i = 0; i < labels.getFollowingDim(); ++i) { _costv.push_back(_hNumDeclaredPositive(i, 0)); // 2 _costv.push_back(_hNumPositive(i, 0)); // 3 _costv.push_back(_hNumTruePositive(i, 0)); // 4 } } } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { _topk = pyDictGetInt(paramsDict, "topk"); // _numAccumed = 0; } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix* probs = _inputs[1]; _doCompute = !IS_MULTIVIEW_TEST(passType); if (!_doCompute) { if (IS_MULTIVIEW_TEST_START(passType)) { if (_probsAccum.count(passIdx) == 0) { _probsAccum[passIdx] = new NVMatrix(*probs); } probs->copy(*_probsAccum[passIdx]); _numAccumed[passIdx] = 1; } else { _probsAccum[passIdx]->add(*probs); _numAccumed[passIdx] += 1; } if (IS_MULTIVIEW_TEST_END(passType)) { probs = _probsAccum[passIdx]; probs->scale(1.0 / _numAccumed[passIdx]); _doCompute = true; } } if (_doCompute) { int numCases = labels.getNumElements(); probs->max(0,_maxProbs); if (_topk == 1) { computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs); } else { computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk); } _costv.clear(); double top1 = _correctProbs.sum(_tmpbuf); _costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf)); _costv.push_back(numCases - top1); _costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf))); } } } NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) { return *_probsAccum[replicaIdx]; } void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { LayerV& prev = _prev[replicaIdx]; NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; NVMatrix& target = prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. 
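// Added note (description of the condition below, not in the original): doWork is false only when an immediately preceding softmax on the same device (and with the same replica count) is the sole consumer of its output; in that case setDoUpperGrad(!doWork) makes the softmax compute the fused softmax+logreg gradient itself.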
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas(); if (prev[1]->getType() == "softmax") { static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork); } if (doWork) { computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } } /* * ===================== * SumOfSquaresCostLayer * ===================== */ SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) { } void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) { _inputs[0]->apply(NVMatrixOps::Square(), getActs()); _costv.clear(); _costv.push_back(getActs().sum()); } void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff); }
4459fec1e31d2685d996855022bae928cb4f32e9.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // // EXAMPLE OF 2D PATTERN CONVOLUTION CHAPTER 7 // // //////////////////////////////////////////////////////////////////////////// // includes CUDA #include <hip/hip_runtime.h> // includes, system #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #define MASK_WIDTH 5 #define CHECK_ERROR(call) { \ hipError_t err = call; \ if (err != hipSuccess) { \ printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); \ exit(err); \ } \ } void printMatrix(float *A, int height, int width) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { printf("%.2f ", A[i*width+j]); } printf("\n"); } printf("\n"); } // compute 2D matrix convolution // each thread computes one output element //////////////////////////////////////////////////////////////////////////////// //! Simple matrix convolution kernel //! @param d_N input data in global memory //! @param d_M input mask data in global memory //! @param d_P output data in global memory //! @param height number of rows of the input matrix N //! @param width number of cols of the input matrix N //////////////////////////////////////////////////////////////////////////////// __global__ void convolution_2D_basic_kernel(float *N, float *M, float *P, int height, int width) { int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; if ( Row < height && Col < width) { int y = Row - (MASK_WIDTH) / 2; int x = Col - (MASK_WIDTH) / 2; float Pvalue = 0.0f; for (int i = 0; i < MASK_WIDTH; i++) { if (y + i >= 0 && y + i < height) { for (int j = 0; j < MASK_WIDTH; j++) { if (x + j >= 0 && x + j < width) { Pvalue += N[(y+i) * width + (x+j)] * M[i * MASK_WIDTH + j]; } } } } P[ Row * width + Col] = Pvalue; } } //////////////////////////////////////////////////////////////////////////////// //! Run a simple matrix convolution for CUDA //////////////////////////////////////////////////////////////////////////////// float convolution_2D_basic(float *h_N, float h_M[MASK_WIDTH][MASK_WIDTH], float *h_P, int height, int width) { float *d_N, *d_M, *d_P; int size = height * width * sizeof(float); int sizeMask_Width = MASK_WIDTH * MASK_WIDTH * sizeof(float); hipEvent_t startTimeCuda, stopTimeCuda; hipEventCreate(&startTimeCuda); hipEventCreate(&stopTimeCuda); //1. Allocate global memory on the device for N, M and P CHECK_ERROR(hipMalloc((void**)&d_N, size)); CHECK_ERROR(hipMalloc((void**)&d_P, size)); CHECK_ERROR(hipMalloc((void**)&d_M, sizeMask_Width)); // copy N and M to device memory hipMemcpy(d_N, h_N, size, hipMemcpyHostToDevice); hipMemcpy(d_M, h_M, sizeMask_Width, hipMemcpyHostToDevice); //2. Kernel launch code - to have the device to perform the actual convolution // ------------------- CUDA COMPUTATION --------------------------- hipEventRecord(startTimeCuda, 0); dim3 dimGrid(ceil(width / 4.0), ceil(height / 4.0), 1); dim3 dimBlock(4.0, 4.0, 1); convolution_2D_basic_kernel << <dimGrid, dimBlock >> >(d_N, d_M, d_P, height, width); hipEventRecord(stopTimeCuda, 0); // ---------------------- CUDA ENDING ----------------------------- hipEventSynchronize(stopTimeCuda); float msTime; hipEventElapsedTime(&msTime, startTimeCuda, stopTimeCuda); printf("KernelTime: %f\n", msTime); //3.
copy P from the device memory hipMemcpy(h_P, d_P, size, hipMemcpyDeviceToHost); // // cleanup memory hipFree(d_N); hipFree(d_M); hipFree(d_P); return msTime; } // Perform 2D convolution on the host void sequential_2D_Conv(float *h_N, float h_M[MASK_WIDTH][MASK_WIDTH], float *h_PS, int height, int width) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { int y = i - (MASK_WIDTH) / 2; int x = j - (MASK_WIDTH) / 2; //printf("y = %d, x = %d\n\n", y, x); float Pvalue = 0.0f; for (int k = 0; k < MASK_WIDTH; k++) { if (y + k >= 0 && y + k < height) { for (int t = 0; t < MASK_WIDTH; t++) { if (x + t >= 0 && x + t < width) { Pvalue += h_N[(y+k)*width + (x+t)] * h_M[k][t]; } } } } h_PS[i*width + j] = Pvalue; } } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("%s Starting...\n\n", argv[0]); float *h_P, *h_N, *h_PS; const int rows = 1024; const int cols = 1024; float msTime, msTime_seq; hipEvent_t startTimeCuda, stopTimeCuda; float h_M[MASK_WIDTH][MASK_WIDTH] = { { 1, 2, 3, 2, 1 }, { 2, 3, 4, 3, 2 }, { 3, 4, 5, 4, 3 }, { 2, 3, 4, 3, 2 }, { 1, 2, 3, 2, 1 } }; hipEventCreate(&startTimeCuda); hipEventCreate(&stopTimeCuda); // allocate memory for host vectors h_N = (float*)malloc(sizeof(float)*rows*cols); // input array h_P = (float*)malloc(sizeof(float)*rows*cols); // output array h_PS = (float*)malloc(sizeof(float)*rows*cols); // output array sequential result /* * NB if you use the random numbers you may consider the tolerance error of approximation * between CPU and GPU * srand(time(NULL)); */ for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { h_N[i*rows + j] = (i+j+1) % 10; //h_N[i] = ((float)rand() / (float)(RAND_MAX)) * 100; } } // ---------------------- PARALLEL CONVOLUTION ------------------------- msTime = convolution_2D_basic(h_N, h_M, h_P, rows, cols); // ---------------------- PERFORM SEQUENTIAL CONVOLUTION ---------------- hipEventRecord(startTimeCuda, 0); sequential_2D_Conv(h_N, h_M, h_PS, rows, cols); hipEventRecord(stopTimeCuda, 0); hipEventSynchronize(stopTimeCuda); hipEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda); printf("HostTime: %f\n", msTime_seq); /* printf("----------------- INPUT MATRIX -----------------\n"); printMatrix(h_N, rows, cols); printf("---------- MATRIX RESULT - SEQUENTIAL ----------\n"); printMatrix(h_PS, rows, cols); printf("---------- MATRIX RESULT - PARALLEL ------------\n"); printMatrix(h_P, rows, cols); */ // check the result for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (h_P[i*rows+j] != h_PS[i*rows+j]) { printf("\x1b[31mError\x1b[0m into result: h_P[%d] = %.2f != %.2f = h_PS[%d]\n", i*rows+j, h_P[i*rows+j], h_PS[i*rows+j], i*rows+j); goto Error; } } } printf("Ok convolution completed with \x1b[32msuccess\x1b[0m!\n\n"); printf("Speedup: %f\n", msTime_seq / msTime); // cleanup memory free(h_N); free(h_P); free(h_PS); #ifdef _WIN32 system("pause"); #endif return 0; Error: free(h_N); free(h_P); free(h_PS); #ifdef _WIN32 system("pause"); #endif return -1; }
4459fec1e31d2685d996855022bae928cb4f32e9.cu
////////////////////////////////////////////////////////////////////////////
//                                                                        //
//            EXAMPLE OF 2D PATTERN CONVOLUTION  CHAPTER 7                //
//                                                                        //
////////////////////////////////////////////////////////////////////////////

// includes CUDA
#include <cuda_runtime.h>

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>

#include <cuda.h>

#define MASK_WIDTH 5

#define CHECK_ERROR(call) { \
    cudaError_t err = call; \
    if (err != cudaSuccess) { \
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
        exit(err); \
    } \
}

void printMatrix(float *A, int height, int width) {
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            printf("%.2f ", A[i*width+j]);   // row-major: the row stride is width
        }
        printf("\n");
    }
    printf("\n");
}

// compute the 2D matrix convolution
// each thread computes one output element
////////////////////////////////////////////////////////////////////////////////
//! Simple matrix convolution kernel
//! @param d_N     input data in global memory
//! @param d_M     input mask data in global memory
//! @param d_P     output data in global memory
//! @param height  number of rows of the input matrix N
//! @param width   number of cols of the input matrix N
////////////////////////////////////////////////////////////////////////////////
__global__ void convolution_2D_basic_kernel(float *N, float *M, float *P, int height, int width) {
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;

    if (Row < height && Col < width) {
        int y = Row - (MASK_WIDTH) / 2;
        int x = Col - (MASK_WIDTH) / 2;

        float Pvalue = 0.0f;
        for (int i = 0; i < MASK_WIDTH; i++) {
            if (y + i >= 0 && y + i < height) {
                for (int j = 0; j < MASK_WIDTH; j++) {
                    if (x + j >= 0 && x + j < width) {
                        Pvalue += N[(y+i) * width + (x+j)] * M[i * MASK_WIDTH + j];
                    }
                }
            }
        }
        P[Row * width + Col] = Pvalue;
    }
}

////////////////////////////////////////////////////////////////////////////////
//! Run a simple matrix convolution for CUDA
////////////////////////////////////////////////////////////////////////////////
float convolution_2D_basic(float *h_N, float h_M[MASK_WIDTH][MASK_WIDTH], float *h_P, int height, int width) {
    float *d_N, *d_M, *d_P;
    int size = height * width * sizeof(float);
    int sizeMask_Width = MASK_WIDTH * MASK_WIDTH * sizeof(float);
    cudaEvent_t startTimeCuda, stopTimeCuda;
    cudaEventCreate(&startTimeCuda);
    cudaEventCreate(&stopTimeCuda);

    // 1. Allocate global memory on the device for N, M and P
    CHECK_ERROR(cudaMalloc((void**)&d_N, size));
    CHECK_ERROR(cudaMalloc((void**)&d_P, size));
    CHECK_ERROR(cudaMalloc((void**)&d_M, sizeMask_Width));

    // copy N and M to device memory
    cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_M, h_M, sizeMask_Width, cudaMemcpyHostToDevice);

    // 2. Kernel launch code - have the device perform the actual convolution
    // ------------------- CUDA COMPUTATION ---------------------------
    cudaEventRecord(startTimeCuda, 0);
    dim3 dimGrid(ceil(width / 4.0), ceil(height / 4.0), 1);
    dim3 dimBlock(4, 4, 1);
    convolution_2D_basic_kernel<<<dimGrid, dimBlock>>>(d_N, d_M, d_P, height, width);
    cudaEventRecord(stopTimeCuda, 0);

    // ---------------------- CUDA ENDING -----------------------------
    cudaEventSynchronize(stopTimeCuda);
    float msTime;
    cudaEventElapsedTime(&msTime, startTimeCuda, stopTimeCuda);
    printf("KernelTime: %f\n", msTime);

    // 3. copy the result P from the device memory
    cudaMemcpy(h_P, d_P, size, cudaMemcpyDeviceToHost);

    // cleanup memory
    cudaFree(d_N);
    cudaFree(d_M);
    cudaFree(d_P);

    return msTime;
}

// Perform 2D convolution on the host
void sequential_2D_Conv(float *h_N, float h_M[MASK_WIDTH][MASK_WIDTH], float *h_PS, int height, int width) {
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            int y = i - (MASK_WIDTH) / 2;
            int x = j - (MASK_WIDTH) / 2;
            //printf("y = %d, x = %d\n\n", y, x);

            float Pvalue = 0.0f;
            for (int k = 0; k < MASK_WIDTH; k++) {
                if (y + k >= 0 && y + k < height) {
                    for (int t = 0; t < MASK_WIDTH; t++) {
                        if (x + t >= 0 && x + t < width) {
                            Pvalue += h_N[(y+k)*width + (x+t)] * h_M[k][t];
                        }
                    }
                }
            }
            h_PS[i*width + j] = Pvalue;
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
    printf("%s Starting...\n\n", argv[0]);

    float *h_P, *h_N, *h_PS;
    const int rows = 1024;
    const int cols = 1024;
    float msTime, msTime_seq;
    cudaEvent_t startTimeCuda, stopTimeCuda;

    float h_M[MASK_WIDTH][MASK_WIDTH] = {
        { 1, 2, 3, 2, 1 },
        { 2, 3, 4, 3, 2 },
        { 3, 4, 5, 4, 3 },
        { 2, 3, 4, 3, 2 },
        { 1, 2, 3, 2, 1 }
    };

    cudaEventCreate(&startTimeCuda);
    cudaEventCreate(&stopTimeCuda);

    // allocate memory for host vectors
    h_N  = (float*)malloc(sizeof(float)*rows*cols);  // input array
    h_P  = (float*)malloc(sizeof(float)*rows*cols);  // output array
    h_PS = (float*)malloc(sizeof(float)*rows*cols);  // output array, sequential result

    /*
     * NB: if you use random numbers you should allow a tolerance for the
     * approximation error between CPU and GPU results
     * srand(time(NULL));
     */
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            h_N[i*cols + j] = (i+j+1) % 10;
            //h_N[i] = ((float)rand() / (float)(RAND_MAX)) * 100;
        }
    }

    // ---------------------- PARALLEL CONVOLUTION -------------------------
    msTime = convolution_2D_basic(h_N, h_M, h_P, rows, cols);

    // ---------------------- PERFORM SEQUENTIAL CONVOLUTION ----------------
    cudaEventRecord(startTimeCuda, 0);
    sequential_2D_Conv(h_N, h_M, h_PS, rows, cols);
    cudaEventRecord(stopTimeCuda, 0);
    cudaEventSynchronize(stopTimeCuda);
    cudaEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda);
    printf("HostTime: %f\n", msTime_seq);

    /*
    printf("----------------- INPUT MATRIX -----------------\n");
    printMatrix(h_N, rows, cols);
    printf("---------- MATRIX RESULT - SEQUENTIAL ----------\n");
    printMatrix(h_PS, rows, cols);
    printf("---------- MATRIX RESULT - PARALLEL ------------\n");
    printMatrix(h_P, rows, cols);
    */

    // check the result
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            if (h_P[i*cols+j] != h_PS[i*cols+j]) {
                printf("\x1b[31mError\x1b[0m in result: h_P[%d] = %.2f != %.2f = h_PS[%d]\n",
                       i*cols+j, h_P[i*cols+j], h_PS[i*cols+j], i*cols+j);
                goto Error;
            }
        }
    }

    printf("Ok convolution completed with \x1b[32msuccess\x1b[0m!\n\n");
    printf("Speedup: %f\n", msTime_seq / msTime);

    // cleanup memory
    free(h_N);
    free(h_P);
    free(h_PS);

#ifdef _WIN32
    system("pause");
#endif

    return 0;

Error:
    free(h_N);
    free(h_P);
    free(h_PS);

#ifdef _WIN32
    system("pause");
#endif

    return -1;
}
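// The basic kernel above fetches the 5x5 mask from global memory on every
// multiply. Because the mask is tiny and read-only, a common variant keeps it
// in __constant__ memory so it is served through the constant cache. A minimal
// sketch of that variant follows; the names M_c and
// convolution_2D_constant_kernel are illustrative, and the host-side setup
// assumes the same MASK_WIDTH and launch configuration used above.
#include <cuda_runtime.h>

#define MASK_WIDTH 5

// Read-only mask placed in constant memory.
__constant__ float M_c[MASK_WIDTH * MASK_WIDTH];

__global__ void convolution_2D_constant_kernel(const float *N, float *P,
                                               int height, int width) {
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if (Row < height && Col < width) {
        int y = Row - MASK_WIDTH / 2;
        int x = Col - MASK_WIDTH / 2;
        float Pvalue = 0.0f;
        for (int i = 0; i < MASK_WIDTH; i++) {
            if (y + i < 0 || y + i >= height) continue;   // skip rows outside N
            for (int j = 0; j < MASK_WIDTH; j++) {
                if (x + j >= 0 && x + j < width) {
                    Pvalue += N[(y + i) * width + (x + j)] * M_c[i * MASK_WIDTH + j];
                }
            }
        }
        P[Row * width + Col] = Pvalue;
    }
}

// Host side: copy the mask once before launching, then drop the d_M argument.
//   cudaMemcpyToSymbol(M_c, h_M, MASK_WIDTH * MASK_WIDTH * sizeof(float));
//   convolution_2D_constant_kernel<<<dimGrid, dimBlock>>>(d_N, d_P, rows, cols);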
4472fcc4dba306dc3c169f992ac8fc9cf469f334.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <time.h> #include <string.h> #include <stdio.h> #include <stdlib.h> // Matlab - mex #include <mex.h> #include <matrix.h> // CUDA #include <hip/hip_runtime_api.h> //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// #define dbg_print mexPrintf //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// template<class T> class CRBM_Data { public: T *data_input; // input data T *data_kernel; // kernel T *h_bias; // bias of hidden layer T *v_bias; // bias of visible layer T *h_sample; // hidden values of h_sample T *h_sample_init; // initialization of hidden layer T *h_state; // the active matrix T *v_sample; // the visible layer values T gauss; // gaussian parameter int H, W; // input image H & W int N; // image number int Wfilter, Hfilter; // kernel W & H int Wres, Hres; // output data W & H int Hstride, Wstride; // stride of H & W int Hpool, Wpool; // pool size of H & W int n_map_v, n_map_h; // map number of v & h char type_input; // type of inputdata int run_on_gpu; // run on GPU (1, default) or CPU (0) public: CRBM_Data(void) { run_on_gpu = 1; init(); } ~CRBM_Data(void) { if( run_on_gpu ) release(); else release_no_free(); } int init(void) { data_input = NULL; data_kernel = NULL; h_bias = NULL; v_bias = NULL; h_sample = NULL; h_sample_init = NULL; h_state = NULL; v_sample = NULL; return 0; } void release(void) { if( data_input != NULL ) delete [] data_input; if( data_kernel != NULL ) delete [] data_kernel; if( h_bias != NULL ) delete [] h_bias; if( v_bias != NULL ) delete [] v_bias; if( h_sample != NULL ) delete [] h_sample; if( h_sample_init != NULL ) delete [] h_sample_init; if( h_state != NULL ) delete [] h_state; if( v_sample != NULL ) delete [] v_sample; data_input = NULL; data_kernel = NULL; h_bias = NULL; v_bias = NULL; h_sample = NULL; h_sample_init = NULL; h_state = NULL; v_sample = NULL; } void release_no_free(void) { data_input = NULL; data_kernel = NULL; h_bias = NULL; v_bias = NULL; h_sample = NULL; h_sample_init = NULL; h_state = NULL; v_sample = NULL; } // NOTE: this function must be called after parameters have been set. void set_data_input(double *di) { if( !run_on_gpu ) { // FIXME: not safe if( sizeof(double) == sizeof(T) ) { data_input = (float*)di; } else { dbg_print("ERR: input data type is wrong! please input double type!\n"); } return; } int n = H*W*n_map_v*N; if( data_input == NULL ) data_input = new T[n]; for(int i=0; i<n; i++) data_input[i] = float(di[i]); } // NOTE: this function must be called after parameters have been set. // FIXME: only call by GPU type void get_data_input(double **di) { int n = H*W*n_map_v*N; for(int i=0; i<n; i++) di[i] = data_input[i]; } // NOTE: this function must be called after parameters have been set. void set_data_kernel(double *di) { if( !run_on_gpu ) { // FIXME: not safe if( sizeof(double) == sizeof(T) ) { data_kernel = (float*)di; } else { dbg_print("ERR: input data type is wrong! please input double type!\n"); } return; } int n = Hfilter*Wfilter*n_map_v*n_map_h; if( data_kernel == NULL ) data_kernel = new T[n]; for(int i=0; i<n; i++) data_kernel[i] = float(di[i]); } // NOTE: this function must be called after parameters have been set. 
// FIXME: only call by GPU type void get_data_kernel(double **di) { int n = Hfilter*Wfilter*n_map_v*n_map_h; for(int i=0; i<n; i++) di[i] = data_kernel[i]; } }; /*** ------------------CUDA CONVOLUTION INFERENCE------------------------- ***/ __global__ void conv_cuda_infer(float *da, float *db, float *dc, int H, int W, int Hres, int Wres, int Hfilter,int Wfilter, int Hstride, int Wstride, int Hpool, int Wpool, int n_map_v, int n_map_h, int ni) { int vmap_idx = blockIdx.x, hmap_idx = blockIdx.y; int conv_xi = threadIdx.x, conv_yi; int ii, jj; float *da_, *db_, *dc_; float sum; // debug //arr_calc[vmap_idx*n_map_h + hmap_idx] = 1; // get array pointers da_ = da + ni*H*W*n_map_v + H*W*vmap_idx; // input data db_ = db + Hfilter*Wfilter*n_map_v*hmap_idx + // conv kernel Hfilter*Wfilter*vmap_idx; dc_ = dc + ni*Hres*Wres*n_map_h*n_map_v + // output data Hres*Wres*n_map_v*hmap_idx + Hres*Wres*vmap_idx; // begin calculation for(conv_yi=0; conv_yi<Hres; conv_yi++) { sum = 0; for(jj =0; jj < Wfilter; jj++) { for(ii = 0; ii<Hfilter; ii++) { sum += da_[conv_yi*Hstride+ii + H*(conv_xi*Wstride+jj)] * db_[ii + jj*Hfilter]; } } dc_[conv_yi+Hres*conv_xi] = sum; } } /*** ------------------CUDA CONVOLUTION RECONSTRUCTION--------------------- ***/ __global__ void conv_cuda_recon(float *da, float *db, float *dc, int H_off, int W_off, int H, int W, int Hfilter,int Wfilter, int Hstride, int Wstride, int Hpool, int Wpool, int n_map_v, int n_map_h, int ni) { int hmap_idx = blockIdx.x, vmap_idx = blockIdx.y; int conv_xi = threadIdx.x, conv_yi; int ii, jj; float *da_, *db_, *dc_; float sum; // get array pointers da_ = da + ni*H_off*W_off*n_map_h + H_off*W_off*hmap_idx; // input data db_ = db + Hfilter*Wfilter*n_map_v*hmap_idx + // conv kernel Hfilter*Wfilter*vmap_idx; dc_ = dc + ni*H*W*n_map_v*n_map_h + // output data H*W*n_map_h*vmap_idx + H*W*hmap_idx; // begin calculation for(conv_yi=0; conv_yi<H; conv_yi++) { sum = 0; for(jj =0; jj < Wfilter; jj++) { for(ii = 0; ii<Hfilter; ii++) { sum += da_[conv_yi*Hstride+ii + H_off*(conv_xi*Wstride+jj)] * db_[Hfilter*Wfilter-1-(ii + jj*Hfilter)]; } } dc_[conv_yi+H*conv_xi] = sum; } } /*** -------------------------CUDA MERGE INFERENCE------------------------- ***/ __global__ void conv_merge_infer(float *dc, float *dh, float *dd, int H, int W, int Hres, int Wres,int Hfilter,int Wfilter, int Hstride, int Wstride, int Hpool, int Wpool, int n_map_v, int n_map_h, int ni, char type_input, float gauss) { int hmap_idx = blockIdx.x, vmap_idx; int jj,ii; float *dc_, *dd_; dd_ = dd + ni*Hres*Wres*n_map_h + Hres*Wres*hmap_idx; // merge maps to single feature map for(vmap_idx = 0; vmap_idx < n_map_v; vmap_idx++) { dc_ = dc + ni*Hres*Wres*n_map_h*n_map_v + Hres*Wres*n_map_v*hmap_idx + Hres*Wres*vmap_idx; for(jj = 0; jj < Wres; jj++) { for(ii = 0; ii < Hres; ii++) { dd_[ii+jj*Hres] += dc_[ii+jj*Hres]; } } } // apply bias for(jj = 0; jj < Wres; jj++) { for(ii = 0; ii < Hres; ii++) { if (type_input == 'B') dd_[ii+jj*Hres] = exp(dd_[ii+jj*Hres] + dh[hmap_idx]); if (type_input == 'G') dd_[ii+jj*Hres] = exp(1.0/(gauss*gauss)*(dd_[ii+jj*Hres] + dh[hmap_idx])); } } } /*** -------------------------CUDA MERGE RECONSTRUCTION---------------------- ***/ __global__ void conv_merge_recon(float *dc, float *dv, float *dd, int H_off, int W_off, int H, int W,int Hfilter,int Wfilter, int Hstride, int Wstride, int Hpool, int Wpool, int n_map_v, int n_map_h, int ni, char type_input) { int vmap_idx = blockIdx.x, hmap_idx; int jj,ii; float *dc_, *dd_; dd_ = dd + ni*H*W*n_map_v + H*W*vmap_idx; // merge maps to 
single feature map for(hmap_idx = 0; hmap_idx < n_map_h; hmap_idx++) { dc_ = dc + ni*H*W*n_map_v*n_map_h + H*W*n_map_h*vmap_idx + H*W*hmap_idx; for(jj = 0; jj < W; jj++) { for(ii = 0; ii < H; ii++) { dd_[ii+jj*H] += dc_[ii+jj*H]; } } } // apply bias for(jj = 0; jj < W; jj++) { for(ii = 0; ii < H; ii++) { if (type_input == 'B') dd_[ii+jj*H] = 1.0/(1.0+exp(-(dd_[ii+jj*H] + dv[vmap_idx]))); if (type_input == 'G') dd_[ii+jj*H] = dd_[ii+jj*H] + dv[vmap_idx]; } } } //BOTTOM-UP: POSITIVE UPDATE void crbm_inference2D(CRBM_Data<float> *p) { int ni, i, j, ii, jj, nh, nv, id, H, W, n_map_v, n_map_h, N, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, Hres, Wres; int *_id; float sum, rnd, pro_sum, gauss; float *block; bool done; char type_input; H = p->H; W = p->W; N = p->N; Hres = p->Hres; Wres = p->Wres; Hpool = p->Hpool; Wpool = p->Wpool; n_map_v = p->n_map_v; n_map_h = p->n_map_h; Hfilter = p->Hfilter; Wfilter = p->Wfilter; Hstride = p->Hstride; Wstride = p->Wstride; gauss = p->gauss; type_input = p->type_input; // Initialize matrixs j = Hres*Wres*n_map_h*N; block = new float[j]; for(i= 0; i< j; i++) block[i] = 0; _id = new int[Hpool*Wpool]; for(i= 0; i< Hpool*Wpool; i++) _id[i] = 0; /***---------------------------CUDA CODE------------------------------***/ int SIZE_IMAGE, SIZE_FILTER, SIZE_OUTPUT; float *da, *db, *dc, *dd, *dh, *fc; j = Hres*Wres*n_map_v*n_map_h*N; fc = new float[j]; for(i=0; i< j; i++) fc[i] = 0; //hipHostMalloc(&fc, sizeof(float)*Hres*Wres*n_map_v*n_map_h*N); //memset(fc, 0, sizeof(float)*Hres*Wres*n_map_v*n_map_h*N); SIZE_IMAGE = H * W * n_map_v * N; SIZE_FILTER = Hfilter * Wfilter * n_map_v * n_map_h; SIZE_OUTPUT = Hres * Wres * n_map_h * N; hipMalloc(&da, sizeof(float) * SIZE_IMAGE); hipMalloc(&db, sizeof(float) * SIZE_FILTER); hipMalloc(&dc, sizeof(float) * SIZE_OUTPUT*n_map_v); hipMalloc(&dd, sizeof(float) * SIZE_OUTPUT); hipMalloc(&dh, sizeof(float) * n_map_h); hipMemcpy(da,p->data_input, sizeof(float)*SIZE_IMAGE, hipMemcpyHostToDevice); hipMemcpy(db,p->data_kernel, sizeof(float)*SIZE_FILTER, hipMemcpyHostToDevice); hipMemcpy(dc,fc, sizeof(float)*SIZE_OUTPUT*n_map_v,hipMemcpyHostToDevice); hipMemcpy(dd,block, sizeof(float)*SIZE_OUTPUT ,hipMemcpyHostToDevice); hipMemcpy(dh,p->h_bias, sizeof(float)*n_map_h, hipMemcpyHostToDevice); dim3 blocks(n_map_v, n_map_h); dim3 threads(Wres, 1); dim3 blocks2(n_map_h, 1); dim3 threads2(1, 1); for(ni=0; ni< N; ni++){ hipLaunchKernelGGL(( conv_cuda_infer), dim3(blocks), dim3(threads), 0, 0, da, db, dc, H, W, Hres, Wres, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, n_map_v,n_map_h, ni); hipLaunchKernelGGL(( conv_merge_infer), dim3(blocks2), dim3(threads2), 0, 0, dc,dh, dd, H, W, Hres, Wres, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, n_map_v, n_map_h, ni, type_input, gauss); } hipMemcpy(block, dd, sizeof(float) * SIZE_OUTPUT, hipMemcpyDeviceToHost); hipFree(da); hipFree(db); hipFree(dc); hipFree(dd); hipFree(dh); //hipHostFree(fc); delete [] fc; /***---------------------------CUDA END------------------------------***/ /*** CONVOLUTION & GET HIDDEN ACTIVATION STATE ***/ for(ni=0; ni< N; ni++){ for(nh=0; nh< n_map_h; nh++){ //GET HIDDEN ACTIVATION STATE for(j=0; j< floor(Wres/Wpool); j++){ for(i=0; i< floor(Hres/Hpool); i++){ sum = 0; for(jj=0; jj< Wpool; jj++){ _id[jj*Hpool] = i*Hpool + (j*Wpool+jj)*Hres + Hres*Wres*nh + Hres*Wres*n_map_h*ni; sum += block[_id[jj*Hpool]]; for(ii=1; ii< Hpool; ii++){ _id[jj*Hpool+ii] = _id[jj*Hpool+ii-1] + 1; sum += block[_id[jj*Hpool+ii]]; } } done = false; rnd = rand() % 10000 / 10000.0; pro_sum 
= 0.0; for(jj=0; jj< Hpool*Wpool; jj++){ p->h_sample[_id[jj]] = block[_id[jj]]/(1.0+sum); pro_sum += p->h_sample[_id[jj]]; //Randomly generate the hidden state: at most one unit is activated if(done == false){ if(pro_sum >= rnd){ p->h_state[_id[jj]] = 1; done = true; } } } } } } } delete [] _id; delete [] block; return; } // UP-DOWN: NEGATIVE UPDATE void crbm_reconstruct2D(CRBM_Data<float> *p) { int ni, i, j, ii, jj, nh, nv, id, H, W, n_map_v, n_map_h, N, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, Hres, Wres, offset_h, offset_w, H_off, W_off; float *h_state_off, *v; char type_input; H = p->H; W = p->W; N = p->N; Hres = p->Hres; Wres = p->Wres; Hpool = p->Hpool; Wpool = p->Wpool; n_map_v = p->n_map_v; n_map_h = p->n_map_h; Hfilter = p->Hfilter; Wfilter = p->Wfilter; Hstride = p->Hstride; Wstride = p->Wstride; type_input = p->type_input; j = H*W*n_map_v*N; v = new float[j]; for(i=0; i< j; i++) v[i] = 0; //extend the matrix of h_state offset_h = (H-1)*Hstride*Hstride+(Hfilter-1)*Hstride+Hfilter-H; offset_w = (W-1)*Wstride*Wstride+(Wfilter-1)*Wstride+Wfilter-W; H_off = Hres + offset_h; W_off = Wres + offset_w; j = H_off*W_off*n_map_h*N; h_state_off = new float[j]; for(i=0; i< j; i++) h_state_off[i] = 0; for(ni=0; ni< N; ni++){ for(nh=0; nh< n_map_h; nh++){ for(j=0; j< Wres; j++){ for(i=0; i< Hres; i++){ h_state_off[i + offset_h/2 + H_off*(j+offset_w/2) + H_off*W_off*nh + H_off*W_off*n_map_h*ni] = p->h_state[i + Hres*j + Hres*Wres*nh + Hres*Wres*n_map_h*ni]; } } } } /***--------------------------CUDA CODE----------------------------***/ if (0) { int SIZE_IMAGE, SIZE_FILTER, SIZE_OUTPUT; float *da, *db, *dc, *dd, *dv, *fc; j = H*W*n_map_h*n_map_v*N; fc = new float[j]; for(i=0; i< j; i++) fc[i] = 0; SIZE_IMAGE = H_off * W_off * n_map_h * N; SIZE_FILTER = Hfilter * Wfilter * n_map_v * n_map_h; SIZE_OUTPUT = H * W * n_map_v * N; hipMalloc(&da, sizeof(float) * SIZE_IMAGE); hipMalloc(&db, sizeof(float) * SIZE_FILTER); hipMalloc(&dc, sizeof(float) * SIZE_OUTPUT*n_map_h); hipMalloc(&dd, sizeof(float) * SIZE_OUTPUT); hipMalloc(&dv, sizeof(float) * n_map_h); hipMemcpy(da,h_state_off, sizeof(float)*SIZE_IMAGE, hipMemcpyHostToDevice); hipMemcpy(db,p->data_kernel, sizeof(float)*SIZE_FILTER, hipMemcpyHostToDevice); hipMemcpy(dc,fc, sizeof(float)*SIZE_OUTPUT*n_map_v,hipMemcpyHostToDevice); hipMemcpy(dd,v, sizeof(float)*SIZE_OUTPUT ,hipMemcpyHostToDevice); hipMemcpy(dv,p->v_bias, sizeof(float)*n_map_h, hipMemcpyHostToDevice); dim3 blocks(n_map_h, n_map_v); dim3 threads(W, 1); dim3 blocks2(n_map_v, 1); dim3 threads2(1, 1); for(ni=0; ni< N; ni++){ hipLaunchKernelGGL(( conv_cuda_recon), dim3(blocks), dim3(threads), 0, 0, da, db, dc, H_off, W_off, H, W, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, n_map_v,n_map_h, ni); hipLaunchKernelGGL(( conv_merge_recon), dim3(blocks2), dim3(threads2), 0, 0, dc,dv, dd, H_off, W_off, H, W, Hfilter, Wfilter, Hstride,Wstride,Hpool,Wpool, n_map_v,n_map_h, ni,type_input); } hipMemcpy(v, dd, sizeof(float) * SIZE_OUTPUT, hipMemcpyDeviceToHost); for(i=0; i< H*W*n_map_v*N; i++) p->v_sample[i] = v[i]; } /***---------------------------CUDA END---------------------------***/ //do the convolution for(ni=0; ni< N; ni++){ for(nv=0; nv< n_map_v; nv++){ for(j=0; j< W; j++){ for(i=0; i< H; i++){ id = i + H*j + H*W*nv + H*W*n_map_v*ni; v[id] = 0; for (nh = 0; nh< n_map_h; nh++){ for (jj = 0; jj< Wfilter; jj++){ for (ii = 0; ii < Hfilter; ii++){ v[id] += h_state_off[(i*Hstride+ii) + H_off*(j*Wstride+jj) + H_off*W_off*nh + H_off*W_off*n_map_h*ni] * 
p->data_kernel[Hfilter*Wfilter-1-(ii+Hfilter*jj) + Hfilter*Wfilter*nv + Hfilter*Wfilter*n_map_v*nh]; } } } v[id] += p->v_bias[nv]; if (type_input == 'B') p->v_sample[id] = 1.0/(1.0+exp(-v[id])); if (type_input == 'G') p->v_sample[id] = v[id]; } } } } delete [] h_state_off; delete [] v; //delete [] fc; return; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /***------------------LOAD DATA FROM MATLAB-------------------***/ const mxArray *model, *layer, *batch_data; double *data_input, *data_kernel, *s_filter, *stride, *pool, *v_bias, *h_bias, *gaussian; mwSize *dim_v, *dim_h; int i, j, ii, jj, ni, nv, nh, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, H, W, Hres, Wres, n_map_v, n_map_h, ndim_v, N, id; mxChar *type; //Check the structure of the fisrt input data if(mxIsStruct(prhs[0]) & mxIsStruct(prhs[1])){ model = prhs[0]; layer = prhs[1]; batch_data = prhs[2]; } else{ mexErrMsgTxt("You must specify two structure arrays!"); } gaussian = mxGetPr(mxGetField(model,0,"start_gau")); type = mxGetChars(mxGetField(layer,0,"type_input")); data_input = mxGetPr(batch_data); data_kernel = mxGetPr(mxGetField(model,0,"W")); s_filter = mxGetPr(mxGetField(layer,0,"s_filter")); stride = mxGetPr(mxGetField(layer,0,"stride")); pool = mxGetPr(mxGetField(layer,0,"s_pool")); n_map_v = mxGetScalar(mxGetField(layer,0,"n_map_v")); n_map_h = mxGetScalar(mxGetField(layer,0,"n_map_h")); dim_v = (mwSize*)mxGetDimensions(mxGetField(model,0,"v_input")); dim_h = (mwSize*)mxGetDimensions(mxGetField(model,0,"h_input")); ndim_v = mxGetNumberOfDimensions(mxGetField(model,0,"v_input")); v_bias = mxGetPr(mxGetField(model,0,"v_bias")); h_bias = mxGetPr(mxGetField(model,0,"h_bias")); // check the number of images if (ndim_v == 4) N = dim_v[3]; else N = 1; CRBM_Data<float> crbm_data; crbm_data.Hfilter = int(s_filter[0]); crbm_data.Wfilter = int(s_filter[1]); crbm_data.Hstride = int(stride[0]); crbm_data.Wstride = int(stride[1]); crbm_data.n_map_v = n_map_v; crbm_data.n_map_h = n_map_h; crbm_data.Hpool = int(pool[0]); crbm_data.Wpool = int(pool[1]); crbm_data.H = int(dim_v[0]); crbm_data.W = int(dim_v[1]); crbm_data.Hres = int(dim_h[0]); crbm_data.Wres = int(dim_h[1]); crbm_data.N = N; crbm_data.type_input = type[0]; crbm_data.gauss = gaussian[0]; Hfilter = int(s_filter[0]); Wfilter = int(s_filter[1]); Hstride = int(stride[0]); Wstride = int(stride[1]); Hpool = int(pool[0]); Wpool = int(pool[1]); H = int(dim_v[0]); W = int(dim_v[1]); Hres = int(dim_h[0]); Wres = int(dim_h[1]); // convert mex data to inner data //crbm_data.set_data_input(&data_input); //crbm_data.set_data_kernel(&data_kernel); j = H*W*n_map_v*N; crbm_data.data_input = new float[j]; for(i=0; i< j; i++) crbm_data.data_input[i] = data_input[i]; j = Hfilter*Wfilter*n_map_v*n_map_h; crbm_data.data_kernel = new float[j]; for(i=0; i< j; i++) crbm_data.data_kernel[i] = data_kernel[i]; // h_sample, h_sample_init, h_state j = crbm_data.Hres*crbm_data.Wres*n_map_h*N; crbm_data.h_sample_init = new float[j]; crbm_data.h_sample = new float[j]; crbm_data.h_state = new float[j]; for(i =0 ; i < j; i++){ crbm_data.h_sample_init[i] = 0; crbm_data.h_sample[i] = 0; crbm_data.h_state[i] = 0; } // v_sample j = crbm_data.H*crbm_data.W*n_map_v*N; crbm_data.v_sample = new float[j]; for(i=0; i< j; i++) crbm_data.v_sample[i] = 0; // h_bias crbm_data.h_bias = new float[n_map_h]; for(i=0; i< n_map_h; i++) crbm_data.h_bias[i] = float(h_bias[i]); // v_bias crbm_data.v_bias = new float[n_map_v]; for(i=0; i< n_map_v; i++) crbm_data.v_bias[i] = float(v_bias[i]); 
/***------------------ GIBBS SAMPLE------------------------ ***/ hipSetDevice(0); // with gpu crbm_inference2D(&crbm_data); j = crbm_data.Hres*crbm_data.Wres*n_map_h*N; for(i=0; i< j; i++) crbm_data.h_sample_init[i] = crbm_data.h_sample[i]; crbm_reconstruct2D(&crbm_data); j = crbm_data.H*crbm_data.W*n_map_v*N; for(i=0; i< j; i++) crbm_data.data_input[i] = crbm_data.v_sample[i]; crbm_inference2D(&crbm_data); /***----------------CALCULATE DW---------------------------***/ double *dW; mxArray *dW_array; mwSize *dim_w; dim_w = (mwSize*)mxGetDimensions(mxGetField(model,0,"W")); dW_array = mxCreateNumericArray(4,dim_w,mxDOUBLE_CLASS, mxREAL); dW = mxGetPr(dW_array); for(ni = 0; ni < N; ni++){ for(nh = 0; nh < n_map_h; nh++){ for (j = 0; j < Wfilter; j++){ for (i = 0; i < Hfilter; i++){ for(nv = 0; nv < n_map_v; nv++){ for(jj = 0; jj < Wres; jj++){ for (ii = 0; ii < Hres; ii++){ id = i + Hfilter*j + Hfilter*Wfilter*nv + Hfilter*Wfilter*n_map_v*nh; dW[id] += (data_input[(ii*Hstride+i) + H*(jj*Wstride+j) + H*W*nv+H*W*n_map_v*ni] * crbm_data.h_sample_init[(ii+Hres*jj) + Hres*Wres*nh + Hres*Wres*n_map_h*ni] - crbm_data.v_sample[(ii*Hstride+i) + H*(jj*Wstride+j) + H*W*nv+H*W*n_map_v*ni] * crbm_data.h_sample[(ii+Hres*jj) + Hres*Wres*nh + Hres*Wres*n_map_h*ni]); } } } } } } } /*-------RETURN: (h_sample, h_sample_init, v_sample, dW) TO MATLAB----------*/ mxArray *h_sample_array, *h_sam_in_array, *v_sample_array; mwSize *dim_hi, *dim_vi; double *h_sample, *h_sample_init, *v_sample; dim_hi = (mwSize*)mxMalloc(sizeof(mwSize)*4); dim_vi = (mwSize*)mxMalloc(sizeof(mwSize)*4); dim_hi[0] = Hres; dim_hi[1] = Wres; dim_hi[2] = n_map_h; dim_hi[3] = N; dim_vi[0] = H; dim_vi[1] = W; dim_vi[2] = n_map_v; dim_vi[3] = N; h_sample_array = mxCreateNumericArray(4,dim_hi,mxDOUBLE_CLASS, mxREAL); h_sample = mxGetPr(h_sample_array); h_sam_in_array = mxCreateNumericArray(4,dim_hi,mxDOUBLE_CLASS, mxREAL); h_sample_init = mxGetPr(h_sam_in_array); v_sample_array = mxCreateNumericArray(4,dim_vi,mxDOUBLE_CLASS, mxREAL); v_sample = mxGetPr(v_sample_array); // set the values to mex type matrix j = Hres*Wres*n_map_h*N; for(i=0; i< j; i++){ h_sample_init[i] = crbm_data.h_sample_init[i]; h_sample[i] = crbm_data.h_sample[i]; } j = H*W*n_map_v*N; for(i=0; i< j; i++) v_sample[i] = crbm_data.v_sample[i]; const char *fieldname[] = {"h_sample","h_sample_init","v_sample","dW"}; mxArray *struct_array; struct_array = plhs[0] = mxCreateStructMatrix(1,1,4,fieldname); mxSetField(struct_array,0,"h_sample",h_sample_array); mxSetField(struct_array,0,"h_sample_init",h_sam_in_array); mxSetField(struct_array,0,"v_sample",v_sample_array); mxSetField(struct_array,0,"dW",dW_array); mxFree(dim_vi); mxFree(dim_hi); }
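// In crbm_inference2D above, conv_merge_infer leaves exp(energy) in `block`,
// and the host loop then normalizes each Hpool x Wpool group so that
// p_k = exp(e_k) / (1 + sum_j exp(e_j)) and samples at most one active hidden
// unit per group (probabilistic max-pooling). A standalone host sketch of that
// per-group step is below; the function name and the reuse of the coarse
// rand()-based draw are illustrative assumptions, not part of the mex file.
#include <math.h>
#include <stdlib.h>

// exp_energy: exp(e_k) for the n hidden units of one pooling group.
// prob:       output probabilities p_k.
// state:      output 0/1 states with at most one unit set.
static void sample_pool_group(const float *exp_energy, float *prob, int *state, int n) {
    float sum = 0.0f;
    for (int k = 0; k < n; k++) sum += exp_energy[k];

    float rnd = (rand() % 10000) / 10000.0f;      // same coarse RNG as the mex code
    float cumulative = 0.0f;
    int done = 0;
    for (int k = 0; k < n; k++) {
        prob[k]  = exp_energy[k] / (1.0f + sum);  // the "+1" is the all-off state
        state[k] = 0;
        cumulative += prob[k];
        if (!done && cumulative >= rnd) {         // at most one unit switches on
            state[k] = 1;
            done = 1;
        }
    }
    // If rnd exceeds the total cumulative probability, no unit activates,
    // which corresponds to the "all off" configuration of the group.
}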
4472fcc4dba306dc3c169f992ac8fc9cf469f334.cu
#include <math.h> #include <time.h> #include <string.h> #include <stdio.h> #include <stdlib.h> // Matlab - mex #include <mex.h> #include <matrix.h> // CUDA #include <cuda_runtime_api.h> //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// #define dbg_print mexPrintf //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// template<class T> class CRBM_Data { public: T *data_input; // input data T *data_kernel; // kernel T *h_bias; // bias of hidden layer T *v_bias; // bias of visible layer T *h_sample; // hidden values of h_sample T *h_sample_init; // initialization of hidden layer T *h_state; // the active matrix T *v_sample; // the visible layer values T gauss; // gaussian parameter int H, W; // input image H & W int N; // image number int Wfilter, Hfilter; // kernel W & H int Wres, Hres; // output data W & H int Hstride, Wstride; // stride of H & W int Hpool, Wpool; // pool size of H & W int n_map_v, n_map_h; // map number of v & h char type_input; // type of inputdata int run_on_gpu; // run on GPU (1, default) or CPU (0) public: CRBM_Data(void) { run_on_gpu = 1; init(); } ~CRBM_Data(void) { if( run_on_gpu ) release(); else release_no_free(); } int init(void) { data_input = NULL; data_kernel = NULL; h_bias = NULL; v_bias = NULL; h_sample = NULL; h_sample_init = NULL; h_state = NULL; v_sample = NULL; return 0; } void release(void) { if( data_input != NULL ) delete [] data_input; if( data_kernel != NULL ) delete [] data_kernel; if( h_bias != NULL ) delete [] h_bias; if( v_bias != NULL ) delete [] v_bias; if( h_sample != NULL ) delete [] h_sample; if( h_sample_init != NULL ) delete [] h_sample_init; if( h_state != NULL ) delete [] h_state; if( v_sample != NULL ) delete [] v_sample; data_input = NULL; data_kernel = NULL; h_bias = NULL; v_bias = NULL; h_sample = NULL; h_sample_init = NULL; h_state = NULL; v_sample = NULL; } void release_no_free(void) { data_input = NULL; data_kernel = NULL; h_bias = NULL; v_bias = NULL; h_sample = NULL; h_sample_init = NULL; h_state = NULL; v_sample = NULL; } // NOTE: this function must be called after parameters have been set. void set_data_input(double *di) { if( !run_on_gpu ) { // FIXME: not safe if( sizeof(double) == sizeof(T) ) { data_input = (float*)di; } else { dbg_print("ERR: input data type is wrong! please input double type!\n"); } return; } int n = H*W*n_map_v*N; if( data_input == NULL ) data_input = new T[n]; for(int i=0; i<n; i++) data_input[i] = float(di[i]); } // NOTE: this function must be called after parameters have been set. // FIXME: only call by GPU type void get_data_input(double **di) { int n = H*W*n_map_v*N; for(int i=0; i<n; i++) di[i] = data_input[i]; } // NOTE: this function must be called after parameters have been set. void set_data_kernel(double *di) { if( !run_on_gpu ) { // FIXME: not safe if( sizeof(double) == sizeof(T) ) { data_kernel = (float*)di; } else { dbg_print("ERR: input data type is wrong! please input double type!\n"); } return; } int n = Hfilter*Wfilter*n_map_v*n_map_h; if( data_kernel == NULL ) data_kernel = new T[n]; for(int i=0; i<n; i++) data_kernel[i] = float(di[i]); } // NOTE: this function must be called after parameters have been set. 
// FIXME: only call by GPU type void get_data_kernel(double **di) { int n = Hfilter*Wfilter*n_map_v*n_map_h; for(int i=0; i<n; i++) di[i] = data_kernel[i]; } }; /*** ------------------CUDA CONVOLUTION INFERENCE------------------------- ***/ __global__ void conv_cuda_infer(float *da, float *db, float *dc, int H, int W, int Hres, int Wres, int Hfilter,int Wfilter, int Hstride, int Wstride, int Hpool, int Wpool, int n_map_v, int n_map_h, int ni) { int vmap_idx = blockIdx.x, hmap_idx = blockIdx.y; int conv_xi = threadIdx.x, conv_yi; int ii, jj; float *da_, *db_, *dc_; float sum; // debug //arr_calc[vmap_idx*n_map_h + hmap_idx] = 1; // get array pointers da_ = da + ni*H*W*n_map_v + H*W*vmap_idx; // input data db_ = db + Hfilter*Wfilter*n_map_v*hmap_idx + // conv kernel Hfilter*Wfilter*vmap_idx; dc_ = dc + ni*Hres*Wres*n_map_h*n_map_v + // output data Hres*Wres*n_map_v*hmap_idx + Hres*Wres*vmap_idx; // begin calculation for(conv_yi=0; conv_yi<Hres; conv_yi++) { sum = 0; for(jj =0; jj < Wfilter; jj++) { for(ii = 0; ii<Hfilter; ii++) { sum += da_[conv_yi*Hstride+ii + H*(conv_xi*Wstride+jj)] * db_[ii + jj*Hfilter]; } } dc_[conv_yi+Hres*conv_xi] = sum; } } /*** ------------------CUDA CONVOLUTION RECONSTRUCTION--------------------- ***/ __global__ void conv_cuda_recon(float *da, float *db, float *dc, int H_off, int W_off, int H, int W, int Hfilter,int Wfilter, int Hstride, int Wstride, int Hpool, int Wpool, int n_map_v, int n_map_h, int ni) { int hmap_idx = blockIdx.x, vmap_idx = blockIdx.y; int conv_xi = threadIdx.x, conv_yi; int ii, jj; float *da_, *db_, *dc_; float sum; // get array pointers da_ = da + ni*H_off*W_off*n_map_h + H_off*W_off*hmap_idx; // input data db_ = db + Hfilter*Wfilter*n_map_v*hmap_idx + // conv kernel Hfilter*Wfilter*vmap_idx; dc_ = dc + ni*H*W*n_map_v*n_map_h + // output data H*W*n_map_h*vmap_idx + H*W*hmap_idx; // begin calculation for(conv_yi=0; conv_yi<H; conv_yi++) { sum = 0; for(jj =0; jj < Wfilter; jj++) { for(ii = 0; ii<Hfilter; ii++) { sum += da_[conv_yi*Hstride+ii + H_off*(conv_xi*Wstride+jj)] * db_[Hfilter*Wfilter-1-(ii + jj*Hfilter)]; } } dc_[conv_yi+H*conv_xi] = sum; } } /*** -------------------------CUDA MERGE INFERENCE------------------------- ***/ __global__ void conv_merge_infer(float *dc, float *dh, float *dd, int H, int W, int Hres, int Wres,int Hfilter,int Wfilter, int Hstride, int Wstride, int Hpool, int Wpool, int n_map_v, int n_map_h, int ni, char type_input, float gauss) { int hmap_idx = blockIdx.x, vmap_idx; int jj,ii; float *dc_, *dd_; dd_ = dd + ni*Hres*Wres*n_map_h + Hres*Wres*hmap_idx; // merge maps to single feature map for(vmap_idx = 0; vmap_idx < n_map_v; vmap_idx++) { dc_ = dc + ni*Hres*Wres*n_map_h*n_map_v + Hres*Wres*n_map_v*hmap_idx + Hres*Wres*vmap_idx; for(jj = 0; jj < Wres; jj++) { for(ii = 0; ii < Hres; ii++) { dd_[ii+jj*Hres] += dc_[ii+jj*Hres]; } } } // apply bias for(jj = 0; jj < Wres; jj++) { for(ii = 0; ii < Hres; ii++) { if (type_input == 'B') dd_[ii+jj*Hres] = exp(dd_[ii+jj*Hres] + dh[hmap_idx]); if (type_input == 'G') dd_[ii+jj*Hres] = exp(1.0/(gauss*gauss)*(dd_[ii+jj*Hres] + dh[hmap_idx])); } } } /*** -------------------------CUDA MERGE RECONSTRUCTION---------------------- ***/ __global__ void conv_merge_recon(float *dc, float *dv, float *dd, int H_off, int W_off, int H, int W,int Hfilter,int Wfilter, int Hstride, int Wstride, int Hpool, int Wpool, int n_map_v, int n_map_h, int ni, char type_input) { int vmap_idx = blockIdx.x, hmap_idx; int jj,ii; float *dc_, *dd_; dd_ = dd + ni*H*W*n_map_v + H*W*vmap_idx; // merge maps to 
single feature map for(hmap_idx = 0; hmap_idx < n_map_h; hmap_idx++) { dc_ = dc + ni*H*W*n_map_v*n_map_h + H*W*n_map_h*vmap_idx + H*W*hmap_idx; for(jj = 0; jj < W; jj++) { for(ii = 0; ii < H; ii++) { dd_[ii+jj*H] += dc_[ii+jj*H]; } } } // apply bias for(jj = 0; jj < W; jj++) { for(ii = 0; ii < H; ii++) { if (type_input == 'B') dd_[ii+jj*H] = 1.0/(1.0+exp(-(dd_[ii+jj*H] + dv[vmap_idx]))); if (type_input == 'G') dd_[ii+jj*H] = dd_[ii+jj*H] + dv[vmap_idx]; } } } //BOTTOM-UP: POSITIVE UPDATE void crbm_inference2D(CRBM_Data<float> *p) { int ni, i, j, ii, jj, nh, nv, id, H, W, n_map_v, n_map_h, N, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, Hres, Wres; int *_id; float sum, rnd, pro_sum, gauss; float *block; bool done; char type_input; H = p->H; W = p->W; N = p->N; Hres = p->Hres; Wres = p->Wres; Hpool = p->Hpool; Wpool = p->Wpool; n_map_v = p->n_map_v; n_map_h = p->n_map_h; Hfilter = p->Hfilter; Wfilter = p->Wfilter; Hstride = p->Hstride; Wstride = p->Wstride; gauss = p->gauss; type_input = p->type_input; // Initialize matrixs j = Hres*Wres*n_map_h*N; block = new float[j]; for(i= 0; i< j; i++) block[i] = 0; _id = new int[Hpool*Wpool]; for(i= 0; i< Hpool*Wpool; i++) _id[i] = 0; /***---------------------------CUDA CODE------------------------------***/ int SIZE_IMAGE, SIZE_FILTER, SIZE_OUTPUT; float *da, *db, *dc, *dd, *dh, *fc; j = Hres*Wres*n_map_v*n_map_h*N; fc = new float[j]; for(i=0; i< j; i++) fc[i] = 0; //cudaMallocHost(&fc, sizeof(float)*Hres*Wres*n_map_v*n_map_h*N); //memset(fc, 0, sizeof(float)*Hres*Wres*n_map_v*n_map_h*N); SIZE_IMAGE = H * W * n_map_v * N; SIZE_FILTER = Hfilter * Wfilter * n_map_v * n_map_h; SIZE_OUTPUT = Hres * Wres * n_map_h * N; cudaMalloc(&da, sizeof(float) * SIZE_IMAGE); cudaMalloc(&db, sizeof(float) * SIZE_FILTER); cudaMalloc(&dc, sizeof(float) * SIZE_OUTPUT*n_map_v); cudaMalloc(&dd, sizeof(float) * SIZE_OUTPUT); cudaMalloc(&dh, sizeof(float) * n_map_h); cudaMemcpy(da,p->data_input, sizeof(float)*SIZE_IMAGE, cudaMemcpyHostToDevice); cudaMemcpy(db,p->data_kernel, sizeof(float)*SIZE_FILTER, cudaMemcpyHostToDevice); cudaMemcpy(dc,fc, sizeof(float)*SIZE_OUTPUT*n_map_v,cudaMemcpyHostToDevice); cudaMemcpy(dd,block, sizeof(float)*SIZE_OUTPUT ,cudaMemcpyHostToDevice); cudaMemcpy(dh,p->h_bias, sizeof(float)*n_map_h, cudaMemcpyHostToDevice); dim3 blocks(n_map_v, n_map_h); dim3 threads(Wres, 1); dim3 blocks2(n_map_h, 1); dim3 threads2(1, 1); for(ni=0; ni< N; ni++){ conv_cuda_infer<<<blocks, threads>>>(da, db, dc, H, W, Hres, Wres, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, n_map_v,n_map_h, ni); conv_merge_infer<<<blocks2, threads2>>>(dc,dh, dd, H, W, Hres, Wres, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, n_map_v, n_map_h, ni, type_input, gauss); } cudaMemcpy(block, dd, sizeof(float) * SIZE_OUTPUT, cudaMemcpyDeviceToHost); cudaFree(da); cudaFree(db); cudaFree(dc); cudaFree(dd); cudaFree(dh); //cudaFreeHost(fc); delete [] fc; /***---------------------------CUDA END------------------------------***/ /*** CONVOLUTION & GET HIDDEN ACTIVATION STATE ***/ for(ni=0; ni< N; ni++){ for(nh=0; nh< n_map_h; nh++){ //GET HIDDEN ACTIVATION STATE for(j=0; j< floor(Wres/Wpool); j++){ for(i=0; i< floor(Hres/Hpool); i++){ sum = 0; for(jj=0; jj< Wpool; jj++){ _id[jj*Hpool] = i*Hpool + (j*Wpool+jj)*Hres + Hres*Wres*nh + Hres*Wres*n_map_h*ni; sum += block[_id[jj*Hpool]]; for(ii=1; ii< Hpool; ii++){ _id[jj*Hpool+ii] = _id[jj*Hpool+ii-1] + 1; sum += block[_id[jj*Hpool+ii]]; } } done = false; rnd = rand() % 10000 / 10000.0; pro_sum = 0.0; for(jj=0; jj< Hpool*Wpool; jj++){ 
p->h_sample[_id[jj]] = block[_id[jj]]/(1.0+sum); pro_sum += p->h_sample[_id[jj]]; //Randomly generate the hidden state: at most one unit is activated if(done == false){ if(pro_sum >= rnd){ p->h_state[_id[jj]] = 1; done = true; } } } } } } } delete [] _id; delete [] block; return; } // UP-DOWN: NEGATIVE UPDATE void crbm_reconstruct2D(CRBM_Data<float> *p) { int ni, i, j, ii, jj, nh, nv, id, H, W, n_map_v, n_map_h, N, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, Hres, Wres, offset_h, offset_w, H_off, W_off; float *h_state_off, *v; char type_input; H = p->H; W = p->W; N = p->N; Hres = p->Hres; Wres = p->Wres; Hpool = p->Hpool; Wpool = p->Wpool; n_map_v = p->n_map_v; n_map_h = p->n_map_h; Hfilter = p->Hfilter; Wfilter = p->Wfilter; Hstride = p->Hstride; Wstride = p->Wstride; type_input = p->type_input; j = H*W*n_map_v*N; v = new float[j]; for(i=0; i< j; i++) v[i] = 0; //extend the matrix of h_state offset_h = (H-1)*Hstride*Hstride+(Hfilter-1)*Hstride+Hfilter-H; offset_w = (W-1)*Wstride*Wstride+(Wfilter-1)*Wstride+Wfilter-W; H_off = Hres + offset_h; W_off = Wres + offset_w; j = H_off*W_off*n_map_h*N; h_state_off = new float[j]; for(i=0; i< j; i++) h_state_off[i] = 0; for(ni=0; ni< N; ni++){ for(nh=0; nh< n_map_h; nh++){ for(j=0; j< Wres; j++){ for(i=0; i< Hres; i++){ h_state_off[i + offset_h/2 + H_off*(j+offset_w/2) + H_off*W_off*nh + H_off*W_off*n_map_h*ni] = p->h_state[i + Hres*j + Hres*Wres*nh + Hres*Wres*n_map_h*ni]; } } } } /***--------------------------CUDA CODE----------------------------***/ if (0) { int SIZE_IMAGE, SIZE_FILTER, SIZE_OUTPUT; float *da, *db, *dc, *dd, *dv, *fc; j = H*W*n_map_h*n_map_v*N; fc = new float[j]; for(i=0; i< j; i++) fc[i] = 0; SIZE_IMAGE = H_off * W_off * n_map_h * N; SIZE_FILTER = Hfilter * Wfilter * n_map_v * n_map_h; SIZE_OUTPUT = H * W * n_map_v * N; cudaMalloc(&da, sizeof(float) * SIZE_IMAGE); cudaMalloc(&db, sizeof(float) * SIZE_FILTER); cudaMalloc(&dc, sizeof(float) * SIZE_OUTPUT*n_map_h); cudaMalloc(&dd, sizeof(float) * SIZE_OUTPUT); cudaMalloc(&dv, sizeof(float) * n_map_h); cudaMemcpy(da,h_state_off, sizeof(float)*SIZE_IMAGE, cudaMemcpyHostToDevice); cudaMemcpy(db,p->data_kernel, sizeof(float)*SIZE_FILTER, cudaMemcpyHostToDevice); cudaMemcpy(dc,fc, sizeof(float)*SIZE_OUTPUT*n_map_v,cudaMemcpyHostToDevice); cudaMemcpy(dd,v, sizeof(float)*SIZE_OUTPUT ,cudaMemcpyHostToDevice); cudaMemcpy(dv,p->v_bias, sizeof(float)*n_map_h, cudaMemcpyHostToDevice); dim3 blocks(n_map_h, n_map_v); dim3 threads(W, 1); dim3 blocks2(n_map_v, 1); dim3 threads2(1, 1); for(ni=0; ni< N; ni++){ conv_cuda_recon<<<blocks, threads>>>(da, db, dc, H_off, W_off, H, W, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, n_map_v,n_map_h, ni); conv_merge_recon<<<blocks2, threads2>>>(dc,dv, dd, H_off, W_off, H, W, Hfilter, Wfilter, Hstride,Wstride,Hpool,Wpool, n_map_v,n_map_h, ni,type_input); } cudaMemcpy(v, dd, sizeof(float) * SIZE_OUTPUT, cudaMemcpyDeviceToHost); for(i=0; i< H*W*n_map_v*N; i++) p->v_sample[i] = v[i]; } /***---------------------------CUDA END---------------------------***/ //do the convolution for(ni=0; ni< N; ni++){ for(nv=0; nv< n_map_v; nv++){ for(j=0; j< W; j++){ for(i=0; i< H; i++){ id = i + H*j + H*W*nv + H*W*n_map_v*ni; v[id] = 0; for (nh = 0; nh< n_map_h; nh++){ for (jj = 0; jj< Wfilter; jj++){ for (ii = 0; ii < Hfilter; ii++){ v[id] += h_state_off[(i*Hstride+ii) + H_off*(j*Wstride+jj) + H_off*W_off*nh + H_off*W_off*n_map_h*ni] * p->data_kernel[Hfilter*Wfilter-1-(ii+Hfilter*jj) + Hfilter*Wfilter*nv + Hfilter*Wfilter*n_map_v*nh]; } } } v[id] += p->v_bias[nv]; if 
(type_input == 'B') p->v_sample[id] = 1.0/(1.0+exp(-v[id])); if (type_input == 'G') p->v_sample[id] = v[id]; } } } } delete [] h_state_off; delete [] v; //delete [] fc; return; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /***------------------LOAD DATA FROM MATLAB-------------------***/ const mxArray *model, *layer, *batch_data; double *data_input, *data_kernel, *s_filter, *stride, *pool, *v_bias, *h_bias, *gaussian; mwSize *dim_v, *dim_h; int i, j, ii, jj, ni, nv, nh, Hfilter, Wfilter, Hstride, Wstride, Hpool, Wpool, H, W, Hres, Wres, n_map_v, n_map_h, ndim_v, N, id; mxChar *type; //Check the structure of the fisrt input data if(mxIsStruct(prhs[0]) & mxIsStruct(prhs[1])){ model = prhs[0]; layer = prhs[1]; batch_data = prhs[2]; } else{ mexErrMsgTxt("You must specify two structure arrays!"); } gaussian = mxGetPr(mxGetField(model,0,"start_gau")); type = mxGetChars(mxGetField(layer,0,"type_input")); data_input = mxGetPr(batch_data); data_kernel = mxGetPr(mxGetField(model,0,"W")); s_filter = mxGetPr(mxGetField(layer,0,"s_filter")); stride = mxGetPr(mxGetField(layer,0,"stride")); pool = mxGetPr(mxGetField(layer,0,"s_pool")); n_map_v = mxGetScalar(mxGetField(layer,0,"n_map_v")); n_map_h = mxGetScalar(mxGetField(layer,0,"n_map_h")); dim_v = (mwSize*)mxGetDimensions(mxGetField(model,0,"v_input")); dim_h = (mwSize*)mxGetDimensions(mxGetField(model,0,"h_input")); ndim_v = mxGetNumberOfDimensions(mxGetField(model,0,"v_input")); v_bias = mxGetPr(mxGetField(model,0,"v_bias")); h_bias = mxGetPr(mxGetField(model,0,"h_bias")); // check the number of images if (ndim_v == 4) N = dim_v[3]; else N = 1; CRBM_Data<float> crbm_data; crbm_data.Hfilter = int(s_filter[0]); crbm_data.Wfilter = int(s_filter[1]); crbm_data.Hstride = int(stride[0]); crbm_data.Wstride = int(stride[1]); crbm_data.n_map_v = n_map_v; crbm_data.n_map_h = n_map_h; crbm_data.Hpool = int(pool[0]); crbm_data.Wpool = int(pool[1]); crbm_data.H = int(dim_v[0]); crbm_data.W = int(dim_v[1]); crbm_data.Hres = int(dim_h[0]); crbm_data.Wres = int(dim_h[1]); crbm_data.N = N; crbm_data.type_input = type[0]; crbm_data.gauss = gaussian[0]; Hfilter = int(s_filter[0]); Wfilter = int(s_filter[1]); Hstride = int(stride[0]); Wstride = int(stride[1]); Hpool = int(pool[0]); Wpool = int(pool[1]); H = int(dim_v[0]); W = int(dim_v[1]); Hres = int(dim_h[0]); Wres = int(dim_h[1]); // convert mex data to inner data //crbm_data.set_data_input(&data_input); //crbm_data.set_data_kernel(&data_kernel); j = H*W*n_map_v*N; crbm_data.data_input = new float[j]; for(i=0; i< j; i++) crbm_data.data_input[i] = data_input[i]; j = Hfilter*Wfilter*n_map_v*n_map_h; crbm_data.data_kernel = new float[j]; for(i=0; i< j; i++) crbm_data.data_kernel[i] = data_kernel[i]; // h_sample, h_sample_init, h_state j = crbm_data.Hres*crbm_data.Wres*n_map_h*N; crbm_data.h_sample_init = new float[j]; crbm_data.h_sample = new float[j]; crbm_data.h_state = new float[j]; for(i =0 ; i < j; i++){ crbm_data.h_sample_init[i] = 0; crbm_data.h_sample[i] = 0; crbm_data.h_state[i] = 0; } // v_sample j = crbm_data.H*crbm_data.W*n_map_v*N; crbm_data.v_sample = new float[j]; for(i=0; i< j; i++) crbm_data.v_sample[i] = 0; // h_bias crbm_data.h_bias = new float[n_map_h]; for(i=0; i< n_map_h; i++) crbm_data.h_bias[i] = float(h_bias[i]); // v_bias crbm_data.v_bias = new float[n_map_v]; for(i=0; i< n_map_v; i++) crbm_data.v_bias[i] = float(v_bias[i]); /***------------------ GIBBS SAMPLE------------------------ ***/ cudaSetDevice(0); // with gpu crbm_inference2D(&crbm_data); j = 
crbm_data.Hres*crbm_data.Wres*n_map_h*N; for(i=0; i< j; i++) crbm_data.h_sample_init[i] = crbm_data.h_sample[i]; crbm_reconstruct2D(&crbm_data); j = crbm_data.H*crbm_data.W*n_map_v*N; for(i=0; i< j; i++) crbm_data.data_input[i] = crbm_data.v_sample[i]; crbm_inference2D(&crbm_data); /***----------------CALCULATE DW---------------------------***/ double *dW; mxArray *dW_array; mwSize *dim_w; dim_w = (mwSize*)mxGetDimensions(mxGetField(model,0,"W")); dW_array = mxCreateNumericArray(4,dim_w,mxDOUBLE_CLASS, mxREAL); dW = mxGetPr(dW_array); for(ni = 0; ni < N; ni++){ for(nh = 0; nh < n_map_h; nh++){ for (j = 0; j < Wfilter; j++){ for (i = 0; i < Hfilter; i++){ for(nv = 0; nv < n_map_v; nv++){ for(jj = 0; jj < Wres; jj++){ for (ii = 0; ii < Hres; ii++){ id = i + Hfilter*j + Hfilter*Wfilter*nv + Hfilter*Wfilter*n_map_v*nh; dW[id] += (data_input[(ii*Hstride+i) + H*(jj*Wstride+j) + H*W*nv+H*W*n_map_v*ni] * crbm_data.h_sample_init[(ii+Hres*jj) + Hres*Wres*nh + Hres*Wres*n_map_h*ni] - crbm_data.v_sample[(ii*Hstride+i) + H*(jj*Wstride+j) + H*W*nv+H*W*n_map_v*ni] * crbm_data.h_sample[(ii+Hres*jj) + Hres*Wres*nh + Hres*Wres*n_map_h*ni]); } } } } } } } /*-------RETURN: (h_sample, h_sample_init, v_sample, dW) TO MATLAB----------*/ mxArray *h_sample_array, *h_sam_in_array, *v_sample_array; mwSize *dim_hi, *dim_vi; double *h_sample, *h_sample_init, *v_sample; dim_hi = (mwSize*)mxMalloc(sizeof(mwSize)*4); dim_vi = (mwSize*)mxMalloc(sizeof(mwSize)*4); dim_hi[0] = Hres; dim_hi[1] = Wres; dim_hi[2] = n_map_h; dim_hi[3] = N; dim_vi[0] = H; dim_vi[1] = W; dim_vi[2] = n_map_v; dim_vi[3] = N; h_sample_array = mxCreateNumericArray(4,dim_hi,mxDOUBLE_CLASS, mxREAL); h_sample = mxGetPr(h_sample_array); h_sam_in_array = mxCreateNumericArray(4,dim_hi,mxDOUBLE_CLASS, mxREAL); h_sample_init = mxGetPr(h_sam_in_array); v_sample_array = mxCreateNumericArray(4,dim_vi,mxDOUBLE_CLASS, mxREAL); v_sample = mxGetPr(v_sample_array); // set the values to mex type matrix j = Hres*Wres*n_map_h*N; for(i=0; i< j; i++){ h_sample_init[i] = crbm_data.h_sample_init[i]; h_sample[i] = crbm_data.h_sample[i]; } j = H*W*n_map_v*N; for(i=0; i< j; i++) v_sample[i] = crbm_data.v_sample[i]; const char *fieldname[] = {"h_sample","h_sample_init","v_sample","dW"}; mxArray *struct_array; struct_array = plhs[0] = mxCreateStructMatrix(1,1,4,fieldname); mxSetField(struct_array,0,"h_sample",h_sample_array); mxSetField(struct_array,0,"h_sample_init",h_sam_in_array); mxSetField(struct_array,0,"v_sample",v_sample_array); mxSetField(struct_array,0,"dW",dW_array); mxFree(dim_vi); mxFree(dim_hi); }
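// crbm_inference2D and crbm_reconstruct2D above call cudaMalloc/cudaMemcpy and
// launch kernels without inspecting any return status, so an allocation or
// launch failure would silently produce wrong h_sample/v_sample values. A
// minimal checking wrapper in the spirit of the CHECK_ERROR macro used by the
// convolution example earlier in this collection is sketched below; the macro
// name and the choice of mexErrMsgTxt for reporting are assumptions.
#include <cuda_runtime_api.h>
#include <mex.h>

#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            mexPrintf("CUDA error %s at %s:%d\n",                             \
                      cudaGetErrorString(err_), __FILE__, __LINE__);          \
            mexErrMsgTxt("CUDA call failed");                                 \
        }                                                                     \
    } while (0)

// Possible use inside crbm_inference2D:
//   CUDA_CHECK(cudaMalloc(&da, sizeof(float) * SIZE_IMAGE));
//   CUDA_CHECK(cudaMemcpy(da, p->data_input, sizeof(float) * SIZE_IMAGE,
//                         cudaMemcpyHostToDevice));
//   conv_cuda_infer<<<blocks, threads>>>(...);
//   CUDA_CHECK(cudaGetLastError());       // catches launch-configuration errors
//   CUDA_CHECK(cudaDeviceSynchronize());  // surfaces asynchronous kernel errors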
a6ab8cecc7aedf985637a385b74a8a95793e7c6a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/strings/convert/convert_datetime.hpp> #include <cudf/structs/struct_view.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/experimental/row_operators.cuh> #include <cudf/table/table_device_view.cuh> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/detail/column_utilities.hpp> #include <rmm/exec_policy.hpp> #include <thrust/copy.h> #include <thrust/distance.h> #include <thrust/equal.h> #include <thrust/execution_policy.h> #include <thrust/generate.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/logical.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/scan.h> #include <thrust/scatter.h> #include <thrust/sequence.h> #include <thrust/transform.h> #include <numeric> #include <sstream> namespace cudf { namespace test { namespace { std::unique_ptr<column> generate_all_row_indices(size_type num_rows) { auto indices = cudf::make_fixed_width_column(data_type{type_id::INT32}, num_rows, mask_state::UNALLOCATED); thrust::sequence(rmm::exec_policy(cudf::get_default_stream()), indices->mutable_view().begin<size_type>(), indices->mutable_view().end<size_type>(), 0); return indices; } // generate the rows indices that should be checked for the child column of a list column. // // - if we are just checking for equivalence, we can skip any rows that are nulls. this allows // things like non-empty rows that have been nullified after creation. they may actually contain // values, but since the row is null they don't matter for equivalency. // // - if we are checking for exact equality, we need to check all rows. // // This allows us to differentiate between: // // List<int32_t>: // Length : 1 // Offsets : 0, 4 // Null count: 1 // 0 // 0, 1, 2, 3 // // List<int32_t>: // Length : 1 // Offsets : 0, 0 // Null count: 1 // 0 // std::unique_ptr<column> generate_child_row_indices(lists_column_view const& c, column_view const& row_indices, bool check_exact_equality) { // if we are checking for exact equality, we should be checking for "unsanitized" data that may // be hiding underneath nulls. 
so check all rows instead of just non-null rows if (check_exact_equality) { return generate_all_row_indices(c.get_sliced_child(cudf::get_default_stream()).size()); } // Example input // List<int32_t>: // Length : 7 // Offsets : 0, 3, 6, 8, 11, 14, 16, 19 // | | <-- non-null input rows // Null count: 5 // 0010100 // 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 7 // | | | | | <-- child rows of non-null rows // // Desired output: [6, 7, 11, 12, 13] // compute total # of child row indices we will be emitting. auto row_size_iter = cudf::detail::make_counting_transform_iterator( 0, [row_indices = row_indices.begin<size_type>(), validity = c.null_mask(), offsets = c.offsets().begin<offset_type>(), offset = c.offset()] __device__(int index) { // both null mask and offsets data are not pre-sliced. so we need to add the column offset to // every incoming index. auto const true_index = row_indices[index] + offset; return !validity || cudf::bit_is_set(validity, true_index) ? (offsets[true_index + 1] - offsets[true_index]) : 0; }); auto const output_size = thrust::reduce(rmm::exec_policy(cudf::get_default_stream()), row_size_iter, row_size_iter + row_indices.size()); // no output. done. auto result = cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED); if (output_size == 0) { return result; } // for all input rows, what position in the output column they will start at. // // output_row_start = [0, 0, 0, 2, 2, 5, 5] // | | <-- non-null input rows // auto output_row_start = cudf::make_fixed_width_column( data_type{type_id::INT32}, row_indices.size(), mask_state::UNALLOCATED); thrust::exclusive_scan(rmm::exec_policy(cudf::get_default_stream()), row_size_iter, row_size_iter + row_indices.size(), output_row_start->mutable_view().begin<size_type>()); // fill result column with 1s // // result = [1, 1, 1, 1, 1] // thrust::generate(rmm::exec_policy(cudf::get_default_stream()), result->mutable_view().begin<size_type>(), result->mutable_view().end<size_type>(), [] __device__() { return 1; }); // scatter the output row positions into result buffer // // result = [6, 1, 11, 1, 1] // auto output_row_iter = cudf::detail::make_counting_transform_iterator( 0, [row_indices = row_indices.begin<size_type>(), offsets = c.offsets().begin<offset_type>(), offset = c.offset(), first_offset = cudf::detail::get_value<offset_type>( c.offsets(), c.offset(), cudf::get_default_stream())] __device__(int index) { auto const true_index = row_indices[index] + offset; return offsets[true_index] - first_offset; }); thrust::scatter_if(rmm::exec_policy(cudf::get_default_stream()), output_row_iter, output_row_iter + row_indices.size(), output_row_start->view().begin<size_type>(), row_size_iter, result->mutable_view().begin<size_type>(), [] __device__(auto row_size) { return row_size != 0; }); // generate keys for each output row // // result = [1, 1, 2, 2, 2] // auto keys = cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED); thrust::generate(rmm::exec_policy(cudf::get_default_stream()), keys->mutable_view().begin<size_type>(), keys->mutable_view().end<size_type>(), [] __device__() { return 0; }); thrust::scatter_if(rmm::exec_policy(cudf::get_default_stream()), row_size_iter, row_size_iter + row_indices.size(), output_row_start->view().begin<size_type>(), row_size_iter, keys->mutable_view().begin<size_type>(), [] __device__(auto row_size) { return row_size != 0; }); thrust::inclusive_scan(rmm::exec_policy(cudf::get_default_stream()), 
keys->view().begin<size_type>(), keys->view().end<size_type>(), keys->mutable_view().begin<size_type>()); // scan by key to generate final child row indices. // input // result = [6, 1, 11, 1, 1] // keys = [1, 1, 2, 2, 2] // // output // result = [6, 7, 11, 12, 13] // thrust::inclusive_scan_by_key(rmm::exec_policy(cudf::get_default_stream()), keys->view().begin<size_type>(), keys->view().end<size_type>(), result->view().begin<size_type>(), result->mutable_view().begin<size_type>()); return result; } #define PROP_EXPECT_EQ(a, b) \ do { \ if (verbosity == debug_output_level::QUIET) { \ if (a != b) { return false; } \ } else { \ EXPECT_EQ(a, b); \ if (a != b) { \ if (verbosity == debug_output_level::FIRST_ERROR) { \ return false; \ } else { \ result = false; \ } \ } \ } \ } while (0) template <bool check_exact_equality> struct column_property_comparator { bool types_equivalent(cudf::data_type const& lhs, cudf::data_type const& rhs) { return is_fixed_point(lhs) ? lhs.id() == rhs.id() : lhs == rhs; } size_type count_nulls(cudf::column_view const& c, cudf::column_view const& row_indices) { auto validity_iter = cudf::detail::make_counting_transform_iterator( 0, [row_indices = row_indices.begin<size_type>(), validity = c.null_mask(), offset = c.offset()] __device__(int index) { // both null mask and offsets data are not pre-sliced. so we need to add the column offset // to every incoming index. auto const true_index = row_indices[index] + offset; return !validity || cudf::bit_is_set(validity, true_index) ? 0 : 1; }); return thrust::reduce(rmm::exec_policy(cudf::get_default_stream()), validity_iter, validity_iter + row_indices.size()); } bool compare_common(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { bool result = true; if (check_exact_equality) { PROP_EXPECT_EQ(lhs.type(), rhs.type()); } else { PROP_EXPECT_EQ(types_equivalent(lhs.type(), rhs.type()), true); } auto const lhs_size = check_exact_equality ? lhs.size() : lhs_row_indices.size(); auto const rhs_size = check_exact_equality ? rhs.size() : rhs_row_indices.size(); PROP_EXPECT_EQ(lhs_size, rhs_size); if (lhs_size > 0 && check_exact_equality) { PROP_EXPECT_EQ(lhs.nullable(), rhs.nullable()); } // DISCUSSION: does this make sense, semantically? auto const lhs_null_count = check_exact_equality ? lhs.null_count() : count_nulls(lhs, lhs_row_indices); auto const rhs_null_count = check_exact_equality ? rhs.null_count() : count_nulls(rhs, rhs_row_indices); PROP_EXPECT_EQ(lhs_null_count, rhs_null_count); // equivalent, but not exactly equal columns can have a different number of children if their // sizes are both 0. Specifically, empty string columns may or may not have children. 
if (check_exact_equality || (lhs.size() > 0 && lhs.null_count() < lhs.size())) { PROP_EXPECT_EQ(lhs.num_children(), rhs.num_children()); } return result; } template <typename T, std::enable_if_t<!std::is_same_v<T, cudf::list_view> && !std::is_same_v<T, cudf::struct_view>>* = nullptr> bool operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { return compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity); } template <typename T, std::enable_if_t<std::is_same_v<T, cudf::list_view>>* = nullptr> bool operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } cudf::lists_column_view lhs_l(lhs); cudf::lists_column_view rhs_l(rhs); // recurse // note: if a column is all nulls (and we are checking for exact equality) or otherwise empty, // no indices are generated and no recursion happens auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices, check_exact_equality); if (lhs_child_indices->size() > 0) { auto lhs_child = lhs_l.get_sliced_child(cudf::get_default_stream()); auto rhs_child = rhs_l.get_sliced_child(cudf::get_default_stream()); auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices, check_exact_equality); return cudf::type_dispatcher(lhs_child.type(), column_property_comparator<check_exact_equality>{}, lhs_child, rhs_child, *lhs_child_indices, *rhs_child_indices, verbosity); } return true; } template <typename T, std::enable_if_t<std::is_same_v<T, cudf::struct_view>>* = nullptr> bool operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } structs_column_view l_scv(lhs); structs_column_view r_scv(rhs); for (size_type i = 0; i < lhs.num_children(); i++) { column_view lhs_child = l_scv.get_sliced_child(i, cudf::get_default_stream()); column_view rhs_child = r_scv.get_sliced_child(i, cudf::get_default_stream()); if (!cudf::type_dispatcher(lhs_child.type(), column_property_comparator<check_exact_equality>{}, lhs_child, rhs_child, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } } return true; } }; template <typename DeviceComparator> class corresponding_rows_unequal { public: corresponding_rows_unequal(column_device_view lhs_row_indices_, column_device_view rhs_row_indices_, size_type /*fp_ulps*/, DeviceComparator comp_, column_device_view /*lhs*/, column_device_view /*rhs*/) : lhs_row_indices(lhs_row_indices_), rhs_row_indices(rhs_row_indices_), comp(comp_) { } __device__ bool operator()(size_type index) { using cudf::experimental::row::lhs_index_type; using cudf::experimental::row::rhs_index_type; return !comp(lhs_index_type{lhs_row_indices.element<size_type>(index)}, rhs_index_type{rhs_row_indices.element<size_type>(index)}); } column_device_view lhs_row_indices; column_device_view rhs_row_indices; DeviceComparator comp; }; template <typename DeviceComparator> class corresponding_rows_not_equivalent { column_device_view lhs_row_indices; column_device_view rhs_row_indices; size_type const fp_ulps; DeviceComparator comp; column_device_view lhs; 
column_device_view rhs; public: corresponding_rows_not_equivalent(column_device_view lhs_row_indices_, column_device_view rhs_row_indices_, size_type fp_ulps_, DeviceComparator comp_, column_device_view lhs_, column_device_view rhs_) : lhs_row_indices(lhs_row_indices_), rhs_row_indices(rhs_row_indices_), fp_ulps(fp_ulps_), comp(comp_), lhs(lhs_), rhs(rhs_) { } struct typed_element_not_equivalent { template <typename T> __device__ std::enable_if_t<std::is_floating_point_v<T>, bool> operator()( column_device_view const& lhs, column_device_view const& rhs, size_type lhs_index, size_type rhs_index, size_type fp_ulps) { if (lhs.is_valid(lhs_index) and rhs.is_valid(rhs_index)) { T const x = lhs.element<T>(lhs_index); T const y = rhs.element<T>(rhs_index); // Must handle inf and nan separately if (std::isinf(x) || std::isinf(y)) { return x != y; // comparison of (inf==inf) returns true } else if (std::isnan(x) || std::isnan(y)) { return std::isnan(x) != std::isnan(y); // comparison of (nan==nan) returns false } else { T const abs_x_minus_y = std::abs(x - y); return abs_x_minus_y >= std::numeric_limits<T>::min() && abs_x_minus_y > std::numeric_limits<T>::epsilon() * std::abs(x + y) * fp_ulps; } } else { // if either is null, then the inequality was checked already return true; } } template <typename T, typename... Args> __device__ std::enable_if_t<not std::is_floating_point_v<T>, bool> operator()(Args...) { // Non-floating point inequality is checked already return true; } }; __device__ bool operator()(size_type index) { using cudf::experimental::row::lhs_index_type; using cudf::experimental::row::rhs_index_type; auto const lhs_index = lhs_row_indices.element<size_type>(index); auto const rhs_index = rhs_row_indices.element<size_type>(index); if (not comp(lhs_index_type{lhs_index}, rhs_index_type{rhs_index})) { return type_dispatcher( lhs.type(), typed_element_not_equivalent{}, lhs, rhs, lhs_index, rhs_index, fp_ulps); } return false; } }; // Stringify the inconsistent values resulted from the comparison of two columns element-wise std::string stringify_column_differences(cudf::device_span<int const> differences, column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, int depth) { CUDF_EXPECTS(not differences.empty(), "Shouldn't enter this function if `differences` is empty"); std::string const depth_str = depth > 0 ? "depth " + std::to_string(depth) + '\n' : ""; // move the differences to the host. 
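  // (make_host_vector_sync performs the device-to-host copy and synchronizes on the stream,
  // so the difference indices can be read directly on the host below.)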
auto h_differences = cudf::detail::make_host_vector_sync(differences, cudf::get_default_stream()); if (verbosity == debug_output_level::ALL_ERRORS) { std::ostringstream buffer; buffer << depth_str << "differences:" << std::endl; auto source_table = cudf::table_view({lhs, rhs}); auto diff_column = fixed_width_column_wrapper<int32_t>(h_differences.begin(), h_differences.end()); auto diff_table = cudf::gather(source_table, diff_column); // Need to pull back the differences auto const h_left_strings = to_strings(diff_table->get_column(0)); auto const h_right_strings = to_strings(diff_table->get_column(1)); for (size_t i = 0; i < h_differences.size(); ++i) buffer << depth_str << "lhs[" << h_differences[i] << "] = " << h_left_strings[i] << ", rhs[" << h_differences[i] << "] = " << h_right_strings[i] << std::endl; return buffer.str(); } else { auto const index = h_differences[0]; // only stringify first difference auto const lhs_index = cudf::detail::get_value<size_type>(lhs_row_indices, index, cudf::get_default_stream()); auto const rhs_index = cudf::detail::get_value<size_type>(rhs_row_indices, index, cudf::get_default_stream()); auto diff_lhs = cudf::slice(lhs, {lhs_index, lhs_index + 1}).front(); auto diff_rhs = cudf::slice(rhs, {rhs_index, rhs_index + 1}).front(); return depth_str + "first difference: " + "lhs[" + std::to_string(index) + "] = " + to_string(diff_lhs, "") + ", rhs[" + std::to_string(index) + "] = " + to_string(diff_rhs, ""); } } // non-nested column types template <typename T, bool check_exact_equality> struct column_comparator_impl { bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth) { auto d_lhs_row_indices = cudf::column_device_view::create(lhs_row_indices); auto d_rhs_row_indices = cudf::column_device_view::create(rhs_row_indices); auto d_lhs = cudf::column_device_view::create(lhs); auto d_rhs = cudf::column_device_view::create(rhs); auto lhs_tview = table_view{{lhs}}; auto rhs_tview = table_view{{rhs}}; auto const comparator = cudf::experimental::row::equality::two_table_comparator{ lhs_tview, rhs_tview, cudf::get_default_stream()}; auto const has_nulls = cudf::has_nulls(lhs_tview) or cudf::has_nulls(rhs_tview); auto const device_comparator = comparator.equal_to<false>(cudf::nullate::DYNAMIC{has_nulls}); using ComparatorType = std::conditional_t<check_exact_equality, corresponding_rows_unequal<decltype(device_comparator)>, corresponding_rows_not_equivalent<decltype(device_comparator)>>; auto differences = rmm::device_uvector<int>( lhs_row_indices.size(), cudf::get_default_stream()); // worst case: everything different auto input_iter = thrust::make_counting_iterator(0); auto diff_map = rmm::device_uvector<bool>(lhs_row_indices.size(), cudf::get_default_stream()); thrust::transform( rmm::exec_policy(cudf::get_default_stream()), input_iter, input_iter + lhs_row_indices.size(), diff_map.begin(), ComparatorType( *d_lhs_row_indices, *d_rhs_row_indices, fp_ulps, device_comparator, *d_lhs, *d_rhs)); auto diff_iter = thrust::copy_if(rmm::exec_policy(cudf::get_default_stream()), input_iter, input_iter + lhs_row_indices.size(), diff_map.begin(), differences.begin(), thrust::identity<bool>{}); differences.resize(thrust::distance(differences.begin(), diff_iter), cudf::get_default_stream()); // shrink back down if (not differences.is_empty()) { if (verbosity != debug_output_level::QUIET) { // GTEST_FAIL() does a return that conflicts with our 
return type. so hide it in a lambda. [&]() { GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth); }(); } return false; } return true; } }; // forward declaration for nested-type recursion. template <bool check_exact_equality> struct column_comparator; // specialization for list columns template <bool check_exact_equality> struct column_comparator_impl<list_view, check_exact_equality> { bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth) { lists_column_view lhs_l(lhs); lists_column_view rhs_l(rhs); CUDF_EXPECTS(lhs_row_indices.size() == rhs_row_indices.size(), "List column size mismatch"); if (lhs_row_indices.is_empty()) { return true; } // worst case - everything is different rmm::device_uvector<int> differences(lhs_row_indices.size(), cudf::get_default_stream()); // compare offsets, taking slicing into account // left side size_type lhs_shift = cudf::detail::get_value<size_type>( lhs_l.offsets(), lhs_l.offset(), cudf::get_default_stream()); auto lhs_offsets = thrust::make_transform_iterator( lhs_l.offsets().begin<size_type>() + lhs_l.offset(), [lhs_shift] __device__(size_type offset) { return offset - lhs_shift; }); auto lhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); // right side size_type rhs_shift = cudf::detail::get_value<size_type>( rhs_l.offsets(), rhs_l.offset(), cudf::get_default_stream()); auto rhs_offsets = thrust::make_transform_iterator( rhs_l.offsets().begin<size_type>() + rhs_l.offset(), [rhs_shift] __device__(size_type offset) { return offset - rhs_shift; }); auto rhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); // when checking for equivalency, we can't compare offset values directly, we can only // compare lengths of the rows, and only if valid. as a concrete example, you could have two // equivalent columns with the following data: // // column A // offsets = [0, 3, 5, 7] // validity = [0, 1, 1, 1] // // column B // offsets = [0, 0, 2, 4] // validity = [0, 1, 1, 1] // // Row 0 in column A happens to have a positive length, even though the row is null, but column // B does not. So the offsets for the remaining valid rows are fundamentally different even // though the row lengths are the same. // auto input_iter = thrust::make_counting_iterator(0); auto diff_iter = thrust::copy_if( rmm::exec_policy(cudf::get_default_stream()), input_iter, input_iter + lhs_row_indices.size(), differences.begin(), [lhs_offsets, rhs_offsets, lhs_valids, rhs_valids, lhs_indices = lhs_row_indices.begin<size_type>(), rhs_indices = rhs_row_indices.begin<size_type>()] __device__(size_type index) { auto const lhs_index = lhs_indices[index]; auto const rhs_index = rhs_indices[index]; // check for validity match if (lhs_valids[lhs_index] != rhs_valids[rhs_index]) { return true; } // if the row is valid, check that the length of the list is the same. do this // for both the equivalency and exact equality checks. 
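        // using columns A and B from the example above: row 1 is valid on both sides and has
        // length 5 - 3 == 2 on the left and 2 - 0 == 2 on the right, so this length check
        // passes even though the raw offset values differ (only the exact-equality branch
        // further down would flag them).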
if (lhs_valids[lhs_index] && ((lhs_offsets[lhs_index + 1] - lhs_offsets[lhs_index]) != (rhs_offsets[rhs_index + 1] - rhs_offsets[rhs_index]))) { return true; } // if validity matches -and- is false, we can ignore the actual offset values. this // is technically not checking "equal()", but it's how the non-list code path handles it if (!lhs_valids[lhs_index]) { return false; } // if checking exact equality, compare the actual offset values if (check_exact_equality && lhs_offsets[lhs_index] != rhs_offsets[rhs_index]) { return true; } return false; }); differences.resize(thrust::distance(differences.begin(), diff_iter), cudf::get_default_stream()); // shrink back down if (not differences.is_empty()) { if (verbosity != debug_output_level::QUIET) { // GTEST_FAIL() does a return that conflicts with our return type. so hide it in a lambda. [&]() { GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth); }(); } return false; } // recurse // note: if a column is all nulls (and we are only checking for equivalence) or otherwise empty, // no indices are generated and no recursion happens auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices, check_exact_equality); if (lhs_child_indices->size() > 0) { auto lhs_child = lhs_l.get_sliced_child(cudf::get_default_stream()); auto rhs_child = rhs_l.get_sliced_child(cudf::get_default_stream()); auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices, check_exact_equality); return cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, *lhs_child_indices, *rhs_child_indices, verbosity, fp_ulps, depth + 1); } return true; } }; template <bool check_exact_equality> struct column_comparator_impl<struct_view, check_exact_equality> { bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth) { structs_column_view l_scv(lhs); structs_column_view r_scv(rhs); for (size_type i = 0; i < lhs.num_children(); i++) { column_view lhs_child = l_scv.get_sliced_child(i, cudf::get_default_stream()); column_view rhs_child = r_scv.get_sliced_child(i, cudf::get_default_stream()); if (!cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, lhs_row_indices, rhs_row_indices, verbosity, fp_ulps, depth + 1)) { return false; } } return true; } }; template <bool check_exact_equality> struct column_comparator { template <typename T> bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth = 0) { // compare properties if (!cudf::type_dispatcher(lhs.type(), column_property_comparator<check_exact_equality>{}, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } // compare values column_comparator_impl<T, check_exact_equality> comparator{}; return comparator(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, fp_ulps, depth); } }; } // namespace namespace detail { /** * @copydoc cudf::test::expect_column_properties_equal */ bool expect_column_properties_equal(column_view const& lhs, column_view const& rhs, debug_output_level verbosity) { auto lhs_indices = generate_all_row_indices(lhs.size()); auto rhs_indices = generate_all_row_indices(rhs.size()); return cudf::type_dispatcher(lhs.type(), 
column_property_comparator<true>{}, lhs, rhs, *lhs_indices, *rhs_indices, verbosity); } /** * @copydoc cudf::test::expect_column_properties_equivalent */ bool expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs, debug_output_level verbosity) { auto lhs_indices = generate_all_row_indices(lhs.size()); auto rhs_indices = generate_all_row_indices(rhs.size()); return cudf::type_dispatcher(lhs.type(), column_property_comparator<false>{}, lhs, rhs, *lhs_indices, *rhs_indices, verbosity); } /** * @copydoc cudf::test::expect_columns_equal */ bool expect_columns_equal(cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity) { auto lhs_indices = generate_all_row_indices(lhs.size()); auto rhs_indices = generate_all_row_indices(rhs.size()); return cudf::type_dispatcher(lhs.type(), column_comparator<true>{}, lhs, rhs, *lhs_indices, *rhs_indices, verbosity, cudf::test::default_ulp); } /** * @copydoc cudf::test::expect_columns_equivalent */ bool expect_columns_equivalent(cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity, size_type fp_ulps) { auto lhs_indices = generate_all_row_indices(lhs.size()); auto rhs_indices = generate_all_row_indices(rhs.size()); return cudf::type_dispatcher(lhs.type(), column_comparator<false>{}, lhs, rhs, *lhs_indices, *rhs_indices, verbosity, fp_ulps); } /** * @copydoc cudf::test::expect_equal_buffers */ void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes) { if (size_bytes > 0) { EXPECT_NE(nullptr, lhs); EXPECT_NE(nullptr, rhs); } auto typed_lhs = static_cast<char const*>(lhs); auto typed_rhs = static_cast<char const*>(rhs); EXPECT_TRUE(thrust::equal( rmm::exec_policy(cudf::get_default_stream()), typed_lhs, typed_lhs + size_bytes, typed_rhs)); } } // namespace detail /** * @copydoc cudf::test::expect_column_empty */ void expect_column_empty(cudf::column_view const& col) { EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); } /** * @copydoc cudf::test::bitmask_to_host */ std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c) { if (c.nullable()) { auto num_bitmasks = num_bitmask_words(c.size()); std::vector<bitmask_type> host_bitmask(num_bitmasks); if (c.offset() == 0) { CUDF_CUDA_TRY(hipMemcpy(host_bitmask.data(), c.null_mask(), num_bitmasks * sizeof(bitmask_type), hipMemcpyDefault)); } else { auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset() + c.size()); CUDF_CUDA_TRY(hipMemcpy( host_bitmask.data(), mask.data(), num_bitmasks * sizeof(bitmask_type), hipMemcpyDefault)); } return host_bitmask; } else { return std::vector<bitmask_type>{}; } } namespace { template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr> static auto numeric_to_string_precise(T value) { return std::to_string(value); } template <typename T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr> static auto numeric_to_string_precise(T value) { std::ostringstream o; o << std::setprecision(std::numeric_limits<T>::max_digits10) << value; return o.str(); } static auto duration_suffix(cudf::duration_D) { return " days"; } static auto duration_suffix(cudf::duration_s) { return " seconds"; } static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; } static auto duration_suffix(cudf::duration_us) { return " microseconds"; } static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; } std::string get_nested_type_str(cudf::column_view const& view) { if (view.type().id() == cudf::type_id::LIST) { 
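    // recurse through the child so nested types render with their full shape,
    // e.g. a list-of-int32 column prints as something like "list<int32>".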
lists_column_view lcv(view); return cudf::type_to_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">"; } if (view.type().id() == cudf::type_id::STRUCT) { std::ostringstream out; out << cudf::type_to_name(view.type()) + "<"; std::transform(view.child_begin(), view.child_end(), std::ostream_iterator<std::string>(out, ","), [&out](auto const col) { return get_nested_type_str(col); }); out << ">"; return out.str(); } return cudf::type_to_name(view.type()); } template <typename NestedColumnView> std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ") { column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index); CUDF_EXPECTS(offsets.type().id() == type_id::INT32, "Column does not appear to be an offsets column"); CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!"); size_type output_size = c.size() + 1; // the first offset value to normalize everything against size_type first = cudf::detail::get_value<size_type>(offsets, c.offset(), cudf::get_default_stream()); rmm::device_uvector<size_type> shifted_offsets(output_size, cudf::get_default_stream()); // normalize the offset values for the column offset size_type const* d_offsets = offsets.head<size_type>() + c.offset(); thrust::transform( rmm::exec_policy(cudf::get_default_stream()), d_offsets, d_offsets + output_size, shifted_offsets.begin(), [first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); }); auto const h_shifted_offsets = cudf::detail::make_host_vector_sync(shifted_offsets, cudf::get_default_stream()); std::ostringstream buffer; for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) { buffer << h_shifted_offsets[idx]; if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; } } return buffer.str(); } struct column_view_printer { template <typename Element, std::enable_if_t<is_numeric<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? 
numeric_to_string_precise(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el); }); } } template <typename Element, std::enable_if_t<is_timestamp<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { // For timestamps, convert timestamp column to column of strings, then // call string version std::string format = [&]() { if constexpr (std::is_same_v<cudf::timestamp_s, Element>) { return std::string{"%Y-%m-%dT%H:%M:%SZ"}; } else if constexpr (std::is_same_v<cudf::timestamp_ms, Element>) { return std::string{"%Y-%m-%dT%H:%M:%S.%3fZ"}; } else if constexpr (std::is_same_v<cudf::timestamp_us, Element>) { return std::string{"%Y-%m-%dT%H:%M:%S.%6fZ"}; } else if constexpr (std::is_same_v<cudf::timestamp_ns, Element>) { return std::string{"%Y-%m-%dT%H:%M:%S.%9fZ"}; } return std::string{"%Y-%m-%d"}; }(); auto col_as_strings = cudf::strings::from_timestamps(col, format); if (col_as_strings->size() == 0) { return; } this->template operator()<cudf::string_view>(*col_as_strings, out, indent); } template <typename Element, std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto const h_data = cudf::test::to_host<Element>(col); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), std::back_inserter(out), [&h_data](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? static_cast<std::string>(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(std::cbegin(h_data.first), std::cend(h_data.first), std::back_inserter(out), [col](auto const& fp) { return static_cast<std::string>(fp); }); } } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::string_view>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { // // Implementation for strings, call special to_host variant // if (col.is_empty()) return; auto h_data = cudf::test::to_host<std::string>(col); // explicitly replace '\r' and '\n' characters with "\r" and "\n" strings respectively. auto cleaned = [](std::string_view in) { std::string out(in); auto replace_char = [](std::string& out, char c, std::string_view repl) { for (std::string::size_type pos{}; out.npos != (pos = out.find(c, pos)); pos++) { out.replace(pos, 1, repl); } }; replace_char(out, '\r', "\\r"); replace_char(out, '\n', "\\n"); return out; }; out.resize(col.size()); std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? 
cleaned(h_data.first[idx]) : std::string("NULL"); }); } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::dictionary32>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { cudf::dictionary_column_view dictionary(col); if (col.is_empty()) return; std::vector<std::string> keys = to_strings(dictionary.keys()); std::vector<std::string> indices = to_strings({dictionary.indices().type(), dictionary.size(), dictionary.indices().head(), dictionary.null_mask(), dictionary.null_count(), dictionary.offset()}); out.insert(out.end(), keys.begin(), keys.end()); if (!indices.empty()) { std::string first = "\x08 : " + indices.front(); // use : as delimiter out.push_back(first); // between keys and indices out.insert(out.end(), indices.begin() + 1, indices.end()); } } // Print the tick counts with the units template <typename Element, std::enable_if_t<is_duration<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? numeric_to_string_precise(h_data.first[idx].count()) + duration_suffix(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el.count()) + duration_suffix(el); }); } } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::list_view>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { lists_column_view lcv(col); // propagate slicing to the child if necessary column_view child = lcv.get_sliced_child(cudf::get_default_stream()); bool const is_sliced = lcv.offset() > 0 || child.offset() > 0; std::string tmp = get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent + "Length : " + std::to_string(lcv.size()) + "\n" + indent + "Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" + (lcv.parent().nullable() ? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" + detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n" : "") + // non-nested types don't typically display their null masks, so do it here for convenience. (!is_nested(child.type()) && child.nullable() ? 
" " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + (detail::to_string(child, ", ", indent + " ")) + "\n"; out.push_back(tmp); } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::struct_view>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { structs_column_view view{col}; std::ostringstream out_stream; out_stream << get_nested_type_str(col) << ":\n" << indent << "Length : " << view.size() << ":\n"; if (view.nullable()) { out_stream << indent << "Null count: " << view.null_count() << "\n" << detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n"; } auto iter = thrust::make_counting_iterator(0); std::transform( iter, iter + view.num_children(), std::ostream_iterator<std::string>(out_stream, "\n"), [&](size_type index) { auto child = view.get_sliced_child(index, cudf::get_default_stream()); // non-nested types don't typically display their null masks, so do it here for convenience. return (!is_nested(child.type()) && child.nullable() ? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + detail::to_string(child, ", ", indent + " "); }); out.push_back(out_stream.str()); } }; } // namespace namespace detail { /** * @copydoc cudf::test::detail::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent) { std::vector<std::string> reply; cudf::type_dispatcher(col.type(), column_view_printer{}, col, reply, indent); return reply; } /** * @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string) * * @param indent Indentation for all output */ std::string to_string(cudf::column_view const& col, std::string const& delimiter, std::string const& indent) { std::ostringstream buffer; std::vector<std::string> h_data = to_strings(col, indent); buffer << indent; std::copy(h_data.begin(), h_data.end() - (!h_data.empty()), std::ostream_iterator<std::string>(buffer, delimiter.c_str())); if (!h_data.empty()) buffer << h_data.back(); return buffer.str(); } /** * @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string) * * @param indent Indentation for all output. See comment in `to_strings` for * a detailed description. */ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size, std::string const& indent) { std::ostringstream buffer; buffer << indent; for (int idx = null_mask_size - 1; idx >= 0; idx--) { buffer << (cudf::bit_is_set(null_mask.data(), idx) ? 
"1" : "0"); } return buffer.str(); } } // namespace detail /** * @copydoc cudf::test::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col) { return detail::to_strings(col); } /** * @copydoc cudf::test::to_string(cudf::column_view, std::string) */ std::string to_string(cudf::column_view const& col, std::string const& delimiter) { return detail::to_string(col, delimiter); } /** * @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type) */ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size) { return detail::to_string(null_mask, null_mask_size); } /** * @copydoc cudf::test::print */ void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter) { os << to_string(col, delimiter) << std::endl; } /** * @copydoc cudf::test::validate_host_masks */ bool validate_host_masks(std::vector<bitmask_type> const& expected_mask, std::vector<bitmask_type> const& got_mask, size_type number_of_elements) { return std::all_of(thrust::make_counting_iterator(0), thrust::make_counting_iterator(number_of_elements), [&expected_mask, &got_mask](auto index) { return cudf::bit_is_set(expected_mask.data(), index) == cudf::bit_is_set(got_mask.data(), index); }); } } // namespace test } // namespace cudf
a6ab8cecc7aedf985637a385b74a8a95793e7c6a.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_view.hpp> #include <cudf/copying.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/strings/convert/convert_datetime.hpp> #include <cudf/structs/struct_view.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/experimental/row_operators.cuh> #include <cudf/table/table_device_view.cuh> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/detail/column_utilities.hpp> #include <rmm/exec_policy.hpp> #include <thrust/copy.h> #include <thrust/distance.h> #include <thrust/equal.h> #include <thrust/execution_policy.h> #include <thrust/generate.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/logical.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/scan.h> #include <thrust/scatter.h> #include <thrust/sequence.h> #include <thrust/transform.h> #include <numeric> #include <sstream> namespace cudf { namespace test { namespace { std::unique_ptr<column> generate_all_row_indices(size_type num_rows) { auto indices = cudf::make_fixed_width_column(data_type{type_id::INT32}, num_rows, mask_state::UNALLOCATED); thrust::sequence(rmm::exec_policy(cudf::get_default_stream()), indices->mutable_view().begin<size_type>(), indices->mutable_view().end<size_type>(), 0); return indices; } // generate the rows indices that should be checked for the child column of a list column. // // - if we are just checking for equivalence, we can skip any rows that are nulls. this allows // things like non-empty rows that have been nullified after creation. they may actually contain // values, but since the row is null they don't matter for equivalency. // // - if we are checking for exact equality, we need to check all rows. // // This allows us to differentiate between: // // List<int32_t>: // Length : 1 // Offsets : 0, 4 // Null count: 1 // 0 // 0, 1, 2, 3 // // List<int32_t>: // Length : 1 // Offsets : 0, 0 // Null count: 1 // 0 // std::unique_ptr<column> generate_child_row_indices(lists_column_view const& c, column_view const& row_indices, bool check_exact_equality) { // if we are checking for exact equality, we should be checking for "unsanitized" data that may // be hiding underneath nulls. 
so check all rows instead of just non-null rows if (check_exact_equality) { return generate_all_row_indices(c.get_sliced_child(cudf::get_default_stream()).size()); } // Example input // List<int32_t>: // Length : 7 // Offsets : 0, 3, 6, 8, 11, 14, 16, 19 // | | <-- non-null input rows // Null count: 5 // 0010100 // 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 7 // | | | | | <-- child rows of non-null rows // // Desired output: [6, 7, 11, 12, 13] // compute total # of child row indices we will be emitting. auto row_size_iter = cudf::detail::make_counting_transform_iterator( 0, [row_indices = row_indices.begin<size_type>(), validity = c.null_mask(), offsets = c.offsets().begin<offset_type>(), offset = c.offset()] __device__(int index) { // both null mask and offsets data are not pre-sliced. so we need to add the column offset to // every incoming index. auto const true_index = row_indices[index] + offset; return !validity || cudf::bit_is_set(validity, true_index) ? (offsets[true_index + 1] - offsets[true_index]) : 0; }); auto const output_size = thrust::reduce(rmm::exec_policy(cudf::get_default_stream()), row_size_iter, row_size_iter + row_indices.size()); // no output. done. auto result = cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED); if (output_size == 0) { return result; } // for all input rows, what position in the output column they will start at. // // output_row_start = [0, 0, 0, 2, 2, 5, 5] // | | <-- non-null input rows // auto output_row_start = cudf::make_fixed_width_column( data_type{type_id::INT32}, row_indices.size(), mask_state::UNALLOCATED); thrust::exclusive_scan(rmm::exec_policy(cudf::get_default_stream()), row_size_iter, row_size_iter + row_indices.size(), output_row_start->mutable_view().begin<size_type>()); // fill result column with 1s // // result = [1, 1, 1, 1, 1] // thrust::generate(rmm::exec_policy(cudf::get_default_stream()), result->mutable_view().begin<size_type>(), result->mutable_view().end<size_type>(), [] __device__() { return 1; }); // scatter the output row positions into result buffer // // result = [6, 1, 11, 1, 1] // auto output_row_iter = cudf::detail::make_counting_transform_iterator( 0, [row_indices = row_indices.begin<size_type>(), offsets = c.offsets().begin<offset_type>(), offset = c.offset(), first_offset = cudf::detail::get_value<offset_type>( c.offsets(), c.offset(), cudf::get_default_stream())] __device__(int index) { auto const true_index = row_indices[index] + offset; return offsets[true_index] - first_offset; }); thrust::scatter_if(rmm::exec_policy(cudf::get_default_stream()), output_row_iter, output_row_iter + row_indices.size(), output_row_start->view().begin<size_type>(), row_size_iter, result->mutable_view().begin<size_type>(), [] __device__(auto row_size) { return row_size != 0; }); // generate keys for each output row // // result = [1, 1, 2, 2, 2] // auto keys = cudf::make_fixed_width_column(data_type{type_id::INT32}, output_size, mask_state::UNALLOCATED); thrust::generate(rmm::exec_policy(cudf::get_default_stream()), keys->mutable_view().begin<size_type>(), keys->mutable_view().end<size_type>(), [] __device__() { return 0; }); thrust::scatter_if(rmm::exec_policy(cudf::get_default_stream()), row_size_iter, row_size_iter + row_indices.size(), output_row_start->view().begin<size_type>(), row_size_iter, keys->mutable_view().begin<size_type>(), [] __device__(auto row_size) { return row_size != 0; }); thrust::inclusive_scan(rmm::exec_policy(cudf::get_default_stream()), 
keys->view().begin<size_type>(), keys->view().end<size_type>(), keys->mutable_view().begin<size_type>()); // scan by key to generate final child row indices. // input // result = [6, 1, 11, 1, 1] // keys = [1, 1, 2, 2, 2] // // output // result = [6, 7, 11, 12, 13] // thrust::inclusive_scan_by_key(rmm::exec_policy(cudf::get_default_stream()), keys->view().begin<size_type>(), keys->view().end<size_type>(), result->view().begin<size_type>(), result->mutable_view().begin<size_type>()); return result; } #define PROP_EXPECT_EQ(a, b) \ do { \ if (verbosity == debug_output_level::QUIET) { \ if (a != b) { return false; } \ } else { \ EXPECT_EQ(a, b); \ if (a != b) { \ if (verbosity == debug_output_level::FIRST_ERROR) { \ return false; \ } else { \ result = false; \ } \ } \ } \ } while (0) template <bool check_exact_equality> struct column_property_comparator { bool types_equivalent(cudf::data_type const& lhs, cudf::data_type const& rhs) { return is_fixed_point(lhs) ? lhs.id() == rhs.id() : lhs == rhs; } size_type count_nulls(cudf::column_view const& c, cudf::column_view const& row_indices) { auto validity_iter = cudf::detail::make_counting_transform_iterator( 0, [row_indices = row_indices.begin<size_type>(), validity = c.null_mask(), offset = c.offset()] __device__(int index) { // both null mask and offsets data are not pre-sliced. so we need to add the column offset // to every incoming index. auto const true_index = row_indices[index] + offset; return !validity || cudf::bit_is_set(validity, true_index) ? 0 : 1; }); return thrust::reduce(rmm::exec_policy(cudf::get_default_stream()), validity_iter, validity_iter + row_indices.size()); } bool compare_common(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { bool result = true; if (check_exact_equality) { PROP_EXPECT_EQ(lhs.type(), rhs.type()); } else { PROP_EXPECT_EQ(types_equivalent(lhs.type(), rhs.type()), true); } auto const lhs_size = check_exact_equality ? lhs.size() : lhs_row_indices.size(); auto const rhs_size = check_exact_equality ? rhs.size() : rhs_row_indices.size(); PROP_EXPECT_EQ(lhs_size, rhs_size); if (lhs_size > 0 && check_exact_equality) { PROP_EXPECT_EQ(lhs.nullable(), rhs.nullable()); } // DISCUSSION: does this make sense, semantically? auto const lhs_null_count = check_exact_equality ? lhs.null_count() : count_nulls(lhs, lhs_row_indices); auto const rhs_null_count = check_exact_equality ? rhs.null_count() : count_nulls(rhs, rhs_row_indices); PROP_EXPECT_EQ(lhs_null_count, rhs_null_count); // equivalent, but not exactly equal columns can have a different number of children if their // sizes are both 0. Specifically, empty string columns may or may not have children. 
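    // put differently: for equivalence comparisons the child-count check below is skipped when
    // the column is empty or entirely null, since such columns expose no observable child data;
    // exact-equality comparisons always require the child counts to match.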
if (check_exact_equality || (lhs.size() > 0 && lhs.null_count() < lhs.size())) { PROP_EXPECT_EQ(lhs.num_children(), rhs.num_children()); } return result; } template <typename T, std::enable_if_t<!std::is_same_v<T, cudf::list_view> && !std::is_same_v<T, cudf::struct_view>>* = nullptr> bool operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { return compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity); } template <typename T, std::enable_if_t<std::is_same_v<T, cudf::list_view>>* = nullptr> bool operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } cudf::lists_column_view lhs_l(lhs); cudf::lists_column_view rhs_l(rhs); // recurse // note: if a column is all nulls (and we are checking for exact equality) or otherwise empty, // no indices are generated and no recursion happens auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices, check_exact_equality); if (lhs_child_indices->size() > 0) { auto lhs_child = lhs_l.get_sliced_child(cudf::get_default_stream()); auto rhs_child = rhs_l.get_sliced_child(cudf::get_default_stream()); auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices, check_exact_equality); return cudf::type_dispatcher(lhs_child.type(), column_property_comparator<check_exact_equality>{}, lhs_child, rhs_child, *lhs_child_indices, *rhs_child_indices, verbosity); } return true; } template <typename T, std::enable_if_t<std::is_same_v<T, cudf::struct_view>>* = nullptr> bool operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& lhs_row_indices, cudf::column_view const& rhs_row_indices, debug_output_level verbosity) { if (!compare_common(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } structs_column_view l_scv(lhs); structs_column_view r_scv(rhs); for (size_type i = 0; i < lhs.num_children(); i++) { column_view lhs_child = l_scv.get_sliced_child(i, cudf::get_default_stream()); column_view rhs_child = r_scv.get_sliced_child(i, cudf::get_default_stream()); if (!cudf::type_dispatcher(lhs_child.type(), column_property_comparator<check_exact_equality>{}, lhs_child, rhs_child, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } } return true; } }; template <typename DeviceComparator> class corresponding_rows_unequal { public: corresponding_rows_unequal(column_device_view lhs_row_indices_, column_device_view rhs_row_indices_, size_type /*fp_ulps*/, DeviceComparator comp_, column_device_view /*lhs*/, column_device_view /*rhs*/) : lhs_row_indices(lhs_row_indices_), rhs_row_indices(rhs_row_indices_), comp(comp_) { } __device__ bool operator()(size_type index) { using cudf::experimental::row::lhs_index_type; using cudf::experimental::row::rhs_index_type; return !comp(lhs_index_type{lhs_row_indices.element<size_type>(index)}, rhs_index_type{rhs_row_indices.element<size_type>(index)}); } column_device_view lhs_row_indices; column_device_view rhs_row_indices; DeviceComparator comp; }; template <typename DeviceComparator> class corresponding_rows_not_equivalent { column_device_view lhs_row_indices; column_device_view rhs_row_indices; size_type const fp_ulps; DeviceComparator comp; column_device_view lhs; 
column_device_view rhs; public: corresponding_rows_not_equivalent(column_device_view lhs_row_indices_, column_device_view rhs_row_indices_, size_type fp_ulps_, DeviceComparator comp_, column_device_view lhs_, column_device_view rhs_) : lhs_row_indices(lhs_row_indices_), rhs_row_indices(rhs_row_indices_), fp_ulps(fp_ulps_), comp(comp_), lhs(lhs_), rhs(rhs_) { } struct typed_element_not_equivalent { template <typename T> __device__ std::enable_if_t<std::is_floating_point_v<T>, bool> operator()( column_device_view const& lhs, column_device_view const& rhs, size_type lhs_index, size_type rhs_index, size_type fp_ulps) { if (lhs.is_valid(lhs_index) and rhs.is_valid(rhs_index)) { T const x = lhs.element<T>(lhs_index); T const y = rhs.element<T>(rhs_index); // Must handle inf and nan separately if (std::isinf(x) || std::isinf(y)) { return x != y; // comparison of (inf==inf) returns true } else if (std::isnan(x) || std::isnan(y)) { return std::isnan(x) != std::isnan(y); // comparison of (nan==nan) returns false } else { T const abs_x_minus_y = std::abs(x - y); return abs_x_minus_y >= std::numeric_limits<T>::min() && abs_x_minus_y > std::numeric_limits<T>::epsilon() * std::abs(x + y) * fp_ulps; } } else { // if either is null, then the inequality was checked already return true; } } template <typename T, typename... Args> __device__ std::enable_if_t<not std::is_floating_point_v<T>, bool> operator()(Args...) { // Non-floating point inequality is checked already return true; } }; __device__ bool operator()(size_type index) { using cudf::experimental::row::lhs_index_type; using cudf::experimental::row::rhs_index_type; auto const lhs_index = lhs_row_indices.element<size_type>(index); auto const rhs_index = rhs_row_indices.element<size_type>(index); if (not comp(lhs_index_type{lhs_index}, rhs_index_type{rhs_index})) { return type_dispatcher( lhs.type(), typed_element_not_equivalent{}, lhs, rhs, lhs_index, rhs_index, fp_ulps); } return false; } }; // Stringify the inconsistent values resulted from the comparison of two columns element-wise std::string stringify_column_differences(cudf::device_span<int const> differences, column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, int depth) { CUDF_EXPECTS(not differences.empty(), "Shouldn't enter this function if `differences` is empty"); std::string const depth_str = depth > 0 ? "depth " + std::to_string(depth) + '\n' : ""; // move the differences to the host. 
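  // (make_host_vector_sync performs the device-to-host copy and synchronizes on the stream,
  // so the difference indices can be read directly on the host below.)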
auto h_differences = cudf::detail::make_host_vector_sync(differences, cudf::get_default_stream()); if (verbosity == debug_output_level::ALL_ERRORS) { std::ostringstream buffer; buffer << depth_str << "differences:" << std::endl; auto source_table = cudf::table_view({lhs, rhs}); auto diff_column = fixed_width_column_wrapper<int32_t>(h_differences.begin(), h_differences.end()); auto diff_table = cudf::gather(source_table, diff_column); // Need to pull back the differences auto const h_left_strings = to_strings(diff_table->get_column(0)); auto const h_right_strings = to_strings(diff_table->get_column(1)); for (size_t i = 0; i < h_differences.size(); ++i) buffer << depth_str << "lhs[" << h_differences[i] << "] = " << h_left_strings[i] << ", rhs[" << h_differences[i] << "] = " << h_right_strings[i] << std::endl; return buffer.str(); } else { auto const index = h_differences[0]; // only stringify first difference auto const lhs_index = cudf::detail::get_value<size_type>(lhs_row_indices, index, cudf::get_default_stream()); auto const rhs_index = cudf::detail::get_value<size_type>(rhs_row_indices, index, cudf::get_default_stream()); auto diff_lhs = cudf::slice(lhs, {lhs_index, lhs_index + 1}).front(); auto diff_rhs = cudf::slice(rhs, {rhs_index, rhs_index + 1}).front(); return depth_str + "first difference: " + "lhs[" + std::to_string(index) + "] = " + to_string(diff_lhs, "") + ", rhs[" + std::to_string(index) + "] = " + to_string(diff_rhs, ""); } } // non-nested column types template <typename T, bool check_exact_equality> struct column_comparator_impl { bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth) { auto d_lhs_row_indices = cudf::column_device_view::create(lhs_row_indices); auto d_rhs_row_indices = cudf::column_device_view::create(rhs_row_indices); auto d_lhs = cudf::column_device_view::create(lhs); auto d_rhs = cudf::column_device_view::create(rhs); auto lhs_tview = table_view{{lhs}}; auto rhs_tview = table_view{{rhs}}; auto const comparator = cudf::experimental::row::equality::two_table_comparator{ lhs_tview, rhs_tview, cudf::get_default_stream()}; auto const has_nulls = cudf::has_nulls(lhs_tview) or cudf::has_nulls(rhs_tview); auto const device_comparator = comparator.equal_to<false>(cudf::nullate::DYNAMIC{has_nulls}); using ComparatorType = std::conditional_t<check_exact_equality, corresponding_rows_unequal<decltype(device_comparator)>, corresponding_rows_not_equivalent<decltype(device_comparator)>>; auto differences = rmm::device_uvector<int>( lhs_row_indices.size(), cudf::get_default_stream()); // worst case: everything different auto input_iter = thrust::make_counting_iterator(0); auto diff_map = rmm::device_uvector<bool>(lhs_row_indices.size(), cudf::get_default_stream()); thrust::transform( rmm::exec_policy(cudf::get_default_stream()), input_iter, input_iter + lhs_row_indices.size(), diff_map.begin(), ComparatorType( *d_lhs_row_indices, *d_rhs_row_indices, fp_ulps, device_comparator, *d_lhs, *d_rhs)); auto diff_iter = thrust::copy_if(rmm::exec_policy(cudf::get_default_stream()), input_iter, input_iter + lhs_row_indices.size(), diff_map.begin(), differences.begin(), thrust::identity<bool>{}); differences.resize(thrust::distance(differences.begin(), diff_iter), cudf::get_default_stream()); // shrink back down if (not differences.is_empty()) { if (verbosity != debug_output_level::QUIET) { // GTEST_FAIL() does a return that conflicts with our 
return type. so hide it in a lambda. [&]() { GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth); }(); } return false; } return true; } }; // forward declaration for nested-type recursion. template <bool check_exact_equality> struct column_comparator; // specialization for list columns template <bool check_exact_equality> struct column_comparator_impl<list_view, check_exact_equality> { bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth) { lists_column_view lhs_l(lhs); lists_column_view rhs_l(rhs); CUDF_EXPECTS(lhs_row_indices.size() == rhs_row_indices.size(), "List column size mismatch"); if (lhs_row_indices.is_empty()) { return true; } // worst case - everything is different rmm::device_uvector<int> differences(lhs_row_indices.size(), cudf::get_default_stream()); // compare offsets, taking slicing into account // left side size_type lhs_shift = cudf::detail::get_value<size_type>( lhs_l.offsets(), lhs_l.offset(), cudf::get_default_stream()); auto lhs_offsets = thrust::make_transform_iterator( lhs_l.offsets().begin<size_type>() + lhs_l.offset(), [lhs_shift] __device__(size_type offset) { return offset - lhs_shift; }); auto lhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); // right side size_type rhs_shift = cudf::detail::get_value<size_type>( rhs_l.offsets(), rhs_l.offset(), cudf::get_default_stream()); auto rhs_offsets = thrust::make_transform_iterator( rhs_l.offsets().begin<size_type>() + rhs_l.offset(), [rhs_shift] __device__(size_type offset) { return offset - rhs_shift; }); auto rhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); // when checking for equivalency, we can't compare offset values directly, we can only // compare lengths of the rows, and only if valid. as a concrete example, you could have two // equivalent columns with the following data: // // column A // offsets = [0, 3, 5, 7] // validity = [0, 1, 1, 1] // // column B // offsets = [0, 0, 2, 4] // validity = [0, 1, 1, 1] // // Row 0 in column A happens to have a positive length, even though the row is null, but column // B does not. So the offsets for the remaining valid rows are fundamentally different even // though the row lengths are the same. // auto input_iter = thrust::make_counting_iterator(0); auto diff_iter = thrust::copy_if( rmm::exec_policy(cudf::get_default_stream()), input_iter, input_iter + lhs_row_indices.size(), differences.begin(), [lhs_offsets, rhs_offsets, lhs_valids, rhs_valids, lhs_indices = lhs_row_indices.begin<size_type>(), rhs_indices = rhs_row_indices.begin<size_type>()] __device__(size_type index) { auto const lhs_index = lhs_indices[index]; auto const rhs_index = rhs_indices[index]; // check for validity match if (lhs_valids[lhs_index] != rhs_valids[rhs_index]) { return true; } // if the row is valid, check that the length of the list is the same. do this // for both the equivalency and exact equality checks. 
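        // using columns A and B from the example above: row 1 is valid on both sides and has
        // length 5 - 3 == 2 on the left and 2 - 0 == 2 on the right, so this length check
        // passes even though the raw offset values differ (only the exact-equality branch
        // further down would flag them).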
if (lhs_valids[lhs_index] && ((lhs_offsets[lhs_index + 1] - lhs_offsets[lhs_index]) != (rhs_offsets[rhs_index + 1] - rhs_offsets[rhs_index]))) { return true; } // if validity matches -and- is false, we can ignore the actual offset values. this // is technically not checking "equal()", but it's how the non-list code path handles it if (!lhs_valids[lhs_index]) { return false; } // if checking exact equality, compare the actual offset values if (check_exact_equality && lhs_offsets[lhs_index] != rhs_offsets[rhs_index]) { return true; } return false; }); differences.resize(thrust::distance(differences.begin(), diff_iter), cudf::get_default_stream()); // shrink back down if (not differences.is_empty()) { if (verbosity != debug_output_level::QUIET) { // GTEST_FAIL() does a return that conflicts with our return type. so hide it in a lambda. [&]() { GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, depth); }(); } return false; } // recurse // note: if a column is all nulls (and we are only checking for equivalence) or otherwise empty, // no indices are generated and no recursion happens auto lhs_child_indices = generate_child_row_indices(lhs_l, lhs_row_indices, check_exact_equality); if (lhs_child_indices->size() > 0) { auto lhs_child = lhs_l.get_sliced_child(cudf::get_default_stream()); auto rhs_child = rhs_l.get_sliced_child(cudf::get_default_stream()); auto rhs_child_indices = generate_child_row_indices(rhs_l, rhs_row_indices, check_exact_equality); return cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, *lhs_child_indices, *rhs_child_indices, verbosity, fp_ulps, depth + 1); } return true; } }; template <bool check_exact_equality> struct column_comparator_impl<struct_view, check_exact_equality> { bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth) { structs_column_view l_scv(lhs); structs_column_view r_scv(rhs); for (size_type i = 0; i < lhs.num_children(); i++) { column_view lhs_child = l_scv.get_sliced_child(i, cudf::get_default_stream()); column_view rhs_child = r_scv.get_sliced_child(i, cudf::get_default_stream()); if (!cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, lhs_row_indices, rhs_row_indices, verbosity, fp_ulps, depth + 1)) { return false; } } return true; } }; template <bool check_exact_equality> struct column_comparator { template <typename T> bool operator()(column_view const& lhs, column_view const& rhs, column_view const& lhs_row_indices, column_view const& rhs_row_indices, debug_output_level verbosity, size_type fp_ulps, int depth = 0) { // compare properties if (!cudf::type_dispatcher(lhs.type(), column_property_comparator<check_exact_equality>{}, lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity)) { return false; } // compare values column_comparator_impl<T, check_exact_equality> comparator{}; return comparator(lhs, rhs, lhs_row_indices, rhs_row_indices, verbosity, fp_ulps, depth); } }; } // namespace namespace detail { /** * @copydoc cudf::test::expect_column_properties_equal */ bool expect_column_properties_equal(column_view const& lhs, column_view const& rhs, debug_output_level verbosity) { auto lhs_indices = generate_all_row_indices(lhs.size()); auto rhs_indices = generate_all_row_indices(rhs.size()); return cudf::type_dispatcher(lhs.type(), 
column_property_comparator<true>{}, lhs, rhs, *lhs_indices, *rhs_indices, verbosity); } /** * @copydoc cudf::test::expect_column_properties_equivalent */ bool expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs, debug_output_level verbosity) { auto lhs_indices = generate_all_row_indices(lhs.size()); auto rhs_indices = generate_all_row_indices(rhs.size()); return cudf::type_dispatcher(lhs.type(), column_property_comparator<false>{}, lhs, rhs, *lhs_indices, *rhs_indices, verbosity); } /** * @copydoc cudf::test::expect_columns_equal */ bool expect_columns_equal(cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity) { auto lhs_indices = generate_all_row_indices(lhs.size()); auto rhs_indices = generate_all_row_indices(rhs.size()); return cudf::type_dispatcher(lhs.type(), column_comparator<true>{}, lhs, rhs, *lhs_indices, *rhs_indices, verbosity, cudf::test::default_ulp); } /** * @copydoc cudf::test::expect_columns_equivalent */ bool expect_columns_equivalent(cudf::column_view const& lhs, cudf::column_view const& rhs, debug_output_level verbosity, size_type fp_ulps) { auto lhs_indices = generate_all_row_indices(lhs.size()); auto rhs_indices = generate_all_row_indices(rhs.size()); return cudf::type_dispatcher(lhs.type(), column_comparator<false>{}, lhs, rhs, *lhs_indices, *rhs_indices, verbosity, fp_ulps); } /** * @copydoc cudf::test::expect_equal_buffers */ void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes) { if (size_bytes > 0) { EXPECT_NE(nullptr, lhs); EXPECT_NE(nullptr, rhs); } auto typed_lhs = static_cast<char const*>(lhs); auto typed_rhs = static_cast<char const*>(rhs); EXPECT_TRUE(thrust::equal( rmm::exec_policy(cudf::get_default_stream()), typed_lhs, typed_lhs + size_bytes, typed_rhs)); } } // namespace detail /** * @copydoc cudf::test::expect_column_empty */ void expect_column_empty(cudf::column_view const& col) { EXPECT_EQ(0, col.size()); EXPECT_EQ(0, col.null_count()); } /** * @copydoc cudf::test::bitmask_to_host */ std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c) { if (c.nullable()) { auto num_bitmasks = num_bitmask_words(c.size()); std::vector<bitmask_type> host_bitmask(num_bitmasks); if (c.offset() == 0) { CUDF_CUDA_TRY(cudaMemcpy(host_bitmask.data(), c.null_mask(), num_bitmasks * sizeof(bitmask_type), cudaMemcpyDefault)); } else { auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset() + c.size()); CUDF_CUDA_TRY(cudaMemcpy( host_bitmask.data(), mask.data(), num_bitmasks * sizeof(bitmask_type), cudaMemcpyDefault)); } return host_bitmask; } else { return std::vector<bitmask_type>{}; } } namespace { template <typename T, std::enable_if_t<std::is_integral_v<T>>* = nullptr> static auto numeric_to_string_precise(T value) { return std::to_string(value); } template <typename T, std::enable_if_t<std::is_floating_point_v<T>>* = nullptr> static auto numeric_to_string_precise(T value) { std::ostringstream o; o << std::setprecision(std::numeric_limits<T>::max_digits10) << value; return o.str(); } static auto duration_suffix(cudf::duration_D) { return " days"; } static auto duration_suffix(cudf::duration_s) { return " seconds"; } static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; } static auto duration_suffix(cudf::duration_us) { return " microseconds"; } static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; } std::string get_nested_type_str(cudf::column_view const& view) { if (view.type().id() == cudf::type_id::LIST) { 
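    // recurse through the child so nested types render with their full shape,
    // e.g. a list-of-int32 column prints as something like "list<int32>".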
lists_column_view lcv(view); return cudf::type_to_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">"; } if (view.type().id() == cudf::type_id::STRUCT) { std::ostringstream out; out << cudf::type_to_name(view.type()) + "<"; std::transform(view.child_begin(), view.child_end(), std::ostream_iterator<std::string>(out, ","), [&out](auto const col) { return get_nested_type_str(col); }); out << ">"; return out.str(); } return cudf::type_to_name(view.type()); } template <typename NestedColumnView> std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ") { column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index); CUDF_EXPECTS(offsets.type().id() == type_id::INT32, "Column does not appear to be an offsets column"); CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!"); size_type output_size = c.size() + 1; // the first offset value to normalize everything against size_type first = cudf::detail::get_value<size_type>(offsets, c.offset(), cudf::get_default_stream()); rmm::device_uvector<size_type> shifted_offsets(output_size, cudf::get_default_stream()); // normalize the offset values for the column offset size_type const* d_offsets = offsets.head<size_type>() + c.offset(); thrust::transform( rmm::exec_policy(cudf::get_default_stream()), d_offsets, d_offsets + output_size, shifted_offsets.begin(), [first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); }); auto const h_shifted_offsets = cudf::detail::make_host_vector_sync(shifted_offsets, cudf::get_default_stream()); std::ostringstream buffer; for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) { buffer << h_shifted_offsets[idx]; if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; } } return buffer.str(); } struct column_view_printer { template <typename Element, std::enable_if_t<is_numeric<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? 
numeric_to_string_precise(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el); }); } } template <typename Element, std::enable_if_t<is_timestamp<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { // For timestamps, convert timestamp column to column of strings, then // call string version std::string format = [&]() { if constexpr (std::is_same_v<cudf::timestamp_s, Element>) { return std::string{"%Y-%m-%dT%H:%M:%SZ"}; } else if constexpr (std::is_same_v<cudf::timestamp_ms, Element>) { return std::string{"%Y-%m-%dT%H:%M:%S.%3fZ"}; } else if constexpr (std::is_same_v<cudf::timestamp_us, Element>) { return std::string{"%Y-%m-%dT%H:%M:%S.%6fZ"}; } else if constexpr (std::is_same_v<cudf::timestamp_ns, Element>) { return std::string{"%Y-%m-%dT%H:%M:%S.%9fZ"}; } return std::string{"%Y-%m-%d"}; }(); auto col_as_strings = cudf::strings::from_timestamps(col, format); if (col_as_strings->size() == 0) { return; } this->template operator()<cudf::string_view>(*col_as_strings, out, indent); } template <typename Element, std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto const h_data = cudf::test::to_host<Element>(col); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), std::back_inserter(out), [&h_data](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? static_cast<std::string>(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(std::cbegin(h_data.first), std::cend(h_data.first), std::back_inserter(out), [col](auto const& fp) { return static_cast<std::string>(fp); }); } } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::string_view>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { // // Implementation for strings, call special to_host variant // if (col.is_empty()) return; auto h_data = cudf::test::to_host<std::string>(col); // explicitly replace '\r' and '\n' characters with "\r" and "\n" strings respectively. auto cleaned = [](std::string_view in) { std::string out(in); auto replace_char = [](std::string& out, char c, std::string_view repl) { for (std::string::size_type pos{}; out.npos != (pos = out.find(c, pos)); pos++) { out.replace(pos, 1, repl); } }; replace_char(out, '\r', "\\r"); replace_char(out, '\n', "\\n"); return out; }; out.resize(col.size()); std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? 
cleaned(h_data.first[idx]) : std::string("NULL"); }); } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::dictionary32>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { cudf::dictionary_column_view dictionary(col); if (col.is_empty()) return; std::vector<std::string> keys = to_strings(dictionary.keys()); std::vector<std::string> indices = to_strings({dictionary.indices().type(), dictionary.size(), dictionary.indices().head(), dictionary.null_mask(), dictionary.null_count(), dictionary.offset()}); out.insert(out.end(), keys.begin(), keys.end()); if (!indices.empty()) { std::string first = "\x08 : " + indices.front(); // use : as delimiter out.push_back(first); // between keys and indices out.insert(out.end(), indices.begin() + 1, indices.end()); } } // Print the tick counts with the units template <typename Element, std::enable_if_t<is_duration<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? numeric_to_string_precise(h_data.first[idx].count()) + duration_suffix(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el.count()) + duration_suffix(el); }); } } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::list_view>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { lists_column_view lcv(col); // propagate slicing to the child if necessary column_view child = lcv.get_sliced_child(cudf::get_default_stream()); bool const is_sliced = lcv.offset() > 0 || child.offset() > 0; std::string tmp = get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent + "Length : " + std::to_string(lcv.size()) + "\n" + indent + "Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" + (lcv.parent().nullable() ? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" + detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n" : "") + // non-nested types don't typically display their null masks, so do it here for convenience. (!is_nested(child.type()) && child.nullable() ? 
" " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + (detail::to_string(child, ", ", indent + " ")) + "\n"; out.push_back(tmp); } template <typename Element, std::enable_if_t<std::is_same_v<Element, cudf::struct_view>>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { structs_column_view view{col}; std::ostringstream out_stream; out_stream << get_nested_type_str(col) << ":\n" << indent << "Length : " << view.size() << ":\n"; if (view.nullable()) { out_stream << indent << "Null count: " << view.null_count() << "\n" << detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n"; } auto iter = thrust::make_counting_iterator(0); std::transform( iter, iter + view.num_children(), std::ostream_iterator<std::string>(out_stream, "\n"), [&](size_type index) { auto child = view.get_sliced_child(index, cudf::get_default_stream()); // non-nested types don't typically display their null masks, so do it here for convenience. return (!is_nested(child.type()) && child.nullable() ? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + detail::to_string(child, ", ", indent + " "); }); out.push_back(out_stream.str()); } }; } // namespace namespace detail { /** * @copydoc cudf::test::detail::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent) { std::vector<std::string> reply; cudf::type_dispatcher(col.type(), column_view_printer{}, col, reply, indent); return reply; } /** * @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string) * * @param indent Indentation for all output */ std::string to_string(cudf::column_view const& col, std::string const& delimiter, std::string const& indent) { std::ostringstream buffer; std::vector<std::string> h_data = to_strings(col, indent); buffer << indent; std::copy(h_data.begin(), h_data.end() - (!h_data.empty()), std::ostream_iterator<std::string>(buffer, delimiter.c_str())); if (!h_data.empty()) buffer << h_data.back(); return buffer.str(); } /** * @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string) * * @param indent Indentation for all output. See comment in `to_strings` for * a detailed description. */ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size, std::string const& indent) { std::ostringstream buffer; buffer << indent; for (int idx = null_mask_size - 1; idx >= 0; idx--) { buffer << (cudf::bit_is_set(null_mask.data(), idx) ? 
"1" : "0"); } return buffer.str(); } } // namespace detail /** * @copydoc cudf::test::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col) { return detail::to_strings(col); } /** * @copydoc cudf::test::to_string(cudf::column_view, std::string) */ std::string to_string(cudf::column_view const& col, std::string const& delimiter) { return detail::to_string(col, delimiter); } /** * @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type) */ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size) { return detail::to_string(null_mask, null_mask_size); } /** * @copydoc cudf::test::print */ void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter) { os << to_string(col, delimiter) << std::endl; } /** * @copydoc cudf::test::validate_host_masks */ bool validate_host_masks(std::vector<bitmask_type> const& expected_mask, std::vector<bitmask_type> const& got_mask, size_type number_of_elements) { return std::all_of(thrust::make_counting_iterator(0), thrust::make_counting_iterator(number_of_elements), [&expected_mask, &got_mask](auto index) { return cudf::bit_is_set(expected_mask.data(), index) == cudf::bit_is_set(got_mask.data(), index); }); } } // namespace test } // namespace cudf
e2e3d673f9480b2ff9367fab414c7a76beb0e3a8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_allocator.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/atomic/common.cuh"
#include "core/providers/cuda/reduction/reduction_utils.cuh"
#include "orttraining/training_ops/cuda/math/isfinite.cuh"
#include "orttraining/training_ops/cuda/optimizer/common.h"
#include "orttraining/training_ops/cuda/optimizer/common.cuh"
#include "orttraining/training_ops/cuda/optimizer/lamb.h"

namespace onnxruntime {
namespace cuda {
template <typename T1, typename T2, typename T3>
__device__ __forceinline__ void _LambComputeDirectionRule(
    const T1& g_scale,
    const T1& w,
    const T2& g,
    const T3& m1,
    const T3& m2,
    const T3& alpha,
    const T3& beta,
    const T1& lambda,
    const T3& epsilon,
    const T3& alpha_correction,
    const T3& beta_correction,
    T2& d,
    T3& m1_new,
    T3& m2_new) {
  // Actual gradient. The scale is a product of the loss' scale and the
  // global gradient norm (if the norm > 1).
  const T3 g_unscaled = T3(T1(g) / g_scale);

  // A constant in Lamb's equation.
  const T3 one = T3(1.0f);

  // Update exponentially-averaged historical gradient.
  const T3 m1_new_tmp = alpha * m1 + (one - alpha) * g_unscaled;

  // Update exponentially-averaged historical squared gradient.
  const T3 m2_new_tmp = beta * m2 + (one - beta) * g_unscaled * g_unscaled;

  // Compute unbiased 1st-order momentum.
  // The value alpha_correction is usually (1-alpha^t),
  // where t is the number of executed training iterations.
  const T3 m1_new_tmp_corrected = m1_new_tmp / alpha_correction;

  // Compute unbiased 2nd-order momentum.
  // The value beta_correction is usually (1-beta^t),
  // where t is the number of executed training iterations.
  const T3 m2_new_tmp_corrected = m2_new_tmp / beta_correction;

  // Save regularized update direction to output.
  const T2 d_tmp = lambda * w +
                   T1(m1_new_tmp_corrected / (_Sqrt(m2_new_tmp_corrected) + epsilon));

  // Things are updated only if the direction is finite.
if (_IsFiniteScalar(d_tmp)) { d = d_tmp; m1_new = m1_new_tmp; m2_new = m2_new_tmp; } else { d = T2(0); m1_new = m1; m2_new = m2; } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void _LambComputeDirectionImpl( const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* g_norm, T3 alpha, T3 beta, T1 lambda, T3 epsilon, T1 max_norm, T3 alpha_correction, T3 beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); _LambComputeDirectionRule( scale, weights[id], grads[id], moment_1[id], moment_2[id], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, update_direction[id], moment_1_out[id], moment_2_out[id]); } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambComputeDirection( hipStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, T3 alpha, T3 beta, T1 lambda, T3 epsilon, T1 max_norm, T3 alpha_correction, T3 beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _LambComputeDirectionImpl<T1, T2, T3, T_GRAD_NORM>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, max_norm, alpha_correction, beta_correction, update_direction, moment_1_out, moment_2_out, N); } #define SPECIALIZED_LAMB_COMPUTE_DIRECTION(T1, T2, T3, T_GRAD_NORM) \ template void LambComputeDirection( \ hipStream_t stream, \ const T1* weights, \ const T2* grads, \ const T3* moment_1, \ const T3* moment_2, \ const T1* loss_scale, \ const T_GRAD_NORM* grad_norm, \ T3 alpha, \ T3 beta, \ T1 lambda, \ T3 epsilon, \ T1 max_norm, \ T3 alpha_correction, \ T3 beta_correction, \ T2* weights_out, \ T3* moment_1_out, \ T3* moment_2_out, \ size_t count); SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, float, float, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(double, double, double, double) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __device__ __forceinline__ void _LambUpdateRule( const T1 eta, const float ratio_min, const float ratio_max, const T2 r_norm, const T2 w_norm, const T2 w, const T3 d, T2* w_new, T3* g_new, T_MIXED_PRECISION_FP* w_mixed_precision_new) { // Confidence coefficeint of this update. const T2 ratio = (w_norm != T2(0.0f) && r_norm != T2(0.0f)) ? T2(eta) * _Max(T2(ratio_min), _Min(T2(ratio_max), _Sqrt(w_norm / r_norm))) : T2(eta); // Compute delta using the saved update direction. 
const T2 delta = -ratio * T2(d); const T2 w_new_tmp = w + delta; if (_IsFiniteScalar(w_new_tmp)) { if (g_new) { *g_new = T3(delta); } if (w_new) { *w_new = w_new_tmp; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w_new_tmp); } } } else { if (g_new) { *g_new = T3(0); } if (w_new) { *w_new = w; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w); } } } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void _LambUpdateImpl( const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, weights[id], update_direction[id], weights_out != nullptr ? weights_out + id : nullptr, gradients_out != nullptr ? gradients_out + id : nullptr, mixed_precision_weights_out != nullptr ? mixed_precision_weights_out + id : nullptr); } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambUpdate( hipStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); hipLaunchKernelGGL(( _LambUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, eta, ratio_min, ratio_max, r_norm, w_norm, weights, update_direction, weights_out, gradients_out, mixed_precision_weights_out, N); } #define INSTANTIATE_LAMB_UPDATE(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambUpdate( \ hipStream_t stream, \ const T1* eta, \ const float ratio_min, \ const float ratio_max, \ const T2* r_norm, \ const T2* w_norm, \ const T2* weights, \ const T3* update_direction, \ T2* weights_out, \ T3* gradients_out, \ T_MIXED_PRECISION_FP* mixed_precision_weights_out, \ size_t count); INSTANTIATE_LAMB_UPDATE(float, float, float, half) INSTANTIATE_LAMB_UPDATE(double, double, double, half) INSTANTIATE_LAMB_UPDATE(half, float, half, half) INSTANTIATE_LAMB_UPDATE(float, float, half, half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_UPDATE(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(float, float, nv_bfloat16, nv_bfloat16) #endif template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void LambMultiTensorComputeDirectionImpl( ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const T1 lambda, const T3 alpha, const T3 beta, const T3 epsilon, const T1 max_norm, const T3 alpha_correction, const T3 beta_correction) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T1* w = reinterpret_cast<const T1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; T2* g = 
reinterpret_cast<T2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; const T3* m1 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* m2 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T3* m1_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start; T3* m2_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start; const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); #pragma unroll for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambComputeDirectionRule( scale, w[i], g[i], m1[i], m2[i], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, g[i], m1_new[i], m2_new[i]); } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( hipStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const T1 lambda, const T3 alpha, const T3 beta, const T3 epsilon, const T1 max_norm, const T3 alpha_correction, const T3 beta_correction) { const int thread_count = ChunkGroup<6>::thread_count_per_block; const int block_count = chunk_group.chunk_count; hipLaunchKernelGGL(( LambMultiTensorComputeDirectionImpl<T1, T2, T3>), dim3(block_count), dim3(thread_count), 0, stream, chunk_group, loss_scale, g_norm, lambda, alpha, beta, epsilon, max_norm, alpha_correction, beta_correction); } #define INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(T1, T2, T3, T_GRAD_NORM) \ template void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( \ hipStream_t stream, \ ChunkGroup<6> chunk_group, \ const T1* loss_scale, \ const T_GRAD_NORM* g_norm, \ const T1 lambda, \ const T3 alpha, \ const T3 beta, \ const T3 epsilon, \ const T1 max_norm, \ const T3 alpha_correction, \ const T3 beta_correction); INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, float, float, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(double, double, double, double) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void LambMultiTensorUpdateImpl( ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T2* w_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[0][group_index]); const T2* r_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[1][group_index]); const T2* w = reinterpret_cast<const 
T2*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* d = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T2* w_new = chunk_group.tensor_ptrs[4][group_index] != nullptr ? reinterpret_cast<T2*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start : nullptr; T3* g_new = chunk_group.tensor_ptrs[5][group_index] != nullptr ? reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start : nullptr; T_MIXED_PRECISION_FP* w_mixed_precision_new = chunk_group.tensor_ptrs[6][group_index] != nullptr ? reinterpret_cast<T_MIXED_PRECISION_FP*>(chunk_group.tensor_ptrs[6][group_index]) + chunk_start : nullptr; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, w[i], d[i], w_new != nullptr ? w_new + i : nullptr, g_new != nullptr ? g_new + i : nullptr, w_mixed_precision_new != nullptr ? w_mixed_precision_new + i : nullptr); } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( hipStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int thread_count = ChunkGroup<7>::thread_count_per_block; const int block_count = chunk_group.chunk_count; hipLaunchKernelGGL(( LambMultiTensorUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP>), dim3(block_count), dim3(thread_count), 0, stream, chunk_group, eta, ratio_min, ratio_max); } #define INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( \ hipStream_t stream, \ ChunkGroup<7> chunk_group, \ const T1* eta, \ const float ratio_min, \ const float ratio_max); INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(half, float, half, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, half, half) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, nv_bfloat16, nv_bfloat16) #endif // w_buffer[i], d_buffer[i] is used to store the squared sum of all elements processed by the i-th block. 
// sync_range_and_lock is used for a well ordered reduction over blocks spanning the same tensor template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> __launch_bounds__(ChunkGroup<4>::thread_count_per_block) __global__ void LambMultiTensorReductionImpl( ChunkGroup<4> chunk_group, TOut1* w_buffer, TOut2* d_buffer, LambMultiTensorSyncRangeAndLock* sync_range_and_lock) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const TIn1* w = reinterpret_cast<const TIn1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; const TIn2* d = reinterpret_cast<const TIn2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; TOut1* w_norm = reinterpret_cast<TOut1*>(chunk_group.tensor_ptrs[2][group_index]); TOut2* d_norm = reinterpret_cast<TOut2*>(chunk_group.tensor_ptrs[3][group_index]); TBuf d_sum = TBuf(0.f); TBuf w_sum = TBuf(0.f); constexpr int load_count_per_thread = 4; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x * load_count_per_thread) { #pragma unroll for (int j = 0; j < load_count_per_thread; ++j) { const int index_in_chunk = i + j * blockDim.x; const int index_in_tensor = chunk_start + index_in_chunk; if (index_in_chunk < chunk_size && index_in_tensor < tensor_size) { const TBuf w_element = TBuf(w[index_in_chunk]); const TBuf d_element = TBuf(d[index_in_chunk]); w_sum += w_element * w_element; d_sum += d_element * d_element; } } } // Thread count in a block must be a multiple of GPU_WARP_SIZE. #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { w_sum += WARP_SHFL_DOWN(w_sum, stride); d_sum += WARP_SHFL_DOWN(d_sum, stride); } const int warp_count_in_block = blockDim.x / GPU_WARP_SIZE; const int lid = threadIdx.x % GPU_WARP_SIZE; const int wid = threadIdx.x / GPU_WARP_SIZE; // Shape is 2 x warp_count_in_block. 
extern __shared__ unsigned char shared_memory_[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_); TBuf* w_shared_memory_ = shared_memory; TBuf* d_shared_memory_ = shared_memory + warp_count_in_block; if (lid == 0) { w_shared_memory_[wid] = w_sum; d_shared_memory_[wid] = d_sum; } __syncthreads(); #pragma unroll for (int stride = warp_count_in_block / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { w_shared_memory_[threadIdx.x] += w_shared_memory_[threadIdx.x + stride]; d_shared_memory_[threadIdx.x] += d_shared_memory_[threadIdx.x + stride]; } __syncthreads(); } // ascertain the range of blocks with the associated tensor // note: if non-ordered reduction is OK, then atomicAdd over blocks could suffice const int leading_block_in_tensor = sync_range_and_lock[group_index].leading_block; const int num_blocks_in_tensor = sync_range_and_lock[group_index].number_blocks; if (num_blocks_in_tensor == 1) { if (threadIdx.x == 0) { *w_norm = TOut1(w_shared_memory_[0]); *d_norm = TOut2(d_shared_memory_[0]); } return; } if (threadIdx.x == 0) { w_buffer[blockIdx.x] = w_shared_memory_[0]; d_buffer[blockIdx.x] = d_shared_memory_[0]; } __threadfence(); __syncthreads(); // use lock to determine if this is last block for given tensor __shared__ bool is_last_block_done; if (threadIdx.x == 0) { int* p_lock = &sync_range_and_lock[group_index].completed_blocks; int counter = atomicAdd(p_lock, 1); is_last_block_done = (counter == num_blocks_in_tensor - 1); } __syncthreads(); // only last block to finish for associated tensor enters below if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_tensor); int blockid = leading_block_in_tensor + threadIdx.x; for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride && threadIdx.x + stride < num_blocks_in_tensor) { w_buffer[blockid] += w_buffer[blockid + stride]; d_buffer[blockid] += d_buffer[blockid + stride]; } __syncthreads(); } if (threadIdx.x == 0) { *w_norm = TOut1(w_buffer[leading_block_in_tensor]); *d_norm = TOut2(d_buffer[leading_block_in_tensor]); } } } CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> compute_tensor_range_and_lock(ChunkGroup<4> chunk_group, const CudaKernel& kernel) { const int num_blocks = chunk_group.chunk_count; // sync_range_and_lock is a struct consisting of (start_block, num_blocks, lock) for each tensor // Note: Adding such info to chunk group causes overflow (unless max tensors is reduced) const int max_tensors = ChunkGroup<4>::max_tensor_group_count; LambMultiTensorSyncRangeAndLock initial = {0, 0, 0}; CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> sync_range_and_lock(&kernel, initial, max_tensors); for (int block_index = num_blocks - 1; block_index >= 0; block_index--) { int tensor_index = chunk_group.block_index_to_tensor_group_index[block_index]; auto& tensor_block_span = sync_range_and_lock.CpuPtr()[tensor_index]; tensor_block_span.leading_block = block_index; tensor_block_span.number_blocks++; } sync_range_and_lock.CopyToGpu(); return sync_range_and_lock; } template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(hipStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size) { // thread count per block. constexpr int thread_count = ChunkGroup<4>::thread_count_per_block; // shared memory's size per block. 
const int shared_memory_size = thread_count / GPU_WARP_SIZE * 2 * sizeof(TBuf); // Enforce assumptions used inside this reduction CUDA kernel. ORT_ENFORCE(thread_count % GPU_WARP_SIZE == 0); ORT_ENFORCE((thread_count & (thread_count - 1)) == 0); const int num_blocks = chunk_group.chunk_count; const size_t w_buffer_size = num_blocks * sizeof(TOut1); const size_t d_buffer_size = num_blocks * sizeof(TOut2); ORT_ENFORCE(w_buffer_size + d_buffer_size <= reduction_buffer_size); TOut1* w_buffer = reinterpret_cast<TOut1*>(reduction_buffer); TOut2* d_buffer = reinterpret_cast<TOut2*>(w_buffer + num_blocks); auto sync_range_and_lock = compute_tensor_range_and_lock(chunk_group, kernel); hipLaunchKernelGGL(( LambMultiTensorReductionImpl<TIn1, TIn2, TOut1, TOut2, TBuf>), dim3(chunk_group.chunk_count), dim3(thread_count), shared_memory_size, stream, chunk_group, w_buffer, d_buffer, sync_range_and_lock.GpuPtr()); } #define INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(TIn1, TIn2, TOut1, TOut2, TBuf) \ template void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(hipStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size); INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, float, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(double, double, double, double, double) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, half, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(half, half, half, half, float) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(nv_bfloat16, nv_bfloat16, nv_bfloat16, nv_bfloat16, float) #endif } // namespace cuda } // namespace onnxruntime
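// ---------------------------------------------------------------------------
// Reference note (added for readability; not part of the generated source).
// Per parameter tensor, the device rules above implement the LAMB update
//
//   g_hat = g / g_scale
//   m1'   = alpha * m1 + (1 - alpha) * g_hat
//   m2'   = beta  * m2 + (1 - beta ) * g_hat^2
//   d     = lambda * w + (m1' / alpha_correction) /
//                        (sqrt(m2' / beta_correction) + epsilon)
//   ratio = eta * clamp(||w||_2 / ||d||_2, ratio_min, ratio_max)
//           (just eta when either norm is zero)
//   w_new = w - ratio * d
//
// where alpha_correction = 1 - alpha^t and beta_correction = 1 - beta^t are
// the usual bias corrections, and ||w||_2, ||d||_2 come from the multi-tensor
// reduction kernel, which stores the squared sums so the update rule takes
// _Sqrt(w_norm / r_norm).  Non-finite directions or weights fall back to the
// previous values, as the _IsFiniteScalar guards show.
// ---------------------------------------------------------------------------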
e2e3d673f9480b2ff9367fab414c7a76beb0e3a8.cu
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_allocator.h"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/atomic/common.cuh"
#include "core/providers/cuda/reduction/reduction_utils.cuh"
#include "orttraining/training_ops/cuda/math/isfinite.cuh"
#include "orttraining/training_ops/cuda/optimizer/common.h"
#include "orttraining/training_ops/cuda/optimizer/common.cuh"
#include "orttraining/training_ops/cuda/optimizer/lamb.h"

namespace onnxruntime {
namespace cuda {
template <typename T1, typename T2, typename T3>
__device__ __forceinline__ void _LambComputeDirectionRule(
    const T1& g_scale,
    const T1& w,
    const T2& g,
    const T3& m1,
    const T3& m2,
    const T3& alpha,
    const T3& beta,
    const T1& lambda,
    const T3& epsilon,
    const T3& alpha_correction,
    const T3& beta_correction,
    T2& d,
    T3& m1_new,
    T3& m2_new) {
  // Actual gradient. The scale is a product of the loss' scale and the
  // global gradient norm (if the norm > 1).
  const T3 g_unscaled = T3(T1(g) / g_scale);

  // A constant in Lamb's equation.
  const T3 one = T3(1.0f);

  // Update exponentially-averaged historical gradient.
  const T3 m1_new_tmp = alpha * m1 + (one - alpha) * g_unscaled;

  // Update exponentially-averaged historical squared gradient.
  const T3 m2_new_tmp = beta * m2 + (one - beta) * g_unscaled * g_unscaled;

  // Compute unbiased 1st-order momentum.
  // The value alpha_correction is usually (1-alpha^t),
  // where t is the number of executed training iterations.
  const T3 m1_new_tmp_corrected = m1_new_tmp / alpha_correction;

  // Compute unbiased 2nd-order momentum.
  // The value beta_correction is usually (1-beta^t),
  // where t is the number of executed training iterations.
  const T3 m2_new_tmp_corrected = m2_new_tmp / beta_correction;

  // Save regularized update direction to output.
  const T2 d_tmp = lambda * w +
                   T1(m1_new_tmp_corrected / (_Sqrt(m2_new_tmp_corrected) + epsilon));

  // Things are updated only if the direction is finite.
if (_IsFiniteScalar(d_tmp)) { d = d_tmp; m1_new = m1_new_tmp; m2_new = m2_new_tmp; } else { d = T2(0); m1_new = m1; m2_new = m2; } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void _LambComputeDirectionImpl( const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* g_norm, T3 alpha, T3 beta, T1 lambda, T3 epsilon, T1 max_norm, T3 alpha_correction, T3 beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); _LambComputeDirectionRule( scale, weights[id], grads[id], moment_1[id], moment_2[id], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, update_direction[id], moment_1_out[id], moment_2_out[id]); } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambComputeDirection( cudaStream_t stream, const T1* weights, const T2* grads, const T3* moment_1, const T3* moment_2, const T1* loss_scale, const T_GRAD_NORM* grad_norm, T3 alpha, T3 beta, T1 lambda, T3 epsilon, T1 max_norm, T3 alpha_correction, T3 beta_correction, T2* update_direction, T3* moment_1_out, T3* moment_2_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _LambComputeDirectionImpl<T1, T2, T3, T_GRAD_NORM><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( weights, grads, moment_1, moment_2, loss_scale, grad_norm, alpha, beta, lambda, epsilon, max_norm, alpha_correction, beta_correction, update_direction, moment_1_out, moment_2_out, N); } #define SPECIALIZED_LAMB_COMPUTE_DIRECTION(T1, T2, T3, T_GRAD_NORM) \ template void LambComputeDirection( \ cudaStream_t stream, \ const T1* weights, \ const T2* grads, \ const T3* moment_1, \ const T3* moment_2, \ const T1* loss_scale, \ const T_GRAD_NORM* grad_norm, \ T3 alpha, \ T3 beta, \ T1 lambda, \ T3 epsilon, \ T1 max_norm, \ T3 alpha_correction, \ T3 beta_correction, \ T2* weights_out, \ T3* moment_1_out, \ T3* moment_2_out, \ size_t count); SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, float, float, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(double, double, double, double) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, half, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, half) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, half, float, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, nv_bfloat16, float) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, nv_bfloat16) SPECIALIZED_LAMB_COMPUTE_DIRECTION(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __device__ __forceinline__ void _LambUpdateRule( const T1 eta, const float ratio_min, const float ratio_max, const T2 r_norm, const T2 w_norm, const T2 w, const T3 d, T2* w_new, T3* g_new, T_MIXED_PRECISION_FP* w_mixed_precision_new) { // Confidence coefficeint of this update. const T2 ratio = (w_norm != T2(0.0f) && r_norm != T2(0.0f)) ? T2(eta) * _Max(T2(ratio_min), _Min(T2(ratio_max), _Sqrt(w_norm / r_norm))) : T2(eta); // Compute delta using the saved update direction. 
const T2 delta = -ratio * T2(d); const T2 w_new_tmp = w + delta; if (_IsFiniteScalar(w_new_tmp)) { if (g_new) { *g_new = T3(delta); } if (w_new) { *w_new = w_new_tmp; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w_new_tmp); } } } else { if (g_new) { *g_new = T3(0); } if (w_new) { *w_new = w; if (w_mixed_precision_new) { *w_mixed_precision_new = T_MIXED_PRECISION_FP(w); } } } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void _LambUpdateImpl( const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, CUDA_LONG N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, weights[id], update_direction[id], weights_out != nullptr ? weights_out + id : nullptr, gradients_out != nullptr ? gradients_out + id : nullptr, mixed_precision_weights_out != nullptr ? mixed_precision_weights_out + id : nullptr); } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambUpdate( cudaStream_t stream, const T1* eta, const float ratio_min, const float ratio_max, const T2* r_norm, const T2* w_norm, const T2* weights, const T3* update_direction, T2* weights_out, T3* gradients_out, T_MIXED_PRECISION_FP* mixed_precision_weights_out, size_t count) { int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock)); CUDA_LONG N = static_cast<CUDA_LONG>(count); _LambUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( eta, ratio_min, ratio_max, r_norm, w_norm, weights, update_direction, weights_out, gradients_out, mixed_precision_weights_out, N); } #define INSTANTIATE_LAMB_UPDATE(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambUpdate( \ cudaStream_t stream, \ const T1* eta, \ const float ratio_min, \ const float ratio_max, \ const T2* r_norm, \ const T2* w_norm, \ const T2* weights, \ const T3* update_direction, \ T2* weights_out, \ T3* gradients_out, \ T_MIXED_PRECISION_FP* mixed_precision_weights_out, \ size_t count); INSTANTIATE_LAMB_UPDATE(float, float, float, half) INSTANTIATE_LAMB_UPDATE(double, double, double, half) INSTANTIATE_LAMB_UPDATE(half, float, half, half) INSTANTIATE_LAMB_UPDATE(float, float, half, half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_UPDATE(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_UPDATE(float, float, nv_bfloat16, nv_bfloat16) #endif template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> __global__ void LambMultiTensorComputeDirectionImpl( ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const T1 lambda, const T3 alpha, const T3 beta, const T3 epsilon, const T1 max_norm, const T3 alpha_correction, const T3 beta_correction) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T1* w = reinterpret_cast<const T1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; T2* g = 
reinterpret_cast<T2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; const T3* m1 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[2][group_index]) + chunk_start; const T3* m2 = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T3* m1_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start; T3* m2_new = reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start; const T1 scale = _ComputeGradScale<T1, T_GRAD_NORM, T1>(loss_scale, g_norm, max_norm); #pragma unroll for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambComputeDirectionRule( scale, w[i], g[i], m1[i], m2[i], alpha, beta, lambda, epsilon, alpha_correction, beta_correction, g[i], m1_new[i], m2_new[i]); } } template <typename T1, typename T2, typename T3, typename T_GRAD_NORM> void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( cudaStream_t stream, ChunkGroup<6> chunk_group, const T1* loss_scale, const T_GRAD_NORM* g_norm, const T1 lambda, const T3 alpha, const T3 beta, const T3 epsilon, const T1 max_norm, const T3 alpha_correction, const T3 beta_correction) { const int thread_count = ChunkGroup<6>::thread_count_per_block; const int block_count = chunk_group.chunk_count; LambMultiTensorComputeDirectionImpl<T1, T2, T3><<<block_count, thread_count, 0, stream>>>( chunk_group, loss_scale, g_norm, lambda, alpha, beta, epsilon, max_norm, alpha_correction, beta_correction); } #define INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(T1, T2, T3, T_GRAD_NORM) \ template void LambMultiTensorComputeDirectionFunctor<T1, T2, T3, T_GRAD_NORM>::operator()( \ cudaStream_t stream, \ ChunkGroup<6> chunk_group, \ const T1* loss_scale, \ const T_GRAD_NORM* g_norm, \ const T1 lambda, \ const T3 alpha, \ const T3 beta, \ const T3 epsilon, \ const T1 max_norm, \ const T3 alpha_correction, \ const T3 beta_correction); INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, float, float, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(double, double, double, double) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, half, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, half) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, half, float, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, nv_bfloat16, float) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16) INSTANTIATE_LAMB_STAGE1_MULTI_TENSOR_FUNCTOR(float, nv_bfloat16, float, float) #endif template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> __global__ void LambMultiTensorUpdateImpl( ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const T2* w_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[0][group_index]); const T2* r_norm = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[1][group_index]); const T2* w = reinterpret_cast<const T2*>(chunk_group.tensor_ptrs[2][group_index]) 
+ chunk_start; const T3* d = reinterpret_cast<const T3*>(chunk_group.tensor_ptrs[3][group_index]) + chunk_start; T2* w_new = chunk_group.tensor_ptrs[4][group_index] != nullptr ? reinterpret_cast<T2*>(chunk_group.tensor_ptrs[4][group_index]) + chunk_start : nullptr; T3* g_new = chunk_group.tensor_ptrs[5][group_index] != nullptr ? reinterpret_cast<T3*>(chunk_group.tensor_ptrs[5][group_index]) + chunk_start : nullptr; T_MIXED_PRECISION_FP* w_mixed_precision_new = chunk_group.tensor_ptrs[6][group_index] != nullptr ? reinterpret_cast<T_MIXED_PRECISION_FP*>(chunk_group.tensor_ptrs[6][group_index]) + chunk_start : nullptr; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x) { _LambUpdateRule( *eta, ratio_min, ratio_max, *r_norm, *w_norm, w[i], d[i], w_new != nullptr ? w_new + i : nullptr, g_new != nullptr ? g_new + i : nullptr, w_mixed_precision_new != nullptr ? w_mixed_precision_new + i : nullptr); } } template <typename T1, typename T2, typename T3, typename T_MIXED_PRECISION_FP> void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( cudaStream_t stream, ChunkGroup<7> chunk_group, const T1* eta, const float ratio_min, const float ratio_max) { const int thread_count = ChunkGroup<7>::thread_count_per_block; const int block_count = chunk_group.chunk_count; LambMultiTensorUpdateImpl<T1, T2, T3, T_MIXED_PRECISION_FP><<<block_count, thread_count, 0, stream>>>( chunk_group, eta, ratio_min, ratio_max); } #define INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(T1, T2, T3, T_MIXED_PRECISION_FP) \ template void LambMultiTensorUpdateFunctor<T1, T2, T3, T_MIXED_PRECISION_FP>::operator()( \ cudaStream_t stream, \ ChunkGroup<7> chunk_group, \ const T1* eta, \ const float ratio_min, \ const float ratio_max); INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(half, float, half, half) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, half, half) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, float, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(double, double, double, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(nv_bfloat16, float, nv_bfloat16, nv_bfloat16) INSTANTIATE_LAMB_MULTI_TENSOR_UPDATE_FUNCTOR(float, float, nv_bfloat16, nv_bfloat16) #endif // w_buffer[i], d_buffer[i] is used to store the squared sum of all elements processed by the i-th block. 
// sync_range_and_lock is used for a well ordered reduction over blocks spanning the same tensor template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> __launch_bounds__(ChunkGroup<4>::thread_count_per_block) __global__ void LambMultiTensorReductionImpl( ChunkGroup<4> chunk_group, TOut1* w_buffer, TOut2* d_buffer, LambMultiTensorSyncRangeAndLock* sync_range_and_lock) { const int group_index = chunk_group.block_index_to_tensor_group_index[blockIdx.x]; const int tensor_size = chunk_group.tensor_sizes[group_index]; const int chunk_size = chunk_group.chunk_size; const int chunk_start = chunk_group.block_index_to_chunk_start_index[blockIdx.x]; const TIn1* w = reinterpret_cast<const TIn1*>(chunk_group.tensor_ptrs[0][group_index]) + chunk_start; const TIn2* d = reinterpret_cast<const TIn2*>(chunk_group.tensor_ptrs[1][group_index]) + chunk_start; TOut1* w_norm = reinterpret_cast<TOut1*>(chunk_group.tensor_ptrs[2][group_index]); TOut2* d_norm = reinterpret_cast<TOut2*>(chunk_group.tensor_ptrs[3][group_index]); TBuf d_sum = TBuf(0.f); TBuf w_sum = TBuf(0.f); constexpr int load_count_per_thread = 4; for (int i = threadIdx.x; i < chunk_size && i + chunk_start < tensor_size; i += blockDim.x * load_count_per_thread) { #pragma unroll for (int j = 0; j < load_count_per_thread; ++j) { const int index_in_chunk = i + j * blockDim.x; const int index_in_tensor = chunk_start + index_in_chunk; if (index_in_chunk < chunk_size && index_in_tensor < tensor_size) { const TBuf w_element = TBuf(w[index_in_chunk]); const TBuf d_element = TBuf(d[index_in_chunk]); w_sum += w_element * w_element; d_sum += d_element * d_element; } } } // Thread count in a block must be a multiple of GPU_WARP_SIZE. #pragma unroll for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) { w_sum += WARP_SHFL_DOWN(w_sum, stride); d_sum += WARP_SHFL_DOWN(d_sum, stride); } const int warp_count_in_block = blockDim.x / GPU_WARP_SIZE; const int lid = threadIdx.x % GPU_WARP_SIZE; const int wid = threadIdx.x / GPU_WARP_SIZE; // Shape is 2 x warp_count_in_block. 
extern __shared__ unsigned char shared_memory_[]; TBuf* shared_memory = reinterpret_cast<TBuf*>(shared_memory_); TBuf* w_shared_memory_ = shared_memory; TBuf* d_shared_memory_ = shared_memory + warp_count_in_block; if (lid == 0) { w_shared_memory_[wid] = w_sum; d_shared_memory_[wid] = d_sum; } __syncthreads(); #pragma unroll for (int stride = warp_count_in_block / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride) { w_shared_memory_[threadIdx.x] += w_shared_memory_[threadIdx.x + stride]; d_shared_memory_[threadIdx.x] += d_shared_memory_[threadIdx.x + stride]; } __syncthreads(); } // ascertain the range of blocks with the associated tensor // note: if non-ordered reduction is OK, then atomicAdd over blocks could suffice const int leading_block_in_tensor = sync_range_and_lock[group_index].leading_block; const int num_blocks_in_tensor = sync_range_and_lock[group_index].number_blocks; if (num_blocks_in_tensor == 1) { if (threadIdx.x == 0) { *w_norm = TOut1(w_shared_memory_[0]); *d_norm = TOut2(d_shared_memory_[0]); } return; } if (threadIdx.x == 0) { w_buffer[blockIdx.x] = w_shared_memory_[0]; d_buffer[blockIdx.x] = d_shared_memory_[0]; } __threadfence(); __syncthreads(); // use lock to determine if this is last block for given tensor __shared__ bool is_last_block_done; if (threadIdx.x == 0) { int* p_lock = &sync_range_and_lock[group_index].completed_blocks; int counter = atomicAdd(p_lock, 1); is_last_block_done = (counter == num_blocks_in_tensor - 1); } __syncthreads(); // only last block to finish for associated tensor enters below if (is_last_block_done) { const int pow2_bound = least_pow2_bound(num_blocks_in_tensor); int blockid = leading_block_in_tensor + threadIdx.x; for (int stride = pow2_bound / 2; stride > 0; stride /= 2) { if (threadIdx.x < stride && threadIdx.x + stride < num_blocks_in_tensor) { w_buffer[blockid] += w_buffer[blockid + stride]; d_buffer[blockid] += d_buffer[blockid + stride]; } __syncthreads(); } if (threadIdx.x == 0) { *w_norm = TOut1(w_buffer[leading_block_in_tensor]); *d_norm = TOut2(d_buffer[leading_block_in_tensor]); } } } CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> compute_tensor_range_and_lock(ChunkGroup<4> chunk_group, const CudaKernel& kernel) { const int num_blocks = chunk_group.chunk_count; // sync_range_and_lock is a struct consisting of (start_block, num_blocks, lock) for each tensor // Note: Adding such info to chunk group causes overflow (unless max tensors is reduced) const int max_tensors = ChunkGroup<4>::max_tensor_group_count; LambMultiTensorSyncRangeAndLock initial = {0, 0, 0}; CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> sync_range_and_lock(&kernel, initial, max_tensors); for (int block_index = num_blocks - 1; block_index >= 0; block_index--) { int tensor_index = chunk_group.block_index_to_tensor_group_index[block_index]; auto& tensor_block_span = sync_range_and_lock.CpuPtr()[tensor_index]; tensor_block_span.leading_block = block_index; tensor_block_span.number_blocks++; } sync_range_and_lock.CopyToGpu(); return sync_range_and_lock; } template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf> void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(cudaStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size) { // thread count per block. constexpr int thread_count = ChunkGroup<4>::thread_count_per_block; // shared memory's size per block. 
const int shared_memory_size = thread_count / GPU_WARP_SIZE * 2 * sizeof(TBuf); // Enforce assumptions used inside this reduction CUDA kernel. ORT_ENFORCE(thread_count % GPU_WARP_SIZE == 0); ORT_ENFORCE((thread_count & (thread_count - 1)) == 0); const int num_blocks = chunk_group.chunk_count; const size_t w_buffer_size = num_blocks * sizeof(TOut1); const size_t d_buffer_size = num_blocks * sizeof(TOut2); ORT_ENFORCE(w_buffer_size + d_buffer_size <= reduction_buffer_size); TOut1* w_buffer = reinterpret_cast<TOut1*>(reduction_buffer); TOut2* d_buffer = reinterpret_cast<TOut2*>(w_buffer + num_blocks); auto sync_range_and_lock = compute_tensor_range_and_lock(chunk_group, kernel); LambMultiTensorReductionImpl<TIn1, TIn2, TOut1, TOut2, TBuf><<<chunk_group.chunk_count, thread_count, shared_memory_size, stream>>>( chunk_group, w_buffer, d_buffer, sync_range_and_lock.GpuPtr()); } #define INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(TIn1, TIn2, TOut1, TOut2, TBuf) \ template void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(cudaStream_t stream, ChunkGroup<4> chunk_group, const CudaKernel& kernel, void* reduction_buffer, size_t reduction_buffer_size); INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, float, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(double, double, double, double, double) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, half, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, half, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(half, half, half, half, float) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, nv_bfloat16, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(float, nv_bfloat16, float, float, float) INSTANTIATE_LAMB_MULTI_TENSOR_REDUCTION_FUNCTOR(nv_bfloat16, nv_bfloat16, nv_bfloat16, nv_bfloat16, float) #endif } // namespace cuda } // namespace onnxruntime
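// ---------------------------------------------------------------------------
// Host-side reference sketch (illustration only; names are hypothetical).
// A scalar CPU re-implementation of the direction/update rules above, handy
// for spot-checking kernel outputs in unit tests.  It omits the loss-scale /
// max-norm handling and the finiteness guards, and it takes the tensor-level
// L2 norms of w and d as inputs, since those come from the multi-tensor
// reduction in the real code.
#include <algorithm>
#include <cmath>

struct LambScalarReference {
  // Updates m1/m2 in place and returns the new weight for a single element.
  static float Step(float w, float g, float& m1, float& m2,
                    float alpha, float beta, float lambda, float epsilon,
                    float alpha_correction, float beta_correction,
                    float eta, float ratio_min, float ratio_max,
                    float w_norm, float d_norm) {
    m1 = alpha * m1 + (1.0f - alpha) * g;
    m2 = beta * m2 + (1.0f - beta) * g * g;
    const float m1_hat = m1 / alpha_correction;  // bias-corrected 1st moment
    const float m2_hat = m2 / beta_correction;   // bias-corrected 2nd moment
    // Regularized update direction (stage 1 of the kernels above).
    const float d = lambda * w + m1_hat / (std::sqrt(m2_hat) + epsilon);
    // Trust-ratio scaling (stage 2), using the tensor-wide norms.
    const float ratio = (w_norm != 0.0f && d_norm != 0.0f)
        ? eta * std::min(ratio_max, std::max(ratio_min, w_norm / d_norm))
        : eta;
    return w - ratio * d;
  }
};
// ---------------------------------------------------------------------------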
2f496acb369f8bcde813d04c449386efd9a05331.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <iostream> #include "matrixmul.h" #define BLOCK_SIZE 32 #define STRIDE_SIZE 32 void checkValidation(float *_A, float *_B, float *_C); __global__ void matmul(float *_A, float *_B, float *_C); int main(){ float *hA, *hB, *hC; // time hipEvent_t start, stop; float gpu_time; hipEventCreate(&start); hipEventCreate(&stop); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(WB/BLOCK_SIZE+1, HA/BLOCK_SIZE+1); // allocate mem hA = (float*)malloc(WA*HA*sizeof(float)); hB = (float*)malloc(WB*HB*sizeof(float)); hC = (float*)malloc(WB*HA*sizeof(float)); hipMalloc((void **)&A, WA*HA*sizeof(float)); hipMalloc((void **)&B, WB*HB*sizeof(float)); hipMalloc((void **)&C, WB*HA*sizeof(float)); // init value initMatrix(hA, WA, HA); initMatrix(hB, WB, HB); hipMemcpy(A, hA, WA*HA*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(B, hB, WB*HB*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(C, hC, WB*HA*sizeof(float), hipMemcpyHostToDevice); // check if there is no matching dim of matrix mul if (WA != HB){ hipFree(A); hipFree(B); hipFree(C); free(hA); free(hB); free(hC); exit(0); return 0; } hipEventRecord(start); // compute and record for (unsigned int iter = 0; iter < MAX_ITER ; iter++) hipLaunchKernelGGL(( matmul), dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, C); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&gpu_time, start, stop); printf("GPU time = %f s\n", gpu_time*0.001/MAX_ITER); hipMemcpy(hA, A, WA*HA*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(hB, B, WB*HB*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(hC, C, WB*HA*sizeof(float), hipMemcpyDeviceToHost); checkValidation(hA, hB, hC); // printf("Matrix A =\n"); // printMatrix(hA, WA, HA); // printf("Matrix B =\n"); // printMatrix(hB, WB, HB); // printf("Matrix C =\n"); // printMatrix(hC, WB, HA); hipFree(A); hipFree(B); hipFree(C); free(hA); free(hB); free(hC); return 0; } void initMatrix(float *_M, int _W, int _H){ srand(time(NULL)); for (unsigned int h=0; h<_H;h++){ for (unsigned int w=0; w<_W; w++){ _M[w+h*_W] = (float)rand()/ (float)RAND_MAX; } } } void printMatrix(float *_M, int _W, int _H){ for (unsigned int h=0; h<_H;h++){ printf("%d|\t", h); for (unsigned int w=0; w<_W; w++){ printf("%f\t", _M[w+h*_W]); } printf("|\n"); } } __global__ void matmul(float *_A, float *_B, float *_C){ __shared__ float _Asub[STRIDE_SIZE][STRIDE_SIZE]; __shared__ float _Bsub[STRIDE_SIZE][STRIDE_SIZE]; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < WB && j < HA){ _Asub[threadIdx.y][threadIdx.x] = _A[j*STRIDE_SIZE+threadIdx.x]; _Bsub[threadIdx.y][threadIdx.x] = _B[threadIdx.y*WB+i]; __syncthreads(); float sumoverpad = 0.0f; for (unsigned int dmmy = 0; dmmy < STRIDE_SIZE; dmmy++){ sumoverpad += _Asub[threadIdx.y][dmmy]*_Bsub[dmmy][threadIdx.x]; } _C[i+j*WB] = sumoverpad; } } void checkValidation(float *_A, float *_B, float *_C){ try{ for (unsigned int j=0; j<HA;j++){ for (unsigned int i=0; i<WB; i++){ float C_cpu_ij = 0.0f; for (unsigned int dmmy=0; dmmy<HB; dmmy++){ C_cpu_ij += _A[dmmy+j*WA]*_B[i+dmmy*WB]; // Have to swop } if (_C[i+j*WB] - C_cpu_ij > 0.001f && _C[i+j*WB] - C_cpu_ij < 0.001f ){ throw "Wrong"; } } } std::cout << "The matrix is right" << std::endl; } catch (const char* msg) { std::cerr << msg << std::endl; } }
2f496acb369f8bcde813d04c449386efd9a05331.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <iostream> #include "matrixmul.h" #define BLOCK_SIZE 32 #define STRIDE_SIZE 32 void checkValidation(float *_A, float *_B, float *_C); __global__ void matmul(float *_A, float *_B, float *_C); int main(){ float *hA, *hB, *hC; // time cudaEvent_t start, stop; float gpu_time; cudaEventCreate(&start); cudaEventCreate(&stop); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(WB/BLOCK_SIZE+1, HA/BLOCK_SIZE+1); // allocate mem hA = (float*)malloc(WA*HA*sizeof(float)); hB = (float*)malloc(WB*HB*sizeof(float)); hC = (float*)malloc(WB*HA*sizeof(float)); cudaMalloc((void **)&A, WA*HA*sizeof(float)); cudaMalloc((void **)&B, WB*HB*sizeof(float)); cudaMalloc((void **)&C, WB*HA*sizeof(float)); // init value initMatrix(hA, WA, HA); initMatrix(hB, WB, HB); cudaMemcpy(A, hA, WA*HA*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(B, hB, WB*HB*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(C, hC, WB*HA*sizeof(float), cudaMemcpyHostToDevice); // check if there is no matching dim of matrix mul if (WA != HB){ cudaFree(A); cudaFree(B); cudaFree(C); free(hA); free(hB); free(hC); exit(0); return 0; } cudaEventRecord(start); // compute and record for (unsigned int iter = 0; iter < MAX_ITER ; iter++) matmul<<<dimGrid, dimBlock>>>(A, B, C); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpu_time, start, stop); printf("GPU time = %f s\n", gpu_time*0.001/MAX_ITER); cudaMemcpy(hA, A, WA*HA*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hB, B, WB*HB*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hC, C, WB*HA*sizeof(float), cudaMemcpyDeviceToHost); checkValidation(hA, hB, hC); // printf("Matrix A =\n"); // printMatrix(hA, WA, HA); // printf("Matrix B =\n"); // printMatrix(hB, WB, HB); // printf("Matrix C =\n"); // printMatrix(hC, WB, HA); cudaFree(A); cudaFree(B); cudaFree(C); free(hA); free(hB); free(hC); return 0; } void initMatrix(float *_M, int _W, int _H){ srand(time(NULL)); for (unsigned int h=0; h<_H;h++){ for (unsigned int w=0; w<_W; w++){ _M[w+h*_W] = (float)rand()/ (float)RAND_MAX; } } } void printMatrix(float *_M, int _W, int _H){ for (unsigned int h=0; h<_H;h++){ printf("%d|\t", h); for (unsigned int w=0; w<_W; w++){ printf("%f\t", _M[w+h*_W]); } printf("|\n"); } } __global__ void matmul(float *_A, float *_B, float *_C){ __shared__ float _Asub[STRIDE_SIZE][STRIDE_SIZE]; __shared__ float _Bsub[STRIDE_SIZE][STRIDE_SIZE]; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < WB && j < HA){ _Asub[threadIdx.y][threadIdx.x] = _A[j*STRIDE_SIZE+threadIdx.x]; _Bsub[threadIdx.y][threadIdx.x] = _B[threadIdx.y*WB+i]; __syncthreads(); float sumoverpad = 0.0f; for (unsigned int dmmy = 0; dmmy < STRIDE_SIZE; dmmy++){ sumoverpad += _Asub[threadIdx.y][dmmy]*_Bsub[dmmy][threadIdx.x]; } _C[i+j*WB] = sumoverpad; } } void checkValidation(float *_A, float *_B, float *_C){ try{ for (unsigned int j=0; j<HA;j++){ for (unsigned int i=0; i<WB; i++){ float C_cpu_ij = 0.0f; for (unsigned int dmmy=0; dmmy<HB; dmmy++){ C_cpu_ij += _A[dmmy+j*WA]*_B[i+dmmy*WB]; // Have to swop } if (_C[i+j*WB] - C_cpu_ij > 0.001f && _C[i+j*WB] - C_cpu_ij < 0.001f ){ throw "Wrong"; } } } std::cout << "The matrix is right" << std::endl; } catch (const char* msg) { std::cerr << msg << std::endl; } }
bc5322cd6f2e6dac2473d902e73d270a9b0a65c6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <algorithm> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" #include "Common.cuh" ////////////////////////////// // forward ////////////////////////////// __global__ void kernal_fp32_RealToBinary_Forward( const float* x_buf, float* y_buf, float th_offset, float th_step, int modulation_size, int node_size, int x_frame_size, int x_frame_stride, int y_frame_stride ) { int x_frame = blockDim.x * blockIdx.x + threadIdx.x; int node = blockDim.y * blockIdx.y + threadIdx.y; float const *x_ptr = &x_buf[node * x_frame_stride]; float *y_ptr = &y_buf[node * y_frame_stride]; if ( x_frame < x_frame_size && node < node_size) { float x = x_ptr[x_frame]; int y_frame = x_frame * modulation_size; float th = th_offset; for ( int i = 0; i < modulation_size; ++i ) { y_ptr[y_frame + i] = (x > th) ? 1.0 : 0.0; th += th_step; } } } BBCU_DLL_EXPORT int bbcu_fp32_RealToBinary_Forward ( float const *dev_x_buf, float *dev_y_buf, float th_offset, float th_step, int modulation_size, int node_size, int x_frame_size, int x_frame_stride, int y_frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 1024; unsigned int const MAX_FRAME_UNIT = 1024; unsigned int const MAX_NODE_UNIT = 1024; #if 1 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= x_frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= x_frame_size ) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= node_size ) { block.x /= 2; } #endif block.x = ::min(block.x, MAX_FRAME_UNIT); block.y = ::min(block.y, MAX_NODE_UNIT); dim3 grid((x_frame_size + (block.x - 1)) / block.x, (node_size + (block.y - 1)) / block.y); hipLaunchKernelGGL(( kernal_fp32_RealToBinary_Forward), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_y_buf, th_offset, th_step, modulation_size, node_size, x_frame_size, x_frame_stride, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////// template <int MAX_NODE_UNIT> __global__ void kernal_fp32_bit_no_modulation_RealToBinary_Forward ( float const *x_buf, int *y_buf, float th, int node_size, int frame_size, int x_frame_stride, int y_frame_stride ) { int frame = blockDim.x * blockIdx.x + threadIdx.x; int node = blockDim.y * blockIdx.y + threadIdx.y; int unit_id = ((threadIdx.y * blockDim.x + threadIdx.x) >> 5); __shared__ int sbuf[MAX_NODE_UNIT][32]; float const *x_ptr = &x_buf[node * x_frame_stride]; int *y_ptr = &y_buf[node * y_frame_stride]; int bit = (frame & 0x1f); int unit = (frame >> 5); int y = 0; if ( frame < frame_size && node < node_size) { float x = x_ptr[frame]; y = (x > th) ? 
(1 << bit) : 0; } y = device_int_LocalOr(y, bit, sbuf[unit_id]); if ( frame < frame_size && node < node_size && bit == 0 ) { y_ptr[unit] = y; } } BBCU_DLL_EXPORT int bbcu_fp32_bit_no_modulation_RealToBinary_Forward ( float const *dev_x_buf, int *dev_y_buf, float th, int node_size, int frame_size, int x_frame_stride, int y_frame_stride, hipStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 1024; unsigned int const MAX_FRAME_UNIT = 1024; unsigned int const MIN_FRAME_UNIT = 32; unsigned int const MAX_NODE_UNIT = 32; #if 1 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size && block.x > MIN_FRAME_UNIT ){ block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size && block.x > MIN_FRAME_UNIT ) { block.x /= 2; } #endif block.x = ::min(block.x, MAX_FRAME_UNIT); block.y = ::min(block.y, MAX_NODE_UNIT); dim3 grid((frame_size + (block.x - 1)) / block.x, (node_size + (block.y - 1)) / block.y); hipLaunchKernelGGL(( kernal_fp32_bit_no_modulation_RealToBinary_Forward<MAX_NODE_UNIT>), dim3(grid), dim3(block), 0, streamId, dev_x_buf, dev_y_buf, th, node_size, frame_size, x_frame_stride, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // end of file
bc5322cd6f2e6dac2473d902e73d270a9b0a65c6.cu
#include <iostream> #include <algorithm> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "bbcu/bbcu.h" #include "bbcu/bbcu_util.h" #include "Common.cuh" ////////////////////////////// // forward ////////////////////////////// __global__ void kernal_fp32_RealToBinary_Forward( const float* x_buf, float* y_buf, float th_offset, float th_step, int modulation_size, int node_size, int x_frame_size, int x_frame_stride, int y_frame_stride ) { int x_frame = blockDim.x * blockIdx.x + threadIdx.x; int node = blockDim.y * blockIdx.y + threadIdx.y; float const *x_ptr = &x_buf[node * x_frame_stride]; float *y_ptr = &y_buf[node * y_frame_stride]; if ( x_frame < x_frame_size && node < node_size) { float x = x_ptr[x_frame]; int y_frame = x_frame * modulation_size; float th = th_offset; for ( int i = 0; i < modulation_size; ++i ) { y_ptr[y_frame + i] = (x > th) ? 1.0 : 0.0; th += th_step; } } } BBCU_DLL_EXPORT int bbcu_fp32_RealToBinary_Forward ( float const *dev_x_buf, float *dev_y_buf, float th_offset, float th_step, int modulation_size, int node_size, int x_frame_size, int x_frame_stride, int y_frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 1024; unsigned int const MAX_FRAME_UNIT = 1024; unsigned int const MAX_NODE_UNIT = 1024; #if 1 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= x_frame_size ) { block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= x_frame_size ) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= node_size ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid((x_frame_size + (block.x - 1)) / block.x, (node_size + (block.y - 1)) / block.y); kernal_fp32_RealToBinary_Forward<<<grid, block, 0, streamId>>>( dev_x_buf, dev_y_buf, th_offset, th_step, modulation_size, node_size, x_frame_size, x_frame_stride, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } ////////////////// template <int MAX_NODE_UNIT> __global__ void kernal_fp32_bit_no_modulation_RealToBinary_Forward ( float const *x_buf, int *y_buf, float th, int node_size, int frame_size, int x_frame_stride, int y_frame_stride ) { int frame = blockDim.x * blockIdx.x + threadIdx.x; int node = blockDim.y * blockIdx.y + threadIdx.y; int unit_id = ((threadIdx.y * blockDim.x + threadIdx.x) >> 5); __shared__ int sbuf[MAX_NODE_UNIT][32]; float const *x_ptr = &x_buf[node * x_frame_stride]; int *y_ptr = &y_buf[node * y_frame_stride]; int bit = (frame & 0x1f); int unit = (frame >> 5); int y = 0; if ( frame < frame_size && node < node_size) { float x = x_ptr[frame]; y = (x > th) ? 
(1 << bit) : 0; } y = device_int_LocalOr(y, bit, sbuf[unit_id]); if ( frame < frame_size && node < node_size && bit == 0 ) { y_ptr[unit] = y; } } BBCU_DLL_EXPORT int bbcu_fp32_bit_no_modulation_RealToBinary_Forward ( float const *dev_x_buf, int *dev_y_buf, float th, int node_size, int frame_size, int x_frame_stride, int y_frame_stride, cudaStream_t streamId ) { BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable()); unsigned int const THREAD_SIZE = 1024; unsigned int const MAX_FRAME_UNIT = 1024; unsigned int const MIN_FRAME_UNIT = 32; unsigned int const MAX_NODE_UNIT = 32; #if 1 dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT); while ( (int)block.x / 2 >= frame_size && block.x > MIN_FRAME_UNIT ){ block.x /= 2; block.y *= 2; } while ( (int)block.y / 2 >= node_size ) { block.y /= 2; } #else dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT); while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;} while ( (int)block.x / 2 >= frame_size && block.x > MIN_FRAME_UNIT ) { block.x /= 2; } #endif block.x = std::min(block.x, MAX_FRAME_UNIT); block.y = std::min(block.y, MAX_NODE_UNIT); dim3 grid((frame_size + (block.x - 1)) / block.x, (node_size + (block.y - 1)) / block.y); kernal_fp32_bit_no_modulation_RealToBinary_Forward<MAX_NODE_UNIT><<<grid, block, 0, streamId>>>( dev_x_buf, dev_y_buf, th, node_size, frame_size, x_frame_stride, y_frame_stride ); BB_CUDA_CHECK_LAST_ERROR(); return 0; } // end of file
f505ee086f198ed13a1300759417ddaa79757e93.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <opencv2/opencv.hpp> #include <vector> __global__ void erode ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) { auto op1 = blockIdx.x * (blockDim.x) + threadIdx.x; auto op2 = blockIdx.y * (blockDim.y) + threadIdx.y; if ( op1 > 0 && op1 < (cols - 1) && op2 > 0 && op2 < (rows - 1)) { for (auto c = 0; c < 3; ++c){ auto op3 = umax(data[((op2 - 1) * cols + op1 - 1) * 3 + c], data[((op2 - 1) * cols + op1 + 1) * 3 + c]); op3 = umax(op3, data[(( op2 - 1) * cols + op1) * 3 + c]); auto op4 = umax(data[( op2 * cols + op1 - 1) * 3 + c], data[( op2 * cols + op1 + 1) * 3 + c]); op4 = umax(op4, data[( op2 * cols + op1) * 3 + c]); auto op5 = umax(data[((op2 + 1) * cols + op1 - 1) * 3 + c], data[((op2 + 1) * cols + op1 + 1) * 3 + c]); op5 = umax(op5, data[((op2 + 1) * cols + op1) * 3 + c]); auto res = umax (op3, op4); res = umax (res, op5); out[(op2 * cols + op1) * 3 + c] = res; } } } int main() { cv::Mat img_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED ); auto rows = img_in.rows; auto cols = img_in.cols; auto rgb = img_in.data; std::vector< unsigned char > g( 3 * rows * cols ); cv::Mat img_out( rows, cols, CV_8UC3, g.data() ); unsigned char * rgb_d; unsigned char * out; std::size_t size = 3 * img_in.cols * img_in.rows; hipMalloc( &rgb_d, 3 * rows * cols); hipMalloc( &out, 3 * rows * cols ); hipStream_t streams[ 2 ]; hipStreamCreate( &streams[ 0 ] ); hipStreamCreate( &streams[ 1 ] ); hipMemcpyAsync( rgb_d, rgb, size/2, hipMemcpyHostToDevice, streams[ 0 ] ); hipMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, hipMemcpyHostToDevice, streams[ 1 ] ); dim3 dim1( 32, 32 ); dim3 dim2( 3 * (( cols ) / ((dim1.x - 2) + 1) ), (( rows ) / ((dim1.y - 2) + 1) )); hipEvent_t start, stop; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start ); hipLaunchKernelGGL(( erode), dim3(dim2), dim3(dim1), 0, streams[ 0 ] , rgb_d, out, cols, rows / 2 + 2); hipLaunchKernelGGL(( erode), dim3(dim2), dim3(dim1), 0, streams[ 1 ] , rgb_d+size/2, out+size/2, cols, rows / 2); hipMemcpyAsync( g.data(), out, size/2, hipMemcpyDeviceToHost, streams[ 0 ] ); hipMemcpyAsync( g.data()+size/2, out+size/2, size/2, hipMemcpyDeviceToHost, streams[ 1 ] ); hipDeviceSynchronize(); hipStreamDestroy(streams[0]); hipStreamDestroy(streams[1]); auto hipError_t = hipGetLastError(); if (hipError_t != hipSuccess){ std::cout << hipGetErrorName(hipError_t) << std::endl; std::cout << hipGetErrorString(hipError_t) << std::endl; } else { std::cout << "No Errors!" << std::endl; } hipEventRecord( stop ); hipEventSynchronize( stop ); float duration = 0.0f; hipEventElapsedTime( &duration, start, stop ); std::cout << "Total: " << duration << "ms\n"; cv::imwrite( "ErodeStreamOutput.jpg", img_out ); hipFree( rgb_d); hipFree ( out); return 0; }
f505ee086f198ed13a1300759417ddaa79757e93.cu
#include <opencv2/opencv.hpp> #include <vector> __global__ void erode ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) { auto op1 = blockIdx.x * (blockDim.x) + threadIdx.x; auto op2 = blockIdx.y * (blockDim.y) + threadIdx.y; if ( op1 > 0 && op1 < (cols - 1) && op2 > 0 && op2 < (rows - 1)) { for (auto c = 0; c < 3; ++c){ auto op3 = umax(data[((op2 - 1) * cols + op1 - 1) * 3 + c], data[((op2 - 1) * cols + op1 + 1) * 3 + c]); op3 = umax(op3, data[(( op2 - 1) * cols + op1) * 3 + c]); auto op4 = umax(data[( op2 * cols + op1 - 1) * 3 + c], data[( op2 * cols + op1 + 1) * 3 + c]); op4 = umax(op4, data[( op2 * cols + op1) * 3 + c]); auto op5 = umax(data[((op2 + 1) * cols + op1 - 1) * 3 + c], data[((op2 + 1) * cols + op1 + 1) * 3 + c]); op5 = umax(op5, data[((op2 + 1) * cols + op1) * 3 + c]); auto res = umax (op3, op4); res = umax (res, op5); out[(op2 * cols + op1) * 3 + c] = res; } } } int main() { cv::Mat img_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED ); auto rows = img_in.rows; auto cols = img_in.cols; auto rgb = img_in.data; std::vector< unsigned char > g( 3 * rows * cols ); cv::Mat img_out( rows, cols, CV_8UC3, g.data() ); unsigned char * rgb_d; unsigned char * out; std::size_t size = 3 * img_in.cols * img_in.rows; cudaMalloc( &rgb_d, 3 * rows * cols); cudaMalloc( &out, 3 * rows * cols ); cudaStream_t streams[ 2 ]; cudaStreamCreate( &streams[ 0 ] ); cudaStreamCreate( &streams[ 1 ] ); cudaMemcpyAsync( rgb_d, rgb, size/2, cudaMemcpyHostToDevice, streams[ 0 ] ); cudaMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, cudaMemcpyHostToDevice, streams[ 1 ] ); dim3 dim1( 32, 32 ); dim3 dim2( 3 * (( cols ) / ((dim1.x - 2) + 1) ), (( rows ) / ((dim1.y - 2) + 1) )); cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start ); erode<<< dim2, dim1, 0, streams[ 0 ] >>>( rgb_d, out, cols, rows / 2 + 2); erode<<< dim2, dim1, 0, streams[ 1 ] >>>( rgb_d+size/2, out+size/2, cols, rows / 2); cudaMemcpyAsync( g.data(), out, size/2, cudaMemcpyDeviceToHost, streams[ 0 ] ); cudaMemcpyAsync( g.data()+size/2, out+size/2, size/2, cudaMemcpyDeviceToHost, streams[ 1 ] ); cudaDeviceSynchronize(); cudaStreamDestroy(streams[0]); cudaStreamDestroy(streams[1]); auto cudaError = cudaGetLastError(); if (cudaError != cudaSuccess){ std::cout << cudaGetErrorName(cudaError) << std::endl; std::cout << cudaGetErrorString(cudaError) << std::endl; } else { std::cout << "No Errors!" << std::endl; } cudaEventRecord( stop ); cudaEventSynchronize( stop ); float duration = 0.0f; cudaEventElapsedTime( &duration, start, stop ); std::cout << "Total: " << duration << "ms\n"; cv::imwrite( "ErodeStreamOutput.jpg", img_out ); cudaFree( rgb_d); cudaFree ( out); return 0; }
44823d6c669593609b25f6103776f0440be9bda9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ParallelSPSS.Form1 extern "C" __global__ void addVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N); // ParallelSPSS.Form1 extern "C" __global__ void powerVector( float* a, int aLen0, float* b, int bLen0, float c, int N); // ParallelSPSS.Form1 extern "C" __global__ void multiplyVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N); // ParallelSPSS.Form1 extern "C" __global__ void addVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += gridDim.x) { c[(i)] = a[(i)] + b[(i)]; } } // ParallelSPSS.Form1 extern "C" __global__ void powerVector( float* a, int aLen0, float* b, int bLen0, float c, int N) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += gridDim.x) { b[(i)] = (a[(i)] - c) * (a[(i)] - c); } } // ParallelSPSS.Form1 extern "C" __global__ void multiplyVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += gridDim.x) { c[(i)] = a[(i)] * b[(i)]; } }
44823d6c669593609b25f6103776f0440be9bda9.cu
// ParallelSPSS.Form1 extern "C" __global__ void addVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N); // ParallelSPSS.Form1 extern "C" __global__ void powerVector( float* a, int aLen0, float* b, int bLen0, float c, int N); // ParallelSPSS.Form1 extern "C" __global__ void multiplyVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N); // ParallelSPSS.Form1 extern "C" __global__ void addVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += gridDim.x) { c[(i)] = a[(i)] + b[(i)]; } } // ParallelSPSS.Form1 extern "C" __global__ void powerVector( float* a, int aLen0, float* b, int bLen0, float c, int N) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += gridDim.x) { b[(i)] = (a[(i)] - c) * (a[(i)] - c); } } // ParallelSPSS.Form1 extern "C" __global__ void multiplyVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += gridDim.x) { c[(i)] = a[(i)] * b[(i)]; } }
e06422a3d94d899c048436201268d78f2243050c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * CLion uses VS as the compiler; set the target platform to AMD64.
 * Add find_package(CUDA) and find_package(CUDA REQUIRED) to CMakeLists.
 */
#include <iostream>
#include <math.h>
#include "timer.h"

// Vector-addition kernel for two vectors; grid and block are both one-dimensional, and only a single GPU is used
__global__ void add_one_cuda(int n, float *x, float *y)
{
    for (int i = 0; i < n; i++)
        y[i] = x[i] + y[i];
}

int main(void)
{
    int N = 1<<20;
    float *x, *y;

    // Allocate device memory
    hipMallocManaged(&x, N*sizeof(float));
    hipMallocManaged(&y, N*sizeof(float));

    // Initialize the data on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch the kernel
    hipLaunchKernelGGL(( add_one_cuda), dim3(1), dim3(1), 0, 0, N, x, y);

    // Keep device and host data in sync, so the device results do not have to be copied back to the host by hand
    hipDeviceSynchronize();

    // Compute the error
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free device memory
    hipFree(x);
    hipFree(y);

    return 0;
}
e06422a3d94d899c048436201268d78f2243050c.cu
/*
 * CLion uses VS as the compiler; set the target platform to AMD64.
 * Add find_package(CUDA) and find_package(CUDA REQUIRED) to CMakeLists.
 */
#include <iostream>
#include <math.h>
#include "timer.h"

// Vector-addition kernel for two vectors; grid and block are both one-dimensional, and only a single GPU is used
__global__ void add_one_cuda(int n, float *x, float *y)
{
    for (int i = 0; i < n; i++)
        y[i] = x[i] + y[i];
}

int main(void)
{
    int N = 1<<20;
    float *x, *y;

    // Allocate device memory
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));

    // Initialize the data on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch the kernel
    add_one_cuda<<<1, 1>>>(N, x, y);

    // Keep device and host data in sync, so the device results do not have to be copied back to the host by hand
    cudaDeviceSynchronize();

    // Compute the error
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free device memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
b94caee789e64230804de543f73a1fa094d6ddaa.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include "gmock/gmock.h" #include <thrust/device_vector.h> #include <graph.hpp> #include <algorithms.hpp> struct BetweennessCentralityTest : public ::testing::Test { }; TEST_F(BetweennessCentralityTest, SimpleGraph) { std::vector<int> graph_offsets{ { 0, 1, 2, 5, 7, 10, 12, 14 } }; std::vector<int> graph_indices{ { 2, 2, 0, 1, 3, 2, 4, 3, 5, 6, 4, 6, 4, 5 } }; std::vector<float> expected{ {0.0, 0.0, 0.6, 0.6, 0.5333333, 0.0, 0.0 } }; int num_verts = graph_offsets.size() - 1; int num_edges = graph_indices.size(); thrust::device_vector<int> d_graph_offsets(graph_offsets); thrust::device_vector<int> d_graph_indices(graph_indices); thrust::device_vector<float> d_result(num_verts); std::vector<float> result(num_verts); cugraph::experimental::GraphCSR<int,int,float> G(d_graph_offsets.data().get(), d_graph_indices.data().get(), nullptr, num_verts, num_edges); cugraph::betweenness_centrality(G, d_result.data().get()); hipMemcpy(result.data(), d_result.data().get(), sizeof(float) * num_verts, hipMemcpyDeviceToHost); for (int i = 0 ; i < num_verts ; ++i) EXPECT_FLOAT_EQ(result[i], expected[i]); }
b94caee789e64230804de543f73a1fa094d6ddaa.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include "gmock/gmock.h" #include <thrust/device_vector.h> #include <graph.hpp> #include <algorithms.hpp> struct BetweennessCentralityTest : public ::testing::Test { }; TEST_F(BetweennessCentralityTest, SimpleGraph) { std::vector<int> graph_offsets{ { 0, 1, 2, 5, 7, 10, 12, 14 } }; std::vector<int> graph_indices{ { 2, 2, 0, 1, 3, 2, 4, 3, 5, 6, 4, 6, 4, 5 } }; std::vector<float> expected{ {0.0, 0.0, 0.6, 0.6, 0.5333333, 0.0, 0.0 } }; int num_verts = graph_offsets.size() - 1; int num_edges = graph_indices.size(); thrust::device_vector<int> d_graph_offsets(graph_offsets); thrust::device_vector<int> d_graph_indices(graph_indices); thrust::device_vector<float> d_result(num_verts); std::vector<float> result(num_verts); cugraph::experimental::GraphCSR<int,int,float> G(d_graph_offsets.data().get(), d_graph_indices.data().get(), nullptr, num_verts, num_edges); cugraph::betweenness_centrality(G, d_result.data().get()); cudaMemcpy(result.data(), d_result.data().get(), sizeof(float) * num_verts, cudaMemcpyDeviceToHost); for (int i = 0 ; i < num_verts ; ++i) EXPECT_FLOAT_EQ(result[i], expected[i]); }
840dc7131c146ba0d0c8f705bec9f3483897bfc7.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <iomanip> #include <iostream> #include <hip/hip_runtime.h> #include <fstream> #include <string> #include <dtl/dtl.hpp> #include <dtl/env.hpp> #include <dtl/hash.hpp> #include <dtl/thread.hpp> #include <dtl/mem.hpp> #include <dtl/filter/blocked_bloomfilter/zoned_blocked_bloomfilter.hpp> #include "cuda_helper.hpp" #include "util.hpp" #include <hipcub/hipcub.hpp> #include "cuda/blocked_bloomFilter.hpp" //===----------------------------------------------------------------------===// // Typedefs. (cache-sectorized blocked bloom filter) using filter_key_t = $u32; using hash_value_t = $u32; using word_t = $u32; // The first hash function to use inside the block. Note: 0 is used for block addressing static constexpr u32 block_hash_fn_idx = 1; // The block type. template<u32 word_cnt, u32 zone_cnt, u32 k, u1 early_out = false> using bbf_block_t = dtl::multizone_block<filter_key_t, word_t, word_cnt, zone_cnt, k, dtl::hasher, hash_value_t, block_hash_fn_idx, 0, zone_cnt, early_out>; template<u32 word_cnt, u32 zone_cnt, u32 k, dtl::block_addressing addr = dtl::block_addressing::POWER_OF_TWO, u1 early_out = false> using bbf_t = dtl::blocked_bloomfilter_logic<filter_key_t, dtl::hasher, bbf_block_t<word_cnt, zone_cnt, k, early_out>, addr, early_out>; //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// template< std::size_t word_cnt, std::size_t zone_cnt, std::size_t k > void benchmark(const std::size_t m, std::vector<filter_key_t>& to_insert, std::vector<filter_key_t>& to_lookup, std::size_t bits_to_sort, std::ofstream &results) { //===----------------------------------------------------------------------===// // Construct an empty filter with the given size. using filter_t = bbf_t<word_cnt, zone_cnt, k>; using word_t = typename filter_t::word_t; filter_t filter(m); // Allocate memory. dtl::mem::allocator_config alloc_config = dtl::mem::allocator_config::local(); dtl::mem::numa_allocator<word_t> allocator(alloc_config); using filter_data_t = std::vector<word_t, dtl::mem::numa_allocator<word_t>>; filter_data_t filter_data(filter.word_cnt() + 1024, 0, allocator); // + x to avoid buffer overrun bug in CF // Build Filter by inserting keys. std::size_t n = 0; for (std::size_t i = 0; i < to_insert.size(); ++i) { const auto key = to_insert[i]; filter.insert(&filter_data[0], key); auto hash_key = filter.hash(key); if (!filter.contains_with_hash(&filter_data[0], hash_key, key)) { break; } else { n++; } } // validation (scalar code) if (n == 0) { std::cerr << "Empty filter?!" << std::endl; std::exit(1); } std::size_t matches = 0, matches_naive = 0; for(std::size_t i = 0; i != to_lookup.size(); ++i) { const auto key = to_lookup[i]; const auto hash_val = filter.hash(key); if (filter.contains(&filter_data[0], key)) { matches_naive++; } if(filter.contains_with_hash(&filter_data[0], hash_val, key)) { matches++; } } assert(matches == matches_naive); // CUDA filter cuda_filter<filter_t> cf(filter, &filter_data[0], filter_data.size(),nullptr,0); // probe the filter { std::vector<$u32> result_bitmap; result_bitmap.resize((n), 0); // probe filter typename cuda_filter<filter_t>::perf_data_t perf_data; // TODO (HL) result bitmaps sizing issue with the sorted_kernel!!! 
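// Probe all lookup keys on the GPU; perf_data collects the hash/sort/probe timings reported below.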
cf.contains_clustering(&to_lookup[0], to_lookup.size(), &result_bitmap[0], perf_data, bits_to_sort); std::cout << "=============================== " << '\n' << " Results: " << '\n' << " Word count: " << word_cnt << '\n' << " Block size: " << word_cnt * 4 << '\n' << " Zone count: " << zone_cnt << '\n' << " k: " << k << '\n' << " Bloom filter size(MiB): " << m/(8*1024*1024) << '\n' << " Lookup size: " << to_lookup.size() << '\n' << " Blocks: " << perf_data.cuda_block_cnt << '\n' << " CUDA Block size: " << perf_data.cuda_block_size << '\n' << " Hash throughput: " << perf_data.hash_throughput << '\n' << " Hash time (ms): " << perf_data.hash_time * 1000 << '\n' << " Sort throughput: " << perf_data.sort_throughput << '\n' << " Sort time (ms): " << perf_data.sort_time * 1000 << '\n' << " Probes per second: " << perf_data.probes_per_second << '\n' << " Probe time (ms): " << perf_data.probe_time * 1000 << '\n' << " Candidate List time (ms): " << perf_data.candidate_time * 1000 << '\n' << " Total throughput: " << perf_data.total_throughput << '\n' << "=============================== " << '\n' << std::endl; results << m/(8*1024*1024) << ';'; results << word_cnt * 4 << ';'; results << bits_to_sort << ';'; results << to_insert.size() << ';'; results << perf_data.hash_time * 1000 << ';'; results << perf_data.sort_time * 1000 << ';'; results << perf_data.probe_time * 1000 << ';'; results << perf_data.total_throughput << "\n"; size_t count = 0; for(size_t i = 0; i != to_lookup.size(); ++i ) { if(result_bitmap[i] != 0) { //std::cout << result_bitmap[i] << std::endl; count++; } } std::cout << "possible matches found " << count << " - matches found " << matches << " total " << to_lookup.size() - matches << std::endl; } } //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // Main //===----------------------------------------------------------------------===// int main(int argc, char** argv) { std::string result_file("bench_bits.csv"), output_file("output.txt"); for (int i = 1; i < argc; i++) { auto arg = std::string(argv[i]); if (arg.substr(0, 2) != "--") { exit(EXIT_FAILURE); } arg = arg.substr(2); auto p = split_once(arg, '='); auto &arg_name = p.first; auto &arg_value = p.second; if (arg_name.compare("result_file") == 0) { result_file = arg_value; } if (arg_name.compare("output_file") == 0) { output_file = arg_value; } } std::ofstream out(std::string("results/" + output_file)); std::streambuf *coutbuf = std::cout.rdbuf(); //save old buf std::cout.rdbuf(out.rdbuf()); //redirect std::cout to out.txt! std::ofstream results; results.open(std::string("results/" + result_file)); results << "Bloom filter size (MiB); Block size (bytes); bits to sort; Probe size ; Hash time (ms); Sort time (ms); Probe time (ms); Total throughput" << '\n'; //get_device_properties(); //===----------------------------------------------------------------------===// //Benchmark set up auto increment_one = [n = 0]() mutable {return ++n;}; std::vector<size_t> bits_to_sort(32); std::generate(bits_to_sort.begin(), bits_to_sort.end(), increment_one); std::size_t default_m = 1ull * 1024ull * 1024ull * 8ull; // 256MiB auto m = { default_m, default_m * 2, default_m * 4, default_m * 8, default_m * 16, default_m * 32, default_m * 64, default_m * 128, default_m * 256, default_m * 512}; auto input_size = {1ull<<28}; // 10K 100K 1M 10M 100M // Data generation. 
using key_t = $u32; for(auto to_insert_cnt : input_size){ const std::size_t to_lookup_cnt = to_insert_cnt; std::vector<key_t> to_insert(to_insert_cnt); std::vector<key_t> to_lookup(to_lookup_cnt); for(auto bloom_size : m) { set_random_values(to_insert); set_random_values(to_lookup); auto bf_size = (bloom_size / (8*1024*1024)); for(auto& bits : bits_to_sort) { std::cout << "to_insert.size(): " << to_insert_cnt/1024 << " K-keys" << std::endl; std::cout << "to_lookup.size(): " << to_insert_cnt/1024 << " K-keys" << std::endl; std::cout << "bits to sort: " << bits << " bits" << std::endl; std::cerr << "bits to sort: " << bits << " bits" << std::endl; std::cerr << "Bloom Size: " << bf_size << " MiB" << std::endl; //Register Blocking //benchmark<8, 1, 2>(bloom_size, to_insert, to_lookup, bit); //benchmark<16, 1, 2>(bloom_size, to_insert, to_lookup, bit); benchmark<32, 1, 2>(bloom_size, to_insert, to_lookup, bits, results); //benchmark<64, 1, 2>(bloom_size, to_insert, to_lookup, bit); } } } results.close(); return 0; }
840dc7131c146ba0d0c8f705bec9f3483897bfc7.cu
#include <algorithm> #include <iomanip> #include <iostream> #include <cuda_runtime.h> #include <fstream> #include <string> #include <dtl/dtl.hpp> #include <dtl/env.hpp> #include <dtl/hash.hpp> #include <dtl/thread.hpp> #include <dtl/mem.hpp> #include <dtl/filter/blocked_bloomfilter/zoned_blocked_bloomfilter.hpp> #include "cuda_helper.hpp" #include "util.hpp" #include <cub/device/device_radix_sort.cuh> #include "cuda/blocked_bloomFilter.hpp" //===----------------------------------------------------------------------===// // Typedefs. (cache-sectorized blocked bloom filter) using filter_key_t = $u32; using hash_value_t = $u32; using word_t = $u32; // The first hash function to use inside the block. Note: 0 is used for block addressing static constexpr u32 block_hash_fn_idx = 1; // The block type. template<u32 word_cnt, u32 zone_cnt, u32 k, u1 early_out = false> using bbf_block_t = dtl::multizone_block<filter_key_t, word_t, word_cnt, zone_cnt, k, dtl::hasher, hash_value_t, block_hash_fn_idx, 0, zone_cnt, early_out>; template<u32 word_cnt, u32 zone_cnt, u32 k, dtl::block_addressing addr = dtl::block_addressing::POWER_OF_TWO, u1 early_out = false> using bbf_t = dtl::blocked_bloomfilter_logic<filter_key_t, dtl::hasher, bbf_block_t<word_cnt, zone_cnt, k, early_out>, addr, early_out>; //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// template< std::size_t word_cnt, std::size_t zone_cnt, std::size_t k > void benchmark(const std::size_t m, std::vector<filter_key_t>& to_insert, std::vector<filter_key_t>& to_lookup, std::size_t bits_to_sort, std::ofstream &results) { //===----------------------------------------------------------------------===// // Construct an empty filter with the given size. using filter_t = bbf_t<word_cnt, zone_cnt, k>; using word_t = typename filter_t::word_t; filter_t filter(m); // Allocate memory. dtl::mem::allocator_config alloc_config = dtl::mem::allocator_config::local(); dtl::mem::numa_allocator<word_t> allocator(alloc_config); using filter_data_t = std::vector<word_t, dtl::mem::numa_allocator<word_t>>; filter_data_t filter_data(filter.word_cnt() + 1024, 0, allocator); // + x to avoid buffer overrun bug in CF // Build Filter by inserting keys. std::size_t n = 0; for (std::size_t i = 0; i < to_insert.size(); ++i) { const auto key = to_insert[i]; filter.insert(&filter_data[0], key); auto hash_key = filter.hash(key); if (!filter.contains_with_hash(&filter_data[0], hash_key, key)) { break; } else { n++; } } // validation (scalar code) if (n == 0) { std::cerr << "Empty filter?!" << std::endl; std::exit(1); } std::size_t matches = 0, matches_naive = 0; for(std::size_t i = 0; i != to_lookup.size(); ++i) { const auto key = to_lookup[i]; const auto hash_val = filter.hash(key); if (filter.contains(&filter_data[0], key)) { matches_naive++; } if(filter.contains_with_hash(&filter_data[0], hash_val, key)) { matches++; } } assert(matches == matches_naive); // CUDA filter cuda_filter<filter_t> cf(filter, &filter_data[0], filter_data.size(),nullptr,0); // probe the filter { std::vector<$u32> result_bitmap; result_bitmap.resize((n), 0); // probe filter typename cuda_filter<filter_t>::perf_data_t perf_data; // TODO (HL) result bitmaps sizing issue with the sorted_kernel!!! 
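// Probe all lookup keys on the GPU; perf_data collects the hash/sort/probe timings reported below.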
cf.contains_clustering(&to_lookup[0], to_lookup.size(), &result_bitmap[0], perf_data, bits_to_sort); std::cout << "=============================== " << '\n' << " Results: " << '\n' << " Word count: " << word_cnt << '\n' << " Block size: " << word_cnt * 4 << '\n' << " Zone count: " << zone_cnt << '\n' << " k: " << k << '\n' << " Bloom filter size(MiB): " << m/(8*1024*1024) << '\n' << " Lookup size: " << to_lookup.size() << '\n' << " Blocks: " << perf_data.cuda_block_cnt << '\n' << " CUDA Block size: " << perf_data.cuda_block_size << '\n' << " Hash throughput: " << perf_data.hash_throughput << '\n' << " Hash time (ms): " << perf_data.hash_time * 1000 << '\n' << " Sort throughput: " << perf_data.sort_throughput << '\n' << " Sort time (ms): " << perf_data.sort_time * 1000 << '\n' << " Probes per second: " << perf_data.probes_per_second << '\n' << " Probe time (ms): " << perf_data.probe_time * 1000 << '\n' << " Candidate List time (ms): " << perf_data.candidate_time * 1000 << '\n' << " Total throughput: " << perf_data.total_throughput << '\n' << "=============================== " << '\n' << std::endl; results << m/(8*1024*1024) << ';'; results << word_cnt * 4 << ';'; results << bits_to_sort << ';'; results << to_insert.size() << ';'; results << perf_data.hash_time * 1000 << ';'; results << perf_data.sort_time * 1000 << ';'; results << perf_data.probe_time * 1000 << ';'; results << perf_data.total_throughput << "\n"; size_t count = 0; for(size_t i = 0; i != to_lookup.size(); ++i ) { if(result_bitmap[i] != 0) { //std::cout << result_bitmap[i] << std::endl; count++; } } std::cout << "possible matches found " << count << " - matches found " << matches << " total " << to_lookup.size() - matches << std::endl; } } //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // Main //===----------------------------------------------------------------------===// int main(int argc, char** argv) { std::string result_file("bench_bits.csv"), output_file("output.txt"); for (int i = 1; i < argc; i++) { auto arg = std::string(argv[i]); if (arg.substr(0, 2) != "--") { exit(EXIT_FAILURE); } arg = arg.substr(2); auto p = split_once(arg, '='); auto &arg_name = p.first; auto &arg_value = p.second; if (arg_name.compare("result_file") == 0) { result_file = arg_value; } if (arg_name.compare("output_file") == 0) { output_file = arg_value; } } std::ofstream out(std::string("results/" + output_file)); std::streambuf *coutbuf = std::cout.rdbuf(); //save old buf std::cout.rdbuf(out.rdbuf()); //redirect std::cout to out.txt! std::ofstream results; results.open(std::string("results/" + result_file)); results << "Bloom filter size (MiB); Block size (bytes); bits to sort; Probe size ; Hash time (ms); Sort time (ms); Probe time (ms); Total throughput" << '\n'; //get_device_properties(); //===----------------------------------------------------------------------===// //Benchmark set up auto increment_one = [n = 0]() mutable {return ++n;}; std::vector<size_t> bits_to_sort(32); std::generate(bits_to_sort.begin(), bits_to_sort.end(), increment_one); std::size_t default_m = 1ull * 1024ull * 1024ull * 8ull; // 256MiB auto m = { default_m, default_m * 2, default_m * 4, default_m * 8, default_m * 16, default_m * 32, default_m * 64, default_m * 128, default_m * 256, default_m * 512}; auto input_size = {1ull<<28}; // 10K 100K 1M 10M 100M // Data generation. 
using key_t = $u32; for(auto to_insert_cnt : input_size){ const std::size_t to_lookup_cnt = to_insert_cnt; std::vector<key_t> to_insert(to_insert_cnt); std::vector<key_t> to_lookup(to_lookup_cnt); for(auto bloom_size : m) { set_random_values(to_insert); set_random_values(to_lookup); auto bf_size = (bloom_size / (8*1024*1024)); for(auto& bits : bits_to_sort) { std::cout << "to_insert.size(): " << to_insert_cnt/1024 << " K-keys" << std::endl; std::cout << "to_lookup.size(): " << to_insert_cnt/1024 << " K-keys" << std::endl; std::cout << "bits to sort: " << bits << " bits" << std::endl; std::cerr << "bits to sort: " << bits << " bits" << std::endl; std::cerr << "Bloom Size: " << bf_size << " MiB" << std::endl; //Register Blocking //benchmark<8, 1, 2>(bloom_size, to_insert, to_lookup, bit); //benchmark<16, 1, 2>(bloom_size, to_insert, to_lookup, bit); benchmark<32, 1, 2>(bloom_size, to_insert, to_lookup, bits, results); //benchmark<64, 1, 2>(bloom_size, to_insert, to_lookup, bit); } } } results.close(); return 0; }
1f7eff7a264667106f439e350ca6f501e9314c43.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2017 XGBoost contributors */ #include "./host_device_vector.h" #include "device_helpers_hip.cuh" namespace xgboost { template <typename T> struct HostDeviceVectorImpl { HostDeviceVectorImpl(size_t size, T v, int device) : device_(device), on_d_(device >= 0) { if (on_d_) { dh::safe_cuda(hipSetDevice(device_)); data_d_.resize(size, v); } else { data_h_.resize(size, v); } } // Init can be std::vector<T> or std::initializer_list<T> template <class Init> HostDeviceVectorImpl(const Init& init, int device) : device_(device), on_d_(device >= 0) { if (on_d_) { dh::safe_cuda(hipSetDevice(device_)); data_d_.resize(init.size()); thrust::copy(init.begin(), init.end(), data_d_.begin()); } else { data_h_ = init; } } HostDeviceVectorImpl(const HostDeviceVectorImpl<T>&) = delete; HostDeviceVectorImpl(HostDeviceVectorImpl<T>&&) = delete; void operator=(const HostDeviceVectorImpl<T>&) = delete; void operator=(HostDeviceVectorImpl<T>&&) = delete; size_t Size() const { return on_d_ ? data_d_.size() : data_h_.size(); } int DeviceIdx() const { return device_; } T* DevicePointer(int device) { LazySyncDevice(device); return data_d_.data().get(); } thrust::device_ptr<T> tbegin(int device) { // NOLINT return thrust::device_ptr<T>(DevicePointer(device)); } thrust::device_ptr<T> tend(int device) { // NOLINT auto begin = tbegin(device); return begin + Size(); } std::vector<T>& HostVector() { LazySyncHost(); return data_h_; } void Resize(size_t new_size, T v, int new_device) { if (new_size == this->Size() && new_device == device_) return; if (new_device != -1) device_ = new_device; // if !on_d_, but the data size is 0 and the device is set, // resize the data on device instead if (!on_d_ && (data_h_.size() > 0 || device_ == -1)) { data_h_.resize(new_size, v); } else { dh::safe_cuda(hipSetDevice(device_)); data_d_.resize(new_size, v); on_d_ = true; } } void LazySyncHost() { if (!on_d_) return; if (data_h_.size() != this->Size()) data_h_.resize(this->Size()); dh::safe_cuda(hipSetDevice(device_)); thrust::copy(data_d_.begin(), data_d_.end(), data_h_.begin()); on_d_ = false; } void LazySyncDevice(int device) { if (on_d_) return; if (device != device_) { CHECK_EQ(device_, -1); device_ = device; } if (data_d_.size() != this->Size()) { dh::safe_cuda(hipSetDevice(device_)); data_d_.resize(this->Size()); } dh::safe_cuda(hipSetDevice(device_)); thrust::copy(data_h_.begin(), data_h_.end(), data_d_.begin()); on_d_ = true; } std::vector<T> data_h_; thrust::device_vector<T> data_d_; // true if there is an up-to-date copy of data on device, false otherwise bool on_d_; int device_; }; template <typename T> HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(size, v, device); } template <typename T> HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, device); } template <typename T> HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, device); } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { HostDeviceVectorImpl<T>* tmp = impl_; impl_ = nullptr; delete tmp; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); } template <typename T> T* HostDeviceVector<T>::DevicePointer(int 
device) { return impl_->DevicePointer(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT return impl_->tbegin(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT return impl_->tend(device); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v, int new_device) { impl_->Resize(new_size, v, new_device); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<GradientPair>; } // namespace xgboost
1f7eff7a264667106f439e350ca6f501e9314c43.cu
/*! * Copyright 2017 XGBoost contributors */ #include "./host_device_vector.h" #include "./device_helpers.cuh" namespace xgboost { template <typename T> struct HostDeviceVectorImpl { HostDeviceVectorImpl(size_t size, T v, int device) : device_(device), on_d_(device >= 0) { if (on_d_) { dh::safe_cuda(cudaSetDevice(device_)); data_d_.resize(size, v); } else { data_h_.resize(size, v); } } // Init can be std::vector<T> or std::initializer_list<T> template <class Init> HostDeviceVectorImpl(const Init& init, int device) : device_(device), on_d_(device >= 0) { if (on_d_) { dh::safe_cuda(cudaSetDevice(device_)); data_d_.resize(init.size()); thrust::copy(init.begin(), init.end(), data_d_.begin()); } else { data_h_ = init; } } HostDeviceVectorImpl(const HostDeviceVectorImpl<T>&) = delete; HostDeviceVectorImpl(HostDeviceVectorImpl<T>&&) = delete; void operator=(const HostDeviceVectorImpl<T>&) = delete; void operator=(HostDeviceVectorImpl<T>&&) = delete; size_t Size() const { return on_d_ ? data_d_.size() : data_h_.size(); } int DeviceIdx() const { return device_; } T* DevicePointer(int device) { LazySyncDevice(device); return data_d_.data().get(); } thrust::device_ptr<T> tbegin(int device) { // NOLINT return thrust::device_ptr<T>(DevicePointer(device)); } thrust::device_ptr<T> tend(int device) { // NOLINT auto begin = tbegin(device); return begin + Size(); } std::vector<T>& HostVector() { LazySyncHost(); return data_h_; } void Resize(size_t new_size, T v, int new_device) { if (new_size == this->Size() && new_device == device_) return; if (new_device != -1) device_ = new_device; // if !on_d_, but the data size is 0 and the device is set, // resize the data on device instead if (!on_d_ && (data_h_.size() > 0 || device_ == -1)) { data_h_.resize(new_size, v); } else { dh::safe_cuda(cudaSetDevice(device_)); data_d_.resize(new_size, v); on_d_ = true; } } void LazySyncHost() { if (!on_d_) return; if (data_h_.size() != this->Size()) data_h_.resize(this->Size()); dh::safe_cuda(cudaSetDevice(device_)); thrust::copy(data_d_.begin(), data_d_.end(), data_h_.begin()); on_d_ = false; } void LazySyncDevice(int device) { if (on_d_) return; if (device != device_) { CHECK_EQ(device_, -1); device_ = device; } if (data_d_.size() != this->Size()) { dh::safe_cuda(cudaSetDevice(device_)); data_d_.resize(this->Size()); } dh::safe_cuda(cudaSetDevice(device_)); thrust::copy(data_h_.begin(), data_h_.end(), data_d_.begin()); on_d_ = true; } std::vector<T> data_h_; thrust::device_vector<T> data_d_; // true if there is an up-to-date copy of data on device, false otherwise bool on_d_; int device_; }; template <typename T> HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(size, v, device); } template <typename T> HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, device); } template <typename T> HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, device); } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { HostDeviceVectorImpl<T>* tmp = impl_; impl_ = nullptr; delete tmp; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); } template <typename T> T* HostDeviceVector<T>::DevicePointer(int device) { return impl_->DevicePointer(device); } 
template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT return impl_->tbegin(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT return impl_->tend(device); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v, int new_device) { impl_->Resize(new_size, v, new_device); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<GradientPair>; } // namespace xgboost
64f6ac5a1ba45d550989a459048253ad874a3b02.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//nvcc -o lab5_2_1 lab5_2_1.cu
/* Author: Pedro Silva */
/* 2. Implement a CUDA program that computes the sum of all the elements of a vector of size N. Test it for several values of N. */
/* 2.1. Implement a simple version (without resorting to optimizations). */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

__global__ void vectorsum2_1(int * d_buffer, int N){
    //THREAD ID
    int index = threadIdx.x + blockIdx.x * blockDim.x;

    /* There are N elements in d_buffer. The total sum is built by adding each element to the element
       `distance` positions to its right; every iteration halves the number of elements still to be summed,
       until only one element remains. */
    int num_of_threads = N;
    int distance = N / 2;    // Distance between the elements being added
    int primeiro, segundo;

    // This loop runs while more than one thread is needed, and only for threads still within the scope of the sum.
    while(num_of_threads > 1 && index < N/2){
        primeiro = index;
        segundo = primeiro + distance;    // in the first iteration each thread adds its element to the one N/2 positions ahead
        d_buffer[primeiro] = d_buffer[primeiro] + d_buffer[segundo];
        // One iteration done: halve the distance between the elements being added and halve the number of active threads
        distance = distance / 2;
        num_of_threads = num_of_threads / 2;
        // Make sure every thread has finished its addition
        __syncthreads();
    }
}

int main(){
    printf("Exercise 2, Lab 5 of CHAD. Sum of all the elements of an h_buffer of size N.\nN starts at 256 and doubles up to 4096.\n");
    int *d_buffer, *result, *h_buffer;
    int error;
    struct timespec start, end;
    double startTime, endTime;

    for( int N = 256; N <= 4096; N = N*2){
        printf("--------------------------------------------------------------------------\n");
        printf("Summing a vector with %i elements.\n", N);
        clock_gettime(CLOCK_MONOTONIC, &start);

        // Allocate memory on the device
        if(hipMalloc(&d_buffer, sizeof(int) * N) != hipSuccess){
            fprintf(stderr, "Error allocating memory on device.\n");
            return(-1);
        }

        // Allocate host memory for h_buffer
        h_buffer = (int*) malloc(N * sizeof(int));
        for(int i = 0; i < N; i++)
            h_buffer[i] = 1;

        // Allocate host memory to receive the result from each block
        result = (int*) malloc(N * sizeof(int));

        // Copy the vector to be summed from the host to the device
        if((error = hipMemcpy(d_buffer, h_buffer, N * sizeof(int), hipMemcpyHostToDevice)) != hipSuccess)
            fprintf(stderr, "Error transferring vector of size %i to the GPU. Error = %i.\n", N, error);

        // Initialize block and grid sizes
        dim3 BlockSize(32, 1, 1);    // Keep it simple: fixed-size blocks
        dim3 GridSize(N/32 + 1, 1, 1);

        hipLaunchKernelGGL(( vectorsum2_1), dim3(GridSize), dim3(BlockSize), 0, 0, d_buffer, N);

        // Fetch the result of the sum from the first element of d_buffer
        hipMemcpy(result, d_buffer, sizeof(int), hipMemcpyDeviceToHost);
        printf("Result of summing a vector with %i elements: %i.\n", N, *result);

        if(hipFree(d_buffer) != hipSuccess)
            printf("Error freeing memory on the device.\n");

        clock_gettime(CLOCK_MONOTONIC, &end);
        startTime = (start.tv_sec * 1e3) + (start.tv_nsec * 1e-6);
        endTime = (end.tv_sec * 1e3) + (end.tv_nsec * 1e-6);
        printf("GPU kernel execution time: %f ms.\n", endTime - startTime);

        free(h_buffer);
        free(result);
    }
    return 0;
}
64f6ac5a1ba45d550989a459048253ad874a3b02.cu
//nvcc -o lab5_2_1 lab5_2_1.cu
/*Author: Pedro Silva */
/*2. Implement a CUDA program that computes the sum of all the elements of a vector of size N. Test it for several values of N.*/
/*2.1. Implement a simple version (without resorting to optimizations).*/

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

__global__ void vectorsum2_1(int * d_buffer, int N){
    //THREAD ID
    int index = threadIdx.x + blockIdx.x * blockDim.x;

    /*We have N elements in the buffer. We split the sum of all elements into the sum of one element
    with the element `distance` indices to its right.
    Repeat until only one element is left (each iteration halves the number of elements).*/
    int num_of_threads = N;
    int distance = N / 2; //Distance between the elements to add
    int primeiro, segundo;

    //This loop runs while more than one thread remains, and only if this thread is still within the "scope" of the sum.
    while(num_of_threads > 1 && index < N/2){
        primeiro = index;
        segundo = primeiro + distance;
        //in the first iteration: 1st thread handles index 0, 2nd thread index 1, 3rd thread index 2, ...
        d_buffer[primeiro] = d_buffer[primeiro] + d_buffer[segundo];
        //one iteration done: halve the distance between elements to add and halve the number of active threads
        distance = distance / 2;
        num_of_threads = num_of_threads / 2;
        //make sure every thread has done its sum
        __syncthreads();
    }
}

int main(){
    printf("Exercise 2, Lab 5 of CHAD. Sum of all the elements of an h_buffer of size N.\nN starts at 256 (2^8) and doubles up to 4096 (2^12).\n");

    int *d_buffer, *result, *h_buffer;
    int error;
    struct timespec start, end;
    double startTime, endTime;

    for( int N = 256; N <= 4096; N = N*2){
        printf("--------------------------------------------------------------------------\n");
        printf("Sum of a vector with %i elements.\n", N);
        clock_gettime(CLOCK_MONOTONIC, &start);

        //allocate memory on the device
        if(cudaMalloc(&d_buffer, sizeof(int) * N) != cudaSuccess){
            fprintf(stderr, "Error allocating memory on device.\n");
            return(-1);
        }

        //allocate memory on the host for h_buffer
        h_buffer = (int*) malloc(N * sizeof(int));
        for(int i = 0; i < N; i++)
            h_buffer[i] = 1;

        //allocate memory on the host to receive the result of each block
        result = (int*) malloc(N * sizeof(int));

        //Transfer data from host to device (the vector to sum)
        if((error = cudaMemcpy(d_buffer, h_buffer, N * sizeof(int), cudaMemcpyHostToDevice)) != cudaSuccess)
            fprintf(stderr, "Error transferring vector of size %i to the GPU. Error = %i.\n", N, error);

        //Initialize block and grid size
        dim3 BlockSize(32, 1, 1); //Start simple: fixed-size blocks
        dim3 GridSize(N/32 + 1, 1, 1);

        vectorsum2_1<<<GridSize, BlockSize>>>(d_buffer, N);

        //Fetch the result of the sum from the first element of d_buffer
        cudaMemcpy(result, d_buffer, sizeof(int), cudaMemcpyDeviceToHost);
        printf("Result of the sum of a vector of %i elements: %i.\n", N, *result);

        if(cudaFree(d_buffer) != cudaSuccess)
            printf("Error freeing memory on the device.\n");

        clock_gettime(CLOCK_MONOTONIC, &end);
        startTime = (start.tv_sec * 1e3) + (start.tv_nsec * 1e-6);
        endTime = (end.tv_sec * 1e3) + (end.tv_nsec * 1e-6);
        printf("GPU kernel execution time: %fms.\n", endTime - startTime);

        free(h_buffer);
        free(result);
    }
    return 0;
}
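The kernel in this lab attempts a grid-wide reduction with `__syncthreads()`, but `__syncthreads()` only synchronizes the threads of a single block, so for N larger than one block the partial sums from different blocks race. A common correct (still simple) pattern is a per-block shared-memory tree reduction followed by one `atomicAdd` per block. The sketch below is a generic example in that spirit, not the lab's original code:

```cuda
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void block_sum(const int* in, int* out, int n) {
    extern __shared__ int sdata[];          // one slot per thread in the block
    int tid = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < n) ? in[i] : 0;       // load, zero-padding past the end
    __syncthreads();

    // Tree reduction inside the block: halve the active threads each step.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) sdata[tid] += sdata[tid + stride];
        __syncthreads();                    // every thread in the block reaches this
    }
    if (tid == 0) atomicAdd(out, sdata[0]); // one atomic per block combines the partials
}

int main() {
    const int n = 4096, threads = 256, blocks = (n + threads - 1) / threads;
    int *d_in, *d_out, h_out = 0;
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_out, sizeof(int));
    cudaMemset(d_out, 0, sizeof(int));

    int* h_in = (int*)malloc(n * sizeof(int));
    for (int i = 0; i < n; i++) h_in[i] = 1;
    cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);

    block_sum<<<blocks, threads, threads * sizeof(int)>>>(d_in, d_out, n);
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("sum = %d (expected %d)\n", h_out, n);

    free(h_in); cudaFree(d_in); cudaFree(d_out);
    return 0;
}
```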
3b8f35433939ef17bc229f334039c20931302c19.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <cutil_math.h> #include "cutil_inline.h" #define DEG_TO_RAD 0.017453292519943 float4 * dRGBA; unsigned short * dRAW; int * dWARP; int * dBUFF; int * dHUFF; int * dEDGE; int * hmax; int * minmax; int3 * head; __device__ int freezer; __device__ unsigned short getDepthFromRAW( unsigned short raw ) { return ( raw >> 3 ); } __device__ unsigned short getPlayerFromRAW( unsigned short raw ) { return ( raw & 0x7 ); } __device__ float3 getWorldSpaceFromDepthSpace( int x, int y, short depth, int width, int height ) { float3 XYZ; float phi = ( (float)x / (float)width * 57.0f - 28.5f ) * DEG_TO_RAD; float theta = ( (float)y / (float)height * 43.0f + 68.5f ) * DEG_TO_RAD; XYZ.x = depth; XYZ.y = (int)(depth * tan(phi)); XYZ.z = (int)(depth / tan(theta) / cos(phi)); return XYZ; } __device__ int getIndex( int x, int y, int width, int limx, int limy ) { if (x < 0 || y < 0 || x >= limx || y >= limy) return -1; return width * y + x; } __global__ void clear_color_k( float4 * color, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if( x >= width || y >= height ) { return; } unsigned int cindex = (height-y-1)*width+x; color[cindex] = make_float4( 0.0 ); } __global__ void clear_k( int * warp, int * hough, int * edge, int * maximum, int * mmax, int3 * xyz, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if( x >= width || y >= height ) { return; } unsigned int cindex = (height-y-1)*width+x; if( cindex == 0 ) { *maximum = 0; mmax[0] = width; //minx mmax[1] = 0; //maxx mmax[2] = height; //miny mmax[3] = 0; //maxy freezer = 0xBeef; *xyz = make_int3( -1 ); } warp[cindex] = 3000; hough[cindex] = 0; edge[cindex] = 0; } __global__ void make_pretty_k( float4 * color, int * warp, int * edge, int* hough, int * maximum, int * mmax, int3 * xyz, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } unsigned int cindex = v*width+x; float fdepth = warp[cindex] == 0 ? 0.0 : ( 1.0 - ( warp[cindex] - 400.0 ) / 2600.0 ); bool gotbeef = false; if( hough[cindex] >= *maximum ) { int answer = atomicExch( &freezer, 0xDeadBeef ); //Try to steal some beef if( answer == 0xBeef ) //first ones here, everyone else gets dead beef! { *xyz = make_int3( x, y, warp[cindex] ); gotbeef = true; } } color[cindex] = make_float4( gotbeef, edge[cindex] > 0, 0.1,//( fdepth > 0 ? 
sqrt( (float)min( hough[cindex] / 40.0, 1.0 ) ) : 0.0 ), 1.0 ); } __global__ void max_5x5_k( int * buff, int * warp, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; int maximum = warp[index]; for( int i = -2; i <= 2; i ++ ) { for( int j = -2; j <= 2; j ++ ) { maximum = max( maximum, warp[index+i+j*width] ); } } buff[index] = maximum; } __global__ void find_head_k( int * maximum, int * warp, int * hough, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; if( warp[index] < 3000 ) atomicMax( maximum, hough[index] ); } __global__ void hough_k( int * buff, int * edge, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; for( int t = 0; t < 180; t += 10 ) { int xoff = 10.0 * cos( t * DEG_TO_RAD ) + 0.5; int yoff = 10.0 * sin( t * DEG_TO_RAD ) + 0.5; for( int i = -1; i <= 1; i ++ ) { for( int j = -1; j <= 1; j ++ ) { buff[index] += edge[index+(i+xoff)+(j+yoff)*width]; } } } } __global__ void min_5x5_k( int * buff, int * warp, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; int minimum = warp[index]; for( int i = -2; i <= 2; i ++ ) { for( int j = -2; j <= 2; j ++ ) { minimum = min( minimum, warp[index+i+j*width] ); } } buff[index] = minimum; } __global__ void edge_3x3_k( int * buff, int * warp, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; int count = 0; for( int i = -1; i <= 1; i ++ ) { for( int j = -1; j <= 1; j ++ ) { count += warp[index+i+j*width] < 3000; } } buff[index] = ( count < 7 && warp[index] < 3000 ) ? 
1 : 0; } __global__ void process_depth_k( int * warp, unsigned short * raw, int * mmax, unsigned int width, unsigned int height, int target ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; int rHeight = 315, rWidth = 434; if( x >= width || y >= height ) { return; } unsigned int index = y*width+x; unsigned int cindex = (height-y-1)*width+x; unsigned short player = getPlayerFromRAW( raw[index] ); unsigned short depth = getDepthFromRAW( raw[index] ); float3 worldpos = getWorldSpaceFromDepthSpace( x, y, depth, width, height ); int wpx = (worldpos.y / 10 ) + rWidth / 2.0; int wpy = (worldpos.z / 10) + rHeight / 2.0; int warpindex = getIndex( wpx, wpy, width, rWidth, rHeight ); if( warpindex >= 0 && target == player && depth > 0 ) { atomicMin( &(warp[warpindex]), (int)depth ); atomicMin( &(mmax[0]), wpx ); atomicMax( &(mmax[1]), wpx ); atomicMin( &(mmax[2]), wpy ); atomicMax( &(mmax[3]), wpy ); } else atomicMin( &(warp[cindex]), (int)3000 ); } extern "C" void process_depth( dim3 dimGrid, dim3 dimBlock, float4 * depthRGBA, unsigned short * depthRAW, unsigned int width, unsigned int height, int3 * xyz, int target ) { hipMemcpy( dRAW, depthRAW, width * height * sizeof( unsigned short ), hipMemcpyHostToDevice ); cutilCheckMsg("RAW Depth Transfer"); hipLaunchKernelGGL(( clear_k) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dWARP, dHUFF, dEDGE, hmax, minmax, head, width, height ); cutilCheckMsg("Clear"); hipLaunchKernelGGL(( process_depth_k) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dWARP, dRAW, minmax, width, height, target ); cutilCheckMsg("Depth Process"); hipLaunchKernelGGL(( min_5x5_k) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dBUFF, dWARP, minmax, width, height ); cutilCheckMsg("Min filt"); hipLaunchKernelGGL(( max_5x5_k) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dWARP, dBUFF, minmax, width, height ); cutilCheckMsg("Max filt"); hipLaunchKernelGGL(( edge_3x3_k) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dEDGE, dWARP, minmax, width, height ); cutilCheckMsg("Edge filt"); hipLaunchKernelGGL(( hough_k) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dHUFF, dEDGE, minmax, width, height ); cutilCheckMsg("Hough filt"); hipLaunchKernelGGL(( find_head_k) , dim3(dimGrid), dim3(dimBlock) , 0, 0, hmax, dWARP, dHUFF, minmax, width, height ); cutilCheckMsg("Big Max"); hipLaunchKernelGGL(( make_pretty_k) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dRGBA, dWARP, dEDGE, dHUFF, hmax, minmax, head, width, height ); cutilCheckMsg("Convert to Image"); hipMemcpy( xyz, head, sizeof( int3 ), hipMemcpyDeviceToHost ); cutilCheckMsg("XYZ Transfer"); hipMemcpy( depthRGBA, dRGBA, width * height * sizeof( float4 ), hipMemcpyDeviceToHost ); cutilCheckMsg("RGBA Depth Transfer"); } extern "C" void reset( dim3 dimGrid, dim3 dimBlock, unsigned int width, unsigned int height ) { hipLaunchKernelGGL(( clear_color_k) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dRGBA, width, height ); cutilCheckMsg("Reset"); } extern "C" void cudaInit( unsigned int width, unsigned int height ) { cutilCheckMsg("Before"); hipMalloc( &dRGBA, width * height * sizeof( float4 ) ); cutilCheckMsg("CUDA Malloc"); hipMalloc( &dRAW, width * height * sizeof( unsigned short ) ); cutilCheckMsg("CUDA Malloc"); hipMalloc( &dWARP, width * height * sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); hipMalloc( &dBUFF, width * height * sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); hipMalloc( &dHUFF, width * height * sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); hipMalloc( &dEDGE, width * height * sizeof( int ) ); cutilCheckMsg("CUDA 
Malloc"); hipMalloc( &hmax, sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); hipMalloc( &minmax, 4*sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); hipMalloc( &head, sizeof( int3 ) ); cutilCheckMsg("CUDA Malloc"); }
3b8f35433939ef17bc229f334039c20931302c19.cu
#include <cuda.h> #include <cuda_runtime_api.h> #include <cutil_math.h> #include "cutil_inline.h" #define DEG_TO_RAD 0.017453292519943 float4 * dRGBA; unsigned short * dRAW; int * dWARP; int * dBUFF; int * dHUFF; int * dEDGE; int * hmax; int * minmax; int3 * head; __device__ int freezer; __device__ unsigned short getDepthFromRAW( unsigned short raw ) { return ( raw >> 3 ); } __device__ unsigned short getPlayerFromRAW( unsigned short raw ) { return ( raw & 0x7 ); } __device__ float3 getWorldSpaceFromDepthSpace( int x, int y, short depth, int width, int height ) { float3 XYZ; float phi = ( (float)x / (float)width * 57.0f - 28.5f ) * DEG_TO_RAD; float theta = ( (float)y / (float)height * 43.0f + 68.5f ) * DEG_TO_RAD; XYZ.x = depth; XYZ.y = (int)(depth * tan(phi)); XYZ.z = (int)(depth / tan(theta) / cos(phi)); return XYZ; } __device__ int getIndex( int x, int y, int width, int limx, int limy ) { if (x < 0 || y < 0 || x >= limx || y >= limy) return -1; return width * y + x; } __global__ void clear_color_k( float4 * color, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if( x >= width || y >= height ) { return; } unsigned int cindex = (height-y-1)*width+x; color[cindex] = make_float4( 0.0 ); } __global__ void clear_k( int * warp, int * hough, int * edge, int * maximum, int * mmax, int3 * xyz, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if( x >= width || y >= height ) { return; } unsigned int cindex = (height-y-1)*width+x; if( cindex == 0 ) { *maximum = 0; mmax[0] = width; //minx mmax[1] = 0; //maxx mmax[2] = height; //miny mmax[3] = 0; //maxy freezer = 0xBeef; *xyz = make_int3( -1 ); } warp[cindex] = 3000; hough[cindex] = 0; edge[cindex] = 0; } __global__ void make_pretty_k( float4 * color, int * warp, int * edge, int* hough, int * maximum, int * mmax, int3 * xyz, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } unsigned int cindex = v*width+x; float fdepth = warp[cindex] == 0 ? 0.0 : ( 1.0 - ( warp[cindex] - 400.0 ) / 2600.0 ); bool gotbeef = false; if( hough[cindex] >= *maximum ) { int answer = atomicExch( &freezer, 0xDeadBeef ); //Try to steal some beef if( answer == 0xBeef ) //first ones here, everyone else gets dead beef! { *xyz = make_int3( x, y, warp[cindex] ); gotbeef = true; } } color[cindex] = make_float4( gotbeef, edge[cindex] > 0, 0.1,//( fdepth > 0 ? 
sqrt( (float)min( hough[cindex] / 40.0, 1.0 ) ) : 0.0 ), 1.0 ); } __global__ void max_5x5_k( int * buff, int * warp, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; int maximum = warp[index]; for( int i = -2; i <= 2; i ++ ) { for( int j = -2; j <= 2; j ++ ) { maximum = max( maximum, warp[index+i+j*width] ); } } buff[index] = maximum; } __global__ void find_head_k( int * maximum, int * warp, int * hough, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; if( warp[index] < 3000 ) atomicMax( maximum, hough[index] ); } __global__ void hough_k( int * buff, int * edge, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; for( int t = 0; t < 180; t += 10 ) { int xoff = 10.0 * cos( t * DEG_TO_RAD ) + 0.5; int yoff = 10.0 * sin( t * DEG_TO_RAD ) + 0.5; for( int i = -1; i <= 1; i ++ ) { for( int j = -1; j <= 1; j ++ ) { buff[index] += edge[index+(i+xoff)+(j+yoff)*width]; } } } } __global__ void min_5x5_k( int * buff, int * warp, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; int minimum = warp[index]; for( int i = -2; i <= 2; i ++ ) { for( int j = -2; j <= 2; j ++ ) { minimum = min( minimum, warp[index+i+j*width] ); } } buff[index] = minimum; } __global__ void edge_3x3_k( int * buff, int * warp, int * mmax, unsigned int width, unsigned int height ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int v = (height-y-1); if( x < mmax[0] || x > mmax[1] || v < mmax[2] || v > mmax[3] ) { return; } int index = (height-y-1)*width+x; int count = 0; for( int i = -1; i <= 1; i ++ ) { for( int j = -1; j <= 1; j ++ ) { count += warp[index+i+j*width] < 3000; } } buff[index] = ( count < 7 && warp[index] < 3000 ) ? 
1 : 0; } __global__ void process_depth_k( int * warp, unsigned short * raw, int * mmax, unsigned int width, unsigned int height, int target ) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; int rHeight = 315, rWidth = 434; if( x >= width || y >= height ) { return; } unsigned int index = y*width+x; unsigned int cindex = (height-y-1)*width+x; unsigned short player = getPlayerFromRAW( raw[index] ); unsigned short depth = getDepthFromRAW( raw[index] ); float3 worldpos = getWorldSpaceFromDepthSpace( x, y, depth, width, height ); int wpx = (worldpos.y / 10 ) + rWidth / 2.0; int wpy = (worldpos.z / 10) + rHeight / 2.0; int warpindex = getIndex( wpx, wpy, width, rWidth, rHeight ); if( warpindex >= 0 && target == player && depth > 0 ) { atomicMin( &(warp[warpindex]), (int)depth ); atomicMin( &(mmax[0]), wpx ); atomicMax( &(mmax[1]), wpx ); atomicMin( &(mmax[2]), wpy ); atomicMax( &(mmax[3]), wpy ); } else atomicMin( &(warp[cindex]), (int)3000 ); } extern "C" void process_depth( dim3 dimGrid, dim3 dimBlock, float4 * depthRGBA, unsigned short * depthRAW, unsigned int width, unsigned int height, int3 * xyz, int target ) { cudaMemcpy( dRAW, depthRAW, width * height * sizeof( unsigned short ), cudaMemcpyHostToDevice ); cutilCheckMsg("RAW Depth Transfer"); clear_k <<< dimGrid, dimBlock >>> ( dWARP, dHUFF, dEDGE, hmax, minmax, head, width, height ); cutilCheckMsg("Clear"); process_depth_k <<< dimGrid, dimBlock >>> ( dWARP, dRAW, minmax, width, height, target ); cutilCheckMsg("Depth Process"); min_5x5_k <<< dimGrid, dimBlock >>> ( dBUFF, dWARP, minmax, width, height ); cutilCheckMsg("Min filt"); max_5x5_k <<< dimGrid, dimBlock >>> ( dWARP, dBUFF, minmax, width, height ); cutilCheckMsg("Max filt"); edge_3x3_k <<< dimGrid, dimBlock >>> ( dEDGE, dWARP, minmax, width, height ); cutilCheckMsg("Edge filt"); hough_k <<< dimGrid, dimBlock >>> ( dHUFF, dEDGE, minmax, width, height ); cutilCheckMsg("Hough filt"); find_head_k <<< dimGrid, dimBlock >>> ( hmax, dWARP, dHUFF, minmax, width, height ); cutilCheckMsg("Big Max"); make_pretty_k <<< dimGrid, dimBlock >>> ( dRGBA, dWARP, dEDGE, dHUFF, hmax, minmax, head, width, height ); cutilCheckMsg("Convert to Image"); cudaMemcpy( xyz, head, sizeof( int3 ), cudaMemcpyDeviceToHost ); cutilCheckMsg("XYZ Transfer"); cudaMemcpy( depthRGBA, dRGBA, width * height * sizeof( float4 ), cudaMemcpyDeviceToHost ); cutilCheckMsg("RGBA Depth Transfer"); } extern "C" void reset( dim3 dimGrid, dim3 dimBlock, unsigned int width, unsigned int height ) { clear_color_k <<< dimGrid, dimBlock >>> ( dRGBA, width, height ); cutilCheckMsg("Reset"); } extern "C" void cudaInit( unsigned int width, unsigned int height ) { cutilCheckMsg("Before"); cudaMalloc( &dRGBA, width * height * sizeof( float4 ) ); cutilCheckMsg("CUDA Malloc"); cudaMalloc( &dRAW, width * height * sizeof( unsigned short ) ); cutilCheckMsg("CUDA Malloc"); cudaMalloc( &dWARP, width * height * sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); cudaMalloc( &dBUFF, width * height * sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); cudaMalloc( &dHUFF, width * height * sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); cudaMalloc( &dEDGE, width * height * sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); cudaMalloc( &hmax, sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); cudaMalloc( &minmax, 4*sizeof( int ) ); cutilCheckMsg("CUDA Malloc"); cudaMalloc( &head, sizeof( int3 ) ); cutilCheckMsg("CUDA Malloc"); }
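process_depth_k keeps a running bounding box of the warped player pixels in the four-element `minmax` array via atomicMin/atomicMax, so the later filter kernels only scan that region instead of the whole frame. The same idea in isolation, as a hedged standalone sketch rather than the project's code:

```cuda
#include <cuda_runtime.h>
#include <stdio.h>
#include <limits.h>

// bbox = {minx, maxx, miny, maxy}, initialized to {INT_MAX, INT_MIN, INT_MAX, INT_MIN}.
__global__ void bbox_of_hits(const unsigned char* mask, int width, int height, int* bbox) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    if (mask[y * width + x]) {            // pixel belongs to the object of interest
        atomicMin(&bbox[0], x);
        atomicMax(&bbox[1], x);
        atomicMin(&bbox[2], y);
        atomicMax(&bbox[3], y);
    }
}

int main() {
    const int W = 8, H = 8;
    unsigned char h_mask[W * H] = {0};
    h_mask[3 * W + 2] = 1;  h_mask[5 * W + 6] = 1;   // two "hit" pixels
    int h_bbox[4] = {INT_MAX, INT_MIN, INT_MAX, INT_MIN};

    unsigned char* d_mask; int* d_bbox;
    cudaMalloc(&d_mask, sizeof(h_mask));
    cudaMalloc(&d_bbox, sizeof(h_bbox));
    cudaMemcpy(d_mask, h_mask, sizeof(h_mask), cudaMemcpyHostToDevice);
    cudaMemcpy(d_bbox, h_bbox, sizeof(h_bbox), cudaMemcpyHostToDevice);

    dim3 block(8, 8), grid(1, 1);
    bbox_of_hits<<<grid, block>>>(d_mask, W, H, d_bbox);
    cudaMemcpy(h_bbox, d_bbox, sizeof(h_bbox), cudaMemcpyDeviceToHost);
    printf("x:[%d,%d] y:[%d,%d]\n", h_bbox[0], h_bbox[1], h_bbox[2], h_bbox[3]);

    cudaFree(d_mask); cudaFree(d_bbox);
    return 0;
}
```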
b121a804d7eab8b8579d1211ab6283260b600ada.hip
// !!! This is a file automatically generated by hipify!!! #include "cupoch/io/class_io/trianglemesh_io.h" #include "cupoch/geometry/trianglemesh.h" using namespace cupoch; using namespace cupoch::io; void HostTriangleMesh::FromDevice(const geometry::TriangleMesh& trianglemesh) { vertices_.resize(trianglemesh.vertices_.size()); vertex_normals_.resize(trianglemesh.vertex_normals_.size()); vertex_colors_.resize(trianglemesh.vertex_colors_.size()); triangles_.resize(trianglemesh.triangles_.size()); triangle_normals_.resize(trianglemesh.triangle_normals_.size()); triangle_uvs_.resize(trianglemesh.triangle_uvs_.size()); utility::CopyFromDeviceMultiStream(trianglemesh.vertices_, vertices_); utility::CopyFromDeviceMultiStream(trianglemesh.vertex_normals_, vertex_normals_); utility::CopyFromDeviceMultiStream(trianglemesh.vertex_colors_, vertex_colors_); utility::CopyFromDeviceMultiStream(trianglemesh.triangles_, triangles_); utility::CopyFromDeviceMultiStream(trianglemesh.triangle_normals_, triangle_normals_); utility::CopyFromDeviceMultiStream(trianglemesh.triangle_uvs_, triangle_uvs_); texture_.FromDevice(trianglemesh.texture_); hipDeviceSynchronize(); } void HostTriangleMesh::ToDevice(geometry::TriangleMesh& trianglemesh) const { trianglemesh.vertices_.resize(vertices_.size()); trianglemesh.vertex_normals_.resize(vertex_normals_.size()); trianglemesh.vertex_colors_.resize(vertex_colors_.size()); trianglemesh.triangles_.resize(triangles_.size()); trianglemesh.triangle_normals_.resize(triangle_normals_.size()); trianglemesh.triangle_uvs_.resize(triangle_uvs_.size()); utility::CopyToDeviceMultiStream(vertices_, trianglemesh.vertices_); utility::CopyToDeviceMultiStream(vertex_normals_, trianglemesh.vertex_normals_); utility::CopyToDeviceMultiStream(vertex_colors_, trianglemesh.vertex_colors_); utility::CopyToDeviceMultiStream(triangles_, trianglemesh.triangles_); utility::CopyToDeviceMultiStream(triangle_normals_, trianglemesh.triangle_normals_); utility::CopyToDeviceMultiStream(triangle_uvs_, trianglemesh.triangle_uvs_); texture_.ToDevice(trianglemesh.texture_); hipDeviceSynchronize(); } void HostTriangleMesh::Clear() { vertices_.clear(); vertex_normals_.clear(); vertex_colors_.clear(); triangles_.clear(); triangle_normals_.clear(); triangle_uvs_.clear(); texture_.Clear(); }
b121a804d7eab8b8579d1211ab6283260b600ada.cu
#include "cupoch/io/class_io/trianglemesh_io.h" #include "cupoch/geometry/trianglemesh.h" using namespace cupoch; using namespace cupoch::io; void HostTriangleMesh::FromDevice(const geometry::TriangleMesh& trianglemesh) { vertices_.resize(trianglemesh.vertices_.size()); vertex_normals_.resize(trianglemesh.vertex_normals_.size()); vertex_colors_.resize(trianglemesh.vertex_colors_.size()); triangles_.resize(trianglemesh.triangles_.size()); triangle_normals_.resize(trianglemesh.triangle_normals_.size()); triangle_uvs_.resize(trianglemesh.triangle_uvs_.size()); utility::CopyFromDeviceMultiStream(trianglemesh.vertices_, vertices_); utility::CopyFromDeviceMultiStream(trianglemesh.vertex_normals_, vertex_normals_); utility::CopyFromDeviceMultiStream(trianglemesh.vertex_colors_, vertex_colors_); utility::CopyFromDeviceMultiStream(trianglemesh.triangles_, triangles_); utility::CopyFromDeviceMultiStream(trianglemesh.triangle_normals_, triangle_normals_); utility::CopyFromDeviceMultiStream(trianglemesh.triangle_uvs_, triangle_uvs_); texture_.FromDevice(trianglemesh.texture_); cudaDeviceSynchronize(); } void HostTriangleMesh::ToDevice(geometry::TriangleMesh& trianglemesh) const { trianglemesh.vertices_.resize(vertices_.size()); trianglemesh.vertex_normals_.resize(vertex_normals_.size()); trianglemesh.vertex_colors_.resize(vertex_colors_.size()); trianglemesh.triangles_.resize(triangles_.size()); trianglemesh.triangle_normals_.resize(triangle_normals_.size()); trianglemesh.triangle_uvs_.resize(triangle_uvs_.size()); utility::CopyToDeviceMultiStream(vertices_, trianglemesh.vertices_); utility::CopyToDeviceMultiStream(vertex_normals_, trianglemesh.vertex_normals_); utility::CopyToDeviceMultiStream(vertex_colors_, trianglemesh.vertex_colors_); utility::CopyToDeviceMultiStream(triangles_, trianglemesh.triangles_); utility::CopyToDeviceMultiStream(triangle_normals_, trianglemesh.triangle_normals_); utility::CopyToDeviceMultiStream(triangle_uvs_, trianglemesh.triangle_uvs_); texture_.ToDevice(trianglemesh.texture_); cudaDeviceSynchronize(); } void HostTriangleMesh::Clear() { vertices_.clear(); vertex_normals_.clear(); vertex_colors_.clear(); triangles_.clear(); triangle_normals_.clear(); triangle_uvs_.clear(); texture_.Clear(); }
405d5595535426eb68e320d252b3d246dcd7ac6a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper // #include <cutil.h> #include <helper_cuda.h> #include <helper_timer.h> #include <iostream> #include <fstream> /* * Options * */ #define GAMMA 1.4 #define iterations 2000 #ifndef block_length #define block_length 192 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2 #define deg_angle_of_attack 0.0f /* * not options */ #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; checkCudaErrors(hipMalloc((void**)&t, sizeof(T)*N)); return t; } template <typename T> void dealloc(T* array) { checkCudaErrors(hipFree((void*)array)); } template <typename T> void copy(T* dst, T* src, int N) { checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice)); } template <typename T> void upload(T* dst, T* src, int N) { checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice)); } template <typename T> void download(T* dst, T* src, int N) { checkCudaErrors(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost)); } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } double sum_density_energy = 0; { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; sum_density_energy += h_variables[i + VAR_DENSITY_ENERGY*nelr]; } } printf("total density energy %.10f \n", sum_density_energy); delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_fc_momentum_x[1]; __constant__ float3 ff_fc_momentum_y[1]; __constant__ float3 ff_fc_momentum_z[1]; __constant__ float3 ff_fc_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / block_length), Db(block_length); hipError_t error; hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s initialize variables \n", hipGetErrorString(error)); exit(-1); } } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; 
fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... 
but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { hipError_t error; dim3 Dg(nelr / block_length), Db(block_length); hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", hipGetErrorString(error)); exit(-1); } } __global__ void cuda_compute_flux_contributions(int nelr, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z; float3 fc_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z, fc_i_density_energy); fc_momentum_x[i + 0*nelr] = fc_i_momentum_x.x; fc_momentum_x[i + 1*nelr] = fc_i_momentum_x.y; fc_momentum_x[i + 2*nelr] = fc_i_momentum_x.z; fc_momentum_y[i + 0*nelr] = fc_i_momentum_y.x; fc_momentum_y[i + 1*nelr] = fc_i_momentum_y.y; fc_momentum_y[i + 2*nelr] = fc_i_momentum_y.z; fc_momentum_z[i + 0*nelr] = fc_i_momentum_z.x; fc_momentum_z[i + 1*nelr] = fc_i_momentum_z.y; fc_momentum_z[i + 2*nelr] = fc_i_momentum_z.z; fc_density_energy[i + 0*nelr] = fc_i_density_energy.x; fc_density_energy[i + 1*nelr] = fc_i_density_energy.y; fc_density_energy[i + 2*nelr] = fc_i_density_energy.z; } void compute_flux_contributions(int nelr, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy) { dim3 Dg(nelr / block_length), Db(block_length); hipError_t error; hipLaunchKernelGGL(( cuda_compute_flux_contributions), dim3(Dg),dim3(Db), 0, 0, nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_flux_contribution failed\n", hipGetErrorString(error)); exit(-1); } } __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; 
compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z; float3 fc_i_density_energy; fc_i_momentum_x.x = fc_momentum_x[i + 0*nelr]; fc_i_momentum_x.y = fc_momentum_x[i + 1*nelr]; fc_i_momentum_x.z = fc_momentum_x[i + 2*nelr]; fc_i_momentum_y.x = fc_momentum_y[i + 0*nelr]; fc_i_momentum_y.y = fc_momentum_y[i + 1*nelr]; fc_i_momentum_y.z = fc_momentum_y[i + 2*nelr]; fc_i_momentum_z.x = fc_momentum_z[i + 0*nelr]; fc_i_momentum_z.y = fc_momentum_z[i + 1*nelr]; fc_i_momentum_z.z = fc_momentum_z[i + 2*nelr]; fc_i_density_energy.x = fc_density_energy[i + 0*nelr]; fc_i_density_energy.y = fc_density_energy[i + 1*nelr]; fc_i_density_energy.z = fc_density_energy[i + 2*nelr]; float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 fc_nb_momentum_x, fc_nb_momentum_y, fc_nb_momentum_z; float3 fc_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); fc_nb_momentum_x.x = fc_momentum_x[nb + 0*nelr]; fc_nb_momentum_x.y = fc_momentum_x[nb + 1*nelr]; fc_nb_momentum_x.z = fc_momentum_x[nb + 2*nelr]; fc_nb_momentum_y.x = fc_momentum_y[nb + 0*nelr]; fc_nb_momentum_y.y = fc_momentum_y[nb + 1*nelr]; fc_nb_momentum_y.z = fc_momentum_y[nb + 2*nelr]; fc_nb_momentum_z.x = fc_momentum_z[nb + 0*nelr]; fc_nb_momentum_z.y = fc_momentum_z[nb + 1*nelr]; fc_nb_momentum_z.z = fc_momentum_z[nb + 2*nelr]; fc_nb_density_energy.x = fc_density_energy[nb + 0*nelr]; fc_nb_density_energy.y = fc_density_energy[nb + 1*nelr]; fc_nb_density_energy.z = fc_density_energy[nb + 2*nelr]; // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(fc_nb_density_energy.x+fc_i_density_energy.x); flux_i_momentum.x += 
factor*(fc_nb_momentum_x.x+fc_i_momentum_x.x); flux_i_momentum.y += factor*(fc_nb_momentum_y.x+fc_i_momentum_y.x); flux_i_momentum.z += factor*(fc_nb_momentum_z.x+fc_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(fc_nb_density_energy.y+fc_i_density_energy.y); flux_i_momentum.x += factor*(fc_nb_momentum_x.y+fc_i_momentum_x.y); flux_i_momentum.y += factor*(fc_nb_momentum_y.y+fc_i_momentum_y.y); flux_i_momentum.z += factor*(fc_nb_momentum_z.y+fc_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(fc_nb_density_energy.z+fc_i_density_energy.z); flux_i_momentum.x += factor*(fc_nb_momentum_x.z+fc_i_momentum_x.z); flux_i_momentum.y += factor*(fc_nb_momentum_y.z+fc_i_momentum_y.z); flux_i_momentum.z += factor*(fc_nb_momentum_z.z+fc_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_fc_density_energy[0].x+fc_i_density_energy.x); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].x + fc_i_momentum_x.x); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].x + fc_i_momentum_y.x); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].x + fc_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_fc_density_energy[0].y+fc_i_density_energy.y); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].y + fc_i_momentum_x.y); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].y + fc_i_momentum_y.y); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].y + fc_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_fc_density_energy[0].z+fc_i_density_energy.z); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].z + fc_i_momentum_x.z); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].z + fc_i_momentum_y.z); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].z + fc_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy, float* fluxes) { hipError_t error; dim3 Dg(nelr / block_length), Db(block_length); hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_flux failed\n", hipGetErrorString(error)); exit(-1); } } __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + 
VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { hipError_t error; dim3 Dg(nelr / block_length), Db(block_length); hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s update failed\n", hipGetErrorString(error)); exit(-1); } } /* * Main function */ int main(int argc, char** argv) { if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char* data_file_name = argv[1]; hipDeviceProp_t prop; int dev; checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipGetDevice(&dev)); checkCudaErrors(hipGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 h_ff_fc_momentum_x; float3 h_ff_fc_momentum_y; float3 h_ff_fc_momentum_z; float3 h_ff_fc_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_fc_momentum_x, h_ff_fc_momentum_y, h_ff_fc_momentum_z, h_ff_fc_density_energy); // copy far field conditions to the gpu checkCudaErrors( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) ); checkCudaErrors( hipMemcpyToSymbol(ff_fc_momentum_x, &h_ff_fc_momentum_x, sizeof(float3)) ); checkCudaErrors( hipMemcpyToSymbol(ff_fc_momentum_y, &h_ff_fc_momentum_y, sizeof(float3)) ); checkCudaErrors( hipMemcpyToSymbol(ff_fc_momentum_z, &h_ff_fc_momentum_z, sizeof(float3)) ); checkCudaErrors( hipMemcpyToSymbol(ff_fc_density_energy, &h_ff_fc_density_energy, sizeof(float3)) ); } int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; float* normals; { std::ifstream file(data_file_name); file >> nel; nelr = block_length*((nel / block_length )+ ::min(1, nel % block_length)); float* h_areas = new float[nelr]; int* 
h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; // read in data for(int i = 0; i < nel; i++) { file >> h_areas[i]; for(int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j*nelr]; if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k*NNB)*nelr]; h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } areas = alloc<float>(nelr); upload<float>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); normals = alloc<float>(nelr*NDIM*NNB); upload<float>(normals, h_normals, nelr*NDIM*NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; } // Create arrays and set initial conditions float* variables = alloc<float>(nelr*NVAR); initialize_variables(nelr, variables); float* old_variables = alloc<float>(nelr*NVAR); float* fluxes = alloc<float>(nelr*NVAR); float* step_factors = alloc<float>(nelr); float* fc_momentum_x = alloc<float>(nelr*NDIM); float* fc_momentum_y = alloc<float>(nelr*NDIM); float* fc_momentum_z = alloc<float>(nelr*NDIM); float* fc_density_energy = alloc<float>(nelr*NDIM); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); hipMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA isn't still doing something before we start timing hipDeviceSynchronize(); // these need to be computed the first time in order to compute time step std::cout << "Starting..." 
<< std::endl; StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); hipError_t error; // Begin iterations for(int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", hipGetErrorString(error)); exit(-1); } for(int j = 0; j < RK; j++) { compute_flux_contributions(nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_flux_contributions failed\n", hipGetErrorString(error)); exit(-1); } compute_flux(nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s compute_flux failed\n", hipGetErrorString(error)); exit(-1); } time_step(j, nelr, old_variables, variables, step_factors, fluxes); error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr,"GPUassert: %s time_step\n", hipGetErrorString(error)); exit(-1); } } } hipDeviceSynchronize(); sdkStopTimer(&timer); std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "Saving solution..." << std::endl; dump(variables, nel, nelr); std::cout << "Saved solution..." << std::endl; std::cout << "Cleaning up..." << std::endl; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); dealloc<float>(fc_momentum_x); dealloc<float>(fc_momentum_y); dealloc<float>(fc_momentum_z); dealloc<float>(fc_density_energy); std::cout << "Done..." << std::endl; return 0; }
405d5595535426eb68e320d252b3d246dcd7ac6a.cu
// Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper // #include <cutil.h> #include <helper_cuda.h> #include <helper_timer.h> #include <iostream> #include <fstream> /* * Options * */ #define GAMMA 1.4 #define iterations 2000 #ifndef block_length #define block_length 192 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2 #define deg_angle_of_attack 0.0f /* * not options */ #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; checkCudaErrors(cudaMalloc((void**)&t, sizeof(T)*N)); return t; } template <typename T> void dealloc(T* array) { checkCudaErrors(cudaFree((void*)array)); } template <typename T> void copy(T* dst, T* src, int N) { checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice)); } template <typename T> void upload(T* dst, T* src, int N) { checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice)); } template <typename T> void download(T* dst, T* src, int N) { checkCudaErrors(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost)); } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } double sum_density_energy = 0; { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; sum_density_energy += h_variables[i + VAR_DENSITY_ENERGY*nelr]; } } printf("total density energy %.10f \n", sum_density_energy); delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_fc_momentum_x[1]; __constant__ float3 ff_fc_momentum_y[1]; __constant__ float3 ff_fc_momentum_z[1]; __constant__ float3 ff_fc_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / block_length), Db(block_length); cudaError_t error; cuda_initialize_variables<<<Dg, Db>>>(nelr, variables); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s initialize variables \n", cudaGetErrorString(error)); exit(-1); } } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = 
velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { cudaError_t error; dim3 Dg(nelr / block_length), Db(block_length); cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", cudaGetErrorString(error)); exit(-1); } } __global__ void cuda_compute_flux_contributions(int nelr, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z; float3 fc_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z, fc_i_density_energy); fc_momentum_x[i + 0*nelr] = fc_i_momentum_x.x; fc_momentum_x[i + 1*nelr] = fc_i_momentum_x.y; fc_momentum_x[i + 2*nelr] = fc_i_momentum_x.z; fc_momentum_y[i + 0*nelr] = 
fc_i_momentum_y.x; fc_momentum_y[i + 1*nelr] = fc_i_momentum_y.y; fc_momentum_y[i + 2*nelr] = fc_i_momentum_y.z; fc_momentum_z[i + 0*nelr] = fc_i_momentum_z.x; fc_momentum_z[i + 1*nelr] = fc_i_momentum_z.y; fc_momentum_z[i + 2*nelr] = fc_i_momentum_z.z; fc_density_energy[i + 0*nelr] = fc_i_density_energy.x; fc_density_energy[i + 1*nelr] = fc_i_density_energy.y; fc_density_energy[i + 2*nelr] = fc_i_density_energy.z; } void compute_flux_contributions(int nelr, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy) { dim3 Dg(nelr / block_length), Db(block_length); cudaError_t error; cuda_compute_flux_contributions<<<Dg,Db>>>(nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_flux_contribution failed\n", cudaGetErrorString(error)); exit(-1); } } __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 fc_i_momentum_x, fc_i_momentum_y, fc_i_momentum_z; float3 fc_i_density_energy; fc_i_momentum_x.x = fc_momentum_x[i + 0*nelr]; fc_i_momentum_x.y = fc_momentum_x[i + 1*nelr]; fc_i_momentum_x.z = fc_momentum_x[i + 2*nelr]; fc_i_momentum_y.x = fc_momentum_y[i + 0*nelr]; fc_i_momentum_y.y = fc_momentum_y[i + 1*nelr]; fc_i_momentum_y.z = fc_momentum_y[i + 2*nelr]; fc_i_momentum_z.x = fc_momentum_z[i + 0*nelr]; fc_i_momentum_z.y = fc_momentum_z[i + 1*nelr]; fc_i_momentum_z.z = fc_momentum_z[i + 2*nelr]; fc_i_density_energy.x = fc_density_energy[i + 0*nelr]; fc_i_density_energy.y = fc_density_energy[i + 1*nelr]; fc_i_density_energy.z = fc_density_energy[i + 2*nelr]; float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 fc_nb_momentum_x, fc_nb_momentum_y, fc_nb_momentum_z; float3 fc_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + 
(VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); fc_nb_momentum_x.x = fc_momentum_x[nb + 0*nelr]; fc_nb_momentum_x.y = fc_momentum_x[nb + 1*nelr]; fc_nb_momentum_x.z = fc_momentum_x[nb + 2*nelr]; fc_nb_momentum_y.x = fc_momentum_y[nb + 0*nelr]; fc_nb_momentum_y.y = fc_momentum_y[nb + 1*nelr]; fc_nb_momentum_y.z = fc_momentum_y[nb + 2*nelr]; fc_nb_momentum_z.x = fc_momentum_z[nb + 0*nelr]; fc_nb_momentum_z.y = fc_momentum_z[nb + 1*nelr]; fc_nb_momentum_z.z = fc_momentum_z[nb + 2*nelr]; fc_nb_density_energy.x = fc_density_energy[nb + 0*nelr]; fc_nb_density_energy.y = fc_density_energy[nb + 1*nelr]; fc_nb_density_energy.z = fc_density_energy[nb + 2*nelr]; // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(fc_nb_density_energy.x+fc_i_density_energy.x); flux_i_momentum.x += factor*(fc_nb_momentum_x.x+fc_i_momentum_x.x); flux_i_momentum.y += factor*(fc_nb_momentum_y.x+fc_i_momentum_y.x); flux_i_momentum.z += factor*(fc_nb_momentum_z.x+fc_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(fc_nb_density_energy.y+fc_i_density_energy.y); flux_i_momentum.x += factor*(fc_nb_momentum_x.y+fc_i_momentum_x.y); flux_i_momentum.y += factor*(fc_nb_momentum_y.y+fc_i_momentum_y.y); flux_i_momentum.z += factor*(fc_nb_momentum_z.y+fc_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(fc_nb_density_energy.z+fc_i_density_energy.z); flux_i_momentum.x += factor*(fc_nb_momentum_x.z+fc_i_momentum_x.z); flux_i_momentum.y += factor*(fc_nb_momentum_y.z+fc_i_momentum_y.z); flux_i_momentum.z += factor*(fc_nb_momentum_z.z+fc_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_fc_density_energy[0].x+fc_i_density_energy.x); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].x + fc_i_momentum_x.x); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].x + fc_i_momentum_y.x); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].x + fc_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_fc_density_energy[0].y+fc_i_density_energy.y); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].y + fc_i_momentum_x.y); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].y + fc_i_momentum_y.y); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].y + fc_i_momentum_z.y); 
factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_fc_density_energy[0].z+fc_i_density_energy.z); flux_i_momentum.x += factor*(ff_fc_momentum_x[0].z + fc_i_momentum_x.z); flux_i_momentum.y += factor*(ff_fc_momentum_y[0].z + fc_i_momentum_y.z); flux_i_momentum.z += factor*(ff_fc_momentum_z[0].z + fc_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fc_momentum_x, float* fc_momentum_y, float* fc_momentum_z, float* fc_density_energy, float* fluxes) { cudaError_t error; dim3 Dg(nelr / block_length), Db(block_length); cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_flux failed\n", cudaGetErrorString(error)); exit(-1); } } __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { cudaError_t error; dim3 Dg(nelr / block_length), Db(block_length); cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s update failed\n", cudaGetErrorString(error)); exit(-1); } } /* * Main function */ int main(int argc, char** argv) { if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char* data_file_name = argv[1]; cudaDeviceProp prop; int dev; checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaGetDevice(&dev)); checkCudaErrors(cudaGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = 
h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 h_ff_fc_momentum_x; float3 h_ff_fc_momentum_y; float3 h_ff_fc_momentum_z; float3 h_ff_fc_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_fc_momentum_x, h_ff_fc_momentum_y, h_ff_fc_momentum_z, h_ff_fc_density_energy); // copy far field conditions to the gpu checkCudaErrors( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) ); checkCudaErrors( cudaMemcpyToSymbol(ff_fc_momentum_x, &h_ff_fc_momentum_x, sizeof(float3)) ); checkCudaErrors( cudaMemcpyToSymbol(ff_fc_momentum_y, &h_ff_fc_momentum_y, sizeof(float3)) ); checkCudaErrors( cudaMemcpyToSymbol(ff_fc_momentum_z, &h_ff_fc_momentum_z, sizeof(float3)) ); checkCudaErrors( cudaMemcpyToSymbol(ff_fc_density_energy, &h_ff_fc_density_energy, sizeof(float3)) ); } int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; float* normals; { std::ifstream file(data_file_name); file >> nel; nelr = block_length*((nel / block_length )+ std::min(1, nel % block_length)); float* h_areas = new float[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; // read in data for(int i = 0; i < nel; i++) { file >> h_areas[i]; for(int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j*nelr]; if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k*NNB)*nelr]; h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } areas = alloc<float>(nelr); upload<float>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); normals = alloc<float>(nelr*NDIM*NNB); upload<float>(normals, h_normals, nelr*NDIM*NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; } // Create arrays and set initial conditions float* variables = alloc<float>(nelr*NVAR); initialize_variables(nelr, variables); float* old_variables = alloc<float>(nelr*NVAR); float* fluxes = alloc<float>(nelr*NVAR); float* step_factors = alloc<float>(nelr); float* fc_momentum_x = alloc<float>(nelr*NDIM); float* fc_momentum_y = alloc<float>(nelr*NDIM); float* fc_momentum_z = alloc<float>(nelr*NDIM); float* fc_density_energy = alloc<float>(nelr*NDIM); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); cudaMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA 
isn't still doing something before we start timing cudaThreadSynchronize(); // these need to be computed the first time in order to compute time step std::cout << "Starting..." << std::endl; StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); cudaError_t error; // Begin iterations for(int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_step_factor failed\n", cudaGetErrorString(error)); exit(-1); } for(int j = 0; j < RK; j++) { compute_flux_contributions(nelr, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_flux_contributions failed\n", cudaGetErrorString(error)); exit(-1); } compute_flux(nelr, elements_surrounding_elements, normals, variables, fc_momentum_x, fc_momentum_y, fc_momentum_z, fc_density_energy, fluxes); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s compute_flux failed\n", cudaGetErrorString(error)); exit(-1); } time_step(j, nelr, old_variables, variables, step_factors, fluxes); error = cudaGetLastError(); if (error != cudaSuccess) { fprintf(stderr,"GPUassert: %s time_step\n", cudaGetErrorString(error)); exit(-1); } } } cudaThreadSynchronize(); sdkStopTimer(&timer); std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "Saving solution..." << std::endl; dump(variables, nel, nelr); std::cout << "Saved solution..." << std::endl; std::cout << "Cleaning up..." << std::endl; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); dealloc<float>(fc_momentum_x); dealloc<float>(fc_momentum_y); dealloc<float>(fc_momentum_z); dealloc<float>(fc_density_energy); std::cout << "Done..." << std::endl; return 0; }
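// Illustrative sketch (not from the benchmark above): the launch-wrapper functions
// in this solver repeat the same "cudaGetLastError + fprintf + exit" block after every
// kernel launch. One possible way to factor that out; the name check_launch is made up.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

static void check_launch(const char* what)
{
    cudaError_t error = cudaGetLastError();   // status of the most recent kernel launch
    if (error != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s failed\n", cudaGetErrorString(error), what);
        exit(-1);
    }
}

// Usage, mirroring compute_step_factor() above:
//   cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors);
//   check_launch("compute_step_factor");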
15a4e5951418ec38529abcec83119de30358a3db.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

/*
This program numerically computes the integral of 4/(1+x*x) from 0 to 1.
The value of this integral is pi -- which is great since it gives us an easy
way to check the answer. This is a GPU port of the original sequential
program; it still uses the timer from the OpenMP runtime library.

History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <stdlib.h>
#include "omp_repair.h"
#include "book.h"

const long num_steps = 100000000;
const int nbBlocks = 32;
const int nbThreadPerBlock = 1024;

__global__ void kernel(double step, double *GPU)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x, i;
    double result = 0.0, x;

    // Midpoint rule: i runs from 1 to num_steps inclusive, matching the serial
    // version; each thread handles every (nbThreadPerBlock * nbBlocks)-th step.
    for (i = tid + 1; i <= num_steps; i += nbThreadPerBlock * nbBlocks) {
        x = (i - 0.5) * step;
        result = result + 4.0 / (1.0 + x * x);
    }

    // Store this thread's partial sum
    GPU[tid] = result;
}

int main()
{
    double pi, start_time, run_time, step, final = 0, *dev_Sum, *sum;

    sum = (double *) malloc((nbThreadPerBlock * nbBlocks) * sizeof(double));
    step = 1.0 / (double) num_steps;

    start_time = omp_get_wtime();

    HANDLE_ERROR(hipMalloc((void **)&dev_Sum, sizeof(double) * (nbBlocks * nbThreadPerBlock)));

    hipLaunchKernelGGL(kernel, dim3(nbBlocks), dim3(nbThreadPerBlock), 0, 0, step, dev_Sum);

    HANDLE_ERROR(hipMemcpy(sum, dev_Sum, sizeof(double) * (nbBlocks * nbThreadPerBlock), hipMemcpyDeviceToHost));

    // Accumulate the per-thread partial sums on the host
    for (int i = 0; i < nbBlocks * nbThreadPerBlock; i++) {
        final += sum[i];
    }

    pi = step * final;
    run_time = omp_get_wtime() - start_time;
    printf("\n pi with %ld steps is %lf in %lf seconds\n ", num_steps, pi, run_time);

    HANDLE_ERROR(hipFree(dev_Sum));
    free(sum);
    return 0;
}
15a4e5951418ec38529abcec83119de30358a3db.cu
/*
This program numerically computes the integral of 4/(1+x*x) from 0 to 1.
The value of this integral is pi -- which is great since it gives us an easy
way to check the answer. This is a GPU port of the original sequential
program; it still uses the timer from the OpenMP runtime library.

History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <stdlib.h>
#include "omp_repair.h"
#include "book.h"

const long num_steps = 100000000;
const int nbBlocks = 32;
const int nbThreadPerBlock = 1024;

__global__ void kernel(double step, double *GPU)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x, i;
    double result = 0.0, x;

    // Midpoint rule: i runs from 1 to num_steps inclusive, matching the serial
    // version; each thread handles every (nbThreadPerBlock * nbBlocks)-th step.
    for (i = tid + 1; i <= num_steps; i += nbThreadPerBlock * nbBlocks) {
        x = (i - 0.5) * step;
        result = result + 4.0 / (1.0 + x * x);
    }

    // Store this thread's partial sum
    GPU[tid] = result;
}

int main()
{
    double pi, start_time, run_time, step, final = 0, *dev_Sum, *sum;

    sum = (double *) malloc((nbThreadPerBlock * nbBlocks) * sizeof(double));
    step = 1.0 / (double) num_steps;

    start_time = omp_get_wtime();

    HANDLE_ERROR(cudaMalloc((void **)&dev_Sum, sizeof(double) * (nbBlocks * nbThreadPerBlock)));

    kernel<<<nbBlocks, nbThreadPerBlock>>>(step, dev_Sum);

    HANDLE_ERROR(cudaMemcpy(sum, dev_Sum, sizeof(double) * (nbBlocks * nbThreadPerBlock), cudaMemcpyDeviceToHost));

    // Accumulate the per-thread partial sums on the host
    for (int i = 0; i < nbBlocks * nbThreadPerBlock; i++) {
        final += sum[i];
    }

    pi = step * final;
    run_time = omp_get_wtime() - start_time;
    printf("\n pi with %ld steps is %lf in %lf seconds\n ", num_steps, pi, run_time);

    HANDLE_ERROR(cudaFree(dev_Sum));
    free(sum);
    return 0;
}
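// Illustrative follow-up (an assumption, not part of the .cu/.hip pair above): the host
// loop that sums nbBlocks*nbThreadPerBlock partial results could instead be reduced on
// the GPU. Minimal shared-memory tree reduction; it assumes blockDim.x is a power of
// two (it is 1024 above). reduce_partials and block_sums are made-up names.
__global__ void reduce_partials(const double *partial, double *block_sums, int n)
{
    __shared__ double cache[1024];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    cache[threadIdx.x] = (tid < n) ? partial[tid] : 0.0;   // one element per thread
    __syncthreads();

    // halve the number of active threads each round, adding pairs in shared memory
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            cache[threadIdx.x] += cache[threadIdx.x + stride];
        __syncthreads();
    }

    if (threadIdx.x == 0)
        block_sums[blockIdx.x] = cache[0];   // one partial sum per block
}
// The host would then only sum nbBlocks values, e.g.
//   reduce_partials<<<nbBlocks, nbThreadPerBlock>>>(dev_Sum, dev_block, nbBlocks * nbThreadPerBlock);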
a01ece8fb969beee687f6be04664e178b6b659c5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdexcept> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/extrema.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <thrust/remove.h> #include <thrust/sequence.h> #include <thrust/gather.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include "NVStrings.h" #include "NVStringsImpl.h" #include "custring_view.cuh" // create a new instance containing only the strings at the specified positions // position values can be in any order and can even be repeated NVStrings* NVStrings::gather( const int* pos, unsigned int elems, bool bdevmem ) { unsigned int count = size(); if( count==0 || elems==0 || pos==0 ) return new NVStrings(0); auto execpol = rmm::exec_policy(0); const int* d_pos = pos; if( !bdevmem ) { // copy indexes to device memory RMM_ALLOC((void**)&d_pos,elems*sizeof(int),0); hipMemcpy((void*)d_pos,pos,elems*sizeof(int),hipMemcpyHostToDevice); } // get individual sizes rmm::device_vector<long> sizes(elems,0); long* d_sizes = sizes.data().get(); custring_view** d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elems, [d_strings, d_pos, count, d_sizes] __device__(unsigned int idx){ int pos = d_pos[idx]; if( (pos < 0) || (pos >= count) ) { d_sizes[idx] = -1; return; } custring_view* dstr = d_strings[pos]; if( dstr ) d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size()); }); // check for any out-of-range values long* first = thrust::min_element(execpol->on(0),d_sizes,d_sizes+elems); long hfirst = 0; hipMemcpy(&hfirst,first,sizeof(long),hipMemcpyDeviceToHost); if( hfirst < 0 ) { if( !bdevmem ) RMM_FREE((void*)d_pos,0); throw std::out_of_range("nvs.gather position value out of range"); } // create output object NVStrings* rtn = new NVStrings(elems); char* d_buffer = rtn->pImpl->createMemoryFor((size_t*)d_sizes); if( d_buffer ) // if all values are not null { // create offsets rmm::device_vector<size_t> offsets(elems,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // copy strings custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elems, [d_strings, d_buffer, d_offsets, d_pos, count, d_results] __device__(unsigned int idx){ int pos = d_pos[idx]; //if( (pos < 0) || (pos >= count) ) // return; -- should no longer happen custring_view* dstr = d_strings[pos]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = custring_view::create_from(buffer,*dstr); }); // //printCudaError(hipDeviceSynchronize(),"nvs-gather"); } if( !bdevmem ) RMM_FREE((void*)d_pos,0); return rtn; } NVStrings* NVStrings::sublist( unsigned 
int start, unsigned int end, int step ) { unsigned int count = size(); if( end > count ) end = count; if( start > count ) start = count; if( step==0 ) step = 1; if( start == end ) return new NVStrings(0); if( ((step > 0) && (start > end)) || ((step < 0) && (start < end)) ) return new NVStrings(0); unsigned int elems = (unsigned int)std::abs((int)(end-start)); unsigned int abs_step = (unsigned int)std::abs(step); elems = (elems + abs_step -1)/abs_step; // adjust for steps auto execpol = rmm::exec_policy(0); rmm::device_vector<int> indexes(elems); thrust::sequence(execpol->on(0),indexes.begin(),indexes.end(),(int)start,step); return gather(indexes.data().get(),elems,true); } // remove the specified strings and return a new instance NVStrings* NVStrings::remove_strings( const int* pos, unsigned int elems, bool bdevmem ) { unsigned int count = size(); if( count==0 || elems==0 || pos==0 ) return nullptr; // return copy of ourselves? auto execpol = rmm::exec_policy(0); int* dpos = nullptr; RMM_ALLOC(&dpos,elems*sizeof(unsigned int),0); if( bdevmem ) hipMemcpy((void*)dpos,pos,elems*sizeof(unsigned int),hipMemcpyDeviceToDevice); else hipMemcpy((void*)dpos,pos,elems*sizeof(unsigned int),hipMemcpyHostToDevice); // sort the position values thrust::sort(execpol->on(0),dpos,dpos+elems,thrust::greater<int>()); // also should remove duplicates int* nend = thrust::unique(execpol->on(0),dpos,dpos+elems,thrust::equal_to<int>()); elems = (unsigned int)(nend - dpos); if( count < elems ) { RMM_FREE(dpos,0); fprintf(stderr,"nvs.remove_strings: more positions (%u) specified than the number of strings (%u)\n",elems,count); return nullptr; } // build array to hold positions which are not to be removed by marking deleted positions with -1 rmm::device_vector<int> dnpos(count); thrust::sequence(execpol->on(0),dnpos.begin(),dnpos.end()); int* d_npos = dnpos.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elems, [dpos, d_npos, count] __device__ (unsigned int idx) { unsigned int pos = dpos[idx]; if( pos < count ) d_npos[pos] = -1; }); // now remove the positions marked with -1 int* dend = thrust::remove_if(execpol->on(0),d_npos,d_npos+count,[] __device__ (int val) { return val < 0; }); unsigned int newCount = (unsigned int)(dend-d_npos); // gather string pointers based on indexes in dnpos (new-positions) custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<custring_view*> newList(newCount,nullptr); // newList will hold custring_view_array d_newList = newList.data().get(); // all the remaining thrust::gather(execpol->on(0),d_npos,d_npos+newCount,d_strings,d_newList); // strings ptrs // get individual sizes for the new strings list rmm::device_vector<size_t> sizes(newCount,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), newCount, [d_newList, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_newList[idx]; if( dstr ) d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size()); }); // create output object NVStrings* rtn = new NVStrings(newCount); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) { RMM_FREE(dpos,0); return rtn; } // create offsets rmm::device_vector<size_t> offsets(newCount,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // finally, copy the strings custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), 
thrust::make_counting_iterator<unsigned int>(0), newCount, [d_newList, d_buffer, d_offsets, d_results] __device__(unsigned int idx){ custring_view* dstr = d_newList[idx]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = custring_view::create_from(buffer,*dstr); }); // //printCudaError(hipDeviceSynchronize(),"nvs-remove_strings"); RMM_FREE(dpos,0); return rtn; } // this now sorts the strings into a new instance; // a sorted strings list can improve performance by reducing divergence NVStrings* NVStrings::sort( sorttype stype, bool ascending, bool nullfirst ) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); // get the lengths so they can be sorted too rmm::device_vector<size_t> lengths(count,0); size_t* d_lengths = lengths.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_lengths] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( dstr ) d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size()); }); // NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if( d_buffer==0 ) return rtn; // all are null so we are done // copy the pointers to temporary vector and sort them along with the alloc-lengths rmm::device_vector<custring_view*> sortvector(count,nullptr); custring_view_array d_sortvector = sortvector.data().get(); hipMemcpy(d_sortvector,d_strings,sizeof(custring_view*)*count,hipMemcpyDeviceToDevice); thrust::sort_by_key(execpol->on(0), d_sortvector, d_sortvector+count, d_lengths, [stype, ascending, nullfirst] __device__( custring_view*& lhs, custring_view*& rhs ) { if( lhs==0 || rhs==0 ) return (nullfirst ? rhs!=0 : lhs!=0); // null < non-null // allow sorting by name and length int diff = 0; if( stype & NVStrings::length ) diff = lhs->size() - rhs->size(); if( diff==0 && (stype & NVStrings::name) ) diff = lhs->compare(*rhs); return (ascending ? (diff < 0) : (diff > 0)); }); // // create offsets from the sorted lengths rmm::device_vector<size_t> offsets(count,0); size_t* d_offsets = offsets.data().get(); thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin()); // gather the sorted results into the new memory custring_view_array d_results = rtn->pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_sortvector, d_buffer, d_offsets, d_results] __device__(unsigned int idx){ custring_view* dstr = d_sortvector[idx]; if( dstr ) { char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = custring_view::create_from(buffer,*dstr); } }); // return rtn; } // just provide the index order and leave the strings intact int NVStrings::order( sorttype stype, bool ascending, unsigned int* indexes, bool nullfirst, bool todevice ) { unsigned int count = size(); unsigned int* d_indexes = indexes; auto execpol = rmm::exec_policy(0); if( !todevice ) RMM_ALLOC(&d_indexes,count*sizeof(unsigned int),0); thrust::sequence(execpol->on(0), d_indexes, d_indexes+count); // custring_view_array d_strings = pImpl->getStringsPtr(); thrust::sort(execpol->on(0), d_indexes, d_indexes+count, [d_strings, stype, ascending, nullfirst] __device__( unsigned int& lidx, unsigned int& ridx ) { custring_view* lhs = d_strings[lidx]; custring_view* rhs = d_strings[ridx]; if( lhs==0 || rhs==0 ) return (nullfirst ? 
rhs!=0 : lhs!=0); // allow sorting by name and length int diff = 0; if( stype & NVStrings::length ) diff = lhs->size() - rhs->size(); if( diff==0 && (stype & NVStrings::name) ) diff = lhs->compare(*rhs); return (ascending ? (diff < 0) : (diff > 0)); }); // if( !todevice ) { hipMemcpy(indexes,d_indexes,count*sizeof(unsigned int),hipMemcpyDeviceToHost); RMM_FREE(d_indexes,0); } return 0; }
a01ece8fb969beee687f6be04664e178b6b659c5.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdexcept> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/extrema.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <thrust/remove.h> #include <thrust/sequence.h> #include <thrust/gather.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include "NVStrings.h" #include "NVStringsImpl.h" #include "custring_view.cuh" // create a new instance containing only the strings at the specified positions // position values can be in any order and can even be repeated NVStrings* NVStrings::gather( const int* pos, unsigned int elems, bool bdevmem ) { unsigned int count = size(); if( count==0 || elems==0 || pos==0 ) return new NVStrings(0); auto execpol = rmm::exec_policy(0); const int* d_pos = pos; if( !bdevmem ) { // copy indexes to device memory RMM_ALLOC((void**)&d_pos,elems*sizeof(int),0); cudaMemcpy((void*)d_pos,pos,elems*sizeof(int),cudaMemcpyHostToDevice); } // get individual sizes rmm::device_vector<long> sizes(elems,0); long* d_sizes = sizes.data().get(); custring_view** d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elems, [d_strings, d_pos, count, d_sizes] __device__(unsigned int idx){ int pos = d_pos[idx]; if( (pos < 0) || (pos >= count) ) { d_sizes[idx] = -1; return; } custring_view* dstr = d_strings[pos]; if( dstr ) d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size()); }); // check for any out-of-range values long* first = thrust::min_element(execpol->on(0),d_sizes,d_sizes+elems); long hfirst = 0; cudaMemcpy(&hfirst,first,sizeof(long),cudaMemcpyDeviceToHost); if( hfirst < 0 ) { if( !bdevmem ) RMM_FREE((void*)d_pos,0); throw std::out_of_range("nvs.gather position value out of range"); } // create output object NVStrings* rtn = new NVStrings(elems); char* d_buffer = rtn->pImpl->createMemoryFor((size_t*)d_sizes); if( d_buffer ) // if all values are not null { // create offsets rmm::device_vector<size_t> offsets(elems,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // copy strings custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elems, [d_strings, d_buffer, d_offsets, d_pos, count, d_results] __device__(unsigned int idx){ int pos = d_pos[idx]; //if( (pos < 0) || (pos >= count) ) // return; -- should no longer happen custring_view* dstr = d_strings[pos]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = custring_view::create_from(buffer,*dstr); }); // //printCudaError(cudaDeviceSynchronize(),"nvs-gather"); } if( !bdevmem ) RMM_FREE((void*)d_pos,0); return rtn; } NVStrings* NVStrings::sublist( unsigned int start, unsigned int end, int step ) { unsigned int 
count = size(); if( end > count ) end = count; if( start > count ) start = count; if( step==0 ) step = 1; if( start == end ) return new NVStrings(0); if( ((step > 0) && (start > end)) || ((step < 0) && (start < end)) ) return new NVStrings(0); unsigned int elems = (unsigned int)std::abs((int)(end-start)); unsigned int abs_step = (unsigned int)std::abs(step); elems = (elems + abs_step -1)/abs_step; // adjust for steps auto execpol = rmm::exec_policy(0); rmm::device_vector<int> indexes(elems); thrust::sequence(execpol->on(0),indexes.begin(),indexes.end(),(int)start,step); return gather(indexes.data().get(),elems,true); } // remove the specified strings and return a new instance NVStrings* NVStrings::remove_strings( const int* pos, unsigned int elems, bool bdevmem ) { unsigned int count = size(); if( count==0 || elems==0 || pos==0 ) return nullptr; // return copy of ourselves? auto execpol = rmm::exec_policy(0); int* dpos = nullptr; RMM_ALLOC(&dpos,elems*sizeof(unsigned int),0); if( bdevmem ) cudaMemcpy((void*)dpos,pos,elems*sizeof(unsigned int),cudaMemcpyDeviceToDevice); else cudaMemcpy((void*)dpos,pos,elems*sizeof(unsigned int),cudaMemcpyHostToDevice); // sort the position values thrust::sort(execpol->on(0),dpos,dpos+elems,thrust::greater<int>()); // also should remove duplicates int* nend = thrust::unique(execpol->on(0),dpos,dpos+elems,thrust::equal_to<int>()); elems = (unsigned int)(nend - dpos); if( count < elems ) { RMM_FREE(dpos,0); fprintf(stderr,"nvs.remove_strings: more positions (%u) specified than the number of strings (%u)\n",elems,count); return nullptr; } // build array to hold positions which are not to be removed by marking deleted positions with -1 rmm::device_vector<int> dnpos(count); thrust::sequence(execpol->on(0),dnpos.begin(),dnpos.end()); int* d_npos = dnpos.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), elems, [dpos, d_npos, count] __device__ (unsigned int idx) { unsigned int pos = dpos[idx]; if( pos < count ) d_npos[pos] = -1; }); // now remove the positions marked with -1 int* dend = thrust::remove_if(execpol->on(0),d_npos,d_npos+count,[] __device__ (int val) { return val < 0; }); unsigned int newCount = (unsigned int)(dend-d_npos); // gather string pointers based on indexes in dnpos (new-positions) custring_view** d_strings = pImpl->getStringsPtr(); rmm::device_vector<custring_view*> newList(newCount,nullptr); // newList will hold custring_view_array d_newList = newList.data().get(); // all the remaining thrust::gather(execpol->on(0),d_npos,d_npos+newCount,d_strings,d_newList); // strings ptrs // get individual sizes for the new strings list rmm::device_vector<size_t> sizes(newCount,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), newCount, [d_newList, d_sizes] __device__(unsigned int idx){ custring_view* dstr = d_newList[idx]; if( dstr ) d_sizes[idx] = ALIGN_SIZE(dstr->alloc_size()); }); // create output object NVStrings* rtn = new NVStrings(newCount); char* d_buffer = rtn->pImpl->createMemoryFor(d_sizes); if( d_buffer==0 ) { RMM_FREE(dpos,0); return rtn; } // create offsets rmm::device_vector<size_t> offsets(newCount,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // finally, copy the strings custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), newCount, [d_newList, 
d_buffer, d_offsets, d_results] __device__(unsigned int idx){ custring_view* dstr = d_newList[idx]; if( !dstr ) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = custring_view::create_from(buffer,*dstr); }); // //printCudaError(cudaDeviceSynchronize(),"nvs-remove_strings"); RMM_FREE(dpos,0); return rtn; } // this now sorts the strings into a new instance; // a sorted strings list can improve performance by reducing divergence NVStrings* NVStrings::sort( sorttype stype, bool ascending, bool nullfirst ) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); // get the lengths so they can be sorted too rmm::device_vector<size_t> lengths(count,0); size_t* d_lengths = lengths.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_lengths] __device__(unsigned int idx){ custring_view* dstr = d_strings[idx]; if( dstr ) d_lengths[idx] = ALIGN_SIZE(dstr->alloc_size()); }); // NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if( d_buffer==0 ) return rtn; // all are null so we are done // copy the pointers to temporary vector and sort them along with the alloc-lengths rmm::device_vector<custring_view*> sortvector(count,nullptr); custring_view_array d_sortvector = sortvector.data().get(); cudaMemcpy(d_sortvector,d_strings,sizeof(custring_view*)*count,cudaMemcpyDeviceToDevice); thrust::sort_by_key(execpol->on(0), d_sortvector, d_sortvector+count, d_lengths, [stype, ascending, nullfirst] __device__( custring_view*& lhs, custring_view*& rhs ) { if( lhs==0 || rhs==0 ) return (nullfirst ? rhs!=0 : lhs!=0); // null < non-null // allow sorting by name and length int diff = 0; if( stype & NVStrings::length ) diff = lhs->size() - rhs->size(); if( diff==0 && (stype & NVStrings::name) ) diff = lhs->compare(*rhs); return (ascending ? (diff < 0) : (diff > 0)); }); // // create offsets from the sorted lengths rmm::device_vector<size_t> offsets(count,0); size_t* d_offsets = offsets.data().get(); thrust::exclusive_scan(execpol->on(0),lengths.begin(),lengths.end(),offsets.begin()); // gather the sorted results into the new memory custring_view_array d_results = rtn->pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_sortvector, d_buffer, d_offsets, d_results] __device__(unsigned int idx){ custring_view* dstr = d_sortvector[idx]; if( dstr ) { char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = custring_view::create_from(buffer,*dstr); } }); // return rtn; } // just provide the index order and leave the strings intact int NVStrings::order( sorttype stype, bool ascending, unsigned int* indexes, bool nullfirst, bool todevice ) { unsigned int count = size(); unsigned int* d_indexes = indexes; auto execpol = rmm::exec_policy(0); if( !todevice ) RMM_ALLOC(&d_indexes,count*sizeof(unsigned int),0); thrust::sequence(execpol->on(0), d_indexes, d_indexes+count); // custring_view_array d_strings = pImpl->getStringsPtr(); thrust::sort(execpol->on(0), d_indexes, d_indexes+count, [d_strings, stype, ascending, nullfirst] __device__( unsigned int& lidx, unsigned int& ridx ) { custring_view* lhs = d_strings[lidx]; custring_view* rhs = d_strings[ridx]; if( lhs==0 || rhs==0 ) return (nullfirst ? 
rhs!=0 : lhs!=0); // allow sorting by name and length int diff = 0; if( stype & NVStrings::length ) diff = lhs->size() - rhs->size(); if( diff==0 && (stype & NVStrings::name) ) diff = lhs->compare(*rhs); return (ascending ? (diff < 0) : (diff > 0)); }); // if( !todevice ) { cudaMemcpy(indexes,d_indexes,count*sizeof(unsigned int),cudaMemcpyDeviceToHost); RMM_FREE(d_indexes,0); } return 0; }
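// Small self-contained illustration (not from NVStrings) of the
// sizes -> thrust::exclusive_scan -> offsets pattern that gather(), remove_strings()
// and sort() above all use to pack variable-length strings into a single buffer.
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main()
{
    thrust::device_vector<size_t> sizes(4);
    sizes[0] = 8; sizes[1] = 16; sizes[2] = 0; sizes[3] = 24;   // aligned byte size of each string
    thrust::device_vector<size_t> offsets(4);
    thrust::exclusive_scan(sizes.begin(), sizes.end(), offsets.begin());
    // offsets now holds 0, 8, 24, 24: where each string starts in the packed buffer
    for (int i = 0; i < 4; ++i)
        printf("string %d starts at byte %zu\n", i, (size_t)offsets[i]);
    return 0;
}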
4976c4b39938b6bafbe8368fd11baf6993435ac1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/reader/nvdecoder/imgproc.h" #include <hip/hip_fp16.h> namespace dali { namespace { // using math from https://msdn.microsoft.com/en-us/library/windows/desktop/dd206750(v=vs.85).aspx template<typename T> struct YCbCr { T y, cb, cr; }; // https://docs.microsoft.com/en-gb/windows/desktop/medfound/recommended-8-bit-yuv-formats-for-video-rendering#converting-8-bit-yuv-to-rgb888 __constant__ float ycbcr2rgb_mat_norm[9] = { 1.164383f, 0.0f, 1.596027f, 1.164383f, -0.391762f, -0.812968f, 1.164383f, 2.017232f, 0.0f }; // not normalized need *255 __constant__ float ycbcr2rgb_mat[9] = { 1.164383f * 255.0f, 0.0f, 1.596027f * 255.0f, 1.164383f * 255.0f, -0.391762f * 255.0f, -0.812968f * 255.0f, 1.164383f * 255.0f, 2.017232f * 255.0f, 0.0f }; // https://en.wikipedia.org/wiki/YUV#Y%E2%80%B2UV444_to_RGB888_conversion __constant__ float ycbcr2rgb_mat_norm_full_range[9] = { 1, 0.0f, 1.402f, 1, -0.344136285f, -0.714136285f, 1, 1.772f, 0.0f }; // not normalized need *255 __constant__ float ycbcr2rgb_mat_full_range[9] = { 1 * 255, 0.0f, 1.402f * 255, 1 * 255, -0.344136285f * 255, -0.714136285f * 255, 1 * 255, 1.772f * 255, 0.0f }; __device__ float clip(float x, float max) { return fminf(fmaxf(x, 0.0f), max); } template<typename T> __device__ T convert(const float x) { return static_cast<T>(x); } #if 0 template<> __device__ half convert<half>(const float x) { return __float2half(x); } template<> __device__ uint8_t convert<uint8_t>(const float x) { return static_cast<uint8_t>(roundf(x)); } #endif template<typename YCbCr_T, typename RGB_T, bool Normalized = false> __device__ void ycbcr2rgb(const YCbCr<YCbCr_T>& ycbcr, RGB_T* rgb, size_t stride) { auto y = (static_cast<float>(ycbcr.y) - 16.0f/255.0f); auto cb = (static_cast<float>(ycbcr.cb) - 128.0f/255.0f); auto cr = (static_cast<float>(ycbcr.cr) - 128.0f/255.0f); float r, g, b; if (Normalized) { auto& m = ycbcr2rgb_mat_norm; r = clip(y*m[0] + cb*m[1] + cr*m[2], 1.0f); g = clip(y*m[3] + cb*m[4] + cr*m[5], 1.0f); b = clip(y*m[6] + cb*m[7] + cr*m[8], 1.0f); } else { auto& m = ycbcr2rgb_mat; r = clip(y*m[0] + cb*m[1] + cr*m[2], 255.0f); g = clip(y*m[3] + cb*m[4] + cr*m[5], 255.0f); b = clip(y*m[6] + cb*m[7] + cr*m[8], 255.0f); } rgb[0] = convert<RGB_T>(r); rgb[stride] = convert<RGB_T>(g); rgb[stride*2] = convert<RGB_T>(b); } template<typename YCbCr_T, typename RGB_T, bool Normalized = false> __device__ void ycbcr2rgb_full_range(const YCbCr<YCbCr_T>& ycbcr, RGB_T* rgb, size_t stride) { auto y = (static_cast<float>(ycbcr.y)); auto cb = (static_cast<float>(ycbcr.cb) - 128.0f/255.0f); auto cr = (static_cast<float>(ycbcr.cr) - 128.0f/255.0f); float r, g, b; if (Normalized) { auto& m = ycbcr2rgb_mat_norm_full_range; r = clip(y*m[0] + cb*m[1] + cr*m[2], 1.0f); g = clip(y*m[3] + cb*m[4] + cr*m[5], 1.0f); b = clip(y*m[6] + cb*m[7] + cr*m[8], 1.0f); } 
else { auto& m = ycbcr2rgb_mat_full_range; r = clip(y*m[0] + cb*m[1] + cr*m[2], 255.0f); g = clip(y*m[3] + cb*m[4] + cr*m[5], 255.0f); b = clip(y*m[6] + cb*m[7] + cr*m[8], 255.0f); } rgb[0] = convert<RGB_T>(r); rgb[stride] = convert<RGB_T>(g); rgb[stride*2] = convert<RGB_T>(b); } template<typename T, bool Normalized = false, bool RGB = true, bool FullRange = false> __global__ void process_frame_kernel( hipTextureObject_t luma, hipTextureObject_t chroma, T* dst, int index, float fx, float fy, int dst_width, int dst_height, int c) { const int dst_x = blockIdx.x * blockDim.x + threadIdx.x; const int dst_y = blockIdx.y * blockDim.y + threadIdx.y; if (dst_x >= dst_width || dst_y >= dst_height) return; auto src_x = 0.0f; // TODO(spanev) something less hacky here, why 4:2:0 fails on this edge? float shift = (dst_x == dst_width - 1) ? 0 : 0.5f; src_x = static_cast<float>(dst_x) * fx + shift; auto src_y = static_cast<float>(dst_y) * fy + shift; // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#tex2d-object YCbCr<float> ycbcr; ycbcr.y = tex2D<float>(luma, src_x, src_y); auto cbcr = tex2D<float2>(chroma, src_x * 0.5f, src_y * 0.5f); ycbcr.cb = cbcr.x; ycbcr.cr = cbcr.y; auto* out = &dst[(dst_x + dst_y * dst_width) * c]; constexpr size_t stride = 1; if (RGB) { if (FullRange) { ycbcr2rgb_full_range<float, T, Normalized>(ycbcr, out, stride); } else { ycbcr2rgb<float, T, Normalized>(ycbcr, out, stride); } } else { constexpr float scaling = Normalized ? 1.0f : 255.0f; out[0] = convert<T>(ycbcr.y * scaling); out[stride] = convert<T>(ycbcr.cb * scaling); out[stride*2] = convert<T>(ycbcr.cr * scaling); } } inline constexpr int divUp(int total, int grain) { return (total + grain - 1) / grain; } } // namespace template<typename T> void process_frame( hipTextureObject_t chroma, hipTextureObject_t luma, SequenceWrapper& output, int index, hipStream_t stream, uint16_t input_width, uint16_t input_height, bool rgb, bool normalized, bool full_range) { auto scale_width = input_width; auto scale_height = input_height; auto fx = static_cast<float>(input_width) / scale_width; auto fy = static_cast<float>(input_height) / scale_height; dim3 block(32, 8); dim3 grid(divUp(output.width, block.x), divUp(output.height, block.y)); int frame_stride = index * output.height * output.width * output.channels; LOG_LINE << "Processing frame " << index << " (frame_stride=" << frame_stride << ")" << std::endl; auto* tensor_out = output.sequence.mutable_data<T>() + frame_stride; if (normalized) { if (rgb) { if (full_range) { hipLaunchKernelGGL(( process_frame_kernel<T, true, true, true>), dim3(grid), dim3(block), 0, stream, luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } else { hipLaunchKernelGGL(( process_frame_kernel<T, true, true, false>), dim3(grid), dim3(block), 0, stream, luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } } else { hipLaunchKernelGGL(( process_frame_kernel<T, true, false>), dim3(grid), dim3(block), 0, stream, luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } } else { if (rgb) { if (full_range) { hipLaunchKernelGGL(( process_frame_kernel<T, false, true, true>), dim3(grid), dim3(block), 0, stream, luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } else { hipLaunchKernelGGL(( process_frame_kernel<T, false, true, false>), dim3(grid), dim3(block), 0, stream, luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } 
} else { hipLaunchKernelGGL(( process_frame_kernel<T, false, false>), dim3(grid), dim3(block), 0, stream, luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } } } template void process_frame<float>( hipTextureObject_t chroma, hipTextureObject_t luma, SequenceWrapper& output, int index, hipStream_t stream, uint16_t input_width, uint16_t input_height, bool rgb, bool normalized, bool full_range); template void process_frame<uint8_t>( hipTextureObject_t chroma, hipTextureObject_t luma, SequenceWrapper& output, int index, hipStream_t stream, uint16_t input_width, uint16_t input_height, bool rgb, bool normalized, bool full_range); } // namespace dali
4976c4b39938b6bafbe8368fd11baf6993435ac1.cu
// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/operators/reader/nvdecoder/imgproc.h" #include <cuda_fp16.h> namespace dali { namespace { // using math from https://msdn.microsoft.com/en-us/library/windows/desktop/dd206750(v=vs.85).aspx template<typename T> struct YCbCr { T y, cb, cr; }; // https://docs.microsoft.com/en-gb/windows/desktop/medfound/recommended-8-bit-yuv-formats-for-video-rendering#converting-8-bit-yuv-to-rgb888 __constant__ float ycbcr2rgb_mat_norm[9] = { 1.164383f, 0.0f, 1.596027f, 1.164383f, -0.391762f, -0.812968f, 1.164383f, 2.017232f, 0.0f }; // not normalized need *255 __constant__ float ycbcr2rgb_mat[9] = { 1.164383f * 255.0f, 0.0f, 1.596027f * 255.0f, 1.164383f * 255.0f, -0.391762f * 255.0f, -0.812968f * 255.0f, 1.164383f * 255.0f, 2.017232f * 255.0f, 0.0f }; // https://en.wikipedia.org/wiki/YUV#Y%E2%80%B2UV444_to_RGB888_conversion __constant__ float ycbcr2rgb_mat_norm_full_range[9] = { 1, 0.0f, 1.402f, 1, -0.344136285f, -0.714136285f, 1, 1.772f, 0.0f }; // not normalized need *255 __constant__ float ycbcr2rgb_mat_full_range[9] = { 1 * 255, 0.0f, 1.402f * 255, 1 * 255, -0.344136285f * 255, -0.714136285f * 255, 1 * 255, 1.772f * 255, 0.0f }; __device__ float clip(float x, float max) { return fminf(fmaxf(x, 0.0f), max); } template<typename T> __device__ T convert(const float x) { return static_cast<T>(x); } #if 0 template<> __device__ half convert<half>(const float x) { return __float2half(x); } template<> __device__ uint8_t convert<uint8_t>(const float x) { return static_cast<uint8_t>(roundf(x)); } #endif template<typename YCbCr_T, typename RGB_T, bool Normalized = false> __device__ void ycbcr2rgb(const YCbCr<YCbCr_T>& ycbcr, RGB_T* rgb, size_t stride) { auto y = (static_cast<float>(ycbcr.y) - 16.0f/255.0f); auto cb = (static_cast<float>(ycbcr.cb) - 128.0f/255.0f); auto cr = (static_cast<float>(ycbcr.cr) - 128.0f/255.0f); float r, g, b; if (Normalized) { auto& m = ycbcr2rgb_mat_norm; r = clip(y*m[0] + cb*m[1] + cr*m[2], 1.0f); g = clip(y*m[3] + cb*m[4] + cr*m[5], 1.0f); b = clip(y*m[6] + cb*m[7] + cr*m[8], 1.0f); } else { auto& m = ycbcr2rgb_mat; r = clip(y*m[0] + cb*m[1] + cr*m[2], 255.0f); g = clip(y*m[3] + cb*m[4] + cr*m[5], 255.0f); b = clip(y*m[6] + cb*m[7] + cr*m[8], 255.0f); } rgb[0] = convert<RGB_T>(r); rgb[stride] = convert<RGB_T>(g); rgb[stride*2] = convert<RGB_T>(b); } template<typename YCbCr_T, typename RGB_T, bool Normalized = false> __device__ void ycbcr2rgb_full_range(const YCbCr<YCbCr_T>& ycbcr, RGB_T* rgb, size_t stride) { auto y = (static_cast<float>(ycbcr.y)); auto cb = (static_cast<float>(ycbcr.cb) - 128.0f/255.0f); auto cr = (static_cast<float>(ycbcr.cr) - 128.0f/255.0f); float r, g, b; if (Normalized) { auto& m = ycbcr2rgb_mat_norm_full_range; r = clip(y*m[0] + cb*m[1] + cr*m[2], 1.0f); g = clip(y*m[3] + cb*m[4] + cr*m[5], 1.0f); b = clip(y*m[6] + cb*m[7] + cr*m[8], 1.0f); } else { auto& m = ycbcr2rgb_mat_full_range; r = clip(y*m[0] + cb*m[1] + cr*m[2], 255.0f); g = 
clip(y*m[3] + cb*m[4] + cr*m[5], 255.0f); b = clip(y*m[6] + cb*m[7] + cr*m[8], 255.0f); } rgb[0] = convert<RGB_T>(r); rgb[stride] = convert<RGB_T>(g); rgb[stride*2] = convert<RGB_T>(b); } template<typename T, bool Normalized = false, bool RGB = true, bool FullRange = false> __global__ void process_frame_kernel( cudaTextureObject_t luma, cudaTextureObject_t chroma, T* dst, int index, float fx, float fy, int dst_width, int dst_height, int c) { const int dst_x = blockIdx.x * blockDim.x + threadIdx.x; const int dst_y = blockIdx.y * blockDim.y + threadIdx.y; if (dst_x >= dst_width || dst_y >= dst_height) return; auto src_x = 0.0f; // TODO(spanev) something less hacky here, why 4:2:0 fails on this edge? float shift = (dst_x == dst_width - 1) ? 0 : 0.5f; src_x = static_cast<float>(dst_x) * fx + shift; auto src_y = static_cast<float>(dst_y) * fy + shift; // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#tex2d-object YCbCr<float> ycbcr; ycbcr.y = tex2D<float>(luma, src_x, src_y); auto cbcr = tex2D<float2>(chroma, src_x * 0.5f, src_y * 0.5f); ycbcr.cb = cbcr.x; ycbcr.cr = cbcr.y; auto* out = &dst[(dst_x + dst_y * dst_width) * c]; constexpr size_t stride = 1; if (RGB) { if (FullRange) { ycbcr2rgb_full_range<float, T, Normalized>(ycbcr, out, stride); } else { ycbcr2rgb<float, T, Normalized>(ycbcr, out, stride); } } else { constexpr float scaling = Normalized ? 1.0f : 255.0f; out[0] = convert<T>(ycbcr.y * scaling); out[stride] = convert<T>(ycbcr.cb * scaling); out[stride*2] = convert<T>(ycbcr.cr * scaling); } } inline constexpr int divUp(int total, int grain) { return (total + grain - 1) / grain; } } // namespace template<typename T> void process_frame( cudaTextureObject_t chroma, cudaTextureObject_t luma, SequenceWrapper& output, int index, cudaStream_t stream, uint16_t input_width, uint16_t input_height, bool rgb, bool normalized, bool full_range) { auto scale_width = input_width; auto scale_height = input_height; auto fx = static_cast<float>(input_width) / scale_width; auto fy = static_cast<float>(input_height) / scale_height; dim3 block(32, 8); dim3 grid(divUp(output.width, block.x), divUp(output.height, block.y)); int frame_stride = index * output.height * output.width * output.channels; LOG_LINE << "Processing frame " << index << " (frame_stride=" << frame_stride << ")" << std::endl; auto* tensor_out = output.sequence.mutable_data<T>() + frame_stride; if (normalized) { if (rgb) { if (full_range) { process_frame_kernel<T, true, true, true><<<grid, block, 0, stream>>> (luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } else { process_frame_kernel<T, true, true, false><<<grid, block, 0, stream>>> (luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } } else { process_frame_kernel<T, true, false><<<grid, block, 0, stream>>> (luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } } else { if (rgb) { if (full_range) { process_frame_kernel<T, false, true, true><<<grid, block, 0, stream>>> (luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } else { process_frame_kernel<T, false, true, false><<<grid, block, 0, stream>>> (luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } } else { process_frame_kernel<T, false, false><<<grid, block, 0, stream>>> (luma, chroma, tensor_out, index, fx, fy, output.width, output.height, output.channels); } } } template void process_frame<float>( cudaTextureObject_t chroma, 
cudaTextureObject_t luma, SequenceWrapper& output, int index, cudaStream_t stream, uint16_t input_width, uint16_t input_height, bool rgb, bool normalized, bool full_range); template void process_frame<uint8_t>( cudaTextureObject_t chroma, cudaTextureObject_t luma, SequenceWrapper& output, int index, cudaStream_t stream, uint16_t input_width, uint16_t input_height, bool rgb, bool normalized, bool full_range); } // namespace dali
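// Quick host-side sanity check (illustrative, not part of DALI) of the limited-range
// BT.601 coefficients in ycbcr2rgb_mat above, applied to a mid-grey 8-bit pixel
// (Y=Cb=Cr=128) normalized to [0,1] as the kernel receives it; clip() is omitted
// since the values are already in range.
#include <cstdio>

int main()
{
    float y  = 128.0f / 255.0f - 16.0f / 255.0f;    // ~0.439
    float cb = 128.0f / 255.0f - 128.0f / 255.0f;   // 0
    float cr = 128.0f / 255.0f - 128.0f / 255.0f;   // 0
    // same rows as ycbcr2rgb_mat: with cb == cr == 0 only the Y column contributes
    float r = 1.164383f * 255.0f * y + 0.0f      * 255.0f * cb + 1.596027f * 255.0f * cr;
    float g = 1.164383f * 255.0f * y - 0.391762f * 255.0f * cb - 0.812968f * 255.0f * cr;
    float b = 1.164383f * 255.0f * y + 2.017232f * 255.0f * cb + 0.0f      * 255.0f * cr;
    printf("R=%.1f G=%.1f B=%.1f\n", r, g, b);      // ~130.4 each: limited-range grey maps slightly above 128
    return 0;
}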
6c7c0efe99dfd7099c8cc8ace18e3e56d240912f.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <rocblas.h>
#include <iostream>
#include <algorithm>
#include <time.h>

//__device__ void swap(int i, int j) {
//    float t;
//    float *a = new float[];
//    t = a[i];
//    a[i] = a[j];
//    a[j] = t;
//}

__device__ void insertion_sort(float* knnqueue, int* knnqueueind, int qpitch, int k, int threadId){
    int n, o;
    float var;
    for (o = threadId; o < k*qpitch; o += qpitch){
        n = o;
        while (n > threadId){
            if (knnqueue[n] > knnqueue[n - qpitch]){
                var = knnqueue[n];
                knnqueue[n] = knnqueue[n - qpitch];
                knnqueue[n - qpitch] = var;
            }
            n -= qpitch;
        }
    }
}

__global__ void selection_of_knn(float *distlist, int *indlist, int listpitch, int k, int numofpoint, int refdimofpoint,
                                 float *knnqueue, int *knnqueueind, int qpitch, int m, volatile int result)
//the visibility of memory operations on the result variable is ensured by declaring it as volatile
//From Nvidia documentation.
{
    int thx;
    int queueinit = 0;
    int mval = m;
    float locmax;
    int j = 0;
    int varpitch = qpitch;
    __shared__ int flags[16];
    volatile int *flag = &flags[threadIdx.x / 32];
    int b, c; //divfact for dividing the sequence in the second bitonic sort step,
              //initialization to 2 for the second stage of sorting as we divide the sequence into two lists and so on
    float var, var1;
    int var2 = 0;
    //for bitonic different stage sorting
    int move = mval / 2;
    thx = threadIdx.x + blockIdx.x*blockDim.x;
    if (thx < numofpoint){
        //queue initialization
        if (queueinit == 0){
            while (j < k){
                knnqueue[j*qpitch + thx] = distlist[j*listpitch + thx];
                //printf("knnqueueu: %d %f\n", thx, knnqueue[j*qpitch + thx]);
                j++;
            }
            insertion_sort(knnqueue, 0, qpitch, k, thx);
            queueinit = 1;
        }
        //insertion in the first level m
        //locmax is the first element in the queue
        locmax = knnqueue[thx];
        for (int i = k*listpitch + thx; i < refdimofpoint*listpitch + thx; i += listpitch){
            //locmax = knnqueue[thx]; //re_assigning the locmax to the head of the first level in the queue
            mval = m;
            move = mval / 2; //reinitializing mval, move to deal with the remaining elements in the list
            if (distlist[i] < locmax){
                knnqueue[thx] = distlist[i];
                //insert to the first level m
                insertion_sort(knnqueue, 0, qpitch, m, thx);
                locmax = knnqueue[thx];
                while (/*(locmax < knnqueue[mval*qpitch + thx])&&*/(mval <= k)){
                    flag[threadIdx.x / 32] = 0;
                    if (locmax < knnqueue[mval*qpitch + thx]) {
                        flag[threadIdx.x / 32] = 1;
                    }
                    if (flag[threadIdx.x / 32] == 0){
                        break;
                    }
                    //*flag = 1;
                    //first bitonic sort step(two sorted list in decreasing order)
                    for (int a = mval*qpitch + thx; (a < k*qpitch + thx) && (a < (2 * mval*qpitch) + thx); a += qpitch){
                        if (knnqueue[a] > knnqueue[a - varpitch]){
                            var = knnqueue[a];
                            knnqueue[a] = knnqueue[a - varpitch];
                            knnqueue[a - varpitch] = var;
                        }
                        //else{
                        //    break; //to finish the first bitonic step at the size of the previous level in the queue
                        //}
                        varpitch += 2 * qpitch;
                    }//end of for
                    //next bitonic sort steps
                    while (move > 0){
                        for (c = 0; (c < (k*qpitch) + thx) && (c < (mval * 2 * qpitch) + thx); c += move * 2 * qpitch){
                            /*if (thx == 0)
                            printf("move,localmax,mval,move*2,b: %d %f %d %d \n", move, locmax, mval, move * 2);*/
                            for (b = thx; (b < k*qpitch + thx) && (b < move*qpitch + thx); b += qpitch){
                                if ((knnqueue[b + c] < knnqueue[b + (move*qpitch) + c]) && (b + (move*qpitch) + c < k*qpitch + thx)){
                                    var1 = knnqueue[b + c];
                                    knnqueue[b + c] = knnqueue[b + (move*qpitch) + c];
                                    knnqueue[b + (move*qpitch) + c] = var1;
                                    /*if (thx == 0){
                                    for (int u = thx; u < k*qpitch; u += qpitch)
                                    printf("next bitonic: %d %f\n", thx, knnqueue[u]);
                                    printf("\n");
                                    }*/
                                }
                                /*if (knnqueue[b + mval*qpitch] < knnqueue[b + (mval + move)*qpitch]){
                                var2 = knnqueue[b + mval*qpitch];
                                knnqueue[b + mval*qpitch] = knnqueue[b + (mval + move)*qpitch];
                                knnqueue[b + (mval + move)*qpitch] = var2;
                                }*/
                            }
                        }
                        move /= 2;
                    }//end of while
                    //to compare with the next level in the queue
                    locmax = knnqueue[mval*qpitch + thx]; //to ensure that the level heads are in decreasing order.
                    /*if (thx==0)
                    printf("locmax: %f\n", locmax);*/
                    mval = mval * 2;
                    move = mval / 2;
                    varpitch = qpitch;
                }//the end of bitonic sort process for merging levels
            }
            locmax = knnqueue[thx]; //re_assigning the locmax to the head of the first level in the queue
        }
        //bitonic merge
        //if the head of the second level is less than that of the first one of size m
    }//end of thx<numofpoint
    //if (thx == 0){
    //    for (int y = thx; y < (k*qpitch) + thx; y += qpitch){
    //        printf("last result: %d %f\n", var2, knnqueue[y]);
    //        var2 += 1;
    //    }
    //    printf("\n");
    //}
}

int main(){
    //testing width=8000; h=32000 k=64
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    int width = 8192;   //query points
    int height = 32768; //ref points
    int k = 1024;
    size_t lpitch;
    size_t qpitch;
    hipError_t val;
    float *d_indistqueue;
    float *d_list;
    float *l_in = new float[height*width];
    float *qh_in = new float[width*k];
    //allocate cpu memory
    float *h_out = (float *)malloc(width*k*sizeof(float));
    // generate the input array on the host
    for (int i = 0; i < width*height; i++)
        l_in[i] = (float)rand() / (float)RAND_MAX;
        //l_in[i] = width*height - i; //should be used for testing bitonic sort
    //float l_in[20] = {20,21,16,14,22,23,24,8,30,32,0,5,4,3,2,1,0,6,2,7};
    val = hipMallocPitch((void **)&d_indistqueue, &qpitch, width*sizeof(float), k);
    val = hipMallocPitch((void **)&d_list, &lpitch, width*sizeof(float), height);
    if (val)
        printf("Memorypitch Error: %s\n", hipGetErrorString(val));
    // transfer the array to the GPU
    hipMemcpy2D(d_indistqueue, qpitch, qh_in, width*sizeof(float), width*sizeof(float), k, hipMemcpyHostToDevice);
    hipMemcpy2D(d_list, lpitch, l_in, width*sizeof(float), width*sizeof(float), height, hipMemcpyHostToDevice);
    // launch the kernel
    dim3 Grid(width / 512 + 1, 1, 1);
    dim3 threads(512, 1);
    hipEventRecord(start, 0);
    selection_of_knn<<<Grid, threads>>>(d_list, 0, lpitch / sizeof(float), k, width, height, d_indistqueue, 0, qpitch / sizeof(float), 8, 0);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    //bitonic_finalize_col<<<1,1>>>(d_indistqueue,1,1,8);
    //
    // copy back the result array to the CPU
    hipMemcpy2D(h_out, width*sizeof(float), d_indistqueue, qpitch, width*sizeof(float), k, hipMemcpyDeviceToHost);
    //hipMemcpy(l_out, d_list, list_size*sizeof(int), hipMemcpyDeviceToHost);
    float milliseconds;
    hipEventElapsedTime(&milliseconds, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    //
    /*for (int j = 0; j < k*width; j++)
    printf("dequeue: %d %f\n", j, h_out[j]); */
    printf("The required time: %f\n", milliseconds / 1000);
    hipFree(d_indistqueue);
    hipFree(d_list);
    return 0;
}
6c7c0efe99dfd7099c8cc8ace18e3e56d240912f.cu
#include <cuda.h>
#include <stdio.h>
#include <cublas.h>
#include <iostream>
#include <algorithm>
#include <time.h>

//__device__ void swap(int i, int j) {
//    float t;
//    float *a = new float[];
//    t = a[i];
//    a[i] = a[j];
//    a[j] = t;
//}

__device__ void insertion_sort(float* knnqueue, int* knnqueueind, int qpitch, int k, int threadId){
    int n, o;
    float var;
    for (o = threadId; o < k*qpitch; o += qpitch){
        n = o;
        while (n > threadId){
            if (knnqueue[n] > knnqueue[n - qpitch]){
                var = knnqueue[n];
                knnqueue[n] = knnqueue[n - qpitch];
                knnqueue[n - qpitch] = var;
            }
            n -= qpitch;
        }
    }
}

__global__ void selection_of_knn(float *distlist, int *indlist, int listpitch, int k, int numofpoint, int refdimofpoint,
                                 float *knnqueue, int *knnqueueind, int qpitch, int m, volatile int result)
//the visibility of memory operations on the result variable is ensured by declaring it as volatile
//From Nvidia documentation.
{
    int thx;
    int queueinit = 0;
    int mval = m;
    float locmax;
    int j = 0;
    int varpitch = qpitch;
    __shared__ int flags[16];
    volatile int *flag = &flags[threadIdx.x / 32];
    int b, c; //divfact for dividing the sequence in the second bitonic sort step,
              //initialization to 2 for the second stage of sorting as we divide the sequence into two lists and so on
    float var, var1;
    int var2 = 0;
    //for bitonic different stage sorting
    int move = mval / 2;
    thx = threadIdx.x + blockIdx.x*blockDim.x;
    if (thx < numofpoint){
        //queue initialization
        if (queueinit == 0){
            while (j < k){
                knnqueue[j*qpitch + thx] = distlist[j*listpitch + thx];
                //printf("knnqueueu: %d %f\n", thx, knnqueue[j*qpitch + thx]);
                j++;
            }
            insertion_sort(knnqueue, 0, qpitch, k, thx);
            queueinit = 1;
        }
        //insertion in the first level m
        //locmax is the first element in the queue
        locmax = knnqueue[thx];
        for (int i = k*listpitch + thx; i < refdimofpoint*listpitch + thx; i += listpitch){
            //locmax = knnqueue[thx]; //re_assigning the locmax to the head of the first level in the queue
            mval = m;
            move = mval / 2; //reinitializing mval, move to deal with the remaining elements in the list
            if (distlist[i] < locmax){
                knnqueue[thx] = distlist[i];
                //insert to the first level m
                insertion_sort(knnqueue, 0, qpitch, m, thx);
                locmax = knnqueue[thx];
                while (/*(locmax < knnqueue[mval*qpitch + thx])&&*/(mval <= k)){
                    flag[threadIdx.x / 32] = 0;
                    if (locmax < knnqueue[mval*qpitch + thx]) {
                        flag[threadIdx.x / 32] = 1;
                    }
                    if (flag[threadIdx.x / 32] == 0){
                        break;
                    }
                    //*flag = 1;
                    //first bitonic sort step(two sorted list in decreasing order)
                    for (int a = mval*qpitch + thx; (a < k*qpitch + thx) && (a < (2 * mval*qpitch) + thx); a += qpitch){
                        if (knnqueue[a] > knnqueue[a - varpitch]){
                            var = knnqueue[a];
                            knnqueue[a] = knnqueue[a - varpitch];
                            knnqueue[a - varpitch] = var;
                        }
                        //else{
                        //    break; //to finish the first bitonic step at the size of the previous level in the queue
                        //}
                        varpitch += 2 * qpitch;
                    }//end of for
                    //next bitonic sort steps
                    while (move > 0){
                        for (c = 0; (c < (k*qpitch) + thx) && (c < (mval * 2 * qpitch) + thx); c += move * 2 * qpitch){
                            /*if (thx == 0)
                            printf("move,localmax,mval,move*2,b: %d %f %d %d \n", move, locmax, mval, move * 2);*/
                            for (b = thx; (b < k*qpitch + thx) && (b < move*qpitch + thx); b += qpitch){
                                if ((knnqueue[b + c] < knnqueue[b + (move*qpitch) + c]) && (b + (move*qpitch) + c < k*qpitch + thx)){
                                    var1 = knnqueue[b + c];
                                    knnqueue[b + c] = knnqueue[b + (move*qpitch) + c];
                                    knnqueue[b + (move*qpitch) + c] = var1;
                                    /*if (thx == 0){
                                    for (int u = thx; u < k*qpitch; u += qpitch)
                                    printf("next bitonic: %d %f\n", thx, knnqueue[u]);
                                    printf("\n");
                                    }*/
                                }
                                /*if (knnqueue[b + mval*qpitch] < knnqueue[b + (mval + move)*qpitch]){
                                var2 = knnqueue[b + mval*qpitch];
                                knnqueue[b + mval*qpitch] = knnqueue[b + (mval + move)*qpitch];
                                knnqueue[b + (mval + move)*qpitch] = var2;
                                }*/
                            }
                        }
                        move /= 2;
                    }//end of while
                    //to compare with the next level in the queue
                    locmax = knnqueue[mval*qpitch + thx]; //to ensure that the level heads are in decreasing order.
                    /*if (thx==0)
                    printf("locmax: %f\n", locmax);*/
                    mval = mval * 2;
                    move = mval / 2;
                    varpitch = qpitch;
                }//the end of bitonic sort process for merging levels
            }
            locmax = knnqueue[thx]; //re_assigning the locmax to the head of the first level in the queue
        }
        //bitonic merge
        //if the head of the second level is less than that of the first one of size m
    }//end of thx<numofpoint
    //if (thx == 0){
    //    for (int y = thx; y < (k*qpitch) + thx; y += qpitch){
    //        printf("last result: %d %f\n", var2, knnqueue[y]);
    //        var2 += 1;
    //    }
    //    printf("\n");
    //}
}

int main(){
    //testing width=8000; h=32000 k=64
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int width = 8192;   //query points
    int height = 32768; //ref points
    int k = 1024;
    size_t lpitch;
    size_t qpitch;
    cudaError_t val;
    float *d_indistqueue;
    float *d_list;
    float *l_in = new float[height*width];
    float *qh_in = new float[width*k];
    //allocate cpu memory
    float *h_out = (float *)malloc(width*k*sizeof(float));
    // generate the input array on the host
    for (int i = 0; i < width*height; i++)
        l_in[i] = (float)rand() / (float)RAND_MAX;
        //l_in[i] = width*height - i; //should be used for testing bitonic sort
    //float l_in[20] = {20,21,16,14,22,23,24,8,30,32,0,5,4,3,2,1,0,6,2,7};
    val = cudaMallocPitch((void **)&d_indistqueue, &qpitch, width*sizeof(float), k);
    val = cudaMallocPitch((void **)&d_list, &lpitch, width*sizeof(float), height);
    if (val)
        printf("Memorypitch Error: %s\n", cudaGetErrorString(val));
    // transfer the array to the GPU
    cudaMemcpy2D(d_indistqueue, qpitch, qh_in, width*sizeof(float), width*sizeof(float), k, cudaMemcpyHostToDevice);
    cudaMemcpy2D(d_list, lpitch, l_in, width*sizeof(float), width*sizeof(float), height, cudaMemcpyHostToDevice);
    // launch the kernel
    dim3 Grid(width / 512 + 1, 1, 1);
    dim3 threads(512, 1);
    cudaEventRecord(start, 0);
    selection_of_knn<<<Grid, threads>>>(d_list, 0, lpitch / sizeof(float), k, width, height, d_indistqueue, 0, qpitch / sizeof(float), 8, 0);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    //bitonic_finalize_col<<<1,1>>>(d_indistqueue,1,1,8);
    //
    // copy back the result array to the CPU
    cudaMemcpy2D(h_out, width*sizeof(float), d_indistqueue, qpitch, width*sizeof(float), k, cudaMemcpyDeviceToHost);
    //cudaMemcpy(l_out, d_list, list_size*sizeof(int), cudaMemcpyDeviceToHost);
    float milliseconds;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //
    /*for (int j = 0; j < k*width; j++)
    printf("dequeue: %d %f\n", j, h_out[j]); */
    printf("The required time: %f\n", milliseconds / 1000);
    cudaFree(d_indistqueue);
    cudaFree(d_list);
    return 0;
}
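Editor's note: a minimal host-side spot check, not part of the hipify pair above. It assumes the queue copied back into h_out holds, for each query column, the k smallest distances of that column in decreasing order (largest candidate at the head), which is what the kernel's comments suggest; the helper name check_query_column and the tolerance are illustrative, not part of the original code.

#include <vector>
#include <algorithm>
#include <cmath>
#include <cstdio>

// h_out and l_in are laid out as in main() above: row-major with width floats per row,
// h_out has k rows (the copied-back queue), l_in has height rows (the distance list).
bool check_query_column(const float* h_out, const float* l_in,
                        int width, int height, int k, int q /* query index */)
{
    // Gather all distances of query q and take the k smallest on the host.
    std::vector<float> col(height);
    for (int r = 0; r < height; ++r)
        col[r] = l_in[r * width + q];
    std::partial_sort(col.begin(), col.begin() + k, col.end());

    // The GPU queue is kept in decreasing order, so compare against the host result reversed.
    for (int j = 0; j < k; ++j) {
        float gpu = h_out[j * width + q];   // row j of the copied-back queue
        float ref = col[k - 1 - j];
        if (std::fabs(gpu - ref) > 1e-6f) {
            printf("mismatch at level %d: gpu %f vs host %f\n", j, gpu, ref);
            return false;
        }
    }
    return true;
}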
5d5d08d2ea7dd5cadae64d1de591803686a4fe38.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h> // for exit() and EXIT_FAILURE
#include <hip/hip_runtime.h>
//#include <cutil.h>

#define TILE_WIDTH 16
#define N 2048

void err_handling(hipError_t *err, const char *str)
{
    if (*err != hipSuccess) {
        printf("%s\n", str);
        exit(EXIT_FAILURE);
    }
}

__global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n)
{
    __shared__ float sh_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ float sh_B[TILE_WIDTH][TILE_WIDTH];

    //int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int row = blockIdx.y*blockDim.y + ty;
    int col = blockIdx.x*blockDim.x + tx;

    float cVal = 0.0;

    // assumes m, n and k are multiples of TILE_WIDTH (true for N = 2048)
    for (int t = 0; t < k/TILE_WIDTH; ++t) {
        sh_A[ty][tx] = A[row*k + t*TILE_WIDTH + tx];
        sh_B[ty][tx] = B[(t*TILE_WIDTH + ty)*n + col]; // B is k x n row-major, so rows are strided by n (the original used k, which only works because m == n == k)
        __syncthreads();

        for (int i = 0; i < TILE_WIDTH; ++i) {
            cVal += sh_A[ty][i] * sh_B[i][tx];
        }
        __syncthreads();
    }
    C[row*n + col] = cVal;
}

int main(void)
{
    hipError_t err = hipSuccess;

    int m = N;
    int n = N;
    int k = N;

    float *A = (float*)malloc(m*k*sizeof(float));
    float *B = (float*)malloc(k*n*sizeof(float));
    float *C = (float*)malloc(m*n*sizeof(float));
    if (A == NULL || B == NULL || C == NULL) {
        printf("allocate host error!\n");
        return 1;
    }

    for (int i = 0; i < m*k; ++i) {
        A[i] = rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < k*n; ++i) {
        B[i] = rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < m*n; ++i) {
        C[i] = rand()/(float)RAND_MAX;
    }

    float *dev_A = NULL;
    float *dev_B = NULL;
    float *dev_C = NULL;
    err = hipMalloc((void**)&dev_A, m*k*sizeof(float));
    err_handling(&err, "allocate device error A!");
    err = hipMalloc((void**)&dev_B, k*n*sizeof(float));
    err_handling(&err, "allocate device error B!");
    err = hipMalloc((void**)&dev_C, m*n*sizeof(float));
    err_handling(&err, "allocate device error C!");

    err = hipMemcpy(dev_A, A, m*k*sizeof(float), hipMemcpyHostToDevice);
    err_handling(&err, "memcpy to A error!");
    err = hipMemcpy(dev_B, B, k*n*sizeof(float), hipMemcpyHostToDevice);
    err_handling(&err, "memcpy to B error!");

    // note: x should span the columns (n) and y the rows (m); the two are interchangeable here because m == n
    dim3 dimGrid((m-1)/TILE_WIDTH+1, (n-1)/TILE_WIDTH+1, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    hipLaunchKernelGGL(matMul, dim3(dimGrid), dim3(dimBlock), 0, 0, dev_A, dev_B, dev_C, m, k, n);
    hipEventRecord(stop, 0);
    hipEventSynchronize(start);
    hipEventSynchronize(stop);

    float time_elapsed = 0;
    hipEventElapsedTime(&time_elapsed, start, stop);
    printf("%fms\n", time_elapsed);

    err = hipMemcpy(C, dev_C, m*n*sizeof(float), hipMemcpyDeviceToHost);
    err_handling(&err, "memcpy to host C error!");

    printf("%f %f\n", C[100*N+100], C[234*N+234]);

    err = hipFree(dev_A);
    err_handling(&err, "mem free A error!");
    err = hipFree(dev_B);
    err_handling(&err, "mem free B error!");
    err = hipFree(dev_C);
    err_handling(&err, "mem free C error!");

    err = hipDeviceReset();
    err_handling(&err, "device reset error!");

    return 0;
}
5d5d08d2ea7dd5cadae64d1de591803686a4fe38.cu
#include <stdio.h>
#include <stdlib.h> // for exit() and EXIT_FAILURE
#include <cuda_runtime.h>
//#include <cutil.h>

#define TILE_WIDTH 16
#define N 2048

void err_handling(cudaError_t *err, const char *str)
{
    if (*err != cudaSuccess) {
        printf("%s\n", str);
        exit(EXIT_FAILURE);
    }
}

__global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n)
{
    __shared__ float sh_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ float sh_B[TILE_WIDTH][TILE_WIDTH];

    //int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int row = blockIdx.y*blockDim.y + ty;
    int col = blockIdx.x*blockDim.x + tx;

    float cVal = 0.0;

    // assumes m, n and k are multiples of TILE_WIDTH (true for N = 2048)
    for (int t = 0; t < k/TILE_WIDTH; ++t) {
        sh_A[ty][tx] = A[row*k + t*TILE_WIDTH + tx];
        sh_B[ty][tx] = B[(t*TILE_WIDTH + ty)*n + col]; // B is k x n row-major, so rows are strided by n (the original used k, which only works because m == n == k)
        __syncthreads();

        for (int i = 0; i < TILE_WIDTH; ++i) {
            cVal += sh_A[ty][i] * sh_B[i][tx];
        }
        __syncthreads();
    }
    C[row*n + col] = cVal;
}

int main(void)
{
    cudaError_t err = cudaSuccess;

    int m = N;
    int n = N;
    int k = N;

    float *A = (float*)malloc(m*k*sizeof(float));
    float *B = (float*)malloc(k*n*sizeof(float));
    float *C = (float*)malloc(m*n*sizeof(float));
    if (A == NULL || B == NULL || C == NULL) {
        printf("allocate host error!\n");
        return 1;
    }

    for (int i = 0; i < m*k; ++i) {
        A[i] = rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < k*n; ++i) {
        B[i] = rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < m*n; ++i) {
        C[i] = rand()/(float)RAND_MAX;
    }

    float *dev_A = NULL;
    float *dev_B = NULL;
    float *dev_C = NULL;
    err = cudaMalloc((void**)&dev_A, m*k*sizeof(float));
    err_handling(&err, "allocate device error A!");
    err = cudaMalloc((void**)&dev_B, k*n*sizeof(float));
    err_handling(&err, "allocate device error B!");
    err = cudaMalloc((void**)&dev_C, m*n*sizeof(float));
    err_handling(&err, "allocate device error C!");

    err = cudaMemcpy(dev_A, A, m*k*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to A error!");
    err = cudaMemcpy(dev_B, B, k*n*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to B error!");

    // note: x should span the columns (n) and y the rows (m); the two are interchangeable here because m == n
    dim3 dimGrid((m-1)/TILE_WIDTH+1, (n-1)/TILE_WIDTH+1, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    matMul<<<dimGrid, dimBlock>>>(dev_A, dev_B, dev_C, m, k, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(start);
    cudaEventSynchronize(stop);

    float time_elapsed = 0;
    cudaEventElapsedTime(&time_elapsed, start, stop);
    printf("%fms\n", time_elapsed);

    err = cudaMemcpy(C, dev_C, m*n*sizeof(float), cudaMemcpyDeviceToHost);
    err_handling(&err, "memcpy to host C error!");

    printf("%f %f\n", C[100*N+100], C[234*N+234]);

    err = cudaFree(dev_A);
    err_handling(&err, "mem free A error!");
    err = cudaFree(dev_B);
    err_handling(&err, "mem free B error!");
    err = cudaFree(dev_C);
    err_handling(&err, "mem free C error!");

    err = cudaDeviceReset();
    err_handling(&err, "device reset error!");

    return 0;
}
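Editor's note: a hedged spot check, not part of the file pair above. It recomputes a few entries of C serially on the host and compares them with the device result copied back in main(); the function name spot_check, the sampled positions, and the relative tolerance are illustrative choices.

#include <cmath>
#include <cstdio>

// A is m x k, B is k x n, C is m x n, all row-major, as in the kernel above.
bool spot_check(const float* A, const float* B, const float* C, int m, int k, int n)
{
    // Tolerance is loose because the kernel accumulates in a different order than this serial loop.
    const int rows[] = {0, 100, 234};
    const int cols[] = {0, 100, 234};
    for (int r : rows) {
        for (int c : cols) {
            float ref = 0.0f;
            for (int i = 0; i < k; ++i)
                ref += A[r * k + i] * B[i * n + c];
            if (std::fabs(ref - C[r * n + c]) > 1e-2f * std::fabs(ref)) {
                printf("mismatch at (%d,%d): host %f device %f\n", r, c, ref, C[r * n + c]);
                return false;
            }
        }
    }
    return true;
}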
2b7ad8a91f3fab8a1ff5ca488b377f87f06fd53a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
//#include <sys/time.h>
//#include <opencv2/opencv.hpp>

__global__ void mykernel(int *arr, int *stop){
    int id = blockIdx.x * blockDim.x + threadIdx.x; //unique global id of thread
    int numThreads = gridDim.x * blockDim.x; //total num threads in grid in x direction
    int localsum = 0;
    for (int i = id; i < *stop; i += numThreads){
        double tmp = sin(i*1.0);
        double tmp2 = tmp*tmp;
        int z = (int)(tmp2*10000.0);
        localsum = (localsum + z) % 10000;
    }
    printf(" %d ", localsum);
    if (id < *stop) //guard: devarr holds only min(blocks*threads, *stop) elements, so skip out-of-bounds writes when *stop is small
        arr[id] = localsum;
    /*
    if(id < *stop){
        double tmp = sin(id*1.0);
        double tmp2 = tmp*tmp;
        int z = (int)(tmp2*10000.0);
        arr[id] = z % 10000;
    }
    */
}

int main(int argc, char *argv[]){
    //assert(argc==2);
    int stop = (int)atol(argv[1]);
    assert(stop >= 1.0);
    printf("Hello World!\n");

    int blocks = 4;
    int threads = 5;
    int result = 0;
    int *arr;
    int arrsize;
    if(blocks*threads < stop){
        arrsize = blocks*threads;
    }else{
        arrsize = stop;
    }
    arr = (int *)malloc(sizeof(int)*arrsize); //memory in cpu

    int *devarr;
    int *devstop;
    hipMalloc((int**) &devarr, sizeof(int)*arrsize); //mem in gpu
    hipMalloc((int**) &devstop, sizeof(int)); //mem in gpu
    hipMemcpy(devarr, arr, sizeof(int)*arrsize, hipMemcpyHostToDevice); //transfer
    hipMemcpy(devstop, &stop, sizeof(int), hipMemcpyHostToDevice); //transfer

    hipLaunchKernelGGL(mykernel, dim3(blocks), dim3(threads), 0, 0, devarr, devstop); //1,1 block, threads- launch config

    hipMemcpy(arr, devarr, sizeof(int)*arrsize, hipMemcpyDeviceToHost);
    printf("arrsize: %d\n", arrsize);
    for(int i = 0; i < arrsize; i++){
        //printf(" %d ", arr[i]);
        result = (result + arr[i]) % 10000;
    }
    printf("PIN is: %d\n", result);
    //scanf("%d%d", a, b);

    free(arr);
    hipFree(devarr);
    hipFree(devstop);
    return 0;
}
2b7ad8a91f3fab8a1ff5ca488b377f87f06fd53a.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
//#include <sys/time.h>
//#include <opencv2/opencv.hpp>

__global__ void mykernel(int *arr, int *stop){
    int id = blockIdx.x * blockDim.x + threadIdx.x; //unique global id of thread
    int numThreads = gridDim.x * blockDim.x; //total num threads in grid in x direction
    int localsum = 0;
    for (int i = id; i < *stop; i += numThreads){
        double tmp = sin(i*1.0);
        double tmp2 = tmp*tmp;
        int z = (int)(tmp2*10000.0);
        localsum = (localsum + z) % 10000;
    }
    printf(" %d ", localsum);
    if (id < *stop) //guard: devarr holds only min(blocks*threads, *stop) elements, so skip out-of-bounds writes when *stop is small
        arr[id] = localsum;
    /*
    if(id < *stop){
        double tmp = sin(id*1.0);
        double tmp2 = tmp*tmp;
        int z = (int)(tmp2*10000.0);
        arr[id] = z % 10000;
    }
    */
}

int main(int argc, char *argv[]){
    //assert(argc==2);
    int stop = (int)atol(argv[1]);
    assert(stop >= 1.0);
    printf("Hello World!\n");

    int blocks = 4;
    int threads = 5;
    int result = 0;
    int *arr;
    int arrsize;
    if(blocks*threads < stop){
        arrsize = blocks*threads;
    }else{
        arrsize = stop;
    }
    arr = (int *)malloc(sizeof(int)*arrsize); //memory in cpu

    int *devarr;
    int *devstop;
    cudaMalloc((int**) &devarr, sizeof(int)*arrsize); //mem in gpu
    cudaMalloc((int**) &devstop, sizeof(int)); //mem in gpu
    cudaMemcpy(devarr, arr, sizeof(int)*arrsize, cudaMemcpyHostToDevice); //transfer
    cudaMemcpy(devstop, &stop, sizeof(int), cudaMemcpyHostToDevice); //transfer

    mykernel<<<blocks,threads>>>(devarr, devstop); //1,1 block, threads- launch config

    cudaMemcpy(arr, devarr, sizeof(int)*arrsize, cudaMemcpyDeviceToHost);
    printf("arrsize: %d\n", arrsize);
    for(int i = 0; i < arrsize; i++){
        //printf(" %d ", arr[i]);
        result = (result + arr[i]) % 10000;
    }
    printf("PIN is: %d\n", result);
    //scanf("%d%d", a, b);

    free(arr);
    cudaFree(devarr);
    cudaFree(devstop);
    return 0;
}
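Editor's note: a serial reference for the checksum, not part of the file pair above. Because modular addition is associative and commutative, folding the per-thread partial sums in main() gives the same value as one plain loop over all i, so the GPU "PIN" should match this function, up to rare one-off differences where device and host sin round a value across a truncation boundary. The name pin_reference is illustrative.

#include <cmath>

// Serial equivalent of mykernel plus the host reduction in main().
int pin_reference(int stop)
{
    int result = 0;
    for (int i = 0; i < stop; ++i) {
        double tmp = std::sin(i * 1.0);
        int z = (int)(tmp * tmp * 10000.0);
        result = (result + z) % 10000;
    }
    return result;
}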
27c3fc22aa24e392b63ceececf69eed0c5440984.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "blender_cuda.cuh"
#include <stdio.h>
#include <stdlib.h>

class MatAddAction
{
public:
    static __device__ __forceinline__ void MatAdd(int width, int height, short* src_laplace, float* src_weight, short* dst_laplace, float* dst_weight)
    {
        int i = threadIdx.x + blockIdx.x*blockDim.x;
        int j = threadIdx.y + blockIdx.y*blockDim.y;
        if (i < width && j < height)
        {
            (dst_laplace + j*width*3)[i*3 + 0] += (short)(src_laplace + j*width*3)[i*3 + 0]*(src_weight + j*width)[i];
            (dst_laplace + j*width*3)[i*3 + 1] += (short)(src_laplace + j*width*3)[i*3 + 1]*(src_weight + j*width)[i];
            (dst_laplace + j*width*3)[i*3 + 2] += (short)(src_laplace + j*width*3)[i*3 + 2]*(src_weight + j*width)[i];
            (dst_weight + j*width)[i] += (src_weight + j*width)[i];
            //if (i < 5 && j < 5)
            //printf("i:<%d, %d> %d,%d,%d, %f\n", i, j, (src_laplace+j*width*3)[i*3+0], (src_laplace+j*width*3)[i*3+1], (src_laplace+j*width*3)[i*3+2], (src_weight+j*width)[i]);
        }
    }
};

__global__ void MatAdd_(int width, int height, short* src_laplace, float* src_weight, short* dst_laplace, float* dst_weight)
{
    MatAddAction::MatAdd(width, height, src_laplace, src_weight, dst_laplace, dst_weight);
}

#define BLOCK 32

void MatAddEx(int width, int height, short* src_laplace, float* src_weight, short* dst_laplace, float* dst_weight)
{
    dim3 threadperblocks(BLOCK, BLOCK);
    dim3 blockspergrid((width + BLOCK - 1)/BLOCK, (height + BLOCK - 1)/BLOCK);
    hipLaunchKernelGGL(MatAdd_, dim3(blockspergrid), dim3(threadperblocks), 0, 0, width, height, src_laplace, src_weight, dst_laplace, dst_weight);
}
27c3fc22aa24e392b63ceececf69eed0c5440984.cu
#include "blender_cuda.cuh"
#include <stdio.h>
#include <stdlib.h>

class MatAddAction
{
public:
    static __device__ __forceinline__ void MatAdd(int width, int height, short* src_laplace, float* src_weight, short* dst_laplace, float* dst_weight)
    {
        int i = threadIdx.x + blockIdx.x*blockDim.x;
        int j = threadIdx.y + blockIdx.y*blockDim.y;
        if (i < width && j < height)
        {
            (dst_laplace + j*width*3)[i*3 + 0] += (short)(src_laplace + j*width*3)[i*3 + 0]*(src_weight + j*width)[i];
            (dst_laplace + j*width*3)[i*3 + 1] += (short)(src_laplace + j*width*3)[i*3 + 1]*(src_weight + j*width)[i];
            (dst_laplace + j*width*3)[i*3 + 2] += (short)(src_laplace + j*width*3)[i*3 + 2]*(src_weight + j*width)[i];
            (dst_weight + j*width)[i] += (src_weight + j*width)[i];
            //if (i < 5 && j < 5)
            //printf("i:<%d, %d> %d,%d,%d, %f\n", i, j, (src_laplace+j*width*3)[i*3+0], (src_laplace+j*width*3)[i*3+1], (src_laplace+j*width*3)[i*3+2], (src_weight+j*width)[i]);
        }
    }
};

__global__ void MatAdd_(int width, int height, short* src_laplace, float* src_weight, short* dst_laplace, float* dst_weight)
{
    MatAddAction::MatAdd(width, height, src_laplace, src_weight, dst_laplace, dst_weight);
}

#define BLOCK 32

void MatAddEx(int width, int height, short* src_laplace, float* src_weight, short* dst_laplace, float* dst_weight)
{
    dim3 threadperblocks(BLOCK, BLOCK);
    dim3 blockspergrid((width + BLOCK - 1)/BLOCK, (height + BLOCK - 1)/BLOCK);
    MatAdd_<<<blockspergrid, threadperblocks>>>(width, height, src_laplace, src_weight, dst_laplace, dst_weight);
}
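Editor's note: a usage sketch, not part of the file pair above, showing how a host caller might feed MatAddEx one weighted Laplacian layer. It assumes MatAddEx is declared in blender_cuda.cuh (not shown here), that the destination accumulators were allocated and zeroed by the caller, and that each pixel stores three short Laplacian channels plus one float weight; the function name blend_one_layer is made up, and error checking is omitted for brevity.

#include <cuda_runtime.h>

void blend_one_layer(const short* h_laplace, const float* h_weight, int width, int height,
                     short* d_dst_laplace, float* d_dst_weight /* preallocated, zeroed accumulators */)
{
    short* d_src_laplace = nullptr;
    float* d_src_weight = nullptr;
    size_t lap_bytes = (size_t)width * height * 3 * sizeof(short);
    size_t wgt_bytes = (size_t)width * height * sizeof(float);

    // Upload one source layer (3-channel Laplacian plus its per-pixel weight).
    cudaMalloc(&d_src_laplace, lap_bytes);
    cudaMalloc(&d_src_weight, wgt_bytes);
    cudaMemcpy(d_src_laplace, h_laplace, lap_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_src_weight, h_weight, wgt_bytes, cudaMemcpyHostToDevice);

    // MatAddEx launches MatAdd_ over a width x height grid and accumulates
    // src_laplace * src_weight into dst_laplace and src_weight into dst_weight.
    MatAddEx(width, height, d_src_laplace, d_src_weight, d_dst_laplace, d_dst_weight);
    cudaDeviceSynchronize();

    cudaFree(d_src_laplace);
    cudaFree(d_src_weight);
}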