Column          Type    Length (min-max)
hip_filename    string  5-84
hip_content     string  79-9.69M
cuda_filename   string  4-83
cuda_content    string  19-9.69M
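For consumers of this dump outside of a dataframe, a minimal sketch of one record as a C++ struct; the field names and length ranges come from the schema above, while the struct name TranslationPair is an illustrative choice and not part of the dataset.

#include <string>

// One HIP/CUDA translation pair, mirroring the four string columns of the schema.
struct TranslationPair {
    std::string hip_filename;   // 5 to 84 characters
    std::string hip_content;    // 79 characters up to 9.69M
    std::string cuda_filename;  // 4 to 83 characters
    std::string cuda_content;   // 19 characters up to 9.69M
};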
71004a5bdde9aaec1517329bebca46cee4d9780d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" const int Nthreads = 1024, maxFR = 5000, NrankMax = 6; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void sumChannels(const double *Params, const float *data, float *datasum, int *kkmax, const int *iC){ int tid, tid0,t, kmax, i, bid, NT, Nchan, NchanNear,j,iChan, Nsum, Nrank; float Cf, Cmax; NchanNear = (int) Params[10]; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nchan = (int) Params[9]; Nsum = (int) Params[13]; Nrank = (int) Params[14]; tid0 = tid + bid * blockDim.x; while (tid0<NT){ for (i=0; i<Nchan;i++){ Cmax = 0.0f; kmax = 0; for (t=0;t<Nrank;t++){ Cf = 0.0f; for(j=0; j<Nsum; j++){ iChan = iC[j+ NchanNear * i]; Cf += data[tid0 + NT * iChan + t * NT * Nchan]; if (Cf*Cf/(1+j) > Cmax){ Cmax = Cf*Cf /(1+j); kmax = j + t*Nsum; } } } datasum[tid0 + NT * i] = Cmax; kkmax[tid0 + NT * i] = kmax; } tid0 += blockDim.x * gridDim.x; } }
71004a5bdde9aaec1517329bebca46cee4d9780d.cu
#include "includes.h" const int Nthreads = 1024, maxFR = 5000, NrankMax = 6; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void sumChannels(const double *Params, const float *data, float *datasum, int *kkmax, const int *iC){ int tid, tid0,t, kmax, i, bid, NT, Nchan, NchanNear,j,iChan, Nsum, Nrank; float Cf, Cmax; NchanNear = (int) Params[10]; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nchan = (int) Params[9]; Nsum = (int) Params[13]; Nrank = (int) Params[14]; tid0 = tid + bid * blockDim.x; while (tid0<NT){ for (i=0; i<Nchan;i++){ Cmax = 0.0f; kmax = 0; for (t=0;t<Nrank;t++){ Cf = 0.0f; for(j=0; j<Nsum; j++){ iChan = iC[j+ NchanNear * i]; Cf += data[tid0 + NT * iChan + t * NT * Nchan]; if (Cf*Cf/(1+j) > Cmax){ Cmax = Cf*Cf /(1+j); kmax = j + t*Nsum; } } } datasum[tid0 + NT * i] = Cmax; kkmax[tid0 + NT * i] = kmax; } tid0 += blockDim.x * gridDim.x; } }
1727c26ba2ccaf7cc3f40defe16028b5d65c4e7f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip_runtime.h" /*********************************************** streamcluster_cuda.cu : parallelized code of streamcluster - original code from PARSEC Benchmark Suite - parallelization with CUDA API has been applied by Shawn Sang-Ha Lee - [email protected] University of Virginia Department of Electrical and Computer Engineering Department of Computer Science ***********************************************/ #include "streamcluster_header.cu" #ifdef PROFILING #include "RDTimer.h" extern RDTimerCPU* rdtimerMallocCpu; extern RDTimerCPU* rdtimerC2GCpu; extern RDTimerCPU* rdtimerG2CCpu; extern RDTimerCPU* rdtimerKernelCpu; #endif using namespace std; // AUTO-ERROR CHECK FOR ALL CUDA FUNCTIONS #define CUDA_SAFE_CALL /* RG ( call) do { \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } while (0) */ #define THREADS_PER_BLOCK 512 #define MAXBLOCKS 65536 //#define CUDATIME // host memory float *work_mem_h; float *coord_h; // device memory float *work_mem_d; float *coord_d; int *center_table_d; bool *switch_membership_d; Point *p; static int iter = 0; // counter for total# of iteration //======================================= // Euclidean Distance //======================================= __device__ float d_dist(int p1, int p2, int num, int dim, float *coord_d) { float retval = 0.0; for(int i = 0; i < dim; i++){ float tmp = coord_d[(i*num)+p1] - coord_d[(i*num)+p2]; retval += tmp * tmp; } return retval; } //======================================= // Kernel - Compute Cost //======================================= __global__ void kernel_compute_cost ( hipLaunchParm lp, int num, int dim, long x, Point *p, int K, int stride,float *coord_d, float *work_mem_d, int *center_table_d, bool *switch_membership_d) { // block ID and global thread ID const int bid = hipBlockIdx_x + hipGridDim_x * hipBlockIdx_y; const int tid = hipBlockDim_x * bid + hipThreadIdx_x; if(tid < num) { float *lower = &work_mem_d[tid*stride]; // cost between this point and point[x]: euclidean distance multiplied by weight float x_cost = d_dist(tid, x, num, dim, coord_d) * p[tid].weight; // if computed cost is less then original (it saves), mark it as to reassign if ( x_cost < p[tid].cost ) { switch_membership_d[tid] = 1; lower[K] += x_cost - p[tid].cost; } // if computed cost is larger, save the difference else { lower[center_table_d[p[tid].assign]] += p[tid].cost - x_cost; } } } //======================================= // Allocate Device Memory //======================================= void allocDevMem(int num, int dim) { CUDA_SAFE_CALL( hipMalloc((void**) &center_table_d, num * sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc((void**) &switch_membership_d, num * sizeof(bool)) ); CUDA_SAFE_CALL( hipMalloc((void**) &p, num * sizeof(Point)) ); CUDA_SAFE_CALL( hipMalloc((void**) &coord_d, num * dim * sizeof(float)) ); } //======================================= // Allocate Host Memory //======================================= void allocHostMem(int num, int dim) { coord_h = (float*) malloc( num * dim * sizeof(float) ); } //======================================= // Free Device Memory //======================================= void freeDevMem() { CUDA_SAFE_CALL( hipFree(center_table_d) ); CUDA_SAFE_CALL( hipFree(switch_membership_d) ); CUDA_SAFE_CALL( hipFree(p) ); CUDA_SAFE_CALL( hipFree(coord_d) ); } 
//======================================= // Free Host Memory //======================================= void freeHostMem() { free(coord_h); } //======================================= // pgain Entry - CUDA SETUP + CUDA CALL //======================================= float pgain( long x, Points *points, float z, long int *numcenters, int kmax, bool *is_center, int *center_table, bool *switch_membership, bool isCoordChanged, double *serial_t, double *cpu_to_gpu_t, double *gpu_to_cpu_t, double *alloc_t, double *kernel_t, double *free_t) { #ifdef CUDATIME float tmp_t; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); #endif hipError_t error; int stride = *numcenters + 1; // size of each work_mem segment int K = *numcenters ; // number of centers int num = points->num; // number of points int dim = points->dim; // number of dimension int nThread = num; // number of threads == number of data points //========================================= // ALLOCATE HOST MEMORY + DATA PREPARATION //========================================= work_mem_h = (float*) malloc(stride * (nThread + 1) * sizeof(float) ); // Only on the first iteration if(iter == 0) { allocHostMem(num, dim); } // build center-index table int count = 0; for( int i=0; i<num; i++) { if( is_center[i] ) { center_table[i] = count++; } } // Extract 'coord' // Only if first iteration OR coord has changed if(isCoordChanged || iter == 0) { for(int i=0; i<dim; i++) { for(int j=0; j<num; j++) { coord_h[ (num*i)+j ] = points->p[j].coord[i]; } } } #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *serial_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerMallocCpu->Start(); #endif //======================================= // ALLOCATE GPU MEMORY //======================================= CUDA_SAFE_CALL( hipMalloc((void**) &work_mem_d, stride * (nThread + 1) * sizeof(float)) ); // Only on the first iteration if( iter == 0 ) { allocDevMem(num, dim); } #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *alloc_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerMallocCpu->Accumulate(); rdtimerC2GCpu->Start(); #endif //======================================= // CPU-TO-GPU MEMORY COPY //======================================= // Only if first iteration OR coord has changed if(isCoordChanged || iter == 0) { CUDA_SAFE_CALL( hipMemcpy(coord_d, coord_h, num * dim * sizeof(float), hipMemcpyHostToDevice) ); } CUDA_SAFE_CALL( hipMemcpy(center_table_d, center_table, num * sizeof(int), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemcpy(p, points->p, num * sizeof(Point), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemset((void*) switch_membership_d, 0, num * sizeof(bool)) ); CUDA_SAFE_CALL( hipMemset((void*) work_mem_d, 0, stride * (nThread + 1) * sizeof(float)) ); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *cpu_to_gpu_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerC2GCpu->Accumulate(); rdtimerKernelCpu->Start(); #endif //======================================= // KERNEL: CALCULATE COST //======================================= // Determine the number of thread blocks in the x- and y-dimension int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK); int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) 
MAXBLOCKS); int num_blocks_x = (int) ((float) (num_blocks+num_blocks_y - 1) / (float) num_blocks_y); dim3 grid_size(num_blocks_x, num_blocks_y, 1); hipLaunchKernel(kernel_compute_cost, dim3(grid_size), dim3(THREADS_PER_BLOCK), 0, 0, num, // in: # of data dim, // in: dimension of point coordinates x, // in: point to open a center at p, // in: data point array K, // in: number of centers stride, // in: size of each work_mem segment coord_d, // in: array of point coordinates work_mem_d, // out: cost and lower field array center_table_d, // in: center index table switch_membership_d // out: changes in membership ); hipDeviceSynchronize(); // error check error = hipGetLastError(); if (error != hipSuccess) { printf("kernel error: %s\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *kernel_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerKernelCpu->Accumulate(); rdtimerG2CCpu->Start(); #endif //======================================= // GPU-TO-CPU MEMORY COPY //======================================= CUDA_SAFE_CALL( hipMemcpy(work_mem_h, work_mem_d, stride * (nThread + 1) * sizeof(float), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), hipMemcpyDeviceToHost) ); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *gpu_to_cpu_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerG2CCpu->Accumulate(); #endif //======================================= // CPU (SERIAL) WORK //======================================= int number_of_centers_to_close = 0; float gl_cost_of_opening_x = z; float *gl_lower = &work_mem_h[stride * nThread]; // compute the number of centers to close if we are to open i for(int i=0; i < num; i++) { if( is_center[i] ) { float low = z; for( int j = 0; j < num; j++ ) { low += work_mem_h[ j*stride + center_table[i] ]; } gl_lower[center_table[i]] = low; if ( low > 0 ) { ++number_of_centers_to_close; work_mem_h[i*stride+K] -= low; } } gl_cost_of_opening_x += work_mem_h[i*stride+K]; } //if opening a center at x saves cost (i.e. cost is negative) do so; otherwise, do nothing if ( gl_cost_of_opening_x < 0 ) { for(int i = 0; i < num; i++) { bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ; if ( switch_membership[i] || close_center ) { points->p[i].cost = dist(points->p[i], points->p[x], dim) * points->p[i].weight; points->p[i].assign = x; } } for(int i = 0; i < num; i++) { if( is_center[i] && gl_lower[center_table[i]] > 0 ) { is_center[i] = false; } } if( x >= 0 && x < num) { is_center[x] = true; } *numcenters = *numcenters + 1 - number_of_centers_to_close; } else { gl_cost_of_opening_x = 0; } //======================================= // DEALLOCATE HOST MEMORY //======================================= free(work_mem_h); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *serial_t += (double) tmp_t; hipEventRecord(start,0); #endif //======================================= // DEALLOCATE GPU MEMORY //======================================= CUDA_SAFE_CALL( hipFree(work_mem_d) ); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *free_t += (double) tmp_t; #endif iter++; return -gl_cost_of_opening_x; }
1727c26ba2ccaf7cc3f40defe16028b5d65c4e7f.cu
#include "hip_runtime.h" /*********************************************** streamcluster_cuda.cu : parallelized code of streamcluster - original code from PARSEC Benchmark Suite - parallelization with CUDA API has been applied by Shawn Sang-Ha Lee - [email protected] University of Virginia Department of Electrical and Computer Engineering Department of Computer Science ***********************************************/ #include "streamcluster_header.cu" #ifdef PROFILING #include "RDTimer.h" extern RDTimerCPU* rdtimerMallocCpu; extern RDTimerCPU* rdtimerC2GCpu; extern RDTimerCPU* rdtimerG2CCpu; extern RDTimerCPU* rdtimerKernelCpu; #endif using namespace std; // AUTO-ERROR CHECK FOR ALL CUDA FUNCTIONS #define CUDA_SAFE_CALL /* RG ( call) do { \ cudaError err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } while (0) */ #define THREADS_PER_BLOCK 512 #define MAXBLOCKS 65536 //#define CUDATIME // host memory float *work_mem_h; float *coord_h; // device memory float *work_mem_d; float *coord_d; int *center_table_d; bool *switch_membership_d; Point *p; static int iter = 0; // counter for total# of iteration //======================================= // Euclidean Distance //======================================= __device__ float d_dist(int p1, int p2, int num, int dim, float *coord_d) { float retval = 0.0; for(int i = 0; i < dim; i++){ float tmp = coord_d[(i*num)+p1] - coord_d[(i*num)+p2]; retval += tmp * tmp; } return retval; } //======================================= // Kernel - Compute Cost //======================================= __global__ void kernel_compute_cost ( hipLaunchParm lp, int num, int dim, long x, Point *p, int K, int stride,float *coord_d, float *work_mem_d, int *center_table_d, bool *switch_membership_d) { // block ID and global thread ID const int bid = hipBlockIdx_x + hipGridDim_x * hipBlockIdx_y; const int tid = hipBlockDim_x * bid + hipThreadIdx_x; if(tid < num) { float *lower = &work_mem_d[tid*stride]; // cost between this point and point[x]: euclidean distance multiplied by weight float x_cost = d_dist(tid, x, num, dim, coord_d) * p[tid].weight; // if computed cost is less then original (it saves), mark it as to reassign if ( x_cost < p[tid].cost ) { switch_membership_d[tid] = 1; lower[K] += x_cost - p[tid].cost; } // if computed cost is larger, save the difference else { lower[center_table_d[p[tid].assign]] += p[tid].cost - x_cost; } } } //======================================= // Allocate Device Memory //======================================= void allocDevMem(int num, int dim) { CUDA_SAFE_CALL( hipMalloc((void**) &center_table_d, num * sizeof(int)) ); CUDA_SAFE_CALL( hipMalloc((void**) &switch_membership_d, num * sizeof(bool)) ); CUDA_SAFE_CALL( hipMalloc((void**) &p, num * sizeof(Point)) ); CUDA_SAFE_CALL( hipMalloc((void**) &coord_d, num * dim * sizeof(float)) ); } //======================================= // Allocate Host Memory //======================================= void allocHostMem(int num, int dim) { coord_h = (float*) malloc( num * dim * sizeof(float) ); } //======================================= // Free Device Memory //======================================= void freeDevMem() { CUDA_SAFE_CALL( hipFree(center_table_d) ); CUDA_SAFE_CALL( hipFree(switch_membership_d) ); CUDA_SAFE_CALL( hipFree(p) ); CUDA_SAFE_CALL( hipFree(coord_d) ); } //======================================= // Free Host Memory 
//======================================= void freeHostMem() { free(coord_h); } //======================================= // pgain Entry - CUDA SETUP + CUDA CALL //======================================= float pgain( long x, Points *points, float z, long int *numcenters, int kmax, bool *is_center, int *center_table, bool *switch_membership, bool isCoordChanged, double *serial_t, double *cpu_to_gpu_t, double *gpu_to_cpu_t, double *alloc_t, double *kernel_t, double *free_t) { #ifdef CUDATIME float tmp_t; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); #endif hipError_t error; int stride = *numcenters + 1; // size of each work_mem segment int K = *numcenters ; // number of centers int num = points->num; // number of points int dim = points->dim; // number of dimension int nThread = num; // number of threads == number of data points //========================================= // ALLOCATE HOST MEMORY + DATA PREPARATION //========================================= work_mem_h = (float*) malloc(stride * (nThread + 1) * sizeof(float) ); // Only on the first iteration if(iter == 0) { allocHostMem(num, dim); } // build center-index table int count = 0; for( int i=0; i<num; i++) { if( is_center[i] ) { center_table[i] = count++; } } // Extract 'coord' // Only if first iteration OR coord has changed if(isCoordChanged || iter == 0) { for(int i=0; i<dim; i++) { for(int j=0; j<num; j++) { coord_h[ (num*i)+j ] = points->p[j].coord[i]; } } } #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *serial_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerMallocCpu->Start(); #endif //======================================= // ALLOCATE GPU MEMORY //======================================= CUDA_SAFE_CALL( hipMalloc((void**) &work_mem_d, stride * (nThread + 1) * sizeof(float)) ); // Only on the first iteration if( iter == 0 ) { allocDevMem(num, dim); } #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *alloc_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerMallocCpu->Accumulate(); rdtimerC2GCpu->Start(); #endif //======================================= // CPU-TO-GPU MEMORY COPY //======================================= // Only if first iteration OR coord has changed if(isCoordChanged || iter == 0) { CUDA_SAFE_CALL( hipMemcpy(coord_d, coord_h, num * dim * sizeof(float), hipMemcpyHostToDevice) ); } CUDA_SAFE_CALL( hipMemcpy(center_table_d, center_table, num * sizeof(int), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemcpy(p, points->p, num * sizeof(Point), hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipMemset((void*) switch_membership_d, 0, num * sizeof(bool)) ); CUDA_SAFE_CALL( hipMemset((void*) work_mem_d, 0, stride * (nThread + 1) * sizeof(float)) ); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *cpu_to_gpu_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerC2GCpu->Accumulate(); rdtimerKernelCpu->Start(); #endif //======================================= // KERNEL: CALCULATE COST //======================================= // Determine the number of thread blocks in the x- and y-dimension int num_blocks = (int) ((float) (num + THREADS_PER_BLOCK - 1) / (float) THREADS_PER_BLOCK); int num_blocks_y = (int) ((float) (num_blocks + MAXBLOCKS - 1) / (float) MAXBLOCKS); int num_blocks_x = (int) ((float) 
(num_blocks+num_blocks_y - 1) / (float) num_blocks_y); dim3 grid_size(num_blocks_x, num_blocks_y, 1); hipLaunchKernel(kernel_compute_cost, dim3(grid_size), dim3(THREADS_PER_BLOCK), 0, 0, num, // in: # of data dim, // in: dimension of point coordinates x, // in: point to open a center at p, // in: data point array K, // in: number of centers stride, // in: size of each work_mem segment coord_d, // in: array of point coordinates work_mem_d, // out: cost and lower field array center_table_d, // in: center index table switch_membership_d // out: changes in membership ); hipDeviceSynchronize(); // error check error = hipGetLastError(); if (error != hipSuccess) { printf("kernel error: %s\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *kernel_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerKernelCpu->Accumulate(); rdtimerG2CCpu->Start(); #endif //======================================= // GPU-TO-CPU MEMORY COPY //======================================= CUDA_SAFE_CALL( hipMemcpy(work_mem_h, work_mem_d, stride * (nThread + 1) * sizeof(float), hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(switch_membership, switch_membership_d, num * sizeof(bool), hipMemcpyDeviceToHost) ); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *gpu_to_cpu_t += (double) tmp_t; hipEventRecord(start,0); #endif #ifdef PROFILING rdtimerG2CCpu->Accumulate(); #endif //======================================= // CPU (SERIAL) WORK //======================================= int number_of_centers_to_close = 0; float gl_cost_of_opening_x = z; float *gl_lower = &work_mem_h[stride * nThread]; // compute the number of centers to close if we are to open i for(int i=0; i < num; i++) { if( is_center[i] ) { float low = z; for( int j = 0; j < num; j++ ) { low += work_mem_h[ j*stride + center_table[i] ]; } gl_lower[center_table[i]] = low; if ( low > 0 ) { ++number_of_centers_to_close; work_mem_h[i*stride+K] -= low; } } gl_cost_of_opening_x += work_mem_h[i*stride+K]; } //if opening a center at x saves cost (i.e. cost is negative) do so; otherwise, do nothing if ( gl_cost_of_opening_x < 0 ) { for(int i = 0; i < num; i++) { bool close_center = gl_lower[center_table[points->p[i].assign]] > 0 ; if ( switch_membership[i] || close_center ) { points->p[i].cost = dist(points->p[i], points->p[x], dim) * points->p[i].weight; points->p[i].assign = x; } } for(int i = 0; i < num; i++) { if( is_center[i] && gl_lower[center_table[i]] > 0 ) { is_center[i] = false; } } if( x >= 0 && x < num) { is_center[x] = true; } *numcenters = *numcenters + 1 - number_of_centers_to_close; } else { gl_cost_of_opening_x = 0; } //======================================= // DEALLOCATE HOST MEMORY //======================================= free(work_mem_h); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *serial_t += (double) tmp_t; hipEventRecord(start,0); #endif //======================================= // DEALLOCATE GPU MEMORY //======================================= CUDA_SAFE_CALL( hipFree(work_mem_d) ); #ifdef CUDATIME hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&tmp_t, start, stop); *free_t += (double) tmp_t; #endif iter++; return -gl_cost_of_opening_x; }
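The .cu half of this streamcluster record is itself already written against an early HIP interface (it includes "hip_runtime.h" and uses hipLaunchParm, hipBlockIdx_x and hipLaunchKernel), so the hipify pass only adds the banner and the hip/hip_runtime.h include and swaps cudaError for hipError_t inside the commented-out CUDA_SAFE_CALL. As a hedged reference, the sketch below restates the cost kernel and its launch in the current HIP style, which drops the hipLaunchParm parameter and the hip-prefixed builtins; the Point struct here is a minimal stand-in, since the real one lives in streamcluster_header.cu and is not part of this record.

#include <hip/hip_runtime.h>

// Minimal stand-in for the record's Point; only the fields the kernel touches are declared.
struct Point { float weight; float cost; long assign; };

__device__ float d_dist(int p1, int p2, int num, int dim, const float *coord) {
    float retval = 0.0f;
    for (int i = 0; i < dim; i++) {
        float tmp = coord[i * num + p1] - coord[i * num + p2];
        retval += tmp * tmp;
    }
    return retval;
}

__global__ void kernel_compute_cost(int num, int dim, long x, Point *p, int K, int stride,
                                    const float *coord, float *work_mem,
                                    const int *center_table, bool *switch_membership) {
    const int bid = blockIdx.x + gridDim.x * blockIdx.y;   // was hipBlockIdx_x + hipGridDim_x * hipBlockIdx_y
    const int tid = blockDim.x * bid + threadIdx.x;        // was hipBlockDim_x * bid + hipThreadIdx_x
    if (tid >= num) return;
    float *lower = &work_mem[tid * stride];
    float x_cost = d_dist(tid, (int)x, num, dim, coord) * p[tid].weight;
    if (x_cost < p[tid].cost) {
        switch_membership[tid] = true;
        lower[K] += x_cost - p[tid].cost;
    } else {
        lower[center_table[p[tid].assign]] += p[tid].cost - x_cost;
    }
}

void launch_compute_cost(int num, int dim, long x, Point *p, int K, int stride,
                         const float *coord, float *work_mem,
                         const int *center_table, bool *switch_membership) {
    const int threads_per_block = 512, max_blocks = 65536;   // mirrors THREADS_PER_BLOCK / MAXBLOCKS above
    int num_blocks   = (num + threads_per_block - 1) / threads_per_block;
    int num_blocks_y = (num_blocks + max_blocks - 1) / max_blocks;
    int num_blocks_x = (num_blocks + num_blocks_y - 1) / num_blocks_y;
    // Current HIP launch macro; no hipLaunchParm argument is passed to the kernel.
    hipLaunchKernelGGL(kernel_compute_cost, dim3(num_blocks_x, num_blocks_y, 1),
                       dim3(threads_per_block), 0, 0,
                       num, dim, x, p, K, stride, coord, work_mem, center_table, switch_membership);
}

Under hipcc the same launch could also be written with the chevron syntax, since HIP-Clang accepts kernel<<<grid, block>>>(...) directly.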
81071067a339ebd2db53f03503aef7840e581a16.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define BLOCK 16 #define NUM 2048 __shared__ unsigned b[NUM]; __device__ void devicetest(unsigned *b) { unsigned sum = 0; for (unsigned i = 0; i < NUM; i++) { sum += b[i]; } printf("sum: %u \n", sum); } __global__ void paratest(unsigned * a) { unsigned bid = blockIdx.x; unsigned tid = threadIdx.x; b[tid] = a[tid]; for (unsigned i = 0; i < 4; i++) { if (bid % 2 != 0) { if (tid < 1024) { unsigned idx = bid * blockDim.x + tid; b[tid] = a[idx] + 1; if (tid % 2 != 0) { b[tid] = 2; } else { if (tid > 0) b[tid] = b[tid-1]+1; } } else { b[tid] = b[tid-1]; } } else { unsigned idx = bid * blockDim.x + tid; b[tid] = a[idx] + 1; } } __syncthreads(); if (tid % 2 == 0) { printf("even number !\n"); } else { printf("odd number !\n"); } } int main() { unsigned *da; hipMalloc((void **)&da, sizeof(unsigned)*BLOCK*NUM); //unsigned a = 8; //unsigned b = test_external_library(a); //printf("The returned b's value is %d\n", b); hipLaunchKernelGGL(( paratest), dim3(BLOCK), dim3(NUM), 0, 0, da); hipFree(da); }
81071067a339ebd2db53f03503aef7840e581a16.cu
#include <stdio.h> #define BLOCK 16 #define NUM 2048 __shared__ unsigned b[NUM]; __device__ void devicetest(unsigned *b) { unsigned sum = 0; for (unsigned i = 0; i < NUM; i++) { sum += b[i]; } printf("sum: %u \n", sum); } __global__ void paratest(unsigned * a) { unsigned bid = blockIdx.x; unsigned tid = threadIdx.x; b[tid] = a[tid]; for (unsigned i = 0; i < 4; i++) { if (bid % 2 != 0) { if (tid < 1024) { unsigned idx = bid * blockDim.x + tid; b[tid] = a[idx] + 1; if (tid % 2 != 0) { b[tid] = 2; } else { if (tid > 0) b[tid] = b[tid-1]+1; } } else { b[tid] = b[tid-1]; } } else { unsigned idx = bid * blockDim.x + tid; b[tid] = a[idx] + 1; } } __syncthreads(); if (tid % 2 == 0) { printf("even number !\n"); } else { printf("odd number !\n"); } } int main() { unsigned *da; cudaMalloc((void **)&da, sizeof(unsigned)*BLOCK*NUM); //unsigned a = 8; //unsigned b = test_external_library(a); //printf("The returned b's value is %d\n", b); paratest<<<BLOCK, NUM>>>(da); cudaFree(da); }
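This pair shows the most common hipify rewrites in isolation: cudaMalloc and cudaFree become hipMalloc and hipFree, and the chevron launch paratest<<<BLOCK, NUM>>>(da) becomes hipLaunchKernelGGL((paratest), dim3(BLOCK), dim3(NUM), 0, 0, da). A hedged side note: with NUM defined as 2048, both versions request 2048 threads per block, which is above the 1024-thread-per-block limit of most current NVIDIA and AMD GPUs, so the launch would be expected to fail at runtime. The self-contained sketch below repeats the mapping with a trivial stand-in kernel (not the record's paratest) and clamps the block size to 1024; the CUDA spelling of each call is noted in the trailing comments.

#include <hip/hip_runtime.h>
#include <cstdio>

#define BLOCK 16
#define NUM 1024   // the record uses 2048, clamped here to stay within the usual per-block thread limit

// Trivial stand-in kernel, not the record's paratest.
__global__ void fill(unsigned *a) {
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    a[idx] = idx;
}

int main() {
    unsigned *da;
    hipMalloc((void **)&da, sizeof(unsigned) * BLOCK * NUM);     // CUDA: cudaMalloc((void **)&da, ...)
    hipLaunchKernelGGL(fill, dim3(BLOCK), dim3(NUM), 0, 0, da);  // CUDA: fill<<<BLOCK, NUM>>>(da)
    hipError_t err = hipDeviceSynchronize();                     // CUDA: cudaDeviceSynchronize()
    printf("fill: %s\n", hipGetErrorString(err));                // CUDA: cudaGetErrorString(err)
    hipFree(da);                                                 // CUDA: cudaFree(da)
    return 0;
}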
4346c8567e3729129f74f15324f9d3ef5b7e2336.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/reduce.h> #include <algorithm> #include <cstdlib> #include <sys/time.h> #include "src/parse_data.h" #include "src/gather_reduce.cuh" #define METHOD 0 #define CUDA_RT_CALL(call) \ { \ hipError_t cudaStatus = call; \ if (hipSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \ } struct timeval t1, t2; double mytime; int main(void) { int *input_row_ind_d, *input_col_ind_d, *input_csr_row_d; int *input_row_ind_h, *input_col_ind_h, *input_csr_row_h; #if METHOD == 1 int *input_cm_row_ind_d, *input_cm_col_ind_d, *input_cm_col_d; int *input_cm_row_ind_h, *input_cm_col_ind_h, *input_cm_col_h; #endif int *output_value_d; // char file[] = "./data/Theory-3-4-B1k.tsv"; // int num_nodes = 20; // int num_edges = 31; // char file[] = "./data/Theory-25-81-B1k.tsv"; // int num_nodes = 2132; // int num_edges = 4156; // 133321 char file[] = "./data/Theory-16-25-81-B1k.tsv"; int num_nodes = 36244; int num_edges = 137164; // 2102761 // char file[] = "./data/Theory-25-81-256-B1k.tsv"; // int num_nodes = 547924; // int num_edges = 2132284; // 66758995 // char file[] = "./data/Theory-5-9-16-25-81-B1k.tsv"; // int num_nodes = 2174640; // int num_edges = 28667380; input_row_ind_h = (int*) malloc(num_edges * sizeof(int)); input_col_ind_h = (int*) malloc(num_edges * sizeof(int)); input_csr_row_h = (int*) malloc((num_nodes + 1) * sizeof(int)); #if METHOD == 1 input_cm_row_ind_h = (int*) malloc(num_edges * sizeof(int)); input_cm_col_ind_h = (int*) malloc(num_edges * sizeof(int)); input_cm_col_h = (int*) malloc((num_nodes + 1) * sizeof(int)); #endif CUDA_RT_CALL( hipMalloc((void **)&input_row_ind_d, num_edges * sizeof(int)) ); CUDA_RT_CALL( hipMalloc((void **)&input_col_ind_d, num_edges * sizeof(int)) ); CUDA_RT_CALL( hipMalloc((void **)&input_csr_row_d, (num_nodes + 1) * sizeof(int)) ); CUDA_RT_CALL( hipMalloc((void **)&output_value_d, num_edges * sizeof(int))); #if METHOD == 1 CUDA_RT_CALL( hipMalloc((void **)&input_cm_row_ind_d, num_edges * sizeof(int)) ); CUDA_RT_CALL( hipMalloc((void **)&input_cm_col_ind_d, num_edges * sizeof(int)) ); CUDA_RT_CALL( hipMalloc((void **)&input_cm_col_d, (num_nodes + 1) * sizeof(int)) ); #endif read_tsv(input_row_ind_h, input_col_ind_h, input_csr_row_h, num_nodes + 1, num_edges, file); CUDA_RT_CALL( hipMemcpy(input_row_ind_d, input_row_ind_h, num_edges * sizeof(int), hipMemcpyHostToDevice) ); CUDA_RT_CALL( hipMemcpy(input_col_ind_d, input_col_ind_h, num_edges * sizeof(int), hipMemcpyHostToDevice) ); CUDA_RT_CALL( hipMemcpy(input_csr_row_d, input_csr_row_h, (num_nodes + 1) * sizeof(int), hipMemcpyHostToDevice) ); #if METHOD == 1 CUDA_RT_CALL( hipMemcpy(input_cm_row_ind_d, input_row_ind_h, num_edges * sizeof(int), hipMemcpyHostToDevice)); CUDA_RT_CALL( hipMemcpy(input_cm_col_ind_d, input_col_ind_h, num_edges * sizeof(int), hipMemcpyHostToDevice)); gettimeofday(&t1, 0); thrust::stable_sort_by_key(thrust::device, input_cm_col_ind_d, input_cm_col_ind_d + num_edges , input_cm_row_ind_d); CUDA_RT_CALL( hipMemcpy(input_cm_col_ind_h, input_cm_col_ind_d, num_edges * sizeof(int), hipMemcpyDeviceToHost)); make_col_index(input_cm_col_ind_h, input_cm_col_h, num_nodes+1, num_edges); CUDA_RT_CALL( hipMemcpy(input_cm_col_d, input_cm_col_h, (num_nodes + 1) * sizeof(int), hipMemcpyHostToDevice)); gettimeofday(&t2, 0); mytime = (1000000.0 * (t2.tv_sec - 
t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; printf("Time elapsed in preparation: %3.5f ms\n", mytime); #endif int num_blocks = ceil((num_edges * 1.0) / BLOCK_SIZE); gettimeofday(&t1, 0); #if METHOD == 0 hipLaunchKernelGGL(( gather_naive), dim3(num_blocks),dim3(BLOCK_SIZE), 0, 0, num_edges, input_row_ind_d, input_col_ind_d, input_csr_row_d, output_value_d); #elif METHOD == 1 hipLaunchKernelGGL(( gather_binned), dim3(num_blocks),dim3(BLOCK_SIZE), 0, 0, num_edges, input_row_ind_d, input_col_ind_d, input_csr_row_d, input_cm_row_ind_d, input_cm_col_ind_d, input_cm_col_d, output_value_d); #endif CUDA_RT_CALL( hipDeviceSynchronize() ); gettimeofday(&t2, 0); mytime = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; printf("Time elapsed in real work: %3.5f ms\n", mytime); gettimeofday(&t1, 0); int total_tc = thrust::reduce(thrust::device, output_value_d, output_value_d + num_edges); gettimeofday(&t2, 0); mytime = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; printf("Time elapsed in reduce: %3.5f ms\n", mytime); printf("Total number of triangle: %d\n", total_tc); CUDA_RT_CALL( hipFree(input_row_ind_d) ); CUDA_RT_CALL( hipFree(input_col_ind_d) ); CUDA_RT_CALL( hipFree(input_csr_row_d) ); CUDA_RT_CALL( hipFree(output_value_d) ); free(input_row_ind_h); free(input_col_ind_h); free(input_csr_row_h); #if METHOD == 1 CUDA_RT_CALL( hipFree(input_cm_col_ind_d) ); CUDA_RT_CALL( hipFree(input_cm_row_ind_d) ); CUDA_RT_CALL( hipFree(input_cm_col_d) ); free(input_cm_col_ind_h); free(input_cm_row_ind_h); free(input_cm_col_h); #endif return 0; }
4346c8567e3729129f74f15324f9d3ef5b7e2336.cu
#include <thrust/reduce.h> #include <algorithm> #include <cstdlib> #include <sys/time.h> #include "src/parse_data.h" #include "src/gather_reduce.cuh" #define METHOD 0 #define CUDA_RT_CALL(call) \ { \ cudaError_t cudaStatus = call; \ if (cudaSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } struct timeval t1, t2; double mytime; int main(void) { int *input_row_ind_d, *input_col_ind_d, *input_csr_row_d; int *input_row_ind_h, *input_col_ind_h, *input_csr_row_h; #if METHOD == 1 int *input_cm_row_ind_d, *input_cm_col_ind_d, *input_cm_col_d; int *input_cm_row_ind_h, *input_cm_col_ind_h, *input_cm_col_h; #endif int *output_value_d; // char file[] = "./data/Theory-3-4-B1k.tsv"; // int num_nodes = 20; // int num_edges = 31; // char file[] = "./data/Theory-25-81-B1k.tsv"; // int num_nodes = 2132; // int num_edges = 4156; // 133321 char file[] = "./data/Theory-16-25-81-B1k.tsv"; int num_nodes = 36244; int num_edges = 137164; // 2102761 // char file[] = "./data/Theory-25-81-256-B1k.tsv"; // int num_nodes = 547924; // int num_edges = 2132284; // 66758995 // char file[] = "./data/Theory-5-9-16-25-81-B1k.tsv"; // int num_nodes = 2174640; // int num_edges = 28667380; input_row_ind_h = (int*) malloc(num_edges * sizeof(int)); input_col_ind_h = (int*) malloc(num_edges * sizeof(int)); input_csr_row_h = (int*) malloc((num_nodes + 1) * sizeof(int)); #if METHOD == 1 input_cm_row_ind_h = (int*) malloc(num_edges * sizeof(int)); input_cm_col_ind_h = (int*) malloc(num_edges * sizeof(int)); input_cm_col_h = (int*) malloc((num_nodes + 1) * sizeof(int)); #endif CUDA_RT_CALL( cudaMalloc((void **)&input_row_ind_d, num_edges * sizeof(int)) ); CUDA_RT_CALL( cudaMalloc((void **)&input_col_ind_d, num_edges * sizeof(int)) ); CUDA_RT_CALL( cudaMalloc((void **)&input_csr_row_d, (num_nodes + 1) * sizeof(int)) ); CUDA_RT_CALL( cudaMalloc((void **)&output_value_d, num_edges * sizeof(int))); #if METHOD == 1 CUDA_RT_CALL( cudaMalloc((void **)&input_cm_row_ind_d, num_edges * sizeof(int)) ); CUDA_RT_CALL( cudaMalloc((void **)&input_cm_col_ind_d, num_edges * sizeof(int)) ); CUDA_RT_CALL( cudaMalloc((void **)&input_cm_col_d, (num_nodes + 1) * sizeof(int)) ); #endif read_tsv(input_row_ind_h, input_col_ind_h, input_csr_row_h, num_nodes + 1, num_edges, file); CUDA_RT_CALL( cudaMemcpy(input_row_ind_d, input_row_ind_h, num_edges * sizeof(int), cudaMemcpyHostToDevice) ); CUDA_RT_CALL( cudaMemcpy(input_col_ind_d, input_col_ind_h, num_edges * sizeof(int), cudaMemcpyHostToDevice) ); CUDA_RT_CALL( cudaMemcpy(input_csr_row_d, input_csr_row_h, (num_nodes + 1) * sizeof(int), cudaMemcpyHostToDevice) ); #if METHOD == 1 CUDA_RT_CALL( cudaMemcpy(input_cm_row_ind_d, input_row_ind_h, num_edges * sizeof(int), cudaMemcpyHostToDevice)); CUDA_RT_CALL( cudaMemcpy(input_cm_col_ind_d, input_col_ind_h, num_edges * sizeof(int), cudaMemcpyHostToDevice)); gettimeofday(&t1, 0); thrust::stable_sort_by_key(thrust::device, input_cm_col_ind_d, input_cm_col_ind_d + num_edges , input_cm_row_ind_d); CUDA_RT_CALL( cudaMemcpy(input_cm_col_ind_h, input_cm_col_ind_d, num_edges * sizeof(int), cudaMemcpyDeviceToHost)); make_col_index(input_cm_col_ind_h, input_cm_col_h, num_nodes+1, num_edges); CUDA_RT_CALL( cudaMemcpy(input_cm_col_d, input_cm_col_h, (num_nodes + 1) * sizeof(int), cudaMemcpyHostToDevice)); gettimeofday(&t2, 0); mytime = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; printf("Time 
elapsed in preparation: %3.5f ms\n", mytime); #endif int num_blocks = ceil((num_edges * 1.0) / BLOCK_SIZE); gettimeofday(&t1, 0); #if METHOD == 0 gather_naive<<<num_blocks,BLOCK_SIZE>>>(num_edges, input_row_ind_d, input_col_ind_d, input_csr_row_d, output_value_d); #elif METHOD == 1 gather_binned<<<num_blocks,BLOCK_SIZE>>>(num_edges, input_row_ind_d, input_col_ind_d, input_csr_row_d, input_cm_row_ind_d, input_cm_col_ind_d, input_cm_col_d, output_value_d); #endif CUDA_RT_CALL( cudaDeviceSynchronize() ); gettimeofday(&t2, 0); mytime = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; printf("Time elapsed in real work: %3.5f ms\n", mytime); gettimeofday(&t1, 0); int total_tc = thrust::reduce(thrust::device, output_value_d, output_value_d + num_edges); gettimeofday(&t2, 0); mytime = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; printf("Time elapsed in reduce: %3.5f ms\n", mytime); printf("Total number of triangle: %d\n", total_tc); CUDA_RT_CALL( cudaFree(input_row_ind_d) ); CUDA_RT_CALL( cudaFree(input_col_ind_d) ); CUDA_RT_CALL( cudaFree(input_csr_row_d) ); CUDA_RT_CALL( cudaFree(output_value_d) ); free(input_row_ind_h); free(input_col_ind_h); free(input_csr_row_h); #if METHOD == 1 CUDA_RT_CALL( cudaFree(input_cm_col_ind_d) ); CUDA_RT_CALL( cudaFree(input_cm_row_ind_d) ); CUDA_RT_CALL( cudaFree(input_cm_col_d) ); free(input_cm_col_ind_h); free(input_cm_row_ind_h); free(input_cm_col_h); #endif return 0; }
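In this triangle-counting pair, hipify rewrites the CUDA_RT_CALL macro (cudaError_t, cudaSuccess and cudaGetErrorString become hipError_t, hipSuccess and hipGetErrorString), rewrites the memory and launch calls, and leaves the thrust::stable_sort_by_key and thrust::reduce calls untouched, since the same Thrust interface is served by rocThrust on AMD. The sketch below lifts that error-checking pattern into a small self-contained program; HIP_RT_CALL is my own name for the hipified macro (the record keeps the CUDA_RT_CALL name), and count_stub is a placeholder kernel, not the record's gather_naive.

#include <hip/hip_runtime.h>
#include <cstdio>

// Hipified error-check macro in the style of the record's CUDA_RT_CALL.
#define HIP_RT_CALL(call)                                                                     \
    {                                                                                         \
        hipError_t status = (call);                                                           \
        if (hipSuccess != status)                                                             \
            fprintf(stderr,                                                                   \
                    "ERROR: HIP RT call \"%s\" in line %d of file %s failed with %s (%d).\n", \
                    #call, __LINE__, __FILE__, hipGetErrorString(status), status);            \
    }

// Placeholder kernel standing in for gather_naive, which lives in src/gather_reduce.cuh.
__global__ void count_stub(int n, const int *row_ind, const int *col_ind, int *out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = (row_ind[i] == col_ind[i]);
}

int main() {
    const int num_edges = 1 << 20, block_size = 256;
    int *row_d, *col_d, *out_d;
    HIP_RT_CALL(hipMalloc((void **)&row_d, num_edges * sizeof(int)));   // CUDA: cudaMalloc
    HIP_RT_CALL(hipMalloc((void **)&col_d, num_edges * sizeof(int)));
    HIP_RT_CALL(hipMalloc((void **)&out_d, num_edges * sizeof(int)));
    HIP_RT_CALL(hipMemset(row_d, 0, num_edges * sizeof(int)));
    HIP_RT_CALL(hipMemset(col_d, 0, num_edges * sizeof(int)));

    int num_blocks = (num_edges + block_size - 1) / block_size;
    hipLaunchKernelGGL(count_stub, dim3(num_blocks), dim3(block_size), 0, 0,
                       num_edges, row_d, col_d, out_d);                 // CUDA: count_stub<<<num_blocks, block_size>>>(...)
    HIP_RT_CALL(hipDeviceSynchronize());                                // CUDA: cudaDeviceSynchronize()

    HIP_RT_CALL(hipFree(row_d));
    HIP_RT_CALL(hipFree(col_d));
    HIP_RT_CALL(hipFree(out_d));
    return 0;
}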
9d5e2e27868a7cff962e622dfb89dd0943410e9e.hip
// !!! This is a file automatically generated by hipify!!! #include <SPH/surface/surfaceDetection.cuh> #include <utility/include_all.h> neighFunctionType estimateNormal(SPH::detection::Memory arrays) { checkedParticleIdx(i); cache_arrays((pos, position), (vol, volume)); float4 normal{ 0.f,0.f,0.f,0.f }; iterateNeighbors(j){ if (i == j) continue; auto distance = pos[i] - pos[j]; normal += math::normalize3(distance); //normal += -arrays.volume[j] / arrays.density[j] * GW_ij; } arrays.particleNormal[i] = math::normalize3(math::castTo<float4>(normal)); } neighFunctionType detectSurface(SPH::detection::Memory arrays) { checkedParticleIdx(i); float4 normal = arrays.particleNormal[i]; bool state = false; iterateNeighbors(j) { if (i == j) continue; auto distance = arrays.position[j] - arrays.position[i]; auto angle = acosf(math::dot3(normal, math::normalize3(distance)).val); state = state || angle <= CUDART_PI_F / 6.f; } auto V0 = 4.f / 3.f * CUDART_PI_F * math::cubic(arrays.radius); auto d = planeBoundary::distance(arrays.position[i], arrays.volume[i], arrays); auto db = d; //auto dist = math::planeDistance(E, arrays.position[i]); auto angle = acosf(math::dot3(normal, -math::normalize3(d)).val); auto h0 = support_from_volume(V0) * kernelSize() * 2.f; arrays.debugArray[i] = float4{ 0.f, 0.f, 0.f, h0.val / 2.f * 1.1f }; if (d.val.w < h0) { state = state || angle <= CUDART_PI_F / 2.f; arrays.debugArray[i] = float4{ d.val.x, d.val.y, d.val.z, d.val.w }; auto x = d.val.w; auto h = support_from_volume(arrays.volume[i]); auto H = h.val * kernelSize(); auto xRel = math::clamp((x + H) / (2.f * H), 0.f, 1.f) * ((float)arrays.boundaryLUTSize - 1.f); auto xL = math::floorf(xRel); auto xH = math::ceilf(xRel); auto xD = xRel - xL; int32_t xLi = math::clamp(static_cast<int32_t>(xL), 0, (int32_t)arrays.boundaryLUTSize - 1); int32_t xHi = math::clamp(static_cast<int32_t>(xH), 0, (int32_t)arrays.boundaryLUTSize - 1); auto lL = arrays.splineLUT[xLi]; auto lH = arrays.splineLUT[xHi]; auto val = lL * xD + (1.f - xD) * lH; arrays.debugArray[i] = float4{ val.val, xRel, boundary::g(d, h), x }; arrays.debugArray[i] = db.val; //arrays.debugArray[i] = boundary::splineGradient(arrays.position[i], arrays.volume[i], uFloat<>{0.5f}, arrays, boundary::kind::plane, -1).val; } for (int32_t v = 0; v < arrays.volumeBoundaryCounter; ++v) { auto d = volumeBoundary::distance_fn(arrays.position[i], arrays.volume[i], arrays,v); //auto dist = math::planeDistance(E, arrays.position[i]); auto angle = acosf(math::dot3(normal, -math::normalize3(d)).val); auto h0 = support_from_volume(V0) * kernelSize() * 2.f; //arrays.debugArray[i] = float4{ 0.f, 0.f, 0.f, h0.val / 2.f * 1.05f }; if (d.val.w < h0 && d.val.w < db.val.w) { //state = state || angle <= CUDART_PI_F / 2.f; auto x = d.val.w; auto h = support_from_volume(arrays.volume[i]); auto H = h.val * kernelSize(); auto xRel = math::clamp((x + H) / (2.f * H), 0.f, 1.f) * ((float)arrays.boundaryLUTSize - 1.f); auto xL = math::floorf(xRel); auto xH = math::ceilf(xRel); auto xD = xRel - xL; int32_t xLi = math::clamp(static_cast<int32_t>(xL), 0, (int32_t)arrays.boundaryLUTSize - 1); int32_t xHi = math::clamp(static_cast<int32_t>(xH), 0, (int32_t)arrays.boundaryLUTSize - 1); auto lL = arrays.splineLUT[xLi]; auto lH = arrays.splineLUT[xHi]; auto val = lL * xD + (1.f - xD) * lH; arrays.debugArray[i] = float4{ val.val, xRel, boundary::g(d, h), x }; arrays.debugArray[i] = d.val; //arrays.debugArray[i] = float4{ // boundary::g(d,h), // boundary::lookupGradient(arrays, arrays.splineGradientLUT, 
arrays.boundaryLUTSize, d, arrays.volume[i], uFloat<>{0.5f}, support_from_volume(arrays.volume[i]),0.f).val, // boundary::dg(d,h), // boundary::lookupValue(arrays, arrays.splineLUT, arrays.boundaryLUTSize, d, arrays.volume[i], uFloat<>{0.5f}, support_from_volume(arrays.volume[i]),0.f).val //}; //arrays.debugArray[i] = boundary::internal::lookupGradient(arrays.splineLUT, arrays.splineGradientLUT, arrays.position[i], arrays.volume[i], uFloat<>{0.5f}, arrays, boundary::kind::volume, v).val; //arrays.debugArray[i] = boundary::splineGradient(arrays.position[i], arrays.volume[i], uFloat<>{0.5f}, arrays, boundary::kind::volume, v).val; } } if (arrays.neighborListLength[i] < 5) state = false; iterateBoundaryPlanes(E) { auto dist = math::planeDistance(E, arrays.position[i]); if ( (dist.val < math::unit_get<1>(arrays.surface_distanceFieldDistances).val && fabsf(math::dot3(E, float4_u<>{1.f, 0.f, 0.f, 0.f}).val) > 0.5f) || (dist.val < math::unit_get<2>(arrays.surface_distanceFieldDistances).val && fabsf(math::dot3(E, float4_u<>{0.f, 1.f, 0.f, 0.f}).val) > 0.5f) || (dist.val < math::unit_get<3>(arrays.surface_distanceFieldDistances).val && fabsf(math::dot3(E, float4_u<>{0.f, 0.f, 1.f, 0.f}).val) > 0.5f && E.val.z > 0.f)) { //printf("b"); state = true; } } //for (int32_t b = 0; b < arrays.volumeBoundaryCounter; ++b) { // auto vos = volumeBoundary::volumeDistanceFn(arrays.position[i], arrays, b); // if (vos.val.w < HforV1 && math::dot3(vos, normal) < 0.f) // state = true; //} auto r = math::power<ratio<1, 3>>(arrays.volume[i] * PI4O3_1); auto phi = state ? arrays.surface_levelLimit : 0.f; auto phiOld = arrays.distanceBuffer.first[i]; phi = state ? arrays.surface_levelLimit : phi; phi = math::clamp(phi, arrays.surface_levelLimit, 0.f); phi = math::clamp(phi, phiOld - 1.0f * r , phiOld + 1.0f * r ); arrays.distanceBuffer.second[i] = phi; arrays.distanceBuffer.first[i] = phiOld; } basicFunctionType correctEstimate(SPH::detection::Memory arrays) { checkedParticleIdx(i); auto r = math::power<ratio<1, 3>>(arrays.volume[i] * PI4O3_1); auto phi = arrays.distanceBuffer.second[i]; arrays.distanceBuffer.second[i] = phi; arrays.decisionBuffer[i] = phi >= -0.85f * r ? 1.f : 0.f; arrays.markerBuffer[i] = phi >= -0.85f * r ? 1.f : 0.f; if (arrays.markerBuffer[i] < 0.4f) { arrays.surface_idxBuffer.second[i] = i; } } neighFunctionType propagateSurface(SPH::detection::Memory arrays, int32_t threads) { checkedThreadIdx(t); alias_arrays((pos, position)); int32_t i = arrays.surface_idxBuffer.first[t]; if (i == INT_MIN) return; int32_t partnerIdx = INT_MAX; float_u<SI::m> partnerDistance{ FLT_MAX }; auto partnerPhi = 0.0_m; //auto r = math::power<ratio<1, 3>>(arrays.volume[i] * PI4O3_1); arrays.markerBuffer[i] = arrays.decisionBuffer[i]; float marker = arrays.markerBuffer[i]; iterateNeighbors(j) { if (W_ij > 0.f) { if (j == i) continue; float neighbor_decision = arrays.decisionBuffer[j]; if (neighbor_decision > 0.2f && marker < 0.05f) { auto dist = math::abs(math::distance3(pos[i], pos[j])); if (dist < partnerDistance) { partnerIdx = j; partnerDistance = dist; partnerPhi = arrays.distanceBuffer.second[j]; } } } if (partnerIdx != INT_MAX) { if (arrays.decisionBuffer[i] < 0.4f) { auto phi = partnerPhi - partnerDistance; bool changed = phi > arrays.surface_levelLimit.val * 2.f * arrays.radius; if (arrays.distanceBuffer.second[i] != phi && changed) { cuda_atomic<float> change(arrays.changeBuffer); change.add(1.f); arrays.distanceBuffer.second[i] = phi; } arrays.markerBuffer[i] = changed ? 
0.5f : 0.1f; } } } if (arrays.markerBuffer[i] < 0.4f) { arrays.surface_idxBuffer.second[t] = i; } else { arrays.surface_idxBuffer.second[t] = 0xFFFFFFFF; } } neighFunctionType phiSmooth(SPH::detection::Memory arrays) { checkedParticleIdx(i); cache_arrays((pos, position), (vol, volume)); arrays.markerBuffer[i] = arrays.decisionBuffer[i]; auto phiSum = 0.0_m; auto counter = 0.f; iterateNeighbors(j) { counter++; phiSum += arrays.distanceBuffer.second[j] * W_ij * vol[j]; // / arrays.density[neigh]; } //SWH2<SPH::detection::Memory> swh(arrays, pos[i], vol[i]); auto POS = planeBoundary::distance(pos[i], vol[i], arrays); auto r = math::power<ratio<1, 3>>(arrays.volume[i] * PI4O3_1); auto phiOld = arrays.distanceBuffer.first[i]; if (POS.val.w < 1e20f || counter < 5) phiSum = arrays.distanceBuffer.second[i]; phiSum = math::clamp(phiSum, phiOld - 2.0f * r, phiOld + 2.0f * r); phiSum = math::clamp(phiSum, arrays.surface_levelLimit, 0.f); arrays.distanceBuffer.first[i] = math::max(phiSum, arrays.surface_levelLimit); } basicFunction(correct, correctEstimate, "Surface: correct Distance"); neighFunction(propagate, propagateSurface, "Surface: Distance iteration"); neighFunction(smooth, phiSmooth, "Surface: smooth Distance", caches<float4, float>{}); struct is_set { hostDeviceInline bool operator()(const int x) { return x != -1; } }; neighFunction(estimate, estimateNormal, "Surface: estimate Normal", caches<float4, float>{}); neighFunction(detect, detectSurface, "Surface: detect surface"); void SPH::detection::distance(Memory mem) { if (mem.num_ptcls == 0) return; int32_t diff = 0; auto compact_idx = [&]() { diff = (int32_t)algorithm::copy_if(arrays::surface_idxBuffer::rear_ptr, arrays::surface_idxBuffer::ptr, mem.num_ptcls, is_set()); cuda::Memset(mem.surface_idxBuffer.second, 0xFF, sizeof(int32_t) * mem.num_ptcls); }; cuda::Memset(mem.surface_idxBuffer.second, 0xFF, sizeof(int32_t) * mem.num_ptcls); launch<estimate>(mem.num_ptcls, mem); launch<detect>(mem.num_ptcls, mem); launch<correct>(mem.num_ptcls, mem); compact_idx(); int32_t it = 0; do { cuda::Memset(mem.changeBuffer, 0x00, sizeof(float)); launch<propagate>(diff, mem, diff); cuda::memcpy(&mem.surface_phiChange, mem.changeBuffer, sizeof(float), hipMemcpyDeviceToHost); cuda::memcpy(mem.decisionBuffer, mem.markerBuffer, sizeof(float) * mem.num_ptcls); it++; if (it % 4 == 0) compact_idx(); } while (mem.surface_phiChange >= 0.5f); *parameters::surface_iterations::ptr = it; launch<smooth>(mem.num_ptcls, mem); *parameters::surface_phiMin::unit_ptr = algorithm::reduce_max(mem.distanceBuffer.first, mem.num_ptcls); }
9d5e2e27868a7cff962e622dfb89dd0943410e9e.cu
#include <SPH/surface/surfaceDetection.cuh> #include <utility/include_all.h> neighFunctionType estimateNormal(SPH::detection::Memory arrays) { checkedParticleIdx(i); cache_arrays((pos, position), (vol, volume)); float4 normal{ 0.f,0.f,0.f,0.f }; iterateNeighbors(j){ if (i == j) continue; auto distance = pos[i] - pos[j]; normal += math::normalize3(distance); //normal += -arrays.volume[j] / arrays.density[j] * GW_ij; } arrays.particleNormal[i] = math::normalize3(math::castTo<float4>(normal)); } neighFunctionType detectSurface(SPH::detection::Memory arrays) { checkedParticleIdx(i); float4 normal = arrays.particleNormal[i]; bool state = false; iterateNeighbors(j) { if (i == j) continue; auto distance = arrays.position[j] - arrays.position[i]; auto angle = acosf(math::dot3(normal, math::normalize3(distance)).val); state = state || angle <= CUDART_PI_F / 6.f; } auto V0 = 4.f / 3.f * CUDART_PI_F * math::cubic(arrays.radius); auto d = planeBoundary::distance(arrays.position[i], arrays.volume[i], arrays); auto db = d; //auto dist = math::planeDistance(E, arrays.position[i]); auto angle = acosf(math::dot3(normal, -math::normalize3(d)).val); auto h0 = support_from_volume(V0) * kernelSize() * 2.f; arrays.debugArray[i] = float4{ 0.f, 0.f, 0.f, h0.val / 2.f * 1.1f }; if (d.val.w < h0) { state = state || angle <= CUDART_PI_F / 2.f; arrays.debugArray[i] = float4{ d.val.x, d.val.y, d.val.z, d.val.w }; auto x = d.val.w; auto h = support_from_volume(arrays.volume[i]); auto H = h.val * kernelSize(); auto xRel = math::clamp((x + H) / (2.f * H), 0.f, 1.f) * ((float)arrays.boundaryLUTSize - 1.f); auto xL = math::floorf(xRel); auto xH = math::ceilf(xRel); auto xD = xRel - xL; int32_t xLi = math::clamp(static_cast<int32_t>(xL), 0, (int32_t)arrays.boundaryLUTSize - 1); int32_t xHi = math::clamp(static_cast<int32_t>(xH), 0, (int32_t)arrays.boundaryLUTSize - 1); auto lL = arrays.splineLUT[xLi]; auto lH = arrays.splineLUT[xHi]; auto val = lL * xD + (1.f - xD) * lH; arrays.debugArray[i] = float4{ val.val, xRel, boundary::g(d, h), x }; arrays.debugArray[i] = db.val; //arrays.debugArray[i] = boundary::splineGradient(arrays.position[i], arrays.volume[i], uFloat<>{0.5f}, arrays, boundary::kind::plane, -1).val; } for (int32_t v = 0; v < arrays.volumeBoundaryCounter; ++v) { auto d = volumeBoundary::distance_fn(arrays.position[i], arrays.volume[i], arrays,v); //auto dist = math::planeDistance(E, arrays.position[i]); auto angle = acosf(math::dot3(normal, -math::normalize3(d)).val); auto h0 = support_from_volume(V0) * kernelSize() * 2.f; //arrays.debugArray[i] = float4{ 0.f, 0.f, 0.f, h0.val / 2.f * 1.05f }; if (d.val.w < h0 && d.val.w < db.val.w) { //state = state || angle <= CUDART_PI_F / 2.f; auto x = d.val.w; auto h = support_from_volume(arrays.volume[i]); auto H = h.val * kernelSize(); auto xRel = math::clamp((x + H) / (2.f * H), 0.f, 1.f) * ((float)arrays.boundaryLUTSize - 1.f); auto xL = math::floorf(xRel); auto xH = math::ceilf(xRel); auto xD = xRel - xL; int32_t xLi = math::clamp(static_cast<int32_t>(xL), 0, (int32_t)arrays.boundaryLUTSize - 1); int32_t xHi = math::clamp(static_cast<int32_t>(xH), 0, (int32_t)arrays.boundaryLUTSize - 1); auto lL = arrays.splineLUT[xLi]; auto lH = arrays.splineLUT[xHi]; auto val = lL * xD + (1.f - xD) * lH; arrays.debugArray[i] = float4{ val.val, xRel, boundary::g(d, h), x }; arrays.debugArray[i] = d.val; //arrays.debugArray[i] = float4{ // boundary::g(d,h), // boundary::lookupGradient(arrays, arrays.splineGradientLUT, arrays.boundaryLUTSize, d, arrays.volume[i], uFloat<>{0.5f}, 
support_from_volume(arrays.volume[i]),0.f).val, // boundary::dg(d,h), // boundary::lookupValue(arrays, arrays.splineLUT, arrays.boundaryLUTSize, d, arrays.volume[i], uFloat<>{0.5f}, support_from_volume(arrays.volume[i]),0.f).val //}; //arrays.debugArray[i] = boundary::internal::lookupGradient(arrays.splineLUT, arrays.splineGradientLUT, arrays.position[i], arrays.volume[i], uFloat<>{0.5f}, arrays, boundary::kind::volume, v).val; //arrays.debugArray[i] = boundary::splineGradient(arrays.position[i], arrays.volume[i], uFloat<>{0.5f}, arrays, boundary::kind::volume, v).val; } } if (arrays.neighborListLength[i] < 5) state = false; iterateBoundaryPlanes(E) { auto dist = math::planeDistance(E, arrays.position[i]); if ( (dist.val < math::unit_get<1>(arrays.surface_distanceFieldDistances).val && fabsf(math::dot3(E, float4_u<>{1.f, 0.f, 0.f, 0.f}).val) > 0.5f) || (dist.val < math::unit_get<2>(arrays.surface_distanceFieldDistances).val && fabsf(math::dot3(E, float4_u<>{0.f, 1.f, 0.f, 0.f}).val) > 0.5f) || (dist.val < math::unit_get<3>(arrays.surface_distanceFieldDistances).val && fabsf(math::dot3(E, float4_u<>{0.f, 0.f, 1.f, 0.f}).val) > 0.5f && E.val.z > 0.f)) { //printf("b"); state = true; } } //for (int32_t b = 0; b < arrays.volumeBoundaryCounter; ++b) { // auto vos = volumeBoundary::volumeDistanceFn(arrays.position[i], arrays, b); // if (vos.val.w < HforV1 && math::dot3(vos, normal) < 0.f) // state = true; //} auto r = math::power<ratio<1, 3>>(arrays.volume[i] * PI4O3_1); auto phi = state ? arrays.surface_levelLimit : 0.f; auto phiOld = arrays.distanceBuffer.first[i]; phi = state ? arrays.surface_levelLimit : phi; phi = math::clamp(phi, arrays.surface_levelLimit, 0.f); phi = math::clamp(phi, phiOld - 1.0f * r , phiOld + 1.0f * r ); arrays.distanceBuffer.second[i] = phi; arrays.distanceBuffer.first[i] = phiOld; } basicFunctionType correctEstimate(SPH::detection::Memory arrays) { checkedParticleIdx(i); auto r = math::power<ratio<1, 3>>(arrays.volume[i] * PI4O3_1); auto phi = arrays.distanceBuffer.second[i]; arrays.distanceBuffer.second[i] = phi; arrays.decisionBuffer[i] = phi >= -0.85f * r ? 1.f : 0.f; arrays.markerBuffer[i] = phi >= -0.85f * r ? 1.f : 0.f; if (arrays.markerBuffer[i] < 0.4f) { arrays.surface_idxBuffer.second[i] = i; } } neighFunctionType propagateSurface(SPH::detection::Memory arrays, int32_t threads) { checkedThreadIdx(t); alias_arrays((pos, position)); int32_t i = arrays.surface_idxBuffer.first[t]; if (i == INT_MIN) return; int32_t partnerIdx = INT_MAX; float_u<SI::m> partnerDistance{ FLT_MAX }; auto partnerPhi = 0.0_m; //auto r = math::power<ratio<1, 3>>(arrays.volume[i] * PI4O3_1); arrays.markerBuffer[i] = arrays.decisionBuffer[i]; float marker = arrays.markerBuffer[i]; iterateNeighbors(j) { if (W_ij > 0.f) { if (j == i) continue; float neighbor_decision = arrays.decisionBuffer[j]; if (neighbor_decision > 0.2f && marker < 0.05f) { auto dist = math::abs(math::distance3(pos[i], pos[j])); if (dist < partnerDistance) { partnerIdx = j; partnerDistance = dist; partnerPhi = arrays.distanceBuffer.second[j]; } } } if (partnerIdx != INT_MAX) { if (arrays.decisionBuffer[i] < 0.4f) { auto phi = partnerPhi - partnerDistance; bool changed = phi > arrays.surface_levelLimit.val * 2.f * arrays.radius; if (arrays.distanceBuffer.second[i] != phi && changed) { cuda_atomic<float> change(arrays.changeBuffer); change.add(1.f); arrays.distanceBuffer.second[i] = phi; } arrays.markerBuffer[i] = changed ? 
0.5f : 0.1f; } } } if (arrays.markerBuffer[i] < 0.4f) { arrays.surface_idxBuffer.second[t] = i; } else { arrays.surface_idxBuffer.second[t] = 0xFFFFFFFF; } } neighFunctionType phiSmooth(SPH::detection::Memory arrays) { checkedParticleIdx(i); cache_arrays((pos, position), (vol, volume)); arrays.markerBuffer[i] = arrays.decisionBuffer[i]; auto phiSum = 0.0_m; auto counter = 0.f; iterateNeighbors(j) { counter++; phiSum += arrays.distanceBuffer.second[j] * W_ij * vol[j]; // / arrays.density[neigh]; } //SWH2<SPH::detection::Memory> swh(arrays, pos[i], vol[i]); auto POS = planeBoundary::distance(pos[i], vol[i], arrays); auto r = math::power<ratio<1, 3>>(arrays.volume[i] * PI4O3_1); auto phiOld = arrays.distanceBuffer.first[i]; if (POS.val.w < 1e20f || counter < 5) phiSum = arrays.distanceBuffer.second[i]; phiSum = math::clamp(phiSum, phiOld - 2.0f * r, phiOld + 2.0f * r); phiSum = math::clamp(phiSum, arrays.surface_levelLimit, 0.f); arrays.distanceBuffer.first[i] = math::max(phiSum, arrays.surface_levelLimit); } basicFunction(correct, correctEstimate, "Surface: correct Distance"); neighFunction(propagate, propagateSurface, "Surface: Distance iteration"); neighFunction(smooth, phiSmooth, "Surface: smooth Distance", caches<float4, float>{}); struct is_set { hostDeviceInline bool operator()(const int x) { return x != -1; } }; neighFunction(estimate, estimateNormal, "Surface: estimate Normal", caches<float4, float>{}); neighFunction(detect, detectSurface, "Surface: detect surface"); void SPH::detection::distance(Memory mem) { if (mem.num_ptcls == 0) return; int32_t diff = 0; auto compact_idx = [&]() { diff = (int32_t)algorithm::copy_if(arrays::surface_idxBuffer::rear_ptr, arrays::surface_idxBuffer::ptr, mem.num_ptcls, is_set()); cuda::Memset(mem.surface_idxBuffer.second, 0xFF, sizeof(int32_t) * mem.num_ptcls); }; cuda::Memset(mem.surface_idxBuffer.second, 0xFF, sizeof(int32_t) * mem.num_ptcls); launch<estimate>(mem.num_ptcls, mem); launch<detect>(mem.num_ptcls, mem); launch<correct>(mem.num_ptcls, mem); compact_idx(); int32_t it = 0; do { cuda::Memset(mem.changeBuffer, 0x00, sizeof(float)); launch<propagate>(diff, mem, diff); cuda::memcpy(&mem.surface_phiChange, mem.changeBuffer, sizeof(float), cudaMemcpyDeviceToHost); cuda::memcpy(mem.decisionBuffer, mem.markerBuffer, sizeof(float) * mem.num_ptcls); it++; if (it % 4 == 0) compact_idx(); } while (mem.surface_phiChange >= 0.5f); *parameters::surface_iterations::ptr = it; launch<smooth>(mem.num_ptcls, mem); *parameters::surface_phiMin::unit_ptr = algorithm::reduce_max(mem.distanceBuffer.first, mem.num_ptcls); }
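The .hip and .cu halves of this SPH surface-detection record appear to differ only in the hipify banner and in the transfer-kind enumerator passed to the project's cuda::memcpy wrapper, where cudaMemcpyDeviceToHost becomes hipMemcpyDeviceToHost; everything else, including the cuda:: wrapper names themselves, is left alone. The sketch below shows that enumerator mapping on a plain hipMemcpy round trip; the project wrapper is not reproduced here.

#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    float host_val = 0.0f;
    const float seed = 42.0f;
    float *dev_val;
    hipMalloc((void **)&dev_val, sizeof(float));
    hipMemcpy(dev_val, &seed, sizeof(float), hipMemcpyHostToDevice);      // CUDA: cudaMemcpyHostToDevice
    hipMemcpy(&host_val, dev_val, sizeof(float), hipMemcpyDeviceToHost);  // CUDA: cudaMemcpyDeviceToHost
    printf("round trip: %.1f\n", host_val);
    hipFree(dev_val);
    return 0;
}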
fd0b9b32b85e48692a6bb15251cccaa5f37a4a72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Created on: 2019 * Author: Eduardo Xavier * * */ #include "BRKGA.h" /*** Constructor ***/ BRKGA::BRKGA(unsigned n, unsigned p, float pe, float pm, float rhoe, unsigned K, unsigned decode_type, unsigned NUM_THREADS, unsigned RAND_SEED){ if(p%THREADS_PER_BLOCK != 0){ //round population size to a multiple of THREADS_PER_BLOCK p = ((p/THREADS_PER_BLOCK)+1)*THREADS_PER_BLOCK; } //set to the maximum number of blocks allowed in CUDA compute capability 2.0 if(K > (unsigned)(2<<30)){ K = (unsigned) 2<<30; } this->population_size = p; this->number_populations = K; this->number_chromosomes = p * K; this->chromosome_size = n; this->elite_size = (unsigned)(pe*p); this->mutants_size = (unsigned)(pm*p); this->rhoe = rhoe; this->decode_type = decode_type; this->NUM_THREADS = NUM_THREADS; using std::range_error; if(chromosome_size == 0) { throw range_error("Chromosome size equals zero."); } if(population_size == 0) { throw range_error("Population size equals zero."); } if(elite_size == 0) { throw range_error("Elite-set size equals zero."); } if(elite_size + mutants_size > population_size) { throw range_error("elite + mutant sets greater than population size (p)."); } if(number_populations == 0) { throw range_error("Number of parallel populations cannot be zero."); } long unsigned total_memory=0; // Allocate a float array representing all K populations on host and device h_population = (float *)malloc(number_chromosomes*chromosome_size*sizeof(float)); total_memory += number_chromosomes*chromosome_size*sizeof(float); test_memory_malloc(hipMalloc((void **)&d_population, number_chromosomes*chromosome_size*sizeof(float)), 1, total_memory); total_memory += number_chromosomes*chromosome_size*sizeof(float); test_memory_malloc(hipMalloc((void **)&d_population2, number_chromosomes*chromosome_size*sizeof(float)), 2, total_memory); total_memory += number_chromosomes*sizeof(float); // Allocate an array representing the scores of each chromosome on host and device h_scores = (float *)malloc(number_chromosomes*sizeof(float)); test_memory_malloc(hipMalloc((void **)&d_scores, number_chromosomes*sizeof(float)), 3, total_memory); total_memory += number_chromosomes*sizeof(PopIdxThreadIdxPair); // Allocate an array representing the indices of each chromosome on host and device h_scores_idx = (PopIdxThreadIdxPair *)malloc(number_chromosomes*sizeof(PopIdxThreadIdxPair)); test_memory_malloc(hipMalloc((void **)&d_scores_idx, number_chromosomes*sizeof(PopIdxThreadIdxPair)), 4, total_memory); total_memory += number_chromosomes*chromosome_size*sizeof(ChromosomeGeneIdxPair); // Allocate an array representing the indices of each gene of each chromosome on host and device h_chromosome_gene_idx = (ChromosomeGeneIdxPair *)malloc(number_chromosomes*chromosome_size*sizeof(ChromosomeGeneIdxPair)); test_memory_malloc(hipMalloc((void **)&d_chromosome_gene_idx, number_chromosomes*chromosome_size*sizeof(ChromosomeGeneIdxPair)), 5, total_memory); total_memory += number_chromosomes*sizeof(float); test_memory_malloc(hipMalloc((void **)&d_random_elite_parent, number_chromosomes*sizeof(float)), 6, total_memory); total_memory += number_chromosomes*sizeof(float); test_memory_malloc(hipMalloc((void **)&d_random_parent, number_chromosomes*sizeof(float)), 7, total_memory); // Allocate a poll to save the POOL_SIZE best solutions, where the first value in each chromosome is the chromosome score h_best_solutions = (float 
*)malloc(POOL_SIZE*(chromosome_size+1)*sizeof(float)); test_memory_malloc(hipMalloc((void **)&d_best_solutions, POOL_SIZE*(chromosome_size+1)*sizeof(float)), 8, total_memory); printf("Total Memory Used In GPU %lu bytes(%lu Mbytes)\n", total_memory, total_memory/1000000); this->dimBlock.x = THREADS_PER_BLOCK; this->dimGrid.x = (population_size*number_populations)/THREADS_PER_BLOCK; // Create pseudo-random number generator hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT); // Set seed hiprandSetPseudoRandomGeneratorSeed(gen, RAND_SEED); //Initialize population with random alleles with generated random floats on device reset_population(); } BRKGA::~BRKGA(){ // Cleanup hiprandDestroyGenerator(gen); hipFree(d_population); hipFree(d_population2); free(h_population); hipFree(d_scores); free(h_scores); hipFree(d_scores_idx); free(h_scores_idx); hipFree(d_chromosome_gene_idx); free(h_chromosome_gene_idx); hipFree(d_random_elite_parent); hipFree(d_random_parent); hipFree(d_best_solutions); free(h_best_solutions); if(d_instance_info != NULL){ hipFree(d_instance_info); d_instance_info = NULL; } } void BRKGA::test_memory_malloc(hipError_t err, unsigned code, unsigned total_memory){ if(err != hipSuccess){ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(err)); fprintf(stderr, "In hipMalloc: %u with total memory %u\n", code, total_memory); exit(1); } } /*** Allocate information used to evaluate chromosomes on the device. It also receives the number of elements (num) in the array info and the size (size) of each element. Notice we assume the type of the info elements to be float. ***/ void BRKGA::setInstanceInfo(void *info, long unsigned num, long unsigned size){ if(info != NULL){ long unsigned total_memory = num*size; printf("Extra Memory Used In GPU due to Instance Info %lu bytes(%lu Mbytes)\n", total_memory, total_memory/1000000); if(decode_type == DEVICE_DECODE || decode_type == DEVICE_DECODE_CHROMOSOME_SORTED){ test_memory_malloc(hipMalloc((void **)&d_instance_info, num*size),8,total_memory); hipMemcpy(d_instance_info, info, num*size, hipMemcpyHostToDevice); } h_instance_info = info; } } /*** Generate random alleles for all chromosomes on GPGPU. ***/ void BRKGA::reset_population(void){ hiprandGenerateUniform(gen, d_population, number_chromosomes*chromosome_size); } /*** If HOST_DECODE is used then this function decodes each cromosome with the host_decode function provided in Decoder.cpp ***/ void BRKGA::evaluate_chromosomes_host(){ hipMemcpy(h_population, d_population, number_chromosomes*chromosome_size*sizeof(float),hipMemcpyDeviceToHost); #pragma omp parallel for default(none) shared(dimGrid,dimBlock,h_population,h_scores) collapse(2) num_threads(NUM_THREADS) for(int b=0; b < dimGrid.x ; b++){ for(int t=0; t < dimBlock.x; t++){ unsigned tx = b*dimBlock.x + t; //Found the thread index since each thread is associated with //a cromosome. float *chromosome = h_population + (tx*chromosome_size); h_scores[tx] = host_decode(chromosome, chromosome_size, h_instance_info); } } hipMemcpy(d_scores, h_scores, number_chromosomes*sizeof(float),hipMemcpyHostToDevice); } /*** If DEVICE_DECODE is used then this kernel function decodes each cromosome with the device_decode function provided in Decoder.cpp. We use one thread per cromosome to process them. 
***/ __global__ void decode(float *d_scores, float *d_population, int chromosome_size, void * d_instance_info){ unsigned global_tx = blockIdx.x*blockDim.x + threadIdx.x; d_scores[global_tx] = device_decode(d_population + global_tx*chromosome_size, chromosome_size, d_instance_info); } /*** If DEVICE_DECODE is used then this function decodes each cromosome with the kernel function decode above. ***/ void BRKGA::evaluate_chromosomes_device(){ //Make a copy of chromossomes to d_population2 such that they can be messed up inside //the decoder functions without afecting the real chromosomes on d_population. hipMemcpy(d_population2, d_population, number_chromosomes*chromosome_size*sizeof(float),hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( decode), dim3(dimGrid), dim3(dimBlock), 0, 0, d_scores, d_population2, chromosome_size, d_instance_info); } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used then this kernel function decodes each cromosome with the device_decode_chromosomeSorted function provided in Decoder.cpp. We use one thread per cromosome to process them. Notice that we use the struct ChromosomeGeneIdxPair since the cromosome is given already sorted to the function, and so it has a field with the original index of each gene in the cromosome. ***/ __global__ void decode_chromosomes_sorted(float *d_scores, ChromosomeGeneIdxPair *d_chromosome_gene_idx, int chromosome_size, void *d_instance_info){ unsigned global_tx = blockIdx.x*blockDim.x + threadIdx.x; d_scores[global_tx] = device_decode_chromosome_sorted(d_chromosome_gene_idx + global_tx*chromosome_size, chromosome_size, d_instance_info); } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used then this function decodes each cromosome with the kernel function decode_chromosomes_sorted above. But first we sort each chromosome by its genes values. We save this information in the struct ChromosomeGeneIdxPair d_chromosome_gene_idx. ***/ void BRKGA::evaluate_chromosomes_sorted_device(){ sort_chromosomes_genes(); hipLaunchKernelGGL(( decode_chromosomes_sorted), dim3(dimGrid), dim3(dimBlock), 0, 0, d_scores, d_chromosome_gene_idx, chromosome_size, d_instance_info); } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used. Kernel function used to save for each gene of each chromosome, the chromosome index, and the original gene index. Used later to sort all chromossomes by gene values. We save gene indexes to preserve this information after sorting. ***/ __global__ void device_set_chromosome_gene_idx(ChromosomeGeneIdxPair *d_chromosome_gene_idx, int chromosome_size){ int tx = blockIdx.x*blockDim.x + threadIdx.x; for(int i=0; i<chromosome_size; i++){ d_chromosome_gene_idx[tx*chromosome_size + i].chromosomeIdx = tx; d_chromosome_gene_idx[tx*chromosome_size + i].geneIdx = i; } } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used. Used as comparator to sort genes of the chromosomes. After sorting by gene we need to reagroup genes by their chromosomes which are indexed by threadIdx. ***/ __device__ bool operator<(const ChromosomeGeneIdxPair& lhs, const ChromosomeGeneIdxPair& rhs){ return lhs.chromosomeIdx < rhs.chromosomeIdx; } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used. We sort the genes of each chromosome. We perform 2 stable_sort sorts: first we sort all genes of all chromosomes by their values, and than we sort by the chromosome index, and since stable_sort is used, for each chromosome we will have its genes sorted by their values. 
***/ void BRKGA::sort_chromosomes_genes(){ //First set for each gene, its chromosome index and its original index in the chromosome hipLaunchKernelGGL(( device_set_chromosome_gene_idx), dim3(dimGrid), dim3(dimBlock), 0, 0, d_chromosome_gene_idx, chromosome_size); //we use d_population2 to sorte all genes by their values hipMemcpy(d_population2, d_population, number_chromosomes*chromosome_size*sizeof(float), hipMemcpyDeviceToDevice); thrust::device_ptr<float> keys(d_population2); thrust::device_ptr<ChromosomeGeneIdxPair> vals(d_chromosome_gene_idx); //stable sort both d_population2 and d_chromosome_gene_idx by all the genes values thrust::stable_sort_by_key(keys, keys + number_chromosomes*chromosome_size, vals); //stable sort both d_population2 and d_chromosome_gene_idx by the chromosome index values thrust::stable_sort_by_key(vals, vals + number_chromosomes*chromosome_size, keys); } /*** Kernel function, where each thread process one chromosome. It receives the current population *d_population, the next population pointer *d_population2, two random vectors for indices of parents, d_random_elite_parent and d_random_parent, ***/ __global__ void device_next_population(float *d_population, float *d_population2, float *d_random_elite_parent, float *d_random_parent, int chromosome_size, unsigned population_size, unsigned elite_size, unsigned mutants_size, float rhoe, PopIdxThreadIdxPair *d_scores_idx){ unsigned tx = blockIdx.x*blockDim.x + threadIdx.x; //global thread index unsigned chromosome_idx = tx*chromosome_size; unsigned pop_idx = (unsigned)tx/population_size; //the population index of this thread unsigned inside_pop_idx = tx%population_size; //below are the inside population random indexes of a elite parent and regular parent for crossover unsigned parent_elite_idx = (unsigned)(ceilf(d_random_elite_parent[tx]*elite_size)-1); unsigned parent_idx = (unsigned)(elite_size+ceilf(d_random_parent[tx]*(population_size-elite_size))-1); //if inside_pop_idx < elite_size then thread is elite, so we copy elite chromosome to the next population if(inside_pop_idx < elite_size){ unsigned elite_chromosome_idx = d_scores_idx[tx].thIdx*chromosome_size; for(int i=0; i<chromosome_size; i++) d_population2[chromosome_idx + i] = d_population[elite_chromosome_idx + i]; }else if(inside_pop_idx < population_size - mutants_size){ //if inside_pop_idex >= elite_size and inside < population_size - mutants_size //thread is responsible to crossover unsigned elite_chromosome_idx = d_scores_idx[pop_idx*population_size + parent_elite_idx].thIdx*chromosome_size; unsigned parent_chromosome_idx = d_scores_idx[pop_idx*population_size + parent_idx].thIdx*chromosome_size; for(int i=0; i<chromosome_size; i++){ if(d_population2[chromosome_idx + i] <= rhoe) //copy allele from elite parent d_population2[chromosome_idx + i] = d_population[elite_chromosome_idx + i]; else //copy allele from regular parent d_population2[chromosome_idx + i] = d_population[parent_chromosome_idx + i]; } }//in the else case the thread corresponds to a mutant and nothing is done. } /*** Main function of the BRKGA algorithm. It evolves K populations for a certain number of generations. 
***/ void BRKGA::evolve(int number_generations){ using std::domain_error; if(decode_type == DEVICE_DECODE){ evaluate_chromosomes_device(); }else if(decode_type == DEVICE_DECODE_CHROMOSOME_SORTED){ evaluate_chromosomes_sorted_device(); }else if(decode_type == HOST_DECODE){ evaluate_chromosomes_host(); }else{ throw domain_error("Function decode type is unknown"); } //After this call the vector d_scores_idx has all threads sorted by population, and //inside each population, threads are sorted by score sort_chromosomes(); //This call initialize the whole area of the next population d_population2 with random values. //So mutantes are already build. For the non mutants we use the //random values generated here to perform the crossover on the current population d_population. initialize_population(2); //generate random numbers to index parents used for crossover hiprandGenerateUniform(gen, d_random_elite_parent, number_chromosomes); hiprandGenerateUniform(gen, d_random_parent, number_chromosomes); //Kernel function, where each thread process one chromosome of the next population. hipLaunchKernelGGL(( device_next_population), dim3(dimGrid), dim3(dimBlock), 0, 0, d_population, d_population2, d_random_elite_parent, d_random_parent, chromosome_size, population_size, elite_size, mutants_size, rhoe, d_scores_idx); float *aux = d_population2; d_population2 = d_population; d_population = aux; } void BRKGA::initialize_population(int p){ if(p==1) hiprandGenerateUniform(gen, d_population, number_chromosomes*chromosome_size); if(p==2) hiprandGenerateUniform(gen, d_population2, number_chromosomes*chromosome_size); } /*** Kernel function that sets for each cromosome its global index (among all populations) and its population index. ***/ __global__ void device_set_idx(PopIdxThreadIdxPair *d_scores_idx, int population_size){ int tx = blockIdx.x*blockDim.x + threadIdx.x; d_scores_idx[tx].popIdx = tx/population_size; d_scores_idx[tx].thIdx = tx; } /*** Function used to sort chromosomes by population index ***/ __device__ bool operator<(const PopIdxThreadIdxPair& lhs, const PopIdxThreadIdxPair& rhs){ return lhs.popIdx < rhs.popIdx; } /*** We sort chromosomes for each population. We use the thread index to index each population, and perform 2 stable_sort sorts: first we sort by the chromosome scores, and than by their population index, and since stable_sort is used in each population the chromosomes are sorted by scores. ***/ void BRKGA::sort_chromosomes(){ //For each thread we store in d_scores_idx the global chromosome index and its population index. hipLaunchKernelGGL(( device_set_idx), dim3(dimGrid), dim3(dimBlock), 0, 0, d_scores_idx, population_size); thrust::device_ptr<float> keys(d_scores); thrust::device_ptr<PopIdxThreadIdxPair> vals(d_scores_idx); //now sort all chromosomes by their scores (vals) thrust::stable_sort_by_key(keys, keys + number_chromosomes, vals); //now sort all chromossomes by their population index //in the sorting process it is used operator< above to compare two structs of this type thrust::stable_sort_by_key(vals, vals + number_chromosomes, keys); } /*** Kernel function to operate the exchange of elite chromosomes. It was launched M*number_populations threads. For each population each one of M threads do the copy of an elite chromosome of its own population into the other populations. 
To do: make kernel save in local memory the chromosome and then copy to each other population ***/ __global__ void device_exchange_elite(float *d_population, int chromosome_size, unsigned population_size, unsigned number_populations, PopIdxThreadIdxPair *d_scores_idx, unsigned M){ unsigned tx = threadIdx.x; //this thread value between 0 and M-1 unsigned pop_idx = blockIdx.x; //this thread population index, a value between 0 and number_populations-1 unsigned elite_idx = pop_idx*population_size + tx; unsigned elite_chromosome_idx = d_scores_idx[elite_idx].thIdx; unsigned inside_destiny_idx = population_size-1-(M*pop_idx)-tx;//index of the destiny of this thread inside each population for(int i=0; i<number_populations; i++){ if(i != pop_idx){ unsigned destiny_chromosome_idx = d_scores_idx[i*population_size + inside_destiny_idx].thIdx; for(int j=0; j<chromosome_size;j++) d_population[destiny_chromosome_idx*chromosome_size + j] = d_population[elite_chromosome_idx*chromosome_size + j]; } } } /*** Exchange M individuals among the different populations. ***/ void BRKGA::exchangeElite(unsigned M){ using std::range_error; if(M > elite_size) { throw range_error("Exchange elite size M greater than elite size."); } if(M*number_populations > population_size) { throw range_error("Total exchange elite size greater than population size."); } using std::domain_error; if(decode_type == DEVICE_DECODE){ evaluate_chromosomes_device(); }else if(decode_type == DEVICE_DECODE_CHROMOSOME_SORTED){ evaluate_chromosomes_sorted_device(); }else if(decode_type == HOST_DECODE){ evaluate_chromosomes_host(); } else{ throw domain_error("Function decode type is unknown"); } sort_chromosomes(); hipLaunchKernelGGL(( device_exchange_elite), dim3(number_populations), dim3(M), 0, 0, d_population, chromosome_size, population_size, number_populations, d_scores_idx, M); } /*** Return a vector of vectors, where each line vector corresponds to a chromosome, where in position 0 we have its score and in positions 1 to chromosome_size the aleles values ***/ std::vector<std::vector <float>> BRKGA::getkBestChromosomes(unsigned k){ std::vector<std::vector <float>> ret(k, std::vector<float>(chromosome_size+1)); global_sort_chromosomes(); hipMemcpy(h_scores_idx, d_scores_idx, number_chromosomes*sizeof(PopIdxThreadIdxPair),hipMemcpyDeviceToHost); hipMemcpy(h_scores, d_scores, number_chromosomes*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(h_population, d_population, number_chromosomes*chromosome_size*sizeof(float),hipMemcpyDeviceToHost); for(int i=0; i<k; i++){ unsigned tx = h_scores_idx[i].thIdx; float *begin = &h_population[tx*chromosome_size]; ret[i][0] = h_scores[i]; for(int u=1; u <= chromosome_size; u++){ ret[i][u] = begin[u-1]; } } return ret; } /*** Return a vector of vectors, where each line vector corresponds to a chromosome, where in position 0 we have its score and in positions 1 to chromosome_size the aleles values ***/ std::vector<std::vector <float>> BRKGA::getkBestChromosomes2(unsigned k){ if(k>POOL_SIZE) k=POOL_SIZE; std::vector<std::vector <float>> ret(k, std::vector<float>(chromosome_size+1)); saveBestChromosomes(); hipMemcpy(h_best_solutions, d_best_solutions, POOL_SIZE*(chromosome_size+1)*sizeof(float),hipMemcpyDeviceToHost); for(int i=0; i<k; i++){ for(int j=0; j <= chromosome_size; j++){ ret[i][j] = h_best_solutions[i*(chromosome_size+1) + j]; } } return ret; } __global__ void device_save_best_chromosomes(float *d_population, unsigned chromosome_size, PopIdxThreadIdxPair *d_scores_idx, float *d_best_solutions, float 
*d_scores, unsigned best_saved){ if(!best_saved){//this is the first time saving best solutions in to the pool for(int i=0; i<POOL_SIZE; i++){ unsigned tx = d_scores_idx[i].thIdx; float *begin = (float *)&d_population[tx*chromosome_size]; d_best_solutions[i*(chromosome_size+1)] = d_scores[i]; //save the value of the chromosome for(int j=1; j <= chromosome_size; j++){ //save the chromosome d_best_solutions[i*(chromosome_size+1)+j] = begin[j-1]; } } }else{//Since best solutions were already saved //only save now if the i-th best current solution is better than the i-th best overall for(int i=0; i<POOL_SIZE; i++){ unsigned tx = d_scores_idx[i].thIdx; float *begin = (float *)&d_population[tx*chromosome_size]; if(d_scores[i] < d_best_solutions[i*(chromosome_size+1)]){ d_best_solutions[i*(chromosome_size+1)] = d_scores[i]; for(int j=1; j <= chromosome_size; j++){ d_best_solutions[i*(chromosome_size+1)+j] = begin[j-1]; } } } } } /*** This Function saves in the pool d_best_solutions and h_best_solutions the best solutions generated so far among all populations. ***/ void BRKGA::saveBestChromosomes(){ global_sort_chromosomes(); hipLaunchKernelGGL(( device_save_best_chromosomes), dim3(1), dim3(1), 0, 0, d_population, chromosome_size, d_scores_idx, d_best_solutions, d_scores, best_saved); best_saved = 1; } /*** We sort all chromosomes of all populations toguether. We use the global thread index to index each chromosome, since each thread is responsible for one thread. Notice that in this function we only perform one sort, since we want the best chromosomes overall, so we do not perform a second sort to separate chromosomes by their population. ***/ void BRKGA::global_sort_chromosomes(){ using std::domain_error; if(decode_type == DEVICE_DECODE){ evaluate_chromosomes_device(); }else if(decode_type == DEVICE_DECODE_CHROMOSOME_SORTED){ evaluate_chromosomes_sorted_device(); }else if(decode_type == HOST_DECODE){ evaluate_chromosomes_host(); } else{ throw domain_error("Function decode type is unknown"); } hipLaunchKernelGGL(( device_set_idx), dim3(dimGrid), dim3(dimBlock), 0, 0, d_scores_idx, population_size); thrust::device_ptr<float> keys(d_scores); thrust::device_ptr<PopIdxThreadIdxPair> vals(d_scores_idx); thrust::sort_by_key(keys, keys + number_chromosomes, vals); }
fd0b9b32b85e48692a6bb15251cccaa5f37a4a72.cu
/* * * Created on: 2019 * Author: Eduardo Xavier * * */ #include "BRKGA.h" /*** Constructor ***/ BRKGA::BRKGA(unsigned n, unsigned p, float pe, float pm, float rhoe, unsigned K, unsigned decode_type, unsigned NUM_THREADS, unsigned RAND_SEED){ if(p%THREADS_PER_BLOCK != 0){ //round population size to a multiple of THREADS_PER_BLOCK p = ((p/THREADS_PER_BLOCK)+1)*THREADS_PER_BLOCK; } //set to the maximum number of blocks allowed in CUDA compute capability 2.0 if(K > (unsigned)(2<<30)){ K = (unsigned) 2<<30; } this->population_size = p; this->number_populations = K; this->number_chromosomes = p * K; this->chromosome_size = n; this->elite_size = (unsigned)(pe*p); this->mutants_size = (unsigned)(pm*p); this->rhoe = rhoe; this->decode_type = decode_type; this->NUM_THREADS = NUM_THREADS; using std::range_error; if(chromosome_size == 0) { throw range_error("Chromosome size equals zero."); } if(population_size == 0) { throw range_error("Population size equals zero."); } if(elite_size == 0) { throw range_error("Elite-set size equals zero."); } if(elite_size + mutants_size > population_size) { throw range_error("elite + mutant sets greater than population size (p)."); } if(number_populations == 0) { throw range_error("Number of parallel populations cannot be zero."); } long unsigned total_memory=0; // Allocate a float array representing all K populations on host and device h_population = (float *)malloc(number_chromosomes*chromosome_size*sizeof(float)); total_memory += number_chromosomes*chromosome_size*sizeof(float); test_memory_malloc(cudaMalloc((void **)&d_population, number_chromosomes*chromosome_size*sizeof(float)), 1, total_memory); total_memory += number_chromosomes*chromosome_size*sizeof(float); test_memory_malloc(cudaMalloc((void **)&d_population2, number_chromosomes*chromosome_size*sizeof(float)), 2, total_memory); total_memory += number_chromosomes*sizeof(float); // Allocate an array representing the scores of each chromosome on host and device h_scores = (float *)malloc(number_chromosomes*sizeof(float)); test_memory_malloc(cudaMalloc((void **)&d_scores, number_chromosomes*sizeof(float)), 3, total_memory); total_memory += number_chromosomes*sizeof(PopIdxThreadIdxPair); // Allocate an array representing the indices of each chromosome on host and device h_scores_idx = (PopIdxThreadIdxPair *)malloc(number_chromosomes*sizeof(PopIdxThreadIdxPair)); test_memory_malloc(cudaMalloc((void **)&d_scores_idx, number_chromosomes*sizeof(PopIdxThreadIdxPair)), 4, total_memory); total_memory += number_chromosomes*chromosome_size*sizeof(ChromosomeGeneIdxPair); // Allocate an array representing the indices of each gene of each chromosome on host and device h_chromosome_gene_idx = (ChromosomeGeneIdxPair *)malloc(number_chromosomes*chromosome_size*sizeof(ChromosomeGeneIdxPair)); test_memory_malloc(cudaMalloc((void **)&d_chromosome_gene_idx, number_chromosomes*chromosome_size*sizeof(ChromosomeGeneIdxPair)), 5, total_memory); total_memory += number_chromosomes*sizeof(float); test_memory_malloc(cudaMalloc((void **)&d_random_elite_parent, number_chromosomes*sizeof(float)), 6, total_memory); total_memory += number_chromosomes*sizeof(float); test_memory_malloc(cudaMalloc((void **)&d_random_parent, number_chromosomes*sizeof(float)), 7, total_memory); // Allocate a poll to save the POOL_SIZE best solutions, where the first value in each chromosome is the chromosome score h_best_solutions = (float *)malloc(POOL_SIZE*(chromosome_size+1)*sizeof(float)); test_memory_malloc(cudaMalloc((void **)&d_best_solutions, 
POOL_SIZE*(chromosome_size+1)*sizeof(float)), 8, total_memory); printf("Total Memory Used In GPU %lu bytes(%lu Mbytes)\n", total_memory, total_memory/1000000); this->dimBlock.x = THREADS_PER_BLOCK; this->dimGrid.x = (population_size*number_populations)/THREADS_PER_BLOCK; // Create pseudo-random number generator curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); // Set seed curandSetPseudoRandomGeneratorSeed(gen, RAND_SEED); //Initialize population with random alleles with generated random floats on device reset_population(); } BRKGA::~BRKGA(){ // Cleanup curandDestroyGenerator(gen); cudaFree(d_population); cudaFree(d_population2); free(h_population); cudaFree(d_scores); free(h_scores); cudaFree(d_scores_idx); free(h_scores_idx); cudaFree(d_chromosome_gene_idx); free(h_chromosome_gene_idx); cudaFree(d_random_elite_parent); cudaFree(d_random_parent); cudaFree(d_best_solutions); free(h_best_solutions); if(d_instance_info != NULL){ cudaFree(d_instance_info); d_instance_info = NULL; } } void BRKGA::test_memory_malloc(cudaError_t err, unsigned code, unsigned total_memory){ if(err != cudaSuccess){ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err)); fprintf(stderr, "In cudaMalloc: %u with total memory %u\n", code, total_memory); exit(1); } } /*** Allocate information used to evaluate chromosomes on the device. It also receives the number of elements (num) in the array info and the size (size) of each element. Notice we assume the type of the info elements to be float. ***/ void BRKGA::setInstanceInfo(void *info, long unsigned num, long unsigned size){ if(info != NULL){ long unsigned total_memory = num*size; printf("Extra Memory Used In GPU due to Instance Info %lu bytes(%lu Mbytes)\n", total_memory, total_memory/1000000); if(decode_type == DEVICE_DECODE || decode_type == DEVICE_DECODE_CHROMOSOME_SORTED){ test_memory_malloc(cudaMalloc((void **)&d_instance_info, num*size),8,total_memory); cudaMemcpy(d_instance_info, info, num*size, cudaMemcpyHostToDevice); } h_instance_info = info; } } /*** Generate random alleles for all chromosomes on GPGPU. ***/ void BRKGA::reset_population(void){ curandGenerateUniform(gen, d_population, number_chromosomes*chromosome_size); } /*** If HOST_DECODE is used then this function decodes each cromosome with the host_decode function provided in Decoder.cpp ***/ void BRKGA::evaluate_chromosomes_host(){ cudaMemcpy(h_population, d_population, number_chromosomes*chromosome_size*sizeof(float),cudaMemcpyDeviceToHost); #pragma omp parallel for default(none) shared(dimGrid,dimBlock,h_population,h_scores) collapse(2) num_threads(NUM_THREADS) for(int b=0; b < dimGrid.x ; b++){ for(int t=0; t < dimBlock.x; t++){ unsigned tx = b*dimBlock.x + t; //Found the thread index since each thread is associated with //a cromosome. float *chromosome = h_population + (tx*chromosome_size); h_scores[tx] = host_decode(chromosome, chromosome_size, h_instance_info); } } cudaMemcpy(d_scores, h_scores, number_chromosomes*sizeof(float),cudaMemcpyHostToDevice); } /*** If DEVICE_DECODE is used then this kernel function decodes each cromosome with the device_decode function provided in Decoder.cpp. We use one thread per cromosome to process them. 
***/ __global__ void decode(float *d_scores, float *d_population, int chromosome_size, void * d_instance_info){ unsigned global_tx = blockIdx.x*blockDim.x + threadIdx.x; d_scores[global_tx] = device_decode(d_population + global_tx*chromosome_size, chromosome_size, d_instance_info); } /*** If DEVICE_DECODE is used then this function decodes each cromosome with the kernel function decode above. ***/ void BRKGA::evaluate_chromosomes_device(){ //Make a copy of chromossomes to d_population2 such that they can be messed up inside //the decoder functions without afecting the real chromosomes on d_population. cudaMemcpy(d_population2, d_population, number_chromosomes*chromosome_size*sizeof(float),cudaMemcpyDeviceToDevice); decode<<<dimGrid, dimBlock>>>(d_scores, d_population2, chromosome_size, d_instance_info); } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used then this kernel function decodes each cromosome with the device_decode_chromosomeSorted function provided in Decoder.cpp. We use one thread per cromosome to process them. Notice that we use the struct ChromosomeGeneIdxPair since the cromosome is given already sorted to the function, and so it has a field with the original index of each gene in the cromosome. ***/ __global__ void decode_chromosomes_sorted(float *d_scores, ChromosomeGeneIdxPair *d_chromosome_gene_idx, int chromosome_size, void *d_instance_info){ unsigned global_tx = blockIdx.x*blockDim.x + threadIdx.x; d_scores[global_tx] = device_decode_chromosome_sorted(d_chromosome_gene_idx + global_tx*chromosome_size, chromosome_size, d_instance_info); } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used then this function decodes each cromosome with the kernel function decode_chromosomes_sorted above. But first we sort each chromosome by its genes values. We save this information in the struct ChromosomeGeneIdxPair d_chromosome_gene_idx. ***/ void BRKGA::evaluate_chromosomes_sorted_device(){ sort_chromosomes_genes(); decode_chromosomes_sorted<<<dimGrid, dimBlock>>>(d_scores, d_chromosome_gene_idx, chromosome_size, d_instance_info); } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used. Kernel function used to save for each gene of each chromosome, the chromosome index, and the original gene index. Used later to sort all chromossomes by gene values. We save gene indexes to preserve this information after sorting. ***/ __global__ void device_set_chromosome_gene_idx(ChromosomeGeneIdxPair *d_chromosome_gene_idx, int chromosome_size){ int tx = blockIdx.x*blockDim.x + threadIdx.x; for(int i=0; i<chromosome_size; i++){ d_chromosome_gene_idx[tx*chromosome_size + i].chromosomeIdx = tx; d_chromosome_gene_idx[tx*chromosome_size + i].geneIdx = i; } } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used. Used as comparator to sort genes of the chromosomes. After sorting by gene we need to reagroup genes by their chromosomes which are indexed by threadIdx. ***/ __device__ bool operator<(const ChromosomeGeneIdxPair& lhs, const ChromosomeGeneIdxPair& rhs){ return lhs.chromosomeIdx < rhs.chromosomeIdx; } /*** If DEVICE_DECODE_CHROMOSOME_SORTED is used. We sort the genes of each chromosome. We perform 2 stable_sort sorts: first we sort all genes of all chromosomes by their values, and than we sort by the chromosome index, and since stable_sort is used, for each chromosome we will have its genes sorted by their values. 
***/ void BRKGA::sort_chromosomes_genes(){ //First set for each gene, its chromosome index and its original index in the chromosome device_set_chromosome_gene_idx<<<dimGrid, dimBlock>>>(d_chromosome_gene_idx, chromosome_size); //we use d_population2 to sorte all genes by their values cudaMemcpy(d_population2, d_population, number_chromosomes*chromosome_size*sizeof(float), cudaMemcpyDeviceToDevice); thrust::device_ptr<float> keys(d_population2); thrust::device_ptr<ChromosomeGeneIdxPair> vals(d_chromosome_gene_idx); //stable sort both d_population2 and d_chromosome_gene_idx by all the genes values thrust::stable_sort_by_key(keys, keys + number_chromosomes*chromosome_size, vals); //stable sort both d_population2 and d_chromosome_gene_idx by the chromosome index values thrust::stable_sort_by_key(vals, vals + number_chromosomes*chromosome_size, keys); } /*** Kernel function, where each thread process one chromosome. It receives the current population *d_population, the next population pointer *d_population2, two random vectors for indices of parents, d_random_elite_parent and d_random_parent, ***/ __global__ void device_next_population(float *d_population, float *d_population2, float *d_random_elite_parent, float *d_random_parent, int chromosome_size, unsigned population_size, unsigned elite_size, unsigned mutants_size, float rhoe, PopIdxThreadIdxPair *d_scores_idx){ unsigned tx = blockIdx.x*blockDim.x + threadIdx.x; //global thread index unsigned chromosome_idx = tx*chromosome_size; unsigned pop_idx = (unsigned)tx/population_size; //the population index of this thread unsigned inside_pop_idx = tx%population_size; //below are the inside population random indexes of a elite parent and regular parent for crossover unsigned parent_elite_idx = (unsigned)(ceilf(d_random_elite_parent[tx]*elite_size)-1); unsigned parent_idx = (unsigned)(elite_size+ceilf(d_random_parent[tx]*(population_size-elite_size))-1); //if inside_pop_idx < elite_size then thread is elite, so we copy elite chromosome to the next population if(inside_pop_idx < elite_size){ unsigned elite_chromosome_idx = d_scores_idx[tx].thIdx*chromosome_size; for(int i=0; i<chromosome_size; i++) d_population2[chromosome_idx + i] = d_population[elite_chromosome_idx + i]; }else if(inside_pop_idx < population_size - mutants_size){ //if inside_pop_idex >= elite_size and inside < population_size - mutants_size //thread is responsible to crossover unsigned elite_chromosome_idx = d_scores_idx[pop_idx*population_size + parent_elite_idx].thIdx*chromosome_size; unsigned parent_chromosome_idx = d_scores_idx[pop_idx*population_size + parent_idx].thIdx*chromosome_size; for(int i=0; i<chromosome_size; i++){ if(d_population2[chromosome_idx + i] <= rhoe) //copy allele from elite parent d_population2[chromosome_idx + i] = d_population[elite_chromosome_idx + i]; else //copy allele from regular parent d_population2[chromosome_idx + i] = d_population[parent_chromosome_idx + i]; } }//in the else case the thread corresponds to a mutant and nothing is done. } /*** Main function of the BRKGA algorithm. It evolves K populations for a certain number of generations. 
***/ void BRKGA::evolve(int number_generations){ using std::domain_error; if(decode_type == DEVICE_DECODE){ evaluate_chromosomes_device(); }else if(decode_type == DEVICE_DECODE_CHROMOSOME_SORTED){ evaluate_chromosomes_sorted_device(); }else if(decode_type == HOST_DECODE){ evaluate_chromosomes_host(); }else{ throw domain_error("Function decode type is unknown"); } //After this call the vector d_scores_idx has all threads sorted by population, and //inside each population, threads are sorted by score sort_chromosomes(); //This call initialize the whole area of the next population d_population2 with random values. //So mutantes are already build. For the non mutants we use the //random values generated here to perform the crossover on the current population d_population. initialize_population(2); //generate random numbers to index parents used for crossover curandGenerateUniform(gen, d_random_elite_parent, number_chromosomes); curandGenerateUniform(gen, d_random_parent, number_chromosomes); //Kernel function, where each thread process one chromosome of the next population. device_next_population<<<dimGrid, dimBlock>>>(d_population, d_population2, d_random_elite_parent, d_random_parent, chromosome_size, population_size, elite_size, mutants_size, rhoe, d_scores_idx); float *aux = d_population2; d_population2 = d_population; d_population = aux; } void BRKGA::initialize_population(int p){ if(p==1) curandGenerateUniform(gen, d_population, number_chromosomes*chromosome_size); if(p==2) curandGenerateUniform(gen, d_population2, number_chromosomes*chromosome_size); } /*** Kernel function that sets for each cromosome its global index (among all populations) and its population index. ***/ __global__ void device_set_idx(PopIdxThreadIdxPair *d_scores_idx, int population_size){ int tx = blockIdx.x*blockDim.x + threadIdx.x; d_scores_idx[tx].popIdx = tx/population_size; d_scores_idx[tx].thIdx = tx; } /*** Function used to sort chromosomes by population index ***/ __device__ bool operator<(const PopIdxThreadIdxPair& lhs, const PopIdxThreadIdxPair& rhs){ return lhs.popIdx < rhs.popIdx; } /*** We sort chromosomes for each population. We use the thread index to index each population, and perform 2 stable_sort sorts: first we sort by the chromosome scores, and than by their population index, and since stable_sort is used in each population the chromosomes are sorted by scores. ***/ void BRKGA::sort_chromosomes(){ //For each thread we store in d_scores_idx the global chromosome index and its population index. device_set_idx<<<dimGrid, dimBlock>>>(d_scores_idx, population_size); thrust::device_ptr<float> keys(d_scores); thrust::device_ptr<PopIdxThreadIdxPair> vals(d_scores_idx); //now sort all chromosomes by their scores (vals) thrust::stable_sort_by_key(keys, keys + number_chromosomes, vals); //now sort all chromossomes by their population index //in the sorting process it is used operator< above to compare two structs of this type thrust::stable_sort_by_key(vals, vals + number_chromosomes, keys); } /*** Kernel function to operate the exchange of elite chromosomes. It was launched M*number_populations threads. For each population each one of M threads do the copy of an elite chromosome of its own population into the other populations. 
To do: make kernel save in local memory the chromosome and then copy to each other population ***/ __global__ void device_exchange_elite(float *d_population, int chromosome_size, unsigned population_size, unsigned number_populations, PopIdxThreadIdxPair *d_scores_idx, unsigned M){ unsigned tx = threadIdx.x; //this thread value between 0 and M-1 unsigned pop_idx = blockIdx.x; //this thread population index, a value between 0 and number_populations-1 unsigned elite_idx = pop_idx*population_size + tx; unsigned elite_chromosome_idx = d_scores_idx[elite_idx].thIdx; unsigned inside_destiny_idx = population_size-1-(M*pop_idx)-tx;//index of the destiny of this thread inside each population for(int i=0; i<number_populations; i++){ if(i != pop_idx){ unsigned destiny_chromosome_idx = d_scores_idx[i*population_size + inside_destiny_idx].thIdx; for(int j=0; j<chromosome_size;j++) d_population[destiny_chromosome_idx*chromosome_size + j] = d_population[elite_chromosome_idx*chromosome_size + j]; } } } /*** Exchange M individuals among the different populations. ***/ void BRKGA::exchangeElite(unsigned M){ using std::range_error; if(M > elite_size) { throw range_error("Exchange elite size M greater than elite size."); } if(M*number_populations > population_size) { throw range_error("Total exchange elite size greater than population size."); } using std::domain_error; if(decode_type == DEVICE_DECODE){ evaluate_chromosomes_device(); }else if(decode_type == DEVICE_DECODE_CHROMOSOME_SORTED){ evaluate_chromosomes_sorted_device(); }else if(decode_type == HOST_DECODE){ evaluate_chromosomes_host(); } else{ throw domain_error("Function decode type is unknown"); } sort_chromosomes(); device_exchange_elite<<<number_populations, M>>>(d_population, chromosome_size, population_size, number_populations, d_scores_idx, M); } /*** Return a vector of vectors, where each line vector corresponds to a chromosome, where in position 0 we have its score and in positions 1 to chromosome_size the aleles values ***/ std::vector<std::vector <float>> BRKGA::getkBestChromosomes(unsigned k){ std::vector<std::vector <float>> ret(k, std::vector<float>(chromosome_size+1)); global_sort_chromosomes(); cudaMemcpy(h_scores_idx, d_scores_idx, number_chromosomes*sizeof(PopIdxThreadIdxPair),cudaMemcpyDeviceToHost); cudaMemcpy(h_scores, d_scores, number_chromosomes*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(h_population, d_population, number_chromosomes*chromosome_size*sizeof(float),cudaMemcpyDeviceToHost); for(int i=0; i<k; i++){ unsigned tx = h_scores_idx[i].thIdx; float *begin = &h_population[tx*chromosome_size]; ret[i][0] = h_scores[i]; for(int u=1; u <= chromosome_size; u++){ ret[i][u] = begin[u-1]; } } return ret; } /*** Return a vector of vectors, where each line vector corresponds to a chromosome, where in position 0 we have its score and in positions 1 to chromosome_size the aleles values ***/ std::vector<std::vector <float>> BRKGA::getkBestChromosomes2(unsigned k){ if(k>POOL_SIZE) k=POOL_SIZE; std::vector<std::vector <float>> ret(k, std::vector<float>(chromosome_size+1)); saveBestChromosomes(); cudaMemcpy(h_best_solutions, d_best_solutions, POOL_SIZE*(chromosome_size+1)*sizeof(float),cudaMemcpyDeviceToHost); for(int i=0; i<k; i++){ for(int j=0; j <= chromosome_size; j++){ ret[i][j] = h_best_solutions[i*(chromosome_size+1) + j]; } } return ret; } __global__ void device_save_best_chromosomes(float *d_population, unsigned chromosome_size, PopIdxThreadIdxPair *d_scores_idx, float *d_best_solutions, float *d_scores, unsigned best_saved){ 
if(!best_saved){//this is the first time saving best solutions in to the pool for(int i=0; i<POOL_SIZE; i++){ unsigned tx = d_scores_idx[i].thIdx; float *begin = (float *)&d_population[tx*chromosome_size]; d_best_solutions[i*(chromosome_size+1)] = d_scores[i]; //save the value of the chromosome for(int j=1; j <= chromosome_size; j++){ //save the chromosome d_best_solutions[i*(chromosome_size+1)+j] = begin[j-1]; } } }else{//Since best solutions were already saved //only save now if the i-th best current solution is better than the i-th best overall for(int i=0; i<POOL_SIZE; i++){ unsigned tx = d_scores_idx[i].thIdx; float *begin = (float *)&d_population[tx*chromosome_size]; if(d_scores[i] < d_best_solutions[i*(chromosome_size+1)]){ d_best_solutions[i*(chromosome_size+1)] = d_scores[i]; for(int j=1; j <= chromosome_size; j++){ d_best_solutions[i*(chromosome_size+1)+j] = begin[j-1]; } } } } } /*** This Function saves in the pool d_best_solutions and h_best_solutions the best solutions generated so far among all populations. ***/ void BRKGA::saveBestChromosomes(){ global_sort_chromosomes(); device_save_best_chromosomes<<<1, 1>>>(d_population, chromosome_size, d_scores_idx, d_best_solutions, d_scores, best_saved); best_saved = 1; } /*** We sort all chromosomes of all populations toguether. We use the global thread index to index each chromosome, since each thread is responsible for one thread. Notice that in this function we only perform one sort, since we want the best chromosomes overall, so we do not perform a second sort to separate chromosomes by their population. ***/ void BRKGA::global_sort_chromosomes(){ using std::domain_error; if(decode_type == DEVICE_DECODE){ evaluate_chromosomes_device(); }else if(decode_type == DEVICE_DECODE_CHROMOSOME_SORTED){ evaluate_chromosomes_sorted_device(); }else if(decode_type == HOST_DECODE){ evaluate_chromosomes_host(); } else{ throw domain_error("Function decode type is unknown"); } device_set_idx<<<dimGrid, dimBlock>>>(d_scores_idx, population_size); thrust::device_ptr<float> keys(d_scores); thrust::device_ptr<PopIdxThreadIdxPair> vals(d_scores_idx); thrust::sort_by_key(keys, keys + number_chromosomes, vals); }
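The BRKGA implementation above exposes a small host-side API: the constructor, setInstanceInfo, evolve, exchangeElite and getkBestChromosomes2. A minimal driver is sketched below; the parameter values, the HOST_DECODE constant and the assumption that Decoder.cpp supplies a matching host_decode all come from BRKGA.h/Decoder.cpp, which are not part of this dump, so they are illustrative assumptions rather than part of these files.

#include "BRKGA.h"
#include <cstdio>
#include <vector>

// Hypothetical driver; only the BRKGA member functions called here appear in the
// implementation above. Sizes, rates and HOST_DECODE are assumed example values.
int main() {
    unsigned n = 256;       // chromosome size
    unsigned p = 1024;      // population size (rounded up internally to a THREADS_PER_BLOCK multiple)
    float pe = 0.15f;       // elite fraction
    float pm = 0.10f;       // mutant fraction
    float rhoe = 0.70f;     // probability of inheriting the elite parent's allele in crossover
    unsigned K = 2;         // number of parallel populations

    BRKGA brkga(n, p, pe, pm, rhoe, K, HOST_DECODE, /*NUM_THREADS=*/4, /*RAND_SEED=*/1234u);

    for (int gen = 1; gen <= 100; ++gen) {
        brkga.evolve(1);                            // one generation per call in the implementation above
        if (gen % 20 == 0) brkga.exchangeElite(2);  // migrate 2 elite chromosomes between the K populations
    }

    // Row 0 of the result: position 0 holds the score, positions 1..n the allele values.
    std::vector<std::vector<float>> best = brkga.getkBestChromosomes2(1);
    std::printf("best score: %f\n", best[0][0]);
    return 0;
}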
6509f4bdc457d7ddc09d52fa7557b74d4070197c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <ATen/hip/HIPApplyUtils.cuh> #include "cuda_helpers.h" template <typename T> __global__ void RoIPoolForward( const int nthreads, const T* input, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* rois, T* output, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero T maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; const T* offset_input = input + (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * width + w; if (offset_input[input_index] > maxval) { maxval = offset_input[input_index]; maxidx = input_index; } } } output[index] = maxval; argmax_data[index] = maxidx; } } template <typename T> __global__ void RoIPoolBackward( const int nthreads, const T* grad_output, const int* argmax_data, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, T* grad_input, const T* rois, const int n_stride, const int c_stride, const int h_stride, const int w_stride) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; T* grad_input_offset = grad_input + ((roi_batch_ind * channels + c) * height * width); int output_offset = n * n_stride + c * c_stride; const int* argmax_data_offset = argmax_data + (n * channels + c) * pooled_height * pooled_width; int argmax = argmax_data_offset[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( grad_input_offset + argmax, static_cast<T>( grad_output[output_offset + ph * h_stride + pw * w_stride])); } } } std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda( const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "ROIPool_forward_cuda"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); at::Tensor output = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options()); at::Tensor argmax = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); auto output_size = num_rois * pooled_height * pooled_width * channels; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(output, argmax); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIPool_forward", [&] { hipLaunchKernelGGL(( RoIPoolForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois.contiguous().data<scalar_t>(), output.data<scalar_t>(), argmax.data<int>()); }); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(output, argmax); } at::Tensor ROIPool_backward_cuda( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int 
height, const int width) { // Check if input tensors are CUDA tensors AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); AT_ASSERTM(argmax.device().is_cuda(), "argmax must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, argmax_t{argmax, "argmax", 3}; at::CheckedFrom c = "ROIPool_backward_cuda"; at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device()); auto num_rois = rois.size(0); at::Tensor grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_input; } int n_stride = grad.stride(0); int c_stride = grad.stride(1); int h_stride = grad.stride(2); int w_stride = grad.stride(3); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] { hipLaunchKernelGGL(( RoIPoolBackward<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.data<scalar_t>(), argmax.contiguous().data<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>(), n_stride, c_stride, h_stride, w_stride); }); AT_CUDA_CHECK(hipGetLastError()); return grad_input; }
6509f4bdc457d7ddc09d52fa7557b74d4070197c.cu
#include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include "cuda_helpers.h" template <typename T> __global__ void RoIPoolForward( const int nthreads, const T* input, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* rois, T* output, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero T maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; const T* offset_input = input + (roi_batch_ind * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int input_index = h * width + w; if (offset_input[input_index] > maxval) { maxval = offset_input[input_index]; maxidx = input_index; } } } output[index] = maxval; argmax_data[index] = maxidx; } } template <typename T> __global__ void RoIPoolBackward( const int nthreads, const T* grad_output, const int* argmax_data, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, T* grad_input, const T* rois, const int n_stride, const int c_stride, const int h_stride, const int w_stride) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; T* grad_input_offset = grad_input + ((roi_batch_ind * channels + c) * height * width); int output_offset = n * n_stride + c * c_stride; const int* argmax_data_offset = argmax_data + (n * channels + c) * pooled_height * pooled_width; int argmax = argmax_data_offset[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( grad_input_offset + argmax, static_cast<T>( grad_output[output_offset + ph * h_stride + pw * w_stride])); } } } std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda( const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; at::CheckedFrom c = "ROIPool_forward_cuda"; at::checkAllSameGPU(c, {input_t, rois_t}); at::checkAllSameType(c, {input_t, rois_t}); at::cuda::CUDAGuard device_guard(input.device()); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); at::Tensor output = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options()); at::Tensor argmax = at::zeros( {num_rois, channels, pooled_height, pooled_width}, input.options().dtype(at::kInt)); auto output_size = num_rois * pooled_height * pooled_width * channels; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(output, argmax); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIPool_forward", [&] { RoIPoolForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, rois.contiguous().data<scalar_t>(), output.data<scalar_t>(), argmax.data<int>()); }); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(output, argmax); } at::Tensor ROIPool_backward_cuda( const at::Tensor& grad, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { // Check if input tensors are 
CUDA tensors AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); AT_ASSERTM(argmax.device().is_cuda(), "argmax must be a CUDA tensor"); at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, argmax_t{argmax, "argmax", 3}; at::CheckedFrom c = "ROIPool_backward_cuda"; at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t}); at::checkAllSameType(c, {grad_t, rois_t}); at::cuda::CUDAGuard device_guard(grad.device()); auto num_rois = rois.size(0); at::Tensor grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_input; } int n_stride = grad.stride(0); int c_stride = grad.stride(1); int h_stride = grad.stride(2); int w_stride = grad.stride(3); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIPool_backward", [&] { RoIPoolBackward<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad.data<scalar_t>(), argmax.contiguous().data<int>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>(), n_stride, c_stride, h_stride, w_stride); }); AT_CUDA_CHECK(cudaGetLastError()); return grad_input; }
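Both RoIPool kernels iterate with CUDA_1D_KERNEL_LOOP from "cuda_helpers.h", which is not included in this dump. In torchvision-style code this macro is a grid-stride loop, which matters here because the launch clamps the grid to at most 4096 blocks of 512 threads, so a single thread may process several pooled outputs. A sketch of such a definition, given as an assumption about that header rather than a quote from it:

// Assumed grid-stride definition; the real cuda_helpers.h may differ in detail.
#define CUDA_1D_KERNEL_LOOP(i, n)                                  \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);     \
       i += blockDim.x * gridDim.x)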
6430ca323b7826c5b85401dffdfffc7f4fef1bec.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"

__global__ void div_const_kernel(const float *input, float *output, float value, size_t size) {
    size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
    if (ind >= size) return;
    output[ind] = value / input[ind];
}

int DLGpuMatrixDivConst(float val, const DLArrayHandle input, DLArrayHandle output,
                        DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
    /* TODO: Your code here */
    size_t size = 1;
    for (index_t i = 0; i < input->ndim; i++) {
        size *= input->shape[i];
    }
    dim3 blocks;
    dim3 threads;
    float *output_data = (float *)output->data;
    const float *input_data = (const float *)input->data;
    if (size <= 1024) {
        threads.x = size;
        blocks.x = 1;
    } else {
        threads.x = 1024;
        blocks.x = (size + 1023) / 1024;
    }
    if (stream_handle)
        hipLaunchKernelGGL((div_const_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle,
                           input_data, output_data, val, size);
    else
        hipLaunchKernelGGL((div_const_kernel), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, val, size);
    if(p != NULL){
        int size_input = 1, size_output = 1;
        for(int i = 0; i < input -> ndim; i++)
            size_input *= input -> shape[i];
        for(int i = 0; i < output -> ndim; i++)
            size_output *= output -> shape[i];
        p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
        p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
        p -> workspace_memory = 0;
    }
    return 0;
}
6430ca323b7826c5b85401dffdfffc7f4fef1bec.cu
#include "gpu_runtime.h"

__global__ void div_const_kernel(const float *input, float *output,
                                 float value, size_t size) {
    size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
    if (ind >= size) return;
    output[ind] = value / input[ind];
}

int DLGpuMatrixDivConst(float val, const DLArrayHandle input, DLArrayHandle output,
                        DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
    /* TODO: Your code here */
    size_t size = 1;
    for (index_t i = 0; i < input->ndim; i++) {
        size *= input->shape[i];
    }
    dim3 blocks;
    dim3 threads;
    float *output_data = (float *)output->data;
    const float *input_data = (const float *)input->data;
    if (size <= 1024) {
        threads.x = size;
        blocks.x = 1;
    } else {
        threads.x = 1024;
        blocks.x = (size + 1023) / 1024;
    }
    if (stream_handle)
        div_const_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(input_data, output_data, val, size);
    else
        div_const_kernel<<<blocks, threads>>>(input_data, output_data, val, size);
    if(p != NULL){
        int size_input = 1, size_output = 1;
        for(int i = 0; i < input -> ndim; i++) size_input *= input -> shape[i];
        for(int i = 0; i < output -> ndim; i++) size_output *= output -> shape[i];
        p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
        p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
        p -> workspace_memory = 0;
    }
    return 0;
}
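The launch sizing in DLGpuMatrixDivConst (a single block of `size` threads when size <= 1024, otherwise 1024-thread blocks via ceil-division) is easy to exercise outside the DLArray framework. The harness below is a hypothetical standalone sketch, not part of the dataset, using a plain CUDA kernel with the same sizing rule and assumed test values.

// Hypothetical standalone harness (not part of the dataset) exercising the
// same launch-sizing rule as DLGpuMatrixDivConst.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void div_const(const float *in, float *out, float value, size_t n) {
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value / in[i];
}

int main() {
    const size_t n = 4096;  // assumed test size
    float *in = nullptr, *out = nullptr;
    cudaMallocManaged(&in, n * sizeof(float));
    cudaMallocManaged(&out, n * sizeof(float));
    for (size_t i = 0; i < n; ++i) in[i] = static_cast<float>(i + 1);

    // Same sizing rule as the file above: one thread per element,
    // single block when n <= 1024, otherwise 1024-thread blocks.
    unsigned threads = n <= 1024 ? static_cast<unsigned>(n) : 1024u;
    unsigned blocks  = n <= 1024 ? 1u : static_cast<unsigned>((n + 1023) / 1024);
    div_const<<<blocks, threads>>>(in, out, 2.0f, n);
    cudaDeviceSynchronize();

    printf("out[1] = %f (expect 1.0)\n", out[1]);  // 2.0 / 2.0
    cudaFree(in);
    cudaFree(out);
    return 0;
}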
55bcc25ec788cdf36d994ee4994dc10047a97b18.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "constants.h"

__global__ void scaleVectorKernel(double *d_outVec, double scale, double *d_inpVec, int varNum)
{
    int varIdx = blockIdx.x * blockDim.x + threadIdx.x;

    if ( varIdx < varNum )
        d_outVec[varIdx] = scale * d_inpVec[varIdx];

    return;
}

void scaleVector(double *d_outVec, double scale, double *d_inpVec, int varNum)
{
    int blkNum = (varNum - 1) / BLKDIM + 1;

    hipLaunchKernelGGL(( scaleVectorKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_outVec, scale, d_inpVec, varNum);

    return;
}
55bcc25ec788cdf36d994ee4994dc10047a97b18.cu
#include "constants.h"

__global__ void scaleVectorKernel(double *d_outVec, double scale, double *d_inpVec, int varNum)
{
    int varIdx = blockIdx.x * blockDim.x + threadIdx.x;

    if ( varIdx < varNum )
        d_outVec[varIdx] = scale * d_inpVec[varIdx];

    return;
}

void scaleVector(double *d_outVec, double scale, double *d_inpVec, int varNum)
{
    int blkNum = (varNum - 1) / BLKDIM + 1;

    scaleVectorKernel <<<blkNum, BLKDIM>>> (d_outVec, scale, d_inpVec, varNum);

    return;
}
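scaleVector only needs device pointers from its caller. The driver below is a hypothetical sketch, not part of the dataset; it assumes the .cu file above is compiled and linked in and that BLKDIM comes from constants.h.

// Hypothetical caller of scaleVector (not part of the dataset).
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

void scaleVector(double *d_outVec, double scale, double *d_inpVec, int varNum);

int main() {
    const int n = 1000;  // assumed test size
    std::vector<double> h_in(n, 3.0), h_out(n, 0.0);

    double *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(double));
    cudaMalloc(&d_out, n * sizeof(double));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(double), cudaMemcpyHostToDevice);

    scaleVector(d_out, 2.0, d_in, n);  // d_out[i] = 2.0 * d_in[i]
    cudaDeviceSynchronize();

    cudaMemcpy(h_out.data(), d_out, n * sizeof(double), cudaMemcpyDeviceToHost);
    printf("h_out[0] = %f (expect 6.0)\n", h_out[0]);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}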
079930f28f490e6b86740d5b73a297b2d3c05149.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ConvDepthWiseExecution.hpp" #include "core/ConvolutionCommon.hpp" #include "Raster.cuh" #include <float.h> #include "MNNCUDADefine.hpp" #include "MNNCUDAFunction.cuh" namespace MNN { namespace CUDA { template<typename T> __global__ void CONV_DW(const T* input, const half* kernel, const half* bias, T *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int dw, const int dh, const int sw, const int sh, const int pw, const int ph, const int total, DivModFast d_oc, DivModFast d_ow, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total/2; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox, tmp1, ob; d_oc.divmod(index, tmp1, oz_2); d_ow.divmod(tmp1, tmp2, ox); d_oh.divmod(tmp2, ob, oy); int oz = oz_2 << 1; int ix = ox * sw - pw; int iy = oy * sh - ph; float color0 = bias[oz]; float color1 = bias[oz+1]; int fxSta = max(0, (UP_DIV(-ix, dw))); int fySta = max(0, (UP_DIV(-iy, dh))); int fxEnd = min(kw, UP_DIV(iw - ix, dw)); int fyEnd = min(kh, UP_DIV(ih - iy, dh)); int fx, fy, fz; for (fy=fySta; fy<fyEnd; ++fy) { int sy = fy*dh + iy; for (fx=fxSta; fx<fxEnd; ++fx) { int sx = fx*dw + ix; int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz; float inp0 = input[src_offset]; float inp1 = input[src_offset+1]; float ker0 = kernel[(fy * kw + fx) * c_p + oz]; float ker1 = kernel[(fy * kw + fx) * c_p + oz + 1]; color0 = color0 + inp0 * ker0; color1 = color1 + inp1 * ker1; } } color0 = max(color0, minV); color0 = min(color0, maxV); color1 = max(color1, minV); color1 = min(color1, maxV); int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz; output[dst_offset] = color0; output[dst_offset+1] = color1; } } __global__ void CONV_DW_HALF2_OPT(const half2* input, const half2* kernel, const half2* bias, half2 *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int dw, const int dh, const int sw, const int sh, const int pw, const int ph, const int total, DivModFast d_oc, DivModFast d_ow, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total/2; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox, tmp1, ob; d_oc.divmod(index, tmp1, oz_2); d_ow.divmod(tmp1, tmp2, ox); d_oh.divmod(tmp2, ob, oy); int oz = oz_2; int ix = ox * sw - pw; int iy = oy * sh - ph; half2 color = bias[oz]; int fxSta = max(0, -ix); int fySta = max(0, -iy); int fxEnd = min(kw, iw - ix); int fyEnd = min(kh, ih - iy); int fx, fy, fz; for (fy=fySta; fy<fyEnd; ++fy) { int sy = fy + iy; for (fx=fxSta; fx<fxEnd; ++fx) { int sx = fx + ix; int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz; half2 inp = input[src_offset]; half2 ker = kernel[(fy * kw + fx) * c_p + oz]; color = __hfma2(inp, ker, color); } } color.x = max(color.x, minV); color.x = min(color.x, maxV); color.y = max(color.y, minV); color.y = min(color.y, maxV); int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz; output[dst_offset] = color; } } __global__ void CONV_DW3x3_HALF2_OPT(const half2* input, const half2* kernel, const half2* bias, half2 *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int dw, const int dh, const int sw, const int sh, const int pw, const int ph, 
const int total, DivModFast d_oc, DivModFast d_ow, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total/4; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox_2, tmp1, ob; d_oc.divmod(index, tmp1, oz_2); d_ow.divmod(tmp1, tmp2, ox_2); d_oh.divmod(tmp2, ob, oy); int oz = oz_2; int ox = ox_2 << 1; int ix = ox - 1; int iy = oy - 1; half2 color0 = bias[oz]; half2 color1 = color0; half2 zero; zero.x = (half)0.0; zero.y = (half)0.0; half2 inp[12]; half2 ker[3][3]; for(int j=0; j<3; j++) { if(iy < 0 && j==0) { for(int i=0; i<4; i++) { inp[i] = zero; } continue; } if(iy+2 > ih-1 && j==2) { for(int i=0; i<4; i++) { inp[8+i] = zero; } continue; } for(int i=0; i<4; i++) { if(ix < 0 && i==0) { for(int j=0; j<3; j++) { inp[4*j+0] = zero; } continue; } if(ix+3 > iw-1 && i==3) { for(int j=0; j<3; j++) { inp[4*j+3] = zero; } continue; } int src_offset = ((ob * ih + iy+j) * iw + ix+i) * c_p + oz; inp[4*j+i] = input[src_offset]; } } for(int j=0; j<3; j++) { for(int i=0; i<3; i++) { ker[j][i] = kernel[(j * 3 + i) * c_p + oz]; } } for(int j=0; j<3; j++) { for(int i=0; i<3; i++) { color0 = __hfma2(inp[4*j+i], ker[j][i], color0); color1 = __hfma2(inp[4*j+i+1], ker[j][i], color1); } } color0.x = max(color0.x, minV); color0.x = min(color0.x, maxV); color0.y = max(color0.y, minV); color0.y = min(color0.y, maxV); color1.x = max(color1.x, minV); color1.x = min(color1.x, maxV); color1.y = max(color1.y, minV); color1.y = min(color1.y, maxV); int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz; output[dst_offset] = color0; output[dst_offset+c_p] = color1; } } __global__ void CONV_DW_OPT(const float* input, const half* kernel, const half* bias, float *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int dw, const int dh, const int sw, const int sh, const int pw, const int ph, const int total, DivModFast d_oc, DivModFast d_ow, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total / 2; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox, tmp1, ob; d_oc.divmod(index, tmp1, oz_2); d_ow.divmod(tmp1, tmp2, ox); d_oh.divmod(tmp2, ob, oy); int oz = oz_2 << 1; int ix = ox * sw - pw; int iy = oy * sh - ph; float color0 = bias[oz]; float color1 = bias[oz+1]; int fxSta = max(0, -ix); int fySta = max(0, -iy); int fxEnd = min(kw, iw - ix); int fyEnd = min(kh, ih - iy); int fx, fy, fz; for (fy=fySta; fy<fyEnd; ++fy) { int sy = fy + iy; for (fx=fxSta; fx<fxEnd; ++fx) { int sx = fx + ix; int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz; float inp0 = input[src_offset]; float inp1 = input[src_offset+1]; float ker0 = kernel[(fy * kw + fx) * c_p + oz]; float ker1 = kernel[(fy * kw + fx) * c_p + oz + 1]; color0 = color0 + inp0 * ker0; color1 = color1 + inp1 * ker1; } } color0 = max(color0, minV); color0 = min(color0, maxV); color1 = max(color1, minV); color1 = min(color1, maxV); int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz; output[dst_offset] = color0; output[dst_offset+1] = color1; } } template<typename T> __global__ void CONV_DW_MULTI_WIDTH4(const T* input, const half* kernel, const half* bias, T *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int total, DivModFast d_oc, DivModFast d_ow_4, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total / 4; index += blockDim.x * 
gridDim.x) { int oz, tmp2, oy, ox_4, tmp1, ob; d_oc.divmod(index, tmp1, oz); d_ow_4.divmod(tmp1, tmp2, ox_4); d_oh.divmod(tmp2, ob, oy); float color0 = bias[oz]; float color1 = color0; float color2 = color0; float color3 = color0; // Parallel pipelining read and calculate float src; float filter0, filter1, filter2, filter3; int src_offset = ((ob * ih + oy) * iw + (ox_4 << 2)) * c_p + oz; int filter_offset = 0 * c_p + oz; src = input[src_offset + 0 * c_p]; filter0 = kernel[filter_offset + 0 * c_p]; color0 += (src * filter0); filter1 = kernel[filter_offset + 1 * c_p]; src = input[src_offset + 1 * c_p]; color0 += (src * filter1); color1 += (src * filter0); filter2 = kernel[filter_offset + 2 * c_p]; src = input[src_offset + 2 * c_p]; color0 += (src * filter2); color1 += (src * filter1); color2 += (src * filter0); filter3 = kernel[filter_offset + 3 * c_p]; for (int fx=3; fx<kw; ++fx) { src = input[src_offset + fx * c_p]; color0 += (src * filter3); color1 += (src * filter2); color2 += (src * filter1); color3 += (src * filter0); filter0 = filter1; filter1 = filter2; filter2 = filter3; filter3 = kernel[filter_offset + (fx+1) * c_p]; } src = input[src_offset + kw * c_p]; color1 += (src * filter2); color2 += (src * filter1); color3 += (src * filter0); src = input[src_offset + (kw+1) * c_p]; color2 += (src * filter2); color3 += (src * filter1); src = input[src_offset + (kw+2) * c_p]; color3 += (src * filter2); color0 = max(color0, minV); color0 = min(color0, maxV); color1 = max(color1, minV); color1 = min(color1, maxV); color2 = max(color2, minV); color2 = min(color2, maxV); color3 = max(color3, minV); color3 = min(color3, maxV); int dst_offset = ((ob * oh + oy) * ow + (ox_4 << 2)) * c_p + oz; output[dst_offset] = color0; output[dst_offset+c_p] = color1; output[dst_offset+2*c_p] = color2; output[dst_offset+3*c_p] = color3; } } __global__ void CONV_DW_MULTI_WIDTH_CHANNEL(const float* input, const half* kernel, const half* bias, float *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int total, DivModFast d_oc_2, DivModFast d_ow_2, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total / 4; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox_2, tmp1, ob; d_oc_2.divmod(index, tmp1, oz_2); d_ow_2.divmod(tmp1, tmp2, ox_2); d_oh.divmod(tmp2, ob, oy); float2 color0 = __half22float2((( half2 *)(bias + (oz_2 << 1)))[0]); float2 color1 = color0; // Parallel pipelining read and calculate float src0, src2, filter0, filter2; int src_offset = ((ob * ih + oy) * iw + (ox_2 << 1)) * c_p + (oz_2 << 1); int filter_offset = 0 * c_p + (oz_2 << 1); float2 src = ((float2 *)(input + src_offset + 0 * c_p))[0]; float2 filter = __half22float2(((half2 *)(kernel + filter_offset + 0 * c_p))[0]); color0.x += (src.x * filter.x); color0.y += (src.y * filter.y); for (int fx=1; fx<kw; ++fx) { src = ((float2 *)(input + src_offset + fx * c_p))[0]; color1.x += (src.x * filter.x); color1.y += (src.y * filter.y); filter = __half22float2(((half2 *)(void *)(kernel + filter_offset + fx * c_p))[0]); color0.x += (src.x * filter.x); color0.y += (src.y * filter.y); } src = ((float2 *)(input + src_offset + kw * c_p))[0]; color1.x += (src.x * filter.x); color1.y += (src.y * filter.y); color0.x = max(color0.x, minV); color0.x = min(color0.x, maxV); color1.x = max(color1.x, minV); color1.x = min(color1.x, maxV); color0.y = max(color0.y, minV); color0.y = min(color0.y, maxV); color1.y = 
max(color1.y, minV); color1.y = min(color1.y, maxV); int dst_offset = ((ob * oh + oy) * ow + (ox_2 << 1)) * c_p + (oz_2 << 1); ((float2 *)(output + dst_offset))[0] = color0; ((float2 *)(output + dst_offset + c_p))[0] = color1; } } static std::shared_ptr<ConvDepthWiseExecution::Resource> _makeResource(const Op* op, Backend* bn) { std::shared_ptr<ConvDepthWiseExecution::Resource> res(new ConvDepthWiseExecution::Resource); auto pool = static_cast<CUDABackend*>(bn)->getStaticBufferPool(); auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime(); auto conv = op->main_as_Convolution2D(); auto convCommon = conv->common(); int kernelX = convCommon->kernelX(); int kernelY = convCommon->kernelY(); int depth = convCommon->outputCount(); int depthC = UP_DIV(depth, PACK_NUMBER); res->weightTensor.reset(Tensor::createDevice<float>({kernelX * kernelY * depthC * PACK_NUMBER})); bool success = bn->onAcquireBuffer(res->weightTensor.get(), Backend::STATIC); if (!success) { return nullptr; } res->mFilter = (void *)res->weightTensor.get()->buffer().device; FuseRegion reg; int offset[8 * PACK_NUMBER]; auto regionStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(FuseRegion)); auto offsetGpuStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(offset)); auto offsetGpu = (uint8_t*)offsetGpuStorage.first + offsetGpuStorage.second; //weight host->device const float* filterDataPtr = nullptr; int weightSize = 0; std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon; ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize); auto tempWeightStorage = pool->alloc(depthC * PACK_NUMBER * kernelY * kernelX * sizeof(float)); auto tempWeight = (uint8_t*)tempWeightStorage.first + tempWeightStorage.second; cuda_check(hipMemset(tempWeight, 0, depthC * PACK_NUMBER * kernelY * kernelX * sizeof(float))); cuda_check(hipMemcpy(tempWeight, filterDataPtr, weightSize*sizeof(float), hipMemcpyHostToDevice)); reg.size[0] = 1; reg.size[1] = kernelY * kernelX; reg.size[2] = depthC * PACK_NUMBER; reg.srcStride[0] = 0; reg.srcStride[1] = 1; reg.srcStride[2] = kernelY * kernelX; reg.dstStride[0] = 0; reg.dstStride[1] = depthC * PACK_NUMBER; reg.dstStride[2] = 1; offset[0] = 1; offset[1] = kernelY * kernelX; offset[2] = depth; offset[3] = 0; offset[4] = 1; offset[5] = reg.size[1]; offset[6] = reg.size[2]; offset[7] = 0; reg.fuseNumber = 1; runtime->memcpy((uint8_t*)regionStorage.first + regionStorage.second, &reg, sizeof(FuseRegion), MNNMemcpyHostToDevice, true); runtime->memcpy(offsetGpu, offset, 8 * sizeof(int), MNNMemcpyHostToDevice, true); FuseRasterBlitFloatToHalf((uint8_t*)res->mFilter, (uint8_t*)tempWeight, (FuseRegion*)((uint8_t*)regionStorage.first + regionStorage.second), offsetGpu, runtime); pool->free(tempWeightStorage); res->biasTensor.reset(Tensor::createDevice<float>({depthC * PACK_NUMBER})); success = bn->onAcquireBuffer(res->biasTensor.get(), Backend::STATIC); res->mBias = (void *)res->biasTensor.get()->buffer().device; if (!success) { return nullptr; } if(conv->bias() != nullptr) { auto tempBiasStorage = pool->alloc(depth * sizeof(float)); auto tempBias = (uint8_t*)tempBiasStorage.first + tempBiasStorage.second; cuda_check(hipMemcpy(tempBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), hipMemcpyHostToDevice)); reg.size[0] = 1; reg.size[1] = 1; reg.size[2] = depthC * PACK_NUMBER; reg.srcStride[0] = 0; reg.srcStride[1] = 0; reg.srcStride[2] = 1; reg.dstStride[0] = 0; reg.dstStride[1] = 0; reg.dstStride[2] = 1; offset[0] = 1; 
offset[1] = 1; offset[2] = conv->bias()->size(); offset[3] = 0; offset[4] = 1; offset[5] = 1; offset[6] = reg.size[2]; offset[7] = 0; reg.fuseNumber = 1; runtime->memcpy((uint8_t*)regionStorage.first + regionStorage.second, &reg, sizeof(FuseRegion), MNNMemcpyHostToDevice, true); runtime->memcpy(offsetGpu, offset, 8 * sizeof(int), MNNMemcpyHostToDevice, true); FuseRasterBlitFloatToHalf((uint8_t*)res->mBias, (uint8_t*)tempBias, (FuseRegion*)((uint8_t*)regionStorage.first + regionStorage.second), offsetGpu, runtime); pool->free(tempBiasStorage); } static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(regionStorage); static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(offsetGpuStorage); return res; } ConvDepthWiseExecution::ConvDepthWiseExecution(const Op* op, Backend* bn, std::shared_ptr<Resource> resource) : Execution(bn) { mOp = op; mResource = resource; auto pool = static_cast<CUDABackend*>(bn)->getStaticBufferPool(); } ConvDepthWiseExecution::~ ConvDepthWiseExecution() { // } ErrorCode ConvDepthWiseExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) { auto pad = ConvolutionCommon::convolutionPad(inputs[0], outputs[0], mOp->main_as_Convolution2D()->common()); auto conv = mOp->main_as_Convolution2D(); auto convCommon = mOp->main_as_Convolution2D()->common(); int channel = inputs[0]->channel(); int channelDiv = UP_DIV(channel, PACK_NUMBER); parameters.pad[0] = pad.first; parameters.pad[1] = pad.second; parameters.kernelSize[0] = convCommon->kernelX(); parameters.kernelSize[1] = convCommon->kernelY(); parameters.stride[0] = convCommon->strideX(); parameters.stride[1] = convCommon->strideY(); parameters.dilate[0] = convCommon->dilateX(); parameters.dilate[1] = convCommon->dilateY(); parameters.inputSize[0] = inputs[0]->width(); parameters.inputSize[1] = inputs[0]->height(); parameters.channel = channelDiv; parameters.outputSize[0] = outputs[0]->width(); parameters.outputSize[1] = outputs[0]->height(); parameters.batch = inputs[0]->batch(); parameters.total = parameters.batch * parameters.outputSize[1] * parameters.outputSize[0] * parameters.channel * PACK_NUMBER; if (static_cast<CUDABackend*>(backend())->useFp16()) { // Do nothing } else { parameters.minValue = -FLT_MAX; parameters.maxValue = FLT_MAX; } if (convCommon->relu()) { parameters.minValue = 0.0f; } if (convCommon->relu6()) { parameters.minValue = 0.0f; parameters.maxValue = 6.0f; } mTotalCount = parameters.total; //MNN_PRINT("%d-%d-%d-%d, %d-%d-%d-%d-%d\n", parameters.kernelSize[0], parameters.kernelSize[1], parameters.stride[0], parameters.stride[1], parameters.inputSize[0], parameters.inputSize[1], channel, parameters.outputSize[0], parameters.outputSize[1]); return NO_ERROR; } ErrorCode ConvDepthWiseExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) { auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime(); auto& prop = runtime->prop(); int limitThreads = UP_DIV(mTotalCount, prop.multiProcessorCount); int threads_num = ALIMIN(prop.maxThreadsPerBlock/2, limitThreads); int block_num = prop.multiProcessorCount; DivModFast d_oc(parameters.channel * PACK_NUMBER / 2); DivModFast d_ow(parameters.outputSize[0]); DivModFast d_oh(parameters.outputSize[1]); const float maxV = parameters.maxValue; const float minV = parameters.minValue; const int iw = parameters.inputSize[0]; const int ih = parameters.inputSize[1]; const int c = parameters.channel; const int c_p = c * PACK_NUMBER; const int ow = parameters.outputSize[0]; const int 
oh = parameters.outputSize[1]; const int kw = parameters.kernelSize[0]; const int kh = parameters.kernelSize[1]; const int dw = parameters.dilate[0]; const int dh = parameters.dilate[1]; const int sw = parameters.stride[0]; const int sh = parameters.stride[1]; const int pw = parameters.pad[0]; const int ph = parameters.pad[1]; const int total = parameters.total; if (static_cast<CUDABackend*>(backend())->useFp16()) { if(parameters.kernelSize[0]==3 && parameters.kernelSize[1]==3 && parameters.stride[0]==1 && parameters.stride[1]==1 && parameters.pad[0]==1 && parameters.pad[1]==1 && parameters.outputSize[0] % 2 ==0) { DivModFast d_ow2(parameters.outputSize[0]/2); hipLaunchKernelGGL(( CONV_DW3x3_HALF2_OPT), dim3(block_num), dim3(threads_num), 0, 0, (const half2*)inputs[0]->deviceId(), (const half2*)mResource->mFilter, (const half2*)mResource->mBias, (half2*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p/2, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow2, d_oh); checkKernelErrors; return NO_ERROR; } if(parameters.dilate[0] == 1 && parameters.dilate[1] == 1) { if(sw == 1 && sh == 1 && pw == 0 && ph == 0 && kw > 3 && kw < 12 && kh == 1 && pw == 0 && ph == 0 && ow % 4 == 0) { DivModFast d_oc(parameters.channel * PACK_NUMBER); DivModFast d_ow(ow/4); hipLaunchKernelGGL(( CONV_DW_MULTI_WIDTH4), dim3(block_num), dim3(threads_num), 0, 0, (const half*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (half*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, total, d_oc, d_ow, d_oh); checkKernelErrors; } else { hipLaunchKernelGGL(( CONV_DW_HALF2_OPT), dim3(block_num), dim3(threads_num), 0, 0, (const half2*)inputs[0]->deviceId(), (const half2*)mResource->mFilter, (const half2*)mResource->mBias, (half2*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p/2, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow, d_oh);//_HALF_OPT checkKernelErrors; } } else { hipLaunchKernelGGL(( CONV_DW), dim3(block_num), dim3(threads_num), 0, 0, (const half*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (half*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow, d_oh); checkKernelErrors; } return NO_ERROR; } if (inputs.size() == 1) { // block_num = runtime->blocks_num(mTotalCount); // threads_num = runtime->threads_num(); if(parameters.dilate[0] == 1 && parameters.dilate[1] == 1) { if(sw == 1 && sh == 1 && pw == 0 && ph == 0 && kw > 3 && kw < 12 && kh == 1 && pw == 0 && ph == 0) { if(ow % 4 == 0) { DivModFast d_oc(parameters.channel * PACK_NUMBER); DivModFast d_ow(ow/4); hipLaunchKernelGGL(( CONV_DW_MULTI_WIDTH4), dim3(block_num), dim3(threads_num), 0, 0, (const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (float*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, total, d_oc, d_ow, d_oh); checkKernelErrors; } else if(ow % 2 == 0) { DivModFast d_oc(parameters.channel * PACK_NUMBER / 2); DivModFast d_ow(ow/2); hipLaunchKernelGGL(( CONV_DW_MULTI_WIDTH_CHANNEL), dim3(block_num), dim3(threads_num), 0, 0, (const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (float*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, total, d_oc, d_ow, d_oh); checkKernelErrors; } } else { hipLaunchKernelGGL(( CONV_DW_OPT), dim3(block_num), dim3(threads_num), 0, 0, (const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const 
half*)mResource->mBias, (float*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow, d_oh); checkKernelErrors; } } else { hipLaunchKernelGGL(( CONV_DW), dim3(block_num), dim3(threads_num), 0, 0, (const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (float*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow, d_oh); checkKernelErrors; } } return NO_ERROR; } class ConvDepthWiseExecutionCreator : public CUDABackend::Creator { public: virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* backend) const override { if (inputs.size() > 1) { return nullptr; } auto res = _makeResource(op, backend); if (nullptr == res) { return nullptr; } return new ConvDepthWiseExecution(op, backend, res); } }; static CUDACreatorRegister<ConvDepthWiseExecutionCreator> __init(OpType_ConvolutionDepthwise); } }
079930f28f490e6b86740d5b73a297b2d3c05149.cu
#include "ConvDepthWiseExecution.hpp" #include "core/ConvolutionCommon.hpp" #include "Raster.cuh" #include <float.h> #include "MNNCUDADefine.hpp" #include "MNNCUDAFunction.cuh" namespace MNN { namespace CUDA { template<typename T> __global__ void CONV_DW(const T* input, const half* kernel, const half* bias, T *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int dw, const int dh, const int sw, const int sh, const int pw, const int ph, const int total, DivModFast d_oc, DivModFast d_ow, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total/2; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox, tmp1, ob; d_oc.divmod(index, tmp1, oz_2); d_ow.divmod(tmp1, tmp2, ox); d_oh.divmod(tmp2, ob, oy); int oz = oz_2 << 1; int ix = ox * sw - pw; int iy = oy * sh - ph; float color0 = bias[oz]; float color1 = bias[oz+1]; int fxSta = max(0, (UP_DIV(-ix, dw))); int fySta = max(0, (UP_DIV(-iy, dh))); int fxEnd = min(kw, UP_DIV(iw - ix, dw)); int fyEnd = min(kh, UP_DIV(ih - iy, dh)); int fx, fy, fz; for (fy=fySta; fy<fyEnd; ++fy) { int sy = fy*dh + iy; for (fx=fxSta; fx<fxEnd; ++fx) { int sx = fx*dw + ix; int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz; float inp0 = input[src_offset]; float inp1 = input[src_offset+1]; float ker0 = kernel[(fy * kw + fx) * c_p + oz]; float ker1 = kernel[(fy * kw + fx) * c_p + oz + 1]; color0 = color0 + inp0 * ker0; color1 = color1 + inp1 * ker1; } } color0 = max(color0, minV); color0 = min(color0, maxV); color1 = max(color1, minV); color1 = min(color1, maxV); int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz; output[dst_offset] = color0; output[dst_offset+1] = color1; } } __global__ void CONV_DW_HALF2_OPT(const half2* input, const half2* kernel, const half2* bias, half2 *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int dw, const int dh, const int sw, const int sh, const int pw, const int ph, const int total, DivModFast d_oc, DivModFast d_ow, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total/2; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox, tmp1, ob; d_oc.divmod(index, tmp1, oz_2); d_ow.divmod(tmp1, tmp2, ox); d_oh.divmod(tmp2, ob, oy); int oz = oz_2; int ix = ox * sw - pw; int iy = oy * sh - ph; half2 color = bias[oz]; int fxSta = max(0, -ix); int fySta = max(0, -iy); int fxEnd = min(kw, iw - ix); int fyEnd = min(kh, ih - iy); int fx, fy, fz; for (fy=fySta; fy<fyEnd; ++fy) { int sy = fy + iy; for (fx=fxSta; fx<fxEnd; ++fx) { int sx = fx + ix; int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz; half2 inp = input[src_offset]; half2 ker = kernel[(fy * kw + fx) * c_p + oz]; color = __hfma2(inp, ker, color); } } color.x = max(color.x, minV); color.x = min(color.x, maxV); color.y = max(color.y, minV); color.y = min(color.y, maxV); int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz; output[dst_offset] = color; } } __global__ void CONV_DW3x3_HALF2_OPT(const half2* input, const half2* kernel, const half2* bias, half2 *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int dw, const int dh, const int sw, const int sh, const int pw, const int ph, const int total, DivModFast d_oc, DivModFast d_ow, DivModFast d_oh ) { for (size_t index = 
blockIdx.x * blockDim.x + threadIdx.x; index < total/4; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox_2, tmp1, ob; d_oc.divmod(index, tmp1, oz_2); d_ow.divmod(tmp1, tmp2, ox_2); d_oh.divmod(tmp2, ob, oy); int oz = oz_2; int ox = ox_2 << 1; int ix = ox - 1; int iy = oy - 1; half2 color0 = bias[oz]; half2 color1 = color0; half2 zero; zero.x = (half)0.0; zero.y = (half)0.0; half2 inp[12]; half2 ker[3][3]; for(int j=0; j<3; j++) { if(iy < 0 && j==0) { for(int i=0; i<4; i++) { inp[i] = zero; } continue; } if(iy+2 > ih-1 && j==2) { for(int i=0; i<4; i++) { inp[8+i] = zero; } continue; } for(int i=0; i<4; i++) { if(ix < 0 && i==0) { for(int j=0; j<3; j++) { inp[4*j+0] = zero; } continue; } if(ix+3 > iw-1 && i==3) { for(int j=0; j<3; j++) { inp[4*j+3] = zero; } continue; } int src_offset = ((ob * ih + iy+j) * iw + ix+i) * c_p + oz; inp[4*j+i] = input[src_offset]; } } for(int j=0; j<3; j++) { for(int i=0; i<3; i++) { ker[j][i] = kernel[(j * 3 + i) * c_p + oz]; } } for(int j=0; j<3; j++) { for(int i=0; i<3; i++) { color0 = __hfma2(inp[4*j+i], ker[j][i], color0); color1 = __hfma2(inp[4*j+i+1], ker[j][i], color1); } } color0.x = max(color0.x, minV); color0.x = min(color0.x, maxV); color0.y = max(color0.y, minV); color0.y = min(color0.y, maxV); color1.x = max(color1.x, minV); color1.x = min(color1.x, maxV); color1.y = max(color1.y, minV); color1.y = min(color1.y, maxV); int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz; output[dst_offset] = color0; output[dst_offset+c_p] = color1; } } __global__ void CONV_DW_OPT(const float* input, const half* kernel, const half* bias, float *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int dw, const int dh, const int sw, const int sh, const int pw, const int ph, const int total, DivModFast d_oc, DivModFast d_ow, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total / 2; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox, tmp1, ob; d_oc.divmod(index, tmp1, oz_2); d_ow.divmod(tmp1, tmp2, ox); d_oh.divmod(tmp2, ob, oy); int oz = oz_2 << 1; int ix = ox * sw - pw; int iy = oy * sh - ph; float color0 = bias[oz]; float color1 = bias[oz+1]; int fxSta = max(0, -ix); int fySta = max(0, -iy); int fxEnd = min(kw, iw - ix); int fyEnd = min(kh, ih - iy); int fx, fy, fz; for (fy=fySta; fy<fyEnd; ++fy) { int sy = fy + iy; for (fx=fxSta; fx<fxEnd; ++fx) { int sx = fx + ix; int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz; float inp0 = input[src_offset]; float inp1 = input[src_offset+1]; float ker0 = kernel[(fy * kw + fx) * c_p + oz]; float ker1 = kernel[(fy * kw + fx) * c_p + oz + 1]; color0 = color0 + inp0 * ker0; color1 = color1 + inp1 * ker1; } } color0 = max(color0, minV); color0 = min(color0, maxV); color1 = max(color1, minV); color1 = min(color1, maxV); int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz; output[dst_offset] = color0; output[dst_offset+1] = color1; } } template<typename T> __global__ void CONV_DW_MULTI_WIDTH4(const T* input, const half* kernel, const half* bias, T *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int total, DivModFast d_oc, DivModFast d_ow_4, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total / 4; index += blockDim.x * gridDim.x) { int oz, tmp2, oy, ox_4, tmp1, ob; d_oc.divmod(index, tmp1, oz); 
d_ow_4.divmod(tmp1, tmp2, ox_4); d_oh.divmod(tmp2, ob, oy); float color0 = bias[oz]; float color1 = color0; float color2 = color0; float color3 = color0; // Parallel pipelining read and calculate float src; float filter0, filter1, filter2, filter3; int src_offset = ((ob * ih + oy) * iw + (ox_4 << 2)) * c_p + oz; int filter_offset = 0 * c_p + oz; src = input[src_offset + 0 * c_p]; filter0 = kernel[filter_offset + 0 * c_p]; color0 += (src * filter0); filter1 = kernel[filter_offset + 1 * c_p]; src = input[src_offset + 1 * c_p]; color0 += (src * filter1); color1 += (src * filter0); filter2 = kernel[filter_offset + 2 * c_p]; src = input[src_offset + 2 * c_p]; color0 += (src * filter2); color1 += (src * filter1); color2 += (src * filter0); filter3 = kernel[filter_offset + 3 * c_p]; for (int fx=3; fx<kw; ++fx) { src = input[src_offset + fx * c_p]; color0 += (src * filter3); color1 += (src * filter2); color2 += (src * filter1); color3 += (src * filter0); filter0 = filter1; filter1 = filter2; filter2 = filter3; filter3 = kernel[filter_offset + (fx+1) * c_p]; } src = input[src_offset + kw * c_p]; color1 += (src * filter2); color2 += (src * filter1); color3 += (src * filter0); src = input[src_offset + (kw+1) * c_p]; color2 += (src * filter2); color3 += (src * filter1); src = input[src_offset + (kw+2) * c_p]; color3 += (src * filter2); color0 = max(color0, minV); color0 = min(color0, maxV); color1 = max(color1, minV); color1 = min(color1, maxV); color2 = max(color2, minV); color2 = min(color2, maxV); color3 = max(color3, minV); color3 = min(color3, maxV); int dst_offset = ((ob * oh + oy) * ow + (ox_4 << 2)) * c_p + oz; output[dst_offset] = color0; output[dst_offset+c_p] = color1; output[dst_offset+2*c_p] = color2; output[dst_offset+3*c_p] = color3; } } __global__ void CONV_DW_MULTI_WIDTH_CHANNEL(const float* input, const half* kernel, const half* bias, float *output, const float maxV, const float minV, const int iw, const int ih, const int c, const int c_p, const int ow, const int oh, const int kw, const int kh, const int total, DivModFast d_oc_2, DivModFast d_ow_2, DivModFast d_oh ) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < total / 4; index += blockDim.x * gridDim.x) { int oz_2, tmp2, oy, ox_2, tmp1, ob; d_oc_2.divmod(index, tmp1, oz_2); d_ow_2.divmod(tmp1, tmp2, ox_2); d_oh.divmod(tmp2, ob, oy); float2 color0 = __half22float2((( half2 *)(bias + (oz_2 << 1)))[0]); float2 color1 = color0; // Parallel pipelining read and calculate float src0, src2, filter0, filter2; int src_offset = ((ob * ih + oy) * iw + (ox_2 << 1)) * c_p + (oz_2 << 1); int filter_offset = 0 * c_p + (oz_2 << 1); float2 src = ((float2 *)(input + src_offset + 0 * c_p))[0]; float2 filter = __half22float2(((half2 *)(kernel + filter_offset + 0 * c_p))[0]); color0.x += (src.x * filter.x); color0.y += (src.y * filter.y); for (int fx=1; fx<kw; ++fx) { src = ((float2 *)(input + src_offset + fx * c_p))[0]; color1.x += (src.x * filter.x); color1.y += (src.y * filter.y); filter = __half22float2(((half2 *)(void *)(kernel + filter_offset + fx * c_p))[0]); color0.x += (src.x * filter.x); color0.y += (src.y * filter.y); } src = ((float2 *)(input + src_offset + kw * c_p))[0]; color1.x += (src.x * filter.x); color1.y += (src.y * filter.y); color0.x = max(color0.x, minV); color0.x = min(color0.x, maxV); color1.x = max(color1.x, minV); color1.x = min(color1.x, maxV); color0.y = max(color0.y, minV); color0.y = min(color0.y, maxV); color1.y = max(color1.y, minV); color1.y = min(color1.y, maxV); int dst_offset = ((ob * oh + oy) * 
ow + (ox_2 << 1)) * c_p + (oz_2 << 1); ((float2 *)(output + dst_offset))[0] = color0; ((float2 *)(output + dst_offset + c_p))[0] = color1; } } static std::shared_ptr<ConvDepthWiseExecution::Resource> _makeResource(const Op* op, Backend* bn) { std::shared_ptr<ConvDepthWiseExecution::Resource> res(new ConvDepthWiseExecution::Resource); auto pool = static_cast<CUDABackend*>(bn)->getStaticBufferPool(); auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime(); auto conv = op->main_as_Convolution2D(); auto convCommon = conv->common(); int kernelX = convCommon->kernelX(); int kernelY = convCommon->kernelY(); int depth = convCommon->outputCount(); int depthC = UP_DIV(depth, PACK_NUMBER); res->weightTensor.reset(Tensor::createDevice<float>({kernelX * kernelY * depthC * PACK_NUMBER})); bool success = bn->onAcquireBuffer(res->weightTensor.get(), Backend::STATIC); if (!success) { return nullptr; } res->mFilter = (void *)res->weightTensor.get()->buffer().device; FuseRegion reg; int offset[8 * PACK_NUMBER]; auto regionStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(FuseRegion)); auto offsetGpuStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(offset)); auto offsetGpu = (uint8_t*)offsetGpuStorage.first + offsetGpuStorage.second; //weight host->device const float* filterDataPtr = nullptr; int weightSize = 0; std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon; ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize); auto tempWeightStorage = pool->alloc(depthC * PACK_NUMBER * kernelY * kernelX * sizeof(float)); auto tempWeight = (uint8_t*)tempWeightStorage.first + tempWeightStorage.second; cuda_check(cudaMemset(tempWeight, 0, depthC * PACK_NUMBER * kernelY * kernelX * sizeof(float))); cuda_check(cudaMemcpy(tempWeight, filterDataPtr, weightSize*sizeof(float), cudaMemcpyHostToDevice)); reg.size[0] = 1; reg.size[1] = kernelY * kernelX; reg.size[2] = depthC * PACK_NUMBER; reg.srcStride[0] = 0; reg.srcStride[1] = 1; reg.srcStride[2] = kernelY * kernelX; reg.dstStride[0] = 0; reg.dstStride[1] = depthC * PACK_NUMBER; reg.dstStride[2] = 1; offset[0] = 1; offset[1] = kernelY * kernelX; offset[2] = depth; offset[3] = 0; offset[4] = 1; offset[5] = reg.size[1]; offset[6] = reg.size[2]; offset[7] = 0; reg.fuseNumber = 1; runtime->memcpy((uint8_t*)regionStorage.first + regionStorage.second, &reg, sizeof(FuseRegion), MNNMemcpyHostToDevice, true); runtime->memcpy(offsetGpu, offset, 8 * sizeof(int), MNNMemcpyHostToDevice, true); FuseRasterBlitFloatToHalf((uint8_t*)res->mFilter, (uint8_t*)tempWeight, (FuseRegion*)((uint8_t*)regionStorage.first + regionStorage.second), offsetGpu, runtime); pool->free(tempWeightStorage); res->biasTensor.reset(Tensor::createDevice<float>({depthC * PACK_NUMBER})); success = bn->onAcquireBuffer(res->biasTensor.get(), Backend::STATIC); res->mBias = (void *)res->biasTensor.get()->buffer().device; if (!success) { return nullptr; } if(conv->bias() != nullptr) { auto tempBiasStorage = pool->alloc(depth * sizeof(float)); auto tempBias = (uint8_t*)tempBiasStorage.first + tempBiasStorage.second; cuda_check(cudaMemcpy(tempBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), cudaMemcpyHostToDevice)); reg.size[0] = 1; reg.size[1] = 1; reg.size[2] = depthC * PACK_NUMBER; reg.srcStride[0] = 0; reg.srcStride[1] = 0; reg.srcStride[2] = 1; reg.dstStride[0] = 0; reg.dstStride[1] = 0; reg.dstStride[2] = 1; offset[0] = 1; offset[1] = 1; offset[2] = conv->bias()->size(); offset[3] = 0; offset[4] = 1; offset[5] 
= 1; offset[6] = reg.size[2]; offset[7] = 0; reg.fuseNumber = 1; runtime->memcpy((uint8_t*)regionStorage.first + regionStorage.second, &reg, sizeof(FuseRegion), MNNMemcpyHostToDevice, true); runtime->memcpy(offsetGpu, offset, 8 * sizeof(int), MNNMemcpyHostToDevice, true); FuseRasterBlitFloatToHalf((uint8_t*)res->mBias, (uint8_t*)tempBias, (FuseRegion*)((uint8_t*)regionStorage.first + regionStorage.second), offsetGpu, runtime); pool->free(tempBiasStorage); } static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(regionStorage); static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(offsetGpuStorage); return res; } ConvDepthWiseExecution::ConvDepthWiseExecution(const Op* op, Backend* bn, std::shared_ptr<Resource> resource) : Execution(bn) { mOp = op; mResource = resource; auto pool = static_cast<CUDABackend*>(bn)->getStaticBufferPool(); } ConvDepthWiseExecution::~ ConvDepthWiseExecution() { // } ErrorCode ConvDepthWiseExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) { auto pad = ConvolutionCommon::convolutionPad(inputs[0], outputs[0], mOp->main_as_Convolution2D()->common()); auto conv = mOp->main_as_Convolution2D(); auto convCommon = mOp->main_as_Convolution2D()->common(); int channel = inputs[0]->channel(); int channelDiv = UP_DIV(channel, PACK_NUMBER); parameters.pad[0] = pad.first; parameters.pad[1] = pad.second; parameters.kernelSize[0] = convCommon->kernelX(); parameters.kernelSize[1] = convCommon->kernelY(); parameters.stride[0] = convCommon->strideX(); parameters.stride[1] = convCommon->strideY(); parameters.dilate[0] = convCommon->dilateX(); parameters.dilate[1] = convCommon->dilateY(); parameters.inputSize[0] = inputs[0]->width(); parameters.inputSize[1] = inputs[0]->height(); parameters.channel = channelDiv; parameters.outputSize[0] = outputs[0]->width(); parameters.outputSize[1] = outputs[0]->height(); parameters.batch = inputs[0]->batch(); parameters.total = parameters.batch * parameters.outputSize[1] * parameters.outputSize[0] * parameters.channel * PACK_NUMBER; if (static_cast<CUDABackend*>(backend())->useFp16()) { // Do nothing } else { parameters.minValue = -FLT_MAX; parameters.maxValue = FLT_MAX; } if (convCommon->relu()) { parameters.minValue = 0.0f; } if (convCommon->relu6()) { parameters.minValue = 0.0f; parameters.maxValue = 6.0f; } mTotalCount = parameters.total; //MNN_PRINT("%d-%d-%d-%d, %d-%d-%d-%d-%d\n", parameters.kernelSize[0], parameters.kernelSize[1], parameters.stride[0], parameters.stride[1], parameters.inputSize[0], parameters.inputSize[1], channel, parameters.outputSize[0], parameters.outputSize[1]); return NO_ERROR; } ErrorCode ConvDepthWiseExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) { auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime(); auto& prop = runtime->prop(); int limitThreads = UP_DIV(mTotalCount, prop.multiProcessorCount); int threads_num = ALIMIN(prop.maxThreadsPerBlock/2, limitThreads); int block_num = prop.multiProcessorCount; DivModFast d_oc(parameters.channel * PACK_NUMBER / 2); DivModFast d_ow(parameters.outputSize[0]); DivModFast d_oh(parameters.outputSize[1]); const float maxV = parameters.maxValue; const float minV = parameters.minValue; const int iw = parameters.inputSize[0]; const int ih = parameters.inputSize[1]; const int c = parameters.channel; const int c_p = c * PACK_NUMBER; const int ow = parameters.outputSize[0]; const int oh = parameters.outputSize[1]; const int kw = parameters.kernelSize[0]; const int kh = 
parameters.kernelSize[1]; const int dw = parameters.dilate[0]; const int dh = parameters.dilate[1]; const int sw = parameters.stride[0]; const int sh = parameters.stride[1]; const int pw = parameters.pad[0]; const int ph = parameters.pad[1]; const int total = parameters.total; if (static_cast<CUDABackend*>(backend())->useFp16()) { if(parameters.kernelSize[0]==3 && parameters.kernelSize[1]==3 && parameters.stride[0]==1 && parameters.stride[1]==1 && parameters.pad[0]==1 && parameters.pad[1]==1 && parameters.outputSize[0] % 2 ==0) { DivModFast d_ow2(parameters.outputSize[0]/2); CONV_DW3x3_HALF2_OPT<<<block_num, threads_num>>>((const half2*)inputs[0]->deviceId(), (const half2*)mResource->mFilter, (const half2*)mResource->mBias, (half2*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p/2, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow2, d_oh); checkKernelErrors; return NO_ERROR; } if(parameters.dilate[0] == 1 && parameters.dilate[1] == 1) { if(sw == 1 && sh == 1 && pw == 0 && ph == 0 && kw > 3 && kw < 12 && kh == 1 && pw == 0 && ph == 0 && ow % 4 == 0) { DivModFast d_oc(parameters.channel * PACK_NUMBER); DivModFast d_ow(ow/4); CONV_DW_MULTI_WIDTH4<<<block_num, threads_num>>>((const half*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (half*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, total, d_oc, d_ow, d_oh); checkKernelErrors; } else { CONV_DW_HALF2_OPT<<<block_num, threads_num>>>((const half2*)inputs[0]->deviceId(), (const half2*)mResource->mFilter, (const half2*)mResource->mBias, (half2*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p/2, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow, d_oh);//_HALF_OPT checkKernelErrors; } } else { CONV_DW<<<block_num, threads_num>>>((const half*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (half*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow, d_oh); checkKernelErrors; } return NO_ERROR; } if (inputs.size() == 1) { // block_num = runtime->blocks_num(mTotalCount); // threads_num = runtime->threads_num(); if(parameters.dilate[0] == 1 && parameters.dilate[1] == 1) { if(sw == 1 && sh == 1 && pw == 0 && ph == 0 && kw > 3 && kw < 12 && kh == 1 && pw == 0 && ph == 0) { if(ow % 4 == 0) { DivModFast d_oc(parameters.channel * PACK_NUMBER); DivModFast d_ow(ow/4); CONV_DW_MULTI_WIDTH4<<<block_num, threads_num>>>((const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (float*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, total, d_oc, d_ow, d_oh); checkKernelErrors; } else if(ow % 2 == 0) { DivModFast d_oc(parameters.channel * PACK_NUMBER / 2); DivModFast d_ow(ow/2); CONV_DW_MULTI_WIDTH_CHANNEL<<<block_num, threads_num>>>((const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (float*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, total, d_oc, d_ow, d_oh); checkKernelErrors; } } else { CONV_DW_OPT<<<block_num, threads_num>>>((const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (float*)outputs[0]->deviceId(), maxV, minV, iw, ih, c, c_p, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow, d_oh); checkKernelErrors; } } else { CONV_DW<<<block_num, threads_num>>>((const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter, (const half*)mResource->mBias, (float*)outputs[0]->deviceId(), maxV, 
minV, iw, ih, c, c_p, ow, oh, kw, kh, dw, dh, sw, sh, pw, ph, total, d_oc, d_ow, d_oh); checkKernelErrors; } } return NO_ERROR; } class ConvDepthWiseExecutionCreator : public CUDABackend::Creator { public: virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* backend) const override { if (inputs.size() > 1) { return nullptr; } auto res = _makeResource(op, backend); if (nullptr == res) { return nullptr; } return new ConvDepthWiseExecution(op, backend, res); } }; static CUDACreatorRegister<ConvDepthWiseExecutionCreator> __init(OpType_ConvolutionDepthwise); } }
d8fa2bcecb39bfb4a51e69b737a7edc3c17b5bbf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * purpose: CUDA managed unified memory for >= pascal architectures;
 * this version just uses hipMallocManaged() on the host,
 * then runs kernels on the GPU to add together two arrays
 * of size 1 GB and save the results into a third array;
 * n.b. here we want to stick to a separated initialization
 * kernel, but then before running the actual compute
 * kernel do the unified memory prefetching and see
 * whether this will affect compute/memory bandwidth/page
 * faults performance;
 * result: from profiling via 'nvprof ./a.out' we now see pretty
 * much the best results so far, hence prefetching seems to
 * really pay off ! interestingly the number of page faults
 * has also decreased;
 * compilation: nvcc ./unified_memory_example_4.cu
 * usage: ./a.out
 */
#include <stdio.h>

#define ARRAYDIM 268435456

/*
 * GPU kernel doing the initialization
 */
__global__ void KrnlDmmyInit(float *x, float *y, float *z)
{
    int i;

    i = (blockIdx.x * blockDim.x) + threadIdx.x;
    x[i] = (float) i;
    y[i] = (float) (i + 1);

    return;
}

/*
 * GPU kernel doing the calculation, ie adding together two arrays
 */
__global__ void KrnlDmmyCalc(float *x, float *y, float *z)
{
    int i;

    i = (blockIdx.x * blockDim.x) + threadIdx.x;
    z[i] = x[i] + y[i];

    return;
}

/*
 * host main
 */
int main()
{
    int i, cudaRtrn;
    dim3 thrds_per_block, blcks_per_grid;
    float *a, *b, *c;

    /*
     * Let us make use of hipMallocManaged() to allocate 3 arrays
     * of size 1 GB each for subsequent usage on the GPU.
     */
    if ((cudaRtrn = hipMallocManaged(&a, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array a[], %d ***\n", cudaRtrn);
    }
    if ((cudaRtrn = hipMallocManaged(&b, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array b[], %d ***\n", cudaRtrn);
    }
    if ((cudaRtrn = hipMallocManaged(&c, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array c[], %d ***\n", cudaRtrn);
    }

    /*
     * next we want to call simple kernels that (i) initialize array
     * elements a[] and b[] with thread-specific values and (ii) add
     * together these values and store back the results into array c[]
     * where the latter task shall be repeated within a loop over
     * 100 iterations and memory be explicitly sent to the device
     * with the help of prefetching
     */
    thrds_per_block.x = 256;
    blcks_per_grid.x = ARRAYDIM / thrds_per_block.x;
    hipLaunchKernelGGL(( KrnlDmmyInit), dim3(blcks_per_grid), dim3(thrds_per_block), 0, 0, a, b, c);
    hipDeviceSynchronize();
    hipMemPrefetchAsync(a, ARRAYDIM * sizeof(float), 0, NULL);
    hipMemPrefetchAsync(b, ARRAYDIM * sizeof(float), 0, NULL);
    hipMemPrefetchAsync(c, ARRAYDIM * sizeof(float), 0, NULL);
    for (i=0; i<100; i++) {
        hipLaunchKernelGGL(( KrnlDmmyCalc), dim3(blcks_per_grid), dim3(thrds_per_block), 0, 0, a, b, c);
        hipDeviceSynchronize();
    }

    hipFree(c);
    hipFree(b);
    hipFree(a);

    return(0);
}
d8fa2bcecb39bfb4a51e69b737a7edc3c17b5bbf.cu
/*
 * purpose: CUDA managed unified memory for >= pascal architectures;
 * this version just uses cudaMallocManaged() on the host,
 * then runs kernels on the GPU to add together two arrays
 * of size 1 GB and save the results into a third array;
 * n.b. here we want to stick to a separated initialization
 * kernel, but then before running the actual compute
 * kernel do the unified memory prefetching and see
 * whether this will affect compute/memory bandwidth/page
 * faults performance;
 * result: from profiling via 'nvprof ./a.out' we now see pretty
 * much the best results so far, hence prefetching seems to
 * really pay off ! interestingly the number of page faults
 * has also decreased;
 * compilation: nvcc ./unified_memory_example_4.cu
 * usage: ./a.out
 */
#include <stdio.h>

#define ARRAYDIM 268435456

/*
 * GPU kernel doing the initialization
 */
__global__ void KrnlDmmyInit(float *x, float *y, float *z)
{
    int i;

    i = (blockIdx.x * blockDim.x) + threadIdx.x;
    x[i] = (float) i;
    y[i] = (float) (i + 1);

    return;
}

/*
 * GPU kernel doing the calculation, ie adding together two arrays
 */
__global__ void KrnlDmmyCalc(float *x, float *y, float *z)
{
    int i;

    i = (blockIdx.x * blockDim.x) + threadIdx.x;
    z[i] = x[i] + y[i];

    return;
}

/*
 * host main
 */
int main()
{
    int i, cudaRtrn;
    dim3 thrds_per_block, blcks_per_grid;
    float *a, *b, *c;

    /*
     * Let us make use of cudaMallocManaged() to allocate 3 arrays
     * of size 1 GB each for subsequent usage on the GPU.
     */
    if ((cudaRtrn = cudaMallocManaged(&a, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array a[], %d ***\n", cudaRtrn);
    }
    if ((cudaRtrn = cudaMallocManaged(&b, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array b[], %d ***\n", cudaRtrn);
    }
    if ((cudaRtrn = cudaMallocManaged(&c, ARRAYDIM * sizeof(float))) != 0) {
        printf("*** allocation failed for array c[], %d ***\n", cudaRtrn);
    }

    /*
     * next we want to call simple kernels that (i) initialize array
     * elements a[] and b[] with thread-specific values and (ii) add
     * together these values and store back the results into array c[]
     * where the latter task shall be repeated within a loop over
     * 100 iterations and memory be explicitly sent to the device
     * with the help of prefetching
     */
    thrds_per_block.x = 256;
    blcks_per_grid.x = ARRAYDIM / thrds_per_block.x;
    KrnlDmmyInit<<<blcks_per_grid, thrds_per_block>>>(a, b, c);
    cudaDeviceSynchronize();
    cudaMemPrefetchAsync(a, ARRAYDIM * sizeof(float), 0, NULL);
    cudaMemPrefetchAsync(b, ARRAYDIM * sizeof(float), 0, NULL);
    cudaMemPrefetchAsync(c, ARRAYDIM * sizeof(float), 0, NULL);
    for (i=0; i<100; i++) {
        KrnlDmmyCalc<<<blcks_per_grid, thrds_per_block>>>(a, b, c);
        cudaDeviceSynchronize();
    }

    cudaFree(c);
    cudaFree(b);
    cudaFree(a);

    return(0);
}
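Both versions hard-code device 0 in the prefetch calls. A hedged drop-in variant for those three lines, which asks the runtime for the currently active device instead (reusing a, b, c and ARRAYDIM from the file above), could look like this:

// Sketch only, not part of the dataset: prefetch to the active device
// rather than assuming it is device 0.
int dev = 0;
cudaGetDevice(&dev);
cudaMemPrefetchAsync(a, ARRAYDIM * sizeof(float), dev, NULL);
cudaMemPrefetchAsync(b, ARRAYDIM * sizeof(float), dev, NULL);
cudaMemPrefetchAsync(c, ARRAYDIM * sizeof(float), dev, NULL);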
28855ee09feaeccecf86d4b3ffa3bb954f1464cf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <limits>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void PerturbationForward(const int n, const Dtype* in,
    const Dtype* mask, const Dtype scale, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] * mask[index] * scale;
  }
}

template <typename Dtype>
void PerturbationLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  if (this->phase_ == TRAIN) {
    Dtype* mask = static_cast<Dtype*>(rand_vec_.mutable_gpu_data());
    caffe_gpu_rng_gaussian(count, mean_, std_, mask);
    hipLaunchKernelGGL(( PerturbationForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, mask, scale_, top_data);
    CUDA_POST_KERNEL_CHECK;
  } else {
    caffe_copy(count, bottom_data, top_data);
  }
}

template <typename Dtype>
__global__ void PerturbationBackward(const int n, const Dtype* in_diff,
    const Dtype* mask, const Dtype scale, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_diff[index] * mask[index] * scale;
  }
}

template <typename Dtype>
void PerturbationLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    if (this->phase_ == TRAIN) {
      const Dtype* mask = static_cast<const Dtype*>(rand_vec_.gpu_data());
      const int count = bottom[0]->count();
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(( PerturbationBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          count, top_diff, mask, scale_, bottom_diff);
      CUDA_POST_KERNEL_CHECK;
    } else {
      caffe_copy(top[0]->count(), top_diff, bottom_diff);
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(PerturbationLayer);

}  // namespace caffe
28855ee09feaeccecf86d4b3ffa3bb954f1464cf.cu
#include <algorithm>
#include <limits>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void PerturbationForward(const int n, const Dtype* in,
    const Dtype* mask, const Dtype scale, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] * mask[index] * scale;
  }
}

template <typename Dtype>
void PerturbationLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  if (this->phase_ == TRAIN) {
    Dtype* mask = static_cast<Dtype*>(rand_vec_.mutable_gpu_data());
    caffe_gpu_rng_gaussian(count, mean_, std_, mask);
    PerturbationForward<Dtype><<<CAFFE_GET_BLOCKS(count),
        CAFFE_CUDA_NUM_THREADS>>>(count, bottom_data, mask, scale_, top_data);
    CUDA_POST_KERNEL_CHECK;
  } else {
    caffe_copy(count, bottom_data, top_data);
  }
}

template <typename Dtype>
__global__ void PerturbationBackward(const int n, const Dtype* in_diff,
    const Dtype* mask, const Dtype scale, Dtype* out_diff) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_diff[index] * mask[index] * scale;
  }
}

template <typename Dtype>
void PerturbationLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    if (this->phase_ == TRAIN) {
      const Dtype* mask = static_cast<const Dtype*>(rand_vec_.gpu_data());
      const int count = bottom[0]->count();
      // NOLINT_NEXT_LINE(whitespace/operators)
      PerturbationBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
          CAFFE_CUDA_NUM_THREADS>>>(
          count, top_diff, mask, scale_, bottom_diff);
      CUDA_POST_KERNEL_CHECK;
    } else {
      caffe_copy(top[0]->count(), top_diff, bottom_diff);
    }
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(PerturbationLayer);

}  // namespace caffe
f1a918be140bc3d38e5b46d264a077be51ff42a2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- // #define EIGEN_USE_GPU #include "BallQueryOpKernel.h" #include "open3d/ml/Helper.h" #include "open3d/ml/contrib/BallQuery.cuh" #include "open3d/ml/contrib/cuda_utils.h" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::contrib; using namespace tensorflow; class BallQueryOpKernelCUDA : public BallQueryOpKernel { public: explicit BallQueryOpKernelCUDA(OpKernelConstruction *construction) : BallQueryOpKernel(construction) {} void Kernel(tensorflow::OpKernelContext *context, int b, int n, int m, float radius, int nsample, const float *new_xyz, const float *xyz, int *idx) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) auto stream = context->eigen_gpu_device().stream(); hipError_t err; dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( ball_query_kernel), dim3(blocks), dim3(threads), 0, stream, b, n, m, radius, nsample, new_xyz, xyz, idx); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } }; REGISTER_KERNEL_BUILDER(Name("Open3DBallQuery").Device(DEVICE_GPU), BallQueryOpKernelCUDA);
f1a918be140bc3d38e5b46d264a077be51ff42a2.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // Copyright (c) 2018-2023 www.open3d.org // SPDX-License-Identifier: MIT // ---------------------------------------------------------------------------- // #define EIGEN_USE_GPU #include "BallQueryOpKernel.h" #include "open3d/ml/Helper.h" #include "open3d/ml/contrib/BallQuery.cuh" #include "open3d/ml/contrib/cuda_utils.h" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::contrib; using namespace tensorflow; class BallQueryOpKernelCUDA : public BallQueryOpKernel { public: explicit BallQueryOpKernelCUDA(OpKernelConstruction *construction) : BallQueryOpKernel(construction) {} void Kernel(tensorflow::OpKernelContext *context, int b, int n, int m, float radius, int nsample, const float *new_xyz, const float *xyz, int *idx) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) auto stream = context->eigen_gpu_device().stream(); cudaError_t err; dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); ball_query_kernel<<<blocks, threads, 0, stream>>>( b, n, m, radius, nsample, new_xyz, xyz, idx); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } }; REGISTER_KERNEL_BUILDER(Name("Open3DBallQuery").Device(DEVICE_GPU), BallQueryOpKernelCUDA);
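For reference, ball query gathers, for each query point in new_xyz, the indices of up to nsample points of xyz that lie within radius. The sketch below is a CPU version of that idea (single batch, row-major m*3 and n*3 arrays); the convention of pre-filling a row with the first neighbour found is an assumption borrowed from common PointNet++ implementations, since the actual ball_query_kernel lives in BallQuery.cuh and is not shown in this record.

// ball_query_cpu.h -- CPU sketch only; padding convention is an assumption (see note above).
#include <vector>

// new_xyz: m query centers (m*3 floats), xyz: n points (n*3 floats); returns m*nsample indices.
std::vector<int> ball_query_cpu(int n, int m, float radius, int nsample,
                                const float* new_xyz, const float* xyz) {
    std::vector<int> idx(static_cast<size_t>(m) * nsample, 0);
    const float r2 = radius * radius;
    for (int q = 0; q < m; ++q) {
        int cnt = 0;
        for (int p = 0; p < n && cnt < nsample; ++p) {
            const float dx = xyz[3 * p + 0] - new_xyz[3 * q + 0];
            const float dy = xyz[3 * p + 1] - new_xyz[3 * q + 1];
            const float dz = xyz[3 * p + 2] - new_xyz[3 * q + 2];
            if (dx * dx + dy * dy + dz * dz < r2) {
                if (cnt == 0)  // assumed convention: pad the row with the first neighbour found
                    for (int k = 0; k < nsample; ++k) idx[q * nsample + k] = p;
                idx[q * nsample + cnt] = p;
                ++cnt;
            }
        }
    }
    return idx;
}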
b1b1f16ff4b1d9f1bd5b23d96dde9546aa589d11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <stdio.h> #include <cassert> #include <iostream> #include "mish.h" namespace nvinfer1 { MishPlugin::MishPlugin() { } MishPlugin::~MishPlugin() { } // create the plugin at runtime from a byte stream MishPlugin::MishPlugin(const void* data, size_t length) { assert(length == sizeof(input_size_)); input_size_ = *reinterpret_cast<const int*>(data); } void MishPlugin::serialize(void* buffer) const noexcept { *reinterpret_cast<int*>(buffer) = input_size_; } size_t MishPlugin::getSerializationSize() const noexcept { return sizeof(input_size_); } int MishPlugin::initialize()noexcept { return 0; } bool MishPlugin::supportsFormat(DataType type, PluginFormat format) const noexcept { return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } void MishPlugin::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept { } Dims MishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)noexcept { assert(nbInputDims == 1); assert(index == 0); input_size_ = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2]; // Output dimensions return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]); } // Set plugin namespace void MishPlugin::setPluginNamespace(const char* pluginNamespace)noexcept { mPluginNamespace = pluginNamespace; } const char* MishPlugin::getPluginNamespace() const noexcept { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType MishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool MishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool MishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const noexcept { return false; } void MishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)noexcept { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void MishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)noexcept { } // Detach the plugin object from its execution context. 
void MishPlugin::detachFromContext()noexcept {} const char* MishPlugin::getPluginType() const noexcept { return "Mish_TRT"; } const char* MishPlugin::getPluginVersion() const noexcept { return "1"; } void MishPlugin::destroy()noexcept { delete this; } // Clone the plugin IPluginV2* MishPlugin::clone() const noexcept { MishPlugin *p = new MishPlugin(); p->input_size_ = input_size_; p->setPluginNamespace(mPluginNamespace); return p; } __device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);} __device__ float softplus_kernel(float x, float threshold = 20) { if (x > threshold) return x; // too large else if (x < -threshold) return expf(x); // too small return logf(expf(x) + 1); } __global__ void mish_kernel(const float *input, float *output, int num_elem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= num_elem) return; //float t = exp(input[idx]); //if (input[idx] > 20.0) { // t *= t; // output[idx] = (t - 1.0) / (t + 1.0); //} else { // float tt = t * t; // output[idx] = (tt + 2.0 * t) / (tt + 2.0 * t + 2.0); //} //output[idx] *= input[idx]; output[idx] = input[idx] * tanh_activate_kernel(softplus_kernel(input[idx])); } void MishPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) { int block_size = thread_count_; int grid_size = (input_size_ * batchSize + block_size - 1) / block_size; hipLaunchKernelGGL(( mish_kernel), dim3(grid_size), dim3(block_size), 0, 0, inputs[0], output, input_size_ * batchSize); } int MishPlugin::enqueue(int batchSize, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) noexcept { //assert(batchSize == 1); //GPU //CUDA_CHECK(hipStreamSynchronize(stream)); forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection MishPluginCreator::mFC{}; std::vector<PluginField> MishPluginCreator::mPluginAttributes; MishPluginCreator::MishPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* MishPluginCreator::getPluginName() const noexcept { return "Mish_TRT"; } const char* MishPluginCreator::getPluginVersion() const noexcept { return "1"; } const PluginFieldCollection* MishPluginCreator::getFieldNames()noexcept { return &mFC; } IPluginV2* MishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)noexcept { MishPlugin* obj = new MishPlugin(); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2* MishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)noexcept { // This object will be deleted when the network is destroyed, which will // call MishPlugin::destroy() MishPlugin* obj = new MishPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } void MishPluginCreator::setPluginNamespace(const char* libNamespace)noexcept { mNamespace = libNamespace; } const char* MishPluginCreator::getPluginNamespace() const noexcept { return mNamespace.c_str(); } }
b1b1f16ff4b1d9f1bd5b23d96dde9546aa589d11.cu
#include <cmath> #include <stdio.h> #include <cassert> #include <iostream> #include "mish.h" namespace nvinfer1 { MishPlugin::MishPlugin() { } MishPlugin::~MishPlugin() { } // create the plugin at runtime from a byte stream MishPlugin::MishPlugin(const void* data, size_t length) { assert(length == sizeof(input_size_)); input_size_ = *reinterpret_cast<const int*>(data); } void MishPlugin::serialize(void* buffer) const noexcept { *reinterpret_cast<int*>(buffer) = input_size_; } size_t MishPlugin::getSerializationSize() const noexcept { return sizeof(input_size_); } int MishPlugin::initialize()noexcept { return 0; } bool MishPlugin::supportsFormat(DataType type, PluginFormat format) const noexcept { return (type == DataType::kFLOAT && format == PluginFormat::kLINEAR); } void MishPlugin::configureWithFormat(const Dims* inputDims, int nbInputs, const Dims* outputDims, int nbOutputs, DataType type, PluginFormat format, int maxBatchSize) noexcept { } Dims MishPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)noexcept { assert(nbInputDims == 1); assert(index == 0); input_size_ = inputs[0].d[0] * inputs[0].d[1] * inputs[0].d[2]; // Output dimensions return Dims3(inputs[0].d[0], inputs[0].d[1], inputs[0].d[2]); } // Set plugin namespace void MishPlugin::setPluginNamespace(const char* pluginNamespace)noexcept { mPluginNamespace = pluginNamespace; } const char* MishPlugin::getPluginNamespace() const noexcept { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType MishPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool MishPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const noexcept { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool MishPlugin::canBroadcastInputAcrossBatch(int inputIndex) const noexcept { return false; } void MishPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)noexcept { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void MishPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)noexcept { } // Detach the plugin object from its execution context. 
void MishPlugin::detachFromContext()noexcept {} const char* MishPlugin::getPluginType() const noexcept { return "Mish_TRT"; } const char* MishPlugin::getPluginVersion() const noexcept { return "1"; } void MishPlugin::destroy()noexcept { delete this; } // Clone the plugin IPluginV2* MishPlugin::clone() const noexcept { MishPlugin *p = new MishPlugin(); p->input_size_ = input_size_; p->setPluginNamespace(mPluginNamespace); return p; } __device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);} __device__ float softplus_kernel(float x, float threshold = 20) { if (x > threshold) return x; // too large else if (x < -threshold) return expf(x); // too small return logf(expf(x) + 1); } __global__ void mish_kernel(const float *input, float *output, int num_elem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= num_elem) return; //float t = exp(input[idx]); //if (input[idx] > 20.0) { // t *= t; // output[idx] = (t - 1.0) / (t + 1.0); //} else { // float tt = t * t; // output[idx] = (tt + 2.0 * t) / (tt + 2.0 * t + 2.0); //} //output[idx] *= input[idx]; output[idx] = input[idx] * tanh_activate_kernel(softplus_kernel(input[idx])); } void MishPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) { int block_size = thread_count_; int grid_size = (input_size_ * batchSize + block_size - 1) / block_size; mish_kernel<<<grid_size, block_size>>>(inputs[0], output, input_size_ * batchSize); } int MishPlugin::enqueue(int batchSize, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { //assert(batchSize == 1); //GPU //CUDA_CHECK(cudaStreamSynchronize(stream)); forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection MishPluginCreator::mFC{}; std::vector<PluginField> MishPluginCreator::mPluginAttributes; MishPluginCreator::MishPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* MishPluginCreator::getPluginName() const noexcept { return "Mish_TRT"; } const char* MishPluginCreator::getPluginVersion() const noexcept { return "1"; } const PluginFieldCollection* MishPluginCreator::getFieldNames()noexcept { return &mFC; } IPluginV2* MishPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)noexcept { MishPlugin* obj = new MishPlugin(); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2* MishPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)noexcept { // This object will be deleted when the network is destroyed, which will // call MishPlugin::destroy() MishPlugin* obj = new MishPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } void MishPluginCreator::setPluginNamespace(const char* libNamespace)noexcept { mNamespace = libNamespace; } const char* MishPluginCreator::getPluginNamespace() const noexcept { return mNamespace.c_str(); } }
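The plugin's kernel evaluates mish(x) = x * tanh(softplus(x)), with softplus clamped at +/-20. Below is a self-contained check of just that activation, outside TensorRT; the file name, test range, and thread counts are arbitrary choices for this sketch.

// mish_check.cu -- standalone check of the activation the plugin implements; not TensorRT code.
#include <cstdio>
#include <cmath>
#include <vector>
#include <algorithm>
#include <cuda_runtime.h>

__device__ float mish_dev(float x) {
    // same softplus clamping as the plugin's softplus_kernel (threshold 20)
    float sp = (x > 20.0f) ? x : (x < -20.0f ? expf(x) : logf(1.0f + expf(x)));
    return x * tanhf(sp);
}

__global__ void mish_standalone(const float* in, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = mish_dev(in[i]);
}

int main() {
    const int n = 1 << 16;
    std::vector<float> h_in(n), h_out(n);
    for (int i = 0; i < n; ++i) h_in[i] = -8.0f + 16.0f * i / n;   // sweep of [-8, 8)
    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    mish_standalone<<<(n + 255) / 256, 256>>>(d_in, d_out, n);
    cudaMemcpy(h_out.data(), d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

    double max_err = 0.0;
    for (int i = 0; i < n; ++i) {
        double ref = (double)h_in[i] * std::tanh(std::log1p(std::exp((double)h_in[i])));
        max_err = std::max(max_err, std::fabs(ref - (double)h_out[i]));
    }
    printf("max |mish_gpu - mish_ref| = %g\n", max_err);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}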
b13a8cc02e172153e82a85181d27680697741a03.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> /* * This code implements the interleaved and neighbor-paired approaches to * parallel reduction in CUDA. For this example, the sum operation is used. A * variety of optimizations on parallel reduction aimed at reducing divergence * are also demonstrated, such as unrolling. */ // Recursive Implementation of Interleaved Pair Approach int recursiveReduce(int *data, int const size) { // terminate check if (size == 1) return data[0]; // renew the stride int const stride = size / 2; // in-place reduction for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; // calculate new reduction } // call recursively // Check for initial value on exit return recursiveReduce(data, stride); } // Neighbored Pair Implementation with divergence __global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Neighbored Pair Implementation with less divergence __global__ void reduceNeighboredLess (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { // convert tid into local array index int index = 2 * stride * tid; if (index < blockDim.x) { idata[index] += idata[index + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Interleaved Pair Implementation with less divergence __global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // 
synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 4; // unrolling 4 if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling warp if (tid < 32) { volatile int *vmem = idata; vmem[tid] += vmem[tid + 32]; vmem[tid] += vmem[tid + 16]; vmem[tid] += vmem[tid + 8]; vmem[tid] += vmem[tid + 4]; vmem[tid] += vmem[tid + 2]; vmem[tid] += vmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of 
this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } template <unsigned int iBlockSize> __global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling last warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += 
vsmem[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(hipSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = 512; // initial block size if(argc > 1) { blocksize = atoi(argv[1]); // block size from command line argument } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) { // mask off high 2 bytes to force max number to 255 h_idata[i] = (int)( rand() & 0xFF ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(hipMalloc((void **) &d_idata, bytes)); CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction iStart = seconds(); int cpu_sum = recursiveReduce (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); // kernel 1: reduceNeighbored CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 2: reduceNeighbored with less divergence CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceNeighboredLess), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 3: reduceInterleaved CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceInterleaved), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 4: reduceUnrolling2 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling2), dim3(grid.x / 2), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; 
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x); // kernel 5: reduceUnrolling4 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling4), dim3(grid.x / 4), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x); // kernel 6: reduceUnrolling8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrolling8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i]; // kernel 8: reduceUnrollWarps8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceUnrollWarps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnrollWarsp8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduceCompleteUnrollWarps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnroll CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); switch (blocksize) { case 1024: hipLaunchKernelGGL(( reduceCompleteUnroll<1024>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 512: hipLaunchKernelGGL(( reduceCompleteUnroll<512>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reduceCompleteUnroll<256>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduceCompleteUnroll<128>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reduceCompleteUnroll<64>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; } CHECK(hipDeviceSynchronize()); 
iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(hipFree(d_idata)); CHECK(hipFree(d_odata)); // reset device CHECK(hipDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
b13a8cc02e172153e82a85181d27680697741a03.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> /* * This code implements the interleaved and neighbor-paired approaches to * parallel reduction in CUDA. For this example, the sum operation is used. A * variety of optimizations on parallel reduction aimed at reducing divergence * are also demonstrated, such as unrolling. */ // Recursive Implementation of Interleaved Pair Approach int recursiveReduce(int *data, int const size) { // terminate check if (size == 1) return data[0]; // renew the stride int const stride = size / 2; // in-place reduction for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; // calculate new reduction } // call recursively // Check for initial value on exit return recursiveReduce(data, stride); } // Neighbored Pair Implementation with divergence __global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Neighbored Pair Implementation with less divergence __global__ void reduceNeighboredLess (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { // convert tid into local array index int index = 2 * stride * tid; if (index < blockDim.x) { idata[index] += idata[index + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } // Interleaved Pair Implementation with less divergence __global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for 
this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 4; // unrolling 4 if (idx + 3 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrolling8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling warp if (tid < 32) { volatile int *vmem = idata; vmem[tid] += vmem[tid + 32]; vmem[tid] += vmem[tid + 16]; vmem[tid] += vmem[tid + 8]; vmem[tid] += vmem[tid + 4]; vmem[tid] += vmem[tid + 2]; vmem[tid] += vmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // 
unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } template <unsigned int iBlockSize> __global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 8; // unrolling 8 if (idx + 7 * blockDim.x < n) { int a1 = g_idata[idx]; int a2 = g_idata[idx + blockDim.x]; int a3 = g_idata[idx + 2 * blockDim.x]; int a4 = g_idata[idx + 3 * blockDim.x]; int b1 = g_idata[idx + 4 * blockDim.x]; int b2 = g_idata[idx + 5 * blockDim.x]; int b3 = g_idata[idx + 6 * blockDim.x]; int b4 = g_idata[idx + 7 * blockDim.x]; g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); // in-place reduction and complete unroll if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64]; __syncthreads(); // unrolling warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } __global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling last warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int 
main(int argc, char **argv) { // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = 512; // initial block size if(argc > 1) { blocksize = atoi(argv[1]); // block size from command line argument } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) { // mask off high 2 bytes to force max number to 255 h_idata[i] = (int)( rand() & 0xFF ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(cudaMalloc((void **) &d_idata, bytes)); CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction iStart = seconds(); int cpu_sum = recursiveReduce (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); // kernel 1: reduceNeighbored CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 2: reduceNeighbored with less divergence CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceNeighboredLess<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 3: reduceInterleaved CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceInterleaved<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 4: reduceUnrolling2 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling2<<<grid.x / 2, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d 
<<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x); // kernel 5: reduceUnrolling4 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling4<<<grid.x / 4, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x); // kernel 6: reduceUnrolling8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrolling8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i]; // kernel 8: reduceUnrollWarps8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnrollWarsp8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceCompleteUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reduceCompleteUnroll CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); switch (blocksize) { case 1024: reduceCompleteUnroll<1024><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 512: reduceCompleteUnroll<512><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 256: reduceCompleteUnroll<256><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 128: reduceCompleteUnroll<128><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 64: reduceCompleteUnroll<64><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; } CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(cudaFree(d_idata)); CHECK(cudaFree(d_odata)); // reset device CHECK(cudaDeviceReset()); // check the results bResult = (gpu_sum == 
cpu_sum); if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
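All of the kernels in this pair finish the last 32 elements through a volatile shared/global pointer. On compute capability 3.0+ (CUDA 9 or later for the _sync intrinsics) the same step can be done with warp shuffles instead. The sketch below is not part of the original file pair; it assumes blockDim.x is a power of two, a multiple of 32, and at most 1024, and it is launched exactly like reduceNeighbored, with the per-block partial sums added on the host afterwards.

// reduceShfl -- warp-shuffle variant of the block reduction; a sketch, not from the original file.
__inline__ __device__ int warpReduceSum(int val)
{
    // tree reduction inside one fully active warp
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}

__global__ void reduceShfl(int *g_idata, int *g_odata, unsigned int n)
{
    __shared__ int warpSum[32];                        // one partial per warp (blockDim.x <= 1024)
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int val = (idx < n) ? g_idata[idx] : 0;            // out-of-range threads contribute 0

    int lane = threadIdx.x % 32;
    int wid  = threadIdx.x / 32;

    val = warpReduceSum(val);                          // stage 1: reduce within each warp
    if (lane == 0) warpSum[wid] = val;
    __syncthreads();

    val = (threadIdx.x < blockDim.x / 32) ? warpSum[lane] : 0;
    if (wid == 0) val = warpReduceSum(val);            // stage 2: warp 0 reduces the warp partials
    if (threadIdx.x == 0) g_odata[blockIdx.x] = val;
}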
1cd82301f7c5b7626b0da400cd2a579b31cbaa87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "./headers.h" /* * Function to perform Cholesky Factorization for a tile * input is the pointer to shared memory for a tile given by t_A */ __device__ void spotrf_tile(float* t_A) { int ty = threadIdx.x; // col int tx = threadIdx.y; // row for(int k{0};k<TILE_SIZE;k++){ // square root of diagonal elements if(tx==0 && ty==0) t_A[k*TILE_SIZE + k] = sqrtf(t_A[k*TILE_SIZE + k]); __syncthreads(); // division step done in parallel if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty == k) { t_A[(tx+1)*TILE_SIZE + k]/= t_A[k*TILE_SIZE + k]; } __syncthreads(); if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty >= k) { t_A[(tx+1)*TILE_SIZE + (ty+1)]-= t_A[(tx+1)*TILE_SIZE + k]*t_A[(ty+1)*TILE_SIZE + k]; } __syncthreads(); } } /* * Function to perform triangular solve for a tile * inputs are two shared memory pointers of tiles given by t_A1 and t_A2 * implementing triangular solve on tile t_A2 using t_A1 */ __device__ void strsm_tile(float *t_A1, float *t_A2) { // t_A2 is current unknown int ty = threadIdx.x; // access column int tx = threadIdx.y; // access row for(int i{0};i<TILE_SIZE;i++){ if(ty==0){ t_A2[tx*TILE_SIZE + i]/= t_A1[i*TILE_SIZE + i]; // division step } __syncthreads(); if(ty>i && i<TILE_SIZE-1) { t_A2[tx*TILE_SIZE+ty]-= t_A2[tx*TILE_SIZE + i]*t_A1[ty*TILE_SIZE + i]; } __syncthreads(); } } /* * Function to perform rank-k update * only half of the threads do work * inputs are pointers to the shared memory for two tiles given by rA1 and rA2 * implementing rank-k update of the tile rA2 using tile rA1 */ __device__ void ssyrk_tile(float* rA1, float* rA2) { int row = threadIdx.y; int column = threadIdx.x; if(column <= row) { float updatedValue = rA2[row * TILE_SIZE + column]; for(int k=0; k<TILE_SIZE; k++) { updatedValue -= rA1[row * TILE_SIZE + k] * rA1[column * TILE_SIZE + k]; } rA2[row * TILE_SIZE + column] = updatedValue; } } /* * Function to perform general matrix multiplication * DOUBT: I think the calculation is given wrong in the paper; it should be rA2[k][n], as we are using row-major form * inputs are pointers to the shared memory for three tiles given by rA1, rA2 and rA3 * implementing sgemm on tile rA3 using rA1 and rA2 */ __device__ void sgemm_tile(const float* rA1, const float* rA2, float* rA3) { int row = threadIdx.y; int column = threadIdx.x; float updatedValue = rA3[row * TILE_SIZE + column]; for(int i=0; i<TILE_SIZE; i++) { updatedValue -= rA1[row * TILE_SIZE + i] * rA2[i * TILE_SIZE + column]; } rA3[row * TILE_SIZE + column] = updatedValue; } /* * Function to store full tile from shared memory back to global memory * inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem * tile_y and tile_x are integers representing tile access numbers in y and x dimensions */ __device__ void store_full(float *g_mem, float *s_mem, int tile_y, int tile_x, int N) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { g_mem[row * N + column] = (tx < TILE_SIZE && ty < TILE_SIZE) ?
s_mem[ty * TILE_SIZE + tx] : 0; } __syncthreads(); } /* * Function to store lower triangular tile from shared memory to global memory * inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem * tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size */ __device__ void store_lower(float *g_mem, float *s_mem, int tile_y, int tile_x, int N) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { g_mem[row * N + column] = (tx < TILE_SIZE && ty < TILE_SIZE && column <= row) ? s_mem[ty * TILE_SIZE + tx] : 0; } __syncthreads(); } /* * Function to load a full tile from global memory to shared memory * inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem * tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size */ __device__ void load_full(float *g_mem, float *s_mem, int tile_y, int tile_x, int N) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(tx < TILE_SIZE && ty < TILE_SIZE) { s_mem[ty * TILE_SIZE + tx] = (row < N && column < N) ? g_mem[row * N + column] : 0; } __syncthreads(); } /* * Function to store zeros in the global memory tile given by g_mem * tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size */ __device__ void store_zeros(float *g_mem, int tile_y, int tile_x, int N) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { g_mem[row * N + column] = 0; } __syncthreads(); }
1cd82301f7c5b7626b0da400cd2a579b31cbaa87.cu
#include "./headers.h" /* * Function to perform Cholesky Factorization for a tile * input is the pointer to shared memory for a tile given by t_A */ __device__ void spotrf_tile(float* t_A) { int ty = threadIdx.x; // col int tx = threadIdx.y; // row for(int k{0};k<TILE_SIZE;k++){ // square root of diagonal elements if(tx==0 && ty==0) t_A[k*TILE_SIZE + k] = sqrtf(t_A[k*TILE_SIZE + k]); __syncthreads(); // division step done in parallel if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty == k) { t_A[(tx+1)*TILE_SIZE + k]/= t_A[k*TILE_SIZE + k]; } __syncthreads(); if(ty<=tx && tx<TILE_SIZE - 1 && ty<TILE_SIZE - 1 && ty >= k) { t_A[(tx+1)*TILE_SIZE + (ty+1)]-= t_A[(tx+1)*TILE_SIZE + k]*t_A[(ty+1)*TILE_SIZE + k]; } __syncthreads(); } } /* * Function to perform triangular solve for a tile * inputs are two shared memory pointers of tiles given by t_A1 and t_A2 * implementing triangular solve on tile t_A2 using t_A1 */ __device__ void strsm_tile(float *t_A1, float *t_A2) { // t_A2 is current unknown int ty = threadIdx.x; // access column int tx = threadIdx.y; // access row for(int i{0};i<TILE_SIZE;i++){ if(ty==0){ t_A2[tx*TILE_SIZE + i]/= t_A1[i*TILE_SIZE + i]; // division step } __syncthreads(); if(ty>i && i<TILE_SIZE-1) { t_A2[tx*TILE_SIZE+ty]-= t_A2[tx*TILE_SIZE + i]*t_A1[ty*TILE_SIZE + i]; } __syncthreads(); } } /* * Function to perform rank-k update * only half of the threads do work * inputs are pointers to the shared memory for two tiles given by rA1 and rA2 * implementing rank-k update of the tile rA2 using tile rA1 */ __device__ void ssyrk_tile(float* rA1, float* rA2) { int row = threadIdx.y; int column = threadIdx.x; if(column <= row) { float updatedValue = rA2[row * TILE_SIZE + column]; for(int k=0; k<TILE_SIZE; k++) { updatedValue -= rA1[row * TILE_SIZE + k] * rA1[column * TILE_SIZE + k]; } rA2[row * TILE_SIZE + column] = updatedValue; } } /* * Function to perform general matrix multiplication * DOUBT: I think the calculation is given wrong in the paper; it should be rA2[k][n], as we are using row-major form * inputs are pointers to the shared memory for three tiles given by rA1, rA2 and rA3 * implementing sgemm on tile rA3 using rA1 and rA2 */ __device__ void sgemm_tile(const float* rA1, const float* rA2, float* rA3) { int row = threadIdx.y; int column = threadIdx.x; float updatedValue = rA3[row * TILE_SIZE + column]; for(int i=0; i<TILE_SIZE; i++) { updatedValue -= rA1[row * TILE_SIZE + i] * rA2[i * TILE_SIZE + column]; } rA3[row * TILE_SIZE + column] = updatedValue; } /* * Function to store full tile from shared memory back to global memory * inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem * tile_y and tile_x are integers representing tile access numbers in y and x dimensions */ __device__ void store_full(float *g_mem, float *s_mem, int tile_y, int tile_x, int N) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { g_mem[row * N + column] = (tx < TILE_SIZE && ty < TILE_SIZE) ?
s_mem[ty * TILE_SIZE + tx] : 0; } __syncthreads(); } /* * Function to store lower triangular tile from shared memory to global memory * inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem * tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size */ __device__ void store_lower(float *g_mem, float *s_mem, int tile_y, int tile_x, int N) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { g_mem[row * N + column] = (tx < TILE_SIZE && ty < TILE_SIZE && column <= row) ? s_mem[ty * TILE_SIZE + tx] : 0; } __syncthreads(); } /* * Function to load a full tile from global memory to shared memory * inputs are pointers to tile of shared memory and global memory given by s_mem and g_mem * tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size */ __device__ void load_full(float *g_mem, float *s_mem, int tile_y, int tile_x, int N) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(tx < TILE_SIZE && ty < TILE_SIZE) { s_mem[ty * TILE_SIZE + tx] = (row < N && column < N) ? g_mem[row * N + column] : 0; } __syncthreads(); } /* * function to store 0 element in in global memory tile given by g_mem * tile_y and tile_x are integers representing tile access numbers in y and x dimensions and N is matrix size */ __device__ void store_zeros(float *g_mem, int tile_y, int tile_x, int N) { int tx = threadIdx.x; // local threadid in x int ty = threadIdx.y; // local threadid in y int row = tile_y * TILE_SIZE + ty; // access row int column = tile_x * TILE_SIZE + tx; // access col if(row < N && column < N) { g_mem[row * N + column] = 0; } __syncthreads(); }
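Note: the pair above supplies the per-tile device primitives (spotrf_tile, strsm_tile, ssyrk_tile, sgemm_tile) and the tile load/store helpers, but no driver that sequences them. As a sketch of the algorithm those primitives implement, here is a plain host-side right-looking blocked Cholesky that mirrors the same three steps per tile column and can serve as a reference check; the tile size, matrix size, and the name blocked_cholesky_ref are illustrative assumptions, not taken from headers.h.

// Host-side reference of the right-looking blocked Cholesky that the tile
// primitives above implement on the GPU. Sizes below are illustrative.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static const int T = 4;   // reference tile size (assumption, stands in for TILE_SIZE)
static const int N = 8;   // reference matrix size (assumption)

// Factor the lower triangle of the SPD matrix A (row-major, N x N) in place.
void blocked_cholesky_ref(std::vector<float>& A) {
    for (int k = 0; k < N / T; ++k) {
        // 1) spotrf_tile: unblocked Cholesky of the diagonal tile (k, k)
        for (int j = 0; j < T; ++j) {
            int jj = k * T + j;
            A[jj * N + jj] = std::sqrt(A[jj * N + jj]);
            for (int i = j + 1; i < T; ++i)
                A[(k * T + i) * N + jj] /= A[jj * N + jj];
            for (int i = j + 1; i < T; ++i)
                for (int c = j + 1; c <= i; ++c)
                    A[(k * T + i) * N + (k * T + c)] -=
                        A[(k * T + i) * N + jj] * A[(k * T + c) * N + jj];
        }
        // 2) strsm_tile: triangular solve of every panel tile below (k, k)
        for (int m = k + 1; m < N / T; ++m)
            for (int j = 0; j < T; ++j)
                for (int i = 0; i < T; ++i) {
                    float s = A[(m * T + i) * N + (k * T + j)];
                    for (int c = 0; c < j; ++c)
                        s -= A[(m * T + i) * N + (k * T + c)] *
                             A[(k * T + j) * N + (k * T + c)];
                    A[(m * T + i) * N + (k * T + j)] =
                        s / A[(k * T + j) * N + (k * T + j)];
                }
        // 3) ssyrk_tile / sgemm_tile: rank-T update of the trailing submatrix
        for (int m = k + 1; m < N / T; ++m)
            for (int n = k + 1; n <= m; ++n)
                for (int i = 0; i < T; ++i)
                    for (int j = 0; j < T; ++j)
                        for (int c = 0; c < T; ++c)
                            A[(m * T + i) * N + (n * T + j)] -=
                                A[(m * T + i) * N + (k * T + c)] *
                                A[(n * T + j) * N + (k * T + c)];
    }
}

int main() {
    // A[i][j] = min(i, j) + 1 is SPD and its Cholesky factor is all ones,
    // which makes the result easy to eyeball.
    std::vector<float> A(N * N);
    for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j)
            A[i * N + j] = (float)(std::min(i, j) + 1);
    blocked_cholesky_ref(A);
    printf("L(7,3) = %f (expected 1.0)\n", A[7 * N + 3]);
    return 0;
}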
7c4ef918b94d93a7f24866698c579e93b2033fc8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../common/book.h" #define imin(a, b) (a < b ? a : b) #define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6) const int N = 33 * 1044; const int threadsPerBlock = 256; const int blocksPerGrid = imin( 32, (N + threadsPerBlock - 1) / threadsPerBlock ); __global__ void dot( float *a, float *b, float *c ) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } // set the cache values cache[cacheIndex] = temp; // synchronize threads in this block __syncthreads(); // for reductions, threadsPerBlock must be a power of 2 // because of the following code int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main( void ) { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; // allocate memory on the CPU side a = (float*)malloc( N * sizeof(float) ); b = (float*)malloc( N * sizeof(float) ); partial_c = (float*)malloc( blocksPerGrid * sizeof(float) ); // allocate the memory on the GPU HANDLE_ERROR( hipMalloc( (void**)&dev_a, N * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_b, N * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_partial_c, blocksPerGrid * sizeof(float) ) ); // fill in the host memory with data for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2; } // copy the arrays 'a' and 'b' to the GPU HANDLE_ERROR( hipMemcpy( dev_a, a, N * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b, b, N * sizeof(float), hipMemcpyHostToDevice ) ); hipLaunchKernelGGL(( dot), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c ); // copy the array 'c' back from the GPU to the CPU HANDLE_ERROR( hipMemcpy( partial_c, dev_partial_c, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost ) ); // finish up on the CPU side c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf( "Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) ); // free memory on the GPU side hipFree( dev_a ); hipFree( dev_b ); hipFree( dev_partial_c ); // free memory on the CPU side free( a ); free( b ); free( partial_c ); }
7c4ef918b94d93a7f24866698c579e93b2033fc8.cu
#include "../common/book.h" #define imin(a, b) (a < b ? a : b) #define sum_squares(x) (x * (x + 1) * (2 * x + 1) / 6) const int N = 33 * 1044; const int threadsPerBlock = 256; const int blocksPerGrid = imin( 32, (N + threadsPerBlock - 1) / threadsPerBlock ); __global__ void dot( float *a, float *b, float *c ) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } // set the cache values cache[cacheIndex] = temp; // synchronize threads in this block __syncthreads(); // for reductions, threadsPerBlock must be a power of 2 // because of the following code int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } int main( void ) { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; // allocate memory on the CPU side a = (float*)malloc( N * sizeof(float) ); b = (float*)malloc( N * sizeof(float) ); partial_c = (float*)malloc( blocksPerGrid * sizeof(float) ); // allocate the memory on the GPU HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_partial_c, blocksPerGrid * sizeof(float) ) ); // fill in the host memory with data for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2; } // copy the arrays 'a' and 'b' to the GPU HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice ) ); dot<<<blocksPerGrid, threadsPerBlock>>>( dev_a, dev_b, dev_partial_c ); // copy the array 'c' back from the GPU to the CPU HANDLE_ERROR( cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost ) ); // finish up on the CPU side c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf( "Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) ); // free memory on the GPU side cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_partial_c ); // free memory on the CPU side free( a ); free( b ); free( partial_c ); }
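Note: the dot-product pair above finishes the reduction on the CPU by copying partial_c back and summing it in a loop. A small variation worth knowing (a sketch, not part of the book's code) keeps the per-block tree reduction but folds the final step into the kernel with one atomicAdd per block, so only a single float travels back to the host; THREADS and the _sketch name are assumptions.

// Sketch: same grid-stride load and power-of-two tree reduction as dot() above,
// but the block results are accumulated on the device with one atomicAdd each.
#define THREADS 256   // must equal blockDim.x at launch and stay a power of 2

__global__ void dot_atomic_sketch(const float *a, const float *b, float *result, int n) {
    __shared__ float cache[THREADS];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    float temp = 0.0f;
    while (tid < n) {                            // grid-stride loop, as in dot()
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }
    cache[threadIdx.x] = temp;
    __syncthreads();
    for (int i = THREADS / 2; i > 0; i /= 2) {   // tree reduction within the block
        if (threadIdx.x < i)
            cache[threadIdx.x] += cache[threadIdx.x + i];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicAdd(result, cache[0]);             // one atomic per block
}
// usage: cudaMemset(dev_result, 0, sizeof(float));
//        dot_atomic_sketch<<<blocksPerGrid, THREADS>>>(dev_a, dev_b, dev_result, N);
//        cudaMemcpy(&c, dev_result, sizeof(float), cudaMemcpyDeviceToHost);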
0e078e7be0d7c44ef1ac6446a8353af28aa68deb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <opencv2/opencv.hpp> #include <vector> __global__ void blurShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) { auto op1 = blockIdx.x * (blockDim.x-2) + threadIdx.x; auto op2 = blockIdx.y * (blockDim.y-2) + threadIdx.y; auto op3 = threadIdx.x; auto op4 = threadIdx.y; extern __shared__ unsigned char sharedExt[]; if( op1 < w && op2 < h ) { sharedExt[3 * (op4 * blockDim.x + op3) ] = data[ 3 * ( op2 * w + op1 ) ]; sharedExt[3 * (op4 * blockDim.x + op3) + 1 ] = data[ 3 * ( op2 * w + op1 ) + 1]; sharedExt[3 * (op4 * blockDim.x + op3) + 2 ] = data[ 3 * ( op2 * w + op1 ) + 2 ]; __syncthreads(); auto op5 = blockDim.x; if( op3 > 0 && op3 < (blockDim.x - 1) && op4 > 0 && op4 < (blockDim.y - 1) ) { for (auto c = 0; c < 3; ++c){ auto op6 = sharedExt[((op4 - 1) * op5 + op3 - 1) * 3 + c] + sharedExt[((op4 - 1) * op5 + op3 + 1) * 3 + c] + sharedExt[( op4 * op5 + op3 - 1) * 3 + c] + sharedExt[( op4 * op5 + op3 + 1) * 3 + c] + sharedExt[((op4 + 1) * op5 + op3 - 1) * 3 + c] + sharedExt[((op4 + 1) * op5 + op3 + 1) * 3 + c] + sharedExt[(( op4 - 1) * op5 + op3) * 3 + c] + sharedExt[( op4 * op5 + op3) * 3 + c] + sharedExt[(( op4 + 1) * op5 + op3) * 3 + c]; out[(op2 * w + op1) * 3 + c] = (op6 / 9); } } } } int main() { cv::Mat img_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED ); auto rows = img_in.rows; auto cols = img_in.cols; auto rgb = img_in.data; std::vector< unsigned char > g( 3 * rows * cols ); cv::Mat img_out( rows, cols, CV_8UC3, g.data() ); unsigned char * out; unsigned char * rgb_d; std::size_t size = 3 * img_in.cols * img_in.rows; hipMalloc( &rgb_d, 3 * rows * cols); hipMalloc( &out, 3 * rows * cols ); hipStream_t streams[ 2 ]; hipStreamCreate( &streams[ 0 ] ); hipStreamCreate( &streams[ 1 ] ); hipMemcpyAsync( rgb_d, rgb, size/2, hipMemcpyHostToDevice, streams[ 0 ] ); hipMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, hipMemcpyHostToDevice, streams[ 1 ] ); dim3 dim1( 32, 32 ); dim3 dim2( 3 * (( cols ) / ((dim1.x - 2) + 1) ), (( rows ) / ((dim1.y - 2) + 1) )); hipEvent_t start, stop; hipEventCreate( &start ); hipEventCreate( &stop ); hipEventRecord( start ); hipLaunchKernelGGL(( blurShared), dim3(dim2), dim3(dim1), 3 * dim1.x * dim1.y, streams[ 0 ] , rgb_d, out, cols, rows / 2 + 4); hipLaunchKernelGGL(( blurShared), dim3(dim2), dim3(dim1), 3 * dim1.x * dim1.y, streams[ 1 ] , rgb_d+size/2, out+size/2, cols, rows / 2 + 4); hipMemcpyAsync( g.data(), out, size/2, hipMemcpyDeviceToHost, streams[ 0 ] ); hipMemcpyAsync( g.data()+size/2, out+size/2, size/2, hipMemcpyDeviceToHost, streams[ 1 ] ); hipDeviceSynchronize(); hipStreamDestroy(streams[0]); hipStreamDestroy(streams[1]); auto hipError_t = hipGetLastError(); if (hipError_t != hipSuccess){ std::cout << hipGetErrorName(hipError_t) << std::endl; std::cout << hipGetErrorString(hipError_t) << std::endl; } else { std::cout << "No Errors!" << std::endl; } hipEventRecord( stop ); hipEventSynchronize( stop ); float duration = 0.0f; hipEventElapsedTime( &duration, start, stop ); std::cout << "Total: " << duration << "ms\n"; cv::imwrite( "BlurSharedStreamOutput.jpg", img_out ); hipFree( rgb_d); hipFree ( out); return 0; }
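Note: one conversion artifact in the hipified blur file above is that the error variable came out named hipError_t, which is also the name of HIP's error type. The code still compiles because the local variable shadows the type inside main, but it reads badly and breaks if a hipError_t declaration is later needed in the same scope. The intended spelling is presumably just an ordinary variable name, for example:

auto err = hipGetLastError();
if (err != hipSuccess) {
    std::cout << hipGetErrorName(err) << std::endl;
    std::cout << hipGetErrorString(err) << std::endl;
} else {
    std::cout << "No Errors!" << std::endl;
}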
0e078e7be0d7c44ef1ac6446a8353af28aa68deb.cu
#include <opencv2/opencv.hpp> #include <vector> __global__ void blurShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) { auto op1 = blockIdx.x * (blockDim.x-2) + threadIdx.x; auto op2 = blockIdx.y * (blockDim.y-2) + threadIdx.y; auto op3 = threadIdx.x; auto op4 = threadIdx.y; extern __shared__ unsigned char sharedExt[]; if( op1 < w && op2 < h ) { sharedExt[3 * (op4 * blockDim.x + op3) ] = data[ 3 * ( op2 * w + op1 ) ]; sharedExt[3 * (op4 * blockDim.x + op3) + 1 ] = data[ 3 * ( op2 * w + op1 ) + 1]; sharedExt[3 * (op4 * blockDim.x + op3) + 2 ] = data[ 3 * ( op2 * w + op1 ) + 2 ]; __syncthreads(); auto op5 = blockDim.x; if( op3 > 0 && op3 < (blockDim.x - 1) && op4 > 0 && op4 < (blockDim.y - 1) ) { for (auto c = 0; c < 3; ++c){ auto op6 = sharedExt[((op4 - 1) * op5 + op3 - 1) * 3 + c] + sharedExt[((op4 - 1) * op5 + op3 + 1) * 3 + c] + sharedExt[( op4 * op5 + op3 - 1) * 3 + c] + sharedExt[( op4 * op5 + op3 + 1) * 3 + c] + sharedExt[((op4 + 1) * op5 + op3 - 1) * 3 + c] + sharedExt[((op4 + 1) * op5 + op3 + 1) * 3 + c] + sharedExt[(( op4 - 1) * op5 + op3) * 3 + c] + sharedExt[( op4 * op5 + op3) * 3 + c] + sharedExt[(( op4 + 1) * op5 + op3) * 3 + c]; out[(op2 * w + op1) * 3 + c] = (op6 / 9); } } } } int main() { cv::Mat img_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED ); auto rows = img_in.rows; auto cols = img_in.cols; auto rgb = img_in.data; std::vector< unsigned char > g( 3 * rows * cols ); cv::Mat img_out( rows, cols, CV_8UC3, g.data() ); unsigned char * out; unsigned char * rgb_d; std::size_t size = 3 * img_in.cols * img_in.rows; cudaMalloc( &rgb_d, 3 * rows * cols); cudaMalloc( &out, 3 * rows * cols ); cudaStream_t streams[ 2 ]; cudaStreamCreate( &streams[ 0 ] ); cudaStreamCreate( &streams[ 1 ] ); cudaMemcpyAsync( rgb_d, rgb, size/2, cudaMemcpyHostToDevice, streams[ 0 ] ); cudaMemcpyAsync( rgb_d+size/2, rgb+size/2, size/2, cudaMemcpyHostToDevice, streams[ 1 ] ); dim3 dim1( 32, 32 ); dim3 dim2( 3 * (( cols ) / ((dim1.x - 2) + 1) ), (( rows ) / ((dim1.y - 2) + 1) )); cudaEvent_t start, stop; cudaEventCreate( &start ); cudaEventCreate( &stop ); cudaEventRecord( start ); blurShared<<< dim2, dim1, 3 * dim1.x * dim1.y, streams[ 0 ] >>>( rgb_d, out, cols, rows / 2 + 4); blurShared<<< dim2, dim1, 3 * dim1.x * dim1.y, streams[ 1 ] >>>( rgb_d+size/2, out+size/2, cols, rows / 2 + 4); cudaMemcpyAsync( g.data(), out, size/2, cudaMemcpyDeviceToHost, streams[ 0 ] ); cudaMemcpyAsync( g.data()+size/2, out+size/2, size/2, cudaMemcpyDeviceToHost, streams[ 1 ] ); cudaDeviceSynchronize(); cudaStreamDestroy(streams[0]); cudaStreamDestroy(streams[1]); auto cudaError = cudaGetLastError(); if (cudaError != cudaSuccess){ std::cout << cudaGetErrorName(cudaError) << std::endl; std::cout << cudaGetErrorString(cudaError) << std::endl; } else { std::cout << "No Errors!" << std::endl; } cudaEventRecord( stop ); cudaEventSynchronize( stop ); float duration = 0.0f; cudaEventElapsedTime( &duration, start, stop ); std::cout << "Total: " << duration << "ms\n"; cv::imwrite( "BlurSharedStreamOutput.jpg", img_out ); cudaFree( rgb_d); cudaFree ( out); return 0; }
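Note: a performance caveat that applies to both versions of the blur program above. The host buffers handed to the async copies (img_in.data and the std::vector behind img_out) are pageable, and cudaMemcpyAsync / hipMemcpyAsync only overlaps with kernels and other copies when the host memory is page-locked, so the two streams largely serialize as written. A minimal sketch of pinning the existing buffers in place around the transfers (variable names follow the file above; error checking omitted; this is not part of the original code):

// Pin the already-allocated host buffers so the per-stream async copies can
// actually overlap with the kernels; unpin them once the work is done.
cudaHostRegister(rgb,      size, cudaHostRegisterDefault);   // input pixels
cudaHostRegister(g.data(), size, cudaHostRegisterDefault);   // output buffer
// ... cudaMemcpyAsync / blurShared launches on streams[0] and streams[1] ...
cudaDeviceSynchronize();
cudaHostUnregister(rgb);
cudaHostUnregister(g.data());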
4f47e489e7e4d38ca68c3e2177c6b6a02ea7cf19.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 /*! Mass of one "planet." */ #define planetMass 3e8f /*! Mass of the "star" at the center. */ #define starMass 5e10f /*! Size of the starting area in simulation space. */ const float scene_scale = 1e2; /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); glm::vec3 *dev_pos; glm::vec3 *dev_vel; glm::vec3 *dev_acc; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * Function for generating a random vec3. */ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * CUDA kernel for generating planets with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale, float mass) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = 0.1 * scale * sqrt(rand.x * rand.x + rand.y * rand.y) * rand.z; } } /** * CUDA kernel for generating velocities in a vortex around the origin. * This is just to make for an interesting-looking scene. 
*/ __global__ void kernGenerateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec3 * pos) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z); float r = glm::length(R) + EPSILON; float s = sqrt(G * starMass / r); glm::vec3 D = glm::normalize(glm::cross(R / r, glm::vec3(0, 0, 1))); arr[index].x = s * D.x; arr[index].y = s * D.y; arr[index].z = s * D.z; } } /** * Initialize memory, update some globals */ void Nbody::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos failed!"); hipMalloc((void**)&dev_vel, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel failed!"); hipMalloc((void**)&dev_acc, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_acc failed!"); hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale, planetMass); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); hipLaunchKernelGGL(( kernGenerateCircularVelArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 2, numObjects, dev_vel, dev_pos); checkCUDAErrorWithLine("kernGenerateCircularVelArray failed!"); hipDeviceSynchronize(); } /****************** * copyPlanetsToVBO * ******************/ /** * Copy the planet positions into the VBO so that they can be drawn by OpenGL. */ __global__ void kernCopyPlanetsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1; } } /** * Wrapper for call to the kernCopyPlanetsToVBO CUDA kernel. */ void Nbody::copyPlanetsToVBO(float *vbodptr) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects) / float(blockSize))); hipLaunchKernelGGL(( kernCopyPlanetsToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, vbodptr, scene_scale); checkCUDAErrorWithLine("copyPlanetsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * Compute the acceleration on a body at `my_pos` due to the `N` bodies in the array `other_planets`. */ __device__ glm::vec3 accelerate(int N, int iSelf, glm::vec3 this_planet, const glm::vec3 *other_planets) { // TODO: Compute the acceleration on `my_pos` due to: // * The star at the origin (with mass `starMass`) // * All of the *other* planets (with mass `planetMass`) // Return the sum of all of these contributions. // HINT: You may want to write a helper function that will compute the acceleration at // a single point due to a single other mass. Be careful that you protect against // division by very small numbers. // HINT: Use Newtonian gravitational acceleration: // G M // g = ----- // r^2 // where: // * G is the universal gravitational constant (already defined for you) // * M is the mass of the other object // * r is the distance between this object and the other object return glm::vec3(0.0f); } /** * For each of the `N` bodies, update its acceleration. * Compute the total instantaneous acceleration using `accelerate`, then store that into `acc`. */ __global__ void kernUpdateAcc(int N, float dt, const glm::vec3 *pos, glm::vec3 *acc) { // TODO: implement updateAccArray. 
// This function body runs once on each CUDA thread. // To avoid race conditions, each instance should only write ONE value to `acc`! } /** * For each of the `N` bodies, update its velocity, then update its position, using a * simple Euler integration scheme. Acceleration must be updated before calling this kernel. */ __global__ void kernUpdateVelPos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel, const glm::vec3 *acc) { // TODO: implement updateVelocityPosition } /** * Step the entire N-body simulation by `dt` seconds. */ void Nbody::stepSimulation(float dt) { // TODO: Using the CUDA kernels you wrote above, write a function that // calls the kernels to perform a full simulation step. } void Nbody::endSimulation() { hipFree(dev_acc); hipFree(dev_vel); hipFree(dev_pos); }
4f47e489e7e4d38ca68c3e2177c6b6a02ea7cf19.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 /*! Mass of one "planet." */ #define planetMass 3e8f /*! Mass of the "star" at the center. */ #define starMass 5e10f /*! Size of the starting area in simulation space. */ const float scene_scale = 1e2; /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); glm::vec3 *dev_pos; glm::vec3 *dev_vel; glm::vec3 *dev_acc; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * Function for generating a random vec3. */ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * CUDA kernel for generating planets with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale, float mass) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = 0.1 * scale * sqrt(rand.x * rand.x + rand.y * rand.y) * rand.z; } } /** * CUDA kernel for generating velocities in a vortex around the origin. * This is just to make for an interesting-looking scene. 
*/ __global__ void kernGenerateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec3 * pos) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z); float r = glm::length(R) + EPSILON; float s = sqrt(G * starMass / r); glm::vec3 D = glm::normalize(glm::cross(R / r, glm::vec3(0, 0, 1))); arr[index].x = s * D.x; arr[index].y = s * D.y; arr[index].z = s * D.z; } } /** * Initialize memory, update some globals */ void Nbody::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos failed!"); cudaMalloc((void**)&dev_vel, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel failed!"); cudaMalloc((void**)&dev_acc, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_acc failed!"); kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale, planetMass); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); kernGenerateCircularVelArray<<<fullBlocksPerGrid, blockSize>>>(2, numObjects, dev_vel, dev_pos); checkCUDAErrorWithLine("kernGenerateCircularVelArray failed!"); cudaThreadSynchronize(); } /****************** * copyPlanetsToVBO * ******************/ /** * Copy the planet positions into the VBO so that they can be drawn by OpenGL. */ __global__ void kernCopyPlanetsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1; } } /** * Wrapper for call to the kernCopyPlanetsToVBO CUDA kernel. */ void Nbody::copyPlanetsToVBO(float *vbodptr) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects) / float(blockSize))); kernCopyPlanetsToVBO<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, vbodptr, scene_scale); checkCUDAErrorWithLine("copyPlanetsToVBO failed!"); cudaThreadSynchronize(); } /****************** * stepSimulation * ******************/ /** * Compute the acceleration on a body at `my_pos` due to the `N` bodies in the array `other_planets`. */ __device__ glm::vec3 accelerate(int N, int iSelf, glm::vec3 this_planet, const glm::vec3 *other_planets) { // TODO: Compute the acceleration on `my_pos` due to: // * The star at the origin (with mass `starMass`) // * All of the *other* planets (with mass `planetMass`) // Return the sum of all of these contributions. // HINT: You may want to write a helper function that will compute the acceleration at // a single point due to a single other mass. Be careful that you protect against // division by very small numbers. // HINT: Use Newtonian gravitational acceleration: // G M // g = ----- // r^2 // where: // * G is the universal gravitational constant (already defined for you) // * M is the mass of the other object // * r is the distance between this object and the other object return glm::vec3(0.0f); } /** * For each of the `N` bodies, update its acceleration. * Compute the total instantaneous acceleration using `accelerate`, then store that into `acc`. */ __global__ void kernUpdateAcc(int N, float dt, const glm::vec3 *pos, glm::vec3 *acc) { // TODO: implement updateAccArray. // This function body runs once on each CUDA thread. // To avoid race conditions, each instance should only write ONE value to `acc`! 
} /** * For each of the `N` bodies, update its velocity, then update its position, using a * simple Euler integration scheme. Acceleration must be updated before calling this kernel. */ __global__ void kernUpdateVelPos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel, const glm::vec3 *acc) { // TODO: implement updateVelocityPosition } /** * Step the entire N-body simulation by `dt` seconds. */ void Nbody::stepSimulation(float dt) { // TODO: Using the CUDA kernels you wrote above, write a function that // calls the kernels to perform a full simulation step. } void Nbody::endSimulation() { cudaFree(dev_acc); cudaFree(dev_vel); cudaFree(dev_pos); }
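Note: the accelerate, kernUpdateAcc, and kernUpdateVelPos bodies in both copies above are intentionally left as TODO stubs (this is an assignment skeleton). For readers who want to see the shape of a working version, here is one possible sketch written to the hints in the comments; it is an illustration only, not the assignment's reference solution. G, EPSILON, starMass, and planetMass are the constants the file already uses, and the _sketch suffixes mark names that are not in the original.

// Acceleration on a body at my_pos due to a single mass at other_pos:
// g = G * M / r^2, directed toward the other body. EPSILON keeps r away from 0.
__device__ glm::vec3 accel_toward_sketch(glm::vec3 my_pos, glm::vec3 other_pos, float mass) {
    glm::vec3 d = other_pos - my_pos;
    float r2 = glm::dot(d, d) + EPSILON;
    return (G * mass / (r2 * sqrtf(r2))) * d;    // G*M*d / r^3 == (G*M/r^2) * unit(d)
}

__device__ glm::vec3 accelerate_sketch(int N, int iSelf, glm::vec3 this_planet,
                                       const glm::vec3 *other_planets) {
    // the star at the origin ...
    glm::vec3 acc = accel_toward_sketch(this_planet, glm::vec3(0.0f), starMass);
    // ... plus every *other* planet
    for (int i = 0; i < N; ++i) {
        if (i == iSelf) continue;
        acc += accel_toward_sketch(this_planet, other_planets[i], planetMass);
    }
    return acc;
}

__global__ void kernUpdateAcc_sketch(int N, float dt, const glm::vec3 *pos, glm::vec3 *acc) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < N)
        acc[i] = accelerate_sketch(N, i, pos[i], pos);   // exactly one write per thread
}

__global__ void kernUpdateVelPos_sketch(int N, float dt, glm::vec3 *pos,
                                        glm::vec3 *vel, const glm::vec3 *acc) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < N) {                  // semi-implicit Euler: update vel, then pos
        vel[i] += acc[i] * dt;
        pos[i] += vel[i] * dt;
    }
}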
6a9b864c72014d6002e1d53f77906623c5db8bab.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Cristhian Alberto Gonzales Castillo <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/cudf_test_fixtures.h> #include <tests/utilities/cudf_test_utils.cuh> #include <cudf.h> #include <thrust/device_vector.h> #include <gtest/gtest.h> #include <cstdlib> #include <iostream> #include <vector> // This is the main test feature template <class T> struct ReplaceTest : public GdfTest { std::vector<T> replace_column; std::vector<T> old_values_column; std::vector<T> new_values_column; gdf_col_pointer gdf_replace_column; gdf_col_pointer gdf_old_values_column; gdf_col_pointer gdf_new_values_column; gdf_column* gdf_raw_replace_column; gdf_column* gdf_raw_old_values_column; gdf_column* gdf_raw_new_values_column; ReplaceTest() { // Use constant seed so the psuedo-random order is the same each time // Each time the class is constructed a new constant seed is used static size_t number_of_instantiations{0}; std::srand(number_of_instantiations++); } ~ReplaceTest() { } /* --------------------------------------------------------------------------* * @brief Initializes the input columns with the given values. * * @param replace_column_list The original values * @param old_values_column_list The values that will be replaced * @param new_values_column_list The new values * @param print Optionally print the set of columns for debug * -------------------------------------------------------------------------*/ void create_input(const std::initializer_list<T> &replace_column_list, const std::initializer_list<T> &old_values_column_list, const std::initializer_list<T> &new_values_column_list, bool print = false) { replace_column = replace_column_list; old_values_column = old_values_column_list; new_values_column = new_values_column_list; gdf_replace_column = create_gdf_column(replace_column); gdf_old_values_column = create_gdf_column(old_values_column); gdf_new_values_column = create_gdf_column(new_values_column); gdf_raw_replace_column = gdf_replace_column.get(); gdf_raw_old_values_column = gdf_old_values_column.get(); gdf_raw_new_values_column = gdf_new_values_column.get(); if(print) { std::cout << "replace column(s) created. 
Size: " << replace_column.size() << std::endl; print_vector(replace_column); std::cout << "\n"; } } /* --------------------------------------------------------------------------*/ /** * @brief Computes a reference solution * * @param print Option to print the solution for debug * * @returns A vector of 'T' with the old values replaced */ /* ----------------------------------------------------------------------------*/ std::vector<T> compute_reference_solution(bool print = false) { std::vector<T> reference_result(replace_column); std::vector<bool> isReplaced(reference_result.size(), false); for(size_t i = 0; i < old_values_column.size(); i++) { size_t k = 0; auto pred = [&, this] (T element) { bool toBeReplaced = false; if(!isReplaced[k]) { toBeReplaced = (element == this->old_values_column[i]); isReplaced[k] = toBeReplaced; } ++k; return toBeReplaced; }; std::replace_if(reference_result.begin(), reference_result.end(), pred, new_values_column[i]); } if(print) { std::cout << "Reference result size: " << reference_result.size() << std::endl; print_vector(reference_result); std::cout << "\n"; } return reference_result; } /* --------------------------------------------------------------------------*/ /** * @brief Replaces the values in a column given a map of old values to be replaced * and new values with the libgdf functions * * @param print Option to print the result computed by the libgdf function * * @returns A vector of 'T' with the old values replaced */ /* ----------------------------------------------------------------------------*/ std::vector<T> compute_gdf_result(bool print = false, gdf_error expected_result = GDF_SUCCESS) { gdf_error result_error{GDF_SUCCESS}; gdf_error status = gdf_find_and_replace_all(gdf_raw_replace_column, gdf_raw_old_values_column, gdf_raw_new_values_column); EXPECT_EQ(expected_result, result_error) << "The gdf order by function did not complete successfully"; // If the expected result was not GDF_SUCCESS, then this test was testing for a // specific error condition, in which case we return imediately and do not do // any further work on the output if(GDF_SUCCESS != expected_result){ return std::vector<T>(); } size_t output_size = gdf_raw_replace_column->size; std::vector<T> host_result(output_size); EXPECT_EQ(hipMemcpy(host_result.data(), gdf_raw_replace_column->data, output_size * sizeof(T), hipMemcpyDeviceToHost), hipSuccess); if(print){ std::cout << "GDF result size: " << host_result.size() << std::endl; print_vector(host_result); std::cout << "\n"; } return host_result; } }; using Types = testing::Types<int8_t, int16_t, int, int64_t, float, double>; TYPED_TEST_CASE(ReplaceTest, Types); // This test is used for debugging purposes and is disabled by default. // The input sizes are small and has a large amount of debug printing enabled. 
TYPED_TEST(ReplaceTest, DISABLED_DebugTest) { this->create_input({7, 5, 6, 3, 1, 2, 8, 4}, {2, 6, 4, 8}, {0, 4, 2, 6}, true); auto reference_result = this->compute_reference_solution(true); auto gdf_result = this->compute_gdf_result(true); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Simple test, replacing all even gdf_new_values_column TYPED_TEST(ReplaceTest, ReplaceEvenPosition) { this->create_input({1, 2, 3, 4, 5, 6, 7, 8}, {2, 4, 6, 8}, {0, 2, 4, 6}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Similar test as ReplaceEvenPosition, but with unordered data TYPED_TEST(ReplaceTest, Unordered) { this->create_input({7, 5, 6, 3, 1, 2, 8, 4}, {2, 6, 4, 8}, {0, 4, 2, 6}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Testing with Empty Replace TYPED_TEST(ReplaceTest, EmptyReplace) { this->create_input({7, 5, 6, 3, 1, 2, 8, 4}, {}, {}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Testing with Nothing To Replace TYPED_TEST(ReplaceTest, NothingToReplace) { this->create_input({7, 5, 6, 3, 1, 2, 8, 4}, {10, 11, 12}, {15, 16, 17}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Testing with Empty Data TYPED_TEST(ReplaceTest, EmptyData) { this->create_input({}, {10, 11, 12}, {15, 16, 17}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Test with much larger data sets TYPED_TEST(ReplaceTest, LargeScaleReplaceTest) { const size_t DATA_SIZE = 1000000; const size_t REPLACE_SIZE = 10000; this->replace_column.resize(DATA_SIZE); for (size_t i = 0; i < DATA_SIZE; i++) { this->replace_column[i] = std::rand() % (2 * REPLACE_SIZE); } this->old_values_column.resize(REPLACE_SIZE); this->new_values_column.resize(REPLACE_SIZE); size_t count = 0; for (size_t i = 0; i < 7; i++) { for (size_t j = 0; j < REPLACE_SIZE; j 
+= 7) { if (i + j < REPLACE_SIZE) { this->old_values_column[i + j] = count; count++; this->new_values_column[i + j] = count; } } } this->gdf_replace_column = create_gdf_column(this->replace_column); this->gdf_old_values_column = create_gdf_column(this->old_values_column); this->gdf_new_values_column = create_gdf_column(this->new_values_column); this->gdf_raw_replace_column = this->gdf_replace_column.get(); this->gdf_raw_old_values_column = this->gdf_old_values_column.get(); this->gdf_raw_new_values_column = this->gdf_new_values_column.get(); auto gdf_result = this->compute_gdf_result(); for (size_t i = 0; i < DATA_SIZE; i++) { if ((size_t)(this->replace_column[i]) < REPLACE_SIZE) { EXPECT_EQ((TypeParam)(this->replace_column[i] + 1), gdf_result[i]); } } }
6a9b864c72014d6002e1d53f77906623c5db8bab.cu
/* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Cristhian Alberto Gonzales Castillo <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/cudf_test_fixtures.h> #include <tests/utilities/cudf_test_utils.cuh> #include <cudf.h> #include <thrust/device_vector.h> #include <gtest/gtest.h> #include <cstdlib> #include <iostream> #include <vector> // This is the main test feature template <class T> struct ReplaceTest : public GdfTest { std::vector<T> replace_column; std::vector<T> old_values_column; std::vector<T> new_values_column; gdf_col_pointer gdf_replace_column; gdf_col_pointer gdf_old_values_column; gdf_col_pointer gdf_new_values_column; gdf_column* gdf_raw_replace_column; gdf_column* gdf_raw_old_values_column; gdf_column* gdf_raw_new_values_column; ReplaceTest() { // Use constant seed so the psuedo-random order is the same each time // Each time the class is constructed a new constant seed is used static size_t number_of_instantiations{0}; std::srand(number_of_instantiations++); } ~ReplaceTest() { } /* --------------------------------------------------------------------------* * @brief Initializes the input columns with the given values. * * @param replace_column_list The original values * @param old_values_column_list The values that will be replaced * @param new_values_column_list The new values * @param print Optionally print the set of columns for debug * -------------------------------------------------------------------------*/ void create_input(const std::initializer_list<T> &replace_column_list, const std::initializer_list<T> &old_values_column_list, const std::initializer_list<T> &new_values_column_list, bool print = false) { replace_column = replace_column_list; old_values_column = old_values_column_list; new_values_column = new_values_column_list; gdf_replace_column = create_gdf_column(replace_column); gdf_old_values_column = create_gdf_column(old_values_column); gdf_new_values_column = create_gdf_column(new_values_column); gdf_raw_replace_column = gdf_replace_column.get(); gdf_raw_old_values_column = gdf_old_values_column.get(); gdf_raw_new_values_column = gdf_new_values_column.get(); if(print) { std::cout << "replace column(s) created. 
Size: " << replace_column.size() << std::endl; print_vector(replace_column); std::cout << "\n"; } } /* --------------------------------------------------------------------------*/ /** * @brief Computes a reference solution * * @param print Option to print the solution for debug * * @returns A vector of 'T' with the old values replaced */ /* ----------------------------------------------------------------------------*/ std::vector<T> compute_reference_solution(bool print = false) { std::vector<T> reference_result(replace_column); std::vector<bool> isReplaced(reference_result.size(), false); for(size_t i = 0; i < old_values_column.size(); i++) { size_t k = 0; auto pred = [&, this] (T element) { bool toBeReplaced = false; if(!isReplaced[k]) { toBeReplaced = (element == this->old_values_column[i]); isReplaced[k] = toBeReplaced; } ++k; return toBeReplaced; }; std::replace_if(reference_result.begin(), reference_result.end(), pred, new_values_column[i]); } if(print) { std::cout << "Reference result size: " << reference_result.size() << std::endl; print_vector(reference_result); std::cout << "\n"; } return reference_result; } /* --------------------------------------------------------------------------*/ /** * @brief Replaces the values in a column given a map of old values to be replaced * and new values with the libgdf functions * * @param print Option to print the result computed by the libgdf function * * @returns A vector of 'T' with the old values replaced */ /* ----------------------------------------------------------------------------*/ std::vector<T> compute_gdf_result(bool print = false, gdf_error expected_result = GDF_SUCCESS) { gdf_error result_error{GDF_SUCCESS}; gdf_error status = gdf_find_and_replace_all(gdf_raw_replace_column, gdf_raw_old_values_column, gdf_raw_new_values_column); EXPECT_EQ(expected_result, result_error) << "The gdf order by function did not complete successfully"; // If the expected result was not GDF_SUCCESS, then this test was testing for a // specific error condition, in which case we return imediately and do not do // any further work on the output if(GDF_SUCCESS != expected_result){ return std::vector<T>(); } size_t output_size = gdf_raw_replace_column->size; std::vector<T> host_result(output_size); EXPECT_EQ(cudaMemcpy(host_result.data(), gdf_raw_replace_column->data, output_size * sizeof(T), cudaMemcpyDeviceToHost), cudaSuccess); if(print){ std::cout << "GDF result size: " << host_result.size() << std::endl; print_vector(host_result); std::cout << "\n"; } return host_result; } }; using Types = testing::Types<int8_t, int16_t, int, int64_t, float, double>; TYPED_TEST_CASE(ReplaceTest, Types); // This test is used for debugging purposes and is disabled by default. // The input sizes are small and has a large amount of debug printing enabled. 
TYPED_TEST(ReplaceTest, DISABLED_DebugTest) { this->create_input({7, 5, 6, 3, 1, 2, 8, 4}, {2, 6, 4, 8}, {0, 4, 2, 6}, true); auto reference_result = this->compute_reference_solution(true); auto gdf_result = this->compute_gdf_result(true); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Simple test, replacing all even gdf_new_values_column TYPED_TEST(ReplaceTest, ReplaceEvenPosition) { this->create_input({1, 2, 3, 4, 5, 6, 7, 8}, {2, 4, 6, 8}, {0, 2, 4, 6}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Similar test as ReplaceEvenPosition, but with unordered data TYPED_TEST(ReplaceTest, Unordered) { this->create_input({7, 5, 6, 3, 1, 2, 8, 4}, {2, 6, 4, 8}, {0, 4, 2, 6}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Testing with Empty Replace TYPED_TEST(ReplaceTest, EmptyReplace) { this->create_input({7, 5, 6, 3, 1, 2, 8, 4}, {}, {}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Testing with Nothing To Replace TYPED_TEST(ReplaceTest, NothingToReplace) { this->create_input({7, 5, 6, 3, 1, 2, 8, 4}, {10, 11, 12}, {15, 16, 17}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Testing with Empty Data TYPED_TEST(ReplaceTest, EmptyData) { this->create_input({}, {10, 11, 12}, {15, 16, 17}); auto reference_result = this->compute_reference_solution(); auto gdf_result = this->compute_gdf_result(); ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n"; // Compare the GDF and reference solutions for(size_t i = 0; i < reference_result.size(); ++i){ EXPECT_EQ(reference_result[i], gdf_result[i]); } } // Test with much larger data sets TYPED_TEST(ReplaceTest, LargeScaleReplaceTest) { const size_t DATA_SIZE = 1000000; const size_t REPLACE_SIZE = 10000; this->replace_column.resize(DATA_SIZE); for (size_t i = 0; i < DATA_SIZE; i++) { this->replace_column[i] = std::rand() % (2 * REPLACE_SIZE); } this->old_values_column.resize(REPLACE_SIZE); this->new_values_column.resize(REPLACE_SIZE); size_t count = 0; for (size_t i = 0; i < 7; i++) { for (size_t j = 0; j < REPLACE_SIZE; j 
+= 7) { if (i + j < REPLACE_SIZE) { this->old_values_column[i + j] = count; count++; this->new_values_column[i + j] = count; } } } this->gdf_replace_column = create_gdf_column(this->replace_column); this->gdf_old_values_column = create_gdf_column(this->old_values_column); this->gdf_new_values_column = create_gdf_column(this->new_values_column); this->gdf_raw_replace_column = this->gdf_replace_column.get(); this->gdf_raw_old_values_column = this->gdf_old_values_column.get(); this->gdf_raw_new_values_column = this->gdf_new_values_column.get(); auto gdf_result = this->compute_gdf_result(); for (size_t i = 0; i < DATA_SIZE; i++) { if ((size_t)(this->replace_column[i]) < REPLACE_SIZE) { EXPECT_EQ((TypeParam)(this->replace_column[i] + 1), gdf_result[i]); } } }
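Note: one bug visible in both copies of compute_gdf_result above. The return value of gdf_find_and_replace_all is stored in status, but the EXPECT_EQ compares expected_result against result_error, which is initialized to GDF_SUCCESS and never assigned again; a failing call therefore can never fail the test, and status is unused. The failure message also still says "order by", apparently copied from another test. A sketch of the presumably intended check:

gdf_error status = gdf_find_and_replace_all(gdf_raw_replace_column,
                                            gdf_raw_old_values_column,
                                            gdf_raw_new_values_column);
EXPECT_EQ(expected_result, status)
    << "The gdf find_and_replace function did not complete successfully";
// Return early without touching the output when an error was the expected outcome.
if (GDF_SUCCESS != expected_result) {
    return std::vector<T>();
}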
fcba72cf7e961e8ab9af0a7d7ce03b80d09d4829.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file bounding_box.cu * \brief Bounding box util functions and operators * \author Joshua Zhang */ #include <hipcub/hipcub.hpp> #include "./bounding_box-inl.cuh" #include "./bounding_box-inl.h" #include "../elemwise_op_common.h" namespace mxnet { namespace op { namespace { using mshadow::Stream; using mshadow::Tensor; template <typename DType> struct TempWorkspace { size_t scores_temp_space; DType* scores; size_t scratch_space; uint8_t* scratch; size_t buffer_space; DType* buffer; size_t nms_scratch_space; uint32_t* nms_scratch; size_t indices_temp_spaces; index_t* indices; }; inline size_t ceil_div(size_t x, size_t y) { return (x + y - 1) / y; } inline size_t align(size_t x, size_t alignment) { return ceil_div(x, alignment) * alignment; } template <typename DType> __global__ void FilterAndPrepareAuxDataKernel(const DType* data, DType* out, DType* scores, index_t num_elements_per_batch, const index_t element_width, const index_t N, const float threshold, const int id_index, const int score_index, const int background_id) { index_t tid = blockIdx.x * blockDim.x + threadIdx.x; bool first_in_element = (tid % element_width == 0); index_t start_of_my_element = tid - (tid % element_width); if (tid < N) { DType my_score = data[start_of_my_element + score_index]; bool filtered_out = my_score <= threshold; if (id_index != -1 && background_id != -1) { DType my_id = data[start_of_my_element + id_index]; filtered_out = filtered_out || (my_id == background_id); } if (!filtered_out) { out[tid] = data[tid]; } else { out[tid] = -1; my_score = -1; } if (first_in_element) { index_t offset = tid / element_width; scores[offset] = my_score; } } } template <typename DType> void FilterAndPrepareAuxData(const Tensor<gpu, 3, DType>& data, Tensor<gpu, 3, DType>* out, const TempWorkspace<DType>& workspace, const BoxNMSParam& param, Stream<gpu>* s) { const int n_threads = 512; index_t N = data.shape_.Size(); const auto blocks = ceil_div(N, n_threads); hipLaunchKernelGGL(( FilterAndPrepareAuxDataKernel), dim3(blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data.dptr_, out->dptr_, workspace.scores, data.shape_[1], data.shape_[2], N, param.valid_thresh, param.id_index, param.score_index, param.background_id); } template <bool check_topk, bool check_score, typename DType> __global__ void CompactDataKernel(const index_t* indices, const DType* source, DType* destination, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int score_index, const index_t N) { const index_t tid_start = blockIdx.x * blockDim.x + threadIdx.x; for (index_t tid = tid_start; tid < N; tid += blockDim.x * gridDim.x) { const index_t 
my_element = tid / element_width; const index_t my_element_in_batch = my_element % num_elements_per_batch; if (check_topk && my_element_in_batch >= topk) { destination[tid] = -1; } else { DType ret; const index_t source_element = indices[my_element]; DType score = 0; if (check_score) { score = source[source_element * element_width + score_index]; } if (score >= 0) { ret = source[source_element * element_width + tid % element_width]; } else { ret = -1; } destination[tid] = ret; } } } template <bool check_score, typename DType> void CompactData(const Tensor<gpu, 1, index_t>& indices, const Tensor<gpu, 3, DType>& source, Tensor<gpu, 3, DType>* destination, const index_t topk, const int score_index, Stream<gpu>* s) { const int n_threads = 512; const size_t max_blocks = 320; index_t N = source.shape_.Size(); const auto blocks = ::min(ceil_div(N, n_threads), max_blocks); if (topk > 0) { hipLaunchKernelGGL(( CompactDataKernel<true, check_score>) , dim3(blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), indices.dptr_, source.dptr_, destination->dptr_, topk, source.shape_[2], source.shape_[1], score_index, N); } else { hipLaunchKernelGGL(( CompactDataKernel<false, check_score>) , dim3(blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), indices.dptr_, source.dptr_, destination->dptr_, topk, source.shape_[2], source.shape_[1], score_index, N); } } template <typename DType> void WorkspaceForSort(const index_t num_elem, const index_t topk, const int alignment, TempWorkspace<DType>* workspace) { const size_t sort_scores_temp_space = mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(num_elem, false, false); const size_t sort_topk_scores_temp_space = mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(topk, false, false); workspace->scratch_space = align(::max(sort_scores_temp_space, sort_topk_scores_temp_space), alignment); } template <int encode, typename DType> __global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, const index_t current_start, const index_t num_elems, const index_t num_batches, const index_t num_blocks_per_row_batch, const index_t num_blocks_per_row, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int coord_index, const int class_index, const int score_index, const float threshold); template <typename DType> __global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, DType* data, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elems, const index_t start_index, const index_t topk); template <typename DType> __global__ void ReduceNMSResultRestKernel(DType* data, const uint32_t* nms_results, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, const index_t topk, const index_t num_blocks_per_batch); template <typename DType> struct NMS { static constexpr int THRESHOLD = 512; void operator()(Tensor<gpu, 3, DType>* data, Tensor<gpu, 2, uint32_t>* scratch, const index_t topk, const BoxNMSParam& param, Stream<gpu>* s) { const int n_threads = 512; const index_t num_batches = data->shape_[0]; const index_t num_elements_per_batch = data->shape_[1]; const index_t element_width = data->shape_[2]; for (index_t current_start = 0; current_start < topk; current_start += THRESHOLD) { const index_t n_elems = topk - current_start; const index_t num_blocks_per_row_batch = ceil_div(n_elems, n_threads); const index_t num_blocks_per_row = num_blocks_per_row_batch 
* num_batches; const index_t n_blocks = THRESHOLD / (sizeof(uint32_t) * 8) * num_blocks_per_row; if (param.in_format == box_common_enum::kCorner) { hipLaunchKernelGGL(( CalculateGreedyNMSResultsKernel<box_common_enum::kCorner>) , dim3(n_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, num_elements_per_batch, param.coord_start, param.force_suppress ? -1 : param.id_index, param.score_index, param.overlap_thresh); } else { hipLaunchKernelGGL(( CalculateGreedyNMSResultsKernel<box_common_enum::kCenter>) , dim3(n_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, num_elements_per_batch, param.coord_start, param.force_suppress ? -1 : param.id_index, param.score_index, param.overlap_thresh); } hipLaunchKernelGGL(( ReduceNMSResultTriangleKernel), dim3(num_batches), dim3(THRESHOLD), 0, Stream<gpu>::GetStream(s), scratch->dptr_, data->dptr_, param.score_index, element_width, num_batches, num_elements_per_batch, current_start, topk); const index_t n_rest_elems = n_elems - THRESHOLD; const index_t num_rest_blocks_per_batch = ceil_div(n_rest_elems, n_threads); const index_t num_rest_blocks = num_rest_blocks_per_batch * num_batches; if (n_rest_elems > 0) { hipLaunchKernelGGL(( ReduceNMSResultRestKernel), dim3(num_rest_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data->dptr_, scratch->dptr_, param.score_index, element_width, num_batches, num_elements_per_batch, current_start, topk, num_rest_blocks_per_batch); } } } }; template <int encode, typename DType> __device__ __forceinline__ DType calculate_area(const DType b0, const DType b1, const DType b2, const DType b3) { DType width = b2; DType height = b3; if (encode == box_common_enum::kCorner) { width -= b0; height -= b1; } if (width < 0 || height < 0) return 0; return width * height; } template <int encode, typename DType> __device__ __forceinline__ DType calculate_intersection(const DType a0, const DType a1, const DType a2, const DType a3, const DType b0, const DType b1, const DType b2, const DType b3) { DType wx, wy; if (encode == box_common_enum::kCorner) { const DType left = a0 > b0 ? a0 : b0; const DType bottom = a1 > b1 ? a1 : b1; const DType right = a2 < b2 ? a2 : b2; const DType top = a3 < b3 ? a3 : b3; wx = right - left; wy = top - bottom; } else { const DType al = 2 * a0 - a2; const DType ar = 2 * a0 + a2; const DType bl = 2 * b0 - b2; const DType br = 2 * b0 + b2; const DType left = bl > al ? bl : al; const DType right = br < ar ? br : ar; wx = right - left; const DType ab = 2 * a1 - a3; const DType at = 2 * a1 + a3; const DType bb = 2 * b1 - b3; const DType bt = 2 * b1 + b3; const DType bottom = bb > ab ? bb : ab; const DType top = bt < at ? 
bt : at; wy = top - bottom; wy = wy / 4; // To compensate for both wx and wy being 2x too large } if (wx <= 0 || wy <= 0) { return 0; } else { return (wx * wy); } } template <int encode, typename DType> __launch_bounds__(512) __global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, const index_t current_start, const index_t num_elems, const index_t num_batches, const index_t num_blocks_per_row_batch, const index_t num_blocks_per_row, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int coord_index, const int class_index, const int score_index, const float threshold) { constexpr int max_elem_width = 20; constexpr int num_other_boxes = sizeof(uint32_t) * 8; __shared__ DType other_boxes[max_elem_width * num_other_boxes]; __shared__ DType other_boxes_areas[num_other_boxes]; const index_t my_row = blockIdx.x / num_blocks_per_row; const index_t my_block_offset_in_row = blockIdx.x % num_blocks_per_row; const index_t my_block_offset_in_batch = my_block_offset_in_row % num_blocks_per_row_batch; const index_t my_batch = (my_block_offset_in_row) / num_blocks_per_row_batch; const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + current_start + threadIdx.x; // Load other boxes const index_t offset = (my_batch * num_elements_per_batch + current_start + my_row * num_other_boxes) * element_width; for (int i = threadIdx.x; i < element_width * num_other_boxes; i += blockDim.x) { other_boxes[i] = data[offset + i]; } __syncthreads(); if (threadIdx.x < num_other_boxes) { const int other_boxes_offset = element_width * threadIdx.x; const DType their_area = calculate_area<encode>(other_boxes[other_boxes_offset + coord_index + 0], other_boxes[other_boxes_offset + coord_index + 1], other_boxes[other_boxes_offset + coord_index + 2], other_boxes[other_boxes_offset + coord_index + 3]); other_boxes_areas[threadIdx.x] = their_area; } __syncthreads(); if (my_element_in_batch >= topk) return; DType my_box[4]; DType my_class = -1; DType my_score = -1; const index_t my_offset = (my_batch * num_elements_per_batch + my_element_in_batch) * element_width; my_score = data[my_offset + score_index]; #pragma unroll for (int i = 0; i < 4; ++i) { my_box[i] = data[my_offset + coord_index + i]; } if (class_index != -1) { my_class = data[my_offset + class_index]; } DType my_area = calculate_area<encode>(my_box[0], my_box[1], my_box[2], my_box[3]); uint32_t ret = 0; if (my_score != -1) { #pragma unroll for (int i = 0; i < num_other_boxes; ++i) { const int other_boxes_offset = element_width * i; if ((class_index == -1 || my_class == other_boxes[other_boxes_offset + class_index]) && other_boxes[other_boxes_offset + score_index] != -1) { const DType their_area = other_boxes_areas[i]; const DType intersect = calculate_intersection<encode>(my_box[0], my_box[1], my_box[2], my_box[3], other_boxes[other_boxes_offset + coord_index + 0], other_boxes[other_boxes_offset + coord_index + 1], other_boxes[other_boxes_offset + coord_index + 2], other_boxes[other_boxes_offset + coord_index + 3]); if (intersect > threshold * (my_area + their_area - intersect)) { ret = ret | (1u << i); } } } } result[(my_row * num_batches + my_batch) * topk + my_element_in_batch] = ~ret; } template <typename DType> __launch_bounds__(NMS<DType>::THRESHOLD) __global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, DType* data, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, 
const index_t topk) { constexpr int n_threads = NMS<DType>::THRESHOLD; constexpr int warp_size = 32; const index_t my_batch = blockIdx.x; const index_t my_element_in_batch = threadIdx.x + start_index; const index_t my_element = my_batch * topk + my_element_in_batch; const int my_warp = threadIdx.x / warp_size; const int my_lane = threadIdx.x % warp_size; __shared__ uint32_t current_valid_boxes[n_threads / warp_size]; const uint32_t full_mask = 0xFFFFFFFF; const uint32_t my_lane_mask = 1 << my_lane; const uint32_t earlier_threads_mask = (1 << (my_lane + 1)) - 1; uint32_t valid = my_lane_mask; uint32_t valid_boxes = full_mask; uint32_t my_next_mask = my_element_in_batch < topk ? nms_results[my_element] : full_mask; #pragma unroll for (int i = 0; i < n_threads / warp_size; ++i) { uint32_t my_mask = my_next_mask; my_next_mask = (((i + 1) < n_threads / warp_size) && (my_element_in_batch < topk)) ? nms_results[(i + 1) * topk * num_batches + my_element] : full_mask; if (my_warp == i && !__all_sync(full_mask, my_mask == full_mask)) { my_mask = my_mask | earlier_threads_mask; // Loop over warp_size - 1 because the last // thread does not contribute to the mask anyway #pragma unroll for (int j = 0; j < warp_size - 1; ++j) { const uint32_t mask = __shfl_sync(full_mask, valid ? my_mask : full_mask, j); valid = valid & mask; } valid_boxes = __ballot_sync(full_mask, valid); } if (my_lane == 0 && my_warp == i) { current_valid_boxes[i] = valid_boxes; } __syncthreads(); if ((my_warp > i) && (((~my_mask) & current_valid_boxes[i]) != 0)) { valid = 0; } } if (my_lane == 0) { nms_results[my_element] = valid_boxes; } if (valid == 0) { data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + score_index] = -1; } } template <typename DType> __launch_bounds__(512) __global__ void ReduceNMSResultRestKernel(DType* data, const uint32_t* nms_results, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, const index_t topk, const index_t num_blocks_per_batch) { constexpr int num_other_boxes = sizeof(uint32_t) * 8; constexpr int num_iterations = NMS<DType>::THRESHOLD / num_other_boxes; constexpr int warp_size = 32; const index_t my_block_offset_in_batch = blockIdx.x % num_blocks_per_batch; const index_t my_batch = blockIdx.x / num_blocks_per_batch; const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + start_index + NMS<DType>::THRESHOLD + threadIdx.x; const index_t my_element = my_batch * topk + my_element_in_batch; if (my_element_in_batch >= topk) return; bool valid = true; #pragma unroll for (int i = 0; i < num_iterations; ++i) { const uint32_t my_mask = nms_results[i * topk * num_batches + my_element]; const uint32_t valid_boxes = nms_results[my_batch * topk + i * warp_size + start_index]; const bool no_hit = (valid_boxes & (~my_mask)) == 0; valid = valid && no_hit; } if (!valid) { data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + score_index] = -1; } } template <typename DType> TempWorkspace<DType> GetWorkspace(const index_t num_batch, const index_t num_elem, const int width_elem, const index_t topk, const OpContext& ctx) { TempWorkspace<DType> workspace; Stream<gpu>* s = ctx.get_stream<gpu>(); const int alignment = 128; // Get the workspace size workspace.scores_temp_space = 2 * align(num_batch * num_elem * sizeof(DType), alignment); workspace.indices_temp_spaces = 2 * align(num_batch * num_elem * sizeof(index_t), alignment); 
WorkspaceForSort(num_elem, topk, alignment, &workspace); // Place for a buffer workspace.buffer_space = align(num_batch * num_elem * width_elem * sizeof(DType), alignment); workspace.nms_scratch_space = align(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8) * num_batch * topk * sizeof(uint32_t), alignment); const size_t workspace_size = workspace.scores_temp_space + workspace.scratch_space + workspace.buffer_space + workspace.nms_scratch_space + workspace.indices_temp_spaces; // Obtain the memory for workspace Tensor<gpu, 1, double> scratch_memory = ctx.requested[box_nms_enum::kTempSpace].get_space_typed<gpu, 1, double>( mshadow::Shape1(ceil_div(workspace_size, sizeof(double))), s); // Populate workspace pointers workspace.scores = reinterpret_cast<DType*>(scratch_memory.dptr_); workspace.scratch = reinterpret_cast<uint8_t*>(workspace.scores) + workspace.scores_temp_space; workspace.buffer = reinterpret_cast<DType*>(workspace.scratch + workspace.scratch_space); workspace.nms_scratch = reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(workspace.buffer) + workspace.buffer_space); workspace.indices = reinterpret_cast<index_t*>(reinterpret_cast<uint8_t*>(workspace.nms_scratch) + workspace.nms_scratch_space); return workspace; } template <typename DType> __global__ void ExtractScoresKernel(const DType* data, DType* scores, const index_t N, const int element_width, const int score_index) { const index_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < N) { scores[tid] = data[tid * element_width + score_index]; } } template <typename DType> void CompactNMSResults(const Tensor<gpu, 3, DType>& data, Tensor<gpu, 3, DType>* out, Tensor<gpu, 1, index_t>* indices, Tensor<gpu, 1, DType>* scores, Tensor<gpu, 1, index_t>* sorted_indices, Tensor<gpu, 1, DType>* sorted_scores, Tensor<gpu, 1, char>* scratch, const int score_index, const index_t topk, Stream<gpu>* s) { using mshadow::Shape1; constexpr int n_threads = 512; const index_t num_elements = scores->shape_.Size(); const index_t num_elements_per_batch = data.shape_[1]; const index_t num_batches = data.shape_[0]; const int element_width = data.shape_[2]; const index_t n_blocks = ceil_div(num_elements, n_threads); hipLaunchKernelGGL(( ExtractScoresKernel), dim3(n_blocks), dim3(n_threads), 0, Stream<gpu>::GetStream(s), data.dptr_, scores->dptr_, num_elements, element_width, score_index); *indices = mshadow::expr::range<index_t>(0, num_elements); for (index_t i = 0; i < num_batches; ++i) { // Sort each batch separately Tensor<gpu, 1, DType> scores_batch(scores->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, index_t> indices_batch( indices->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, DType> sorted_scores_batch( sorted_scores->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, index_t> sorted_indices_batch( sorted_indices->dptr_ + i * num_elements_per_batch, Shape1(topk), s); mxnet::op::SortByKey(scores_batch, indices_batch, false, scratch, 0, 8 * sizeof(DType), &sorted_scores_batch, &sorted_indices_batch); } CompactData<true>(*sorted_indices, data, out, topk, score_index, s); } } // namespace void BoxNMSForwardGPU_notemp(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using mshadow::Shape1; using mshadow::Shape2; using mshadow::Shape3; CHECK_NE(req[0], kAddTo) << "BoxNMS does not support kAddTo"; CHECK_NE(req[0], kWriteInplace) << "BoxNMS does not support in place 
computation"; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]"; const BoxNMSParam& param = nnvm::get<BoxNMSParam>(attrs.parsed); Stream<gpu>* s = ctx.get_stream<gpu>(); mxnet::TShape in_shape = inputs[box_nms_enum::kData].shape_; int indim = in_shape.ndim(); int num_batch = indim <= 2 ? 1 : in_shape.ProdShape(0, indim - 2); int num_elem = in_shape[indim - 2]; int width_elem = in_shape[indim - 1]; MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<gpu, 3, DType> data = inputs[box_nms_enum::kData].get_with_shape<gpu, 3, DType>( Shape3(num_batch, num_elem, width_elem), s); Tensor<gpu, 3, DType> out = outputs[box_nms_enum::kOut].get_with_shape<gpu, 3, DType>( Shape3(num_batch, num_elem, width_elem), s); // Special case for topk == 0 if (param.topk == 0) { if (req[0] != kNullOp && req[0] != kWriteInplace) { out = mshadow::expr::F<mshadow_op::identity>(data); } return; } index_t topk = param.topk > 0 ? ::min(param.topk, num_elem) : num_elem; const auto& workspace = GetWorkspace<DType>(num_batch, num_elem, width_elem, topk, ctx); FilterAndPrepareAuxData(data, &out, workspace, param, s); Tensor<gpu, 1, DType> scores(workspace.scores, Shape1(num_batch * num_elem), s); Tensor<gpu, 1, DType> sorted_scores( workspace.scores + scores.MSize(), Shape1(num_batch * num_elem), s); Tensor<gpu, 1, index_t> indices(workspace.indices, Shape1(num_batch * num_elem), s); Tensor<gpu, 1, index_t> sorted_indices( workspace.indices + indices.MSize(), Shape1(num_batch * num_elem), s); Tensor<gpu, 1, char> scratch( reinterpret_cast<char*>(workspace.scratch), Shape1(workspace.scratch_space), s); Tensor<gpu, 3, DType> buffer(workspace.buffer, Shape3(num_batch, num_elem, width_elem), s); Tensor<gpu, 2, uint32_t> nms_scratch( workspace.nms_scratch, Shape2(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8), topk * num_batch), s); indices = mshadow::expr::range<index_t>(0, num_batch * num_elem); for (index_t i = 0; i < num_batch; ++i) { // Sort each batch separately Tensor<gpu, 1, DType> scores_batch(scores.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, index_t> indices_batch(indices.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, DType> sorted_scores_batch( sorted_scores.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, index_t> sorted_indices_batch( sorted_indices.dptr_ + i * num_elem, Shape1(num_elem), s); mxnet::op::SortByKey(scores_batch, indices_batch, false, &scratch, 0, 8 * sizeof(DType), &sorted_scores_batch, &sorted_indices_batch); } CompactData<false>(sorted_indices, out, &buffer, topk, -1, s); NMS<DType> nms; nms(&buffer, &nms_scratch, topk, param, s); CompactNMSResults(buffer, &out, &indices, &scores, &sorted_indices, &sorted_scores, &scratch, param.score_index, topk, s); // convert encoding if (param.in_format != param.out_format) { if (box_common_enum::kCenter == param.out_format) { mxnet::op::mxnet_op::Kernel<corner_to_center, gpu>::Launch( s, num_batch * num_elem, out.dptr_ + param.coord_start, width_elem); } else { mxnet::op::mxnet_op::Kernel<center_to_corner, gpu>::Launch( s, num_batch * num_elem, out.dptr_ + param.coord_start, width_elem); } } }); } void BoxNMSForwardGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]"; if (req[1] == kNullOp) { 
BoxNMSForwardGPU_notemp(attrs, ctx, inputs, req, outputs); return; } BoxNMSForward<gpu>(attrs, ctx, inputs, req, outputs); } NNVM_REGISTER_OP(_contrib_box_nms).set_attr<FCompute>("FCompute<gpu>", BoxNMSForwardGPU); NNVM_REGISTER_OP(_backward_contrib_box_nms) .set_attr<FCompute>("FCompute<gpu>", BoxNMSBackward<gpu>); NNVM_REGISTER_OP(_contrib_box_iou).set_attr<FCompute>("FCompute<gpu>", BoxOverlapForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_box_iou) .set_attr<FCompute>("FCompute<gpu>", BoxOverlapBackward<gpu>); NNVM_REGISTER_OP(_contrib_bipartite_matching) .set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_bipartite_matching) .set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingBackward<gpu>); NNVM_REGISTER_OP(_contrib_box_encode).set_attr<FCompute>("FCompute<gpu>", BoxEncodeForward<gpu>); NNVM_REGISTER_OP(_contrib_box_decode).set_attr<FCompute>("FCompute<gpu>", BoxDecodeForward<gpu>); } // namespace op } // namespace mxnet
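Note: the NMS path above encodes pairwise suppression decisions as bitmasks — each uint32_t covers one group of 32 higher-ranked boxes — and then resolves them in the triangle/rest reduction kernels. For orientation only, the following is a minimal sequential C++ sketch of the same greedy criterion (a candidate is dropped when its IoU with an already-kept, higher-scoring box exceeds overlap_thresh). The helper names iou_corner and greedy_nms_keep are illustrative and do not appear in the MXNet source.

// Minimal sequential sketch of greedy NMS over corner-encoded boxes
// (x1, y1, x2, y2), assuming boxes are already sorted by descending score.
// The GPU code above implements the same criterion with per-32-box bitmasks.
#include <algorithm>
#include <vector>

struct Box { float x1, y1, x2, y2; };

inline float iou_corner(const Box& a, const Box& b) {
  const float iw = std::min(a.x2, b.x2) - std::max(a.x1, b.x1);
  const float ih = std::min(a.y2, b.y2) - std::max(a.y1, b.y1);
  if (iw <= 0.f || ih <= 0.f) return 0.f;
  const float inter = iw * ih;
  const float area_a = (a.x2 - a.x1) * (a.y2 - a.y1);
  const float area_b = (b.x2 - b.x1) * (b.y2 - b.y1);
  return inter / (area_a + area_b - inter);
}

std::vector<bool> greedy_nms_keep(const std::vector<Box>& sorted_boxes,
                                  float overlap_thresh) {
  std::vector<bool> keep(sorted_boxes.size(), true);
  for (size_t i = 0; i < sorted_boxes.size(); ++i) {
    if (!keep[i]) continue;  // already suppressed, cannot suppress others
    for (size_t j = i + 1; j < sorted_boxes.size(); ++j) {
      if (keep[j] && iou_corner(sorted_boxes[i], sorted_boxes[j]) > overlap_thresh)
        keep[j] = false;  // suppressed by a kept, higher-scoring box
    }
  }
  return keep;
}

The GPU version reaches the same keep/suppress decisions but evaluates the pairwise overlaps in parallel and resolves the sequential dependency through the bitmask reduction.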
fcba72cf7e961e8ab9af0a7d7ce03b80d09d4829.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file bounding_box.cu * \brief Bounding box util functions and operators * \author Joshua Zhang */ #include <cub/cub.cuh> #include "./bounding_box-inl.cuh" #include "./bounding_box-inl.h" #include "../elemwise_op_common.h" namespace mxnet { namespace op { namespace { using mshadow::Stream; using mshadow::Tensor; template <typename DType> struct TempWorkspace { size_t scores_temp_space; DType* scores; size_t scratch_space; uint8_t* scratch; size_t buffer_space; DType* buffer; size_t nms_scratch_space; uint32_t* nms_scratch; size_t indices_temp_spaces; index_t* indices; }; inline size_t ceil_div(size_t x, size_t y) { return (x + y - 1) / y; } inline size_t align(size_t x, size_t alignment) { return ceil_div(x, alignment) * alignment; } template <typename DType> __global__ void FilterAndPrepareAuxDataKernel(const DType* data, DType* out, DType* scores, index_t num_elements_per_batch, const index_t element_width, const index_t N, const float threshold, const int id_index, const int score_index, const int background_id) { index_t tid = blockIdx.x * blockDim.x + threadIdx.x; bool first_in_element = (tid % element_width == 0); index_t start_of_my_element = tid - (tid % element_width); if (tid < N) { DType my_score = data[start_of_my_element + score_index]; bool filtered_out = my_score <= threshold; if (id_index != -1 && background_id != -1) { DType my_id = data[start_of_my_element + id_index]; filtered_out = filtered_out || (my_id == background_id); } if (!filtered_out) { out[tid] = data[tid]; } else { out[tid] = -1; my_score = -1; } if (first_in_element) { index_t offset = tid / element_width; scores[offset] = my_score; } } } template <typename DType> void FilterAndPrepareAuxData(const Tensor<gpu, 3, DType>& data, Tensor<gpu, 3, DType>* out, const TempWorkspace<DType>& workspace, const BoxNMSParam& param, Stream<gpu>* s) { const int n_threads = 512; index_t N = data.shape_.Size(); const auto blocks = ceil_div(N, n_threads); FilterAndPrepareAuxDataKernel<<<blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data.dptr_, out->dptr_, workspace.scores, data.shape_[1], data.shape_[2], N, param.valid_thresh, param.id_index, param.score_index, param.background_id); } template <bool check_topk, bool check_score, typename DType> __global__ void CompactDataKernel(const index_t* indices, const DType* source, DType* destination, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int score_index, const index_t N) { const index_t tid_start = blockIdx.x * blockDim.x + threadIdx.x; for (index_t tid = tid_start; tid < N; tid += blockDim.x * gridDim.x) { const index_t my_element = tid / element_width; const index_t my_element_in_batch = my_element % num_elements_per_batch; if (check_topk && 
my_element_in_batch >= topk) { destination[tid] = -1; } else { DType ret; const index_t source_element = indices[my_element]; DType score = 0; if (check_score) { score = source[source_element * element_width + score_index]; } if (score >= 0) { ret = source[source_element * element_width + tid % element_width]; } else { ret = -1; } destination[tid] = ret; } } } template <bool check_score, typename DType> void CompactData(const Tensor<gpu, 1, index_t>& indices, const Tensor<gpu, 3, DType>& source, Tensor<gpu, 3, DType>* destination, const index_t topk, const int score_index, Stream<gpu>* s) { const int n_threads = 512; const size_t max_blocks = 320; index_t N = source.shape_.Size(); const auto blocks = std::min(ceil_div(N, n_threads), max_blocks); if (topk > 0) { CompactDataKernel<true, check_score> <<<blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>(indices.dptr_, source.dptr_, destination->dptr_, topk, source.shape_[2], source.shape_[1], score_index, N); } else { CompactDataKernel<false, check_score> <<<blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>(indices.dptr_, source.dptr_, destination->dptr_, topk, source.shape_[2], source.shape_[1], score_index, N); } } template <typename DType> void WorkspaceForSort(const index_t num_elem, const index_t topk, const int alignment, TempWorkspace<DType>* workspace) { const size_t sort_scores_temp_space = mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(num_elem, false, false); const size_t sort_topk_scores_temp_space = mxnet::op::SortByKeyWorkspaceSize<DType, index_t, gpu>(topk, false, false); workspace->scratch_space = align(std::max(sort_scores_temp_space, sort_topk_scores_temp_space), alignment); } template <int encode, typename DType> __global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, const index_t current_start, const index_t num_elems, const index_t num_batches, const index_t num_blocks_per_row_batch, const index_t num_blocks_per_row, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int coord_index, const int class_index, const int score_index, const float threshold); template <typename DType> __global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, DType* data, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elems, const index_t start_index, const index_t topk); template <typename DType> __global__ void ReduceNMSResultRestKernel(DType* data, const uint32_t* nms_results, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, const index_t topk, const index_t num_blocks_per_batch); template <typename DType> struct NMS { static constexpr int THRESHOLD = 512; void operator()(Tensor<gpu, 3, DType>* data, Tensor<gpu, 2, uint32_t>* scratch, const index_t topk, const BoxNMSParam& param, Stream<gpu>* s) { const int n_threads = 512; const index_t num_batches = data->shape_[0]; const index_t num_elements_per_batch = data->shape_[1]; const index_t element_width = data->shape_[2]; for (index_t current_start = 0; current_start < topk; current_start += THRESHOLD) { const index_t n_elems = topk - current_start; const index_t num_blocks_per_row_batch = ceil_div(n_elems, n_threads); const index_t num_blocks_per_row = num_blocks_per_row_batch * num_batches; const index_t n_blocks = THRESHOLD / (sizeof(uint32_t) * 8) * num_blocks_per_row; if (param.in_format == box_common_enum::kCorner) { 
CalculateGreedyNMSResultsKernel<box_common_enum::kCorner> <<<n_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, num_elements_per_batch, param.coord_start, param.force_suppress ? -1 : param.id_index, param.score_index, param.overlap_thresh); } else { CalculateGreedyNMSResultsKernel<box_common_enum::kCenter> <<<n_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, num_elements_per_batch, param.coord_start, param.force_suppress ? -1 : param.id_index, param.score_index, param.overlap_thresh); } ReduceNMSResultTriangleKernel<<<num_batches, THRESHOLD, 0, Stream<gpu>::GetStream(s)>>>( scratch->dptr_, data->dptr_, param.score_index, element_width, num_batches, num_elements_per_batch, current_start, topk); const index_t n_rest_elems = n_elems - THRESHOLD; const index_t num_rest_blocks_per_batch = ceil_div(n_rest_elems, n_threads); const index_t num_rest_blocks = num_rest_blocks_per_batch * num_batches; if (n_rest_elems > 0) { ReduceNMSResultRestKernel<<<num_rest_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data->dptr_, scratch->dptr_, param.score_index, element_width, num_batches, num_elements_per_batch, current_start, topk, num_rest_blocks_per_batch); } } } }; template <int encode, typename DType> __device__ __forceinline__ DType calculate_area(const DType b0, const DType b1, const DType b2, const DType b3) { DType width = b2; DType height = b3; if (encode == box_common_enum::kCorner) { width -= b0; height -= b1; } if (width < 0 || height < 0) return 0; return width * height; } template <int encode, typename DType> __device__ __forceinline__ DType calculate_intersection(const DType a0, const DType a1, const DType a2, const DType a3, const DType b0, const DType b1, const DType b2, const DType b3) { DType wx, wy; if (encode == box_common_enum::kCorner) { const DType left = a0 > b0 ? a0 : b0; const DType bottom = a1 > b1 ? a1 : b1; const DType right = a2 < b2 ? a2 : b2; const DType top = a3 < b3 ? a3 : b3; wx = right - left; wy = top - bottom; } else { const DType al = 2 * a0 - a2; const DType ar = 2 * a0 + a2; const DType bl = 2 * b0 - b2; const DType br = 2 * b0 + b2; const DType left = bl > al ? bl : al; const DType right = br < ar ? br : ar; wx = right - left; const DType ab = 2 * a1 - a3; const DType at = 2 * a1 + a3; const DType bb = 2 * b1 - b3; const DType bt = 2 * b1 + b3; const DType bottom = bb > ab ? bb : ab; const DType top = bt < at ? 
bt : at; wy = top - bottom; wy = wy / 4; // To compensate for both wx and wy being 2x too large } if (wx <= 0 || wy <= 0) { return 0; } else { return (wx * wy); } } template <int encode, typename DType> __launch_bounds__(512) __global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, const index_t current_start, const index_t num_elems, const index_t num_batches, const index_t num_blocks_per_row_batch, const index_t num_blocks_per_row, const index_t topk, const index_t element_width, const index_t num_elements_per_batch, const int coord_index, const int class_index, const int score_index, const float threshold) { constexpr int max_elem_width = 20; constexpr int num_other_boxes = sizeof(uint32_t) * 8; __shared__ DType other_boxes[max_elem_width * num_other_boxes]; __shared__ DType other_boxes_areas[num_other_boxes]; const index_t my_row = blockIdx.x / num_blocks_per_row; const index_t my_block_offset_in_row = blockIdx.x % num_blocks_per_row; const index_t my_block_offset_in_batch = my_block_offset_in_row % num_blocks_per_row_batch; const index_t my_batch = (my_block_offset_in_row) / num_blocks_per_row_batch; const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + current_start + threadIdx.x; // Load other boxes const index_t offset = (my_batch * num_elements_per_batch + current_start + my_row * num_other_boxes) * element_width; for (int i = threadIdx.x; i < element_width * num_other_boxes; i += blockDim.x) { other_boxes[i] = data[offset + i]; } __syncthreads(); if (threadIdx.x < num_other_boxes) { const int other_boxes_offset = element_width * threadIdx.x; const DType their_area = calculate_area<encode>(other_boxes[other_boxes_offset + coord_index + 0], other_boxes[other_boxes_offset + coord_index + 1], other_boxes[other_boxes_offset + coord_index + 2], other_boxes[other_boxes_offset + coord_index + 3]); other_boxes_areas[threadIdx.x] = their_area; } __syncthreads(); if (my_element_in_batch >= topk) return; DType my_box[4]; DType my_class = -1; DType my_score = -1; const index_t my_offset = (my_batch * num_elements_per_batch + my_element_in_batch) * element_width; my_score = data[my_offset + score_index]; #pragma unroll for (int i = 0; i < 4; ++i) { my_box[i] = data[my_offset + coord_index + i]; } if (class_index != -1) { my_class = data[my_offset + class_index]; } DType my_area = calculate_area<encode>(my_box[0], my_box[1], my_box[2], my_box[3]); uint32_t ret = 0; if (my_score != -1) { #pragma unroll for (int i = 0; i < num_other_boxes; ++i) { const int other_boxes_offset = element_width * i; if ((class_index == -1 || my_class == other_boxes[other_boxes_offset + class_index]) && other_boxes[other_boxes_offset + score_index] != -1) { const DType their_area = other_boxes_areas[i]; const DType intersect = calculate_intersection<encode>(my_box[0], my_box[1], my_box[2], my_box[3], other_boxes[other_boxes_offset + coord_index + 0], other_boxes[other_boxes_offset + coord_index + 1], other_boxes[other_boxes_offset + coord_index + 2], other_boxes[other_boxes_offset + coord_index + 3]); if (intersect > threshold * (my_area + their_area - intersect)) { ret = ret | (1u << i); } } } } result[(my_row * num_batches + my_batch) * topk + my_element_in_batch] = ~ret; } template <typename DType> __launch_bounds__(NMS<DType>::THRESHOLD) __global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, DType* data, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, 
const index_t topk) { constexpr int n_threads = NMS<DType>::THRESHOLD; constexpr int warp_size = 32; const index_t my_batch = blockIdx.x; const index_t my_element_in_batch = threadIdx.x + start_index; const index_t my_element = my_batch * topk + my_element_in_batch; const int my_warp = threadIdx.x / warp_size; const int my_lane = threadIdx.x % warp_size; __shared__ uint32_t current_valid_boxes[n_threads / warp_size]; const uint32_t full_mask = 0xFFFFFFFF; const uint32_t my_lane_mask = 1 << my_lane; const uint32_t earlier_threads_mask = (1 << (my_lane + 1)) - 1; uint32_t valid = my_lane_mask; uint32_t valid_boxes = full_mask; uint32_t my_next_mask = my_element_in_batch < topk ? nms_results[my_element] : full_mask; #pragma unroll for (int i = 0; i < n_threads / warp_size; ++i) { uint32_t my_mask = my_next_mask; my_next_mask = (((i + 1) < n_threads / warp_size) && (my_element_in_batch < topk)) ? nms_results[(i + 1) * topk * num_batches + my_element] : full_mask; if (my_warp == i && !__all_sync(full_mask, my_mask == full_mask)) { my_mask = my_mask | earlier_threads_mask; // Loop over warp_size - 1 because the last // thread does not contribute to the mask anyway #pragma unroll for (int j = 0; j < warp_size - 1; ++j) { const uint32_t mask = __shfl_sync(full_mask, valid ? my_mask : full_mask, j); valid = valid & mask; } valid_boxes = __ballot_sync(full_mask, valid); } if (my_lane == 0 && my_warp == i) { current_valid_boxes[i] = valid_boxes; } __syncthreads(); if ((my_warp > i) && (((~my_mask) & current_valid_boxes[i]) != 0)) { valid = 0; } } if (my_lane == 0) { nms_results[my_element] = valid_boxes; } if (valid == 0) { data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + score_index] = -1; } } template <typename DType> __launch_bounds__(512) __global__ void ReduceNMSResultRestKernel(DType* data, const uint32_t* nms_results, const index_t score_index, const index_t element_width, const index_t num_batches, const index_t num_elements_per_batch, const index_t start_index, const index_t topk, const index_t num_blocks_per_batch) { constexpr int num_other_boxes = sizeof(uint32_t) * 8; constexpr int num_iterations = NMS<DType>::THRESHOLD / num_other_boxes; constexpr int warp_size = 32; const index_t my_block_offset_in_batch = blockIdx.x % num_blocks_per_batch; const index_t my_batch = blockIdx.x / num_blocks_per_batch; const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + start_index + NMS<DType>::THRESHOLD + threadIdx.x; const index_t my_element = my_batch * topk + my_element_in_batch; if (my_element_in_batch >= topk) return; bool valid = true; #pragma unroll for (int i = 0; i < num_iterations; ++i) { const uint32_t my_mask = nms_results[i * topk * num_batches + my_element]; const uint32_t valid_boxes = nms_results[my_batch * topk + i * warp_size + start_index]; const bool no_hit = (valid_boxes & (~my_mask)) == 0; valid = valid && no_hit; } if (!valid) { data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + score_index] = -1; } } template <typename DType> TempWorkspace<DType> GetWorkspace(const index_t num_batch, const index_t num_elem, const int width_elem, const index_t topk, const OpContext& ctx) { TempWorkspace<DType> workspace; Stream<gpu>* s = ctx.get_stream<gpu>(); const int alignment = 128; // Get the workspace size workspace.scores_temp_space = 2 * align(num_batch * num_elem * sizeof(DType), alignment); workspace.indices_temp_spaces = 2 * align(num_batch * num_elem * sizeof(index_t), alignment); 
WorkspaceForSort(num_elem, topk, alignment, &workspace); // Place for a buffer workspace.buffer_space = align(num_batch * num_elem * width_elem * sizeof(DType), alignment); workspace.nms_scratch_space = align(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8) * num_batch * topk * sizeof(uint32_t), alignment); const size_t workspace_size = workspace.scores_temp_space + workspace.scratch_space + workspace.buffer_space + workspace.nms_scratch_space + workspace.indices_temp_spaces; // Obtain the memory for workspace Tensor<gpu, 1, double> scratch_memory = ctx.requested[box_nms_enum::kTempSpace].get_space_typed<gpu, 1, double>( mshadow::Shape1(ceil_div(workspace_size, sizeof(double))), s); // Populate workspace pointers workspace.scores = reinterpret_cast<DType*>(scratch_memory.dptr_); workspace.scratch = reinterpret_cast<uint8_t*>(workspace.scores) + workspace.scores_temp_space; workspace.buffer = reinterpret_cast<DType*>(workspace.scratch + workspace.scratch_space); workspace.nms_scratch = reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(workspace.buffer) + workspace.buffer_space); workspace.indices = reinterpret_cast<index_t*>(reinterpret_cast<uint8_t*>(workspace.nms_scratch) + workspace.nms_scratch_space); return workspace; } template <typename DType> __global__ void ExtractScoresKernel(const DType* data, DType* scores, const index_t N, const int element_width, const int score_index) { const index_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < N) { scores[tid] = data[tid * element_width + score_index]; } } template <typename DType> void CompactNMSResults(const Tensor<gpu, 3, DType>& data, Tensor<gpu, 3, DType>* out, Tensor<gpu, 1, index_t>* indices, Tensor<gpu, 1, DType>* scores, Tensor<gpu, 1, index_t>* sorted_indices, Tensor<gpu, 1, DType>* sorted_scores, Tensor<gpu, 1, char>* scratch, const int score_index, const index_t topk, Stream<gpu>* s) { using mshadow::Shape1; constexpr int n_threads = 512; const index_t num_elements = scores->shape_.Size(); const index_t num_elements_per_batch = data.shape_[1]; const index_t num_batches = data.shape_[0]; const int element_width = data.shape_[2]; const index_t n_blocks = ceil_div(num_elements, n_threads); ExtractScoresKernel<<<n_blocks, n_threads, 0, Stream<gpu>::GetStream(s)>>>( data.dptr_, scores->dptr_, num_elements, element_width, score_index); *indices = mshadow::expr::range<index_t>(0, num_elements); for (index_t i = 0; i < num_batches; ++i) { // Sort each batch separately Tensor<gpu, 1, DType> scores_batch(scores->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, index_t> indices_batch( indices->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, DType> sorted_scores_batch( sorted_scores->dptr_ + i * num_elements_per_batch, Shape1(topk), s); Tensor<gpu, 1, index_t> sorted_indices_batch( sorted_indices->dptr_ + i * num_elements_per_batch, Shape1(topk), s); mxnet::op::SortByKey(scores_batch, indices_batch, false, scratch, 0, 8 * sizeof(DType), &sorted_scores_batch, &sorted_indices_batch); } CompactData<true>(*sorted_indices, data, out, topk, score_index, s); } } // namespace void BoxNMSForwardGPU_notemp(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using mshadow::Shape1; using mshadow::Shape2; using mshadow::Shape3; CHECK_NE(req[0], kAddTo) << "BoxNMS does not support kAddTo"; CHECK_NE(req[0], kWriteInplace) << "BoxNMS does not support in place computation"; CHECK_EQ(inputs.size(), 
1U); CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]"; const BoxNMSParam& param = nnvm::get<BoxNMSParam>(attrs.parsed); Stream<gpu>* s = ctx.get_stream<gpu>(); mxnet::TShape in_shape = inputs[box_nms_enum::kData].shape_; int indim = in_shape.ndim(); int num_batch = indim <= 2 ? 1 : in_shape.ProdShape(0, indim - 2); int num_elem = in_shape[indim - 2]; int width_elem = in_shape[indim - 1]; MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { Tensor<gpu, 3, DType> data = inputs[box_nms_enum::kData].get_with_shape<gpu, 3, DType>( Shape3(num_batch, num_elem, width_elem), s); Tensor<gpu, 3, DType> out = outputs[box_nms_enum::kOut].get_with_shape<gpu, 3, DType>( Shape3(num_batch, num_elem, width_elem), s); // Special case for topk == 0 if (param.topk == 0) { if (req[0] != kNullOp && req[0] != kWriteInplace) { out = mshadow::expr::F<mshadow_op::identity>(data); } return; } index_t topk = param.topk > 0 ? std::min(param.topk, num_elem) : num_elem; const auto& workspace = GetWorkspace<DType>(num_batch, num_elem, width_elem, topk, ctx); FilterAndPrepareAuxData(data, &out, workspace, param, s); Tensor<gpu, 1, DType> scores(workspace.scores, Shape1(num_batch * num_elem), s); Tensor<gpu, 1, DType> sorted_scores( workspace.scores + scores.MSize(), Shape1(num_batch * num_elem), s); Tensor<gpu, 1, index_t> indices(workspace.indices, Shape1(num_batch * num_elem), s); Tensor<gpu, 1, index_t> sorted_indices( workspace.indices + indices.MSize(), Shape1(num_batch * num_elem), s); Tensor<gpu, 1, char> scratch( reinterpret_cast<char*>(workspace.scratch), Shape1(workspace.scratch_space), s); Tensor<gpu, 3, DType> buffer(workspace.buffer, Shape3(num_batch, num_elem, width_elem), s); Tensor<gpu, 2, uint32_t> nms_scratch( workspace.nms_scratch, Shape2(NMS<DType>::THRESHOLD / (sizeof(uint32_t) * 8), topk * num_batch), s); indices = mshadow::expr::range<index_t>(0, num_batch * num_elem); for (index_t i = 0; i < num_batch; ++i) { // Sort each batch separately Tensor<gpu, 1, DType> scores_batch(scores.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, index_t> indices_batch(indices.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, DType> sorted_scores_batch( sorted_scores.dptr_ + i * num_elem, Shape1(num_elem), s); Tensor<gpu, 1, index_t> sorted_indices_batch( sorted_indices.dptr_ + i * num_elem, Shape1(num_elem), s); mxnet::op::SortByKey(scores_batch, indices_batch, false, &scratch, 0, 8 * sizeof(DType), &sorted_scores_batch, &sorted_indices_batch); } CompactData<false>(sorted_indices, out, &buffer, topk, -1, s); NMS<DType> nms; nms(&buffer, &nms_scratch, topk, param, s); CompactNMSResults(buffer, &out, &indices, &scores, &sorted_indices, &sorted_scores, &scratch, param.score_index, topk, s); // convert encoding if (param.in_format != param.out_format) { if (box_common_enum::kCenter == param.out_format) { mxnet::op::mxnet_op::Kernel<corner_to_center, gpu>::Launch( s, num_batch * num_elem, out.dptr_ + param.coord_start, width_elem); } else { mxnet::op::mxnet_op::Kernel<center_to_corner, gpu>::Launch( s, num_batch * num_elem, out.dptr_ + param.coord_start, width_elem); } } }); } void BoxNMSForwardGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mshadow::expr; using namespace mxnet_op; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]"; if (req[1] == kNullOp) { BoxNMSForwardGPU_notemp(attrs, 
ctx, inputs, req, outputs); return; } BoxNMSForward<gpu>(attrs, ctx, inputs, req, outputs); } NNVM_REGISTER_OP(_contrib_box_nms).set_attr<FCompute>("FCompute<gpu>", BoxNMSForwardGPU); NNVM_REGISTER_OP(_backward_contrib_box_nms) .set_attr<FCompute>("FCompute<gpu>", BoxNMSBackward<gpu>); NNVM_REGISTER_OP(_contrib_box_iou).set_attr<FCompute>("FCompute<gpu>", BoxOverlapForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_box_iou) .set_attr<FCompute>("FCompute<gpu>", BoxOverlapBackward<gpu>); NNVM_REGISTER_OP(_contrib_bipartite_matching) .set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingForward<gpu>); NNVM_REGISTER_OP(_backward_contrib_bipartite_matching) .set_attr<FCompute>("FCompute<gpu>", BipartiteMatchingBackward<gpu>); NNVM_REGISTER_OP(_contrib_box_encode).set_attr<FCompute>("FCompute<gpu>", BoxEncodeForward<gpu>); NNVM_REGISTER_OP(_contrib_box_decode).set_attr<FCompute>("FCompute<gpu>", BoxDecodeForward<gpu>); } // namespace op } // namespace mxnet
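The .cu file above and the .hip file before it differ mainly in runtime API names (cuda* vs hip*) and in kernel-launch syntax. Below is a minimal, self-contained sketch of that launch translation; the toy kernel and names are ours, not taken from the MXNet source.

// Illustration of the launch-syntax difference visible between the paired files.
#include <hip/hip_runtime.h>

__global__ void scale(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, float a, int n, hipStream_t stream) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  // CUDA form (as in the .cu file):
  //   scale<<<blocks, threads, 0, stream>>>(d_x, a, n);
  // HIP form (as produced by hipify, seen in the .hip file):
  hipLaunchKernelGGL(scale, dim3(blocks), dim3(threads), 0, stream, d_x, a, n);
}

The hipify tools perform this rewrite mechanically, which is why the paired files in this set track each other line for line apart from the API renames.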
7e2dbfbb1d0755507858958e59196160dae0f64e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include "masked_conv2d_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"

void MaskedIm2colForwardCUDAKernelLauncher(const Tensor bottom_data,
                                           const Tensor mask_h_idx,
                                           const Tensor mask_w_idx,
                                           Tensor top_data, const int kernel_h,
                                           const int kernel_w, const int pad_h,
                                           const int pad_w) {
  int channels = bottom_data.size(1);
  int height = bottom_data.size(2);
  int width = bottom_data.size(3);
  int mask_cnt = mask_h_idx.size(0);
  int output_size = mask_cnt * channels;

  at::hip::HIPGuardMasqueradingAsCUDA device_guard(bottom_data.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      bottom_data.scalar_type(), "MaskedIm2colLaucherForward", ([&] {
        const scalar_t *bottom_data_ = bottom_data.data_ptr<scalar_t>();
        const int64_t *mask_h_idx_ = mask_h_idx.data_ptr<int64_t>();
        const int64_t *mask_w_idx_ = mask_w_idx.data_ptr<int64_t>();
        scalar_t *top_data_ = top_data.data_ptr<scalar_t>();

        hipLaunchKernelGGL(( MaskedIm2colForward<scalar_t>)
            , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
                output_size, bottom_data_, height, width, kernel_h, kernel_w,
                pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_);
      }));
  AT_CUDA_CHECK(hipGetLastError());
}

void MaskedCol2imForwardCUDAKernelLauncher(
    const Tensor bottom_data, const Tensor mask_h_idx, const Tensor mask_w_idx,
    Tensor top_data, const int height, const int width, const int channels) {
  int mask_cnt = mask_h_idx.size(0);
  int output_size = mask_cnt * channels;

  at::hip::HIPGuardMasqueradingAsCUDA device_guard(bottom_data.device());
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      bottom_data.scalar_type(), "MaskedCol2imLaucherForward", ([&] {
        const scalar_t *bottom_data_ = bottom_data.data_ptr<scalar_t>();
        const int64_t *mask_h_idx_ = mask_h_idx.data_ptr<int64_t>();
        const int64_t *mask_w_idx_ = mask_w_idx.data_ptr<int64_t>();
        scalar_t *top_data_ = top_data.data_ptr<scalar_t>();

        hipLaunchKernelGGL(( MaskedCol2imForward<scalar_t>)
            , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
                output_size, bottom_data_, height, width, channels, mask_h_idx_,
                mask_w_idx_, mask_cnt, top_data_);
      }));
  AT_CUDA_CHECK(hipGetLastError());
}
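For orientation, here is a plain-C++ sketch of the gather we understand MaskedIm2colForward to perform: for every masked output position (h, w) and every input channel, the kernel_h x kernel_w patch around it (offset by pad_h/pad_w, zero-padded at the image borders) is written into one column of the output. The function name masked_im2col_cpu and the exact buffer layout are our assumptions for illustration, not the mmcv kernel included via masked_conv2d_cuda_kernel.cuh.

// CPU reference sketch (assumed layout: col is [channels*kernel_h*kernel_w, mask_cnt]).
#include <cstdint>
#include <vector>

void masked_im2col_cpu(const std::vector<float>& im,        // [channels, height, width]
                       const std::vector<int64_t>& mask_h,  // [mask_cnt]
                       const std::vector<int64_t>& mask_w,  // [mask_cnt]
                       std::vector<float>& col,             // [channels*kh*kw, mask_cnt]
                       int channels, int height, int width,
                       int kernel_h, int kernel_w, int pad_h, int pad_w) {
  const int mask_cnt = static_cast<int>(mask_h.size());
  for (int c = 0; c < channels; ++c) {
    for (int m = 0; m < mask_cnt; ++m) {
      for (int kh = 0; kh < kernel_h; ++kh) {
        for (int kw = 0; kw < kernel_w; ++kw) {
          const int64_t h = mask_h[m] + kh - pad_h;   // tap location in the image
          const int64_t w = mask_w[m] + kw - pad_w;
          const int row = (c * kernel_h + kh) * kernel_w + kw;
          float v = 0.f;                              // zero padding outside the image
          if (h >= 0 && h < height && w >= 0 && w < width)
            v = im[(c * height + h) * width + w];
          col[row * mask_cnt + m] = v;
        }
      }
    }
  }
}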
7e2dbfbb1d0755507858958e59196160dae0f64e.cu
// Copyright (c) OpenMMLab. All rights reserved
#include "masked_conv2d_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"

void MaskedIm2colForwardCUDAKernelLauncher(const Tensor bottom_data,
                                           const Tensor mask_h_idx,
                                           const Tensor mask_w_idx,
                                           Tensor top_data, const int kernel_h,
                                           const int kernel_w, const int pad_h,
                                           const int pad_w) {
  int channels = bottom_data.size(1);
  int height = bottom_data.size(2);
  int width = bottom_data.size(3);
  int mask_cnt = mask_h_idx.size(0);
  int output_size = mask_cnt * channels;

  at::cuda::CUDAGuard device_guard(bottom_data.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      bottom_data.scalar_type(), "MaskedIm2colLaucherForward", ([&] {
        const scalar_t *bottom_data_ = bottom_data.data_ptr<scalar_t>();
        const int64_t *mask_h_idx_ = mask_h_idx.data_ptr<int64_t>();
        const int64_t *mask_w_idx_ = mask_w_idx.data_ptr<int64_t>();
        scalar_t *top_data_ = top_data.data_ptr<scalar_t>();

        MaskedIm2colForward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
                output_size, bottom_data_, height, width, kernel_h, kernel_w,
                pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_);
      }));
  AT_CUDA_CHECK(cudaGetLastError());
}

void MaskedCol2imForwardCUDAKernelLauncher(
    const Tensor bottom_data, const Tensor mask_h_idx, const Tensor mask_w_idx,
    Tensor top_data, const int height, const int width, const int channels) {
  int mask_cnt = mask_h_idx.size(0);
  int output_size = mask_cnt * channels;

  at::cuda::CUDAGuard device_guard(bottom_data.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      bottom_data.scalar_type(), "MaskedCol2imLaucherForward", ([&] {
        const scalar_t *bottom_data_ = bottom_data.data_ptr<scalar_t>();
        const int64_t *mask_h_idx_ = mask_h_idx.data_ptr<int64_t>();
        const int64_t *mask_w_idx_ = mask_w_idx.data_ptr<int64_t>();
        scalar_t *top_data_ = top_data.data_ptr<scalar_t>();

        MaskedCol2imForward<scalar_t>
            <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
                output_size, bottom_data_, height, width, channels, mask_h_idx_,
                mask_w_idx_, mask_cnt, top_data_);
      }));
  AT_CUDA_CHECK(cudaGetLastError());
}
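Both launchers size their grids as GET_BLOCKS(output_size) blocks of THREADS_PER_BLOCK threads, one thread per output element. A small sketch of that ceil-division pattern with illustrative constants follows; the real values come from the PyTorch CUDA helper headers, not from this file.

// Grid-sizing sketch; kThreadsPerBlock is an assumed illustrative value.
constexpr int kThreadsPerBlock = 512;

inline int get_blocks(int total) {
  // one thread per output element, rounded up to a whole block
  return (total + kThreadsPerBlock - 1) / kThreadsPerBlock;
}

// Example: mask_cnt = 1000 masked positions and channels = 64 give
// output_size = 64000 elements, so get_blocks(64000) == 125 blocks.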
83615a5188cff0b6158b49138bf5b1178b569fcc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip_runtime.h" /** * @file ex_particle_OPENMP_seq.c * @author Michael Trotter & Matt Goodrum * @brief Particle filter implementation in C/OpenMP */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <math.h> #include <unistd.h> #include <fcntl.h> #include <float.h> #include "RDTimer.h" #define PI 3.1415926535897932 #define BLOCK_X 16 #define BLOCK_Y 16 #ifdef PROFILING SimplePerfSerializer* serializeTime; RDTimer* mytimer; #endif /** @var M value for Linear Congruential Generator (LCG); use GCC's value */ long M = INT_MAX; /** @var A value for LCG */ int A = 1103515245; /** @var C value for LCG */ int C = 12345; const int threads_per_block = 128; /***************************** * CHECK_ERROR * Checks for CUDA errors and prints them to the screen to help with * debugging of CUDA related programming *****************************/ void check_error(hipError_t e) { if (e != hipSuccess) { printf("\nCUDA error: %s\n", hipGetErrorString(e)); exit(1); } } __device__ int findIndexSeq(double * CDF, int lengthCDF, double value) { int index = -1; int x; for(x = 0; x < lengthCDF; x++) { if(CDF[x] >= value) { index = x; break; } } if(index == -1) return lengthCDF-1; return index; } __device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) { if(endIndex < beginIndex) return -1; int middleIndex; while(endIndex > beginIndex) { middleIndex = beginIndex + ((endIndex-beginIndex)/2); if(CDF[middleIndex] >= value) { if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(CDF[middleIndex] == value && middleIndex >= 0) middleIndex--; middleIndex++; return middleIndex; } } if(CDF[middleIndex] > value) endIndex = middleIndex-1; else beginIndex = middleIndex+1; } return -1; } /***************************** * CUDA Kernel Function to replace FindIndex * param1: arrayX * param2: arrayY * param3: CDF * param4: u * param5: xj * param6: yj * param7: Nparticles *****************************/ __global__ void kernel(hipLaunchParm lp, double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){ int block_id = hipBlockIdx_x;// + hipGridDim_x * hipBlockIdx_y; int i = hipBlockDim_x * block_id + hipThreadIdx_x; if(i < Nparticles){ int index = -1; int x; for(x = 0; x < Nparticles; x++){ if(CDF[x] >= u[i]){ index = x; break; } } if(index == -1){ index = Nparticles-1; } xj[i] = arrayX[index]; yj[i] = arrayY[index]; } } /** * Takes in a double and returns an integer that approximates to that double * @return if the mantissa < .5 => return value < input value; else return value > input value */ double roundDouble(double value){ int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue++; } /** * Set values of the 3D array to a newValue if that value is equal to the testValue * @param testValue The value to be replaced * @param newValue The value to replace testValue with * @param array3D The image vector * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames */ void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue) array3D[x * *dimY * *dimZ + y * *dimZ + z] = 
newValue; } } } } /** * Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG) * @see http://en.wikipedia.org/wiki/Linear_congruential_generator * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a uniformly distributed number [0, 1) */ double randu(int * seed, int index) { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } /** * Generates a normally distributed random number using the Box-Muller transformation * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a double representing random number generated using the Box-Muller algorithm * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution */ double randn(int * seed, int index){ /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } /** * Sets values of 3D matrix using randomly generated numbers from a normal distribution * @param array3D The video to be modified * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param seed The seed array */ void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0)); } } } } /** * Fills a radius x radius matrix representing the disk * @param disk The pointer to the disk to be made * @param radius The radius of the disk to be made */ void strelDisk(int * disk, int radius) { int diameter = radius*2 - 1; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2)); if(distance < radius) disk[x*diameter + y] = 1; } } } /** * Dilates the provided video * @param matrix The video to be dilated * @param posX The x location of the pixel to be dilated * @param posY The y location of the pixel to be dilated * @param poxZ The z location of the pixel to be dilated * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param error The error radius */ void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) { int startX = posX - error; while(startX < 0) startX++; int startY = posY - error; while(startY < 0) startY++; int endX = posX + error; while(endX > dimX) endX--; int endY = posY + error; while(endY > dimY) endY--; int x,y; for(x = startX; x < endX; x++){ for(y = startY; y < endY; y++){ double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) ); if(distance < error) matrix[x*dimY*dimZ + y*dimZ + posZ] = 1; } } } /** * Dilates the target matrix using the radius as a guide * @param matrix The reference matrix * @param dimX The x dimension of the video * @param dimY The y dimension of the video * @param dimZ The z dimension of the video * @param error The error radius to be dilated * @param newMatrix The target matrix */ void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix) { int x, y, z; for(z = 0; z < dimZ; 
z++){ for(x = 0; x < dimX; x++){ for(y = 0; y < dimY; y++){ if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){ dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error); } } } } } /** * Fills a 2D array describing the offsets of the disk object * @param se The disk object * @param numOnes The number of ones in the disk * @param neighbors The array that will contain the offsets * @param radius The radius used for dilation */ void getneighbors(int * se, int numOnes, double * neighbors, int radius){ int x, y; int neighY = 0; int center = radius - 1; int diameter = radius*2 -1; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(se[x*diameter + y] == 1){ neighbors[neighY*2] = (int)(y - center); neighbors[neighY*2 + 1] = (int)(x - center); neighY++; } } } } /** * The synthetic video sequence we will work with here is composed of a * single moving object, circular in shape (fixed radius) * The motion here is a linear motion * the foreground intensity and the backgrounf intensity is known * the image is corrupted with zero mean Gaussian noise * @param I The video itself * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames of the video * @param seed The seed array used for number generation */ void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){ int k; int max_size = IszX*IszY*Nfr; /*get object centers*/ int x0 = (int)roundDouble(IszY/2.0); int y0 = (int)roundDouble(IszX/2.0); I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1; /*move point*/ int xk, yk, pos; for(k = 1; k < Nfr; k++){ xk = (int)fabs( (float) (x0 + (k-1)) ); yk = (int)fabs( (float) (y0 - 2*(k-1)) ); pos = yk * IszY * Nfr + xk *Nfr + k; if(pos >= max_size) pos = 0; I[pos] = 1; } /*dilate matrix*/ int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix); int x, y; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ for(k = 0; k < Nfr; k++){ I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k]; } } } free(newMatrix); /*define background, add noise*/ setIf(0, 100, I, &IszX, &IszY, &Nfr); setIf(1, 228, I, &IszX, &IszY, &Nfr); /*add noise*/ addNoise(I, &IszX, &IszY, &Nfr, seed); } /** * Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100 * @param I The 3D matrix * @param ind The current ind array * @param numOnes The length of ind array * @return A double representing the sum */ double calcLikelihoodSum(int * I, int * ind, int numOnes){ double likelihoodSum = 0.0; int y; for(y = 0; y < numOnes; y++) likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0; return likelihoodSum; } /** * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index * @note This function uses sequential search * @param CDF The CDF * @param lengthCDF The length of CDF * @param value The value to be found * @return The index of value in the CDF; if value is never found, returns the last index */ int findIndex(double * CDF, int lengthCDF, double value){ int index = -1; int x; for(x = 0; x < lengthCDF; x++){ if(CDF[x] >= value){ index = x; break; } } if(index == -1){ return lengthCDF-1; } return index; } /** * The implementation of the particle filter using OpenMP for many frames * @see http://openmp.org/wp/ * @note This function is designed to work with a video of several frames. 
In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods * @param I The video to be run * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames * @param seed The seed array used for random number generation * @param Nparticles The number of particles to be used */ void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){ int max_size = IszX*IszY*Nfr; long long start = get_time(); //original particle centroid double xe = roundDouble(IszY/2.0); double ye = roundDouble(IszX/2.0); //expected object locations, compared to center int radius = 5; int diameter = radius*2 - 1; int * disk = (int *)malloc(diameter*diameter*sizeof(int)); memset(disk, 0, diameter*diameter*sizeof(int)); strelDisk(disk, radius); int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + y] == 1) countOnes++; } } double * objxy = (double *)malloc(countOnes*2*sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); //fprintf(stderr, "TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors)); //initial weights are all equal (1/Nparticles) double * weights = (double *)malloc(sizeof(double)*Nparticles); for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); } long long get_weights = get_time(); //fprintf(stderr, "TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights)); //initial likelihood to 0.0 double * likelihood = (double *)malloc(sizeof(double)*Nparticles); double * arrayX = (double *)malloc(sizeof(double)*Nparticles); double * arrayY = (double *)malloc(sizeof(double)*Nparticles); double * xj = (double *)malloc(sizeof(double)*Nparticles); double * yj = (double *)malloc(sizeof(double)*Nparticles); double * CDF = (double *)malloc(sizeof(double)*Nparticles); //GPU copies of arrays double * arrayX_GPU; double * arrayY_GPU; double * xj_GPU; double * yj_GPU; double * CDF_GPU; int * ind = (int*)malloc(sizeof(int)*countOnes); double * u = (double *)malloc(sizeof(double)*Nparticles); double * u_GPU; //CUDA memory allocation check_error(hipMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &xj_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &yj_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &u_GPU, sizeof(double)*Nparticles)); for(x = 0; x < Nparticles; x++){ arrayX[x] = xe; arrayY[x] = ye; } int k; //double * Ik = (double *)malloc(sizeof(double)*IszX*IszY); int indX, indY; for(k = 1; k < Nfr; k++){ long long set_arrays = get_time(); //fprintf(stderr, "TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays)); //apply motion model //draws sample from motion model (random walk). 
The only prior information //is that the object moves 2x as fast as in the y direction for(x = 0; x < Nparticles; x++){ arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x); arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x); } //particle filter likelihood long long error = get_time(); //fprintf(stderr,"TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); for(x = 0; x < Nparticles; x++){ //compute the likelihood: remember our assumption is that you know // foreground and the background image intensity distribution. // Notice that we consider here a likelihood ratio, instead of // p(z|x). It is possible in this case. why? a hometask for you. //calc ind for(y = 0; y < countOnes; y++){ indX = roundDouble(arrayX[x]) + objxy[y*2 + 1]; indY = roundDouble(arrayY[x]) + objxy[y*2]; ind[y] = (int)fabs( (float) (indX*IszY*Nfr + indY*Nfr + k)); if(ind[y] >= max_size) ind[y] = 0; } likelihood[x] = calcLikelihoodSum(I, ind, countOnes); likelihood[x] = likelihood[x]/countOnes; } long long likelihood_time = get_time(); //fprintf(stderr, "TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); // update & normalize weights // using equation (63) of Arulampalam Tutorial for(x = 0; x < Nparticles; x++){ weights[x] = weights[x] * exp(likelihood[x]); } long long exponential = get_time(); //fprintf(stderr, "TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; for(x = 0; x < Nparticles; x++){ sumWeights += weights[x]; } long long sum_time = get_time(); //fprintf(stderr, "TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); for(x = 0; x < Nparticles; x++){ weights[x] = weights[x]/sumWeights; } long long normalize = get_time(); //fprintf(stderr, "TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; // estimate the object location by expected values for(x = 0; x < Nparticles; x++){ xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; } long long move_time = get_time(); //fprintf(stderr, "TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("XE: %lf\n", xe); printf("YE: %lf\n", ye); double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); //display(hold off for now) //pause(hold off for now) //resampling CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); //fprintf(stderr, "TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); for(x = 0; x < Nparticles; x++){ u[x] = u1 + x/((double)(Nparticles)); } long long u_time = get_time(); //fprintf(stderr, "TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); long long start_copy = get_time(); #ifdef PROFILING mytimer->Reset("CPU TO GPU DATA TRANSFER"); mytimer->Start(); #endif //CUDA memory copying from CPU memory to GPU memory hipMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice); long long end_copy = get_time(); #ifdef PROFILING mytimer->Stop(); serializeTime->Serialize(mytimer); 
mytimer->Reset("GPU EXECUTION"); mytimer->Start(); #endif //Set number of threads int num_blocks = ceil((double) Nparticles/(double) threads_per_block); //KERNEL FUNCTION CALL hipLaunchKernel(kernel, dim3(num_blocks), dim3(threads_per_block ), 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles); hipDeviceSynchronize(); #ifdef PROFILING mytimer->Stop(); serializeTime->Serialize(mytimer); #endif long long start_copy_back = get_time(); #ifdef PROFILING mytimer->Reset("CPU TO GPU DATA TRANSFER"); mytimer->Start(); #endif //CUDA memory copying back from GPU to CPU memory hipMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost); hipMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost); #ifdef PROFILING mytimer->Stop(); serializeTime->Serialize(mytimer); #endif long long end_copy_back = get_time(); //fprintf(stderr, "SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy)); //fprintf(stderr,"CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back)); //fprintf(stderr,"SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back)); long long xyj_time = get_time(); //fprintf(stderr,"TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); for(x = 0; x < Nparticles; x++){ //reassign arrayX and arrayY arrayX[x] = xj[x]; arrayY[x] = yj[x]; weights[x] = 1/((double)(Nparticles)); } long long reset = get_time(); //fprintf(stderr,"TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); } //CUDA freeing of memory hipFree(u_GPU); hipFree(CDF_GPU); hipFree(yj_GPU); hipFree(xj_GPU); hipFree(arrayY_GPU); hipFree(arrayX_GPU); //free memory free(disk); free(objxy); free(weights); free(likelihood); free(arrayX); free(arrayY); free(xj); free(yj); free(CDF); free(u); free(ind); } int main(int argc, char * argv[]){ char usage[] = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>"; //check number of arguments if(argc != 9) { printf("%s\n", usage); return 0; } //check args deliminators if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) { printf( "%s\n",usage ); return 0; } int IszX, IszY, Nfr, Nparticles; //converting a string to a integer if( sscanf( argv[2], "%d", &IszX ) == EOF ) { printf("ERROR: dimX input is incorrect"); return 0; } if( IszX <= 0 ) { printf("dimX must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[4], "%d", &IszY ) == EOF ) { printf("ERROR: dimY input is incorrect"); return 0; } if( IszY <= 0 ) { printf("dimY must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[6], "%d", &Nfr ) == EOF ) { printf("ERROR: Number of frames input is incorrect"); return 0; } if( Nfr <= 0 ) { printf("number of frames must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) { printf("ERROR: Number of particles input is incorrect"); return 0; } if( Nparticles <= 0 ) { printf("Number of particles must be > 0\n"); return 0; } #ifdef PROFILING serializeTime = new SimplePerfSerializer( argv[0] ); mytimer = new RDTimerCPU(); #endif //establish seed int * seed = (int *)malloc(sizeof(int)*Nparticles); int i; for(i = 0; i < Nparticles; i++) seed[i] = (rand()%10000000)*i; //malloc matrix int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); long long start = get_time(); //call video sequence videoSequence(I, IszX, IszY, Nfr, seed); long long endVideoSequence = get_time(); //fprintf(stderr,"VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, 
endVideoSequence)); //call particle filter particleFilter(I, IszX, IszY, Nfr, seed, Nparticles); long long endParticleFilter = get_time(); //fprintf(stderr, "PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter)); //fprintf(stderr, "ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter)); #ifdef PROFILING delete serializeTime; delete mytimer; #endif free(seed); free(I); return 0; }
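/*
 * Illustrative sketch, not part of the original pair: the resampling kernel used
 * above is the data-parallel form of this sequential loop. For each systematic
 * threshold u[i] it selects the first particle whose cumulative weight CDF[x]
 * reaches u[i] and copies that particle's state. The helper name
 * resampleSequential is hypothetical.
 */
void resampleSequential(double * CDF, double * u, double * arrayX, double * arrayY, double * xj, double * yj, int Nparticles)
{
	int i, x;
	for(i = 0; i < Nparticles; i++){
		int index = Nparticles - 1; //fall back to the last particle if u[i] exceeds every CDF entry
		for(x = 0; x < Nparticles; x++){
			if(CDF[x] >= u[i]){ //first cumulative weight that reaches the threshold
				index = x;
				break;
			}
		}
		xj[i] = arrayX[index]; //copy the selected particle's state
		yj[i] = arrayY[index];
	}
}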
83615a5188cff0b6158b49138bf5b1178b569fcc.cu
#include "hip_runtime.h" /** * @file ex_particle_OPENMP_seq.c * @author Michael Trotter & Matt Goodrum * @brief Particle filter implementation in C/OpenMP */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <math.h> #include <unistd.h> #include <fcntl.h> #include <float.h> #include "RDTimer.h" #define PI 3.1415926535897932 #define BLOCK_X 16 #define BLOCK_Y 16 #ifdef PROFILING SimplePerfSerializer* serializeTime; RDTimer* mytimer; #endif /** @var M value for Linear Congruential Generator (LCG); use GCC's value */ long M = INT_MAX; /** @var A value for LCG */ int A = 1103515245; /** @var C value for LCG */ int C = 12345; const int threads_per_block = 128; /***************************** * CHECK_ERROR * Checks for CUDA errors and prints them to the screen to help with * debugging of CUDA related programming *****************************/ void check_error(hipError_t e) { if (e != hipSuccess) { printf("\nCUDA error: %s\n", hipGetErrorString(e)); exit(1); } } __device__ int findIndexSeq(double * CDF, int lengthCDF, double value) { int index = -1; int x; for(x = 0; x < lengthCDF; x++) { if(CDF[x] >= value) { index = x; break; } } if(index == -1) return lengthCDF-1; return index; } __device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) { if(endIndex < beginIndex) return -1; int middleIndex; while(endIndex > beginIndex) { middleIndex = beginIndex + ((endIndex-beginIndex)/2); if(CDF[middleIndex] >= value) { if(middleIndex == 0) return middleIndex; else if(CDF[middleIndex-1] < value) return middleIndex; else if(CDF[middleIndex-1] == value) { while(CDF[middleIndex] == value && middleIndex >= 0) middleIndex--; middleIndex++; return middleIndex; } } if(CDF[middleIndex] > value) endIndex = middleIndex-1; else beginIndex = middleIndex+1; } return -1; } /***************************** * CUDA Kernel Function to replace FindIndex * param1: arrayX * param2: arrayY * param3: CDF * param4: u * param5: xj * param6: yj * param7: Nparticles *****************************/ __global__ void kernel(hipLaunchParm lp, double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){ int block_id = hipBlockIdx_x;// + hipGridDim_x * hipBlockIdx_y; int i = hipBlockDim_x * block_id + hipThreadIdx_x; if(i < Nparticles){ int index = -1; int x; for(x = 0; x < Nparticles; x++){ if(CDF[x] >= u[i]){ index = x; break; } } if(index == -1){ index = Nparticles-1; } xj[i] = arrayX[index]; yj[i] = arrayY[index]; } } /** * Takes in a double and returns an integer that approximates to that double * @return if the mantissa < .5 => return value < input value; else return value > input value */ double roundDouble(double value){ int newValue = (int)(value); if(value - newValue < .5) return newValue; else return newValue++; } /** * Set values of the 3D array to a newValue if that value is equal to the testValue * @param testValue The value to be replaced * @param newValue The value to replace testValue with * @param array3D The image vector * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames */ void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue) array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue; } } } } /** * Generates a uniformly distributed random number using the provided 
seed and GCC's settings for the Linear Congruential Generator (LCG) * @see http://en.wikipedia.org/wiki/Linear_congruential_generator * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a uniformly distributed number [0, 1) */ double randu(int * seed, int index) { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } /** * Generates a normally distributed random number using the Box-Muller transformation * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a double representing random number generated using the Box-Muller algorithm * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution */ double randn(int * seed, int index){ /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } /** * Sets values of 3D matrix using randomly generated numbers from a normal distribution * @param array3D The video to be modified * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param seed The seed array */ void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){ int x, y, z; for(x = 0; x < *dimX; x++){ for(y = 0; y < *dimY; y++){ for(z = 0; z < *dimZ; z++){ array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0)); } } } } /** * Fills a radius x radius matrix representing the disk * @param disk The pointer to the disk to be made * @param radius The radius of the disk to be made */ void strelDisk(int * disk, int radius) { int diameter = radius*2 - 1; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2)); if(distance < radius) disk[x*diameter + y] = 1; } } } /** * Dilates the provided video * @param matrix The video to be dilated * @param posX The x location of the pixel to be dilated * @param posY The y location of the pixel to be dilated * @param poxZ The z location of the pixel to be dilated * @param dimX The x dimension of the frame * @param dimY The y dimension of the frame * @param dimZ The number of frames * @param error The error radius */ void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) { int startX = posX - error; while(startX < 0) startX++; int startY = posY - error; while(startY < 0) startY++; int endX = posX + error; while(endX > dimX) endX--; int endY = posY + error; while(endY > dimY) endY--; int x,y; for(x = startX; x < endX; x++){ for(y = startY; y < endY; y++){ double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) ); if(distance < error) matrix[x*dimY*dimZ + y*dimZ + posZ] = 1; } } } /** * Dilates the target matrix using the radius as a guide * @param matrix The reference matrix * @param dimX The x dimension of the video * @param dimY The y dimension of the video * @param dimZ The z dimension of the video * @param error The error radius to be dilated * @param newMatrix The target matrix */ void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix) { int x, y, z; for(z = 0; z < dimZ; z++){ for(x = 0; x < dimX; x++){ for(y = 0; y < dimY; y++){ if(matrix[x*dimY*dimZ + y*dimZ + 
z] == 1){ dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error); } } } } } /** * Fills a 2D array describing the offsets of the disk object * @param se The disk object * @param numOnes The number of ones in the disk * @param neighbors The array that will contain the offsets * @param radius The radius used for dilation */ void getneighbors(int * se, int numOnes, double * neighbors, int radius){ int x, y; int neighY = 0; int center = radius - 1; int diameter = radius*2 -1; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(se[x*diameter + y] == 1){ neighbors[neighY*2] = (int)(y - center); neighbors[neighY*2 + 1] = (int)(x - center); neighY++; } } } } /** * The synthetic video sequence we will work with here is composed of a * single moving object, circular in shape (fixed radius) * The motion here is a linear motion * the foreground intensity and the backgrounf intensity is known * the image is corrupted with zero mean Gaussian noise * @param I The video itself * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames of the video * @param seed The seed array used for number generation */ void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){ int k; int max_size = IszX*IszY*Nfr; /*get object centers*/ int x0 = (int)roundDouble(IszY/2.0); int y0 = (int)roundDouble(IszX/2.0); I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1; /*move point*/ int xk, yk, pos; for(k = 1; k < Nfr; k++){ xk = (int)fabs( (float) (x0 + (k-1)) ); yk = (int)fabs( (float) (y0 - 2*(k-1)) ); pos = yk * IszY * Nfr + xk *Nfr + k; if(pos >= max_size) pos = 0; I[pos] = 1; } /*dilate matrix*/ int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix); int x, y; for(x = 0; x < IszX; x++){ for(y = 0; y < IszY; y++){ for(k = 0; k < Nfr; k++){ I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k]; } } } free(newMatrix); /*define background, add noise*/ setIf(0, 100, I, &IszX, &IszY, &Nfr); setIf(1, 228, I, &IszX, &IszY, &Nfr); /*add noise*/ addNoise(I, &IszX, &IszY, &Nfr, seed); } /** * Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100 * @param I The 3D matrix * @param ind The current ind array * @param numOnes The length of ind array * @return A double representing the sum */ double calcLikelihoodSum(int * I, int * ind, int numOnes){ double likelihoodSum = 0.0; int y; for(y = 0; y < numOnes; y++) likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0; return likelihoodSum; } /** * Finds the first element in the CDF that is greater than or equal to the provided value and returns that index * @note This function uses sequential search * @param CDF The CDF * @param lengthCDF The length of CDF * @param value The value to be found * @return The index of value in the CDF; if value is never found, returns the last index */ int findIndex(double * CDF, int lengthCDF, double value){ int index = -1; int x; for(x = 0; x < lengthCDF; x++){ if(CDF[x] >= value){ index = x; break; } } if(index == -1){ return lengthCDF-1; } return index; } /** * The implementation of the particle filter using OpenMP for many frames * @see http://openmp.org/wp/ * @note This function is designed to work with a video of several frames. 
In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods * @param I The video to be run * @param IszX The x dimension of the video * @param IszY The y dimension of the video * @param Nfr The number of frames * @param seed The seed array used for random number generation * @param Nparticles The number of particles to be used */ void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){ int max_size = IszX*IszY*Nfr; long long start = get_time(); //original particle centroid double xe = roundDouble(IszY/2.0); double ye = roundDouble(IszX/2.0); //expected object locations, compared to center int radius = 5; int diameter = radius*2 - 1; int * disk = (int *)malloc(diameter*diameter*sizeof(int)); memset(disk, 0, diameter*diameter*sizeof(int)); strelDisk(disk, radius); int countOnes = 0; int x, y; for(x = 0; x < diameter; x++){ for(y = 0; y < diameter; y++){ if(disk[x*diameter + y] == 1) countOnes++; } } double * objxy = (double *)malloc(countOnes*2*sizeof(double)); getneighbors(disk, countOnes, objxy, radius); long long get_neighbors = get_time(); //fprintf(stderr, "TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors)); //initial weights are all equal (1/Nparticles) double * weights = (double *)malloc(sizeof(double)*Nparticles); for(x = 0; x < Nparticles; x++){ weights[x] = 1/((double)(Nparticles)); } long long get_weights = get_time(); //fprintf(stderr, "TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights)); //initial likelihood to 0.0 double * likelihood = (double *)malloc(sizeof(double)*Nparticles); double * arrayX = (double *)malloc(sizeof(double)*Nparticles); double * arrayY = (double *)malloc(sizeof(double)*Nparticles); double * xj = (double *)malloc(sizeof(double)*Nparticles); double * yj = (double *)malloc(sizeof(double)*Nparticles); double * CDF = (double *)malloc(sizeof(double)*Nparticles); //GPU copies of arrays double * arrayX_GPU; double * arrayY_GPU; double * xj_GPU; double * yj_GPU; double * CDF_GPU; int * ind = (int*)malloc(sizeof(int)*countOnes); double * u = (double *)malloc(sizeof(double)*Nparticles); double * u_GPU; //CUDA memory allocation check_error(hipMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &xj_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &yj_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles)); check_error(hipMalloc((void **) &u_GPU, sizeof(double)*Nparticles)); for(x = 0; x < Nparticles; x++){ arrayX[x] = xe; arrayY[x] = ye; } int k; //double * Ik = (double *)malloc(sizeof(double)*IszX*IszY); int indX, indY; for(k = 1; k < Nfr; k++){ long long set_arrays = get_time(); //fprintf(stderr, "TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays)); //apply motion model //draws sample from motion model (random walk). 
The only prior information //is that the object moves 2x as fast as in the y direction for(x = 0; x < Nparticles; x++){ arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x); arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x); } //particle filter likelihood long long error = get_time(); //fprintf(stderr,"TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error)); for(x = 0; x < Nparticles; x++){ //compute the likelihood: remember our assumption is that you know // foreground and the background image intensity distribution. // Notice that we consider here a likelihood ratio, instead of // p(z|x). It is possible in this case. why? a hometask for you. //calc ind for(y = 0; y < countOnes; y++){ indX = roundDouble(arrayX[x]) + objxy[y*2 + 1]; indY = roundDouble(arrayY[x]) + objxy[y*2]; ind[y] = (int)fabs( (float) (indX*IszY*Nfr + indY*Nfr + k)); if(ind[y] >= max_size) ind[y] = 0; } likelihood[x] = calcLikelihoodSum(I, ind, countOnes); likelihood[x] = likelihood[x]/countOnes; } long long likelihood_time = get_time(); //fprintf(stderr, "TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time)); // update & normalize weights // using equation (63) of Arulampalam Tutorial for(x = 0; x < Nparticles; x++){ weights[x] = weights[x] * exp(likelihood[x]); } long long exponential = get_time(); //fprintf(stderr, "TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential)); double sumWeights = 0; for(x = 0; x < Nparticles; x++){ sumWeights += weights[x]; } long long sum_time = get_time(); //fprintf(stderr, "TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time)); for(x = 0; x < Nparticles; x++){ weights[x] = weights[x]/sumWeights; } long long normalize = get_time(); //fprintf(stderr, "TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize)); xe = 0; ye = 0; // estimate the object location by expected values for(x = 0; x < Nparticles; x++){ xe += arrayX[x] * weights[x]; ye += arrayY[x] * weights[x]; } long long move_time = get_time(); //fprintf(stderr, "TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time)); printf("XE: %lf\n", xe); printf("YE: %lf\n", ye); double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) ); printf("%lf\n", distance); //display(hold off for now) //pause(hold off for now) //resampling CDF[0] = weights[0]; for(x = 1; x < Nparticles; x++){ CDF[x] = weights[x] + CDF[x-1]; } long long cum_sum = get_time(); //fprintf(stderr, "TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum)); double u1 = (1/((double)(Nparticles)))*randu(seed, 0); for(x = 0; x < Nparticles; x++){ u[x] = u1 + x/((double)(Nparticles)); } long long u_time = get_time(); //fprintf(stderr, "TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time)); long long start_copy = get_time(); #ifdef PROFILING mytimer->Reset("CPU TO GPU DATA TRANSFER"); mytimer->Start(); #endif //CUDA memory copying from CPU memory to GPU memory hipMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice); hipMemcpy(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice); long long end_copy = get_time(); #ifdef PROFILING mytimer->Stop(); serializeTime->Serialize(mytimer); 
mytimer->Reset("GPU EXECUTION"); mytimer->Start(); #endif //Set number of threads int num_blocks = ceil((double) Nparticles/(double) threads_per_block); //KERNEL FUNCTION CALL hipLaunchKernel(kernel, dim3(num_blocks), dim3(threads_per_block ), 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles); hipDeviceSynchronize(); #ifdef PROFILING mytimer->Stop(); serializeTime->Serialize(mytimer); #endif long long start_copy_back = get_time(); #ifdef PROFILING mytimer->Reset("CPU TO GPU DATA TRANSFER"); mytimer->Start(); #endif //CUDA memory copying back from GPU to CPU memory hipMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost); hipMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost); #ifdef PROFILING mytimer->Stop(); serializeTime->Serialize(mytimer); #endif long long end_copy_back = get_time(); //fprintf(stderr, "SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy)); //fprintf(stderr,"CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back)); //fprintf(stderr,"SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back)); long long xyj_time = get_time(); //fprintf(stderr,"TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time)); for(x = 0; x < Nparticles; x++){ //reassign arrayX and arrayY arrayX[x] = xj[x]; arrayY[x] = yj[x]; weights[x] = 1/((double)(Nparticles)); } long long reset = get_time(); //fprintf(stderr,"TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset)); } //CUDA freeing of memory hipFree(u_GPU); hipFree(CDF_GPU); hipFree(yj_GPU); hipFree(xj_GPU); hipFree(arrayY_GPU); hipFree(arrayX_GPU); //free memory free(disk); free(objxy); free(weights); free(likelihood); free(arrayX); free(arrayY); free(xj); free(yj); free(CDF); free(u); free(ind); } int main(int argc, char * argv[]){ char usage[] = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>"; //check number of arguments if(argc != 9) { printf("%s\n", usage); return 0; } //check args deliminators if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) { printf( "%s\n",usage ); return 0; } int IszX, IszY, Nfr, Nparticles; //converting a string to a integer if( sscanf( argv[2], "%d", &IszX ) == EOF ) { printf("ERROR: dimX input is incorrect"); return 0; } if( IszX <= 0 ) { printf("dimX must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[4], "%d", &IszY ) == EOF ) { printf("ERROR: dimY input is incorrect"); return 0; } if( IszY <= 0 ) { printf("dimY must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[6], "%d", &Nfr ) == EOF ) { printf("ERROR: Number of frames input is incorrect"); return 0; } if( Nfr <= 0 ) { printf("number of frames must be > 0\n"); return 0; } //converting a string to a integer if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) { printf("ERROR: Number of particles input is incorrect"); return 0; } if( Nparticles <= 0 ) { printf("Number of particles must be > 0\n"); return 0; } #ifdef PROFILING serializeTime = new SimplePerfSerializer( argv[0] ); mytimer = new RDTimerCPU(); #endif //establish seed int * seed = (int *)malloc(sizeof(int)*Nparticles); int i; for(i = 0; i < Nparticles; i++) seed[i] = (rand()%10000000)*i; //malloc matrix int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr); long long start = get_time(); //call video sequence videoSequence(I, IszX, IszY, Nfr, seed); long long endVideoSequence = get_time(); //fprintf(stderr,"VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, 
endVideoSequence)); //call particle filter particleFilter(I, IszX, IszY, Nfr, seed, Nparticles); long long endParticleFilter = get_time(); //fprintf(stderr, "PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter)); //fprintf(stderr, "ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter)); #ifdef PROFILING delete serializeTime; delete mytimer; #endif free(seed); free(I); return 0; }
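/*
 * Illustrative sketch, with the hypothetical name launchResamplingChecked: the
 * resampling launch inside particleFilter() above is not error-checked. One way
 * to catch launch and execution failures is to reuse the check_error() helper
 * defined earlier around the same call.
 */
void launchResamplingChecked(double * arrayX_GPU, double * arrayY_GPU, double * CDF_GPU, double * u_GPU, double * xj_GPU, double * yj_GPU, int Nparticles)
{
	int num_blocks = (Nparticles + threads_per_block - 1) / threads_per_block; //integer ceiling division, same count as the ceil() used above
	hipLaunchKernel(kernel, dim3(num_blocks), dim3(threads_per_block), 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
	check_error(hipGetLastError()); //reports bad launch configurations
	check_error(hipDeviceSynchronize()); //reports asynchronous execution errors
}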
ee7caa187acd2e53190f9cb09f91fc522ace6569.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <math.h>
__global__ void log_float(int n,int idx,float *dy,int incy,float *result) {
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
		if(i >= idx && i % incy == 0)
			result[i] = logf(dy[i]);
	}
}
ee7caa187acd2e53190f9cb09f91fc522ace6569.cu
extern "C" #include <math.h> __global__ void log_float(int n,int idx,float *dy,int incy,float *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= idx && i % incy == 0) result[i] = logf(dy[i]); } }
2185f9e4977dc90525cb712664e97a09c98f4209.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define N 2048 * 2048 // Number of elements in each vector

__global__ void saxpy(int * a, int * b, int * c)
{
	// Determine our unique global thread ID, so we know which element to process
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if ( tid < N ) // Make sure we don't do more work than we have data!
		c[tid] = 2 * a[tid] + b[tid];
}

int main()
{
	int *a, *b, *c;
	int size = N * sizeof (int); // The total number of bytes per vector

	// Allocate memory
	hipMallocManaged(&a, size);
	hipMallocManaged(&b, size);
	hipMallocManaged(&c, size);

	// Initialize memory
	for( int i = 0; i < N; ++i )
	{
		a[i] = 2;
		b[i] = 1;
		c[i] = 0;
	}

	int threads_per_block = 128;
	int number_of_blocks = (N / threads_per_block) + 1;

	hipLaunchKernelGGL(saxpy, dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, b, c);
	hipDeviceSynchronize(); // Wait for the GPU to finish

	// Print out the first and last 5 values of c for a quality check
	for( int i = 0; i < 5; ++i )
		printf("c[%d] = %d, ", i, c[i]);
	printf ("\n");
	for( int i = N-5; i < N; ++i )
		printf("c[%d] = %d, ", i, c[i]);
	printf ("\n");

	// Free all our allocated memory
	hipFree( a );
	hipFree( b );
	hipFree( c );
}
2185f9e4977dc90525cb712664e97a09c98f4209.cu
#include <stdio.h>

#define N 2048 * 2048 // Number of elements in each vector

__global__ void saxpy(int * a, int * b, int * c)
{
	// Determine our unique global thread ID, so we know which element to process
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if ( tid < N ) // Make sure we don't do more work than we have data!
		c[tid] = 2 * a[tid] + b[tid];
}

int main()
{
	int *a, *b, *c;
	int size = N * sizeof (int); // The total number of bytes per vector

	// Allocate memory
	cudaMallocManaged(&a, size);
	cudaMallocManaged(&b, size);
	cudaMallocManaged(&c, size);

	// Initialize memory
	for( int i = 0; i < N; ++i )
	{
		a[i] = 2;
		b[i] = 1;
		c[i] = 0;
	}

	int threads_per_block = 128;
	int number_of_blocks = (N / threads_per_block) + 1;

	saxpy <<< number_of_blocks, threads_per_block >>> ( a, b, c );
	cudaDeviceSynchronize(); // Wait for the GPU to finish

	// Print out the first and last 5 values of c for a quality check
	for( int i = 0; i < 5; ++i )
		printf("c[%d] = %d, ", i, c[i]);
	printf ("\n");
	for( int i = N-5; i < N; ++i )
		printf("c[%d] = %d, ", i, c[i]);
	printf ("\n");

	// Free all our allocated memory
	cudaFree( a );
	cudaFree( b );
	cudaFree( c );
}
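/*
 * Illustrative variant, not part of the original exercise: launchSaxpy and
 * deviceId are hypothetical names. Ceiling division avoids the extra, fully idle
 * block that (N / threads_per_block) + 1 launches whenever N is a multiple of
 * threads_per_block, and prefetching the managed buffers (on GPUs that support
 * it) avoids starting the kernel on first-touch page faults.
 */
void launchSaxpy(int * a, int * b, int * c, int threads_per_block)
{
	int size = N * sizeof (int);
	int deviceId = 0;
	cudaGetDevice(&deviceId);

	cudaMemPrefetchAsync(a, size, deviceId, 0); //default stream
	cudaMemPrefetchAsync(b, size, deviceId, 0);
	cudaMemPrefetchAsync(c, size, deviceId, 0);

	int number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
	saxpy<<<number_of_blocks, threads_per_block>>>(a, b, c);
	cudaDeviceSynchronize(); //wait so the caller can read c immediately
}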
faf729a97b99c1892684c386320795234bf6d197.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include <ostream> #include <fstream> #include <sys/time.h> #include <time.h> using namespace std; #define CASENAME "test30" #define BLOCKSIZEX 192 #define BLOCKSIZEY 1 #define UBLOCKSIZEX 32 #define UBLOCKSIZEY 10 #define HALO 2 #define XDIM 576 #define YDIM 165 #define OBSTD 20 //.f #define OBSTX 122 //.f #define OBSTY 72 //.f #define TMAX 10000 #define MAXIT 1000 #define MAXRES 0.001 #define RE 100 #define UMAX 1.f #define BETA 0.01f //beta = 1/c^2 #define DTAU 0.001f #define DT 0.05f #define DT1 0.05f #define MAXIT1 200 #define STARTRAMP 200 #define ENDRAMP 1000 #define CONV 1 //1:UDS float timestep(int t) { float dt = DT; if(t<STARTRAMP) dt = DT1; else if(t>=STARTRAMP && t<ENDRAMP) dt = DT1-(DT1-DT)/(ENDRAMP-STARTRAMP)*(t-STARTRAMP); if(t == STARTRAMP) cout<<"Ramping down time step from: "<<DT1<<endl; else if(t == ENDRAMP) cout<<"Finished ramping down time step to: "<<dt<<endl; return dt; } float iterations(int t) { int it = MAXIT; if(t<STARTRAMP) it = MAXIT1; else if(t>=STARTRAMP && t<ENDRAMP) it = MAXIT1-(MAXIT1-MAXIT)/(ENDRAMP-STARTRAMP)*(t-STARTRAMP); if(t == STARTRAMP) cout<<"Ramping up max iterations from: "<<it<<endl; else if(t == ENDRAMP) cout<<"Finished ramping up max iterations to: "<<it<<endl; return it; } int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } inline __device__ float PoisProf (float x){ float radius = (YDIM-1-2*HALO)*0.5f; float result = -1.0f*(((1.0f-(x-HALO)/radius))*((1.0f-(x-HALO)/radius))-1.0f); return (result); // return 1.f; } void AllocateArray(float ****f,int x,int y) { float ***array = new float **[4]; for(int i = 0;i<4;i++) { array[i] = new float *[x]; for(int j = 0;j<x;j++) { array[i][j] = new float [y]; for(int k = 0;k<y;k++) array[i][j][k] = 0.f; } } *f = array; } void DeallocateArray(float ***f,int x) { for(int i = 0;i<4;i++) { for(int j = 0;j<x;j++) delete [] f[i][j]; delete [] f[i]; } delete [] f; } void WriteResults(ostream &output, float *u, float *v, float *p) { output<<"VARIABLES = \"X\",\"Y\",\"u\",\"v\",\"p\"\n"; output<<"ZONE F=POINT, I="<<XDIM-HALO*2<<", J="<<YDIM-HALO*2<<endl; float dx = 1; float dy = 1; float uval,vval,pval; for(int j = HALO; j<YDIM-HALO; j++){ for(int i = HALO; i<XDIM-HALO; i++) { float xc = 0.5f*dx+(i)*dx; float yc = 0.5f*dy+(j)*dy; uval = 0.5f*(u[i+j*XDIM]+u[i-1 +j*XDIM]); vval = 0.5f*(v[i+j*XDIM]+v[i+(j-1)*XDIM]); pval = p[i+j*XDIM]; if(xc>OBSTX && xc<OBSTX+OBSTD && yc>OBSTY && yc<OBSTY+OBSTD) { uval = 0.f; vval = 0.f; pval = 0.f; } output<<xc<<", "<<yc<<", "<<uval<<", "<<vval<<", "<<pval<<endl; } } } void WriteResiduals(ostream &output, float *Res) { for(int i = 0; i<TMAX; i++) output<<i<<", "<<sqrt(Res[i])/((XDIM-2*HALO)*(YDIM-2*HALO))<<endl; } void WriteInputs(ostream &output) { output<<"Domain size: \t"<<XDIM<<"x"<<YDIM<<endl; output<<"Halo size: \t"<<HALO<<endl; output<<"Target residual: \t"<<MAXRES<<endl; output<<"Pseudo time step size: \t"<<DTAU<<endl; output<<"Maximum iterations: \t"<<MAXIT<<endl; output<<"Real time step size: \t"<<DT<<endl; output<<"Maximum time steps: \t"<<TMAX<<endl; output<<"Re: \t"<<RE<<endl; output<<"uMax: \t"<<UMAX<<endl; string scheme; if(CONV == 0) scheme = "CDS "; if(CONV == 1) scheme = "UDS "; if(CONV == 2) scheme = "Hybrid"; if(CONV == 3) scheme = "QUICK "; output<<"Convective discretization: \t"<<scheme<<endl; } __global__ void ACM_U_Shared(float* uA, float* vA, float* Res, float* uB, float* vB, float* pB, float* uC, float* vC, float* uD, float* vD, float nu, float dt, int it, int t, size_t pitch) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int tx = threadIdx.x+1; int ty = threadIdx.y+1; __shared__ float u[UBLOCKSIZEX+2][UBLOCKSIZEY+2]; __shared__ float v[UBLOCKSIZEX+2][UBLOCKSIZEY+2]; //if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){ if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){ if(threadIdx.x == 0){ u[0][ty] = uB[ x-1+ y *pitch]; v[0][ty] = vB[ x-1+ y *pitch]; if(threadIdx.y == blockDim.y-1){ u[0][ty+1] = uB[ x-1+ (y+1)*pitch]; } } if(threadIdx.x == blockDim.x-1){ u[UBLOCKSIZEX+1][ty] = uB[ x+1+ y *pitch]; v[UBLOCKSIZEX+1][ty] = vB[ x+1+ y *pitch]; } if(threadIdx.y == 0){ u[tx][0] = uB[ x+ (y-1)*pitch]; v[tx][0] = vB[ x+ (y-1)*pitch]; if(threadIdx.x == blockDim.x-1){ v[tx+1][0] = vB[ x+1+(y-1)*pitch]; } } if(threadIdx.y == blockDim.y-1){ u[tx][UBLOCKSIZEY+1] = uB[ x+ (y+1)*pitch]; v[tx][UBLOCKSIZEY+1] = vB[ x+ (y+1)*pitch]; } u[tx][ty] = uB[ x+ (y )*pitch]; v[tx][ty] = vB[ x+ (y )*pitch]; } syncthreads(); //if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){ if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){ float Ae,Aw,An,As; float AP_Ue,AE_Ue,AW_Ue,AN_Ue,AS_Ue; //A coeff for East node on u of east face float AP_Vn,AE_Vn,AW_Vn,AN_Vn,AS_Vn; float Fe,Fw,Fn,Fs; float De,Dw,Dn,Ds; float B_Ue,B_Vn; float dx = 1.f/OBSTD; 
float dy = 1.f/OBSTD; Ae = dy; Aw = dy; An = dx; As = dx; Fe = 0.5f*(u[tx ][ty ]+u[tx+1][ty ])*Ae; Fw = 0.5f*(u[tx-1][ty ]+u[tx ][ty ])*Aw; Fn = 0.5f*(v[tx ][ty ]+v[tx+1][ty ])*An; Fs = 0.5f*(v[tx ][ty-1]+v[tx+1][ty-1])*As; De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy; if(CONV == 1){ AE_Ue = max(-Fe,0.f)+De; AW_Ue = max( Fw,0.f)+Dw; AN_Ue = max(-Fn,0.f)+Dn; AS_Ue = max( Fs,0.f)+Ds; AP_Ue = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds; } else if(CONV == 2){ AE_Ue = max(-Fe*0.5f+De,max(-Fe,0.f)); AW_Ue = max( Fw*0.5f+Dw,max( Fw,0.f)); AN_Ue = max(-Fn*0.5f+Dn,max(-Fn,0.f)); AS_Ue = max( Fs*0.5f+Ds,max( Fs,0.f)); AP_Ue = AE_Ue+AW_Ue+AN_Ue+AS_Ue; } B_Ue = Ae*(pB[x +(y )*pitch]-pB[x+1+(y )*pitch]); //B_Ue-=( 3.f*u[tx ][ty ]-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/DT; uA[ x + y *pitch] =(((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ] +AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1] -AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy) //-(-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/dt -( 3.f*u[tx ][ty ]-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/dt //-( u[tx ][ty ]-uC[x +(y )*pitch])/dt )*DTAU)///(1.f+3.f*DTAU/dt) + u[tx ][ty ]; // uA[ x + y *pitch] =(((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ] // +AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1] // -AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy) // +(uC[x +(y )*pitch])/DT // + u[tx ][ty ]/DTAU // ))/(1.f/DT+1.f/DTAU); Fe = 0.5f*(u[tx ][ty ]+u[tx ][ty+1])*Ae; Fw = 0.5f*(u[tx-1][ty ]+u[tx-1][ty+1])*Aw; Fn = 0.5f*(v[tx ][ty ]+v[tx ][ty+1])*An; Fs = 0.5f*(v[tx ][ty ]+v[tx ][ty-1])*As; De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy; if(CONV == 1){ AE_Vn = max(-Fe,0.f)+De; AW_Vn = max( Fw,0.f)+Dw; AN_Vn = max(-Fn,0.f)+Dn; AS_Vn = max( Fs,0.f)+Ds; AP_Vn = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds; } else if(CONV == 2){ AE_Vn = max(-Fe*0.5f+De,max(-Fe,0.f)); AW_Vn = max( Fw*0.5f+Dw,max( Fw,0.f)); AN_Vn = max(-Fn*0.5f+Dn,max(-Fn,0.f)); AS_Vn = max( Fs*0.5f+Ds,max( Fs,0.f)); AP_Vn = AE_Vn+AW_Vn+AN_Vn+AS_Vn; } B_Vn = An*(pB[x +(y )*pitch]-pB[x +(y+1)*pitch]); //B_Vn-=( 3.f*v[tx ][ty ]-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/DT; vA[ x + y *pitch] =(((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ] +AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1] -AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy) //-(-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/dt -( 3.f*v[tx ][ty ]-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/dt //-( v[tx ][ty ]-vC[x +(y )*pitch])/dt )*DTAU)///(1.f+3.f*DTAU/dt) + v[tx ][ty ]; // vA[ x + y *pitch] =(((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ] // +AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1] // -AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy) // +(vC[x +(y )*pitch])/DT // + v[tx ][ty ]/DTAU // ))/(1.f/DT+1.f/DTAU); // if(y < HALO) // { // vA[ x + y *pitch] = 0.f; // uA[ x + y *pitch] = u[tx ][ty+1]; // } // if(y > YDIM-HALO-2) // { // vA[ x + y *pitch] = 0.f; // uA[ x + y *pitch] = u[tx ][ty-1]; // } // if(y > YDIM-HALO-3) // vA[ x + y *pitch] = 0.f; // if(x < HALO) // { // uA[ x + y *pitch] = UMAX; // vA[ x + y *pitch] = 0.f; // } // if(x > XDIM-HALO-2) // { // uA[ x + y *pitch] = u[tx-1][ty ]; // vA[ x + y *pitch] = v[tx-1][ty ]; // } // if(x > XDIM-HALO-3) // uA[ x + y *pitch] = u[tx-1][ty ]; // // if(x == OBSTX-1 && y>=OBSTY & y<OBSTY+OBSTD) // uA[ x + y *pitch] = 0.f; // if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD) // uA[ x + y *pitch] = 0.f; // if(y == OBSTY-1 && x>=OBSTX & x<OBSTX+OBSTD) // vA[ x + y *pitch] = 0.f; // if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD) // vA[ x + y *pitch] = 0.f; // // if(x == OBSTX && y>=OBSTY & y<OBSTY+OBSTD-1) // vA[ x + y *pitch] = 
-v[tx-1][ty ]; // if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1) // vA[ x + y *pitch] = -v[tx+1][ty ]; // if(y == OBSTY && x>=OBSTX & x<OBSTX+OBSTD-1){ // if(t < 200) uA[ x + y *pitch] = u[tx][ty-1]; // else // uA[ x + y *pitch] = -u[tx ][ty-1]; // } // if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1) // uA[ x + y *pitch] = -u[tx ][ty+1]; } if(x == 0 && y == 0) Res[t] = 0.f; } __global__ void ACM_BC(float* u, float* v, int t, size_t pitch) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; if(y < HALO) { v[ x + y *pitch] = 0.f; u[ x + y *pitch] = -u[x +(y+1)*pitch]; } if(y > YDIM-HALO-2) { v[ x + y *pitch] = 0.f; u[ x + y *pitch] = -u[x +(y-1)*pitch]; } if(y > YDIM-HALO-3) v[ x + y *pitch] = 0.f; if(x < HALO) { u[ x + y *pitch] = UMAX*PoisProf(y); v[ x + y *pitch] = 0.f; } if(x > XDIM-HALO-2) { u[ x + y *pitch] = u[XDIM-HALO-2+y*pitch]; v[ x + y *pitch] = v[XDIM-HALO-2+y*pitch]; } // if(x > XDIM-HALO-3) // u[ x + y *pitch] = u[x-1+y*pitch]; if(x == OBSTX-1 && y>=OBSTY & y<OBSTY+OBSTD) u[ x + y *pitch] = 0.f; if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD) u[ x + y *pitch] = 0.f; if(y == OBSTY-1 && x>=OBSTX & x<OBSTX+OBSTD) v[ x + y *pitch] = 0.f; if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD) v[ x + y *pitch] = 0.f; if(x == OBSTX && y>=OBSTY & y<OBSTY+OBSTD-1) v[ x + y *pitch] = -v[x-1+y*pitch]; if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1) v[ x + y *pitch] = -v[x+1+y*pitch]; if(y == OBSTY && x>=OBSTX & x<OBSTX+OBSTD-1){ if(t < 500) u[ x + y *pitch] = u[x+(y-1)*pitch]; else u[ x + y *pitch] = -u[x+(y-1)*pitch]; } if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1) u[ x + y *pitch] = -u[x+(y+1)*pitch]; } __global__ void ACM_P(float* pA, float* Res, float* uA, float* vA, float* pB, float *pC, float dt, int it, int t, size_t pitch, float *uD) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; __shared__ float sumRes[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); float dx = 1.f/OBSTD; float dy = 1.f/OBSTD; if(x > HALO-1 && x < XDIM-HALO && y > HALO-1 && y < YDIM-HALO){ float res = (dy*(uA[ x + y *pitch]-uA[ x-1+ y *pitch]) +dx*(vA[ x + y *pitch]-vA[ x +(y-1)*pitch]))/(dx*dy); pA[ x + y *pitch] = -(res/BETA)*DTAU+pB[ x + y *pitch]; if(x > HALO && x < XDIM-HALO-2 && y > HALO && y < YDIM-HALO-2 && abs(res/UMAX)>MAXRES && !(x>= OBSTX && y>=OBSTY && x<OBSTX+OBSTD && y<OBSTY+OBSTD)){ check[0] = 1; sumRes[threadIdx.x]=1.f; } else sumRes[threadIdx.x]=0.f; } else{ sumRes[threadIdx.x]=0.f; } syncthreads(); if(check[0] == 1){ //reduction for residual int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumRes[threadIdx.x] += sumRes[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&Res[t],sumRes[0]); } } } __global__ void ACM_VelTransfer(float* uD, float* vD, float* uA, float* vA, size_t pitch) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; uD[ x + y *pitch] = uA[ x + y *pitch]; vD[ x + y *pitch] = vA[ x + y *pitch]; } __global__ void ACM_Forces(float *FX, float *FY, float* uD, float* vD, float* pA, float nu, int t, size_t pitch) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; __shared__ float sumFX[BLOCKSIZEX],sumFY[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; sumFX[threadIdx.x] = 0.f; 
sumFY[threadIdx.x] = 0.f; syncthreads(); float dx = 1.f/OBSTD; float dy = 1.f/OBSTD; //forces on bottom wall if(x >= OBSTX && x<OBSTX+OBSTD && y == OBSTY){ check[0] = 1; sumFY[threadIdx.x] += dx*pA[ x +(y-1)*pitch]; if(x<OBSTX+OBSTD-1) sumFX[threadIdx.x] -= dx*nu*2.f*uD[ x +(y )*pitch]/dy; } //forces on top wall if(x >= OBSTX && x<OBSTX+OBSTD && y == OBSTY+OBSTD-1){ check[0] = 1; sumFY[threadIdx.x] -= dx*pA[ x +(y+1)*pitch]; if(x<OBSTX+OBSTD-1) sumFX[threadIdx.x] -= dx*nu*2.f*uD[ x +(y )*pitch]/dy; } //forces on left wall if(y >= OBSTY && y<OBSTY+OBSTD && x == OBSTX){ check[0] = 1; sumFX[threadIdx.x] += dy*pA[ x-1+(y )*pitch]; if(y<OBSTY+OBSTD-1) sumFY[threadIdx.x] -= dy*nu*2.f*vD[ x +(y )*pitch]/dx; } //forces on right wall if(y >= OBSTY && y<OBSTY+OBSTD && x == OBSTX+OBSTD-1){ check[0] = 1; sumFX[threadIdx.x] -= dy*pA[ x+1+(y )*pitch]; if(y<OBSTY+OBSTD-1) sumFY[threadIdx.x] -= dy*nu*2.f*vD[ x +(y )*pitch]/dx; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumFX[threadIdx.x] += sumFX[threadIdx.x+halfPoint]; sumFY[threadIdx.x] += sumFY[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t],sumFX[0]); atomicAdd(&FY[t],sumFY[0]); } } } __global__ void ACM_Forces1(float *FX_intm, float *FY_intm, float* uD, float* vD, float* pA, float nu, int t, size_t pitch, float *test) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int xcoord = x+OBSTX; int ycoord = y+OBSTY; __shared__ float sumFX[OBSTD],sumFY[OBSTD]; sumFX[threadIdx.x] = 0.f; sumFY[threadIdx.x] = 0.f; syncthreads(); float dx = 1.f/OBSTD; float dy = 1.f/OBSTD; //forces on bottom wall if(y == 0){ sumFY[threadIdx.x] += dx*pA[ xcoord +(ycoord-1)*pitch]; sumFX[threadIdx.x] -= dx*nu*2.f*uD[ xcoord +(ycoord )*pitch]/dy; } //forces on top wall if(y == OBSTD-1){ sumFY[threadIdx.x] -= dx*pA[ xcoord +(ycoord+1)*pitch]; sumFX[threadIdx.x] -= dx*nu*2.f*uD[ xcoord +(ycoord )*pitch]/dy; } //forces on left wall if(x == 0){ sumFX[threadIdx.x] += dy*pA[ xcoord-1+(ycoord )*pitch]; sumFY[threadIdx.x] -= dy*nu*2.f*vD[ xcoord +(ycoord )*pitch]/dy; } //forces on right wall if(x == OBSTD-1){ sumFX[threadIdx.x] -= dy*pA[ xcoord+1+(ycoord )*pitch]; sumFY[threadIdx.x] -= dy*nu*2.f*vD[ xcoord +(ycoord )*pitch]/dx; } syncthreads(); //reduction for force // int nTotalThreads = blockDim.x; // while(nTotalThreads > 1){ // int halfPoint = (nTotalThreads >> 1); // if(threadIdx.x < halfPoint){ // sumFX[threadIdx.x] += sumFX[threadIdx.x+halfPoint]; // sumFY[threadIdx.x] += sumFY[threadIdx.x+halfPoint]; // } // syncthreads(); // nTotalThreads = halfPoint; // } float sum = 0; if(threadIdx.x == 0){ for(int i = 0; i<blockDim.x; i++) sum += sumFX[i]; } sumFX[0] = sum; sum = 0; if(threadIdx.x == 0){ for(int i = 0; i<blockDim.x; i++) sum += sumFY[i]; } sumFY[0] = sum; if(threadIdx.x == 0){ FX_intm[y] = sumFX[0]; FY_intm[y] = sumFY[0]; } } __global__ void ACM_Forces2(float *FX, float *FY, float *FX_intm, float *FY_intm, int t, size_t pitch, float *test) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; //int xcoord = x+OBSTX; //int ycoord = y+OBSTY; __shared__ float sumFX[OBSTD],sumFY[OBSTD]; sumFX[threadIdx.y] = 0.f; sumFY[threadIdx.y] = 0.f; syncthreads(); sumFX[threadIdx.y] = FX_intm[threadIdx.y]; sumFY[threadIdx.y] = FY_intm[threadIdx.y]; //test[xcoord+ycoord*pitch] = 1.f; 
syncthreads(); float sum = 0; if(threadIdx.y == 0){ for(int i = 0; i<blockDim.y; i++) sum += sumFX[i]; } sumFX[0] = sum; sum = 0; if(threadIdx.y == 0){ for(int i = 0; i<blockDim.y; i++) sum += sumFY[i]; } sumFY[0] = sum; if(threadIdx.y == 0){ FX[t] = sumFX[0]; FY[t] = sumFY[0]; } } int main() { ofstream output_log,output_results,output_residual,output_vel,output_force; float nu = UMAX/RE; float Ma = UMAX*sqrt(BETA); cout<<"Ma = "<<Ma<<endl; string FileName = CASENAME; output_log.open ((FileName+".log").c_str()); output_results.open ((FileName+".dat").c_str()); output_residual.open ((FileName+".res").c_str()); output_vel.open ((FileName+".vel").c_str()); output_force.open ((FileName+".frc").c_str()); //write input parameters to console and log file WriteInputs(cout); WriteInputs(output_log); //allocate and initialize arrays float *u[4],*v[4],*p[4],*Res,*FX,*FY; float *FX_intm, *FY_intm; float *test; for(int i = 0; i<4; i++){ u[i] = (float *)malloc(XDIM*YDIM*sizeof(float)); v[i] = (float *)malloc(XDIM*YDIM*sizeof(float)); p[i] = (float *)malloc(XDIM*YDIM*sizeof(float)); } Res = (float *)malloc(TMAX*sizeof(float)); FX = (float *)malloc(TMAX*sizeof(float)); FY = (float *)malloc(TMAX*sizeof(float)); //initialize host memory for(int i = 0; i<4; i++){ for(int j = 0; j<XDIM*YDIM; j++){ u[i][j] = UMAX; v[i][j] = 0.f; p[i][j] = 0.f; } } for(int j = 0; j<TMAX; j++){ Res[j] = 0.f; FX[j] = 0.f; FY[j] = 0.f; } //size_t memsize, memsize2; size_t pitch = 2; while(pitch<XDIM) pitch=pitch*2; pitch *= sizeof(float);//pitch*sizeof(float); size_t pitch_e = pitch/sizeof(float); cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl; dim3 u_threads(UBLOCKSIZEX, UBLOCKSIZEY,1); dim3 u_grid (((XDIM+UBLOCKSIZEX-1)/UBLOCKSIZEX),((YDIM+UBLOCKSIZEY-1)/UBLOCKSIZEY),1); dim3 threads(BLOCKSIZEX, BLOCKSIZEY,1); dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1); dim3 f1_threads (OBSTD,1,1); dim3 f1_grid (1,OBSTD,1); dim3 f2_threads (1,OBSTD,1); dim3 f2_grid (1,1,1); hipStream_t compute; hipStream_t transfer; hipStreamCreate(&compute); hipStreamCreate(&transfer); float *u_d[4],*v_d[4],*p_d[4], *Res_d, *FX_d, *FY_d; for(int i = 0; i<4; i++){ hipMalloc((void **) &u_d[i], pitch_e*YDIM*sizeof(float)); hipMalloc((void **) &v_d[i], pitch_e*YDIM*sizeof(float)); hipMalloc((void **) &p_d[i], pitch_e*YDIM*sizeof(float)); } hipMalloc((void **) &Res_d, TMAX*sizeof(float)); hipMalloc((void **) &FX_d, TMAX*sizeof(float)); hipMalloc((void **) &FY_d, TMAX*sizeof(float)); for(int i = 0; i<4; i++){ hipMemcpy2D(u_d[i],pitch,u[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,hipMemcpyHostToDevice); hipMemcpy2D(v_d[i],pitch,v[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,hipMemcpyHostToDevice); hipMemcpy2D(p_d[i],pitch,p[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,hipMemcpyHostToDevice); } hipMalloc((void **) &test, pitch_e*YDIM*sizeof(float)); hipMalloc((void **) &FX_intm, int(OBSTD)*sizeof(float)); hipMalloc((void **) &FY_intm, int(OBSTD)*sizeof(float)); hipMemcpy(Res_d,Res,TMAX*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(FX_d,FX,TMAX*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(FY_d,FY,TMAX*sizeof(float),hipMemcpyHostToDevice); hipFuncSetCacheConfig(ACM_U_Shared,hipFuncCachePreferShared); int A,B,C,D; int its; A = 0; B = 1; C = 2; D = 3; struct timeval tdr0,tdr1; double restime; hipDeviceSynchronize(); gettimeofday (&tdr0,NULL); float dt,maxit; //time loop for(int t = 0; t<TMAX; t++){ dt = timestep(t); maxit = iterations(t); cout<<dt<<","; //for(int it = 0; it<MAXIT; it++){ for(int it = 0; it<maxit; 
it++){ swap(A,B); //if(it > 0) hipLaunchKernelGGL(( ACM_U_Shared), dim3(u_grid),dim3(u_threads),0,compute, u_d[A],v_d[A],Res_d,u_d[B],v_d[B],p_d[B],u_d[C],v_d[C],u_d[D],v_d[D],nu,dt,it,t,pitch_e); hipLaunchKernelGGL(( ACM_BC), dim3(grid),dim3(threads),0,compute, u_d[A],v_d[A],t,pitch_e); hipLaunchKernelGGL(( ACM_P), dim3(grid),dim3(threads),0,compute, p_d[A],Res_d,u_d[A],v_d[A],p_d[B],p_d[C],dt,it,t,pitch_e,u_d[D]); hipDeviceSynchronize(); hipMemcpyAsync(&Res[t],&Res_d[t],sizeof(float),hipMemcpyDeviceToHost,compute); if(it > 1){ //if(sqrt(Res[t])/float((XDIM-2*HALO)*(YDIM-2*HALO)) < MAXRES || it == MAXIT-1){ if(Res[t]<1 || it == maxit-1){ its = it; it = maxit; } } }//end iteration //ACM_Forces<<<grid,threads,0,compute>>>(FX_d,FY_d,u_d[A],v_d[A],p_d[A],nu,t,pitch_e); hipLaunchKernelGGL(( ACM_Forces1), dim3(f1_grid),dim3(f1_threads),0,compute, FX_intm,FY_intm,u_d[A],v_d[A],p_d[A],nu,t,pitch_e,test); hipLaunchKernelGGL(( ACM_Forces2), dim3(f2_grid),dim3(f2_threads),0,compute, FX_d,FY_d,FX_intm,FY_intm,t,pitch_e,test); hipMemcpyAsync(&FX[t],&FX_d[t],sizeof(float),hipMemcpyDeviceToHost,compute); hipMemcpyAsync(&FY[t],&FY_d[t],sizeof(float),hipMemcpyDeviceToHost,compute); if(t%1000==0 && t>0) cout<<"finished time step "<<t<<endl; hipDeviceSynchronize(); //output_residual<<t<<", "<<its<<", "<<sqrt(Res[t])/((XDIM-2*HALO)*(YDIM-2*HALO))<<endl; output_residual<<t<<", "<<its<<", "<<Res[t]<<endl; output_force<<t<<", "<<FX[t]/0.5f<<", "<<FY[t]/0.5f<<endl; swap(C,D); swap(C,A); hipDeviceSynchronize(); //cout<<A<<", "<<B<<" "<<C<<", "<<D<<endl; }//end time loop hipDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); int Nodes; Nodes = XDIM*YDIM; cout<<"Time taken for main kernel: "<<restime<<" (" <<double(Nodes*double(TMAX/1000000.f))/restime<<"MNUPS)\n"; for(int i = 0; i<4; i++){ for(int j = 0; j<XDIM*YDIM; j++){ u[i][j] = 1000.f; v[i][j] = 1000.f; p[i][j] = 1000.f; } } //Copy results from device to host for(int i = 0; i<4; i++){ hipMemcpy2D(u[i],XDIM*sizeof(float),u_d[i],pitch,XDIM*sizeof(float),YDIM,hipMemcpyDeviceToHost); hipMemcpy2D(v[i],XDIM*sizeof(float),v_d[i],pitch,XDIM*sizeof(float),YDIM,hipMemcpyDeviceToHost); hipMemcpy2D(p[i],XDIM*sizeof(float),p_d[i],pitch,XDIM*sizeof(float),YDIM,hipMemcpyDeviceToHost); } //hipMemcpy2D(p[0],XDIM*sizeof(float),test,pitch,XDIM*sizeof(float),YDIM,hipMemcpyDeviceToHost); hipMemcpy(Res,Res_d,TMAX*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(FX,FX_d,TMAX*sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(FY,FY_d,TMAX*sizeof(float),hipMemcpyDeviceToHost); WriteResults(output_results,u[0],v[0],p[0]); // WriteResults(output_results,u[3],v[3],p[3]); //WriteForces(output_force,FX,FY); output_log.close(); output_results.close(); output_residual.close(); output_vel.close(); return 0; }
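/*
 * Illustrative sketch with the hypothetical name blockReduceAtomic: ACM_P and
 * ACM_Forces above both use this pattern -- a shared-memory tree reduction of one
 * block's partial values followed by a single atomicAdd of the block total into a
 * global accumulator. The halving loop assumes a power-of-two block size, so the
 * sketch fixes it at 256 threads.
 */
#define REDUCE_BLOCK 256
__global__ void blockReduceAtomic(const float* in, float* out, int n)
{
	__shared__ float s[REDUCE_BLOCK];
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	s[threadIdx.x] = (i < n) ? in[i] : 0.f; //pad the last block with zeros
	__syncthreads();
	for(int half = blockDim.x >> 1; half > 0; half >>= 1){
		if(threadIdx.x < half)
			s[threadIdx.x] += s[threadIdx.x + half];
		__syncthreads();
	}
	if(threadIdx.x == 0)
		atomicAdd(out, s[0]); //one atomic per block instead of one per thread
}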
faf729a97b99c1892684c386320795234bf6d197.cu
#include <cuda.h> #include <iostream> #include <ostream> #include <fstream> #include <sys/time.h> #include <time.h> using namespace std; #define CASENAME "test30" #define BLOCKSIZEX 192 #define BLOCKSIZEY 1 #define UBLOCKSIZEX 32 #define UBLOCKSIZEY 10 #define HALO 2 #define XDIM 576 #define YDIM 165 #define OBSTD 20 //.f #define OBSTX 122 //.f #define OBSTY 72 //.f #define TMAX 10000 #define MAXIT 1000 #define MAXRES 0.001 #define RE 100 #define UMAX 1.f #define BETA 0.01f //beta = 1/c^2 #define DTAU 0.001f #define DT 0.05f #define DT1 0.05f #define MAXIT1 200 #define STARTRAMP 200 #define ENDRAMP 1000 #define CONV 1 //1:UDS float timestep(int t) { float dt = DT; if(t<STARTRAMP) dt = DT1; else if(t>=STARTRAMP && t<ENDRAMP) dt = DT1-(DT1-DT)/(ENDRAMP-STARTRAMP)*(t-STARTRAMP); if(t == STARTRAMP) cout<<"Ramping down time step from: "<<DT1<<endl; else if(t == ENDRAMP) cout<<"Finished ramping down time step to: "<<dt<<endl; return dt; } float iterations(int t) { int it = MAXIT; if(t<STARTRAMP) it = MAXIT1; else if(t>=STARTRAMP && t<ENDRAMP) it = MAXIT1-(MAXIT1-MAXIT)/(ENDRAMP-STARTRAMP)*(t-STARTRAMP); if(t == STARTRAMP) cout<<"Ramping up max iterations from: "<<it<<endl; else if(t == ENDRAMP) cout<<"Finished ramping up max iterations to: "<<it<<endl; return it; } int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } inline __device__ float PoisProf (float x){ float radius = (YDIM-1-2*HALO)*0.5f; float result = -1.0f*(((1.0f-(x-HALO)/radius))*((1.0f-(x-HALO)/radius))-1.0f); return (result); // return 1.f; } void AllocateArray(float ****f,int x,int y) { float ***array = new float **[4]; for(int i = 0;i<4;i++) { array[i] = new float *[x]; for(int j = 0;j<x;j++) { array[i][j] = new float [y]; for(int k = 0;k<y;k++) array[i][j][k] = 0.f; } } *f = array; } void DeallocateArray(float ***f,int x) { for(int i = 0;i<4;i++) { for(int j = 0;j<x;j++) delete [] f[i][j]; delete [] f[i]; } delete [] f; } void WriteResults(ostream &output, float *u, float *v, float *p) { output<<"VARIABLES = \"X\",\"Y\",\"u\",\"v\",\"p\"\n"; output<<"ZONE F=POINT, I="<<XDIM-HALO*2<<", J="<<YDIM-HALO*2<<endl; float dx = 1; float dy = 1; float uval,vval,pval; for(int j = HALO; j<YDIM-HALO; j++){ for(int i = HALO; i<XDIM-HALO; i++) { float xc = 0.5f*dx+(i)*dx; float yc = 0.5f*dy+(j)*dy; uval = 0.5f*(u[i+j*XDIM]+u[i-1 +j*XDIM]); vval = 0.5f*(v[i+j*XDIM]+v[i+(j-1)*XDIM]); pval = p[i+j*XDIM]; if(xc>OBSTX && xc<OBSTX+OBSTD && yc>OBSTY && yc<OBSTY+OBSTD) { uval = 0.f; vval = 0.f; pval = 0.f; } output<<xc<<", "<<yc<<", "<<uval<<", "<<vval<<", "<<pval<<endl; } } } void WriteResiduals(ostream &output, float *Res) { for(int i = 0; i<TMAX; i++) output<<i<<", "<<sqrt(Res[i])/((XDIM-2*HALO)*(YDIM-2*HALO))<<endl; } void WriteInputs(ostream &output) { output<<"Domain size: \t"<<XDIM<<"x"<<YDIM<<endl; output<<"Halo size: \t"<<HALO<<endl; output<<"Target residual: \t"<<MAXRES<<endl; output<<"Pseudo time step size: \t"<<DTAU<<endl; output<<"Maximum iterations: \t"<<MAXIT<<endl; output<<"Real time step size: \t"<<DT<<endl; output<<"Maximum time steps: \t"<<TMAX<<endl; output<<"Re: \t"<<RE<<endl; output<<"uMax: \t"<<UMAX<<endl; string scheme; if(CONV == 0) scheme = "CDS "; if(CONV == 1) scheme = "UDS "; if(CONV == 2) scheme = "Hybrid"; if(CONV == 3) scheme = "QUICK "; output<<"Convective discretization: \t"<<scheme<<endl; } __global__ void ACM_U_Shared(float* uA, float* vA, float* Res, float* uB, float* vB, float* pB, float* uC, float* vC, float* uD, float* vD, float nu, float dt, int it, int t, size_t pitch) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int tx = threadIdx.x+1; int ty = threadIdx.y+1; __shared__ float u[UBLOCKSIZEX+2][UBLOCKSIZEY+2]; __shared__ float v[UBLOCKSIZEX+2][UBLOCKSIZEY+2]; //if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){ if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){ if(threadIdx.x == 0){ u[0][ty] = uB[ x-1+ y *pitch]; v[0][ty] = vB[ x-1+ y *pitch]; if(threadIdx.y == blockDim.y-1){ u[0][ty+1] = uB[ x-1+ (y+1)*pitch]; } } if(threadIdx.x == blockDim.x-1){ u[UBLOCKSIZEX+1][ty] = uB[ x+1+ y *pitch]; v[UBLOCKSIZEX+1][ty] = vB[ x+1+ y *pitch]; } if(threadIdx.y == 0){ u[tx][0] = uB[ x+ (y-1)*pitch]; v[tx][0] = vB[ x+ (y-1)*pitch]; if(threadIdx.x == blockDim.x-1){ v[tx+1][0] = vB[ x+1+(y-1)*pitch]; } } if(threadIdx.y == blockDim.y-1){ u[tx][UBLOCKSIZEY+1] = uB[ x+ (y+1)*pitch]; v[tx][UBLOCKSIZEY+1] = vB[ x+ (y+1)*pitch]; } u[tx][ty] = uB[ x+ (y )*pitch]; v[tx][ty] = vB[ x+ (y )*pitch]; } syncthreads(); //if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){ if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){ float Ae,Aw,An,As; float AP_Ue,AE_Ue,AW_Ue,AN_Ue,AS_Ue; //A coeff for East node on u of east face float AP_Vn,AE_Vn,AW_Vn,AN_Vn,AS_Vn; float Fe,Fw,Fn,Fs; float De,Dw,Dn,Ds; float B_Ue,B_Vn; float dx = 1.f/OBSTD; 
float dy = 1.f/OBSTD; Ae = dy; Aw = dy; An = dx; As = dx; Fe = 0.5f*(u[tx ][ty ]+u[tx+1][ty ])*Ae; Fw = 0.5f*(u[tx-1][ty ]+u[tx ][ty ])*Aw; Fn = 0.5f*(v[tx ][ty ]+v[tx+1][ty ])*An; Fs = 0.5f*(v[tx ][ty-1]+v[tx+1][ty-1])*As; De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy; if(CONV == 1){ AE_Ue = max(-Fe,0.f)+De; AW_Ue = max( Fw,0.f)+Dw; AN_Ue = max(-Fn,0.f)+Dn; AS_Ue = max( Fs,0.f)+Ds; AP_Ue = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds; } else if(CONV == 2){ AE_Ue = max(-Fe*0.5f+De,max(-Fe,0.f)); AW_Ue = max( Fw*0.5f+Dw,max( Fw,0.f)); AN_Ue = max(-Fn*0.5f+Dn,max(-Fn,0.f)); AS_Ue = max( Fs*0.5f+Ds,max( Fs,0.f)); AP_Ue = AE_Ue+AW_Ue+AN_Ue+AS_Ue; } B_Ue = Ae*(pB[x +(y )*pitch]-pB[x+1+(y )*pitch]); //B_Ue-=( 3.f*u[tx ][ty ]-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/DT; uA[ x + y *pitch] =(((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ] +AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1] -AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy) //-(-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/dt -( 3.f*u[tx ][ty ]-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/dt //-( u[tx ][ty ]-uC[x +(y )*pitch])/dt )*DTAU)///(1.f+3.f*DTAU/dt) + u[tx ][ty ]; // uA[ x + y *pitch] =(((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ] // +AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1] // -AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy) // +(uC[x +(y )*pitch])/DT // + u[tx ][ty ]/DTAU // ))/(1.f/DT+1.f/DTAU); Fe = 0.5f*(u[tx ][ty ]+u[tx ][ty+1])*Ae; Fw = 0.5f*(u[tx-1][ty ]+u[tx-1][ty+1])*Aw; Fn = 0.5f*(v[tx ][ty ]+v[tx ][ty+1])*An; Fs = 0.5f*(v[tx ][ty ]+v[tx ][ty-1])*As; De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy; if(CONV == 1){ AE_Vn = max(-Fe,0.f)+De; AW_Vn = max( Fw,0.f)+Dw; AN_Vn = max(-Fn,0.f)+Dn; AS_Vn = max( Fs,0.f)+Ds; AP_Vn = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds; } else if(CONV == 2){ AE_Vn = max(-Fe*0.5f+De,max(-Fe,0.f)); AW_Vn = max( Fw*0.5f+Dw,max( Fw,0.f)); AN_Vn = max(-Fn*0.5f+Dn,max(-Fn,0.f)); AS_Vn = max( Fs*0.5f+Ds,max( Fs,0.f)); AP_Vn = AE_Vn+AW_Vn+AN_Vn+AS_Vn; } B_Vn = An*(pB[x +(y )*pitch]-pB[x +(y+1)*pitch]); //B_Vn-=( 3.f*v[tx ][ty ]-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/DT; vA[ x + y *pitch] =(((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ] +AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1] -AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy) //-(-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/dt -( 3.f*v[tx ][ty ]-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/dt //-( v[tx ][ty ]-vC[x +(y )*pitch])/dt )*DTAU)///(1.f+3.f*DTAU/dt) + v[tx ][ty ]; // vA[ x + y *pitch] =(((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ] // +AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1] // -AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy) // +(vC[x +(y )*pitch])/DT // + v[tx ][ty ]/DTAU // ))/(1.f/DT+1.f/DTAU); // if(y < HALO) // { // vA[ x + y *pitch] = 0.f; // uA[ x + y *pitch] = u[tx ][ty+1]; // } // if(y > YDIM-HALO-2) // { // vA[ x + y *pitch] = 0.f; // uA[ x + y *pitch] = u[tx ][ty-1]; // } // if(y > YDIM-HALO-3) // vA[ x + y *pitch] = 0.f; // if(x < HALO) // { // uA[ x + y *pitch] = UMAX; // vA[ x + y *pitch] = 0.f; // } // if(x > XDIM-HALO-2) // { // uA[ x + y *pitch] = u[tx-1][ty ]; // vA[ x + y *pitch] = v[tx-1][ty ]; // } // if(x > XDIM-HALO-3) // uA[ x + y *pitch] = u[tx-1][ty ]; // // if(x == OBSTX-1 && y>=OBSTY & y<OBSTY+OBSTD) // uA[ x + y *pitch] = 0.f; // if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD) // uA[ x + y *pitch] = 0.f; // if(y == OBSTY-1 && x>=OBSTX & x<OBSTX+OBSTD) // vA[ x + y *pitch] = 0.f; // if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD) // vA[ x + y *pitch] = 0.f; // // if(x == OBSTX && y>=OBSTY & y<OBSTY+OBSTD-1) // vA[ x + y *pitch] = 
-v[tx-1][ty ]; // if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1) // vA[ x + y *pitch] = -v[tx+1][ty ]; // if(y == OBSTY && x>=OBSTX & x<OBSTX+OBSTD-1){ // if(t < 200) uA[ x + y *pitch] = u[tx][ty-1]; // else // uA[ x + y *pitch] = -u[tx ][ty-1]; // } // if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1) // uA[ x + y *pitch] = -u[tx ][ty+1]; } if(x == 0 && y == 0) Res[t] = 0.f; } __global__ void ACM_BC(float* u, float* v, int t, size_t pitch) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; if(y < HALO) { v[ x + y *pitch] = 0.f; u[ x + y *pitch] = -u[x +(y+1)*pitch]; } if(y > YDIM-HALO-2) { v[ x + y *pitch] = 0.f; u[ x + y *pitch] = -u[x +(y-1)*pitch]; } if(y > YDIM-HALO-3) v[ x + y *pitch] = 0.f; if(x < HALO) { u[ x + y *pitch] = UMAX*PoisProf(y); v[ x + y *pitch] = 0.f; } if(x > XDIM-HALO-2) { u[ x + y *pitch] = u[XDIM-HALO-2+y*pitch]; v[ x + y *pitch] = v[XDIM-HALO-2+y*pitch]; } // if(x > XDIM-HALO-3) // u[ x + y *pitch] = u[x-1+y*pitch]; if(x == OBSTX-1 && y>=OBSTY & y<OBSTY+OBSTD) u[ x + y *pitch] = 0.f; if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD) u[ x + y *pitch] = 0.f; if(y == OBSTY-1 && x>=OBSTX & x<OBSTX+OBSTD) v[ x + y *pitch] = 0.f; if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD) v[ x + y *pitch] = 0.f; if(x == OBSTX && y>=OBSTY & y<OBSTY+OBSTD-1) v[ x + y *pitch] = -v[x-1+y*pitch]; if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1) v[ x + y *pitch] = -v[x+1+y*pitch]; if(y == OBSTY && x>=OBSTX & x<OBSTX+OBSTD-1){ if(t < 500) u[ x + y *pitch] = u[x+(y-1)*pitch]; else u[ x + y *pitch] = -u[x+(y-1)*pitch]; } if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1) u[ x + y *pitch] = -u[x+(y+1)*pitch]; } __global__ void ACM_P(float* pA, float* Res, float* uA, float* vA, float* pB, float *pC, float dt, int it, int t, size_t pitch, float *uD) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; __shared__ float sumRes[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); float dx = 1.f/OBSTD; float dy = 1.f/OBSTD; if(x > HALO-1 && x < XDIM-HALO && y > HALO-1 && y < YDIM-HALO){ float res = (dy*(uA[ x + y *pitch]-uA[ x-1+ y *pitch]) +dx*(vA[ x + y *pitch]-vA[ x +(y-1)*pitch]))/(dx*dy); pA[ x + y *pitch] = -(res/BETA)*DTAU+pB[ x + y *pitch]; if(x > HALO && x < XDIM-HALO-2 && y > HALO && y < YDIM-HALO-2 && abs(res/UMAX)>MAXRES && !(x>= OBSTX && y>=OBSTY && x<OBSTX+OBSTD && y<OBSTY+OBSTD)){ check[0] = 1; sumRes[threadIdx.x]=1.f; } else sumRes[threadIdx.x]=0.f; } else{ sumRes[threadIdx.x]=0.f; } syncthreads(); if(check[0] == 1){ //reduction for residual int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumRes[threadIdx.x] += sumRes[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&Res[t],sumRes[0]); } } } __global__ void ACM_VelTransfer(float* uD, float* vD, float* uA, float* vA, size_t pitch) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; uD[ x + y *pitch] = uA[ x + y *pitch]; vD[ x + y *pitch] = vA[ x + y *pitch]; } __global__ void ACM_Forces(float *FX, float *FY, float* uD, float* vD, float* pA, float nu, int t, size_t pitch) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; __shared__ float sumFX[BLOCKSIZEX],sumFY[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; sumFX[threadIdx.x] = 0.f; 
sumFY[threadIdx.x] = 0.f; syncthreads(); float dx = 1.f/OBSTD; float dy = 1.f/OBSTD; //forces on bottom wall if(x >= OBSTX && x<OBSTX+OBSTD && y == OBSTY){ check[0] = 1; sumFY[threadIdx.x] += dx*pA[ x +(y-1)*pitch]; if(x<OBSTX+OBSTD-1) sumFX[threadIdx.x] -= dx*nu*2.f*uD[ x +(y )*pitch]/dy; } //forces on top wall if(x >= OBSTX && x<OBSTX+OBSTD && y == OBSTY+OBSTD-1){ check[0] = 1; sumFY[threadIdx.x] -= dx*pA[ x +(y+1)*pitch]; if(x<OBSTX+OBSTD-1) sumFX[threadIdx.x] -= dx*nu*2.f*uD[ x +(y )*pitch]/dy; } //forces on left wall if(y >= OBSTY && y<OBSTY+OBSTD && x == OBSTX){ check[0] = 1; sumFX[threadIdx.x] += dy*pA[ x-1+(y )*pitch]; if(y<OBSTY+OBSTD-1) sumFY[threadIdx.x] -= dy*nu*2.f*vD[ x +(y )*pitch]/dx; } //forces on right wall if(y >= OBSTY && y<OBSTY+OBSTD && x == OBSTX+OBSTD-1){ check[0] = 1; sumFX[threadIdx.x] -= dy*pA[ x+1+(y )*pitch]; if(y<OBSTY+OBSTD-1) sumFY[threadIdx.x] -= dy*nu*2.f*vD[ x +(y )*pitch]/dx; } syncthreads(); if(check[0] == 1){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumFX[threadIdx.x] += sumFX[threadIdx.x+halfPoint]; sumFY[threadIdx.x] += sumFY[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t],sumFX[0]); atomicAdd(&FY[t],sumFY[0]); } } } __global__ void ACM_Forces1(float *FX_intm, float *FY_intm, float* uD, float* vD, float* pA, float nu, int t, size_t pitch, float *test) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int xcoord = x+OBSTX; int ycoord = y+OBSTY; __shared__ float sumFX[OBSTD],sumFY[OBSTD]; sumFX[threadIdx.x] = 0.f; sumFY[threadIdx.x] = 0.f; syncthreads(); float dx = 1.f/OBSTD; float dy = 1.f/OBSTD; //forces on bottom wall if(y == 0){ sumFY[threadIdx.x] += dx*pA[ xcoord +(ycoord-1)*pitch]; sumFX[threadIdx.x] -= dx*nu*2.f*uD[ xcoord +(ycoord )*pitch]/dy; } //forces on top wall if(y == OBSTD-1){ sumFY[threadIdx.x] -= dx*pA[ xcoord +(ycoord+1)*pitch]; sumFX[threadIdx.x] -= dx*nu*2.f*uD[ xcoord +(ycoord )*pitch]/dy; } //forces on left wall if(x == 0){ sumFX[threadIdx.x] += dy*pA[ xcoord-1+(ycoord )*pitch]; sumFY[threadIdx.x] -= dy*nu*2.f*vD[ xcoord +(ycoord )*pitch]/dy; } //forces on right wall if(x == OBSTD-1){ sumFX[threadIdx.x] -= dy*pA[ xcoord+1+(ycoord )*pitch]; sumFY[threadIdx.x] -= dy*nu*2.f*vD[ xcoord +(ycoord )*pitch]/dx; } syncthreads(); //reduction for force // int nTotalThreads = blockDim.x; // while(nTotalThreads > 1){ // int halfPoint = (nTotalThreads >> 1); // if(threadIdx.x < halfPoint){ // sumFX[threadIdx.x] += sumFX[threadIdx.x+halfPoint]; // sumFY[threadIdx.x] += sumFY[threadIdx.x+halfPoint]; // } // syncthreads(); // nTotalThreads = halfPoint; // } float sum = 0; if(threadIdx.x == 0){ for(int i = 0; i<blockDim.x; i++) sum += sumFX[i]; } sumFX[0] = sum; sum = 0; if(threadIdx.x == 0){ for(int i = 0; i<blockDim.x; i++) sum += sumFY[i]; } sumFY[0] = sum; if(threadIdx.x == 0){ FX_intm[y] = sumFX[0]; FY_intm[y] = sumFY[0]; } } __global__ void ACM_Forces2(float *FX, float *FY, float *FX_intm, float *FY_intm, int t, size_t pitch, float *test) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; //int xcoord = x+OBSTX; //int ycoord = y+OBSTY; __shared__ float sumFX[OBSTD],sumFY[OBSTD]; sumFX[threadIdx.y] = 0.f; sumFY[threadIdx.y] = 0.f; syncthreads(); sumFX[threadIdx.y] = FX_intm[threadIdx.y]; sumFY[threadIdx.y] = FY_intm[threadIdx.y]; //test[xcoord+ycoord*pitch] = 1.f; 
syncthreads(); float sum = 0; if(threadIdx.y == 0){ for(int i = 0; i<blockDim.y; i++) sum += sumFX[i]; } sumFX[0] = sum; sum = 0; if(threadIdx.y == 0){ for(int i = 0; i<blockDim.y; i++) sum += sumFY[i]; } sumFY[0] = sum; if(threadIdx.y == 0){ FX[t] = sumFX[0]; FY[t] = sumFY[0]; } } int main() { ofstream output_log,output_results,output_residual,output_vel,output_force; float nu = UMAX/RE; float Ma = UMAX*sqrt(BETA); cout<<"Ma = "<<Ma<<endl; string FileName = CASENAME; output_log.open ((FileName+".log").c_str()); output_results.open ((FileName+".dat").c_str()); output_residual.open ((FileName+".res").c_str()); output_vel.open ((FileName+".vel").c_str()); output_force.open ((FileName+".frc").c_str()); //write input parameters to console and log file WriteInputs(cout); WriteInputs(output_log); //allocate and initialize arrays float *u[4],*v[4],*p[4],*Res,*FX,*FY; float *FX_intm, *FY_intm; float *test; for(int i = 0; i<4; i++){ u[i] = (float *)malloc(XDIM*YDIM*sizeof(float)); v[i] = (float *)malloc(XDIM*YDIM*sizeof(float)); p[i] = (float *)malloc(XDIM*YDIM*sizeof(float)); } Res = (float *)malloc(TMAX*sizeof(float)); FX = (float *)malloc(TMAX*sizeof(float)); FY = (float *)malloc(TMAX*sizeof(float)); //initialize host memory for(int i = 0; i<4; i++){ for(int j = 0; j<XDIM*YDIM; j++){ u[i][j] = UMAX; v[i][j] = 0.f; p[i][j] = 0.f; } } for(int j = 0; j<TMAX; j++){ Res[j] = 0.f; FX[j] = 0.f; FY[j] = 0.f; } //size_t memsize, memsize2; size_t pitch = 2; while(pitch<XDIM) pitch=pitch*2; pitch *= sizeof(float);//pitch*sizeof(float); size_t pitch_e = pitch/sizeof(float); cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl; dim3 u_threads(UBLOCKSIZEX, UBLOCKSIZEY,1); dim3 u_grid (((XDIM+UBLOCKSIZEX-1)/UBLOCKSIZEX),((YDIM+UBLOCKSIZEY-1)/UBLOCKSIZEY),1); dim3 threads(BLOCKSIZEX, BLOCKSIZEY,1); dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1); dim3 f1_threads (OBSTD,1,1); dim3 f1_grid (1,OBSTD,1); dim3 f2_threads (1,OBSTD,1); dim3 f2_grid (1,1,1); cudaStream_t compute; cudaStream_t transfer; cudaStreamCreate(&compute); cudaStreamCreate(&transfer); float *u_d[4],*v_d[4],*p_d[4], *Res_d, *FX_d, *FY_d; for(int i = 0; i<4; i++){ cudaMalloc((void **) &u_d[i], pitch_e*YDIM*sizeof(float)); cudaMalloc((void **) &v_d[i], pitch_e*YDIM*sizeof(float)); cudaMalloc((void **) &p_d[i], pitch_e*YDIM*sizeof(float)); } cudaMalloc((void **) &Res_d, TMAX*sizeof(float)); cudaMalloc((void **) &FX_d, TMAX*sizeof(float)); cudaMalloc((void **) &FY_d, TMAX*sizeof(float)); for(int i = 0; i<4; i++){ cudaMemcpy2D(u_d[i],pitch,u[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,cudaMemcpyHostToDevice); cudaMemcpy2D(v_d[i],pitch,v[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,cudaMemcpyHostToDevice); cudaMemcpy2D(p_d[i],pitch,p[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,cudaMemcpyHostToDevice); } cudaMalloc((void **) &test, pitch_e*YDIM*sizeof(float)); cudaMalloc((void **) &FX_intm, int(OBSTD)*sizeof(float)); cudaMalloc((void **) &FY_intm, int(OBSTD)*sizeof(float)); cudaMemcpy(Res_d,Res,TMAX*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(FX_d,FX,TMAX*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(FY_d,FY,TMAX*sizeof(float),cudaMemcpyHostToDevice); cudaFuncSetCacheConfig(ACM_U_Shared,cudaFuncCachePreferShared); int A,B,C,D; int its; A = 0; B = 1; C = 2; D = 3; struct timeval tdr0,tdr1; double restime; cudaDeviceSynchronize(); gettimeofday (&tdr0,NULL); float dt,maxit; //time loop for(int t = 0; t<TMAX; t++){ dt = timestep(t); maxit = iterations(t); cout<<dt<<","; //for(int it = 0; it<MAXIT; it++){ 
for(int it = 0; it<maxit; it++){ swap(A,B); //if(it > 0) ACM_U_Shared<<<u_grid,u_threads,0,compute>>>(u_d[A],v_d[A],Res_d,u_d[B],v_d[B],p_d[B],u_d[C],v_d[C],u_d[D],v_d[D],nu,dt,it,t,pitch_e); ACM_BC<<<grid,threads,0,compute>>>(u_d[A],v_d[A],t,pitch_e); ACM_P<<<grid,threads,0,compute>>>(p_d[A],Res_d,u_d[A],v_d[A],p_d[B],p_d[C],dt,it,t,pitch_e,u_d[D]); cudaDeviceSynchronize(); cudaMemcpyAsync(&Res[t],&Res_d[t],sizeof(float),cudaMemcpyDeviceToHost,compute); if(it > 1){ //if(sqrt(Res[t])/float((XDIM-2*HALO)*(YDIM-2*HALO)) < MAXRES || it == MAXIT-1){ if(Res[t]<1 || it == maxit-1){ its = it; it = maxit; } } }//end iteration //ACM_Forces<<<grid,threads,0,compute>>>(FX_d,FY_d,u_d[A],v_d[A],p_d[A],nu,t,pitch_e); ACM_Forces1<<<f1_grid,f1_threads,0,compute>>>(FX_intm,FY_intm,u_d[A],v_d[A],p_d[A],nu,t,pitch_e,test); ACM_Forces2<<<f2_grid,f2_threads,0,compute>>>(FX_d,FY_d,FX_intm,FY_intm,t,pitch_e,test); cudaMemcpyAsync(&FX[t],&FX_d[t],sizeof(float),cudaMemcpyDeviceToHost,compute); cudaMemcpyAsync(&FY[t],&FY_d[t],sizeof(float),cudaMemcpyDeviceToHost,compute); if(t%1000==0 && t>0) cout<<"finished time step "<<t<<endl; cudaDeviceSynchronize(); //output_residual<<t<<", "<<its<<", "<<sqrt(Res[t])/((XDIM-2*HALO)*(YDIM-2*HALO))<<endl; output_residual<<t<<", "<<its<<", "<<Res[t]<<endl; output_force<<t<<", "<<FX[t]/0.5f<<", "<<FY[t]/0.5f<<endl; swap(C,D); swap(C,A); cudaDeviceSynchronize(); //cout<<A<<", "<<B<<" "<<C<<", "<<D<<endl; }//end time loop cudaDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); int Nodes; Nodes = XDIM*YDIM; cout<<"Time taken for main kernel: "<<restime<<" (" <<double(Nodes*double(TMAX/1000000.f))/restime<<"MNUPS)\n"; for(int i = 0; i<4; i++){ for(int j = 0; j<XDIM*YDIM; j++){ u[i][j] = 1000.f; v[i][j] = 1000.f; p[i][j] = 1000.f; } } //Copy results from device to host for(int i = 0; i<4; i++){ cudaMemcpy2D(u[i],XDIM*sizeof(float),u_d[i],pitch,XDIM*sizeof(float),YDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(v[i],XDIM*sizeof(float),v_d[i],pitch,XDIM*sizeof(float),YDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(p[i],XDIM*sizeof(float),p_d[i],pitch,XDIM*sizeof(float),YDIM,cudaMemcpyDeviceToHost); } //cudaMemcpy2D(p[0],XDIM*sizeof(float),test,pitch,XDIM*sizeof(float),YDIM,cudaMemcpyDeviceToHost); cudaMemcpy(Res,Res_d,TMAX*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(FX,FX_d,TMAX*sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(FY,FY_d,TMAX*sizeof(float),cudaMemcpyDeviceToHost); WriteResults(output_results,u[0],v[0],p[0]); // WriteResults(output_results,u[3],v[3],p[3]); //WriteForces(output_force,FX,FY); output_log.close(); output_results.close(); output_residual.close(); output_vel.close(); return 0; }
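The ACM_P and ACM_Forces kernels above both build a per-block partial sum in shared memory with a halving loop and then fold it into a global total with atomicAdd. The standalone sketch below isolates that pattern; the kernel and buffer names are illustrative assumptions, not part of this solver, and the block size must be a power of two for the halving loop to cover every element.

#include <cuda_runtime.h>
#include <cstdio>

#define BS 128   // power-of-two block size, as assumed by the halving loop

__global__ void blockSumToGlobal(const float* in, int n, float* total)
{
    __shared__ float s[BS];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    s[threadIdx.x] = (i < n) ? in[i] : 0.f;
    __syncthreads();

    // tree reduction: halve the active range until one partial sum remains
    for (int half = blockDim.x >> 1; half > 0; half >>= 1) {
        if (threadIdx.x < half) s[threadIdx.x] += s[threadIdx.x + half];
        __syncthreads();
    }
    // one atomic per block folds the partial sum into the global total
    if (threadIdx.x == 0) atomicAdd(total, s[0]);
}

int main()
{
    const int n = 1000;
    float h_in[n], h_total = 0.f;
    for (int i = 0; i < n; i++) h_in[i] = 1.f;

    float *d_in, *d_total;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_total, sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_total, &h_total, sizeof(float), cudaMemcpyHostToDevice);

    blockSumToGlobal<<<(n + BS - 1) / BS, BS>>>(d_in, n, d_total);
    cudaMemcpy(&h_total, d_total, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_total);   // expected 1000.0

    cudaFree(d_in);
    cudaFree(d_total);
    return 0;
}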
a8bacc5d9fb8d0d461b667b2f8f214aaef0c6c86.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // nvcc -arch sm_50 -cubin sconv_updat_s8_C128_K64.cu extern "C" __global__ void __launch_bounds__(128) sconv_updat_s8_C128_K64 ( unsigned int* param_Rand, float* param_F, const float* param_I, const float* param_E, float param_alpha, int param_flags, int param_N, int param_K, int param_D, int param_H, int param_W, int param_WN, int param_HWN, int param_DHWN, int param_C, int param_CRST, int param_RST, int param_magic_RST, int param_shift_RST, int param_RS, int param_magic_RS, int param_shift_RS, int param_S, int param_magic_S, int param_shift_S, int param_pad_d, int param_pad_h, int param_pad_w, int param_str_d, int param_str_h, int param_str_w, int param_P, int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN, int param_magic_Q, int param_shift_Q, int param_magic_PQ, int param_shift_PQ, int param_part_P, int param_part_Q, int param_part_PQ ) { __shared__ float share[128*8*2 + 64*8*2 + 6]; int tid = threadIdx.x; share[tid] = 1; *param_F = share[127-tid]; }
a8bacc5d9fb8d0d461b667b2f8f214aaef0c6c86.cu
/* * Copyright 2014 Nervana Systems Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // nvcc -arch sm_50 -cubin sconv_updat_s8_C128_K64.cu extern "C" __global__ void __launch_bounds__(128) sconv_updat_s8_C128_K64 ( unsigned int* param_Rand, float* param_F, const float* param_I, const float* param_E, float param_alpha, int param_flags, int param_N, int param_K, int param_D, int param_H, int param_W, int param_WN, int param_HWN, int param_DHWN, int param_C, int param_CRST, int param_RST, int param_magic_RST, int param_shift_RST, int param_RS, int param_magic_RS, int param_shift_RS, int param_S, int param_magic_S, int param_shift_S, int param_pad_d, int param_pad_h, int param_pad_w, int param_str_d, int param_str_h, int param_str_w, int param_P, int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN, int param_magic_Q, int param_shift_Q, int param_magic_PQ, int param_shift_PQ, int param_part_P, int param_part_Q, int param_part_PQ ) { __shared__ float share[128*8*2 + 64*8*2 + 6]; int tid = threadIdx.x; share[tid] = 1; *param_F = share[127-tid]; }
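This stub pair exercises two kernel attributes rather than any real convolution work: the __launch_bounds__(128) qualifier, which tells the compiler the maximum block size to optimise register usage for, and a statically sized __shared__ array read back at a mirrored thread index. The sketch below shows the same two features with the barrier that makes the mirrored read well defined; the kernel name and data are illustrative, not taken from the benchmark.

#include <cuda_runtime.h>
#include <cstdio>

extern "C" __global__ void __launch_bounds__(128)
reverse_in_block(const float* in, float* out)
{
    __shared__ float tile[128];
    int tid = threadIdx.x;
    tile[tid] = in[blockIdx.x * blockDim.x + tid];
    __syncthreads();                               // make all shared writes visible
    out[blockIdx.x * blockDim.x + tid] = tile[127 - tid];
}

int main()
{
    const int n = 128;
    float h_in[n], h_out[n];
    for (int i = 0; i < n; i++) h_in[i] = (float)i;

    float *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    reverse_in_block<<<1, 128>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("h_out[0] = %f\n", h_out[0]);           // expected 127.0

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}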
0f601add99b6c46ad92fa75eb6275bd5a4cf3bbd.hip
// !!! This is a file automatically generated by hipify!!! #ifdef __HIPCC__ #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ > 120 #define DOUBLE_PRECISION true #else #define DOUBLE_PRECISION false #endif #else #define DOUBLE_PRECISION false #endif #else #define DOUBLE_PRECISION false #endif #include <stdio.h> #include <iostream> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include "numer_buffer.h" struct CastToFloat { float operator()(double value) const { return static_cast<float>(value);} }; template<typename T> NumerFloatBuffer<T>::NumerFloatBuffer() { this->h_data = new std::vector<T>(); this->d_data = new thrust::device_vector<T>(); this->_size = this->h_data->size(); } template<typename T> NumerFloatBuffer<T>::NumerFloatBuffer(unsigned long size) { this->h_data = new std::vector<T>(size, 0); this->d_data = new thrust::device_vector<T>(size, 0); this->_size = this->h_data->size(); } template<typename T> NumerFloatBuffer<T>::~NumerFloatBuffer() { delete this->h_data; delete this->d_data; } template<typename T> unsigned int NumerFloatBuffer<T>::size() { return this->_size; } template<typename T> void NumerFloatBuffer<T>::write(ErlNifEnv *env, ERL_NIF_TERM data) { ERL_NIF_TERM head; double value; long lvalue; this->h_data->clear(); this->d_data->clear(); while (enif_get_list_cell(env, data, &head, &data)) { if (enif_get_double(env, head, &value)) { this->h_data->push_back((T)value); }else if (enif_get_long(env, head, &lvalue)) { this->h_data->push_back((T)lvalue); } } this->_size = this->h_data->size(); /* if(!DOUBLE_PRECISION){ this->d_data->clear(); std::transform(this->h_data->begin(), this->h_data->end(), std::back_inserter(*(this->d_data)), CastToFloat()); } else*/ *(this->d_data) = *(this->h_data); //hipDeviceSynchronize(); } template<typename T> ERL_NIF_TERM NumerFloatBuffer<T>::toErlTerms(ErlNifEnv *env) { typename std::vector<T>::iterator iter; h_data->clear(); h_data->resize(d_data->size()); thrust::copy(d_data->begin(), d_data->end(), this->h_data->begin()); ERL_NIF_TERM retval = enif_make_list(env, 0.0f); if (h_data->size() > 0) { for (iter = h_data->end(); iter != h_data->begin();) { --iter; retval = enif_make_list_cell(env, enif_make_double(env, *iter), retval); } } return retval; } template<typename T> void NumerFloatBuffer<T>::clear() { this->h_data->clear(); this->d_data->clear(); this->_size = this->h_data->size(); } template class NumerFloatBuffer<float>; template class NumerFloatBuffer<double>;
0f601add99b6c46ad92fa75eb6275bd5a4cf3bbd.cu
#ifdef __CUDACC__ #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ > 120 #define DOUBLE_PRECISION true #else #define DOUBLE_PRECISION false #endif #else #define DOUBLE_PRECISION false #endif #else #define DOUBLE_PRECISION false #endif #include <stdio.h> #include <iostream> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include "numer_buffer.h" struct CastToFloat { float operator()(double value) const { return static_cast<float>(value);} }; template<typename T> NumerFloatBuffer<T>::NumerFloatBuffer() { this->h_data = new std::vector<T>(); this->d_data = new thrust::device_vector<T>(); this->_size = this->h_data->size(); } template<typename T> NumerFloatBuffer<T>::NumerFloatBuffer(unsigned long size) { this->h_data = new std::vector<T>(size, 0); this->d_data = new thrust::device_vector<T>(size, 0); this->_size = this->h_data->size(); } template<typename T> NumerFloatBuffer<T>::~NumerFloatBuffer() { delete this->h_data; delete this->d_data; } template<typename T> unsigned int NumerFloatBuffer<T>::size() { return this->_size; } template<typename T> void NumerFloatBuffer<T>::write(ErlNifEnv *env, ERL_NIF_TERM data) { ERL_NIF_TERM head; double value; long lvalue; this->h_data->clear(); this->d_data->clear(); while (enif_get_list_cell(env, data, &head, &data)) { if (enif_get_double(env, head, &value)) { this->h_data->push_back((T)value); }else if (enif_get_long(env, head, &lvalue)) { this->h_data->push_back((T)lvalue); } } this->_size = this->h_data->size(); /* if(!DOUBLE_PRECISION){ this->d_data->clear(); std::transform(this->h_data->begin(), this->h_data->end(), std::back_inserter(*(this->d_data)), CastToFloat()); } else*/ *(this->d_data) = *(this->h_data); //cudaDeviceSynchronize(); } template<typename T> ERL_NIF_TERM NumerFloatBuffer<T>::toErlTerms(ErlNifEnv *env) { typename std::vector<T>::iterator iter; h_data->clear(); h_data->resize(d_data->size()); thrust::copy(d_data->begin(), d_data->end(), this->h_data->begin()); ERL_NIF_TERM retval = enif_make_list(env, 0.0f); if (h_data->size() > 0) { for (iter = h_data->end(); iter != h_data->begin();) { --iter; retval = enif_make_list_cell(env, enif_make_double(env, *iter), retval); } } return retval; } template<typename T> void NumerFloatBuffer<T>::clear() { this->h_data->clear(); this->d_data->clear(); this->_size = this->h_data->size(); } template class NumerFloatBuffer<float>; template class NumerFloatBuffer<double>;
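NumerFloatBuffer::write and toErlTerms above move data with a plain assignment from a std::vector to a thrust::device_vector and a thrust::copy back to the host. The minimal round trip below shows just that transfer path, independent of the Erlang NIF wrapper; the variable names are illustrative.

#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <vector>
#include <cstdio>

int main()
{
    std::vector<double> h = {1.0, 2.0, 3.0};

    thrust::device_vector<double> d;
    d = h;                                          // host -> device copy, as in write()

    std::vector<double> back(d.size());
    thrust::copy(d.begin(), d.end(), back.begin()); // device -> host copy, as in toErlTerms()

    for (double v : back) printf("%f\n", v);
    return 0;
}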
536c107a5971412b786ebb9b8211723be3201c13.hip
// !!! This is a file automatically generated by hipify!!! //xfail:BOOGIE_ERROR //--blockDim=64 --gridDim=64 --no-inline // #include "hip/hip_runtime.h" __global__ void foo (int* p, int* q, int* r){ int a, b, c; int* d; a = 10; d = &a; d = &b; if (a > 10) { d = &c; } else { p = q; q = p; } d[1] = 200; p[100] = q[100] + 1; }
536c107a5971412b786ebb9b8211723be3201c13.cu
//xfail:BOOGIE_ERROR //--blockDim=64 --gridDim=64 --no-inline // #include "cuda.h" __global__ void foo (int* p, int* q, int* r){ int a, b, c; int* d; a = 10; d = &a; d = &b; if (a > 10) { d = &c; } else { p = q; q = p; } d[1] = 200; p[100] = q[100] + 1; }
c28c3b13b4dd4c75eefa86d7cb5fae4f086a37f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <stdio.h> #include <cassert> #include <iostream> #include "utils.hpp" #include <hipcub/hipcub.hpp> #include "nonzero_cuda_impl.hpp" #define THREADS_PER_BLOCK 256 namespace CudaImpl { hipError_t NonZeroCalcPrefixSumTempStorageBytes(int* prefix_counts, int number_of_blocks, size_t& temp_storage_bytes, hipStream_t stream) { temp_storage_bytes = 0; return hipcub::DeviceScan::InclusiveSum( nullptr, temp_storage_bytes, prefix_counts, prefix_counts, number_of_blocks, stream); } hipError_t NonZeroInclusivePrefixSum(void* d_temp_storage, size_t temp_storage_bytes, int* prefix_counts, int number_of_blocks, hipStream_t stream) { return hipcub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, prefix_counts, prefix_counts, number_of_blocks, stream); } __global__ void NonZeroCountEachBlockKernel(const unsigned char* x, int x_size, int* count_in_blocks) { typedef hipcub::BlockReduce<int, THREADS_PER_BLOCK, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY> BlockReduceT; __shared__ typename BlockReduceT::TempStorage temp_storage; int index = blockIdx.x * blockDim.x + threadIdx.x; int nz = 0; if (index < x_size && x[index] == 1) ++nz; int count = BlockReduceT(temp_storage).Sum(nz); if (threadIdx.x == 0) { count_in_blocks[blockIdx.x] = count; } } __global__ void NonZeroOutputPositionsKernel(const unsigned char* x, const int x_size, const int* prefix_counts, int* results) { typedef hipcub::BlockScan<int, THREADS_PER_BLOCK> BlockScanT; __shared__ typename BlockScanT::TempStorage temp_storage; int index = blockIdx.x * blockDim.x + threadIdx.x; int nz = 0; if (index < x_size && x[index] == 1) ++nz; int pos_in_block = 0; BlockScanT(temp_storage).InclusiveSum(nz, pos_in_block); int result_position = ((blockIdx.x == 0) ? 0 : prefix_counts[blockIdx.x - 1]) + pos_in_block - nz; if (index < x_size && x[index] == 1) { results[result_position] = index; // printf("result_position %d index %d\n"); } } void NoneZeroCudaImpl(const unsigned char* input, const int inputSize, int* output, hipStream_t stream) { const int threadCount = THREADS_PER_BLOCK; int blockSize = threadCount; int gridSize = (inputSize + blockSize - 1) / blockSize; int index = inputSize / 2; size_t tempStorageBytes = 0; hipLaunchKernelGGL(( NonZeroCountEachBlockKernel), dim3(gridSize), dim3(blockSize), 0, stream, input, inputSize, output + index); NonZeroCalcPrefixSumTempStorageBytes(output + index, gridSize, tempStorageBytes, stream); NonZeroInclusivePrefixSum(output + index + gridSize, tempStorageBytes, output + index, gridSize, stream); hipLaunchKernelGGL(( NonZeroOutputPositionsKernel), dim3(gridSize), dim3(blockSize), 0, stream, input, inputSize, output + index, output); hipError_t cudastatus = hipGetLastError(); CHECK_ASSERT(cudastatus == hipSuccess, "launch failed: %s\n", hipGetErrorString(cudastatus)); } }
c28c3b13b4dd4c75eefa86d7cb5fae4f086a37f0.cu
#include <cmath> #include <stdio.h> #include <cassert> #include <iostream> #include "utils.hpp" #include <cub/cub.cuh> #include "nonzero_cuda_impl.hpp" #define THREADS_PER_BLOCK 256 namespace CudaImpl { cudaError_t NonZeroCalcPrefixSumTempStorageBytes(int* prefix_counts, int number_of_blocks, size_t& temp_storage_bytes, cudaStream_t stream) { temp_storage_bytes = 0; return cub::DeviceScan::InclusiveSum( nullptr, temp_storage_bytes, prefix_counts, prefix_counts, number_of_blocks, stream); } cudaError_t NonZeroInclusivePrefixSum(void* d_temp_storage, size_t temp_storage_bytes, int* prefix_counts, int number_of_blocks, cudaStream_t stream) { return cub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, prefix_counts, prefix_counts, number_of_blocks, stream); } __global__ void NonZeroCountEachBlockKernel(const unsigned char* x, int x_size, int* count_in_blocks) { typedef cub::BlockReduce<int, THREADS_PER_BLOCK, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY> BlockReduceT; __shared__ typename BlockReduceT::TempStorage temp_storage; int index = blockIdx.x * blockDim.x + threadIdx.x; int nz = 0; if (index < x_size && x[index] == 1) ++nz; int count = BlockReduceT(temp_storage).Sum(nz); if (threadIdx.x == 0) { count_in_blocks[blockIdx.x] = count; } } __global__ void NonZeroOutputPositionsKernel(const unsigned char* x, const int x_size, const int* prefix_counts, int* results) { typedef cub::BlockScan<int, THREADS_PER_BLOCK> BlockScanT; __shared__ typename BlockScanT::TempStorage temp_storage; int index = blockIdx.x * blockDim.x + threadIdx.x; int nz = 0; if (index < x_size && x[index] == 1) ++nz; int pos_in_block = 0; BlockScanT(temp_storage).InclusiveSum(nz, pos_in_block); int result_position = ((blockIdx.x == 0) ? 0 : prefix_counts[blockIdx.x - 1]) + pos_in_block - nz; if (index < x_size && x[index] == 1) { results[result_position] = index; // printf("result_position %d index %d\n"); } } void NoneZeroCudaImpl(const unsigned char* input, const int inputSize, int* output, cudaStream_t stream) { const int threadCount = THREADS_PER_BLOCK; int blockSize = threadCount; int gridSize = (inputSize + blockSize - 1) / blockSize; int index = inputSize / 2; size_t tempStorageBytes = 0; NonZeroCountEachBlockKernel<<<gridSize, blockSize, 0, stream>>>(input, inputSize, output + index); NonZeroCalcPrefixSumTempStorageBytes(output + index, gridSize, tempStorageBytes, stream); NonZeroInclusivePrefixSum(output + index + gridSize, tempStorageBytes, output + index, gridSize, stream); NonZeroOutputPositionsKernel<<<gridSize, blockSize, 0, stream>>>(input, inputSize, output + index, output); cudaError_t cudastatus = cudaGetLastError(); CHECK_ASSERT(cudastatus == cudaSuccess, "launch failed: %s\n", cudaGetErrorString(cudastatus)); } }
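The NonZero implementation above calls cub::DeviceScan::InclusiveSum twice: once with a null temp-storage pointer to learn how much scratch space the scan needs, and once with real scratch to run it. The standalone sketch below shows that two-phase protocol on a small array of 0/1 flags, the same kind of input the prefix sum receives here; the sizes and names are illustrative.

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int n = 8;
    int h_in[n] = {1, 0, 1, 1, 0, 0, 1, 1};
    int *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_out, n * sizeof(int));
    cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);

    void* d_temp = nullptr;
    size_t temp_bytes = 0;
    // Phase 1: null temp storage -> only reports the required scratch size
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n);
    cudaMalloc(&d_temp, temp_bytes);
    // Phase 2: real scratch -> performs the scan
    cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_in, d_out, n);

    int h_out[n];
    cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) printf("%d ", h_out[i]);  // 1 1 2 3 3 3 4 5
    printf("\n");

    cudaFree(d_temp);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}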
e22171d2c345208bfc9c63762f8cdec1c039cc70.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../inc/RayTracer_DynamicSkeleton.cuh" #include "../inc/RayTracer_Dynamic_Hitable.cuh" // Define // ... // Typedef // ... // Static Function Prototype // table __global__ static void config_sphere_setCenter (int8_t *ret, void *object, uint8_t *data, uint32_t size); __global__ static void config_sphere_setRadius (int8_t *ret, void *object, uint8_t *data, uint32_t size); __global__ static void config_trimesh_setPoint_0 (int8_t *ret, void *object, uint8_t *data, uint32_t size); __global__ static void config_trimesh_setPoint_1 (int8_t *ret, void *object, uint8_t *data, uint32_t size); __global__ static void config_trimesh_setPoint_2 (int8_t *ret, void *object, uint8_t *data, uint32_t size); // __global__ static void interact_aabb_addHitable (int *ret, void *object, void* *list, uint32_t size); // __global__ static void interact_aabb_rmHitable (int *ret, void *object, void* *list, uint32_t size); // skeleton Dynamic_CUDA_constructTypeSkeleton(sphere, SceneObject_Hitable, Hitable_Sphere); Dynamic_CUDA_constructTypeSkeleton(trimesh, SceneObject_Hitable, Hitable_Trimesh); // Dynamic_CUDA_constructTypeSkeleton(aabb, SceneObject_Hitable, Hitable_AABB); Dynamic_CUDA_constructTypeConfigLinker(sphere_setCenter, config_sphere_setCenter); Dynamic_CUDA_constructTypeConfigLinker(sphere_setRadius, config_sphere_setRadius); Dynamic_CUDA_constructTypeConfigLinker(trimesh_setPoint_0, config_trimesh_setPoint_0); Dynamic_CUDA_constructTypeConfigLinker(trimesh_setPoint_1, config_trimesh_setPoint_1); Dynamic_CUDA_constructTypeConfigLinker(trimesh_setPoint_2, config_trimesh_setPoint_2); // Dynamic_CUDA_constructTypeInteractLinker(aabb_addHitable, interact_aabb_addHitable); // Dynamic_CUDA_constructTypeInteractLinker(aabb_rmHitable, interact_aabb_rmHitable); // cuda linker function __global__ static void hitable_setMaterial (SceneObject_Hitable *hitable, Material *material); // Static Data // ... // Operation Handling __host__ void RayTracer_Dynamic_Hitable_init(std::vector<Dynamic_ContainerType*> *type_list) { // table Dynamic_CUDA_addTypeConfigLinker(sphere, sphere_setCenter); Dynamic_CUDA_addTypeConfigLinker(sphere, sphere_setRadius); Dynamic_CUDA_addTypeConfigLinker(trimesh, trimesh_setPoint_0); Dynamic_CUDA_addTypeConfigLinker(trimesh, trimesh_setPoint_1); Dynamic_CUDA_addTypeConfigLinker(trimesh, trimesh_setPoint_2); // Dynamic_CUDA_addInteractLinker(aabb, aabb_addHitable); // Dynamic_CUDA_addInteractLinker(aabb, aabb_rmHitable); // create type Dynamic_ContainerType *type; Dynamic_CUDA_addType(sphere, sphere, type_list); Dynamic_CUDA_addType(trimesh, trimesh, type_list); // Dynamic_CUDA_addType(aabb, aabb, type_list); } __host__ void RayTracer_Dynamic_Hitable_info() { } __host__ void RayTracer_Dynamic_Hitable_del() { } __host__ error_t Dynamic_Hitable_setMaterial(SceneObject_Hitable *hitable, Material *material) { hipLaunchKernelGGL(( hitable_setMaterial) , dim3(1), dim3(1) , 0, 0, hitable, material); return ERROR_NO; } // Static Function Implementation // ... 
// table __global__ static void config_sphere_setCenter(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Sphere *hitable = (Hitable_Sphere*)object; double *center = (double*)data; hitable->setCenter(Vec3f(center[0], center[1], center[2])); *ret = 0; } __global__ static void config_sphere_setRadius(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Sphere *hitable = (Hitable_Sphere*)object; double radius = *((double*)data); hitable->setRadius(radius); *ret = 0; } __global__ static void config_trimesh_setPoint_0(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Trimesh *hitable = (Hitable_Trimesh*)object; double *point = (double*)data; Vec3f vec_point = Vec3f(point[0], point[1], point[2]); hitable->setPoint(vec_point, hitable->point[1], hitable->point[2]); *ret = 0; } __global__ static void config_trimesh_setPoint_1(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Trimesh *hitable = (Hitable_Trimesh*)object; double *point = (double*)data; Vec3f vec_point = Vec3f(point[0], point[1], point[2]); hitable->setPoint(hitable->point[0], vec_point, hitable->point[2]); *ret = 0; } __global__ static void config_trimesh_setPoint_2(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Trimesh *hitable = (Hitable_Trimesh*)object; double *point = (double*)data; Vec3f vec_point = Vec3f(point[0], point[1], point[2]); hitable->setPoint(hitable->point[0], hitable->point[1], vec_point); *ret = 0; } // __global__ static void interact_aabb_addHitable(int8_t *ret, void *object, void* *list, uint32_t size) { // Hitable_AABB *hitable = (Hitable_AABB*)object; // SceneObject_Hitable *child = (SceneObject_Hitable*)(list[0]); // if (!hitable->addHitable(child)) return -1; // *ret = 0; // } // __global__ static void interact_aabb_rmHitable(int8_t *ret, void *object, void* *list, uint32_t size) { // Hitable_AABB *hitable = (Hitable_AABB*)object; // SceneObject_Hitable *child = (SceneObject_Hitable*)(list[0]); // if (!hitable->rmHitable(child)) return -1; // *ret = 0; // } // cuda linker function __global__ static void hitable_setMaterial(SceneObject_Hitable *hitable, Material *material) { hitable->setMaterial(material); }
e22171d2c345208bfc9c63762f8cdec1c039cc70.cu
#include "../inc/RayTracer_DynamicSkeleton.cuh" #include "../inc/RayTracer_Dynamic_Hitable.cuh" // Define // ... // Typedef // ... // Static Function Prototype // table __global__ static void config_sphere_setCenter (int8_t *ret, void *object, uint8_t *data, uint32_t size); __global__ static void config_sphere_setRadius (int8_t *ret, void *object, uint8_t *data, uint32_t size); __global__ static void config_trimesh_setPoint_0 (int8_t *ret, void *object, uint8_t *data, uint32_t size); __global__ static void config_trimesh_setPoint_1 (int8_t *ret, void *object, uint8_t *data, uint32_t size); __global__ static void config_trimesh_setPoint_2 (int8_t *ret, void *object, uint8_t *data, uint32_t size); // __global__ static void interact_aabb_addHitable (int *ret, void *object, void* *list, uint32_t size); // __global__ static void interact_aabb_rmHitable (int *ret, void *object, void* *list, uint32_t size); // skeleton Dynamic_CUDA_constructTypeSkeleton(sphere, SceneObject_Hitable, Hitable_Sphere); Dynamic_CUDA_constructTypeSkeleton(trimesh, SceneObject_Hitable, Hitable_Trimesh); // Dynamic_CUDA_constructTypeSkeleton(aabb, SceneObject_Hitable, Hitable_AABB); Dynamic_CUDA_constructTypeConfigLinker(sphere_setCenter, config_sphere_setCenter); Dynamic_CUDA_constructTypeConfigLinker(sphere_setRadius, config_sphere_setRadius); Dynamic_CUDA_constructTypeConfigLinker(trimesh_setPoint_0, config_trimesh_setPoint_0); Dynamic_CUDA_constructTypeConfigLinker(trimesh_setPoint_1, config_trimesh_setPoint_1); Dynamic_CUDA_constructTypeConfigLinker(trimesh_setPoint_2, config_trimesh_setPoint_2); // Dynamic_CUDA_constructTypeInteractLinker(aabb_addHitable, interact_aabb_addHitable); // Dynamic_CUDA_constructTypeInteractLinker(aabb_rmHitable, interact_aabb_rmHitable); // cuda linker function __global__ static void hitable_setMaterial (SceneObject_Hitable *hitable, Material *material); // Static Data // ... // Operation Handling __host__ void RayTracer_Dynamic_Hitable_init(std::vector<Dynamic_ContainerType*> *type_list) { // table Dynamic_CUDA_addTypeConfigLinker(sphere, sphere_setCenter); Dynamic_CUDA_addTypeConfigLinker(sphere, sphere_setRadius); Dynamic_CUDA_addTypeConfigLinker(trimesh, trimesh_setPoint_0); Dynamic_CUDA_addTypeConfigLinker(trimesh, trimesh_setPoint_1); Dynamic_CUDA_addTypeConfigLinker(trimesh, trimesh_setPoint_2); // Dynamic_CUDA_addInteractLinker(aabb, aabb_addHitable); // Dynamic_CUDA_addInteractLinker(aabb, aabb_rmHitable); // create type Dynamic_ContainerType *type; Dynamic_CUDA_addType(sphere, sphere, type_list); Dynamic_CUDA_addType(trimesh, trimesh, type_list); // Dynamic_CUDA_addType(aabb, aabb, type_list); } __host__ void RayTracer_Dynamic_Hitable_info() { } __host__ void RayTracer_Dynamic_Hitable_del() { } __host__ error_t Dynamic_Hitable_setMaterial(SceneObject_Hitable *hitable, Material *material) { hitable_setMaterial <<< 1, 1 >>> (hitable, material); return ERROR_NO; } // Static Function Implementation // ... 
// table __global__ static void config_sphere_setCenter(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Sphere *hitable = (Hitable_Sphere*)object; double *center = (double*)data; hitable->setCenter(Vec3f(center[0], center[1], center[2])); *ret = 0; } __global__ static void config_sphere_setRadius(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Sphere *hitable = (Hitable_Sphere*)object; double radius = *((double*)data); hitable->setRadius(radius); *ret = 0; } __global__ static void config_trimesh_setPoint_0(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Trimesh *hitable = (Hitable_Trimesh*)object; double *point = (double*)data; Vec3f vec_point = Vec3f(point[0], point[1], point[2]); hitable->setPoint(vec_point, hitable->point[1], hitable->point[2]); *ret = 0; } __global__ static void config_trimesh_setPoint_1(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Trimesh *hitable = (Hitable_Trimesh*)object; double *point = (double*)data; Vec3f vec_point = Vec3f(point[0], point[1], point[2]); hitable->setPoint(hitable->point[0], vec_point, hitable->point[2]); *ret = 0; } __global__ static void config_trimesh_setPoint_2(int8_t *ret, void *object, uint8_t *data, uint32_t size) { Hitable_Trimesh *hitable = (Hitable_Trimesh*)object; double *point = (double*)data; Vec3f vec_point = Vec3f(point[0], point[1], point[2]); hitable->setPoint(hitable->point[0], hitable->point[1], vec_point); *ret = 0; } // __global__ static void interact_aabb_addHitable(int8_t *ret, void *object, void* *list, uint32_t size) { // Hitable_AABB *hitable = (Hitable_AABB*)object; // SceneObject_Hitable *child = (SceneObject_Hitable*)(list[0]); // if (!hitable->addHitable(child)) return -1; // *ret = 0; // } // __global__ static void interact_aabb_rmHitable(int8_t *ret, void *object, void* *list, uint32_t size) { // Hitable_AABB *hitable = (Hitable_AABB*)object; // SceneObject_Hitable *child = (SceneObject_Hitable*)(list[0]); // if (!hitable->rmHitable(child)) return -1; // *ret = 0; // } // cuda linker function __global__ static void hitable_setMaterial(SceneObject_Hitable *hitable, Material *material) { hitable->setMaterial(material); }
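Dynamic_Hitable_setMaterial above mutates an object that lives only in device memory by launching hitable_setMaterial with a single thread instead of copying the object to the host and back. The sketch below isolates that one-thread-setter pattern; the Sphere struct and kernel names are assumptions, not the project's classes.

#include <cuda_runtime.h>
#include <cstdio>

struct Sphere { float radius; };

__global__ void setRadius(Sphere* s, float r)            { s->radius = r; }
__global__ void readRadius(const Sphere* s, float* out)  { *out = s->radius; }

int main()
{
    Sphere* d_sphere;
    float*  d_val;
    cudaMalloc(&d_sphere, sizeof(Sphere));
    cudaMalloc(&d_val, sizeof(float));

    setRadius<<<1, 1>>>(d_sphere, 2.5f);   // host-side "setter" via a one-thread kernel
    readRadius<<<1, 1>>>(d_sphere, d_val);

    float h_val = 0.f;
    cudaMemcpy(&h_val, d_val, sizeof(float), cudaMemcpyDeviceToHost);
    printf("radius = %f\n", h_val);        // expected 2.5

    cudaFree(d_sphere);
    cudaFree(d_val);
    return 0;
}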
AdaptiveAveragePooling3d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <c10/util/Exception.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/adaptive_avg_pool3d_backward_native.h> #include <ATen/ops/adaptive_avg_pool3d_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/zeros_like.h> #endif #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int64_t start_index(int64_t a, int64_t b, int64_t c) { return (a / b) * c + ((a % b) * c) / b; } __device__ inline int64_t end_index(int64_t a, int64_t b, int64_t c) { return 1 + ((a + 1) * c - 1) / b; } // 5d tensor B x D x T x H x W // All kernels view batch dim B and dim D as collapsed. /* * Description: * this function adaptively average pools an input 5D tensor along dimensions * 2, 3, and 4 5D input, 5D output * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragepool( scalar_t *input, scalar_t *output, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW, int64_t offsetZ) { // iterates on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // input offset by slice/feature and earliest relevant frame/time scalar_t *input_dt = input + d*istrideD + istartT*istrideT; // output offset by slice/feature and frame/time scalar_t *output_dt = output + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling from corresponding input pixels scalar_t *ptr_input = input_dt + istartH*istrideH + istartW*istrideW; scalar_t *ptr_output = output_dt + oh*osizeW + ow; accscalar_t sum = static_cast<accscalar_t>(0); int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { scalar_t val = ptr_input[ih*istrideH + iw*istrideW]; sum += static_cast<accscalar_t>(val); } } ptr_input += istrideT; // next input frame } // Update output const accscalar_t divide_factor = static_cast<accscalar_t>(kT * kH * kW); *ptr_output = static_cast<scalar_t>(sum / divide_factor); } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragepool_loop( scalar_t *input_data, scalar_t *output_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = ::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); hipLaunchKernelGGL(( adaptiveaveragepool<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragegradinput( scalar_t *gradInput, scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on input pixels int it, ih, iw; // compute offsets based on thread/block ID int istartH = blockIdx.y * blockDim.y + threadIdx.y; int iendH = isizeH; int istepH = gridDim.y * blockDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // select input plane int64_t i_plane = blockIdx.x + offsetZ; it = i_plane % isizeT; // output frame/time int d = i_plane / isizeT; // slice/feature // output frame/time range is fixed. int ostartT = start_index(it, isizeT, osizeT); int oendT = end_index(it, isizeT, osizeT); // gradInput offset by slice/feature and frame/time. scalar_t *gradInput_dt = gradInput + i_plane*isizeH*isizeW; // gradOutput offset by slice/feature and earliest relevant frame/time scalar_t *gradOutput_dt = gradOutput + (d*osizeT + ostartT)*osizeH*osizeW; // For all input pixels... 
for (ih = istartH; ih < iendH; ih += istepH) { int ostartH = start_index(ih, isizeH, osizeH); int oendH = end_index(ih, isizeH, osizeH); for (iw = istartW; iw < iendW; iw += istepW) { int ostartW = start_index(iw, isizeW, osizeW); int oendW = end_index(iw, isizeW, osizeW); // Compute the gradients from corresponding output pixels scalar_t *ptr_gradInput = gradInput_dt + ih*isizeW + iw; scalar_t *ptr_gradOutput = gradOutput_dt; // for all relevant output pixels int ot, oh, ow; for (ot = ostartT; ot < oendT; ++ot) { int kT = end_index(ot, osizeT, isizeT) - start_index(ot, osizeT, isizeT); for (oh = ostartH; oh < oendH; ++oh) { int kH = end_index(oh, osizeH, isizeH) - start_index(oh, osizeH, isizeH); for (ow = ostartW; ow < oendW; ++ow) { int kW = end_index(ow, osizeW, isizeW) - start_index(ow, osizeW, isizeW); const accscalar_t divide_factor = kW * kH * kT; accscalar_t grad_delta = static_cast<accscalar_t>(ptr_gradOutput[oh*osizeW + ow] / divide_factor); *ptr_gradInput += static_cast<scalar_t>(grad_delta); } } ptr_gradOutput += osizeH*osizeW; // next output frame } } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragegradinput_loop( scalar_t *gradInput_data, scalar_t *gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = ::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); hipLaunchKernelGGL(( adaptiveaveragegradinput<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). * * (uses atomic add) * */ template <typename scalar_t> __global__ void atomicadaptiveaveragegradinput( scalar_t *gradInput, scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // output slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // gradInput offset by slice/feature and earliest relevant frame/time scalar_t *gradInput_nt = gradInput + (d*isizeT + istartT)*isizeH*isizeW; // gradOutput offset by slice/feature and frame/time scalar_t *gradOutput_nt = gradOutput + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients from corresponding input pixels scalar_t *ptr_gradInput = gradInput_nt + istartH*isizeW + istartW; scalar_t *ptr_gradOutput = gradOutput_nt + oh*osizeW + ow; scalar_t grad_delta = *ptr_gradOutput / kT / kH / kW; int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { gpuAtomicAddNoReturn(&(ptr_gradInput[ih*isizeW + iw]), grad_delta); } } ptr_gradInput += isizeH*isizeW; // next input frame } } } } template <typename scalar_t> void atomicadaptiveaveragegradinput_loop( scalar_t* gradInput_data, scalar_t* gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); int blocksH = ::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); hipLaunchKernelGGL(( atomicadaptiveaveragegradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } // 5D tensor B x D x T x H x w void adaptive_avg_pool3d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef& output_size) { TensorArg output_arg{output, "output", 1}; TensorArg input_arg{input_, "input_", 2}; checkAllSameGPU("adaptive_avg_pool3d_cuda", {output_arg, input_arg}); for (int64_t i = 1; i < input_.ndimension(); i++) { TORCH_CHECK( input_.size(i) > 0, "adaptive_avg_pool3d_cuda(): Expected input to have non-zero size for non-batch dimensions, " "but input has sizes ", input_.sizes(), " with dimension ", i, " being empty"); } TORCH_CHECK( (input_.ndimension() == 4 || input_.ndimension() == 5), "adaptive_avg_pool3d_cuda(): Expected 4D or 5D tensor, but got ", input_.sizes()); // the jit sometimes passes output_size.size() == 1 TORCH_CHECK( output_size.size() == 1 || output_size.size() == 3, "adaptive_avg_pool3d: internal error: output_size.size() must be 1 or 3"); int64_t osizeT = output_size[0]; int64_t osizeH = output_size[1]; int64_t osizeW = output_size[2]; int64_t sizeD, isizeT, isizeH, isizeW; int64_t istrideD, istrideT, istrideH, istrideW; int64_t totalZ; const Tensor& input = input_.ndimension() == 4 ? 
input_ : input_.contiguous(); if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); istrideD = input.stride(0); istrideT = input.stride(1); istrideH = input.stride(2); istrideW = input.stride(3); output.resize_({sizeD, osizeT, osizeH, osizeW}); totalZ = sizeD * osizeT; } else { int64_t sizeB = input.size(0); sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); istrideD = input.stride(1); istrideT = input.stride(2); istrideH = input.stride(3); istrideW = input.stride(4); output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW}); totalZ = sizeB * sizeD * osizeT; } if (output.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); adaptiveaveragepool_loop<scalar_t, accscalar_t>( input_data, output_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW); }); } void adaptive_avg_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput_, "gradOutput_", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( "adaptive_avg_pool3d_out_cuda", {grad_input_arg, grad_output_arg, input_arg}); const Tensor gradOutput = gradOutput_.contiguous(); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); int64_t sizeD, isizeT, isizeH, isizeW; int64_t osizeT, osizeH, osizeW; int64_t totalZ; if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); osizeT = gradOutput.size(1); osizeH = gradOutput.size(2); osizeW = gradOutput.size(3); } else { sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); osizeT = gradOutput.size(2); osizeH = gradOutput.size(3); osizeW = gradOutput.size(4); } bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0); if (input.ndimension() == 4) { totalZ = atomic ? sizeD * osizeT : sizeD * isizeT; } else { int sizeB = input.size(0); totalZ = atomic ? 
sizeB * sizeD * osizeT : sizeB * sizeD * isizeT; } if (atomic) { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>(); atomicadaptiveaveragegradinput_loop( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>(); adaptiveaveragegradinput_loop<scalar_t, accscalar_t>( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } } } // namespace Tensor& adaptive_avg_pool3d_out_cuda(const Tensor& input, IntArrayRef output_size, Tensor& output) { adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor adaptive_avg_pool3d_cuda( const Tensor& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor& adaptive_avg_pool3d_backward_out_cuda(const Tensor& gradOutput_, const Tensor& input, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_out_cuda"); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } Tensor adaptive_avg_pool3d_backward_cuda( const Tensor& gradOutput_, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } } // namespace native } // namespace at
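Both halves of this pair rely on the start_index/end_index helpers to map each adaptive-pooling output bin to a contiguous input range: bin a of b over c inputs starts at floor(a*c/b) and ends at ceil((a+1)*c/b). The short host-side check below prints those ranges for 10 inputs pooled into 4 bins; adjacent bins cover the whole input and may overlap by one element when b does not divide c. The sizes are illustrative.

#include <cstdio>

// same index mapping as the __device__ helpers above, evaluated on the host
static long start_index(long a, long b, long c) { return (a / b) * c + ((a % b) * c) / b; }
static long end_index(long a, long b, long c)   { return 1 + ((a + 1) * c - 1) / b; }

int main()
{
    const long isize = 10, osize = 4;   // pool 10 input elements into 4 adaptive bins
    for (long o = 0; o < osize; ++o)
        printf("bin %ld -> input [%ld, %ld)\n",
               o, start_index(o, osize, isize), end_index(o, osize, isize));
    // prints: [0,3) [2,5) [5,8) [7,10)
    return 0;
}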
AdaptiveAveragePooling3d.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <c10/util/Exception.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/adaptive_avg_pool3d_backward_native.h> #include <ATen/ops/adaptive_avg_pool3d_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/zeros_like.h> #endif #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int64_t start_index(int64_t a, int64_t b, int64_t c) { return (a / b) * c + ((a % b) * c) / b; } __device__ inline int64_t end_index(int64_t a, int64_t b, int64_t c) { return 1 + ((a + 1) * c - 1) / b; } // 5d tensor B x D x T x H x W // All kernels view batch dim B and dim D as collapsed. /* * Description: * this function adaptively average pools an input 5D tensor along dimensions * 2, 3, and 4 5D input, 5D output * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragepool( scalar_t *input, scalar_t *output, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW, int64_t offsetZ) { // iterates on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // input offset by slice/feature and earliest relevant frame/time scalar_t *input_dt = input + d*istrideD + istartT*istrideT; // output offset by slice/feature and frame/time scalar_t *output_dt = output + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling from corresponding input pixels scalar_t *ptr_input = input_dt + istartH*istrideH + istartW*istrideW; scalar_t *ptr_output = output_dt + oh*osizeW + ow; accscalar_t sum = static_cast<accscalar_t>(0); int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { scalar_t val = ptr_input[ih*istrideH + iw*istrideW]; sum += static_cast<accscalar_t>(val); } } ptr_input += istrideT; // next input frame } // Update output const accscalar_t divide_factor = static_cast<accscalar_t>(kT * kH * kW); *ptr_output = static_cast<scalar_t>(sum / divide_factor); } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragepool_loop( scalar_t *input_data, scalar_t *output_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = std::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); adaptiveaveragepool<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragegradinput( scalar_t *gradInput, scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on input pixels int it, ih, iw; // compute offsets based on thread/block ID int istartH = blockIdx.y * blockDim.y + threadIdx.y; int iendH = isizeH; int istepH = gridDim.y * blockDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // select input plane int64_t i_plane = blockIdx.x + offsetZ; it = i_plane % isizeT; // output frame/time int d = i_plane / isizeT; // slice/feature // output frame/time range is fixed. int ostartT = start_index(it, isizeT, osizeT); int oendT = end_index(it, isizeT, osizeT); // gradInput offset by slice/feature and frame/time. scalar_t *gradInput_dt = gradInput + i_plane*isizeH*isizeW; // gradOutput offset by slice/feature and earliest relevant frame/time scalar_t *gradOutput_dt = gradOutput + (d*osizeT + ostartT)*osizeH*osizeW; // For all input pixels... 
for (ih = istartH; ih < iendH; ih += istepH) { int ostartH = start_index(ih, isizeH, osizeH); int oendH = end_index(ih, isizeH, osizeH); for (iw = istartW; iw < iendW; iw += istepW) { int ostartW = start_index(iw, isizeW, osizeW); int oendW = end_index(iw, isizeW, osizeW); // Compute the gradients from corresponding output pixels scalar_t *ptr_gradInput = gradInput_dt + ih*isizeW + iw; scalar_t *ptr_gradOutput = gradOutput_dt; // for all relevant output pixels int ot, oh, ow; for (ot = ostartT; ot < oendT; ++ot) { int kT = end_index(ot, osizeT, isizeT) - start_index(ot, osizeT, isizeT); for (oh = ostartH; oh < oendH; ++oh) { int kH = end_index(oh, osizeH, isizeH) - start_index(oh, osizeH, isizeH); for (ow = ostartW; ow < oendW; ++ow) { int kW = end_index(ow, osizeW, isizeW) - start_index(ow, osizeW, isizeW); const accscalar_t divide_factor = kW * kH * kT; accscalar_t grad_delta = static_cast<accscalar_t>(ptr_gradOutput[oh*osizeW + ow] / divide_factor); *ptr_gradInput += static_cast<scalar_t>(grad_delta); } } ptr_gradOutput += osizeH*osizeW; // next output frame } } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragegradinput_loop( scalar_t *gradInput_data, scalar_t *gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = std::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); adaptiveaveragegradinput<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). * * (uses atomic add) * */ template <typename scalar_t> __global__ void atomicadaptiveaveragegradinput( scalar_t *gradInput, scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // output slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // gradInput offset by slice/feature and earliest relevant frame/time scalar_t *gradInput_nt = gradInput + (d*isizeT + istartT)*isizeH*isizeW; // gradOutput offset by slice/feature and frame/time scalar_t *gradOutput_nt = gradOutput + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients from corresponding input pixels scalar_t *ptr_gradInput = gradInput_nt + istartH*isizeW + istartW; scalar_t *ptr_gradOutput = gradOutput_nt + oh*osizeW + ow; scalar_t grad_delta = *ptr_gradOutput / kT / kH / kW; int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { gpuAtomicAddNoReturn(&(ptr_gradInput[ih*isizeW + iw]), grad_delta); } } ptr_gradInput += isizeH*isizeW; // next input frame } } } } template <typename scalar_t> void atomicadaptiveaveragegradinput_loop( scalar_t* gradInput_data, scalar_t* gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); int blocksH = std::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); atomicadaptiveaveragegradinput<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } // 5D tensor B x D x T x H x w void adaptive_avg_pool3d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef& output_size) { TensorArg output_arg{output, "output", 1}; TensorArg input_arg{input_, "input_", 2}; checkAllSameGPU("adaptive_avg_pool3d_cuda", {output_arg, input_arg}); for (int64_t i = 1; i < input_.ndimension(); i++) { TORCH_CHECK( input_.size(i) > 0, "adaptive_avg_pool3d_cuda(): Expected input to have non-zero size for non-batch dimensions, " "but input has sizes ", input_.sizes(), " with dimension ", i, " being empty"); } TORCH_CHECK( (input_.ndimension() == 4 || input_.ndimension() == 5), "adaptive_avg_pool3d_cuda(): Expected 4D or 5D tensor, but got ", input_.sizes()); // the jit sometimes passes output_size.size() == 1 TORCH_CHECK( output_size.size() == 1 || output_size.size() == 3, "adaptive_avg_pool3d: internal error: output_size.size() must be 1 or 3"); int64_t osizeT = output_size[0]; int64_t osizeH = output_size[1]; int64_t osizeW = output_size[2]; int64_t sizeD, isizeT, isizeH, isizeW; int64_t istrideD, istrideT, istrideH, istrideW; int64_t totalZ; const Tensor& input = input_.ndimension() == 4 ? 
input_ : input_.contiguous(); if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); istrideD = input.stride(0); istrideT = input.stride(1); istrideH = input.stride(2); istrideW = input.stride(3); output.resize_({sizeD, osizeT, osizeH, osizeW}); totalZ = sizeD * osizeT; } else { int64_t sizeB = input.size(0); sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); istrideD = input.stride(1); istrideT = input.stride(2); istrideH = input.stride(3); istrideW = input.stride(4); output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW}); totalZ = sizeB * sizeD * osizeT; } if (output.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); adaptiveaveragepool_loop<scalar_t, accscalar_t>( input_data, output_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW); }); } void adaptive_avg_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput_, "gradOutput_", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( "adaptive_avg_pool3d_out_cuda", {grad_input_arg, grad_output_arg, input_arg}); const Tensor gradOutput = gradOutput_.contiguous(); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); int64_t sizeD, isizeT, isizeH, isizeW; int64_t osizeT, osizeH, osizeW; int64_t totalZ; if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); osizeT = gradOutput.size(1); osizeH = gradOutput.size(2); osizeW = gradOutput.size(3); } else { sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); osizeT = gradOutput.size(2); osizeH = gradOutput.size(3); osizeW = gradOutput.size(4); } bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0); if (input.ndimension() == 4) { totalZ = atomic ? sizeD * osizeT : sizeD * isizeT; } else { int sizeB = input.size(0); totalZ = atomic ? 
sizeB * sizeD * osizeT : sizeB * sizeD * isizeT; } if (atomic) { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>(); atomicadaptiveaveragegradinput_loop( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>(); adaptiveaveragegradinput_loop<scalar_t, accscalar_t>( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } } } // namespace Tensor& adaptive_avg_pool3d_out_cuda(const Tensor& input, IntArrayRef output_size, Tensor& output) { adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor adaptive_avg_pool3d_cuda( const Tensor& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor& adaptive_avg_pool3d_backward_out_cuda(const Tensor& gradOutput_, const Tensor& input, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_out_cuda"); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } Tensor adaptive_avg_pool3d_backward_cuda( const Tensor& gradOutput_, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } } // namespace native } // namespace at
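// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the adaptive pooling
// kernels above carve each input dimension into output-sized windows with
// start_index/end_index. The host-side helpers below only mirror those
// device formulas so the window layout can be checked on the CPU; the
// function names and the 7 -> 3 example size are hypothetical.
#include <cstdio>
#include <cstdint>

static inline int64_t host_start_index(int64_t a, int64_t b, int64_t c) {
  return (a / b) * c + ((a % b) * c) / b;   // same formula as the __device__ start_index
}
static inline int64_t host_end_index(int64_t a, int64_t b, int64_t c) {
  return 1 + ((a + 1) * c - 1) / b;         // same formula as the __device__ end_index
}

int main() {
  const int64_t osize = 3, isize = 7;       // adaptive pooling of extent 7 down to 3
  for (int64_t o = 0; o < osize; ++o) {
    int64_t s = host_start_index(o, osize, isize);
    int64_t e = host_end_index(o, osize, isize);
    // Windows for 7 -> 3 are [0,3), [2,5), [4,7): they overlap and cover the input.
    std::printf("output %lld averages input [%lld, %lld)\n",
                (long long)o, (long long)s, (long long)e);
  }
  return 0;
}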
79bf5aee1f0f7177c25405f7490812c07db3ae2e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layers/im2col_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe { // Forward declare kernel functions template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype *data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, Dtype *data_col); template <typename Dtype, int num_axes> __global__ void im2col_nd_gpu_kernel(const int n, const Dtype *data_im, const int *im_shape, const int *col_shape, const int *kernel_shape, const int *pad, const int *stride, const int *dilation, Dtype *data_col); template <typename Dtype> class Im2colKernelTest : public GPUDeviceTest<Dtype> { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 15, 15)), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_dilation_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 2); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); blob_dilation_->Reshape(dim_blob_shape); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; dilation_ = 3; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; for (int i = 0; i < 2; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; blob_dilation_->mutable_cpu_data()[i] = dilation_; } } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; delete blob_dilation_; } Blob<int> *const blob_kernel_shape_; Blob<int> *const blob_stride_; Blob<int> *const blob_pad_; Blob<int> *const blob_dilation_; Blob<Dtype> *const blob_bottom_; Blob<Dtype> *const blob_top_; Blob<Dtype> *const blob_top_cpu_; int height_; int width_; int channels_; int pad_; int stride_; int dilation_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, Test2D) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); const TypeParam *bottom_data = this->blob_bottom_->gpu_data(); TypeParam *top_data = this->blob_top_->mutable_gpu_data(); TypeParam *cpu_data = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), 
this->channels_, this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, cpu_data + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim / grid_div; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel<TypeParam>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, this->height_col_, this->width_col_, top_data + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = cpu_data[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } TYPED_TEST(Im2colKernelTest, TestND) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam *bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam *top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), this->blob_dilation_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam *bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam *top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_nd_gpu_kernel<TypeParam, 2>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe
79bf5aee1f0f7177c25405f7490812c07db3ae2e.cu
#include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layers/im2col_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe { // Forward declare kernel functions template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype *data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, Dtype *data_col); template <typename Dtype, int num_axes> __global__ void im2col_nd_gpu_kernel(const int n, const Dtype *data_im, const int *im_shape, const int *col_shape, const int *kernel_shape, const int *pad, const int *stride, const int *dilation, Dtype *data_col); template <typename Dtype> class Im2colKernelTest : public GPUDeviceTest<Dtype> { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 15, 15)), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_dilation_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 2); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); blob_dilation_->Reshape(dim_blob_shape); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; dilation_ = 3; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; for (int i = 0; i < 2; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; blob_dilation_->mutable_cpu_data()[i] = dilation_; } } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; delete blob_dilation_; } Blob<int> *const blob_kernel_shape_; Blob<int> *const blob_stride_; Blob<int> *const blob_pad_; Blob<int> *const blob_dilation_; Blob<Dtype> *const blob_bottom_; Blob<Dtype> *const blob_top_; Blob<Dtype> *const blob_top_cpu_; int height_; int width_; int channels_; int pad_; int stride_; int dilation_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, Test2D) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); const TypeParam *bottom_data = this->blob_bottom_->gpu_data(); TypeParam *top_data = this->blob_top_->mutable_gpu_data(); TypeParam *cpu_data = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), this->channels_, this->height_, this->width_, this->kernel_size_, this->kernel_size_, 
this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, cpu_data + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim / grid_div; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel<TypeParam><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, this->height_col_, this->width_col_, top_data + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = cpu_data[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } TYPED_TEST(Im2colKernelTest, TestND) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam *bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam *top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), this->blob_dilation_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam *bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam *top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) im2col_nd_gpu_kernel<TypeParam, 2><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe
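// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original test): the fixture above sizes
// its im2col output with the usual dilated-convolution formula
//   out = (in + 2*pad - (dilation*(kernel-1) + 1)) / stride + 1.
// This stand-alone host check just evaluates that formula for the fixture's
// parameters (15x15 input, pad 0, stride 2, dilation 3, kernel 3), which gives
// a 5x5 column grid; it is not Caffe code.
#include <cstdio>

static int conv_out_size(int in, int pad, int stride, int dilation, int kernel) {
  int effective_kernel = dilation * (kernel - 1) + 1;   // dilated kernel extent (= 7 here)
  return (in + 2 * pad - effective_kernel) / stride + 1;
}

int main() {
  int height = 15, width = 15, pad = 0, stride = 2, dilation = 3, kernel = 3;
  int height_col = conv_out_size(height, pad, stride, dilation, kernel);  // (15 - 7)/2 + 1 = 5
  int width_col  = conv_out_size(width,  pad, stride, dilation, kernel);  // 5
  std::printf("im2col output grid: %d x %d\n", height_col, width_col);
  return 0;
}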
af6f81b0cf2b0d6310b8a3cbb0031f85fd9cd42b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2019 Daniil Kazantsev Copyright 2019 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "TV_ROF_GPU_core.h" #include "shared.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> /* C-OMP implementation of ROF-TV denoising/regularization model [1] (2D/3D case) * * Input Parameters: * 1. Noisy image/volume [REQUIRED] * 2. lambda - regularization parameter (a constant or the same size as input (1)) * 3. tau - marching step for explicit scheme, ~1 is recommended [REQUIRED] * 4. Number of iterations, for explicit scheme >= 150 is recommended [REQUIRED] * 5. eplsilon: tolerance constant * 6. GPU device number if for multigpu run (default 0) * Output: * [1] Regularised image/volume * [2] Information vector which contains [iteration no., reached tolerance] * * This function is based on the paper by * [1] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms" */ #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define EPS 1.0e-8 #define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) ) #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #define MIN(x, y) (((x) < (y)) ? 
(x) : (y)) __host__ __device__ int sign (float x) { return (x > 0) - (x < 0); } /*********************2D case****************************/ /* differences 1 */ __global__ void D1_func2D(float* Input, float* D1, int N, int M) { int i1, j1, i2; float NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; if ((i >= 0) && (i < N) && (j >= 0) && (j < M)) { /* boundary conditions (Neumann reflections) */ i1 = i + 1; if (i1 >= N) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= M) j1 = j-1; /* Forward-backward differences */ NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */ NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[j*N + i2]; /* y- */ denom1 = NOMx_1*NOMx_1; denom2 = 0.5f*(sign((float)NOMy_1) + sign((float)NOMy_0))*(MIN(abs((float)NOMy_1), abs((float)NOMy_0))); denom2 = denom2*denom2; T1 = sqrt(denom1 + denom2 + EPS); D1[index] = NOMx_1/T1; } } /* differences 2 */ __global__ void D2_func2D(float* Input, float* D2, int N, int M) { int i1, j1, j2; float NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) { /* boundary conditions (Neumann reflections) */ i1 = i + 1; if (i1 >= N) i1 = i-1; j1 = j + 1; if (j1 >= M) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; /* Forward-backward differences */ NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */ NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */ NOMx_0 = Input[index] - Input[j2*N + i]; /* x- */ denom1 = NOMy_1*NOMy_1; denom2 = 0.5f*(sign((float)NOMx_1) + sign((float)NOMx_0))*(MIN(abs((float)NOMx_1), abs((float)NOMx_0))); denom2 = denom2*denom2; T2 = sqrt(denom1 + denom2 + EPS); D2[index] = NOMy_1/T2; } } __global__ void TV_kernel2D(float *D1, float *D2, float *Update, float *Input, float *lambdaPar_d, int lambda_is_arr, float tau, int N, int M) { int i2, j2; float dv1,dv2,lambda_val; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; lambda_val = *(lambdaPar_d + index* lambda_is_arr); if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) { /* boundary conditions (Neumann reflections) */ i2 = i - 1; if (i2 < 0) i2 = i+1; j2 = j - 1; if (j2 < 0) j2 = j+1; /* divergence components */ dv1 = D1[index] - D1[j2*N + i]; dv2 = D2[index] - D2[j*N + i2]; Update[index] += tau*(lambda_val*(dv1 + dv2) - (Update[index] - Input[index])); } } /*********************3D case****************************/ /* differences 1 */ __global__ void D1_func3D(float* Input, float* D1, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2,denom3, T1; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + j1*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + j*dimX + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - 
Input[(dimX*dimY)*k + j*dimX + i2]; /* y- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + j*dimX + i]; /* z- */ denom1 = NOMx_1*NOMx_1; denom2 = 0.5*(sign(NOMy_1) + sign(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0))); denom2 = denom2*denom2; denom3 = 0.5*(sign(NOMz_1) + sign(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0))); denom3 = denom3*denom3; T1 = sqrt(denom1 + denom2 + denom3 + EPS); D1[index] = NOMx_1/T1; } } /* differences 2 */ __global__ void D2_func3D(float* Input, float* D2, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */ NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */ denom1 = NOMy_1*NOMy_1; denom2 = 0.5*(sign(NOMx_1) + sign(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0))); denom2 = denom2*denom2; denom3 = 0.5*(sign(NOMz_1) + sign(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0))); denom3 = denom3*denom3; T2 = sqrt(denom1 + denom2 + denom3 + EPS); D2[index] = NOMy_1/T2; } } /* differences 3 */ __global__ void D3_func3D(float* Input, float* D3, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */ NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ denom1 = NOMz_1*NOMz_1; denom2 = 0.5*(sign(NOMx_1) + sign(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0))); denom2 = denom2*denom2; denom3 = 0.5*(sign(NOMy_1) + sign(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0))); denom3 = denom3*denom3; T3 = sqrt(denom1 + denom2 + denom3 + EPS); D3[index] = NOMz_1/T3; } } __global__ void TV_kernel3D(float *D1, float *D2, float *D3, float *Update, float *Input, float *lambda, int lambda_is_arr, float tau, int dimX, int dimY, int dimZ) { float dv1, dv2, dv3, lambda_val; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int 
k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; lambda_val = *(lambda + index* lambda_is_arr); if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /*divergence components */ dv1 = D1[index] - D1[(dimX*dimY)*k + j2*dimX+i]; dv2 = D2[index] - D2[(dimX*dimY)*k + j*dimX+i2]; dv3 = D3[index] - D3[(dimX*dimY)*k2 + j*dimX+i]; Update[index] += tau*(lambda_val*(dv1 + dv2 + dv3) - (Update[index] - Input[index])); } } __global__ void ROFcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void ROFResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } __global__ void ROFcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input[index]; } } __global__ void ROFResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } ///////////////////////////////////////////////// ///////////////// HOST FUNCTION ///////////////// extern "C" int TV_ROF_GPU_main(float* Input, float* Output, float *infovector, float *lambdaPar, int lambda_is_arr, int iter, float tau, float epsil, int gpu_device, int N, int M, int Z) { int deviceCount = -1; // number of devices hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } checkCudaErrors(hipSetDevice(gpu_device)); float re; re = 0.0f; int ImSize, count, n; count = 0; n = 0; float *d_input, *d_update, *d_D1, *d_D2, *d_update_prev=NULL, *lambdaPar_d=NULL; if (Z == 0) Z = 1; ImSize = N*M*Z; CHECK(hipMalloc((void**)&d_input,ImSize*sizeof(float))); CHECK(hipMalloc((void**)&d_update,ImSize*sizeof(float))); CHECK(hipMalloc((void**)&d_D1,ImSize*sizeof(float))); CHECK(hipMalloc((void**)&d_D2,ImSize*sizeof(float))); if (epsil != 0.0f) checkCudaErrors( hipMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); CHECK(hipMemcpy(d_input,Input,ImSize*sizeof(float),hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_update,Input,ImSize*sizeof(float),hipMemcpyHostToDevice)); /*dealing with spatially variant reglariser */ if (lambda_is_arr == 1) { CHECK(hipMalloc((void**)&lambdaPar_d,ImSize*sizeof(float))); CHECK(hipMemcpy(lambdaPar_d,lambdaPar,ImSize*sizeof(float),hipMemcpyHostToDevice)); } else { CHECK(hipMalloc((void**)&lambdaPar_d,1*sizeof(float))); CHECK(hipMemcpy(lambdaPar_d,lambdaPar,1*sizeof(float),hipMemcpyHostToDevice)); } if (Z 
== 1) { // TV - 2D case dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(N,BLKXSIZE2D), idivup(M,BLKYSIZE2D)); for(n=0; n < iter; n++) { if ((epsil != 0.0f) && (n % 5 == 0)) { hipLaunchKernelGGL(( ROFcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, N, M, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /* calculate differences */ hipLaunchKernelGGL(( D1_func2D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_D1, N, M); CHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( D2_func2D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_D2, N, M); CHECK(hipDeviceSynchronize()); /*running main kernel*/ hipLaunchKernelGGL(( TV_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_D1, d_D2, d_update, d_input, lambdaPar_d, lambda_is_arr, tau, N, M); CHECK(hipDeviceSynchronize()); if ((epsil != 0.0f) && (n % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ hipLaunchKernelGGL(( ROFResidCalc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, d_D1, N, M, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors( hipPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(d_D1, d_D1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } } else { // TV - 3D case dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE),idivup(Z,BLKXSIZE)); float *d_D3; CHECK(hipMalloc((void**)&d_D3,N*M*Z*sizeof(float))); for(n=0; n < iter; n++) { if ((epsil != 0.0f) && (n % 5 == 0)) { hipLaunchKernelGGL(( ROFcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, N, M, Z, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /* calculate differences */ hipLaunchKernelGGL(( D1_func3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_D1, N, M, Z); CHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( D2_func3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_D2, N, M, Z); CHECK(hipDeviceSynchronize()); hipLaunchKernelGGL(( D3_func3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_D3, N, M, Z); CHECK(hipDeviceSynchronize()); /*running main kernel*/ hipLaunchKernelGGL(( TV_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_D1, d_D2, d_D3, d_update, d_input, lambdaPar_d, lambda_is_arr, tau, N, M, Z); CHECK(hipDeviceSynchronize()); if ((epsil != 0.0f) && (n % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ hipLaunchKernelGGL(( ROFResidCalc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, d_D1, N, M, Z, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors( hipPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(d_D1, d_D1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); 
if (re < epsil) count++; if (count > 3) break; } } CHECK(hipFree(d_D3)); } CHECK(hipMemcpy(Output,d_update,N*M*Z*sizeof(float),hipMemcpyDeviceToHost)); if (epsil != 0.0f) hipFree(d_update_prev); CHECK(hipFree(d_input)); CHECK(hipFree(d_update)); CHECK(hipFree(d_D1)); CHECK(hipFree(d_D2)); CHECK(hipFree(lambdaPar_d)); infovector[0] = (float)(n); /*iterations number (if stopped earlier based on tolerance)*/ infovector[1] = re; /* reached tolerance */ checkCudaErrors( hipDeviceSynchronize() ); return 0; }
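// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the 2D branch above
// launches one thread per pixel using 16x16 blocks (BLKXSIZE2D/BLKYSIZE2D) and
// rounds the grid up with idivup. The tiny host check below reproduces that
// arithmetic for a hypothetical 500x375 image: 500/16 rounds up to 32 blocks
// in x, 375/16 rounds up to 24 blocks in y.
#include <cstdio>

static int idivup_host(int a, int b) { return (a % b != 0) ? a / b + 1 : a / b; }

int main() {
  const int N = 500, M = 375, block = 16;   // image size is an arbitrary example
  std::printf("grid = %d x %d blocks of %d x %d threads\n",
              idivup_host(N, block), idivup_host(M, block), block, block);
  return 0;
}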
af6f81b0cf2b0d6310b8a3cbb0031f85fd9cd42b.cu
/* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2019 Daniil Kazantsev Copyright 2019 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "TV_ROF_GPU_core.h" #include "shared.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> /* C-OMP implementation of ROF-TV denoising/regularization model [1] (2D/3D case) * * Input Parameters: * 1. Noisy image/volume [REQUIRED] * 2. lambda - regularization parameter (a constant or the same size as input (1)) * 3. tau - marching step for explicit scheme, ~1 is recommended [REQUIRED] * 4. Number of iterations, for explicit scheme >= 150 is recommended [REQUIRED] * 5. eplsilon: tolerance constant * 6. GPU device number if for multigpu run (default 0) * Output: * [1] Regularised image/volume * [2] Information vector which contains [iteration no., reached tolerance] * * This function is based on the paper by * [1] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms" */ #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define EPS 1.0e-8 #define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) ) #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #define MIN(x, y) (((x) < (y)) ? 
(x) : (y)) __host__ __device__ int sign (float x) { return (x > 0) - (x < 0); } /*********************2D case****************************/ /* differences 1 */ __global__ void D1_func2D(float* Input, float* D1, int N, int M) { int i1, j1, i2; float NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; if ((i >= 0) && (i < N) && (j >= 0) && (j < M)) { /* boundary conditions (Neumann reflections) */ i1 = i + 1; if (i1 >= N) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= M) j1 = j-1; /* Forward-backward differences */ NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */ NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[j*N + i2]; /* y- */ denom1 = NOMx_1*NOMx_1; denom2 = 0.5f*(sign((float)NOMy_1) + sign((float)NOMy_0))*(MIN(abs((float)NOMy_1), abs((float)NOMy_0))); denom2 = denom2*denom2; T1 = sqrt(denom1 + denom2 + EPS); D1[index] = NOMx_1/T1; } } /* differences 2 */ __global__ void D2_func2D(float* Input, float* D2, int N, int M) { int i1, j1, j2; float NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) { /* boundary conditions (Neumann reflections) */ i1 = i + 1; if (i1 >= N) i1 = i-1; j1 = j + 1; if (j1 >= M) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; /* Forward-backward differences */ NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */ NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */ NOMx_0 = Input[index] - Input[j2*N + i]; /* x- */ denom1 = NOMy_1*NOMy_1; denom2 = 0.5f*(sign((float)NOMx_1) + sign((float)NOMx_0))*(MIN(abs((float)NOMx_1), abs((float)NOMx_0))); denom2 = denom2*denom2; T2 = sqrt(denom1 + denom2 + EPS); D2[index] = NOMy_1/T2; } } __global__ void TV_kernel2D(float *D1, float *D2, float *Update, float *Input, float *lambdaPar_d, int lambda_is_arr, float tau, int N, int M) { int i2, j2; float dv1,dv2,lambda_val; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + N*j; lambda_val = *(lambdaPar_d + index* lambda_is_arr); if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) { /* boundary conditions (Neumann reflections) */ i2 = i - 1; if (i2 < 0) i2 = i+1; j2 = j - 1; if (j2 < 0) j2 = j+1; /* divergence components */ dv1 = D1[index] - D1[j2*N + i]; dv2 = D2[index] - D2[j*N + i2]; Update[index] += tau*(lambda_val*(dv1 + dv2) - (Update[index] - Input[index])); } } /*********************3D case****************************/ /* differences 1 */ __global__ void D1_func3D(float* Input, float* D1, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2,denom3, T1; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + j1*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + j*dimX + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - 
Input[(dimX*dimY)*k + j*dimX + i2]; /* y- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + j*dimX + i]; /* z- */ denom1 = NOMx_1*NOMx_1; denom2 = 0.5*(sign(NOMy_1) + sign(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0))); denom2 = denom2*denom2; denom3 = 0.5*(sign(NOMz_1) + sign(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0))); denom3 = denom3*denom3; T1 = sqrt(denom1 + denom2 + denom3 + EPS); D1[index] = NOMx_1/T1; } } /* differences 2 */ __global__ void D2_func3D(float* Input, float* D2, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */ NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */ denom1 = NOMy_1*NOMy_1; denom2 = 0.5*(sign(NOMx_1) + sign(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0))); denom2 = denom2*denom2; denom3 = 0.5*(sign(NOMz_1) + sign(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0))); denom3 = denom3*denom3; T2 = sqrt(denom1 + denom2 + denom3 + EPS); D2[index] = NOMy_1/T2; } } /* differences 3 */ __global__ void D3_func3D(float* Input, float* D3, int dimX, int dimY, int dimZ) { float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /* Forward-backward differences */ NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */ NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */ NOMy_0 = Input[index] - Input[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */ NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */ NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */ denom1 = NOMz_1*NOMz_1; denom2 = 0.5*(sign(NOMx_1) + sign(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0))); denom2 = denom2*denom2; denom3 = 0.5*(sign(NOMy_1) + sign(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0))); denom3 = denom3*denom3; T3 = sqrt(denom1 + denom2 + denom3 + EPS); D3[index] = NOMz_1/T3; } } __global__ void TV_kernel3D(float *D1, float *D2, float *D3, float *Update, float *Input, float *lambda, int lambda_is_arr, float tau, int dimX, int dimY, int dimZ) { float dv1, dv2, dv3, lambda_val; int i1,i2,k1,j1,j2,k2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int 
k = blockDim.z * blockIdx.z + threadIdx.z; int index = (dimX*dimY)*k + j*dimX+i; lambda_val = *(lambda + index* lambda_is_arr); if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* symmetric boundary conditions (Neuman) */ i1 = i + 1; if (i1 >= dimX) i1 = i-1; i2 = i - 1; if (i2 < 0) i2 = i+1; j1 = j + 1; if (j1 >= dimY) j1 = j-1; j2 = j - 1; if (j2 < 0) j2 = j+1; k1 = k + 1; if (k1 >= dimZ) k1 = k-1; k2 = k - 1; if (k2 < 0) k2 = k+1; /*divergence components */ dv1 = D1[index] - D1[(dimX*dimY)*k + j2*dimX+i]; dv2 = D2[index] - D2[(dimX*dimY)*k + j*dimX+i2]; dv3 = D3[index] - D3[(dimX*dimY)*k2 + j*dimX+i]; Update[index] += tau*(lambda_val*(dv1 + dv2 + dv3) - (Update[index] - Input[index])); } } __global__ void ROFcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void ROFResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } __global__ void ROFcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input[index]; } } __global__ void ROFResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } ///////////////////////////////////////////////// ///////////////// HOST FUNCTION ///////////////// extern "C" int TV_ROF_GPU_main(float* Input, float* Output, float *infovector, float *lambdaPar, int lambda_is_arr, int iter, float tau, float epsil, int gpu_device, int N, int M, int Z) { int deviceCount = -1; // number of devices cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } checkCudaErrors(cudaSetDevice(gpu_device)); float re; re = 0.0f; int ImSize, count, n; count = 0; n = 0; float *d_input, *d_update, *d_D1, *d_D2, *d_update_prev=NULL, *lambdaPar_d=NULL; if (Z == 0) Z = 1; ImSize = N*M*Z; CHECK(cudaMalloc((void**)&d_input,ImSize*sizeof(float))); CHECK(cudaMalloc((void**)&d_update,ImSize*sizeof(float))); CHECK(cudaMalloc((void**)&d_D1,ImSize*sizeof(float))); CHECK(cudaMalloc((void**)&d_D2,ImSize*sizeof(float))); if (epsil != 0.0f) checkCudaErrors( cudaMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); CHECK(cudaMemcpy(d_input,Input,ImSize*sizeof(float),cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_update,Input,ImSize*sizeof(float),cudaMemcpyHostToDevice)); /*dealing with spatially variant reglariser */ if (lambda_is_arr == 1) { CHECK(cudaMalloc((void**)&lambdaPar_d,ImSize*sizeof(float))); CHECK(cudaMemcpy(lambdaPar_d,lambdaPar,ImSize*sizeof(float),cudaMemcpyHostToDevice)); } else { CHECK(cudaMalloc((void**)&lambdaPar_d,1*sizeof(float))); 
CHECK(cudaMemcpy(lambdaPar_d,lambdaPar,1*sizeof(float),cudaMemcpyHostToDevice)); } if (Z == 1) { // TV - 2D case dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(N,BLKXSIZE2D), idivup(M,BLKYSIZE2D)); for(n=0; n < iter; n++) { if ((epsil != 0.0f) && (n % 5 == 0)) { ROFcopy_kernel2D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, N, M, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /* calculate differences */ D1_func2D<<<dimGrid,dimBlock>>>(d_update, d_D1, N, M); CHECK(cudaDeviceSynchronize()); D2_func2D<<<dimGrid,dimBlock>>>(d_update, d_D2, N, M); CHECK(cudaDeviceSynchronize()); /*running main kernel*/ TV_kernel2D<<<dimGrid,dimBlock>>>(d_D1, d_D2, d_update, d_input, lambdaPar_d, lambda_is_arr, tau, N, M); CHECK(cudaDeviceSynchronize()); if ((epsil != 0.0f) && (n % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ ROFResidCalc2D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, d_D1, N, M, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors( cudaPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(d_D1, d_D1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } } else { // TV - 3D case dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE),idivup(Z,BLKXSIZE)); float *d_D3; CHECK(cudaMalloc((void**)&d_D3,N*M*Z*sizeof(float))); for(n=0; n < iter; n++) { if ((epsil != 0.0f) && (n % 5 == 0)) { ROFcopy_kernel3D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, N, M, Z, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /* calculate differences */ D1_func3D<<<dimGrid,dimBlock>>>(d_update, d_D1, N, M, Z); CHECK(cudaDeviceSynchronize()); D2_func3D<<<dimGrid,dimBlock>>>(d_update, d_D2, N, M, Z); CHECK(cudaDeviceSynchronize()); D3_func3D<<<dimGrid,dimBlock>>>(d_update, d_D3, N, M, Z); CHECK(cudaDeviceSynchronize()); /*running main kernel*/ TV_kernel3D<<<dimGrid,dimBlock>>>(d_D1, d_D2, d_D3, d_update, d_input, lambdaPar_d, lambda_is_arr, tau, N, M, Z); CHECK(cudaDeviceSynchronize()); if ((epsil != 0.0f) && (n % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ ROFResidCalc3D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, d_D1, N, M, Z, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors( cudaPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(d_D1, d_D1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } CHECK(cudaFree(d_D3)); } CHECK(cudaMemcpy(Output,d_update,N*M*Z*sizeof(float),cudaMemcpyDeviceToHost)); if (epsil != 0.0f) cudaFree(d_update_prev); CHECK(cudaFree(d_input)); CHECK(cudaFree(d_update)); CHECK(cudaFree(d_D1)); CHECK(cudaFree(d_D2)); 
CHECK(cudaFree(lambdaPar_d)); infovector[0] = (float)(n); /*iterations number (if stopped earlier based on tolerance)*/ infovector[1] = re; /* reached tolerance */ checkCudaErrors( cudaDeviceSynchronize() ); return 0; }
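// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a minimal host-side
// call of TV_ROF_GPU_main() for a single 2D image with a constant lambda.
// The image size, iteration count, tau and epsilon below are hypothetical
// placeholder values chosen only to show the argument order; they are not
// recommendations, and real data must be copied into the input buffer first.
#include <vector>
#include <cstdio>

extern "C" int TV_ROF_GPU_main(float* Input, float* Output, float* infovector,
                               float* lambdaPar, int lambda_is_arr, int iter,
                               float tau, float epsil, int gpu_device,
                               int N, int M, int Z);

int run_rof_example() {
  const int N = 256, M = 256, Z = 0;        // Z == 0 selects the 2D branch inside the host function
  std::vector<float> noisy(N * M, 0.0f);    // placeholder: fill with the noisy image
  std::vector<float> denoised(N * M, 0.0f);
  float lambda = 0.05f;                     // constant regularisation weight (lambda_is_arr = 0)
  float info[2] = {0.0f, 0.0f};             // [iterations run, reached tolerance]

  int status = TV_ROF_GPU_main(noisy.data(), denoised.data(), info,
                               &lambda, /*lambda_is_arr=*/0, /*iter=*/300,
                               /*tau=*/0.01f, /*epsil=*/1e-5f, /*gpu_device=*/0,
                               N, M, Z);
  std::printf("status=%d, iterations=%.0f, tolerance=%f\n", status, info[0], info[1]);
  return status;
}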
9e393e6f79d2ade71dfb967ad290f96f2174ba51.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "reduction/Reduction.cuh" // This assumes that the number of threads is equal to the number of predictions/targets. // First, the squared differences between predictions and targets are stored in shared memory. // In the second step, the squared differences are summed up using a parallel reduction. // Finally, the sum is multiplied by 1/2. template <int blockSize> __global__ void squaredLossKernel (int numberEntries, float *predictions, float *targets, float *result) { int threadId = threadIdx.x; extern __shared__ float sharedData[]; if(threadId < numberEntries) { sharedData[threadId] = powf(predictions[threadId] - targets[threadId], 2.0); } __syncthreads(); reduce<blockSize>(threadId, sharedData, 0); if(threadId == 0) { result[0] = 0.5 * sharedData[0]; } }
9e393e6f79d2ade71dfb967ad290f96f2174ba51.cu
#include "reduction/Reduction.cuh" // This assumes that the number of threads is equal to the number of predictions/targets. // First, the squared differences between predictions and targets are stored in shared memory. // In the second step, the squared differences are summed up using a parallel reduction. // Finally, the sum is multiplied by 1/2. template <int blockSize> __global__ void squaredLossKernel (int numberEntries, float *predictions, float *targets, float *result) { int threadId = threadIdx.x; extern __shared__ float sharedData[]; if(threadId < numberEntries) { sharedData[threadId] = powf(predictions[threadId] - targets[threadId], 2.0); } __syncthreads(); reduce<blockSize>(threadId, sharedData, 0); if(threadId == 0) { result[0] = 0.5 * sharedData[0]; } }
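The squared-loss pair above depends on reduce&lt;blockSize&gt; from reduction/Reduction.cuh, which is not shown, so it cannot be built from this listing alone. As a hedged, self-contained illustration of the same single-block pattern (squared differences staged in shared memory, a tree reduction, thread 0 scaling the sum by 1/2), here is a small standalone program; the kernel name squaredLossBlock and the test values are ours, and it assumes exactly one thread per element with a power-of-two block size.

#include <cstdio>
#include <cuda_runtime.h>

// One block: stage squared differences in shared memory, tree-reduce, scale by 1/2.
__global__ void squaredLossBlock(const float* predictions, const float* targets, float* result)
{
    extern __shared__ float sharedData[];
    int t = threadIdx.x;

    float diff = predictions[t] - targets[t];
    sharedData[t] = diff * diff;
    __syncthreads();

    // Tree reduction; assumes blockDim.x is a power of two.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (t < stride) sharedData[t] += sharedData[t + stride];
        __syncthreads();
    }

    if (t == 0) result[0] = 0.5f * sharedData[0];
}

int main()
{
    const int n = 4;  // one thread per element, as the comments in the pair above assume
    float hPred[n] = {1.f, 2.f, 3.f, 4.f};
    float hTarg[n] = {1.f, 1.f, 1.f, 1.f};
    float hLoss = 0.f;

    float *dPred, *dTarg, *dLoss;
    cudaMalloc(&dPred, n * sizeof(float));
    cudaMalloc(&dTarg, n * sizeof(float));
    cudaMalloc(&dLoss, sizeof(float));
    cudaMemcpy(dPred, hPred, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dTarg, hTarg, n * sizeof(float), cudaMemcpyHostToDevice);

    squaredLossBlock<<<1, n, n * sizeof(float)>>>(dPred, dTarg, dLoss);
    cudaMemcpy(&hLoss, dLoss, sizeof(float), cudaMemcpyDeviceToHost);

    printf("loss = %f\n", hLoss);  // 0.5 * (0 + 1 + 4 + 9) = 7.0

    cudaFree(dPred); cudaFree(dTarg); cudaFree(dLoss);
    return 0;
}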
769c7bb39eacbd2929fe64084244aee29909a8ed.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include <iostream> #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime_api.h> #include "THH/THH.h" #include <ATen/hip/HIPContext.h> #include <torch/extension.h> #include <math.h> #include "softmax.h" #include "dropout.h" // symbol to be automatically resolved by PyTorch libs extern THCState *state; namespace multihead_attn { namespace fused_softmax { namespace additive_mask_softmax_dropout { std::vector<torch::Tensor> fwd_cuda( bool is_training, int heads, torch::Tensor const& input, const half* pad_mask, float dropout_prob ) { const int attn_batches = input.size(0); const int sequences = attn_batches / heads; const int q_seq_len = input.size(1); const int k_seq_len = q_seq_len; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; // There is no reason to use more than one stream as every kernel is // sequentially dependent hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); // 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code) auto act_options = input.options().requires_grad(false); auto mask_options = act_options.dtype(torch::kUInt8); torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options); // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax) void* input_ptr = static_cast<void*>(input.data_ptr()); void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); // Padded Softmax bool softmax_success = false; if (pad_mask == nullptr) { softmax_success = dispatch_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(input_ptr), k_seq_len, k_seq_len, attn_batches*q_seq_len); } else { softmax_success = dispatch_additive_masked_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(input_ptr), pad_mask, k_seq_len, k_seq_len, attn_batches*q_seq_len, attn_batches*q_seq_len/sequences); } if (is_training) { //use at:: function so that C++ version generates the same random mask as python version auto dropout_tuple = at::_fused_dropout(softmax_results, 1.0f-dropout_prob); //auto dropout_tuple = at::native_dropout(softmax_results, 1.0f-dropout_prob, 1./(1.0f-dropout_prob), true); dropout_results = std::get<0>(dropout_tuple); dropout_mask = std::get<1>(dropout_tuple); } // Matmul2 return { dropout_results, dropout_mask, softmax_results }; } torch::Tensor bwd_cuda( int heads, torch::Tensor const& output_grads, torch::Tensor const& softmax_results, torch::Tensor const& dropout_mask, float dropout_prob ) { const int attn_batches = output_grads.size(0); const int q_seq_len = output_grads.size(1); const int k_seq_len = q_seq_len; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; // TODO: Streams can be used in Backprop but I haven't added more than one // in my first attempt to create the code hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream(); hipblasSetStream(handle, stream); // Output Tensor Allocations // 
torch::Tensor input_grads = torch::empty_like(output_grads); // Apply Dropout Mask and Scale by Dropout Probability // Softmax Grad dispatch_masked_scale_softmax_backward_stream<half, half, float,false>( static_cast<half*>(output_grads.data_ptr()), static_cast<half*>(output_grads.data_ptr()), reinterpret_cast<half const*>(softmax_results.data_ptr()), static_cast<uint8_t const*>(dropout_mask.data_ptr()), 1.0/(1.0-dropout_prob), k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); //backward pass is completely in-place return output_grads; } } } }
769c7bb39eacbd2929fe64084244aee29909a8ed.cu
#include <vector> #include <iostream> #include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cuda_profiler_api.h> #include "THC/THC.h" #include <ATen/cuda/CUDAContext.h> #include <torch/extension.h> #include <math.h> #include "softmax.h" #include "dropout.h" // symbol to be automatically resolved by PyTorch libs extern THCState *state; namespace multihead_attn { namespace fused_softmax { namespace additive_mask_softmax_dropout { std::vector<torch::Tensor> fwd_cuda( bool is_training, int heads, torch::Tensor const& input, const half* pad_mask, float dropout_prob ) { const int attn_batches = input.size(0); const int sequences = attn_batches / heads; const int q_seq_len = input.size(1); const int k_seq_len = q_seq_len; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; // There is no reason to use more than one stream as every kernel is // sequentially dependent cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); // 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code) auto act_options = input.options().requires_grad(false); auto mask_options = act_options.dtype(torch::kUInt8); torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options); torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options); // Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax) void* input_ptr = static_cast<void*>(input.data_ptr()); void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr()); // Padded Softmax bool softmax_success = false; if (pad_mask == nullptr) { softmax_success = dispatch_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(input_ptr), k_seq_len, k_seq_len, attn_batches*q_seq_len); } else { softmax_success = dispatch_additive_masked_softmax<half, half, float>( reinterpret_cast<half*>(softmax_results_ptr), reinterpret_cast<const half*>(input_ptr), pad_mask, k_seq_len, k_seq_len, attn_batches*q_seq_len, attn_batches*q_seq_len/sequences); } if (is_training) { //use at:: function so that C++ version generates the same random mask as python version auto dropout_tuple = at::_fused_dropout(softmax_results, 1.0f-dropout_prob); //auto dropout_tuple = at::native_dropout(softmax_results, 1.0f-dropout_prob, 1./(1.0f-dropout_prob), true); dropout_results = std::get<0>(dropout_tuple); dropout_mask = std::get<1>(dropout_tuple); } // Matmul2 return { dropout_results, dropout_mask, softmax_results }; } torch::Tensor bwd_cuda( int heads, torch::Tensor const& output_grads, torch::Tensor const& softmax_results, torch::Tensor const& dropout_mask, float dropout_prob ) { const int attn_batches = output_grads.size(0); const int q_seq_len = output_grads.size(1); const int k_seq_len = q_seq_len; const int dropout_elems = attn_batches * q_seq_len * k_seq_len; // TODO: Streams can be used in Backprop but I haven't added more than one // in my first attempt to create the code cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); // Output Tensor Allocations // torch::Tensor input_grads = torch::empty_like(output_grads); // Apply Dropout Mask and Scale by Dropout Probability // 
Softmax Grad dispatch_masked_scale_softmax_backward_stream<half, half, float,false>( static_cast<half*>(output_grads.data_ptr()), static_cast<half*>(output_grads.data_ptr()), reinterpret_cast<half const*>(softmax_results.data_ptr()), static_cast<uint8_t const*>(dropout_mask.data_ptr()), 1.0/(1.0-dropout_prob), k_seq_len, k_seq_len, attn_batches*q_seq_len, stream); //backward pass is completely in-place return output_grads; } } } }
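In both halves of the pair above, the backward pass hands the dropout mask together with the scale 1.0/(1.0 - dropout_prob) to dispatch_masked_scale_softmax_backward_stream, i.e. inverted dropout is fused into the softmax gradient. Purely as an illustration of that mask-and-scale step, and not the extension's actual dispatch code, a standalone kernel could look like the sketch below; the name apply_dropout_mask and the float element type are our assumptions (the extension itself works in half precision).

#include <cstdint>

// grad_out = grad_in * mask * 1/(1-p): zero out dropped positions, rescale the survivors.
__global__ void apply_dropout_mask(const float* grad_in, const uint8_t* mask,
                                   float* grad_out, float scale, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        grad_out[i] = grad_in[i] * static_cast<float>(mask[i]) * scale;  // scale = 1/(1 - dropout_prob)
}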
5470335f471350f2287a5db78ffe930b11ca0d41.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); __global__ void saxpy_kernel(int N, float alpha, float* x, float* y, float* result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) result[index] = alpha * x[index] + y[index]; } void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) { int totalBytes = sizeof(float) * 3 * N; // compute number of blocks and threads per block const int threadsPerBlock = 512; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; float* device_x; float* device_y; float* device_result; // // TODO allocate device memory buffers on the GPU using hipMalloc // hipMalloc(&device_x, totalBytes/3); hipMalloc(&device_y, totalBytes/3); hipMalloc(&device_result, totalBytes/3); // start timing after allocation of device memory double startTime = CycleTimer::currentSeconds(); // // TODO copy input arrays to the GPU using hipMemcpy // hipMemcpy(device_x,xarray,totalBytes/3, hipMemcpyHostToDevice); hipMemcpy(device_y,yarray,totalBytes/3, hipMemcpyHostToDevice); printf("got here 2\n"); // run kernel hipLaunchKernelGGL(( saxpy_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, alpha, device_x, device_y, device_result); hipDeviceSynchronize(); printf("got here \n"); // // TODO copy result from GPU using hipMemcpy // hipMemcpy(resultarray,device_result, totalBytes/3, hipMemcpyDeviceToHost); // end timing after result has been copied back into host memory double endTime = CycleTimer::currentSeconds(); hipError_t errCode = hipPeekAtLastError(); if (errCode != hipSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode)); } double overallDuration = endTime - startTime; printf("Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); // TODO free memory buffers on the GPU hipFree(device_x); hipFree(device_y); hipFree(device_result); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
5470335f471350f2287a5db78ffe930b11ca0d41.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); __global__ void saxpy_kernel(int N, float alpha, float* x, float* y, float* result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) result[index] = alpha * x[index] + y[index]; } void saxpyCuda(int N, float alpha, float* xarray, float* yarray, float* resultarray) { int totalBytes = sizeof(float) * 3 * N; // compute number of blocks and threads per block const int threadsPerBlock = 512; const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock; float* device_x; float* device_y; float* device_result; // // TODO allocate device memory buffers on the GPU using cudaMalloc // cudaMalloc(&device_x, totalBytes/3); cudaMalloc(&device_y, totalBytes/3); cudaMalloc(&device_result, totalBytes/3); // start timing after allocation of device memory double startTime = CycleTimer::currentSeconds(); // // TODO copy input arrays to the GPU using cudaMemcpy // cudaMemcpy(device_x,xarray,totalBytes/3, cudaMemcpyHostToDevice); cudaMemcpy(device_y,yarray,totalBytes/3, cudaMemcpyHostToDevice); printf("got here 2\n"); // run kernel saxpy_kernel<<<blocks, threadsPerBlock>>>(N, alpha, device_x, device_y, device_result); cudaThreadSynchronize(); printf("got here \n"); // // TODO copy result from GPU using cudaMemcpy // cudaMemcpy(resultarray,device_result, totalBytes/3, cudaMemcpyDeviceToHost); // end timing after result has been copied back into host memory double endTime = CycleTimer::currentSeconds(); cudaError_t errCode = cudaPeekAtLastError(); if (errCode != cudaSuccess) { fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode)); } double overallDuration = endTime - startTime; printf("Overall: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration)); // TODO free memory buffers on the GPU cudaFree(device_x); cudaFree(device_y); cudaFree(device_result); } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
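The saxpy pair above provides saxpyCuda and printCudaInfo but no main and only an extern declaration of toBW, so a small driver is needed to run it. The sketch below is one hypothetical driver; main, the problem size, and this particular toBW definition (bytes and seconds converted to GB/s) are our assumptions rather than part of the original assignment code.

#include <cstdio>
#include <cstdlib>

void saxpyCuda(int N, float alpha, float* x, float* y, float* result);  // defined in the file above
void printCudaInfo();                                                   // defined in the file above

// The file above only declares toBW as extern; one plausible definition:
float toBW(int bytes, float sec) {
    return static_cast<float>(bytes) / (1024.f * 1024.f * 1024.f) / sec;
}

int main()
{
    const int N = 1 << 20;
    float* x = (float*)malloc(N * sizeof(float));
    float* y = (float*)malloc(N * sizeof(float));
    float* r = (float*)malloc(N * sizeof(float));
    for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; }

    printCudaInfo();
    saxpyCuda(N, 3.0f, x, y, r);
    printf("result[0] = %f (expected 5.0)\n", r[0]);  // 3*1 + 2

    free(x); free(y); free(r);
    return 0;
}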
4c0985c890405aa709db738d8d6d3a18c3a15faa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sm_32_intrinsics.h>
#include <surface_functions.h>
//#include <sample_inc.h>

extern "C"
{
    __global__ void test_kernel( const unsigned char * src, unsigned char * dst, int multiplier)
    {
        dst[threadIdx.x + blockIdx.x * blockDim.x] = src[threadIdx.x + blockIdx.x * blockDim.x] * multiplier;
    }
}
4c0985c890405aa709db738d8d6d3a18c3a15faa.cu
#include <sm_32_intrinsics.h>
#include <surface_functions.h>
//#include <sample_inc.h>

extern "C"
{
    __global__ void test_kernel( const unsigned char * src, unsigned char * dst, int multiplier)
    {
        dst[threadIdx.x + blockIdx.x * blockDim.x] = src[threadIdx.x + blockIdx.x * blockDim.x] * multiplier;
    }
}
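The pair above ships only the kernel itself. As a sketch of how it could be exercised, here is a self-contained driver; the kernel body is repeated under the name test_kernel_demo so the sketch compiles on its own (linking against the file above would instead require relocatable device code), and the buffer size, block size, and multiplier are arbitrary choices of ours.

#include <cstdio>
#include <cuda_runtime.h>

// Same body as test_kernel above, repeated here so this sketch is self-contained.
__global__ void test_kernel_demo(const unsigned char* src, unsigned char* dst, int multiplier)
{
    dst[threadIdx.x + blockIdx.x * blockDim.x] = src[threadIdx.x + blockIdx.x * blockDim.x] * multiplier;
}

int main()
{
    const int n = 256, threads = 64;
    unsigned char h_src[n], h_dst[n];
    for (int i = 0; i < n; i++) h_src[i] = (unsigned char)(i % 8);

    unsigned char *d_src, *d_dst;
    cudaMalloc(&d_src, n);
    cudaMalloc(&d_dst, n);
    cudaMemcpy(d_src, h_src, n, cudaMemcpyHostToDevice);

    test_kernel_demo<<<n / threads, threads>>>(d_src, d_dst, 3);
    cudaMemcpy(h_dst, d_dst, n, cudaMemcpyDeviceToHost);

    printf("h_dst[5] = %d (expected 15)\n", (int)h_dst[5]);  // (5 % 8) * 3

    cudaFree(d_src);
    cudaFree(d_dst);
    return 0;
}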
c268ede091e7c3ef1214e202354418a233e054f5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// dot product of two integer vectors: per-block partial sums in shared memory, combined with atomicAdd.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/time.h>

#define SIZE 4        //2048
#define NUM_THREADS 2 //512
#define NUM_BLOCKS SIZE / NUM_THREADS

double cclock()
/* Returns wall-clock time in seconds (gettimeofday) */
{
    struct timeval tmp;
    double sec;
    gettimeofday( &tmp, (struct timezone *)0 );
    sec = tmp.tv_sec + ((double)tmp.tv_usec)/1000000.0;
    return sec;
}

// print the vector
void print_vector(int size, int *v)
{
    int i;
    for (i = 0; i < size; i++) {
        printf("%d ", v[i]);
    }
    printf("\n");
}

// kernel function
__global__ void dot( int *a, int *b, int *c )
{
    __shared__ int temp[NUM_THREADS];
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    temp[threadIdx.x] = a[idx] * b[idx];

    __syncthreads();

    if( 0 == threadIdx.x ) {
        int sum = 0;
        for( int i = 0; i < NUM_THREADS; i++ )
            sum += temp[i];
        atomicAdd( c , sum );
    }
}

int main(int argc, char *argv[])
{
    int * h_a, * h_b, *h_c; // host pointers
    int * d_a, * d_b, *d_c; // device pointers
    int i;
    int size_in_bytes;
    double t_start, t_end;  // cclock() returns double seconds; int would truncate and break the %f print below

    // SIZE = atoi(argv[1]);
    size_in_bytes = SIZE * sizeof( int );

    if( SIZE < 1 ){
        fprintf( stderr, "Error. Inconsistent parameters.\nProgram exit ...\n");
        exit(1);
    }

    // allocate the pointers
    h_a = ( int * ) malloc( size_in_bytes );
    h_b = ( int * ) malloc( size_in_bytes );
    h_c = ( int * ) malloc( sizeof( int ) );

    hipMalloc( (void**) &d_a, size_in_bytes );
    hipMalloc( (void**) &d_b, size_in_bytes );
    hipMalloc( (void**) &d_c, sizeof( int ) );

    // initialize the vectors
    // srand(time(NULL));
    for( i = 0; i < SIZE; i++ ){
        h_a[i] = (int) 1; //(rand() % 1000 + 1);
        h_b[i] = (int) i; //(rand() % 1000 + 1);
    }
    h_c[0] = 0;

    print_vector(SIZE, h_a);
    print_vector(SIZE, h_b);

    // copy from CPU to GPU
    //hipMemcpy( dest, source, sizeinbytes, hipMemcpyHostToDevice | hipMemcpyDeviceToHost );
    hipMemcpy( d_a, h_a, size_in_bytes, hipMemcpyHostToDevice );
    hipMemcpy( d_b, h_b, size_in_bytes, hipMemcpyHostToDevice );
    hipMemcpy( d_c, h_c, sizeof( int ), hipMemcpyHostToDevice );

    t_start=cclock();
    hipLaunchKernelGGL(( dot), dim3(NUM_BLOCKS), dim3(NUM_THREADS) , 0, 0, d_a, d_b, d_c);
    hipDeviceSynchronize(); // the launch is asynchronous; wait for the kernel before reading the clock
    t_end=cclock();

    // copy from GPU to CPU
    hipMemcpy( h_a, d_a, size_in_bytes, hipMemcpyDeviceToHost );
    hipMemcpy( h_b, d_b, size_in_bytes, hipMemcpyDeviceToHost );
    hipMemcpy( h_c, d_c, sizeof( int ), hipMemcpyDeviceToHost );

    printf("%d\n", h_c[0]);

    fprintf( stdout, "dot product executed. Time Elapsed %9.4f secs\n", t_end-t_start );

    // free the memory
    free( h_a );
    free( h_b );
    free( h_c );

    hipFree( d_a );
    hipFree( d_b );
    hipFree( d_c );

    return 0;
}
c268ede091e7c3ef1214e202354418a233e054f5.cu
// dot product of two integer vectors: per-block partial sums in shared memory, combined with atomicAdd.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/time.h>

#define SIZE 4        //2048
#define NUM_THREADS 2 //512
#define NUM_BLOCKS SIZE / NUM_THREADS

double cclock()
/* Returns wall-clock time in seconds (gettimeofday) */
{
    struct timeval tmp;
    double sec;
    gettimeofday( &tmp, (struct timezone *)0 );
    sec = tmp.tv_sec + ((double)tmp.tv_usec)/1000000.0;
    return sec;
}

// print the vector
void print_vector(int size, int *v)
{
    int i;
    for (i = 0; i < size; i++) {
        printf("%d ", v[i]);
    }
    printf("\n");
}

// kernel function
__global__ void dot( int *a, int *b, int *c )
{
    __shared__ int temp[NUM_THREADS];
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    temp[threadIdx.x] = a[idx] * b[idx];

    __syncthreads();

    if( 0 == threadIdx.x ) {
        int sum = 0;
        for( int i = 0; i < NUM_THREADS; i++ )
            sum += temp[i];
        atomicAdd( c , sum );
    }
}

int main(int argc, char *argv[])
{
    int * h_a, * h_b, *h_c; // host pointers
    int * d_a, * d_b, *d_c; // device pointers
    int i;
    int size_in_bytes;
    double t_start, t_end;  // cclock() returns double seconds; int would truncate and break the %f print below

    // SIZE = atoi(argv[1]);
    size_in_bytes = SIZE * sizeof( int );

    if( SIZE < 1 ){
        fprintf( stderr, "Error. Inconsistent parameters.\nProgram exit ...\n");
        exit(1);
    }

    // allocate the pointers
    h_a = ( int * ) malloc( size_in_bytes );
    h_b = ( int * ) malloc( size_in_bytes );
    h_c = ( int * ) malloc( sizeof( int ) );

    cudaMalloc( (void**) &d_a, size_in_bytes );
    cudaMalloc( (void**) &d_b, size_in_bytes );
    cudaMalloc( (void**) &d_c, sizeof( int ) );

    // initialize the vectors
    // srand(time(NULL));
    for( i = 0; i < SIZE; i++ ){
        h_a[i] = (int) 1; //(rand() % 1000 + 1);
        h_b[i] = (int) i; //(rand() % 1000 + 1);
    }
    h_c[0] = 0;

    print_vector(SIZE, h_a);
    print_vector(SIZE, h_b);

    // copy from CPU to GPU
    //cudaMemcpy( dest, source, sizeinbytes, cudaMemcpyHostToDevice | cudaMemcpyDeviceToHost );
    cudaMemcpy( d_a, h_a, size_in_bytes, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, h_b, size_in_bytes, cudaMemcpyHostToDevice );
    cudaMemcpy( d_c, h_c, sizeof( int ), cudaMemcpyHostToDevice );

    t_start=cclock();
    dot<<< NUM_BLOCKS, NUM_THREADS >>>(d_a, d_b, d_c);
    cudaDeviceSynchronize(); // the launch is asynchronous; wait for the kernel before reading the clock
    t_end=cclock();

    // copy from GPU to CPU
    cudaMemcpy( h_a, d_a, size_in_bytes, cudaMemcpyDeviceToHost );
    cudaMemcpy( h_b, d_b, size_in_bytes, cudaMemcpyDeviceToHost );
    cudaMemcpy( h_c, d_c, sizeof( int ), cudaMemcpyDeviceToHost );

    printf("%d\n", h_c[0]);

    fprintf( stdout, "dot product executed. Time Elapsed %9.4f secs\n", t_end-t_start );

    // free the memory
    free( h_a );
    free( h_b );
    free( h_c );

    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );

    return 0;
}
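With the inputs hard-coded above (h_a all ones, h_b = 0..SIZE-1), the kernel should print SIZE*(SIZE-1)/2, which is 6 for SIZE = 4. As an optional cross-check of the hand-written shared-memory reduction, the same dot product can be computed with Thrust's inner_product; the sketch below is ours and is not part of the original pair.

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/sequence.h>

int main()
{
    const int n = 4;                             // mirrors SIZE above
    thrust::device_vector<int> a(n, 1);          // h_a[i] = 1
    thrust::device_vector<int> b(n);
    thrust::sequence(b.begin(), b.end());        // h_b[i] = i
    int ref = thrust::inner_product(a.begin(), a.end(), b.begin(), 0);
    printf("reference dot product = %d\n", ref); // 0 + 1 + 2 + 3 = 6
    return 0;
}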
8501b97a1674593f51b8810526bdc1020ebc261b.hip
// !!! This is a file automatically generated by hipify!!! /* This code accompanies * Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics * https://doi.org/10.1016/j.jcp.2019.07.029 * Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids * https://doi.org/10.1103/PhysRevFluids.4.103701 * * Yifei Guan, Igor Novosselov * University of Washington * * Author: Yifei Guan * */ #define _CRT_SECURE_NO_WARNINGS #include <stdio.h> #include <stdlib.h> #define _USE_MATH_DEFINES #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include "LBM.h" #include <hip/device_functions.h> #define RAD 1 __global__ void gpu_poisson(double*, double*,double*); __global__ void gpu_efield(double*, double*, double*); __global__ void odd_extension(double*, hipfftDoubleComplex*, double*); __global__ void gpu_derivative(double*, double*, hipfftDoubleComplex*); __global__ void odd_extract(double*, hipfftDoubleComplex*); __global__ void gpu_bc(double*); __device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y) { return (2*RAD + nThreads)*y + x; } __host__ void poisson_phi(double *charge_gpu, double *phi_gpu) { // blocks in grid dim3 grid(NX / nThreads, NY, 1); // threads in block dim3 threads(nThreads, 1, 1); unsigned int it = 0; double MAX_ITERATIONS = 1.0E6; double TOLERANCE = 1.0e-9; double *Res = (double*)malloc(mem_size_scalar); double error = 0.0; double *R; checkCudaErrors(hipMalloc((void**)&R, mem_size_scalar)); for (it = 0; it < MAX_ITERATIONS; ++it) { error = 0.0; gpu_poisson << < grid, threads >> > (charge_gpu, phi_gpu, R); checkCudaErrors(hipMemcpy(Res, R, mem_size_scalar, hipMemcpyDeviceToHost)); for (unsigned int y = 0; y < NY; ++y) { for (unsigned int x = 0; x < NX; ++x) { //if (it % 1000 == 1) printf("%g\n", error); if (error < Res[scalar_index(x, y)]) error = Res[scalar_index(x, y)]; } } if (error < TOLERANCE) break; } checkCudaErrors(hipFree(R)); free(Res); //printf("%g\n", error); if (it == MAX_ITERATIONS) { printf("Poisson solver did not converge!\n"); printf("Residual = %g\n", error); system("pause"); //exit(-1); } getLastCudaError("Poisson solver kernel error"); } __global__ void gpu_poisson(double *c, double *fi,double *R){ unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int s_y = threadIdx.y + RAD; unsigned int s_x = threadIdx.x + RAD; unsigned int xp1 = (x + blockDim.x) % NX; unsigned int yp1 = (y + blockDim.y) % NY; unsigned int xm1 = (NX + x - 1) % NX; unsigned int ym1 = (NY + y - 1) % NY; __shared__ double s_in[(2*RAD + nThreads)*3]; // load to shared memory (regular cells) s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)]; // load halo cells if (threadIdx.x < RAD) { s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)]; s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)]; } if (threadIdx.y < RAD) { s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)]; s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)]; } // Boundary conditions if (y == 0) { fi[gpu_scalar_index(x, y)] = voltage; return; } if (y == NY - 1) { fi[gpu_scalar_index(x, y)] = voltage2; return; } __syncthreads(); double charge = c[gpu_scalar_index(x, y)]; //double phi = fi[gpu_scalar_index(x, y)]; //double phiL = fi[gpu_scalar_index(xm1, y)]; //double 
phiR = fi[gpu_scalar_index(xp1, y)]; //double phiU = fi[gpu_scalar_index(x, yp1)]; //double phiD = fi[gpu_scalar_index(x, ym1)]; double phi = s_in[gpu_s_scalar_index(s_x, s_y)]; double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)]; double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)]; double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)]; double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)]; double source = (charge / eps) * dx *dx; // Right hand side of the equation double phi_old = phi; phi = 0.25 * (phiL + phiR + phiU + phiD + source); // Record the error R[gpu_scalar_index(x, y)] = fabs(phi - phi_old); //__syncthreads(); fi[gpu_scalar_index(x, y)] = phi; //if (x == 5 && y == 5) printf("%g\n", phi); } __host__ void efield(double *phi_gpu, double *Ex_gpu, double *Ey_gpu) { // blocks in grid dim3 grid(NX / nThreads, NY, 1); // threads in block dim3 threads(nThreads, 1, 1); gpu_efield << < grid, threads >> > (phi_gpu, Ex_gpu, Ey_gpu); gpu_bc << <grid, threads >> > (Ey_gpu); getLastCudaError("Efield kernel error"); } __global__ void gpu_efield(double *fi, double *ex, double *ey){ unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int xp1 = (x + 1) % NX; unsigned int yp1 = (y + 1) % NY; unsigned int xm1 = (NX + x - 1) % NX; unsigned int ym1 = (NY + y - 1) % NY; double phi = fi[gpu_scalar_index(x, y)]; double phiL = fi[gpu_scalar_index(xm1, y)]; double phiR = fi[gpu_scalar_index(xp1, y)]; double phiU = fi[gpu_scalar_index(x, yp1)]; double phiD = fi[gpu_scalar_index(x, ym1)]; ex[gpu_scalar_index(x, y)] = 0.5*(phiL - phiR) / dx; ey[gpu_scalar_index(x, y)] = 0.5*(phiD - phiU) / dy; } __global__ void gpu_bc(double *ey) { unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; if (y == 0) { //ex[gpu_scalar_index(x, 0)] = ex[gpu_scalar_index(x, 1)]; ey[gpu_scalar_index(x, 0)] = ey[gpu_scalar_index(x, 1)]; return; } if (y == NY - 1) { //ex[gpu_scalar_index(x, NY - 1)] = ex[gpu_scalar_index(x, NY - 2)]; ey[gpu_scalar_index(x, NY - 1)] = ey[gpu_scalar_index(x, NY - 2)]; return; } } // ========================================================================= // Fast poisson solver domain extension // ========================================================================= __host__ void fast_Poisson(double *charge_gpu, double *T_gpu, double *kx, double *ky, hipfftHandle plan) { checkCudaErrors(hipMalloc((void**)&freq_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NE)); checkCudaErrors(hipMalloc((void**)&phi_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NE)); checkCudaErrors(hipMalloc((void**)&charge_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NE)); checkCudaErrors(hipMalloc((void**)&T_gpu_ext, sizeof(hipfftDoubleComplex)*NX*NE)); // Extend the domain extension(charge_gpu, charge_gpu_ext, T_gpu); // Execute a real-to-complex 2D FFT CHECK_CUFFT(hipfftExecZ2Z(plan, charge_gpu_ext, freq_gpu_ext, HIPFFT_FORWARD)); // Execute the derivatives in frequency domain derivative(kx, ky, freq_gpu_ext); // Execute a complex-to-complex 2D IFFT CHECK_CUFFT(hipfftExecZ2Z(plan, freq_gpu_ext, phi_gpu_ext, HIPFFT_BACKWARD)); // Extraction of phi from extended domain phi_gpu_ext extract(phi_gpu, phi_gpu_ext); // Calculate electric field strength efield(phi_gpu, Ex_gpu, Ey_gpu); checkCudaErrors(hipFree(charge_gpu_ext)); checkCudaErrors(hipFree(phi_gpu_ext)); checkCudaErrors(hipFree(freq_gpu_ext)); checkCudaErrors(hipFree(T_gpu_ext)); } __host__ void extension(double *c, hipfftDoubleComplex *c_ext, double *T) { // blocks in grid dim3 grid(NX / nThreads, NE, 1); // threads in block dim3 
threads(nThreads, 1, 1); odd_extension << < grid, threads >> > (c, c_ext, T); getLastCudaError("Odd Extension error"); } __global__ void odd_extension(double *charge, hipfftDoubleComplex *charge_ext, double *T) { unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; if (y == 0) { charge_ext[gpu_scalar_index(x, y)].x = 0.0; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == 1) { charge_ext[gpu_scalar_index(x, y)].x = -convertCtoCharge*(charge[gpu_scalar_index(x, y)] - T[gpu_scalar_index(x, y)]) / eps - voltage / dy / dy; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y > 1 && y < NY - 2) { charge_ext[gpu_scalar_index(x, y)].x = -convertCtoCharge*(charge[gpu_scalar_index(x, y)] - T[gpu_scalar_index(x, y)]) / eps; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == NY - 2) { charge_ext[gpu_scalar_index(x, y)].x = -convertCtoCharge*(charge[gpu_scalar_index(x, y)] - T[gpu_scalar_index(x, y)]) / eps - voltage2 / dy / dy; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == NY - 1) { charge_ext[gpu_scalar_index(x, y)].x = 0.0; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == NY) { charge_ext[gpu_scalar_index(x, y)].x = convertCtoCharge*(charge[gpu_scalar_index(x, NE - y)] - T[gpu_scalar_index(x, NE - y)]) / eps + voltage2 / dy / dy; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y > NY && y<NE-1) { charge_ext[gpu_scalar_index(x, y)].x = convertCtoCharge*(charge[gpu_scalar_index(x, NE - y)] - T[gpu_scalar_index(x, NE - y)]) / eps; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == NE - 1) { charge_ext[gpu_scalar_index(x, y)].x = convertCtoCharge*(charge[gpu_scalar_index(x, 1)] - T[gpu_scalar_index(x, 1)]) / eps + voltage / dy / dy; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } } __host__ void derivative(double *kx, double *ky, hipfftDoubleComplex *source) { // blocks in grid dim3 grid(NX / nThreads, NE, 1); // threads in block dim3 threads(nThreads, 1, 1); gpu_derivative << < grid, threads >> > (kx, ky, source); getLastCudaError("Gpu derivative error"); } __global__ void gpu_derivative(double *kx, double *ky, hipfftDoubleComplex *source) { unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; double I = kx[x]; double J = ky[y]; double mu = (4.0 / dy / dy)*(sin(J*dy*0.5)*sin(J*dy*0.5)) + I*I; if (y == 0 && x == 0) mu = 1.0; source[gpu_scalar_index(x, y)].x = -source[gpu_scalar_index(x, y)].x / mu; source[gpu_scalar_index(x, y)].y = -source[gpu_scalar_index(x, y)].y / mu; } __host__ void extract(double *fi, hipfftDoubleComplex *fi_ext) { // blocks in grid dim3 grid(NX / nThreads, NY, 1); // threads in block dim3 threads(nThreads, 1, 1); odd_extract << < grid, threads >> > (fi, fi_ext); getLastCudaError("Odd Extension error"); } __global__ void odd_extract(double *phi, hipfftDoubleComplex *phi_ext) { unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; if (y == 0) { phi[gpu_scalar_index(x, y)] = voltage; return; } if (y == NY-1) { phi[gpu_scalar_index(x, y)] = voltage2; return; } phi[gpu_scalar_index(x, y)] = phi_ext[gpu_scalar_index(x, y)].x/SIZE; }
8501b97a1674593f51b8810526bdc1020ebc261b.cu
/* This code accompanies * Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics * https://doi.org/10.1016/j.jcp.2019.07.029 * Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids * https://doi.org/10.1103/PhysRevFluids.4.103701 * * Yifei Guan, Igor Novosselov * University of Washington * * Author: Yifei Guan * */ #define _CRT_SECURE_NO_WARNINGS #include <stdio.h> #include <stdlib.h> #define _USE_MATH_DEFINES #include <math.h> #include <cuda_runtime.h> #include <cuda.h> #include <cufft.h> #include "LBM.h" #include <device_functions.h> #define RAD 1 __global__ void gpu_poisson(double*, double*,double*); __global__ void gpu_efield(double*, double*, double*); __global__ void odd_extension(double*, cufftDoubleComplex*, double*); __global__ void gpu_derivative(double*, double*, cufftDoubleComplex*); __global__ void odd_extract(double*, cufftDoubleComplex*); __global__ void gpu_bc(double*); __device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y) { return (2*RAD + nThreads)*y + x; } __host__ void poisson_phi(double *charge_gpu, double *phi_gpu) { // blocks in grid dim3 grid(NX / nThreads, NY, 1); // threads in block dim3 threads(nThreads, 1, 1); unsigned int it = 0; double MAX_ITERATIONS = 1.0E6; double TOLERANCE = 1.0e-9; double *Res = (double*)malloc(mem_size_scalar); double error = 0.0; double *R; checkCudaErrors(cudaMalloc((void**)&R, mem_size_scalar)); for (it = 0; it < MAX_ITERATIONS; ++it) { error = 0.0; gpu_poisson << < grid, threads >> > (charge_gpu, phi_gpu, R); checkCudaErrors(cudaMemcpy(Res, R, mem_size_scalar, cudaMemcpyDeviceToHost)); for (unsigned int y = 0; y < NY; ++y) { for (unsigned int x = 0; x < NX; ++x) { //if (it % 1000 == 1) printf("%g\n", error); if (error < Res[scalar_index(x, y)]) error = Res[scalar_index(x, y)]; } } if (error < TOLERANCE) break; } checkCudaErrors(cudaFree(R)); free(Res); //printf("%g\n", error); if (it == MAX_ITERATIONS) { printf("Poisson solver did not converge!\n"); printf("Residual = %g\n", error); system("pause"); //exit(-1); } getLastCudaError("Poisson solver kernel error"); } __global__ void gpu_poisson(double *c, double *fi,double *R){ unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int s_y = threadIdx.y + RAD; unsigned int s_x = threadIdx.x + RAD; unsigned int xp1 = (x + blockDim.x) % NX; unsigned int yp1 = (y + blockDim.y) % NY; unsigned int xm1 = (NX + x - 1) % NX; unsigned int ym1 = (NY + y - 1) % NY; __shared__ double s_in[(2*RAD + nThreads)*3]; // load to shared memory (regular cells) s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)]; // load halo cells if (threadIdx.x < RAD) { s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)]; s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)]; } if (threadIdx.y < RAD) { s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)]; s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)]; } // Boundary conditions if (y == 0) { fi[gpu_scalar_index(x, y)] = voltage; return; } if (y == NY - 1) { fi[gpu_scalar_index(x, y)] = voltage2; return; } __syncthreads(); double charge = c[gpu_scalar_index(x, y)]; //double phi = fi[gpu_scalar_index(x, y)]; //double phiL = fi[gpu_scalar_index(xm1, y)]; //double phiR = fi[gpu_scalar_index(xp1, y)]; //double phiU = fi[gpu_scalar_index(x, 
yp1)]; //double phiD = fi[gpu_scalar_index(x, ym1)]; double phi = s_in[gpu_s_scalar_index(s_x, s_y)]; double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)]; double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)]; double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)]; double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)]; double source = (charge / eps) * dx *dx; // Right hand side of the equation double phi_old = phi; phi = 0.25 * (phiL + phiR + phiU + phiD + source); // Record the error R[gpu_scalar_index(x, y)] = fabs(phi - phi_old); //__syncthreads(); fi[gpu_scalar_index(x, y)] = phi; //if (x == 5 && y == 5) printf("%g\n", phi); } __host__ void efield(double *phi_gpu, double *Ex_gpu, double *Ey_gpu) { // blocks in grid dim3 grid(NX / nThreads, NY, 1); // threads in block dim3 threads(nThreads, 1, 1); gpu_efield << < grid, threads >> > (phi_gpu, Ex_gpu, Ey_gpu); gpu_bc << <grid, threads >> > (Ey_gpu); getLastCudaError("Efield kernel error"); } __global__ void gpu_efield(double *fi, double *ex, double *ey){ unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int xp1 = (x + 1) % NX; unsigned int yp1 = (y + 1) % NY; unsigned int xm1 = (NX + x - 1) % NX; unsigned int ym1 = (NY + y - 1) % NY; double phi = fi[gpu_scalar_index(x, y)]; double phiL = fi[gpu_scalar_index(xm1, y)]; double phiR = fi[gpu_scalar_index(xp1, y)]; double phiU = fi[gpu_scalar_index(x, yp1)]; double phiD = fi[gpu_scalar_index(x, ym1)]; ex[gpu_scalar_index(x, y)] = 0.5*(phiL - phiR) / dx; ey[gpu_scalar_index(x, y)] = 0.5*(phiD - phiU) / dy; } __global__ void gpu_bc(double *ey) { unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; if (y == 0) { //ex[gpu_scalar_index(x, 0)] = ex[gpu_scalar_index(x, 1)]; ey[gpu_scalar_index(x, 0)] = ey[gpu_scalar_index(x, 1)]; return; } if (y == NY - 1) { //ex[gpu_scalar_index(x, NY - 1)] = ex[gpu_scalar_index(x, NY - 2)]; ey[gpu_scalar_index(x, NY - 1)] = ey[gpu_scalar_index(x, NY - 2)]; return; } } // ========================================================================= // Fast poisson solver domain extension // ========================================================================= __host__ void fast_Poisson(double *charge_gpu, double *T_gpu, double *kx, double *ky, cufftHandle plan) { checkCudaErrors(cudaMalloc((void**)&freq_gpu_ext, sizeof(cufftDoubleComplex)*NX*NE)); checkCudaErrors(cudaMalloc((void**)&phi_gpu_ext, sizeof(cufftDoubleComplex)*NX*NE)); checkCudaErrors(cudaMalloc((void**)&charge_gpu_ext, sizeof(cufftDoubleComplex)*NX*NE)); checkCudaErrors(cudaMalloc((void**)&T_gpu_ext, sizeof(cufftDoubleComplex)*NX*NE)); // Extend the domain extension(charge_gpu, charge_gpu_ext, T_gpu); // Execute a real-to-complex 2D FFT CHECK_CUFFT(cufftExecZ2Z(plan, charge_gpu_ext, freq_gpu_ext, CUFFT_FORWARD)); // Execute the derivatives in frequency domain derivative(kx, ky, freq_gpu_ext); // Execute a complex-to-complex 2D IFFT CHECK_CUFFT(cufftExecZ2Z(plan, freq_gpu_ext, phi_gpu_ext, CUFFT_INVERSE)); // Extraction of phi from extended domain phi_gpu_ext extract(phi_gpu, phi_gpu_ext); // Calculate electric field strength efield(phi_gpu, Ex_gpu, Ey_gpu); checkCudaErrors(cudaFree(charge_gpu_ext)); checkCudaErrors(cudaFree(phi_gpu_ext)); checkCudaErrors(cudaFree(freq_gpu_ext)); checkCudaErrors(cudaFree(T_gpu_ext)); } __host__ void extension(double *c, cufftDoubleComplex *c_ext, double *T) { // blocks in grid dim3 grid(NX / nThreads, NE, 1); // threads in block dim3 threads(nThreads, 1, 1); odd_extension << < grid, threads >> > (c, c_ext, T); 
getLastCudaError("Odd Extension error"); } __global__ void odd_extension(double *charge, cufftDoubleComplex *charge_ext, double *T) { unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; if (y == 0) { charge_ext[gpu_scalar_index(x, y)].x = 0.0; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == 1) { charge_ext[gpu_scalar_index(x, y)].x = -convertCtoCharge*(charge[gpu_scalar_index(x, y)] - T[gpu_scalar_index(x, y)]) / eps - voltage / dy / dy; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y > 1 && y < NY - 2) { charge_ext[gpu_scalar_index(x, y)].x = -convertCtoCharge*(charge[gpu_scalar_index(x, y)] - T[gpu_scalar_index(x, y)]) / eps; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == NY - 2) { charge_ext[gpu_scalar_index(x, y)].x = -convertCtoCharge*(charge[gpu_scalar_index(x, y)] - T[gpu_scalar_index(x, y)]) / eps - voltage2 / dy / dy; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == NY - 1) { charge_ext[gpu_scalar_index(x, y)].x = 0.0; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == NY) { charge_ext[gpu_scalar_index(x, y)].x = convertCtoCharge*(charge[gpu_scalar_index(x, NE - y)] - T[gpu_scalar_index(x, NE - y)]) / eps + voltage2 / dy / dy; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y > NY && y<NE-1) { charge_ext[gpu_scalar_index(x, y)].x = convertCtoCharge*(charge[gpu_scalar_index(x, NE - y)] - T[gpu_scalar_index(x, NE - y)]) / eps; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } if (y == NE - 1) { charge_ext[gpu_scalar_index(x, y)].x = convertCtoCharge*(charge[gpu_scalar_index(x, 1)] - T[gpu_scalar_index(x, 1)]) / eps + voltage / dy / dy; charge_ext[gpu_scalar_index(x, y)].y = 0.0; return; } } __host__ void derivative(double *kx, double *ky, cufftDoubleComplex *source) { // blocks in grid dim3 grid(NX / nThreads, NE, 1); // threads in block dim3 threads(nThreads, 1, 1); gpu_derivative << < grid, threads >> > (kx, ky, source); getLastCudaError("Gpu derivative error"); } __global__ void gpu_derivative(double *kx, double *ky, cufftDoubleComplex *source) { unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; double I = kx[x]; double J = ky[y]; double mu = (4.0 / dy / dy)*(sin(J*dy*0.5)*sin(J*dy*0.5)) + I*I; if (y == 0 && x == 0) mu = 1.0; source[gpu_scalar_index(x, y)].x = -source[gpu_scalar_index(x, y)].x / mu; source[gpu_scalar_index(x, y)].y = -source[gpu_scalar_index(x, y)].y / mu; } __host__ void extract(double *fi, cufftDoubleComplex *fi_ext) { // blocks in grid dim3 grid(NX / nThreads, NY, 1); // threads in block dim3 threads(nThreads, 1, 1); odd_extract << < grid, threads >> > (fi, fi_ext); getLastCudaError("Odd Extension error"); } __global__ void odd_extract(double *phi, cufftDoubleComplex *phi_ext) { unsigned int y = blockIdx.y; unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; if (y == 0) { phi[gpu_scalar_index(x, y)] = voltage; return; } if (y == NY-1) { phi[gpu_scalar_index(x, y)] = voltage2; return; } phi[gpu_scalar_index(x, y)] = phi_ext[gpu_scalar_index(x, y)].x/SIZE; }
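odd_extract above divides the recovered potential by SIZE because cuFFT (and hipFFT) transforms are unnormalized: a forward Z2Z transform followed by an inverse one scales every sample by the number of points in the transform. The toy program below demonstrates that scaling on a delta function; it is our illustration only (build with -lcufft) and is not part of the solver.

#include <cstdio>
#include <cuda_runtime.h>
#include <cufft.h>

int main()
{
    const int nx = 8, ny = 8, n = nx * ny;

    cufftDoubleComplex* d;
    cudaMalloc(&d, n * sizeof(cufftDoubleComplex));
    cudaMemset(d, 0, n * sizeof(cufftDoubleComplex));
    cufftDoubleComplex one;
    one.x = 1.0; one.y = 0.0;
    cudaMemcpy(d, &one, sizeof(one), cudaMemcpyHostToDevice);  // delta at index 0

    cufftHandle plan;
    cufftPlan2d(&plan, nx, ny, CUFFT_Z2Z);
    cufftExecZ2Z(plan, d, d, CUFFT_FORWARD);   // in-place forward transform
    cufftExecZ2Z(plan, d, d, CUFFT_INVERSE);   // in-place inverse transform

    cufftDoubleComplex back;
    cudaMemcpy(&back, d, sizeof(back), cudaMemcpyDeviceToHost);
    printf("value after forward+inverse = %f (expected %d, the transform size)\n", back.x, n);

    cufftDestroy(plan);
    cudaFree(d);
    return 0;
}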
2f2a472edb117a6ec5fa1146b135d13d9c3274f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> //iostream> #include <assert.h> #include <nvmatrix_kernels.cuh> #include <nvmatrix.cuh> #include <conv_util.cuh> #include <iostream> using namespace std; __device__ float square(const float a) { return a*a; } /* * Block size B_YxB_X. * B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx * B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) * * target can be the same matrix as imgs. * radius must be one of 3, 5, 7, 9. * * Tried imgsPerThread, slower. */ template<int B_Y, int B_X, int radius> __global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize, const int numImages, const int imgStride, const bool horiz, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilter[radius]; const int imgPixels = imgSize * imgSize; const int ty = B_Y * blockIdx.y + threadIdx.y; const int channelIdx = ty / imgSize; const int rowIdx = ty % imgSize; const int imgIdx = B_X*blockIdx.x + threadIdx.x; const int filterWidth = 2*radius+1; // const int tidx = B_Y * threadIdx.y + threadIdx.x; if (horiz) { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx; } else { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx; } float outputs[filterWidth-1]; #pragma unroll for (int r = 0; r < filterWidth-1; r++) { outputs[r] = 0; } if (threadIdx.x < filterWidth-1) { shFilter[threadIdx.x] = filter[threadIdx.x]; } __syncthreads(); if (imgIdx < numImages) { // This writes radius*2 = filterWidth - 1 values to outputs #pragma unroll for (int col = 0; col < radius; col++) { float px = imgs[0]; #pragma unroll for (int r = 0; r < radius + 1 + col; r++) { outputs[r] += px * shFilter[radius + col - r]; } imgs += horiz ? 
imgStride : imgStride * imgSize; } // Unfortunately this has to be at this level of granularity if (scaleTargets != 0) { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { float* t = &target[0]; t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } else { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { target[0] = scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } } } /* * Block size B_YxB_X * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, numOutputs, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds> __global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels, const int numImages, const int startX, const int strideX, const int outputsX, const bool reverse, const float scaleTargets, const float scaleOutput) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numChanBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread; const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread); if (myChanIdx >= numChannels) { return; } // if (blockIdx.x != 0 || blockIdx.y != 0) { // return; // } const int outputIdx = outputIdxY * outputsX + outputIdxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgPx = startImgPxY * imgSize + startImgPxX; imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx; target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx; if (scaleTargets != 0) { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * 
imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } else { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } } /* * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, outputs, numImages) */ void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, bool reverse, float scaleTargets, float scaleOutput) { int numImages = reverse ? target.getNumCols() : images.getNumCols(); int imgPixels = imgSize * imgSize; assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); assert(target.isContiguous()); assert(strideX > 1); int outputsX = DIVUP(imgSize, strideX); int outputs = outputsX * outputsX; if (reverse) { assert(target.getNumRows() == numChannels * outputs); } else { assert(images.getNumRows() == numChannels * imgPixels); } if (scaleTargets == 0) { if (reverse) { images.resize(numChannels * imgPixels, numImages); images.apply(NVMatrixOps::Zero()); } else { target.resize(numChannels*outputs, numImages); } } else { if (reverse) { assert(images.getNumRows() == numChannels * outputs); assert(images.getNumCols() == numImages); } else { assert(target.getNumRows() == numChannels * outputs); assert(target.getNumCols() == numImages); } } bool checkCaseBounds = numImages % 128 != 0; int chansPerThread = numChannels % 8 == 0 ? 
2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX); if (chansPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kBedOfNails<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput); } void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) */ void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels, float scaleTargets, float scaleOutputs) { int numImages = images.getNumCols(); int radius = filter.getNumCols() / 2; int imgPixels = images.getNumRows() / numChannels; int imgSize = int(sqrt(imgPixels)); assert(imgPixels == imgSize * imgSize); assert(radius >= 1 && radius <= 4); assert(imgSize >= 2 * radius + 1); assert(filter.getNumRows() == 1); assert(images.getNumRows() == numChannels * imgPixels); assert(!images.isTrans()); assert(!filter.isTrans()); assert(!target.isTrans()); assert(target.isContiguous()); if (scaleTargets == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y)); if (radius == 1) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 1>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 2) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 2>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, 
scaleTargets, scaleOutputs); } else if (radius == 3) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 3>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 4) { hipFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kGaussianBlur<4, 32, 4>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } } /* * Block size 1x128 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread * blockIdx.y determines pixel.y * * So each block does one output for some number of images and all the fliters. * * threadIdx.x determines img idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int imgsPerThread, int numFilters, bool checkCaseBounds> __global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y; const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += pxIdx * numImages + imgIdx; denoms += pxIdx * numImages + imgIdx; meanDiffs += imgIdx; target += pxIdx * numImages + imgIdx; float prod[numFilters][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * 128] = prod[f][i]; target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region of pixels for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. 
* Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -sizeX/2 + blockPxX); const int startPxY = MAX(0, -sizeX/2 + blockPxY); const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3); const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -sizeX/2 + myPxY; const int myStartPxX = -sizeX/2 + myPxX; const int myEndPxY = myPxY + DIVUP(sizeX, 2); const int myEndPxX = myPxX + DIVUP(sizeX, 2); const int imgIdx = blockImgIdx + threadIdx.x; imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]); } } } } __syncthreads(); } } // imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX; // imgs += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { #pragma unroll for (int i = 0; i < 
imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 
0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i] / (subsX * subsX); } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i] / (subsX * subsX); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 
0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X]; } } } for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i]; prod[f][i] += (img == ma) * mg; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * acts := -2 x scale x acts x outGrads / denoms */ template<int B_X, int eltsPerThread> __global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads, const uint numElements, const float scale) { const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x; const uint numThreads = B_X * gridDim.x; for (uint i = e; i < numElements; i += numThreads*eltsPerThread) { #pragma unroll for (uint k = 0; k < eltsPerThread; k++) { if (i + k * B_X < numElements) { acts[i + k * B_X] = __fdividef(scale*outGrads[i + k * B_X] * acts[i + k * B_X], denoms[i + k * B_X]); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int imgPixels = imgSize * imgSize; const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1); const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1); const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1); const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1); const int imgIdx = blockImgIdx + threadIdx.x; acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int sy = startY; sy < endY; sy++) { for (int sx = startX; sx < endX; sx++) { const int outPx = sy * imgSize + sx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X]; } } } } } // outGrads += blockPx * numImages; if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, 
-powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { __shared__ float shActs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1); const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1); const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4); const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1; const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1; const int myEndPxY = myPxY + sizeX/2 + 1; const int myEndPxX = myPxX + sizeX/2 + 1; const int imgIdx = blockImgIdx + threadIdx.x; acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from 
memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += shActs[f][threadIdx.x + i * B_X]; } } } } __syncthreads(); } } acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX; acts += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } } void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX) { convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1); } /* * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images.getNumCols(); int numFilters = maxGrads.getNumRows() / outputs; int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(maxGrads.getNumRows() == numFilters * outputs); assert(maxGrads.getNumCols() == numImages); assert(!images.isTrans()); assert(!target.isTrans()); assert(!maxGrads.isTrans()); assert(!maxActs.isTrans()); assert(images.isContiguous()); assert(maxGrads.isContiguous()); assert(maxActs.isContiguous()); assert(maxGrads.isSameDims(maxActs)); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(images); int checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { 
hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } /* * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalMaxUndoCu(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images->size[0]; int numFilters = maxGrads->size[1] / outputs; int imgPixels = images->size[1] / numFilters; assert(images->size[1] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(maxGrads->size[1] == numFilters * outputs); assert(maxGrads->size[0] == numImages); /* assert(!images.isTrans()); assert(!target.isTrans()); assert(!maxGrads.isTrans()); assert(!maxActs.isTrans()); assert(images.isContiguous()); assert(maxGrads.isContiguous()); assert(maxActs.isContiguous()); assert(maxGrads.isSameDims(maxActs)); */ assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); //target.resize(images); int checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, 
false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) { convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads.getNumCols(); int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads.getNumRows() / outputs; assert(avgGrads.getNumRows() == numFilters * outputs); assert(!target.isTrans()); assert(!avgGrads.isTrans()); assert(avgGrads.isContiguous()); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(numFilters * imgPixels, numImages); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 4)) * imgSize); int checkCaseBounds = numImages % 128 != 0; if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } cutilCheckMsg("convLocalAvgUndo: kernel execution failed"); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndoCu(cudamat* avgGrads, cudamat* target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads->size[0]; int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads->size[1] / outputs; cout << numImages << " " << outputs << " " << imgPixels << " " << numFilters << endl; assert(avgGrads->size[1] == numFilters * outputs); /* assert(!target.isTrans()); assert(!avgGrads.isTrans()); assert(avgGrads.isContiguous()); */ assert(numFilters % 16 == 0); assert(numImages % 128 == 0); assert(strideX <= subsX); //target.resize(numFilters * imgPixels, numImages); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 4)) * imgSize); int checkCaseBounds = numImages % 128 != 0; if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, 
false, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, avgGrads->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalAvgUndo<4, 32, 4, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, avgGrads->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } cutilCheckMsg("convLocalAvgUndo: kernel execution failed"); } void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); target.resize(images); denoms.resize(images); if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) 
int imgsPerThread = 8; int filtersPerThread = 4; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, hipFuncCachePreferL1); // L1 faster here hipLaunchKernelGGL(( kCNorm2<8, 8, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, hipFuncCachePreferL1); // L1 faster here hipLaunchKernelGGL(( kCNorm2<8, 8, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } else { bool checkCaseBounds = numImages % 128 != 0; if (numFilters <= 8) { dim3 threads(128); dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize); if (numFilters == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 1, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 2, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 3) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 3, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, 
hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 4, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 5) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 5, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 6) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 6, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 7) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 7, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 8) { if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_fewfilter<1, 8, false>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } } else { dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { hipFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kCNorm_manyfilter<4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, 
images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } } cutilCheckMsg("convResponseNorm: kernel execution failed"); } void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) 
int imgsPerThread = 8; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { bool checkCaseBounds = numImages % 128 != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*2) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, 
numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } cutilCheckMsg("kRNormUndo: kernel execution failed"); }
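/*
 * ---------------------------------------------------------------------------
 * Editor's illustrative sketch -- NOT part of the original cuda-convnet/HIP
 * sources above. It only restates, as a standalone helper, the launch
 * geometry that convLocalMaxUndo() / convLocalAvgUndo() derive inline:
 * 32x4 thread blocks (B_X = 32, B_Y = 4), 4 images per thread, and 2
 * (max-undo) or 4 (avg-undo) filters per thread. The names PoolUndoLaunch and
 * poolUndoLaunchGeometry are hypothetical and are not used anywhere else in
 * this file; DIVUP and dim3 come from the headers this file already includes.
 *
 * Example: poolUndoLaunchGeometry(numImages, numFilters, imgSize, 2)
 * reproduces the blocks/threads passed to kLocalMaxUndo above, while
 * filtersPerThread = 4 reproduces the kLocalAvgUndo configuration.
 * ---------------------------------------------------------------------------
 */
struct PoolUndoLaunch {
    dim3 threads;          // always 32x4 for these kernels
    dim3 blocks;
    bool checkCaseBounds;  // true when numImages is not a multiple of 32*4 = 128
};

static inline PoolUndoLaunch poolUndoLaunchGeometry(int numImages, int numFilters,
                                                    int imgSize, int filtersPerThread) {
    PoolUndoLaunch l;
    l.threads = dim3(32, 4);
    // grid.x: one block per batch of 32*4 images, replicated over the imgSize pixel columns;
    // grid.y: one block per batch of 4*filtersPerThread filters, replicated over the imgSize pixel rows.
    l.blocks = dim3(DIVUP(numImages, 32 * 4) * imgSize,
                    (numFilters / (4 * filtersPerThread)) * imgSize);
    l.checkCaseBounds = (numImages % (32 * 4)) != 0;
    return l;
}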
2f2a472edb117a6ec5fa1146b135d13d9c3274f0.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> //iostream> #include <assert.h> #include <nvmatrix_kernels.cuh> #include <nvmatrix.cuh> #include <conv_util.cuh> #include <iostream> using namespace std; __device__ float square(const float a) { return a*a; } /* * Block size B_YxB_X. * B_X*imgsPerThread*blockIdx.x + threadIdx.x determines img idx * B_Y*blockIdx.y + threadIdx.y determines img row (col if !horiz), channel idx * * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) * * target can be the same matrix as imgs. * radius must be one of 3, 5, 7, 9. * * Tried imgsPerThread, slower. */ template<int B_Y, int B_X, int radius> __global__ void kGaussianBlur(float* imgs, float* filter, float* target, const int imgSize, const int numImages, const int imgStride, const bool horiz, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilter[radius]; const int imgPixels = imgSize * imgSize; const int ty = B_Y * blockIdx.y + threadIdx.y; const int channelIdx = ty / imgSize; const int rowIdx = ty % imgSize; const int imgIdx = B_X*blockIdx.x + threadIdx.x; const int filterWidth = 2*radius+1; // const int tidx = B_Y * threadIdx.y + threadIdx.x; if (horiz) { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgSize * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * imgSize * numImages + imgIdx; } else { imgs += channelIdx * imgPixels * imgStride + rowIdx * imgStride + imgIdx; target += channelIdx * imgPixels * numImages + rowIdx * numImages + imgIdx; } float outputs[filterWidth-1]; #pragma unroll for (int r = 0; r < filterWidth-1; r++) { outputs[r] = 0; } if (threadIdx.x < filterWidth-1) { shFilter[threadIdx.x] = filter[threadIdx.x]; } __syncthreads(); if (imgIdx < numImages) { // This writes radius*2 = filterWidth - 1 values to outputs #pragma unroll for (int col = 0; col < radius; col++) { float px = imgs[0]; #pragma unroll for (int r = 0; r < radius + 1 + col; r++) { outputs[r] += px * shFilter[radius + col - r]; } imgs += horiz ? 
imgStride : imgStride * imgSize; } // Unfortunately this has to be at this level of granularity if (scaleTargets != 0) { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleTargets * target[0] + scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { float* t = &target[0]; t[0] = scaleTargets * t[0] + scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } else { for (int col = radius; col < imgSize ; col++) { // loop over img columns float px = imgs[0]; target[0] = scaleOutputs * (outputs[0] + px * shFilter[0]); #pragma unroll for (int r = 1; r < radius*2; r++) { outputs[r-1] = outputs[r] + px * shFilter[r]; } outputs[filterWidth - 2] = px * shFilter[0]; imgs += horiz ? imgStride : imgStride * imgSize; target += horiz ? numImages : numImages * imgSize; } #pragma unroll for (int r = 0; r < radius; r++) { target[0] = scaleOutputs * outputs[r]; target += horiz ? numImages : numImages * imgSize; } } } } /* * Block size B_YxB_X * blockIdx.x determines output.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines output.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, numOutputs, numImages) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int chansPerThread, bool checkCaseBounds> __global__ void kBedOfNails(float* imgs, float* target, const int imgSize, const int numChannels, const int numImages, const int startX, const int strideX, const int outputsX, const bool reverse, const float scaleTargets, const float scaleOutput) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numChanBlocks = DIVUP(numChannels, B_Y*chansPerThread); const int outputIdxX = blockIdx.x / numImgBlocks; const int outputIdxY = blockIdx.y / numChanBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockChanIdx = (blockIdx.y % numChanBlocks) * B_Y * chansPerThread; const int myChanIdx = (blockChanIdx + threadIdx.y*chansPerThread); if (myChanIdx >= numChannels) { return; } // if (blockIdx.x != 0 || blockIdx.y != 0) { // return; // } const int outputIdx = outputIdxY * outputsX + outputIdxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startImgPxX = startX + outputIdxX * strideX; const int startImgPxY = startX + outputIdxY * strideX; const int imgIdx = blockImgIdx + threadIdx.x; const int imgPx = startImgPxY * imgSize + startImgPxX; imgs += myChanIdx * imgPixels * numImages + imgPx * numImages + imgIdx; target += (myChanIdx * numOutputs + outputIdx) * numImages + imgIdx; if (scaleTargets != 0) { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleTargets * target[c * numOutputs * numImages + i * B_X] + scaleOutput * 
imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleTargets * imgs[c * imgPixels * numImages + i * B_X] + scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } else { if (!reverse) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { target[c * numOutputs * numImages + i * B_X] = scaleOutput * imgs[c * imgPixels * numImages + i * B_X]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < chansPerThread; c++) { imgs[c * imgPixels * numImages + i * B_X] = scaleOutput * target[c * numOutputs * numImages + i * B_X]; } } } } } } /* * imgs: (numChannels, imgPixels, numImages) * target: (numChannels, outputs, numImages) */ void _convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, bool reverse, float scaleTargets, float scaleOutput) { int numImages = reverse ? target.getNumCols() : images.getNumCols(); int imgPixels = imgSize * imgSize; assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); assert(target.isContiguous()); assert(strideX > 1); int outputsX = DIVUP(imgSize, strideX); int outputs = outputsX * outputsX; if (reverse) { assert(target.getNumRows() == numChannels * outputs); } else { assert(images.getNumRows() == numChannels * imgPixels); } if (scaleTargets == 0) { if (reverse) { images.resize(numChannels * imgPixels, numImages); images.apply(NVMatrixOps::Zero()); } else { target.resize(numChannels*outputs, numImages); } } else { if (reverse) { assert(images.getNumRows() == numChannels * outputs); assert(images.getNumCols() == numImages); } else { assert(target.getNumRows() == numChannels * outputs); assert(target.getNumCols() == numImages); } } bool checkCaseBounds = numImages % 128 != 0; int chansPerThread = numChannels % 8 == 0 ? 
2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * outputsX, DIVUP(numChannels, 4 * chansPerThread) * outputsX); if (chansPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 1, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 1, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 1, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kBedOfNails<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kBedOfNails<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), target.getDevData(), imgSize, numChannels, numImages, startX, strideX, outputsX, reverse, scaleTargets, scaleOutput); } } } void convBedOfNails(NVMatrix& images, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(images, target, numChannels, imgSize, startX, strideX, false, scaleTargets, scaleOutput); } void convBedOfNailsUndo(NVMatrix& actsGrad, NVMatrix& target, int numChannels, int imgSize, int startX, int strideX, float scaleTargets, float scaleOutput) { _convBedOfNails(target, actsGrad, numChannels, imgSize, startX, strideX, true, scaleTargets, scaleOutput); } /* * imgs: (numChannels, imgPixels, numImages) with given imgStride * filter: (1, 2*radius + 1) * target: (numChannels, imgPixels, numImages) */ void convGaussianBlur(NVMatrix& images, NVMatrix& filter, NVMatrix& target, bool horiz, int numChannels, float scaleTargets, float scaleOutputs) { int numImages = images.getNumCols(); int radius = filter.getNumCols() / 2; int imgPixels = images.getNumRows() / numChannels; int imgSize = int(sqrt(imgPixels)); assert(imgPixels == imgSize * imgSize); assert(radius >= 1 && radius <= 4); assert(imgSize >= 2 * radius + 1); assert(filter.getNumRows() == 1); assert(images.getNumRows() == numChannels * imgPixels); assert(!images.isTrans()); assert(!filter.isTrans()); assert(!target.isTrans()); assert(target.isContiguous()); if (scaleTargets == 0) { target.resize(images); } else { assert(target.isSameDims(images)); } dim3 threads(32, 4); dim3 blocks(DIVUP(numImages, threads.x), DIVUP(numChannels*imgSize, threads.y)); if (radius == 1) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 1>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 1><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 2) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 2>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 2><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 3) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 3>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 3><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), 
target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } else if (radius == 4) { cudaFuncSetCacheConfig(kGaussianBlur<4, 32, 4>, cudaFuncCachePreferL1); kGaussianBlur<4, 32, 4><<<blocks, threads>>>(images.getDevData(), filter.getDevData(), target.getDevData(), imgSize, numImages, images.getStride(), horiz, scaleTargets, scaleOutputs); } } /* * Block size 1x128 * blockIdx.x determines pixel.x, image idx in batches of 128*imgsPerThread * blockIdx.y determines pixel.y * * So each block does one output for some number of images and all the fliters. * * threadIdx.x determines img idx * * imgs: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int imgsPerThread, int numFilters, bool checkCaseBounds> __global__ void kCNorm_fewfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, 128*imgsPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y; const int blockImgIdx = (blockIdx.x % numImgBlocks) * 128 * imgsPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += pxIdx * numImages + imgIdx; denoms += pxIdx * numImages + imgIdx; meanDiffs += imgIdx; target += pxIdx * numImages + imgIdx; float prod[numFilters][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] += square(meanDiffs[(f * imgPixels + imgPx) * numImages + i * 128]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * 128 < numImages) { #pragma unroll for (int f = 0; f < numFilters; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * 128] = prod[f][i]; target[f * imgPixels * numImages + i * 128] = imgs[f * imgPixels * numImages + i * 128] * __powf(prod[f][i], -powScale); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one pixel for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm_manyfilter(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int pxIdxX = blockIdx.x / numImgBlocks; const int pxIdxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int pxIdx = pxIdxY * imgSize + pxIdxX; const int startPxX = -sizeX/2 + pxIdxX; const int startPxY = -sizeX/2 + pxIdxY; const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + threadIdx.y) * imgPixels * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + pxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } const int loopStartY = MAX(0, startPxY); const int loopStartX = MAX(0, startPxX); const int loopEndY = MIN(imgSize, startPxY + sizeX); const int loopEndX = MIN(imgSize, startPxX + sizeX); for (int y = loopStartY; y < loopEndY; y++) { for (int x = loopStartX; x < loopEndX; x++) { const int imgPx = y * imgSize + x; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(meanDiffs[(f * B_Y * imgPixels + imgPx) * numImages + i * B_X]); } } } } } #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * B_Y * imgPixels * numImages + i * B_X] = imgs[f * B_Y * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region of pixels for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * imgs: (numFilters, imgPixels, numImages) * means: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. 
* Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool checkCaseBounds> __global__ void kCNorm2(float* imgs, float* meanDiffs, float* denoms, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float addScale, const float powScale) { __shared__ float shDiffs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -sizeX/2 + blockPxX); const int startPxY = MAX(0, -sizeX/2 + blockPxY); const int endPxX = MIN(imgSize, blockPxX + DIVUP(sizeX, 2) + 3); const int endPxY = MIN(imgSize, blockPxY + DIVUP(sizeX, 2) + 3); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -sizeX/2 + myPxY; const int myStartPxX = -sizeX/2 + myPxX; const int myEndPxY = myPxY + DIVUP(sizeX, 2); const int myEndPxX = myPxX + DIVUP(sizeX, 2); const int imgIdx = blockImgIdx + threadIdx.x; imgs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; meanDiffs += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 0; } } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shDiffs[ly + loadY][lx + loadX] = meanDiffs[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += square(shDiffs[f][threadIdx.x + i * B_X]); } } } } __syncthreads(); } } // imgs -= (loadY * imgPixels - myPxIdx) * numImages + loadX; // imgs += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { #pragma unroll for (int i = 0; i < 
imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] = 1 + addScale * prod[f][i]; denoms[f * imgPixels * numImages + i * B_X] = prod[f][i]; target[f * imgPixels * numImages + i * B_X] = imgs[f * imgPixels * numImages + i * B_X] * __powf(prod[f][i], -powScale); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalAvgUndo(float* avgGrads, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 
0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; avgGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += avgGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i] / (subsX * subsX); } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i] / (subsX * subsX); } } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines filter idx * * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kLocalMaxUndo(float* imgs, float* maxGrads, float* maxActs, float* target, const int imgSize, const int numFilters, const int numImages, const int subsX, const int startX, const int strideX, const int outputsX, const float scaleTargets, const float scaleOutputs) { __shared__ float shImgs[B_Y*filtersPerThread][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / (numFilters/(B_Y*filtersPerThread)); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % (numFilters/(B_Y*filtersPerThread))) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int numOutputs = outputsX * outputsX; const int imgPixels = imgSize * imgSize; const int startOutputY = blockPxY - startX < subsX ? 0 : 1 + (blockPxY - startX - subsX) / strideX; const int endOutputY = MIN(outputsX, 1 + (blockPxY - startX) / strideX); const int startOutputX = blockPxX - startX < subsX ? 
0 : 1 + (blockPxX - startX - subsX) / strideX; const int endOutputX = MIN(outputsX, 1 + (blockPxX - startX) / strideX); const int imgIdx = blockImgIdx + threadIdx.x; imgs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; maxGrads += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; maxActs += ((blockFilterIdx + threadIdx.y) * numOutputs) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } if (blockPxX >= startX && blockPxX < startX + strideX * (outputsX-1) + subsX && blockPxY >= startX && blockPxY < startX + strideX * (outputsX-1) + subsX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i] = imgs[f * B_Y * imgPixels * numImages + i * B_X]; } } } for (int my = startOutputY; my < endOutputY; my++) { for (int mx = startOutputX; mx < endOutputX; mx++) { const int outputIdx = my * outputsX + mx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float ma = maxActs[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float mg = maxGrads[(f * B_Y * numOutputs + outputIdx) * numImages + i * B_X]; const float img = shImgs[threadIdx.y + B_Y * f][threadIdx.x + B_X * i]; prod[f][i] += (img == ma) * mg; } } } } } } if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * acts := -2 x scale x acts x outGrads / denoms */ template<int B_X, int eltsPerThread> __global__ void kRNormUndoPrelims(float* acts, float* denoms, float* outGrads, const uint numElements, const float scale) { const uint e = B_X * blockIdx.x * eltsPerThread + threadIdx.x; const uint numThreads = B_X * gridDim.x; for (uint i = e; i < numElements; i += numThreads*eltsPerThread) { #pragma unroll for (uint k = 0; k < eltsPerThread; k++) { if (i + k * B_X < numElements) { acts[i + k * B_X] = __fdividef(scale*outGrads[i + k * B_X] * acts[i + k * B_X], denoms[i + k * B_X]); } } } } /* * Block size B_YxB_X * blockIdx.x determines pixel.x, image idx in batches of B_X*imgsPerThread * blockIdx.y determines pixel.y, filter idx in batches of B_Y*filtersPerThread * * So each block does one output pixel for some number of images/filters. 
* * threadIdx.x determines img idx * threadIdx.y determines filter idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * numImages must be divisible by B_X*imgsPerThread * numFilters must be divisible by B_Y*filtersPerThread * * TODO: this isn't really ideal */ template<int B_Y, int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int numFilterBlocks = numFilters/(B_Y*filtersPerThread); const int blockPxX = blockIdx.x / numImgBlocks; const int blockPxY = blockIdx.y / numFilterBlocks; const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * B_Y * filtersPerThread; const int blockPx = blockPxY * imgSize + blockPxX; const int imgPixels = imgSize * imgSize; const int startY = MAX(0, blockPxY + sizeX/2 - sizeX + 1); const int startX = MAX(0, blockPxX + sizeX/2 - sizeX + 1); const int endY = MIN(imgSize, blockPxY + sizeX/2 + 1); const int endX = MIN(imgSize, blockPxX + sizeX/2 + 1); const int imgIdx = blockImgIdx + threadIdx.x; acts += ((blockFilterIdx + threadIdx.y) * imgPixels) * numImages + imgIdx; inputs += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; denoms += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; outGrads += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; target += ((blockFilterIdx + threadIdx.y) * imgPixels + blockPx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int sy = startY; sy < endY; sy++) { for (int sx = startX; sx < endX; sx++) { const int outPx = sy * imgSize + sx; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += acts[(f * B_Y * imgPixels + outPx) * numImages + i * B_X]; } } } } } // outGrads += blockPx * numImages; if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float inp = inputs[(f * B_Y * imgPixels) * numImages + i * B_X]; const float out = outGrads[(f * B_Y * imgPixels) * numImages + i * B_X]; const float den = denoms[(f * B_Y * imgPixels) * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, 
-powScale); target[f * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * target[f * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } /* * Block size 16xB_X * blockIdx.x determines 4x4 pixel.x region, image idx in batches of B_X*imgsPerThread * blockIdx.y determines 4x4 pixel.y region, filter idx in batches of filtersPerThread * * So each block does 4x4 region for some number of images/filters. * * threadIdx.x determines img idx * threadIdx.y determines pixel idx * * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * B_X one of 8, 16, 32 * imgsPerThread one of 1, 2, 4, 8, 16 * * B_XximgsPerThread MUST be divisible by 32. * Number of filters MUST be divisible by filtersPerThread. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false * numFilters must be divisible by filtersPerThread * * Final write-out will not be fully coalesced unless B_X is 32. But there's a lot more * reading than writing here, and the reading is all coalesced, so it should be OK. */ template<int B_X, int imgsPerThread, int filtersPerThread, bool add, bool checkCaseBounds> __global__ void kRNormUndo2(float* outGrads, float* denoms, float* inputs, float* acts, float* target, const int imgSize, const int numFilters, const int numImages, const int sizeX, const float powScale, const float scaleTargets, const float scaleOutputs) { __shared__ float shActs[filtersPerThread][B_X*imgsPerThread]; const int imgPixels = imgSize * imgSize; const int numImgBlocks = DIVUP(numImages, B_X*imgsPerThread); const int numFilterBlocks = numFilters/(filtersPerThread); const int blockPxX = 4*(blockIdx.x / numImgBlocks); const int blockPxY = 4*(blockIdx.y / numFilterBlocks); const int blockImgIdx = (blockIdx.x % numImgBlocks) * B_X * imgsPerThread; const int blockFilterIdx = (blockIdx.y % numFilterBlocks) * filtersPerThread; const int tidx = threadIdx.y * B_X + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; const int startPxX = MAX(0, -DIVUP(sizeX,2) + blockPxX + 1); const int startPxY = MAX(0, -DIVUP(sizeX,2) + blockPxY + 1); const int endPxX = MIN(imgSize, blockPxX + sizeX/2 + 4); const int endPxY = MIN(imgSize, blockPxY + sizeX/2 + 4); const int myPxX = blockPxX + threadIdx.y % 4; const int myPxY = blockPxY + threadIdx.y / 4; const int myPxIdx = myPxY * imgSize + myPxX; // const bool doWork = myPxX < imgSize && myPxY < imgSize; const int myStartPxY = -DIVUP(sizeX,2) + myPxY + 1; const int myStartPxX = -DIVUP(sizeX,2) + myPxX + 1; const int myEndPxY = myPxY + sizeX/2 + 1; const int myEndPxX = myPxX + sizeX/2 + 1; const int imgIdx = blockImgIdx + threadIdx.x; acts += (blockFilterIdx + loadY) * imgPixels * numImages + blockImgIdx + loadX; denoms += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; inputs += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; outGrads += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; target += (blockFilterIdx * imgPixels + myPxIdx) * numImages + imgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for (int f = 0; f < filtersPerThread; f++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[f][i] = 0; } } for (int y = startPxY; y < endPxY; y++) { const bool isInY = y >= myStartPxY && y < myEndPxY; for (int x = startPxX; x < endPxX; x++) { const int px = y * imgSize + x; // All the threads load a pixel from 
memory #pragma unroll for (int ly = 0; ly < filtersPerThread; ly += B_X/2) { if (filtersPerThread % (B_X/2) == 0 || ly + loadY < filtersPerThread) { #pragma unroll for (int lx = 0; lx < B_X*imgsPerThread; lx += 32) { if (!checkCaseBounds || lx + loadX + blockImgIdx < numImages) { shActs[ly + loadY][lx + loadX] = acts[(ly * imgPixels + px) * numImages + lx]; } } } } __syncthreads(); // Each row of threads decides if it's interested in this pixel if (isInY && x >= myStartPxX && x < myEndPxX) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { prod[f][i] += shActs[f][threadIdx.x + i * B_X]; } } } } __syncthreads(); } } acts -= (loadY * imgPixels - myPxIdx) * numImages + loadX; acts += threadIdx.x; if (myPxX < imgSize && myPxY < imgSize) { if (!add) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = prod[f][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || imgIdx + i * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { const float out = outGrads[f * imgPixels * numImages + i * B_X]; const float den = denoms[f * imgPixels * numImages + i * B_X]; const float inp = inputs[f * imgPixels * numImages + i * B_X]; prod[f][i] = inp * prod[f][i] + out * __powf(den, -powScale); target[f * imgPixels * numImages + i * B_X] = scaleTargets * target[f * imgPixels * numImages + i * B_X] + scaleOutputs * prod[f][i]; } } } } } } void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX) { convLocalMaxUndo(images, maxGrads, maxActs, target, subsX, startX, strideX, outputsX, 0, 1); } /* * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images.getNumCols(); int numFilters = maxGrads.getNumRows() / outputs; int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(maxGrads.getNumRows() == numFilters * outputs); assert(maxGrads.getNumCols() == numImages); assert(!images.isTrans()); assert(!target.isTrans()); assert(!maxGrads.isTrans()); assert(!maxActs.isTrans()); assert(images.isContiguous()); assert(maxGrads.isContiguous()); assert(maxActs.isContiguous()); assert(maxGrads.isSameDims(maxActs)); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(images); int checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { 
kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, threads>>>(images.getDevData(), maxGrads.getDevData(), maxActs.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } /* * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalMaxUndoCu(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images->size[0]; int numFilters = maxGrads->size[1] / outputs; int imgPixels = images->size[1] / numFilters; assert(images->size[1] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(maxGrads->size[1] == numFilters * outputs); assert(maxGrads->size[0] == numImages); /* assert(!images.isTrans()); assert(!target.isTrans()); assert(!maxGrads.isTrans()); assert(!maxActs.isTrans()); assert(images.isContiguous()); assert(maxGrads.isContiguous()); assert(maxActs.isContiguous()); assert(maxGrads.isSameDims(maxActs)); */ assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); //target.resize(images); int checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, threads>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } cutilCheckMsg("convLocalMaxUndo: kernel execution failed"); } void 
convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize) { convLocalAvgUndo(avgGrads, target, subsX, startX, strideX, outputsX, imgSize, 0, 1); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndo(NVMatrix& avgGrads, NVMatrix& target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads.getNumCols(); int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads.getNumRows() / outputs; assert(avgGrads.getNumRows() == numFilters * outputs); assert(!target.isTrans()); assert(!avgGrads.isTrans()); assert(avgGrads.isContiguous()); assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); target.resize(numFilters * imgPixels, numImages); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 4)) * imgSize); int checkCaseBounds = numImages % 128 != 0; if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, true><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, false><<<blocks, threads>>>(avgGrads.getDevData(), target.getDevData(), imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } cutilCheckMsg("convLocalAvgUndo: kernel execution failed"); } /* * avgGrads: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalAvgUndoCu(cudamat* avgGrads, cudamat* target, int subsX, int startX, int strideX, int outputsX, int imgSize, float scaleTargets, float scaleOutput) { int numImages = avgGrads->size[0]; int outputs = outputsX * outputsX; int imgPixels = imgSize * imgSize; int numFilters = avgGrads->size[1] / outputs; cout << numImages << " " << outputs << " " << imgPixels << " " << numFilters << endl; assert(avgGrads->size[1] == numFilters * outputs); /* assert(!target.isTrans()); assert(!avgGrads.isTrans()); assert(avgGrads.isContiguous()); */ assert(numFilters % 16 == 0); assert(numImages % 128 == 0); assert(strideX <= subsX); //target.resize(numFilters * imgPixels, numImages); dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 4)) * imgSize); int checkCaseBounds = numImages % 128 != 0; if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, true><<<blocks, threads>>>(avgGrads->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, true><<<blocks, threads>>>(avgGrads->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalAvgUndo<4, 32, 4, 4, false, 
false><<<blocks, threads>>>(avgGrads->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalAvgUndo<4, 32, 4, 4, true, false><<<blocks, threads>>>(avgGrads->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } cutilCheckMsg("convLocalAvgUndo: kernel execution failed"); } void convResponseNorm(NVMatrix& images, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale); } /* * images: (numFilters, imgPixels, numImages) * meanDiffs: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) (out) * target: (numFilters, imgPixels, numImages) (out) */ void convContrastNorm(NVMatrix& images, NVMatrix& meanDiffs, NVMatrix& denoms, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale) { int numImages = images.getNumCols(); int imgPixels = images.getNumRows() / numFilters; assert(images.getNumRows() == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(meanDiffs.isSameDims(images)); assert(!meanDiffs.isTrans()); assert(!images.isTrans()); assert(images.isContiguous()); assert(meanDiffs.isContiguous()); assert(numFilters % 16 == 0 || numFilters <= 8); target.resize(images); denoms.resize(images); if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) int imgsPerThread = 8; int filtersPerThread = 4; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, true>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm2<8, 8, 4, false>, cudaFuncCachePreferL1); // L1 faster here kCNorm2<8, 8, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } else { bool checkCaseBounds = numImages % 128 != 0; if (numFilters <= 8) { dim3 threads(128); dim3 blocks(DIVUP(numImages,128) * imgSize, imgSize); if (numFilters == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 1, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 1, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), 
target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 2, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 3) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 3, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 3, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 4, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 4, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 5) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 5, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 5, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 6) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 6, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 6, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 7) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 7, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 7, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } else if (numFilters == 8) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, true>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), 
target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_fewfilter<1, 8, false>, cudaFuncCachePreferL1); kCNorm_fewfilter<1, 8, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numImages, sizeX, addScale, powScale); } } } else { dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, true>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, true><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } else { cudaFuncSetCacheConfig(kCNorm_manyfilter<4, 32, 4, 2, false>, cudaFuncCachePreferL1); kCNorm_manyfilter<4, 32, 4, 2, false><<<blocks, threads>>>(images.getDevData(), meanDiffs.getDevData(), denoms.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, addScale, powScale); } } } cutilCheckMsg("convResponseNorm: kernel execution failed"); } void convContrastNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& meanDiffs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } /* * outGrads: (numFilters, imgPixels, numImages) * denoms: (numFilters, imgPixels, numImages) * inputs: (numFilters, imgPixels, numImages) * acts: (numFilters, imgPixels, numImages) * target: (numFilters, imgPixels, numImages) * * THIS WILL OVERWRITE THE ACTS MATRIX. */ void convResponseNormUndo(NVMatrix& outGrads, NVMatrix& denoms, NVMatrix& inputs, NVMatrix& acts, NVMatrix& target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads.getNumCols(); int imgPixels = outGrads.getNumRows() / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(outGrads.getNumRows() == numFilters * imgPixels); assert(denoms.isSameDims(outGrads)); assert(acts.isSameDims(denoms)); assert(!denoms.isTrans()); assert(!outGrads.isTrans()); assert(!acts.isTrans()); assert(!target.isTrans()); assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); target.resize(outGrads); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads.getNumElements(),(threads.x * prelimEltsPerThread)))); kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts.getDevData(), denoms.getDevData(), outGrads.getDevData(), outGrads.getNumElements(), -2*addScale*powScale); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) 
int imgsPerThread = 8; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { bool checkCaseBounds = numImages % 128 != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*2) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(outGrads.getDevData(), denoms.getDevData(), inputs.getDevData(), acts.getDevData(), target.getDevData(), imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } cutilCheckMsg("kRNormUndo: kernel execution failed"); }
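/*
 * Illustrative usage sketch: a minimal, hypothetical wrapper showing how the
 * convLocalMaxUndo entry point defined above is typically invoked for a
 * max-pooling backward pass. The wrapper name and its argument grouping are
 * assumptions for illustration only; the matrix layouts and preconditions
 * follow the comments on convLocalMaxUndo in this file.
 */
static void exampleLocalMaxUndo(NVMatrix& images, NVMatrix& maxGrads, NVMatrix& maxActs,
                                NVMatrix& imgGrads,
                                int subsX, int startX, int strideX, int outputsX) {
    // images:   (numFilters, imgPixels, numImages)  -- input of the pooling layer
    // maxActs:  (numFilters, numOutputs, numImages) -- forward max-pooling output
    // maxGrads: (numFilters, numOutputs, numImages) -- gradient w.r.t. maxActs
    // Preconditions enforced by convLocalMaxUndo: numFilters % 16 == 0, strideX <= subsX.
    // imgGrads is resized to match images inside convLocalMaxUndo; each output gradient
    // is routed back only to the input position(s) that produced the forward maximum,
    // using scaleTargets = 0 and scaleOutput = 1 (plain overwrite of imgGrads).
    convLocalMaxUndo(images, maxGrads, maxActs, imgGrads,
                     subsX, startX, strideX, outputsX, 0, 1);
}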
0a9861d9f4b04eaa66ec158c23a7ddf76f74aad2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include "DifferentialEvolutionRunner.h" #include <cudaDefs.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <functional> #include <limits> #include <cmath> using namespace std; #pragma region Random hiprandState_t *rs = nullptr; //DEVICE DATA POINTER - Random number states constexpr unsigned int TPB = 256; constexpr unsigned int MBPTB = 4; float *dRandomFloats = nullptr; __global__ void initRandomStates(hiprandState_t *rs, const unsigned long seed) { unsigned int offset = blockIdx.x * blockDim.x + threadIdx.x; hiprandState_t *r = rs + offset; //Thread data pointer offset hiprand_init(seed, offset, 0, r); } __global__ void initRandomFloats(hiprandState_t* __restrict__ rs, const unsigned int length, float* data) { unsigned int offset = threadIdx.x + blockIdx.x * TPB * MBPTB; unsigned int rsOffset = threadIdx.x + blockIdx.x * TPB; hiprandState_t *trs = &rs[rsOffset]; #pragma unroll MBPTB for (unsigned int i = 0; i < MBPTB; i++) { if (offset >= length) return; data[offset] = hiprand_uniform(trs); offset += TPB; } } void randomInit() { constexpr unsigned int length = 1 << 20; constexpr unsigned int sizeInBytes = length * sizeof(float); constexpr unsigned long seed = 42; KernelSetting ksRandom; ksRandom.blockSize = TPB; ksRandom.noChunks = MBPTB; ksRandom.dimBlock = dim3(TPB, 1, 1); ksRandom.dimGrid = getNumberOfParts(length, TPB * MBPTB); ksRandom.print(); //Random Sates checkCudaErrors(hipMalloc((void**)&rs, ksRandom.dimGrid.x * ksRandom.dimBlock.x * sizeof(hiprandState_t))); initRandomStates << <ksRandom.dimGrid, ksRandom.dimBlock >> > (rs, seed); //Init random numbers checkCudaErrors(hipMalloc((void**)&dRandomFloats, sizeInBytes)); initRandomFloats << <ksRandom.dimGrid, ksRandom.dimBlock >> > (rs, length, dRandomFloats); //constexpr size_t headLength = 100; //checkDeviceMatrix<float>(dRandomFloats, headLength * sizeof(float), 1, headLength, "%f ", "Device randomFloats (0 - 100)"); //checkDeviceMatrix<float>(dRandomFloats, sizeInBytes, 1, length, "%f ", "Device randomFloats"); } void randomCleanup() { SAFE_DELETE_CUDA(dRandomFloats); SAFE_DELETE_CUDA(rs); } #pragma endregion constexpr size_t closest_power_of_2(const size_t x) { size_t v = x; v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } namespace Project { // dimension of problem (number of parameters) constexpr size_t D = 1000; // x // Population size constexpr size_t NP = 1000; // y // differential weight, <0,2> constexpr float F = 0.5f; // crossover probability, <0,1> constexpr float CR = 0.2f; constexpr size_t Iterations = 30; // Parallel reduce constexpr size_t NP2 = closest_power_of_2(NP); typedef float Type; template<typename T> __device__ __host__ T FitnessFunc(T *x, size_t size, size_t offset = 0); template<typename T> __global__ void KernelRandomPopulation(T *population, float *randoms, size_t offset) { int tx = threadIdx.x; // D int ty = blockIdx.x; // NP if (!(tx < D && ty < NP)) return; int index = ty * D + tx; //printf("tx=%d, ty=%d, index=%d\n", tx, ty, index); population[index] = randoms[offset + index] * 10 - 5; //population[index] = index; } template<typename T> __global__ void KernelNextGeneration(T* __restrict__ dInputPopulation, T* dOutputPopulation, float* dRandoms, size_t offset) { int tx = threadIdx.x; // D int ty = blockIdx.x; // NP if (!(tx < D && ty < NP)) return; int index = ty * D + tx; // TODO: tx=0 calculates these values and sets them in 
shared memory // 3 random indexes - unique for each ty int i[3] = { (int)(dRandoms[offset + ty * 4 + 0] * NP), (int)(dRandoms[offset + ty * 4 + 1] * NP), (int)(dRandoms[offset + ty * 4 + 2] * NP) }; // TODO: make sure that array of 'i' is unique // guaranted copy parameter int j = (int)(dRandoms[offset + ty * 4 + 3] * NP); // random for all float r = dRandoms[offset + NP * D * 4 + index]; //printf("index=%d, x=%d, y=%d, i=[%d, %d, %d], j=%d, r=%f \n", index, tx, ty, i[0], i[1], i[2], j, r); if (tx == j || r < CR) { dOutputPopulation[index] = dInputPopulation[index]; } else { T a = dInputPopulation[ty * D + i[0]]; T b = dInputPopulation[ty * D + i[1]]; T c = dInputPopulation[ty * D + i[2]]; dOutputPopulation[index] = c + F * (a - b); } __shared__ T sOldFitnesses[NP]; __shared__ T sNewFitnesses[NP]; __syncthreads(); if (tx == 0) { sOldFitnesses[ty] = FitnessFunc(dInputPopulation, D, ty * D); sNewFitnesses[ty] = FitnessFunc(dOutputPopulation, D, ty * D); //printf("ty=%d, oldFitness=%f, newFitness=%f\n", ty, sOldFitnesses[ty], sNewFitnesses[ty]); } __syncthreads(); // if fitness is not better then keep original values if (sNewFitnesses[ty] > sOldFitnesses[ty]) { //if (tx == 0) printf("no better value at ty=%d, oldFitness=%f, newFitness=%f\n", ty, sOldFitnesses[ty], sNewFitnesses[ty]); //printf("rollback value at %d, index=%d, from=%f, to=%f\n", ty, index, dOutputPopulation[index], dInputPopulation[index]); dOutputPopulation[index] = dInputPopulation[index]; } else { //if (tx == 0 && sNewFitnesses[ty] < sOldFitnesses[ty]) // printf("better fitness found at ty=%d, oldFitness=%f, newFitness=%f\n", ty, sOldFitnesses[ty], sNewFitnesses[ty]); } } template<typename T> __global__ void KernelPrintFitnesses(T* __restrict__ dPopulation) { int tx = blockIdx.x * blockDim.x + threadIdx.x; if (tx >= NP) return; T value = FitnessFunc(dPopulation, D, tx * D); printf("tx=%d, fitness=%f\n", tx, value); } constexpr int int_max = numeric_limits<int>().max(); constexpr Type type_max = numeric_limits<Type>().max(); template<typename T> __global__ void KernelParallelReduce(T* __restrict__ dPopulation, size_t* __restrict__ dIndexOfBest) { __shared__ T sFitnesses[NP2]; __shared__ int sIndexes[NP2]; int tid = threadIdx.x; if (tid >= NP2) return; // global memory -> shared memory if (tid >= NP) { sIndexes[tid] = int_max; sFitnesses[tid] = type_max; } else { sIndexes[tid] = tid; sFitnesses[tid] = FitnessFunc(dPopulation, D, tid * D); } __syncthreads(); //printf("tid=%d, index=%d, fitness=%f\n", tid, sIndexes[tid], sFitnesses[tid]); if (NP2 >= 2048) // compile time { if (tid >= 1024) return; if (sFitnesses[tid] > sFitnesses[tid + 1024]) { sFitnesses[tid] = sFitnesses[tid + 1024]; sIndexes[tid] = sIndexes[tid + 1024]; } __syncthreads(); } if (NP2 >= 1024) // compile time { if (tid >= 512) return; if (sFitnesses[tid] > sFitnesses[tid + 512]) { sFitnesses[tid] = sFitnesses[tid + 512]; sIndexes[tid] = sIndexes[tid + 512]; } __syncthreads(); } if (NP2 >= 512) // compile time { if (tid >= 256) return; if (sFitnesses[tid] > sFitnesses[tid + 256]) { sFitnesses[tid] = sFitnesses[tid + 256]; sIndexes[tid] = sIndexes[tid + 256]; } __syncthreads(); } if (NP2 >= 256) // compile time { if (tid >= 128) return; if (sFitnesses[tid] > sFitnesses[tid + 128]) { sFitnesses[tid] = sFitnesses[tid + 128]; sIndexes[tid] = sIndexes[tid + 128]; } __syncthreads(); } if (NP2 >= 128) // compile time { if (tid >= 64) return; if (sFitnesses[tid] > sFitnesses[tid + 64]) { sFitnesses[tid] = sFitnesses[tid + 64]; sIndexes[tid] = sIndexes[tid + 64]; } 
__syncthreads(); } // tid < 32 if (NP2 >= 64) // compile time { if (tid >= 32) return; if (sFitnesses[tid] > sFitnesses[tid + 32]) { sFitnesses[tid] = sFitnesses[tid + 32]; sIndexes[tid] = sIndexes[tid + 32]; } } if (NP2 >= 32) // compile time { if (tid >= 16) return; if (sFitnesses[tid] > sFitnesses[tid + 16]) { sFitnesses[tid] = sFitnesses[tid + 16]; sIndexes[tid] = sIndexes[tid + 16]; } } if (NP2 >= 16) // compile time { if (tid >= 8) return; if (sFitnesses[tid] > sFitnesses[tid + 8]) { sFitnesses[tid] = sFitnesses[tid + 8]; sIndexes[tid] = sIndexes[tid + 8]; } } if (NP2 >= 8) // compile time { if (tid >= 4) return; if (sFitnesses[tid] > sFitnesses[tid + 4]) { sFitnesses[tid] = sFitnesses[tid + 4]; sIndexes[tid] = sIndexes[tid + 4]; } } if (NP2 >= 4) // compile time { if (tid >= 2) return; if (sFitnesses[tid] > sFitnesses[tid + 2]) { sFitnesses[tid] = sFitnesses[tid + 2]; sIndexes[tid] = sIndexes[tid + 2]; } } if (NP2 >= 2) // compile time { if (tid >= 1) return; if (sFitnesses[tid] > sFitnesses[tid + 1]) { sFitnesses[tid] = sFitnesses[tid + 1]; sIndexes[tid] = sIndexes[tid + 1]; } } //if (tid == 0) *dIndexOfBest = sIndexes[0]; printf("Best fitness index=%d, fitness=%f\n", sIndexes[0], sFitnesses[0]); //for (int i = 0; i < NP; i++) //printf("i=%d, index=%d, fitness=%f\n", i, sIndexes[i], sFitnesses[i]); } template<typename T> void PrintPopulationWithFitnesses(T* dPopulation) { T *population = nullptr; checkCudaErrors(hipHostMalloc((void**)&population, NP * D * sizeof(T), hipHostMallocWriteCombined)); checkCudaErrors(hipMemcpy(population, dPopulation, NP * D * sizeof(T), hipMemcpyKind::hipMemcpyDeviceToHost)); for (size_t i = 0; i < NP; i++) { printf("i=%u, params=[", i); for (size_t d = 0; d < D; d++) { if (d != 0) printf(", "); printf("%f", population[i * D + d]); } T fitness = FitnessFunc(population, D, i * D); printf("] fitness=%f\n", fitness); } hipHostFree(population); } template<typename T> void PrintPopulationWithFitnesses(T* dNewPopulation, T* dOldPopulation) { T *newPopulation = nullptr; checkCudaErrors(hipHostMalloc((void**)&newPopulation, NP * D * sizeof(T), hipHostMallocWriteCombined)); checkCudaErrors(hipMemcpy(newPopulation, dNewPopulation, NP * D * sizeof(T), hipMemcpyKind::hipMemcpyDeviceToHost)); T *oldPopulation = nullptr; checkCudaErrors(hipHostMalloc((void**)&oldPopulation, NP * D * sizeof(T), hipHostMallocWriteCombined)); checkCudaErrors(hipMemcpy(oldPopulation, dOldPopulation, NP * D * sizeof(T), hipMemcpyKind::hipMemcpyDeviceToHost)); for (size_t i = 0; i < NP; i++) { printf("i=%u, params=[", i); for (size_t d = 0; d < D; d++) { if (d != 0) printf(", "); printf("%f", newPopulation[i * D + d]); } T newFitness = FitnessFunc(newPopulation, D, i * D); T oldFitness = FitnessFunc(oldPopulation, D, i * D); printf("] newFitness=%f, oldFitness=%f\n", newFitness, oldFitness); } hipHostFree(newPopulation); hipHostFree(oldPopulation); } template<typename T> void PrintPopulationsWithFitnesses(T* dOldPopulation, T* dNewPopulation) { T *oldPopulation = nullptr; checkCudaErrors(hipHostMalloc((void**)&oldPopulation, NP * D * sizeof(T), hipHostMallocWriteCombined)); checkCudaErrors(hipMemcpy(oldPopulation, dOldPopulation, NP * D * sizeof(T), hipMemcpyKind::hipMemcpyDeviceToHost)); T *newPopulation = nullptr; checkCudaErrors(hipHostMalloc((void**)&newPopulation, NP * D * sizeof(T), hipHostMallocWriteCombined)); checkCudaErrors(hipMemcpy(newPopulation, dNewPopulation, NP * D * sizeof(T), hipMemcpyKind::hipMemcpyDeviceToHost)); for (size_t i = 0; i < NP; i++) { printf("i=%u\n", i); 
printf("old = ["); for (size_t d = 0; d < D; d++) { if (d != 0) printf(", "); printf("%f", oldPopulation[i * D + d]); } T oldFitness = FitnessFunc(oldPopulation, D, i * D); printf("] oldFitness=%f\n", oldFitness); printf("new = ["); for (size_t d = 0; d < D; d++) { if (d != 0) printf(", "); printf("%f", newPopulation[i * D + d]); } T newFitness = FitnessFunc(newPopulation, D, i * D); printf("] newFitness=%f\n", newFitness); } hipHostFree(newPopulation); hipHostFree(oldPopulation); } template<typename T> T* DifferentialEvolutionCalculate() { KernelSetting ksDE; ksDE.dimBlock = dim3(D); ksDE.blockSize = D; ksDE.dimGrid = dim3(NP); ksDE.print(); KernelSetting ksPrintFitnesses; constexpr size_t printBlockSize = 256; ksPrintFitnesses.dimBlock = dim3(printBlockSize); ksPrintFitnesses.blockSize = printBlockSize; ksPrintFitnesses.dimGrid = dim3(getNumberOfParts(NP, printBlockSize)); ksPrintFitnesses.print(); KernelSetting ksParallelReduce; ksParallelReduce.dimBlock = dim3(NP2); ksParallelReduce.blockSize = NP2; ksParallelReduce.dimGrid = dim3(); ksParallelReduce.print(); size_t randomFloatsOffset = 0; // allocate population matrices T *dPopulation = nullptr; // input population T *dPopulation2 = nullptr; // output population checkCudaErrors(hipMalloc((void**)&dPopulation, D * NP * sizeof(T))); checkCudaErrors(hipMalloc((void**)&dPopulation2, D * NP * sizeof(T))); // generate initial population checkDeviceMatrix(dPopulation, D * sizeof(T), 2, D, "%f ", "dPopulation - initial"); KernelRandomPopulation << <ksDE.dimGrid, ksDE.dimBlock >> > (dPopulation, dRandomFloats, randomFloatsOffset); checkError(); //checkDeviceMatrix(dPopulation, D * sizeof(T), NP, D, "%f ", "dPopulation - initial"); checkDeviceMatrix(dPopulation, D * sizeof(T), 2, D, "%f ", "dPopulation - initial"); //randomFloatsOffset += NP * D; randomFloatsOffset++; hipMemset(dPopulation2, 0, NP * D * sizeof(T)); //checkDeviceMatrix(dPopulation2, D * sizeof(T), NP, D, "%f ", "dPopulation2 - initial"); size_t *dIndexOfBest = nullptr; checkCudaErrors(hipMalloc((void**)&dIndexOfBest, sizeof(size_t))); size_t *hIndexOfBest = new size_t; //printf("initial fitnesses\n"); //KernelPrintFitnesses << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation); KernelParallelReduce << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation, dIndexOfBest); checkError(); checkCudaErrors(hipMemcpy(hIndexOfBest, dIndexOfBest, sizeof(size_t), hipMemcpyKind::hipMemcpyDeviceToHost)); //PrintPopulationWithFitnesses(dPopulation); for (size_t i = 0; i < Iterations; i++) { printf("ITERATION = %u\n", i); // Generate next generation KernelNextGeneration << <ksDE.dimGrid, ksDE.dimBlock >> > (dPopulation, dPopulation2, dRandomFloats, randomFloatsOffset); checkError(); KernelParallelReduce << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation2, dIndexOfBest); checkError(); checkCudaErrors(hipMemcpy(hIndexOfBest, dIndexOfBest, sizeof(size_t), hipMemcpyKind::hipMemcpyDeviceToHost)); printf("current best fitness is at %u\n", *hIndexOfBest); //PrintPopulationsWithFitnesses(dPopulation, dPopulation2); //KernelPrintFitnesses << <ksPrintFitnesses.dimGrid, ksPrintFitnesses.dimBlock >> > (dPopulation2); //checkDeviceMatrix(dPopulation, D * sizeof(T), NP, D, "%f ", "dPopulation"); //checkDeviceMatrix(dPopulation2, D * sizeof(T), NP, D, "%f ", "dPopulation2"); //randomFloatsOffset += NP * 3; // Each candidate (NP) has 3 random indexes (indexes of candidates for mutation) //randomFloatsOffset += NP; // One guaranteed random index for 
each candidate (NP) //randomFloatsOffset += NP * D; // For all candidates and its params (CR) randomFloatsOffset++; auto tmp = dPopulation; dPopulation = dPopulation2; dPopulation2 = tmp; } printf("final fitnesses\n"); //KernelPrintFitnesses << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation); //checkDeviceMatrix(dPopulation, D * sizeof(T), NP, D, "%f ", "dPopulation - final"); KernelParallelReduce << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation, dIndexOfBest); checkCudaErrors(hipMemcpy(hIndexOfBest, dIndexOfBest, sizeof(size_t), hipMemcpyKind::hipMemcpyDeviceToHost)); T *hx = new T[D]; T *dPopulationPtr = dPopulation + (*hIndexOfBest * D); checkCudaErrors(hipMemcpy(hx, dPopulationPtr, D * sizeof(T), hipMemcpyKind::hipMemcpyDeviceToHost)); SAFE_DELETE_CUDA(dPopulation); SAFE_DELETE_CUDA(dPopulation2); SAFE_DELETE_CUDA(dIndexOfBest); SAFE_DELETE(hIndexOfBest); return hx; } void run() { randomInit(); Type *result = DifferentialEvolutionCalculate<Type>(); printf("["); for (size_t i = 0; i < D; i++) { if (i != 0) printf(", "); printf("%f", result[i]); } printf("] = %f", FitnessFunc(result, D)); SAFE_DELETE_ARRAY(result); randomCleanup(); } template<typename T> inline __device__ __host__ T SphereFunction(T *x, size_t size, size_t offset) { T result = 0; T *ptr = x + offset; for (size_t i = 0; i < size; i++, ptr++) result += *ptr * *ptr; return result; } template<typename T> inline __device__ __host__ T RastriginFunction(T *x, size_t size, size_t offset) { constexpr T A = 10; constexpr double PI = 3.141592653589793238463; T result = A * size; T *ptr = x + offset; for (size_t i = 0; i < size; i++, ptr++) result += (*ptr * *ptr - A * cos(2 * PI * *ptr)); return result; } template<typename T> T FitnessFunc(T *x, size_t size, size_t offset) { //return SphereFunction(x, size, offset); return RastriginFunction(x, size, offset); } }
0a9861d9f4b04eaa66ec158c23a7ddf76f74aad2.cu
#pragma once #include "DifferentialEvolutionRunner.h" #include <cudaDefs.h> #include <curand.h> #include <curand_kernel.h> #include <functional> #include <limits> #include <cmath> using namespace std; #pragma region Random curandState *rs = nullptr; //DEVICE DATA POINTER - Random number states constexpr unsigned int TPB = 256; constexpr unsigned int MBPTB = 4; float *dRandomFloats = nullptr; __global__ void initRandomStates(curandState *rs, const unsigned long seed) { unsigned int offset = blockIdx.x * blockDim.x + threadIdx.x; curandState *r = rs + offset; //Thread data pointer offset curand_init(seed, offset, 0, r); } __global__ void initRandomFloats(curandState* __restrict__ rs, const unsigned int length, float* data) { unsigned int offset = threadIdx.x + blockIdx.x * TPB * MBPTB; unsigned int rsOffset = threadIdx.x + blockIdx.x * TPB; curandState *trs = &rs[rsOffset]; #pragma unroll MBPTB for (unsigned int i = 0; i < MBPTB; i++) { if (offset >= length) return; data[offset] = curand_uniform(trs); offset += TPB; } } void randomInit() { constexpr unsigned int length = 1 << 20; constexpr unsigned int sizeInBytes = length * sizeof(float); constexpr unsigned long seed = 42; KernelSetting ksRandom; ksRandom.blockSize = TPB; ksRandom.noChunks = MBPTB; ksRandom.dimBlock = dim3(TPB, 1, 1); ksRandom.dimGrid = getNumberOfParts(length, TPB * MBPTB); ksRandom.print(); //Random Sates checkCudaErrors(cudaMalloc((void**)&rs, ksRandom.dimGrid.x * ksRandom.dimBlock.x * sizeof(curandState))); initRandomStates << <ksRandom.dimGrid, ksRandom.dimBlock >> > (rs, seed); //Init random numbers checkCudaErrors(cudaMalloc((void**)&dRandomFloats, sizeInBytes)); initRandomFloats << <ksRandom.dimGrid, ksRandom.dimBlock >> > (rs, length, dRandomFloats); //constexpr size_t headLength = 100; //checkDeviceMatrix<float>(dRandomFloats, headLength * sizeof(float), 1, headLength, "%f ", "Device randomFloats (0 - 100)"); //checkDeviceMatrix<float>(dRandomFloats, sizeInBytes, 1, length, "%f ", "Device randomFloats"); } void randomCleanup() { SAFE_DELETE_CUDA(dRandomFloats); SAFE_DELETE_CUDA(rs); } #pragma endregion constexpr size_t closest_power_of_2(const size_t x) { size_t v = x; v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } namespace Project { // dimension of problem (number of parameters) constexpr size_t D = 1000; // x // Population size constexpr size_t NP = 1000; // y // differential weight, <0,2> constexpr float F = 0.5f; // crossover probability, <0,1> constexpr float CR = 0.2f; constexpr size_t Iterations = 30; // Parallel reduce constexpr size_t NP2 = closest_power_of_2(NP); typedef float Type; template<typename T> __device__ __host__ T FitnessFunc(T *x, size_t size, size_t offset = 0); template<typename T> __global__ void KernelRandomPopulation(T *population, float *randoms, size_t offset) { int tx = threadIdx.x; // D int ty = blockIdx.x; // NP if (!(tx < D && ty < NP)) return; int index = ty * D + tx; //printf("tx=%d, ty=%d, index=%d\n", tx, ty, index); population[index] = randoms[offset + index] * 10 - 5; //population[index] = index; } template<typename T> __global__ void KernelNextGeneration(T* __restrict__ dInputPopulation, T* dOutputPopulation, float* dRandoms, size_t offset) { int tx = threadIdx.x; // D int ty = blockIdx.x; // NP if (!(tx < D && ty < NP)) return; int index = ty * D + tx; // TODO: tx=0 calculates these values and sets them in shared memory // 3 random indexes - unique for each ty int i[3] = { (int)(dRandoms[offset + ty * 4 + 0] * NP), 
(int)(dRandoms[offset + ty * 4 + 1] * NP), (int)(dRandoms[offset + ty * 4 + 2] * NP) }; // TODO: make sure that array of 'i' is unique // guaranted copy parameter int j = (int)(dRandoms[offset + ty * 4 + 3] * NP); // random for all float r = dRandoms[offset + NP * D * 4 + index]; //printf("index=%d, x=%d, y=%d, i=[%d, %d, %d], j=%d, r=%f \n", index, tx, ty, i[0], i[1], i[2], j, r); if (tx == j || r < CR) { dOutputPopulation[index] = dInputPopulation[index]; } else { T a = dInputPopulation[ty * D + i[0]]; T b = dInputPopulation[ty * D + i[1]]; T c = dInputPopulation[ty * D + i[2]]; dOutputPopulation[index] = c + F * (a - b); } __shared__ T sOldFitnesses[NP]; __shared__ T sNewFitnesses[NP]; __syncthreads(); if (tx == 0) { sOldFitnesses[ty] = FitnessFunc(dInputPopulation, D, ty * D); sNewFitnesses[ty] = FitnessFunc(dOutputPopulation, D, ty * D); //printf("ty=%d, oldFitness=%f, newFitness=%f\n", ty, sOldFitnesses[ty], sNewFitnesses[ty]); } __syncthreads(); // if fitness is not better then keep original values if (sNewFitnesses[ty] > sOldFitnesses[ty]) { //if (tx == 0) printf("no better value at ty=%d, oldFitness=%f, newFitness=%f\n", ty, sOldFitnesses[ty], sNewFitnesses[ty]); //printf("rollback value at %d, index=%d, from=%f, to=%f\n", ty, index, dOutputPopulation[index], dInputPopulation[index]); dOutputPopulation[index] = dInputPopulation[index]; } else { //if (tx == 0 && sNewFitnesses[ty] < sOldFitnesses[ty]) // printf("better fitness found at ty=%d, oldFitness=%f, newFitness=%f\n", ty, sOldFitnesses[ty], sNewFitnesses[ty]); } } template<typename T> __global__ void KernelPrintFitnesses(T* __restrict__ dPopulation) { int tx = blockIdx.x * blockDim.x + threadIdx.x; if (tx >= NP) return; T value = FitnessFunc(dPopulation, D, tx * D); printf("tx=%d, fitness=%f\n", tx, value); } constexpr int int_max = numeric_limits<int>().max(); constexpr Type type_max = numeric_limits<Type>().max(); template<typename T> __global__ void KernelParallelReduce(T* __restrict__ dPopulation, size_t* __restrict__ dIndexOfBest) { __shared__ T sFitnesses[NP2]; __shared__ int sIndexes[NP2]; int tid = threadIdx.x; if (tid >= NP2) return; // global memory -> shared memory if (tid >= NP) { sIndexes[tid] = int_max; sFitnesses[tid] = type_max; } else { sIndexes[tid] = tid; sFitnesses[tid] = FitnessFunc(dPopulation, D, tid * D); } __syncthreads(); //printf("tid=%d, index=%d, fitness=%f\n", tid, sIndexes[tid], sFitnesses[tid]); if (NP2 >= 2048) // compile time { if (tid >= 1024) return; if (sFitnesses[tid] > sFitnesses[tid + 1024]) { sFitnesses[tid] = sFitnesses[tid + 1024]; sIndexes[tid] = sIndexes[tid + 1024]; } __syncthreads(); } if (NP2 >= 1024) // compile time { if (tid >= 512) return; if (sFitnesses[tid] > sFitnesses[tid + 512]) { sFitnesses[tid] = sFitnesses[tid + 512]; sIndexes[tid] = sIndexes[tid + 512]; } __syncthreads(); } if (NP2 >= 512) // compile time { if (tid >= 256) return; if (sFitnesses[tid] > sFitnesses[tid + 256]) { sFitnesses[tid] = sFitnesses[tid + 256]; sIndexes[tid] = sIndexes[tid + 256]; } __syncthreads(); } if (NP2 >= 256) // compile time { if (tid >= 128) return; if (sFitnesses[tid] > sFitnesses[tid + 128]) { sFitnesses[tid] = sFitnesses[tid + 128]; sIndexes[tid] = sIndexes[tid + 128]; } __syncthreads(); } if (NP2 >= 128) // compile time { if (tid >= 64) return; if (sFitnesses[tid] > sFitnesses[tid + 64]) { sFitnesses[tid] = sFitnesses[tid + 64]; sIndexes[tid] = sIndexes[tid + 64]; } __syncthreads(); } // tid < 32 if (NP2 >= 64) // compile time { if (tid >= 32) return; if (sFitnesses[tid] > 
sFitnesses[tid + 32]) { sFitnesses[tid] = sFitnesses[tid + 32]; sIndexes[tid] = sIndexes[tid + 32]; } } if (NP2 >= 32) // compile time { if (tid >= 16) return; if (sFitnesses[tid] > sFitnesses[tid + 16]) { sFitnesses[tid] = sFitnesses[tid + 16]; sIndexes[tid] = sIndexes[tid + 16]; } } if (NP2 >= 16) // compile time { if (tid >= 8) return; if (sFitnesses[tid] > sFitnesses[tid + 8]) { sFitnesses[tid] = sFitnesses[tid + 8]; sIndexes[tid] = sIndexes[tid + 8]; } } if (NP2 >= 8) // compile time { if (tid >= 4) return; if (sFitnesses[tid] > sFitnesses[tid + 4]) { sFitnesses[tid] = sFitnesses[tid + 4]; sIndexes[tid] = sIndexes[tid + 4]; } } if (NP2 >= 4) // compile time { if (tid >= 2) return; if (sFitnesses[tid] > sFitnesses[tid + 2]) { sFitnesses[tid] = sFitnesses[tid + 2]; sIndexes[tid] = sIndexes[tid + 2]; } } if (NP2 >= 2) // compile time { if (tid >= 1) return; if (sFitnesses[tid] > sFitnesses[tid + 1]) { sFitnesses[tid] = sFitnesses[tid + 1]; sIndexes[tid] = sIndexes[tid + 1]; } } //if (tid == 0) *dIndexOfBest = sIndexes[0]; printf("Best fitness index=%d, fitness=%f\n", sIndexes[0], sFitnesses[0]); //for (int i = 0; i < NP; i++) //printf("i=%d, index=%d, fitness=%f\n", i, sIndexes[i], sFitnesses[i]); } template<typename T> void PrintPopulationWithFitnesses(T* dPopulation) { T *population = nullptr; checkCudaErrors(cudaHostAlloc((void**)&population, NP * D * sizeof(T), cudaHostAllocWriteCombined)); checkCudaErrors(cudaMemcpy(population, dPopulation, NP * D * sizeof(T), cudaMemcpyKind::cudaMemcpyDeviceToHost)); for (size_t i = 0; i < NP; i++) { printf("i=%u, params=[", i); for (size_t d = 0; d < D; d++) { if (d != 0) printf(", "); printf("%f", population[i * D + d]); } T fitness = FitnessFunc(population, D, i * D); printf("] fitness=%f\n", fitness); } cudaFreeHost(population); } template<typename T> void PrintPopulationWithFitnesses(T* dNewPopulation, T* dOldPopulation) { T *newPopulation = nullptr; checkCudaErrors(cudaHostAlloc((void**)&newPopulation, NP * D * sizeof(T), cudaHostAllocWriteCombined)); checkCudaErrors(cudaMemcpy(newPopulation, dNewPopulation, NP * D * sizeof(T), cudaMemcpyKind::cudaMemcpyDeviceToHost)); T *oldPopulation = nullptr; checkCudaErrors(cudaHostAlloc((void**)&oldPopulation, NP * D * sizeof(T), cudaHostAllocWriteCombined)); checkCudaErrors(cudaMemcpy(oldPopulation, dOldPopulation, NP * D * sizeof(T), cudaMemcpyKind::cudaMemcpyDeviceToHost)); for (size_t i = 0; i < NP; i++) { printf("i=%u, params=[", i); for (size_t d = 0; d < D; d++) { if (d != 0) printf(", "); printf("%f", newPopulation[i * D + d]); } T newFitness = FitnessFunc(newPopulation, D, i * D); T oldFitness = FitnessFunc(oldPopulation, D, i * D); printf("] newFitness=%f, oldFitness=%f\n", newFitness, oldFitness); } cudaFreeHost(newPopulation); cudaFreeHost(oldPopulation); } template<typename T> void PrintPopulationsWithFitnesses(T* dOldPopulation, T* dNewPopulation) { T *oldPopulation = nullptr; checkCudaErrors(cudaHostAlloc((void**)&oldPopulation, NP * D * sizeof(T), cudaHostAllocWriteCombined)); checkCudaErrors(cudaMemcpy(oldPopulation, dOldPopulation, NP * D * sizeof(T), cudaMemcpyKind::cudaMemcpyDeviceToHost)); T *newPopulation = nullptr; checkCudaErrors(cudaHostAlloc((void**)&newPopulation, NP * D * sizeof(T), cudaHostAllocWriteCombined)); checkCudaErrors(cudaMemcpy(newPopulation, dNewPopulation, NP * D * sizeof(T), cudaMemcpyKind::cudaMemcpyDeviceToHost)); for (size_t i = 0; i < NP; i++) { printf("i=%u\n", i); printf("old = ["); for (size_t d = 0; d < D; d++) { if (d != 0) printf(", "); printf("%f", 
oldPopulation[i * D + d]); } T oldFitness = FitnessFunc(oldPopulation, D, i * D); printf("] oldFitness=%f\n", oldFitness); printf("new = ["); for (size_t d = 0; d < D; d++) { if (d != 0) printf(", "); printf("%f", newPopulation[i * D + d]); } T newFitness = FitnessFunc(newPopulation, D, i * D); printf("] newFitness=%f\n", newFitness); } cudaFreeHost(newPopulation); cudaFreeHost(oldPopulation); } template<typename T> T* DifferentialEvolutionCalculate() { KernelSetting ksDE; ksDE.dimBlock = dim3(D); ksDE.blockSize = D; ksDE.dimGrid = dim3(NP); ksDE.print(); KernelSetting ksPrintFitnesses; constexpr size_t printBlockSize = 256; ksPrintFitnesses.dimBlock = dim3(printBlockSize); ksPrintFitnesses.blockSize = printBlockSize; ksPrintFitnesses.dimGrid = dim3(getNumberOfParts(NP, printBlockSize)); ksPrintFitnesses.print(); KernelSetting ksParallelReduce; ksParallelReduce.dimBlock = dim3(NP2); ksParallelReduce.blockSize = NP2; ksParallelReduce.dimGrid = dim3(); ksParallelReduce.print(); size_t randomFloatsOffset = 0; // allocate population matrices T *dPopulation = nullptr; // input population T *dPopulation2 = nullptr; // output population checkCudaErrors(cudaMalloc((void**)&dPopulation, D * NP * sizeof(T))); checkCudaErrors(cudaMalloc((void**)&dPopulation2, D * NP * sizeof(T))); // generate initial population checkDeviceMatrix(dPopulation, D * sizeof(T), 2, D, "%f ", "dPopulation - initial"); KernelRandomPopulation << <ksDE.dimGrid, ksDE.dimBlock >> > (dPopulation, dRandomFloats, randomFloatsOffset); checkError(); //checkDeviceMatrix(dPopulation, D * sizeof(T), NP, D, "%f ", "dPopulation - initial"); checkDeviceMatrix(dPopulation, D * sizeof(T), 2, D, "%f ", "dPopulation - initial"); //randomFloatsOffset += NP * D; randomFloatsOffset++; cudaMemset(dPopulation2, 0, NP * D * sizeof(T)); //checkDeviceMatrix(dPopulation2, D * sizeof(T), NP, D, "%f ", "dPopulation2 - initial"); size_t *dIndexOfBest = nullptr; checkCudaErrors(cudaMalloc((void**)&dIndexOfBest, sizeof(size_t))); size_t *hIndexOfBest = new size_t; //printf("initial fitnesses\n"); //KernelPrintFitnesses << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation); KernelParallelReduce << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation, dIndexOfBest); checkError(); checkCudaErrors(cudaMemcpy(hIndexOfBest, dIndexOfBest, sizeof(size_t), cudaMemcpyKind::cudaMemcpyDeviceToHost)); //PrintPopulationWithFitnesses(dPopulation); for (size_t i = 0; i < Iterations; i++) { printf("ITERATION = %u\n", i); // Generate next generation KernelNextGeneration << <ksDE.dimGrid, ksDE.dimBlock >> > (dPopulation, dPopulation2, dRandomFloats, randomFloatsOffset); checkError(); KernelParallelReduce << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation2, dIndexOfBest); checkError(); checkCudaErrors(cudaMemcpy(hIndexOfBest, dIndexOfBest, sizeof(size_t), cudaMemcpyKind::cudaMemcpyDeviceToHost)); printf("current best fitness is at %u\n", *hIndexOfBest); //PrintPopulationsWithFitnesses(dPopulation, dPopulation2); //KernelPrintFitnesses << <ksPrintFitnesses.dimGrid, ksPrintFitnesses.dimBlock >> > (dPopulation2); //checkDeviceMatrix(dPopulation, D * sizeof(T), NP, D, "%f ", "dPopulation"); //checkDeviceMatrix(dPopulation2, D * sizeof(T), NP, D, "%f ", "dPopulation2"); //randomFloatsOffset += NP * 3; // Each candidate (NP) has 3 random indexes (indexes of candidates for mutation) //randomFloatsOffset += NP; // One guaranteed random index for each candidate (NP) //randomFloatsOffset += NP * D; // For all candidates and 
its params (CR) randomFloatsOffset++; auto tmp = dPopulation; dPopulation = dPopulation2; dPopulation2 = tmp; } printf("final fitnesses\n"); //KernelPrintFitnesses << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation); //checkDeviceMatrix(dPopulation, D * sizeof(T), NP, D, "%f ", "dPopulation - final"); KernelParallelReduce << <ksParallelReduce.dimGrid, ksParallelReduce.dimBlock >> > (dPopulation, dIndexOfBest); checkCudaErrors(cudaMemcpy(hIndexOfBest, dIndexOfBest, sizeof(size_t), cudaMemcpyKind::cudaMemcpyDeviceToHost)); T *hx = new T[D]; T *dPopulationPtr = dPopulation + (*hIndexOfBest * D); checkCudaErrors(cudaMemcpy(hx, dPopulationPtr, D * sizeof(T), cudaMemcpyKind::cudaMemcpyDeviceToHost)); SAFE_DELETE_CUDA(dPopulation); SAFE_DELETE_CUDA(dPopulation2); SAFE_DELETE_CUDA(dIndexOfBest); SAFE_DELETE(hIndexOfBest); return hx; } void run() { randomInit(); Type *result = DifferentialEvolutionCalculate<Type>(); printf("["); for (size_t i = 0; i < D; i++) { if (i != 0) printf(", "); printf("%f", result[i]); } printf("] = %f", FitnessFunc(result, D)); SAFE_DELETE_ARRAY(result); randomCleanup(); } template<typename T> inline __device__ __host__ T SphereFunction(T *x, size_t size, size_t offset) { T result = 0; T *ptr = x + offset; for (size_t i = 0; i < size; i++, ptr++) result += *ptr * *ptr; return result; } template<typename T> inline __device__ __host__ T RastriginFunction(T *x, size_t size, size_t offset) { constexpr T A = 10; constexpr double PI = 3.141592653589793238463; T result = A * size; T *ptr = x + offset; for (size_t i = 0; i < size; i++, ptr++) result += (*ptr * *ptr - A * cos(2 * PI * *ptr)); return result; } template<typename T> T FitnessFunc(T *x, size_t size, size_t offset) { //return SphereFunction(x, size, offset); return RastriginFunction(x, size, offset); } }
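For reference, a minimal host-side sketch of the update the two kernels above are built around: DE/rand/1 mutation v = c + F*(a - b), binomial crossover with rate CR and one guaranteed index j, and greedy selection on the Rastrigin fitness. This is the textbook formulation, not a line-for-line translation of the kernel's index arithmetic; note that KernelNextGeneration as written keeps the parent parameter whenever r < CR or tx == j, i.e. it applies the complementary crossover branch. The RNG and container types below are illustrative assumptions.

#include <cmath>
#include <cstdlib>
#include <vector>

// Rastrigin fitness, matching RastriginFunction above.
static double rastrigin(const std::vector<float>& x) {
    const double A = 10.0, PI = 3.141592653589793238463;
    double r = A * x.size();
    for (float v : x) r += (double)v * v - A * std::cos(2.0 * PI * v);
    return r;
}

// One DE/rand/1/bin step for a single candidate, with donors a, b, c and a
// guaranteed crossover index j (all distinct, chosen at random).
void de_step(std::vector<std::vector<float>>& pop, size_t target,
             size_t a, size_t b, size_t c, size_t j,
             float F, float CR) {
    std::vector<float> trial = pop[target];
    for (size_t d = 0; d < trial.size(); ++d) {
        float r = std::rand() / (float)RAND_MAX;                // stand-in for dRandomFloats
        if (r < CR || d == j)                                    // binomial crossover
            trial[d] = pop[c][d] + F * (pop[a][d] - pop[b][d]);  // mutation v = c + F*(a - b)
    }
    if (rastrigin(trial) <= rastrigin(pop[target]))              // greedy selection
        pop[target] = trial;
}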
6d31e4d6efc5b1bf2f332c9b90c606c816ab959d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>

typedef double FLOAT;

__global__ void sum(FLOAT* x)
{
    int tid = threadIdx.x;
    x[tid] += 1;
}

int main()
{
    int N = 32;
    int nbytes = N * sizeof(FLOAT);

    FLOAT* dx = NULL, * hx = NULL;
    int i;

    /* allocate GPU mem */
    hipMalloc((void**)&dx, nbytes);
    if (dx == NULL) {
        printf("couldn't allocate GPU memory\n");
        return -1;
    }

    /* allocate CPU host mem: memory copy is faster than malloc */
    hx = (FLOAT*)malloc(nbytes);
    if (hx == NULL) {
        printf("couldn't allocate CPU memory\n");
        return -2;
    }

    /* init */
    printf("hx original: \n");
    for (i = 0; i < N; i++) {
        hx[i] = i;
        printf("%g\n", hx[i]);
    }

    /* copy data to GPU */
    hipMemcpy(dx, hx, nbytes, hipMemcpyHostToDevice);

    /* call GPU */
    hipLaunchKernelGGL(sum, dim3(1), dim3(N), 0, 0, dx);

    /* let GPU finish */
    hipDeviceSynchronize();

    /* copy data from GPU */
    hipMemcpy(hx, dx, nbytes, hipMemcpyDeviceToHost);

    printf("\nhx from GPU: \n");
    for (i = 0; i < N; i++) {
        printf("%g\n", hx[i]);
    }

    hipFree(dx);
    free(hx);

    return 0;
}
6d31e4d6efc5b1bf2f332c9b90c606c816ab959d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cuda.h> typedef double FLOAT; __global__ void sum(FLOAT* x) { int tid = threadIdx.x; x[tid] += 1; } int main() { int N = 32; int nbytes = N * sizeof(FLOAT); FLOAT* dx = NULL, * hx = NULL; int i; /* allocate GPU mem */ cudaMalloc((void**)&dx, nbytes); if (dx == NULL) { printf("couldn't allocate GPU memory\n"); return -1; } /* alllocate CPU host mem: memory copy is faster than malloc */ hx = (FLOAT*)malloc(nbytes); if (hx == NULL) { printf("couldn't allocate CPU memory\n"); return -2; } /* init */ printf("hx original: \n"); for (i = 0; i < N; i++) { hx[i] = i; printf("%g\n", hx[i]); } /* copy data to GPU */ cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice); /* call GPU */ sum <<<1, N >>> (dx); /* let GPU finish */ cudaDeviceSynchronize(); /* copy data from GPU */ cudaMemcpy(hx, dx, nbytes, cudaMemcpyDeviceToHost); printf("\nhx from GPU: \n"); for (i = 0; i < N; i++) { printf("%g\n", hx[i]); } cudaFree(dx); free(hx); return 0; }
eefffbfa1b27fdccfa586242c55cbb82e0e851bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Octree.hpp" #include <exception> #include "cuda_help.hpp" #include <iostream> #include <fstream> __global__ void insertOctreePointers(index_node_t ** octreeGPU, int * sizes, index_node_t * memoryGPU) { int offset = 0; for(int i=0;i<threadIdx.x; i++) offset+=sizes[i]; octreeGPU[threadIdx.x] = &memoryGPU[offset]; } /* Lee el Octree de un fichero */ OctreeContainer::OctreeContainer(const char * file_name, int p_maxLevel) { maxLevel = p_maxLevel; /* Read octree from file */ std::ifstream file; try { file.open(file_name, std::ifstream::binary); } catch(...) { std::cerr<<"Octree: error opening octree file"<<std::endl; throw; } int magicWord; file.read((char*)&magicWord, sizeof(magicWord)); if (magicWord != 919278872) { std::cerr<<"Octree: error invalid file format"<<std::endl; throw; } file.read((char*)&isosurface, sizeof(isosurface)); file.read((char*)&dimension, sizeof(dimension)); file.read((char*)&realDim.x, sizeof(realDim.x)); file.read((char*)&realDim.y, sizeof(realDim.y)); file.read((char*)&realDim.z, sizeof(realDim.z)); file.read((char*)&nLevels, sizeof(int)); std::cout<<"Octree de dimension "<<dimension<<"x"<<dimension<<"x"<<dimension<<" niveles "<<nLevels<<std::endl; index_node_t ** octreeCPU = new index_node_t*[nLevels+1]; int * sizesCPU = new int[nLevels+1]; for(int i=nLevels; i>=0; i--) { int numElem = 0; file.read((char*)&numElem, sizeof(numElem)); //std::cout<<"Dimension del node en el nivel "<<i<<" es de "<<powf(2.0,*nLevels-i)<<std::endl; //std::cout<<"Numero de elementos de nivel "<<i<<" "<<numElem<<std::endl; sizesCPU[i] = numElem; octreeCPU[i] = new index_node_t[numElem]; for(int j=0; j<numElem; j++) { index_node_t node = 0; file.read((char*) &node, sizeof(index_node_t)); octreeCPU[i][j]= node; } } file.close(); /* end reading octree from file */ std::cerr<<"Copying octree to GPU"<<std::endl; int total = 0; for(int i=0; i<=maxLevel; i++) total+=sizesCPU[i]; std::cerr<<"Allocating memory octree CUDA octree "<<(maxLevel+1)*sizeof(index_node_t*)/1024.0f/1024.0f<<" MB: "<<std::endl; if (hipSuccess != (hipMalloc(&octree, (maxLevel+1)*sizeof(index_node_t*)))) { std::cerr<<"Octree: error allocating octree in the gpu"<<std::endl; throw; } std::cerr<<"Allocating memory octree CUDA memory "<<total*sizeof(index_node_t)/1024.0f/1024.0f<<" MB: "<<std::endl; if (hipSuccess != (hipMalloc(&memoryGPU, total*sizeof(index_node_t)))) { std::cerr<<"Octree: error allocating octree in the gpu"<<std::endl; throw; } std::cerr<<"Allocating memory octree CUDA sizes "<<(maxLevel+1)*sizeof(int)/1024.0f/1024.0f<<" MB: "<<std::endl; if (hipSuccess != (hipMalloc(&sizes, (maxLevel+1)*sizeof(int)))) { std::cerr<<"Octree: error allocating octree in the gpu"<<std::endl; throw; } /* Compiando sizes */ std::cerr<<"Octree: coping to device the sizes "; if (hipSuccess != (hipMemcpy((void*)sizes, (void*)sizesCPU, (maxLevel+1)*sizeof(int), hipMemcpyHostToDevice))) { std::cerr<<"Fail"<<std::endl; throw; } else std::cerr<<"OK"<<std::endl; /* end sizes */ /* Copying octree */ int offset = 0; for(int i=0; i<=maxLevel; i++) { std::cerr<<"Coping to device level "<<i<<": "; if (hipSuccess != (hipMemcpy((void*)(memoryGPU+offset), (void*)octreeCPU[i], sizesCPU[i]*sizeof(index_node_t), hipMemcpyHostToDevice))) { std::cerr<<"Fail"<<std::endl; throw; } else std::cerr<<"OK"<<std::endl; offset+=sizesCPU[i]; } dim3 blocks(1); dim3 threads(maxLevel+1); hipLaunchKernelGGL(( insertOctreePointers), dim3(blocks),dim3(threads), 0, 0, octree, 
sizes, memoryGPU); // std::cerr<<"Launching kernek blocks ("<<blocks.x<<","<<blocks.y<<","<<blocks.z<<") threads ("<<threads.x<<","<<threads.y<<","<<threads.z<<") error: "<< hipGetErrorString(hipGetLastError())<<std::endl; std::cerr<<"Octree: sorting pointers "; if (hipSuccess != hipDeviceSynchronize()) { std::cerr<<"Fail"<<std::endl; throw; } else std::cerr<<"OK"<<std::endl; std::cerr<<"End copying octree to GPU"<<std::endl; delete[] sizesCPU; for(int i=0; i<=nLevels; i++) { delete[] octreeCPU[i]; } delete[] octreeCPU; } OctreeContainer::~OctreeContainer() { hipFree(octree); hipFree(memoryGPU); hipFree(sizes); } int OctreeContainer::getnLevels(){ return nLevels; } int OctreeContainer::getMaxLevel(){ return maxLevel; } float OctreeContainer::getIsosurface(){ return isosurface; } index_node_t ** OctreeContainer::getOctree(){ return octree; } int * OctreeContainer::getSizes(){ return sizes; }
eefffbfa1b27fdccfa586242c55cbb82e0e851bf.cu
#include "Octree.hpp" #include <exception> #include "cuda_help.hpp" #include <iostream> #include <fstream> __global__ void insertOctreePointers(index_node_t ** octreeGPU, int * sizes, index_node_t * memoryGPU) { int offset = 0; for(int i=0;i<threadIdx.x; i++) offset+=sizes[i]; octreeGPU[threadIdx.x] = &memoryGPU[offset]; } /* Lee el Octree de un fichero */ OctreeContainer::OctreeContainer(const char * file_name, int p_maxLevel) { maxLevel = p_maxLevel; /* Read octree from file */ std::ifstream file; try { file.open(file_name, std::ifstream::binary); } catch(...) { std::cerr<<"Octree: error opening octree file"<<std::endl; throw; } int magicWord; file.read((char*)&magicWord, sizeof(magicWord)); if (magicWord != 919278872) { std::cerr<<"Octree: error invalid file format"<<std::endl; throw; } file.read((char*)&isosurface, sizeof(isosurface)); file.read((char*)&dimension, sizeof(dimension)); file.read((char*)&realDim.x, sizeof(realDim.x)); file.read((char*)&realDim.y, sizeof(realDim.y)); file.read((char*)&realDim.z, sizeof(realDim.z)); file.read((char*)&nLevels, sizeof(int)); std::cout<<"Octree de dimension "<<dimension<<"x"<<dimension<<"x"<<dimension<<" niveles "<<nLevels<<std::endl; index_node_t ** octreeCPU = new index_node_t*[nLevels+1]; int * sizesCPU = new int[nLevels+1]; for(int i=nLevels; i>=0; i--) { int numElem = 0; file.read((char*)&numElem, sizeof(numElem)); //std::cout<<"Dimension del node en el nivel "<<i<<" es de "<<powf(2.0,*nLevels-i)<<std::endl; //std::cout<<"Numero de elementos de nivel "<<i<<" "<<numElem<<std::endl; sizesCPU[i] = numElem; octreeCPU[i] = new index_node_t[numElem]; for(int j=0; j<numElem; j++) { index_node_t node = 0; file.read((char*) &node, sizeof(index_node_t)); octreeCPU[i][j]= node; } } file.close(); /* end reading octree from file */ std::cerr<<"Copying octree to GPU"<<std::endl; int total = 0; for(int i=0; i<=maxLevel; i++) total+=sizesCPU[i]; std::cerr<<"Allocating memory octree CUDA octree "<<(maxLevel+1)*sizeof(index_node_t*)/1024.0f/1024.0f<<" MB: "<<std::endl; if (cudaSuccess != (cudaMalloc(&octree, (maxLevel+1)*sizeof(index_node_t*)))) { std::cerr<<"Octree: error allocating octree in the gpu"<<std::endl; throw; } std::cerr<<"Allocating memory octree CUDA memory "<<total*sizeof(index_node_t)/1024.0f/1024.0f<<" MB: "<<std::endl; if (cudaSuccess != (cudaMalloc(&memoryGPU, total*sizeof(index_node_t)))) { std::cerr<<"Octree: error allocating octree in the gpu"<<std::endl; throw; } std::cerr<<"Allocating memory octree CUDA sizes "<<(maxLevel+1)*sizeof(int)/1024.0f/1024.0f<<" MB: "<<std::endl; if (cudaSuccess != (cudaMalloc(&sizes, (maxLevel+1)*sizeof(int)))) { std::cerr<<"Octree: error allocating octree in the gpu"<<std::endl; throw; } /* Compiando sizes */ std::cerr<<"Octree: coping to device the sizes "; if (cudaSuccess != (cudaMemcpy((void*)sizes, (void*)sizesCPU, (maxLevel+1)*sizeof(int), cudaMemcpyHostToDevice))) { std::cerr<<"Fail"<<std::endl; throw; } else std::cerr<<"OK"<<std::endl; /* end sizes */ /* Copying octree */ int offset = 0; for(int i=0; i<=maxLevel; i++) { std::cerr<<"Coping to device level "<<i<<": "; if (cudaSuccess != (cudaMemcpy((void*)(memoryGPU+offset), (void*)octreeCPU[i], sizesCPU[i]*sizeof(index_node_t), cudaMemcpyHostToDevice))) { std::cerr<<"Fail"<<std::endl; throw; } else std::cerr<<"OK"<<std::endl; offset+=sizesCPU[i]; } dim3 blocks(1); dim3 threads(maxLevel+1); insertOctreePointers<<<blocks,threads>>>(octree, sizes, memoryGPU); // std::cerr<<"Launching kernek blocks ("<<blocks.x<<","<<blocks.y<<","<<blocks.z<<") threads 
("<<threads.x<<","<<threads.y<<","<<threads.z<<") error: "<< cudaGetErrorString(cudaGetLastError())<<std::endl; std::cerr<<"Octree: sorting pointers "; if (cudaSuccess != cudaDeviceSynchronize()) { std::cerr<<"Fail"<<std::endl; throw; } else std::cerr<<"OK"<<std::endl; std::cerr<<"End copying octree to GPU"<<std::endl; delete[] sizesCPU; for(int i=0; i<=nLevels; i++) { delete[] octreeCPU[i]; } delete[] octreeCPU; } OctreeContainer::~OctreeContainer() { cudaFree(octree); cudaFree(memoryGPU); cudaFree(sizes); } int OctreeContainer::getnLevels(){ return nLevels; } int OctreeContainer::getMaxLevel(){ return maxLevel; } float OctreeContainer::getIsosurface(){ return isosurface; } index_node_t ** OctreeContainer::getOctree(){ return octree; } int * OctreeContainer::getSizes(){ return sizes; }
ffdbadb3becc9dd43e297e1b5c58bd4d137b7381.hip
// !!! This is a file automatically generated by hipify!!! #include "../constants_test_3d.h" #include <hip/hip_runtime.h> #include <iostream> #include "../../../utils.h" // Dataset constexpr auto N = mean_shift::cuda::test_3d::case_2000::N; constexpr auto D = mean_shift::cuda::test_3d::case_2000::D; constexpr auto M = mean_shift::cuda::test_3d::M; const auto PATH_TO_DATA = mean_shift::cuda::test_3d::case_2000::PATH_TO_DATA; const auto PATH_TO_CENTROIDS = mean_shift::cuda::test_3d::case_2000::PATH_TO_CENTROIDS; // Hyperparams constexpr auto RADIUS = mean_shift::cuda::test_3d::case_2000::RADIUS; constexpr auto MIN_DISTANCE = mean_shift::cuda::test_3d::case_2000::MIN_DISTANCE; constexpr auto NUM_ITER = mean_shift::cuda::test_3d::NUM_ITER; constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::test_3d::case_2000::DBL_SIGMA_SQ; constexpr auto DIST_TO_REAL = mean_shift::cuda::test_3d::DIST_TO_REAL; // Device constexpr auto THREADS = mean_shift::cuda::test_3d::THREADS; constexpr auto BLOCKS = mean_shift::cuda::test_3d::case_2000::BLOCKS; constexpr auto TILE_WIDTH = mean_shift::cuda::test_3d::case_2000::TILE_WIDTH; __global__ void mean_shift_tiling(const float* data, float* data_next) { // Shared memory allocation __shared__ float local_data[TILE_WIDTH * D]; __shared__ float valid_data[TILE_WIDTH]; // A few convenient variables int tid = (blockIdx.x * blockDim.x) + threadIdx.x; int row = tid * D; int local_row = threadIdx.x * D; float new_position[D] = {0.}; float tot_weight = 0.; // Load data in shared memory for (int t = 0; t < BLOCKS; ++t) { int tid_in_tile = t * TILE_WIDTH + threadIdx.x; if (tid_in_tile < N) { int row_in_tile = tid_in_tile * D; for (int j = 0; j < D; ++j) { local_data[local_row + j] = data[row_in_tile + j]; } valid_data[threadIdx.x] = 1; } else { for (int j = 0; j < D; ++j) { local_data[local_row + j] = 0; valid_data[threadIdx.x] = 0; } } __syncthreads(); for (int i = 0; i < TILE_WIDTH; ++i) { int local_row_tile = i * D; float valid_radius = RADIUS * valid_data[i]; float sq_dist = 0.; for (int j = 0; j < D; ++j) { sq_dist += (data[row + j] - local_data[local_row_tile + j]) * (data[row + j] - local_data[local_row_tile + j]); } if (sq_dist <= valid_radius) { float weight = expf(-sq_dist / DBL_SIGMA_SQ); for (int j = 0; j < D; ++j) { new_position[j] += (weight * local_data[local_row_tile + j]); } tot_weight += (weight * valid_data[i]); } } __syncthreads(); } if (tid < N) { for (int j = 0; j < D; ++j) { data_next[row + j] = new_position[j] / tot_weight; } } return; } int main() { mean_shift::cuda::utils::print_info(PATH_TO_DATA, N, D, BLOCKS, THREADS, TILE_WIDTH); // Load data std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ','); std::array<float, N * D> data_next {}; float *dev_data; float *dev_data_next; // Allocate GPU memory size_t data_bytes = N * D * sizeof(float); hipMalloc(&dev_data, data_bytes); hipMalloc(&dev_data_next, data_bytes); // Copy to GPU memory hipMemcpy(dev_data, data.data(), data_bytes, hipMemcpyHostToDevice); hipMemcpy(dev_data_next, data_next.data(), data_bytes, hipMemcpyHostToDevice); // Run mean shift clustering for (size_t i = 0; i < NUM_ITER; ++i) { hipLaunchKernelGGL(( mean_shift_tiling), dim3(BLOCKS), dim3(THREADS), 0, 0, dev_data, dev_data_next); hipDeviceSynchronize(); mean_shift::cuda::utils::swap(dev_data, dev_data_next); } hipMemcpy(data.data(), dev_data, data_bytes, hipMemcpyDeviceToHost); // Copy from GPU and de-allocate hipFree(dev_data); hipFree(dev_data_next); // Reduce data to cluster centers const auto 
centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE); // Check if correct number assert(centroids.size() == M); // Check if these centroids are sufficiently close to real ones const std::array<float, M * D> real = mean_shift::cuda::utils::load_csv<M, D>(PATH_TO_CENTROIDS, ','); const bool are_close = mean_shift::cuda::utils::are_close_to_real<M, D>(centroids, real, DIST_TO_REAL); assert(are_close); std::cout << "\nSUCCESS!\n"; return 0; }
ffdbadb3becc9dd43e297e1b5c58bd4d137b7381.cu
#include "../constants_test_3d.h" #include <cuda.h> #include <iostream> #include "../../../utils.h" // Dataset constexpr auto N = mean_shift::cuda::test_3d::case_2000::N; constexpr auto D = mean_shift::cuda::test_3d::case_2000::D; constexpr auto M = mean_shift::cuda::test_3d::M; const auto PATH_TO_DATA = mean_shift::cuda::test_3d::case_2000::PATH_TO_DATA; const auto PATH_TO_CENTROIDS = mean_shift::cuda::test_3d::case_2000::PATH_TO_CENTROIDS; // Hyperparams constexpr auto RADIUS = mean_shift::cuda::test_3d::case_2000::RADIUS; constexpr auto MIN_DISTANCE = mean_shift::cuda::test_3d::case_2000::MIN_DISTANCE; constexpr auto NUM_ITER = mean_shift::cuda::test_3d::NUM_ITER; constexpr auto DBL_SIGMA_SQ = mean_shift::cuda::test_3d::case_2000::DBL_SIGMA_SQ; constexpr auto DIST_TO_REAL = mean_shift::cuda::test_3d::DIST_TO_REAL; // Device constexpr auto THREADS = mean_shift::cuda::test_3d::THREADS; constexpr auto BLOCKS = mean_shift::cuda::test_3d::case_2000::BLOCKS; constexpr auto TILE_WIDTH = mean_shift::cuda::test_3d::case_2000::TILE_WIDTH; __global__ void mean_shift_tiling(const float* data, float* data_next) { // Shared memory allocation __shared__ float local_data[TILE_WIDTH * D]; __shared__ float valid_data[TILE_WIDTH]; // A few convenient variables int tid = (blockIdx.x * blockDim.x) + threadIdx.x; int row = tid * D; int local_row = threadIdx.x * D; float new_position[D] = {0.}; float tot_weight = 0.; // Load data in shared memory for (int t = 0; t < BLOCKS; ++t) { int tid_in_tile = t * TILE_WIDTH + threadIdx.x; if (tid_in_tile < N) { int row_in_tile = tid_in_tile * D; for (int j = 0; j < D; ++j) { local_data[local_row + j] = data[row_in_tile + j]; } valid_data[threadIdx.x] = 1; } else { for (int j = 0; j < D; ++j) { local_data[local_row + j] = 0; valid_data[threadIdx.x] = 0; } } __syncthreads(); for (int i = 0; i < TILE_WIDTH; ++i) { int local_row_tile = i * D; float valid_radius = RADIUS * valid_data[i]; float sq_dist = 0.; for (int j = 0; j < D; ++j) { sq_dist += (data[row + j] - local_data[local_row_tile + j]) * (data[row + j] - local_data[local_row_tile + j]); } if (sq_dist <= valid_radius) { float weight = expf(-sq_dist / DBL_SIGMA_SQ); for (int j = 0; j < D; ++j) { new_position[j] += (weight * local_data[local_row_tile + j]); } tot_weight += (weight * valid_data[i]); } } __syncthreads(); } if (tid < N) { for (int j = 0; j < D; ++j) { data_next[row + j] = new_position[j] / tot_weight; } } return; } int main() { mean_shift::cuda::utils::print_info(PATH_TO_DATA, N, D, BLOCKS, THREADS, TILE_WIDTH); // Load data std::array<float, N * D> data = mean_shift::cuda::utils::load_csv<N, D>(PATH_TO_DATA, ','); std::array<float, N * D> data_next {}; float *dev_data; float *dev_data_next; // Allocate GPU memory size_t data_bytes = N * D * sizeof(float); cudaMalloc(&dev_data, data_bytes); cudaMalloc(&dev_data_next, data_bytes); // Copy to GPU memory cudaMemcpy(dev_data, data.data(), data_bytes, cudaMemcpyHostToDevice); cudaMemcpy(dev_data_next, data_next.data(), data_bytes, cudaMemcpyHostToDevice); // Run mean shift clustering for (size_t i = 0; i < NUM_ITER; ++i) { mean_shift_tiling<<<BLOCKS, THREADS>>>(dev_data, dev_data_next); cudaDeviceSynchronize(); mean_shift::cuda::utils::swap(dev_data, dev_data_next); } cudaMemcpy(data.data(), dev_data, data_bytes, cudaMemcpyDeviceToHost); // Copy from GPU and de-allocate cudaFree(dev_data); cudaFree(dev_data_next); // Reduce data to cluster centers const auto centroids = mean_shift::cuda::utils::reduce_to_centroids<N, D>(data, MIN_DISTANCE); // Check if 
correct number assert(centroids.size() == M); // Check if these centroids are sufficiently close to real ones const std::array<float, M * D> real = mean_shift::cuda::utils::load_csv<M, D>(PATH_TO_CENTROIDS, ','); const bool are_close = mean_shift::cuda::utils::are_close_to_real<M, D>(centroids, real, DIST_TO_REAL); assert(are_close); std::cout << "\nSUCCESS!\n"; return 0; }
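The quantity each thread accumulates in mean_shift_tiling above is the standard Gaussian-weighted mean-shift update restricted to points inside the search radius. Writing DBL_SIGMA_SQ as 2*sigma^2 and assuming the RADIUS constant already stores the squared radius r^2 (the kernel compares the squared distance against it directly), the per-point step is:

x^{(t+1)} = \frac{\sum_{i:\ \|x^{(t)}-x_i\|^2 \le r^2} \exp\!\big(-\|x^{(t)}-x_i\|^2 / (2\sigma^2)\big)\, x_i}{\sum_{i:\ \|x^{(t)}-x_i\|^2 \le r^2} \exp\!\big(-\|x^{(t)}-x_i\|^2 / (2\sigma^2)\big)}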
58b277c6f4996a86a7a1c3c1b55517f30a8ef3ef.hip
// !!! This is a file automatically generated by hipify!!!
#include <Camera.h>

__device__ vec3 random_in_unit_disk(hiprandState_t *local_rand_state)
{
    vec3 p;
    do {
        p = 2.0f * vec3(hiprand_uniform(local_rand_state), hiprand_uniform(local_rand_state), 0) - vec3(1, 1, 0);
    } while (dot(p, p) >= 1.0f);
    return p;
}

__device__ camera::camera(vec3 lookfrom, vec3 lookat, vec3 vup, float vfov, float aspect, float aperture, float focus_dist)
{
    // vfov in degrees
    lens_radius = aperture / 2.0f;
    origin = lookfrom;
    float theta = vfov * float(M_PI) / 180.0f;
    float half_height = tan(theta / 2.0f);
    float half_width = aspect * half_height;
    w = unit_vector(lookfrom - lookat);
    u = unit_vector(cross(vup, w));
    v = cross(w, u);
    lower_left_corner = origin - half_width * focus_dist * u - half_height * focus_dist * v - w * focus_dist;
    horizontal = 2.0f * half_width * focus_dist * u;
    vertical = 2.0f * half_height * focus_dist * v;
}

__device__ ray camera::get_ray(float s, float t, hiprandState_t *local_rand_state)
{
    vec3 rd = lens_radius * random_in_unit_disk(local_rand_state);
    vec3 offset = u * rd.x() + v * rd.y();
    return ray(origin + offset, lower_left_corner + s * horizontal + t * vertical - origin - offset);
    //return ray(lower_left_corner + u*horizontal + v*vertical - vec3(0,0,-1), vec3(0.0f,0.0f,-1.0f));
}
58b277c6f4996a86a7a1c3c1b55517f30a8ef3ef.cu
#include<Camera.h> __device__ vec3 random_in_unit_disk(curandState *local_rand_state) { vec3 p; do { p = 2.0f*vec3(curand_uniform(local_rand_state),curand_uniform(local_rand_state),0) - vec3(1,1,0); } while (dot(p,p) >= 1.0f); return p; } __device__ camera::camera(vec3 lookfrom,vec3 lookat,vec3 vup,float vfov, float aspect,float aperture,float focus_dist) { //vfov in degress lens_radius = aperture / 2.0f; origin = lookfrom; float theta = vfov*float(M_PI)/180.0f; float half_height= tan(theta/2.0f); float half_width = aspect * half_height; w = unit_vector(lookfrom-lookat); u = unit_vector(cross(vup,w)); v = cross(w,u); lower_left_corner = origin - half_width*focus_dist*u - half_height*focus_dist*v -w*focus_dist ; horizontal = 2.0f*half_width*focus_dist*u; vertical = 2.0f*half_height*focus_dist*v; } __device__ ray camera::get_ray(float s, float t,curandState *local_rand_state) { vec3 rd = lens_radius*random_in_unit_disk(local_rand_state); vec3 offset = u * rd.x() + v * rd.y(); return ray(origin+offset, lower_left_corner + s*horizontal + t*vertical - origin- offset); //return ray(lower_left_corner + u*horizontal + v*vertical - vec3(0,0,-1),vec3(0.0f,0.0f,-1.0f)); }
3316f56d541ba72f0983556696e38131da24af20.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>

// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, long long n)
{
    // Get our global thread ID
    int id = blockIdx.x * blockDim.x + threadIdx.x;

    // Make sure we do not go out of bounds
    if (id < n)
        c[id] = a[id] + b[id];
}

int main(int argc, char* argv[])
{
    FILE *fptr = fopen("parallel_vector_add.txt", "w");
    long long minsize = pow(2, 8);
    long long maxsize = pow(2, 28);
    //int cnt = 0;
    //int n = 20;
    long long n;

    for (n = minsize; n < maxsize; n *= 2)
    {
        // Size of vectors
        //int n = 1000000;

        // Host input vectors
        double *h_a;
        double *h_b;
        // Host output vector
        double *h_c;

        // Device input vectors
        double *d_a;
        double *d_b;
        // Device output vector
        double *d_c;

        // Size, in bytes, of each vector
        size_t bytes = n * sizeof(double);

        // Allocate memory for each vector on host
        h_a = (double*)malloc(bytes);
        h_b = (double*)malloc(bytes);
        h_c = (double*)malloc(bytes);

        // Allocate memory for each vector on GPU
        hipMalloc(&d_a, bytes);
        hipMalloc(&d_b, bytes);
        hipMalloc(&d_c, bytes);

        long long i;
        // Initialize vectors on host
        for (i = 0; i < n; i++) {
            h_a[i] = sin(i) * sin(i);
            h_b[i] = cos(i) * cos(i);
        }

        hipEvent_t start, stop;
        hipEventCreate(&start);
        hipEventCreate(&stop);

        // Copy host vectors to device
        hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
        hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);

        int blockSize, gridSize;

        // Number of threads in each thread block
        blockSize = 1024;

        // Number of thread blocks in grid
        gridSize = (int)ceil((float)n / blockSize);

        hipEventRecord(start);
        // Execute the kernel
        hipLaunchKernelGGL(vecAdd, dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
        hipEventRecord(stop);

        hipEventSynchronize(stop);
        float milliseconds = 0;
        hipEventElapsedTime(&milliseconds, start, stop);

        // Copy array back to host
        hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);

        // Sum up vector c and print result divided by n, this should equal 1 within error
        // double sum = 0;
        // for(i=0; i<n; i++)
        //     sum += h_c[i];
        // printf("final result: %f\n", sum/n);

        // Release device memory
        hipFree(d_a);
        hipFree(d_b);
        hipFree(d_c);

        fprintf(fptr, "%lld %lf\n", n, milliseconds);
        // printf("%lld %lf\n", n, milliseconds);

        // Release host memory
        free(h_a);
        free(h_b);
        free(h_c);
    }

    fclose(fptr);
    return 0;
}
3316f56d541ba72f0983556696e38131da24af20.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda.h> // CUDA kernel. Each thread takes care of one element of c __global__ void vecAdd(double *a, double *b, double *c, long long n) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = a[id] + b[id]; } int main( int argc, char* argv[] ) { FILE *fptr = fopen("parallel_vector_add.txt", "w"); long long minsize = pow(2,8); long long maxsize = pow(2,28); //int cnt = 0; //int n = 20; long long n; for(n = minsize; n<maxsize; n*=2) { // Size of vectors //int n = 1000000; // Host input vectors double *h_a; double *h_b; //Host output vector double *h_c; // Device input vectors double *d_a; double *d_b; //Device output vector double *d_c; // Size, in bytes, of each vector size_t bytes = n*sizeof(double); // Allocate memory for each vector on host h_a = (double*)malloc(bytes); h_b = (double*)malloc(bytes); h_c = (double*)malloc(bytes); // Allocate memory for each vector on GPU cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); long long i; // Initialize vectors on host for( i = 0; i < n; i++ ) { h_a[i] = sin(i)*sin(i); h_b[i] = cos(i)*cos(i); } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Copy host vectors to device cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice); int blockSize, gridSize; // Number of threads in each thread block blockSize = 1024; // Number of thread blocks in grid gridSize = (int)ceil((float)n/blockSize); cudaEventRecord(start); // Execute the kernel vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); // Copy array back to host cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost ); // Sum up vector c and print result divided by n, this should equal 1 within error // double sum = 0; // for(i=0; i<n; i++) // sum += h_c[i]; // printf("final result: %f\n", sum/n); // Release device memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); fprintf(fptr, "%ld %lf\n", n, milliseconds); // printf("%ld %lf\n", n, milliseconds); // Release host memory free(h_a); free(h_b); free(h_c); } fclose(fptr); return 0; }
568852fafae7a5cc9e298c0d61b8f30f729e4fe9.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convolution_sep.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *output = NULL;
            hipMalloc(&output, XSIZE*YSIZE);
            const float *input = NULL;
            hipMalloc(&input, XSIZE*YSIZE);
            const float *kernel = NULL;
            hipMalloc(&kernel, XSIZE*YSIZE);
            const int kernel_size = 1;
            const dim3 imsize = 1;
            int dir = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(convolution_sep, dim3(gridBlock), dim3(threadBlock), 0, 0, output, input, kernel, kernel_size, imsize, dir);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(convolution_sep, dim3(gridBlock), dim3(threadBlock), 0, 0, output, input, kernel, kernel_size, imsize, dir);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(convolution_sep, dim3(gridBlock), dim3(threadBlock), 0, 0, output, input, kernel, kernel_size, imsize, dir);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
568852fafae7a5cc9e298c0d61b8f30f729e4fe9.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convolution_sep.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},
                      {1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Buffers sized in elements; sizeof(float) added so the kernel does not run past the allocation.
            float *output = NULL;
            cudaMalloc(&output, XSIZE * YSIZE * sizeof(float));
            const float *input = NULL;
            cudaMalloc(&input, XSIZE * YSIZE * sizeof(float));
            const float *kernel = NULL;
            cudaMalloc(&kernel, XSIZE * YSIZE * sizeof(float));
            const int kernel_size = 1;
            const dim3 imsize = 1;
            int dir = 1;
            // Round the image size up to a multiple of the block size.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            // Warm-up launches.
            convolution_sep<<<gridBlock, threadBlock>>>(output, input, kernel, kernel_size, imsize, dir);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                convolution_sep<<<gridBlock, threadBlock>>>(output, input, kernel, kernel_size, imsize, dir);
            }
            // Timed launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                convolution_sep<<<gridBlock, threadBlock>>>(output, input, kernel, kernel_size, imsize, dir);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
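The two files in this pair differ almost entirely in API spelling; the kernel launch is the one structural change hipify makes. The self-contained sketch below shows that mapping on a trivial kernel (dummy_kernel is an illustrative name, not part of either file).

#include <cuda_runtime.h>

__global__ void dummy_kernel(float *out)
{
    out[blockIdx.x * blockDim.x + threadIdx.x] = 1.0f;
}

int main()
{
    float *out = NULL;
    cudaMalloc(&out, 256 * sizeof(float));   // 2 blocks x 128 threads = 256 elements
    dim3 gridBlock(2);
    dim3 threadBlock(128);

    // CUDA launch form, as in the .cu file above:
    dummy_kernel<<<gridBlock, threadBlock>>>(out);

    // hipify rewrites the same statement into the macro form seen in the .hip file:
    //   hipLaunchKernelGGL((dummy_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, out);
    // where the extra "0, 0" arguments are the dynamic shared-memory size and the stream.

    cudaDeviceSynchronize();
    cudaFree(out);
    return 0;
}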
3750366a7d3d8af11419704175c869fa83d6d8c2.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2016 Andreas Schfer * * Distributed under the Boost Software License, Version 1.0. (See accompanying * file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) */ #include <boost/detail/lightweight_test.hpp> #include <iostream> #include <stdexcept> #include <vector> #include <libflatarray/cuda_array.hpp> #include "test.hpp" using namespace LibFlatArray; ADD_TEST(basic) { std::vector<double> host_vec1(30, -1); std::vector<double> host_vec2(30, -2); std::vector<double> host_vec3(30, -3); std::vector<double> host_vec4(30, -4); for (int i = 0; i < 30; ++i) { host_vec1[i] = i + 0.5; BOOST_TEST(-2 == host_vec2[i]); BOOST_TEST(-3 == host_vec3[i]); BOOST_TEST(-4 == host_vec4[i]); } cuda_array<double> device_array1(&host_vec1[0], 30); cuda_array<double> device_array2(host_vec1); cuda_array<double> device_array3(device_array1); cuda_array<double> device_array4; device_array4 = cuda_array<double>(30); device_array4.load(&host_vec1[0]); device_array2.save(&host_vec2[0]); device_array3.save(&host_vec3[0]); device_array4.save(&host_vec4[0]); for (int i = 0; i < 30; ++i) { double expected = i + 0.5; BOOST_TEST(expected == host_vec2[i]); BOOST_TEST(expected == host_vec3[i]); BOOST_TEST(expected == host_vec4[i]); } BOOST_TEST(device_array1.data() != device_array2.data()); BOOST_TEST(device_array1.data() != device_array3.data()); BOOST_TEST(device_array1.data() != device_array3.data()); BOOST_TEST(device_array2.data() != device_array3.data()); BOOST_TEST(device_array2.data() != device_array4.data()); BOOST_TEST(device_array3.data() != device_array4.data()); hipError_t error = hipGetLastError(); if (error != hipSuccess) { std::cerr << "ERROR: " << hipGetErrorString(error) << "\n"; throw std::runtime_error("CUDA error"); } } ADD_TEST(initialization) { int value = 4711; cuda_array<int> device_array(3, value); std::vector<int> host_vec(3); device_array.save(&host_vec[0]); BOOST_TEST(host_vec[0] == 4711); BOOST_TEST(host_vec[1] == 4711); BOOST_TEST(host_vec[2] == 4711); } ADD_TEST(resize_after_assignment) { cuda_array<double> device_array1(20, 12.34); cuda_array<double> device_array2(30, 666); cuda_array<double> device_array3(25, 31); std::vector<double> host_vec(30); device_array2.save(host_vec.data()); for (int i = 0; i < 30; ++i) { BOOST_TEST(host_vec[i] == 666); } device_array1 = device_array2; device_array2 = device_array3; BOOST_TEST(device_array1.size() == 30); BOOST_TEST(device_array1.capacity() == 30); BOOST_TEST(device_array2.size() == 25); BOOST_TEST(device_array2.capacity() == 30); host_vec = std::vector<double>(30, -1); device_array1.save(host_vec.data()); for (int i = 0; i < 30; ++i) { BOOST_TEST(host_vec[i] == 666); } device_array2.save(host_vec.data()); for (int i = 0; i < 25; ++i) { BOOST_TEST(host_vec[i] == 31); } BOOST_TEST(device_array1.data() != device_array2.data()); BOOST_TEST(device_array1.data() != device_array3.data()); BOOST_TEST(device_array2.data() != device_array3.data()); } ADD_TEST(resize) { cuda_array<double> device_array(200, 1.3); BOOST_TEST(200 == device_array.size()); BOOST_TEST(200 == device_array.capacity()); device_array.resize(150); BOOST_TEST(150 == device_array.size()); BOOST_TEST(200 == device_array.capacity()); { std::vector<double> host_vec(250, 10); device_array.save(host_vec.data()); for (int i = 0; i < 150; ++i) { BOOST_TEST(host_vec[i] == 1.3); } for (int i = 150; i < 250; ++i) { BOOST_TEST(host_vec[i] == 10); } } device_array.resize(250, 27); BOOST_TEST(250 == device_array.size()); 
BOOST_TEST(250 == device_array.capacity()); { std::vector<double> host_vec(250, -1); device_array.save(host_vec.data()); for (int i = 0; i < 150; ++i) { BOOST_TEST(host_vec[i] == 1.3); } for (int i = 150; i < 250; ++i) { BOOST_TEST(host_vec[i] == 27); } } // ensure content is kept intact if shrunk and enlarged // afterwards sans default initialization: device_array.resize(10); device_array.resize(210); BOOST_TEST(210 == device_array.size()); BOOST_TEST(250 == device_array.capacity()); { std::vector<double> host_vec(250, -1); device_array.save(host_vec.data()); for (int i = 0; i < 150; ++i) { BOOST_TEST(host_vec[i] == 1.3); } for (int i = 150; i < 210; ++i) { BOOST_TEST(host_vec[i] == 27); } } } ADD_TEST(resize2) { cuda_array<double> array(10, 5); array.resize(20, 6); array.resize(15, 7); double v = 8.0; array.resize(20, v); std::vector<double> vec(20); array.save(vec.data()); for (int i = 0; i < 10; ++i) { BOOST_TEST(vec[i] == 5); } for (int i = 10; i < 15; ++i) { BOOST_TEST(vec[i] == 6); } for (int i = 15; i < 20; ++i) { BOOST_TEST(vec[i] == 8); } } ADD_TEST(reserve) { cuda_array<double> device_array(31, 1.3); BOOST_TEST(31 == device_array.size()); BOOST_TEST(31 == device_array.capacity()); device_array.reserve(55); BOOST_TEST(31 == device_array.size()); BOOST_TEST(55 == device_array.capacity()); std::vector<double> host_vec(31, -1); device_array.save(host_vec.data()); for (int i = 0; i < 31; ++i) { BOOST_TEST(host_vec[i] == 1.3); } } int main(int argc, char **argv) { return 0; }
3750366a7d3d8af11419704175c869fa83d6d8c2.cu
/** * Copyright 2016 Andreas Schäfer * * Distributed under the Boost Software License, Version 1.0. (See accompanying * file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) */ #include <boost/detail/lightweight_test.hpp> #include <iostream> #include <stdexcept> #include <vector> #include <libflatarray/cuda_array.hpp> #include "test.hpp" using namespace LibFlatArray; ADD_TEST(basic) { std::vector<double> host_vec1(30, -1); std::vector<double> host_vec2(30, -2); std::vector<double> host_vec3(30, -3); std::vector<double> host_vec4(30, -4); for (int i = 0; i < 30; ++i) { host_vec1[i] = i + 0.5; BOOST_TEST(-2 == host_vec2[i]); BOOST_TEST(-3 == host_vec3[i]); BOOST_TEST(-4 == host_vec4[i]); } cuda_array<double> device_array1(&host_vec1[0], 30); cuda_array<double> device_array2(host_vec1); cuda_array<double> device_array3(device_array1); cuda_array<double> device_array4; device_array4 = cuda_array<double>(30); device_array4.load(&host_vec1[0]); device_array2.save(&host_vec2[0]); device_array3.save(&host_vec3[0]); device_array4.save(&host_vec4[0]); for (int i = 0; i < 30; ++i) { double expected = i + 0.5; BOOST_TEST(expected == host_vec2[i]); BOOST_TEST(expected == host_vec3[i]); BOOST_TEST(expected == host_vec4[i]); } BOOST_TEST(device_array1.data() != device_array2.data()); BOOST_TEST(device_array1.data() != device_array3.data()); BOOST_TEST(device_array1.data() != device_array3.data()); BOOST_TEST(device_array2.data() != device_array3.data()); BOOST_TEST(device_array2.data() != device_array4.data()); BOOST_TEST(device_array3.data() != device_array4.data()); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { std::cerr << "ERROR: " << cudaGetErrorString(error) << "\n"; throw std::runtime_error("CUDA error"); } } ADD_TEST(initialization) { int value = 4711; cuda_array<int> device_array(3, value); std::vector<int> host_vec(3); device_array.save(&host_vec[0]); BOOST_TEST(host_vec[0] == 4711); BOOST_TEST(host_vec[1] == 4711); BOOST_TEST(host_vec[2] == 4711); } ADD_TEST(resize_after_assignment) { cuda_array<double> device_array1(20, 12.34); cuda_array<double> device_array2(30, 666); cuda_array<double> device_array3(25, 31); std::vector<double> host_vec(30); device_array2.save(host_vec.data()); for (int i = 0; i < 30; ++i) { BOOST_TEST(host_vec[i] == 666); } device_array1 = device_array2; device_array2 = device_array3; BOOST_TEST(device_array1.size() == 30); BOOST_TEST(device_array1.capacity() == 30); BOOST_TEST(device_array2.size() == 25); BOOST_TEST(device_array2.capacity() == 30); host_vec = std::vector<double>(30, -1); device_array1.save(host_vec.data()); for (int i = 0; i < 30; ++i) { BOOST_TEST(host_vec[i] == 666); } device_array2.save(host_vec.data()); for (int i = 0; i < 25; ++i) { BOOST_TEST(host_vec[i] == 31); } BOOST_TEST(device_array1.data() != device_array2.data()); BOOST_TEST(device_array1.data() != device_array3.data()); BOOST_TEST(device_array2.data() != device_array3.data()); } ADD_TEST(resize) { cuda_array<double> device_array(200, 1.3); BOOST_TEST(200 == device_array.size()); BOOST_TEST(200 == device_array.capacity()); device_array.resize(150); BOOST_TEST(150 == device_array.size()); BOOST_TEST(200 == device_array.capacity()); { std::vector<double> host_vec(250, 10); device_array.save(host_vec.data()); for (int i = 0; i < 150; ++i) { BOOST_TEST(host_vec[i] == 1.3); } for (int i = 150; i < 250; ++i) { BOOST_TEST(host_vec[i] == 10); } } device_array.resize(250, 27); BOOST_TEST(250 == device_array.size()); BOOST_TEST(250 == device_array.capacity()); { 
std::vector<double> host_vec(250, -1); device_array.save(host_vec.data()); for (int i = 0; i < 150; ++i) { BOOST_TEST(host_vec[i] == 1.3); } for (int i = 150; i < 250; ++i) { BOOST_TEST(host_vec[i] == 27); } } // ensure content is kept intact if shrunk and enlarged // afterwards sans default initialization: device_array.resize(10); device_array.resize(210); BOOST_TEST(210 == device_array.size()); BOOST_TEST(250 == device_array.capacity()); { std::vector<double> host_vec(250, -1); device_array.save(host_vec.data()); for (int i = 0; i < 150; ++i) { BOOST_TEST(host_vec[i] == 1.3); } for (int i = 150; i < 210; ++i) { BOOST_TEST(host_vec[i] == 27); } } } ADD_TEST(resize2) { cuda_array<double> array(10, 5); array.resize(20, 6); array.resize(15, 7); double v = 8.0; array.resize(20, v); std::vector<double> vec(20); array.save(vec.data()); for (int i = 0; i < 10; ++i) { BOOST_TEST(vec[i] == 5); } for (int i = 10; i < 15; ++i) { BOOST_TEST(vec[i] == 6); } for (int i = 15; i < 20; ++i) { BOOST_TEST(vec[i] == 8); } } ADD_TEST(reserve) { cuda_array<double> device_array(31, 1.3); BOOST_TEST(31 == device_array.size()); BOOST_TEST(31 == device_array.capacity()); device_array.reserve(55); BOOST_TEST(31 == device_array.size()); BOOST_TEST(55 == device_array.capacity()); std::vector<double> host_vec(31, -1); device_array.save(host_vec.data()); for (int i = 0; i < 31; ++i) { BOOST_TEST(host_vec[i] == 1.3); } } int main(int argc, char **argv) { return 0; }
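The tests above exercise LibFlatArray's cuda_array load()/save() round trips and its resize/reserve bookkeeping. Below is a minimal sketch of the host-to-device round trip that load() and save() presumably wrap, written against the plain CUDA runtime API; it illustrates the pattern only and is not LibFlatArray's actual implementation.

#include <cuda_runtime.h>
#include <cassert>
#include <vector>

int main()
{
    // Host data, analogous to host_vec1 in the test above.
    std::vector<double> host_in(30, 1.5), host_out(30, -1.0);

    // Device buffer plus explicit copies; load()/save() presumably wrap this
    // pattern (sketch only -- not the library's code).
    double *dev = NULL;
    cudaMalloc(&dev, host_in.size() * sizeof(double));
    cudaMemcpy(dev, host_in.data(), host_in.size() * sizeof(double), cudaMemcpyHostToDevice);   // load
    cudaMemcpy(host_out.data(), dev, host_in.size() * sizeof(double), cudaMemcpyDeviceToHost);  // save
    cudaFree(dev);

    for (std::size_t i = 0; i < host_out.size(); ++i) {
        assert(host_out[i] == 1.5);
    }
    return 0;
}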
17cf498bff9a400ea25e57cba1611a7ff6fef649.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/LinearAlgebra.h> #include <ATen/native/BatchLinearAlgebra.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <c10/core/Scalar.h> namespace at { namespace native { namespace { void addr_kernel_cuda(TensorIterator &iter, const Scalar& beta, const Scalar& alpha) { if (iter.dtype() == ScalarType::Bool) { using scalar_t = bool; auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); // when beta is false, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == false) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val && vec1_val && vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val); } ); } return; } AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf, iter.dtype(), "addr_cuda", [&] { auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); scalar_t zero_val(0); // when beta==0, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == zero_val) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val * vec1_val * vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return beta_val * self_val + alpha_val * vec1_val * vec2_val; } ); } }); } template <int n_threads, int n_elems_per_thread, typename func_t> C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread) __global__ void _elementwise_kernel(int total_n_elems, func_t f) { constexpr int total_work_block = n_threads * n_elems_per_thread; int idx = total_work_block * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < n_elems_per_thread; ++i) { if (idx < total_n_elems) { f(idx); idx += n_threads; } } } template <int n_threads, int n_elems_per_thread, typename func_t> static void _launch_kernel(int total_n_elems, func_t f) { TORCH_INTERNAL_ASSERT( total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max() ); dim3 block(n_threads); constexpr int total_work_block = n_threads * n_elems_per_thread; dim3 grid((total_n_elems + total_work_block - 1) / total_work_block); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( _elementwise_kernel<n_threads, n_elems_per_thread, func_t>) , dim3(grid), dim3(block), 0, stream, total_n_elems, f); C10_HIP_KERNEL_LAUNCH_CHECK(); } void unpack_pivots_cuda_kernel(TensorIterator& iter, const int64_t dim_size) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { unpack_pivots_cuda_kernel(sub_iter, dim_size); } return; } const auto offset_calculator = make_offset_calculator<2>(iter); const auto perm_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); const auto pivots_ptr = reinterpret_cast<const char*>(iter.data_ptr(1)); auto loop = [=]C10_DEVICE(const int idx) { const auto offsets = offset_calculator.get(idx); int64_t* const __restrict__ perm_data = reinterpret_cast<int64_t*>(perm_ptr + offsets[0]); 
const int32_t* const __restrict__ pivots_data = reinterpret_cast<const int32_t*>(pivots_ptr + offsets[1]); // QUESTION: can we mix 64bit offsets with 32bit Iterator indexing? for (int64_t i = 0; i < dim_size; ++i) { thrust::swap( perm_data[i], perm_data[pivots_data[i] - 1] ); } }; _launch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } } // anonymous namespace REGISTER_DISPATCH(unpack_pivots_stub, &unpack_pivots_cuda_kernel); REGISTER_DISPATCH(addr_stub, &addr_kernel_cuda); }}
17cf498bff9a400ea25e57cba1611a7ff6fef649.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/LinearAlgebra.h> #include <ATen/native/BatchLinearAlgebra.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <c10/core/Scalar.h> namespace at { namespace native { namespace { void addr_kernel_cuda(TensorIterator &iter, const Scalar& beta, const Scalar& alpha) { if (iter.dtype() == ScalarType::Bool) { using scalar_t = bool; auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); // when beta is false, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == false) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val && vec1_val && vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val); } ); } return; } AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf, iter.dtype(), "addr_cuda", [&] { auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); scalar_t zero_val(0); // when beta==0, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == zero_val) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val * vec1_val * vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return beta_val * self_val + alpha_val * vec1_val * vec2_val; } ); } }); } template <int n_threads, int n_elems_per_thread, typename func_t> C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread) __global__ void _elementwise_kernel(int total_n_elems, func_t f) { constexpr int total_work_block = n_threads * n_elems_per_thread; int idx = total_work_block * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < n_elems_per_thread; ++i) { if (idx < total_n_elems) { f(idx); idx += n_threads; } } } template <int n_threads, int n_elems_per_thread, typename func_t> static void _launch_kernel(int total_n_elems, func_t f) { TORCH_INTERNAL_ASSERT( total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max() ); dim3 block(n_threads); constexpr int total_work_block = n_threads * n_elems_per_thread; dim3 grid((total_n_elems + total_work_block - 1) / total_work_block); auto stream = at::cuda::getCurrentCUDAStream(); _elementwise_kernel<n_threads, n_elems_per_thread, func_t> <<<grid, block, 0, stream>>>(total_n_elems, f); C10_CUDA_KERNEL_LAUNCH_CHECK(); } void unpack_pivots_cuda_kernel(TensorIterator& iter, const int64_t dim_size) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { unpack_pivots_cuda_kernel(sub_iter, dim_size); } return; } const auto offset_calculator = make_offset_calculator<2>(iter); const auto perm_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); const auto pivots_ptr = reinterpret_cast<const char*>(iter.data_ptr(1)); auto loop = [=]C10_DEVICE(const int idx) { const auto offsets = offset_calculator.get(idx); int64_t* const __restrict__ perm_data = reinterpret_cast<int64_t*>(perm_ptr + offsets[0]); const int32_t* const __restrict__ pivots_data = reinterpret_cast<const int32_t*>(pivots_ptr + offsets[1]); // QUESTION: can we mix 64bit 
offsets with 32bit Iterator indexing? for (int64_t i = 0; i < dim_size; ++i) { thrust::swap( perm_data[i], perm_data[pivots_data[i] - 1] ); } }; _launch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } } // anonymous namespace REGISTER_DISPATCH(unpack_pivots_stub, &unpack_pivots_cuda_kernel); REGISTER_DISPATCH(addr_stub, &addr_kernel_cuda); }}
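The unpack_pivots kernel in the pair above turns LAPACK-style 1-based pivot indices into a permutation by applying sequential swaps to each batch entry. The host-side sketch below shows the same algorithm on a single permutation; the function name unpack_pivots_host and the sample pivot values are illustrative, not part of the PyTorch source.

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

// Illustrative host-side version of the per-batch loop in the kernel above:
// pivots[i] is 1-based (LAPACK convention), perm starts as the identity.
static void unpack_pivots_host(std::vector<int64_t> &perm, const std::vector<int32_t> &pivots)
{
    for (std::size_t i = 0; i < pivots.size(); ++i) {
        std::swap(perm[i], perm[pivots[i] - 1]);
    }
}

int main()
{
    std::vector<int64_t> perm = {0, 1, 2, 3};
    std::vector<int32_t> pivots = {3, 3, 4, 4};   // hypothetical pivots from an LU factorization
    unpack_pivots_host(perm, pivots);
    for (int64_t p : perm) {
        std::printf("%lld ", (long long)p);       // prints the resulting permutation: 2 0 3 1
    }
    std::printf("\n");
    return 0;
}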
7250ab9d1a8bddd559e031b5eafdb7eb8b07b4a6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <fstream> #include <algorithm> using namespace std; //const int Lib_N = 100000; //const int SampleParallel_N = 1000; const int Atom_type_N = 2; const int Pair_type_N = 3; const int Pt_I = 6, Pt_J = 6, Pt_K = 3; const int Pt_N = 4 * Pt_I * Pt_J * Pt_K; const int Ar_N = 1; class Parameters { public: int Rt, Tt, dumpstep; double Mass[Atom_type_N], T[Atom_type_N], mp_V[Atom_type_N], LJ_E[Pair_type_N], LJ_S[Pair_type_N], Box_x[2], Box_y[2], Box_z[2], Pt_ePos_x[Pt_N], Pt_ePos_y[Pt_N], Pt_ePos_z[Pt_N], Pt_argVel[3]; double PI, kB, fcc_lattice, nd_Mass, nd_Energy, nd_Length, nd_Velocity, nd_Time, nd_Acceleration, cutoff, d, spr_k, dt, Pt_T; bool state; void Init(); void Initialization(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], double All_Vel_x[], double All_Vel_y[], double All_Vel_z[], double All_Acc_x[], double All_Acc_y[], double All_Acc_z[]); void Initialization_Kernel(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], double All_Vel_x[], double All_Vel_y[], double All_Vel_z[], double All_Acc_x[], double All_Acc_y[], double All_Acc_z[]); void rescale_T1(double All_Vel_x[], double All_Vel_y[], double All_Vel_z[]); void rescale_T3(double All_Vel_x[], double All_Vel_y[], double All_Vel_z[]); void Dump(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], int timestep, int ds = 1); void Exit(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], int timestep); double random(); }; void Parameters::Init() { // PI = 3.14159265; kB = 1.38E-23; Mass[0] = 39.95 / 6.02 * 1E-26;//kg Mass[1] = 195.08 / 6.02 * 1E-26; LJ_E[0] = 1.654E-21;//J LJ_E[1] = 5.207E-20; LJ_E[2] = 1.093E-21; LJ_S[0] = 3.40 * 1E-10;//m LJ_S[1] = 2.47 * 1E-10; LJ_S[2] = 2.94 * 1E-10; cutoff = 10 * 1E-10; fcc_lattice = 3.93E-10; T[0] = 300.; T[1] = 300.; mp_V[0] = sqrt(2 * kB*T[0] / Mass[0]);// mp_V[1] = sqrt(3 * kB*T[1] / Mass[1]);// // nd_Mass = Mass[1]; nd_Energy = LJ_E[1]; nd_Length = LJ_S[1]; nd_Velocity = sqrt(nd_Energy / nd_Mass); nd_Time = nd_Length / nd_Velocity; nd_Acceleration = nd_Energy / (nd_Mass * nd_Length); // Mass[0] /= nd_Mass; Mass[1] /= nd_Mass; LJ_E[0] /= nd_Energy; LJ_E[1] /= nd_Energy; LJ_E[2] /= nd_Energy; LJ_S[0] /= nd_Length; LJ_S[1] /= nd_Length; LJ_S[2] /= nd_Length; cutoff /= nd_Length; fcc_lattice /= nd_Length; mp_V[0] /= nd_Velocity; mp_V[1] /= nd_Velocity; d = 5.0; spr_k = 5000.; dt = 0.001; Rt = 100; Tt = 35; dumpstep = 1; // Box_x[0] = 0; Box_x[1] = Pt_I * fcc_lattice; Box_y[0] = 0; Box_y[1] = Pt_J * fcc_lattice; Box_z[0] = -(Pt_K - 0.5)*fcc_lattice; Box_z[1] = d; // state = true; cout << "*******Parameters Initialized!*******\n"; } /******************************************************************************/ void Parameters::Initialization(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], double All_Vel_x[], double All_Vel_y[], double All_Vel_z[], double All_Acc_x[], double All_Acc_y[], double All_Acc_z[]) { int i; double *d_Pt_argVel, *d_Pt_T, *d_Mass, *d_T, *d_LJ_E, *d_LJ_S, *d_cutoff, *d_spr_k; double *d_All_Pos_x, *d_All_Pos_y, *d_All_Pos_z, *d_All_Vel_x, *d_All_Vel_y, *d_All_Vel_z, *d_All_Acc_x, *d_All_Acc_y, *d_All_Acc_z, *d_Box_x, *d_Box_y, *d_Box_z, *d_Pt_ePos_x, *d_Pt_ePos_y, *d_Pt_ePos_z; __global__ void Pos_period(double *All_Pos_x, double *All_Pos_y, 
double *Box_x, double *Box_y, double *Pt_ePos_x, double *Pt_ePos_y); __global__ void rescale_T2(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *Pt_argVel); __global__ void rescale_T4(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *T, double *Pt_T); __global__ void Acceleration_period(double *All_Pos_x, double *All_Pos_y, double *All_Pos_z, double *All_Acc_x, double *All_Acc_y, double *All_Acc_z, double *LJ_E, double *LJ_S, double *Box_x, double *Box_y, double *Box_z, double *cutoff, double *Pt_ePos_x, double *Pt_ePos_y, double *Pt_ePos_z, double *spr_k, double *Mass); cout << "Box_ZoneX: " << Box_x[0] << ", " << Box_x[1] << "\n"; cout << "Box_ZoneY: " << Box_y[0] << ", " << Box_y[1] << "\n"; cout << "Box_ZoneZ: " << Box_z[0] << ", " << Box_z[1] << "\n"; // Initialization_Kernel(All_type, All_Pos_x, All_Pos_y, All_Pos_z, All_Vel_x, All_Vel_y, All_Vel_z, All_Acc_x, All_Acc_y, All_Acc_z); // hipMalloc((void**)&d_All_Pos_x, sizeof(All_Pos_x)); hipMalloc((void**)&d_All_Pos_y, sizeof(All_Pos_y)); hipMalloc((void**)&d_All_Pos_z, sizeof(All_Pos_z)); hipMalloc((void**)&d_All_Vel_x, sizeof(All_Vel_x)); hipMalloc((void**)&d_All_Vel_y, sizeof(All_Vel_y)); hipMalloc((void**)&d_All_Vel_z, sizeof(All_Vel_z)); hipMalloc((void**)&d_All_Acc_x, sizeof(All_Acc_x)); hipMalloc((void**)&d_All_Acc_y, sizeof(All_Acc_y)); hipMalloc((void**)&d_All_Acc_z, sizeof(All_Acc_z)); hipMalloc((void**)&d_Box_x, sizeof(Box_x)); hipMalloc((void**)&d_Box_y, sizeof(Box_y)); hipMalloc((void**)&d_Box_z, sizeof(Box_z)); hipMalloc((void**)&d_Pt_ePos_x, sizeof(Pt_ePos_x)); hipMalloc((void**)&d_Pt_ePos_y, sizeof(Pt_ePos_y)); hipMalloc((void**)&d_Pt_ePos_z, sizeof(Pt_ePos_z)); hipMalloc((void**)&d_Mass, sizeof(Mass)); hipMalloc((void**)&d_T, sizeof(T)); hipMalloc((void**)&d_LJ_E, sizeof(LJ_E)); hipMalloc((void**)&d_LJ_S, sizeof(LJ_S)); hipMalloc((void**)&d_cutoff, sizeof(double)); hipMalloc((void**)&d_spr_k, sizeof(double)); hipMalloc((void**)&d_Pt_argVel, sizeof(Pt_argVel)); hipMalloc((void**)&d_Pt_T, sizeof(double)); // hipMemcpy(d_All_Pos_x, All_Pos_x, sizeof(All_Pos_x), hipMemcpyHostToDevice); hipMemcpy(d_All_Pos_y, All_Pos_y, sizeof(All_Pos_y), hipMemcpyHostToDevice); hipMemcpy(d_All_Pos_z, All_Pos_z, sizeof(All_Pos_z), hipMemcpyHostToDevice); hipMemcpy(d_All_Vel_x, All_Vel_x, sizeof(All_Vel_x), hipMemcpyHostToDevice); hipMemcpy(d_All_Vel_y, All_Vel_y, sizeof(All_Vel_y), hipMemcpyHostToDevice); hipMemcpy(d_All_Vel_z, All_Vel_z, sizeof(All_Vel_z), hipMemcpyHostToDevice); hipMemcpy(d_All_Acc_x, All_Acc_x, sizeof(All_Acc_x), hipMemcpyHostToDevice); hipMemcpy(d_All_Acc_y, All_Acc_y, sizeof(All_Acc_y), hipMemcpyHostToDevice); hipMemcpy(d_All_Acc_z, All_Acc_z, sizeof(All_Acc_z), hipMemcpyHostToDevice); hipMemcpy(d_Box_x, Box_x, sizeof(Box_x), hipMemcpyHostToDevice); hipMemcpy(d_Box_y, Box_y, sizeof(Box_y), hipMemcpyHostToDevice); hipMemcpy(d_Box_z, Box_z, sizeof(Box_z), hipMemcpyHostToDevice); hipMemcpy(d_Pt_ePos_x, Pt_ePos_x, sizeof(Pt_ePos_x), hipMemcpyHostToDevice); hipMemcpy(d_Pt_ePos_y, Pt_ePos_y, sizeof(Pt_ePos_y), hipMemcpyHostToDevice); hipMemcpy(d_Pt_ePos_z, Pt_ePos_z, sizeof(Pt_ePos_z), hipMemcpyHostToDevice); hipMemcpy(d_Mass, Mass, sizeof(Mass), hipMemcpyHostToDevice); hipMemcpy(d_T, T, sizeof(T), hipMemcpyHostToDevice); hipMemcpy(d_LJ_E, LJ_E, sizeof(LJ_E), hipMemcpyHostToDevice); hipMemcpy(d_LJ_S, LJ_S, sizeof(LJ_S), hipMemcpyHostToDevice); hipMemcpy(d_cutoff, &cutoff, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_spr_k, &spr_k, sizeof(double), hipMemcpyHostToDevice); // Pos_period << 
<1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_Box_x, d_Box_y, d_Pt_ePos_x, d_Pt_ePos_y); hipDeviceSynchronize(); // Box_z[0]=All_Pos_z[0]; for(i=0;i<Pt_N;i++){ if(All_Pos_z[i]<Box_z[0]){ Box_z[0]=All_Pos_z[i]; } } cout<<Box_z[0]<<"\n"; hipMemcpy(d_Box_z, Box_z, sizeof(Box_z), hipMemcpyHostToDevice); // rescale_T1(All_Vel_x, All_Vel_y, All_Vel_z); hipMemcpy(d_Pt_argVel, Pt_argVel, sizeof(Pt_argVel), hipMemcpyHostToDevice); rescale_T2 << <1, Pt_N >> > (d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_Pt_argVel); hipDeviceSynchronize(); hipMemcpy(All_Vel_x, d_All_Vel_x, sizeof(All_Vel_x), hipMemcpyDeviceToHost); hipMemcpy(All_Vel_y, d_All_Vel_y, sizeof(All_Vel_y), hipMemcpyDeviceToHost); hipMemcpy(All_Vel_z, d_All_Vel_z, sizeof(All_Vel_z), hipMemcpyDeviceToHost); rescale_T3(All_Vel_x, All_Vel_y, All_Vel_z); hipMemcpy(d_Pt_T, &Pt_T, sizeof(double), hipMemcpyHostToDevice); rescale_T4 << <1, Pt_N >> > (d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_T, d_Pt_T); hipDeviceSynchronize(); // Acceleration_period << <1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_All_Pos_z, d_All_Acc_x, d_All_Acc_y, d_All_Acc_z, d_LJ_E, d_LJ_S, d_Box_x, d_Box_y, d_Box_z, d_cutoff, d_Pt_ePos_x, d_Pt_ePos_y, d_Pt_ePos_z, d_spr_k, d_Mass); hipDeviceSynchronize(); hipMemcpy(All_Pos_x, d_All_Pos_x, sizeof(All_Pos_x), hipMemcpyDeviceToHost); hipMemcpy(All_Pos_y, d_All_Pos_y, sizeof(All_Pos_y), hipMemcpyDeviceToHost); hipMemcpy(All_Pos_z, d_All_Pos_z, sizeof(All_Pos_z), hipMemcpyDeviceToHost); hipMemcpy(All_Vel_x, d_All_Vel_x, sizeof(All_Vel_x), hipMemcpyDeviceToHost); hipMemcpy(All_Vel_y, d_All_Vel_y, sizeof(All_Vel_y), hipMemcpyDeviceToHost); hipMemcpy(All_Vel_z, d_All_Vel_z, sizeof(All_Vel_z), hipMemcpyDeviceToHost); hipMemcpy(All_Acc_x, d_All_Acc_x, sizeof(All_Acc_x), hipMemcpyDeviceToHost); hipMemcpy(All_Acc_y, d_All_Acc_y, sizeof(All_Acc_y), hipMemcpyDeviceToHost); hipMemcpy(All_Acc_z, d_All_Acc_z, sizeof(All_Acc_z), hipMemcpyDeviceToHost); hipMemcpy(Pt_ePos_x, d_Pt_ePos_x, sizeof(Pt_ePos_x), hipMemcpyDeviceToHost); hipMemcpy(Pt_ePos_y, d_Pt_ePos_y, sizeof(Pt_ePos_y), hipMemcpyDeviceToHost); hipMemcpy(Pt_ePos_z, d_Pt_ePos_z, sizeof(Pt_ePos_z), hipMemcpyDeviceToHost); // cout << "Created " << Pt_N << " Pt\n"; cout << "Created " << Ar_N << " Ar\n"; cout << "Pt Average Speed in X: " << Pt_argVelx << "\n"; cout << "Pt Average Speed in Y: " << Pt_argVely << "\n"; cout << "Pt Average Speed in Z: " << Pt_argVelz << "\n"; cout << "Pt Temperature: " << Pt_T << "\n"; cout << "Ar Incidence Speed: " << All_Vel_x[Pt_N] << "," << All_Vel_y[Pt_N] << "," << All_Vel_z[Pt_N] << "\n"; cout << "*******Model Initialization Done!*******\n"; hipFree(d_All_Pos_x); hipFree(d_All_Pos_y); hipFree(d_All_Pos_z); hipFree(d_All_Vel_x); hipFree(d_All_Vel_y); hipFree(d_All_Vel_z); hipFree(d_All_Acc_x); hipFree(d_All_Acc_y); hipFree(d_All_Acc_z); hipFree(d_Box_x); hipFree(d_Box_y); hipFree(d_Box_z); hipFree(d_Pt_ePos_x); hipFree(d_Pt_ePos_y); hipFree(d_Pt_ePos_z); hipFree(d_nd_Velocity); hipFree(d_Mass); hipFree(d_nd_Mass); hipFree(d_kB); hipFree(d_T); hipFree(d_LJ_E); hipFree(d_LJ_S); hipFree(d_cutoff); hipFree(d_spr_k); hipFree(d_Pt_argVel); hipFree(d_Pt_T); } /******************************************************************************/ void Parameters::Initialization_Kernel(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], double All_Vel_x[], double All_Vel_y[], double All_Vel_z[], double All_Acc_x[], double All_Acc_y[], double All_Acc_z[]) { int i, j, k, axis, count; double R1, R2, Rx, Ry; count = 0; srand((unsigned)time(NULL)); 
for (i = 0; i < 2 * Pt_I; i++) { for (j = 0; j < 2 * Pt_J; j++) { for (k = 0; k < 2 * Pt_K; k++) { if (i / 2. + j / 2. + k / 2. == int(i / 2. + j / 2. + k / 2.)) { All_type[count] = 1; All_Pos_x[count] = i / 2.*fcc_lattice; Pt_ePos_x[count] = All_Pos_x[count]; All_Pos_y[count] = j / 2.*fcc_lattice; Pt_ePos_y[count] = All_Pos_y[count]; All_Pos_z[count] = (k / 2. - 2.5)*fcc_lattice; Pt_ePos_z[count] = All_Pos_z[count]; R1 = random(); R2 = random(); All_Vel_x[count] = mp_V[1] / sqrt(3)*sqrt(-2 * log(R1))*cos(2 * PI*R2); R1 = random(); R2 = random(); All_Vel_y[count] = mp_V[1] / sqrt(3)*sqrt(-2 * log(R1))*cos(2 * PI*R2); R1 = random(); R2 = random(); All_Vel_z[count] = mp_V[1] / sqrt(3)*sqrt(-2 * log(R1))*cos(2 * PI*R2); All_Acc_x[count] = 0.0; All_Acc_y[count] = 0.0; All_Acc_z[count] = 0.0; count += 1; } } } } Rx = random(); Ry = random(); All_type[count] = 0; All_Pos_x[count] = Box_x[0] + (Box_x[1] - Box_x[0]) * Rx; All_Pos_y[count] = Box_y[0] + (Box_y[1] - Box_y[0]) * Ry; All_Pos_z[count] = Box_z[1]; R1 = random(); R2 = random(); All_Vel_x[count] = mp_V[0] * sqrt(-log(R1))*cos(2 * PI*R2);//Maxwell R1 = random(); R2 = random(); All_Vel_y[count] = mp_V[0] * sqrt(-log(R1))*sin(2 * PI*R2); R1 = random(); All_Vel_z[count] = -mp_V[0] * sqrt(-log(R1)); All_Acc_x[count] = 0.0; All_Acc_y[count] = 0.0; All_Acc_z[count] = 0.0; } /******************************************************************************/ __global__ void Pos_period(double *All_Pos_x, double *All_Pos_y, double *Box_x, double *Box_y, double *Pt_ePos_x, double *Pt_ePos_y) { int tid = threadIdx.x; if (tid<Pt_N + Ar_N) { //X if (All_Pos_x[tid]<Box_x[0]) { All_Pos_x[tid] += Box_x[1] - Box_x[0]; if (tid<Pt_N) { Pt_ePos_x[tid] += Box_x[1] - Box_x[0]; } } else if (All_Pos_x[tid] >= Box_x[1]) { All_Pos_x[tid] -= Box_x[1] - Box_x[0]; if (tid<Pt_N) { Pt_ePos_x[tid] -= Box_x[1] - Box_x[0]; } } //Y if (All_Pos_y[tid]<Box_y[0]) { All_Pos_y[tid] += Box_y[1] - Box_y[0]; if (tid<Pt_N) { Pt_ePos_y[tid] += Box_y[1] - Box_y[0]; } } else if (All_Pos_y[tid] >= Box_y[1]) { All_Pos_y[tid] -= Box_y[1] - Box_y[0]; if (tid<Pt_N) { Pt_ePos_y[tid] -= Box_y[1] - Box_y[0]; } } } } /******************************************************************************/ void Parameters::rescale_T1(double All_Vel_x[], double All_Vel_y[], double All_Vel_z[]) { int i; Pt_argVel[0] = 0.0; Pt_argVel[1] = 0.0; Pt_argVel[2] = 0.0; for (i = 0; i < Pt_N; i++) { Pt_argVel[0] += All_Vel_x[i] / Pt_N; Pt_argVel[1] += All_Vel_y[i] / Pt_N; Pt_argVel[2] += All_Vel_z[i] / Pt_N; } } /******************************************************************************/ __global__ void rescale_T2(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *Pt_argVel) { int tid = threadIdx.x; if (tid<Pt_N) { All_Vel_x[tid] -= Pt_argVel[0]; All_Vel_y[tid] -= Pt_argVel[1]; All_Vel_z[tid] -= Pt_argVel[2]; } } /******************************************************************************/ void Parameters::rescale_T3(double All_Vel_x[], double All_Vel_y[], double All_Vel_z[]) { int i; Pt_T = 0.0; for (i = 0; i < Pt_N; i++) { Pt_T += All_Vel_x[i] * All_Vel_x[i] + All_Vel_y[i] * All_Vel_y[i] + All_Vel_z[i] * All_Vel_z[i]; } Pt_T *= nd_Velocity * nd_Velocity * Mass[1] * nd_Mass / (3 * Pt_N * kB); } /******************************************************************************/ __global__ void rescale_T4(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *T, double *Pt_T) { int tid = threadIdx.x; if (tid<Pt_N) { All_Vel_x[tid] *= sqrt(T[1] / (*Pt_T)); All_Vel_y[tid] *= sqrt(T[1] / 
(*Pt_T)); All_Vel_z[tid] *= sqrt(T[1] / (*Pt_T)); } } /******************************************************************************/ __global__ void Acceleration_period(double *All_Pos_x, double *All_Pos_y, double *All_Pos_z, double *All_Acc_x, double *All_Acc_y, double *All_Acc_z, double *LJ_E, double *LJ_S, double *Box_x, double *Box_y, double *Box_z, double *cutoff, double *Pt_ePos_x, double *Pt_ePos_y, double *Pt_ePos_z, double *spr_k, double *Mass) { int i, LJ_pair; double Epair, Spair, Pairx, Pairy, Pairz, Dispair, Fpair, Atom_Fx, Atom_Fy, Atom_Fz; double Spring_Disx, Spring_Fx, Pt_Fx, Spring_Disy, Spring_Fy, Pt_Fy, Spring_Disz, Spring_Fz, Pt_Fz, Ar_Fx, Ar_Fy, Ar_Fz; int tid = threadIdx.x; if (tid<Pt_N + Ar_N) { Atom_Fx = 0.0; Atom_Fy = 0.0; Atom_Fz = 0.0; for (i = 0; i<Pt_N + Ar_N; i++) { if (tid<Pt_N && i<Pt_N) { LJ_pair = 1; } else if (tid >= Pt_N && i >= Pt_N) { LJ_pair = 0; } else { LJ_pair = 2; } Epair = LJ_E[LJ_pair]; Spair = LJ_S[LJ_pair]; // Pairx = All_Pos_x[tid] - All_Pos_x[i]; Pairy = All_Pos_y[tid] - All_Pos_y[i]; Pairz = All_Pos_z[tid] - All_Pos_z[i]; if (abs(Pairx) >= Box_x[1] - Box_x[0] - (*cutoff)) { Pairx -= (Box_x[1] - Box_x[0])*Pairx / abs(Pairx); } if (abs(Pairy) >= Box_y[1] - Box_y[0] - (*cutoff)) { Pairy -= (Box_y[1] - Box_y[0])*Pairy / abs(Pairy); } // Dispair = sqrt(Pairx * Pairx + Pairy * Pairy + Pairz * Pairz); if (Dispair > 0 && Dispair <= (*cutoff)) { Fpair = 48 * Epair*(pow(Spair, 12) / pow(Dispair, 13) - 0.5*pow(Spair, 6) / pow(Dispair, 7)); Atom_Fx += Pairx * Fpair / Dispair; Atom_Fy += Pairy * Fpair / Dispair; Atom_Fz += Pairz * Fpair / Dispair; } } if (tid<Pt_N) { //Pt Spring_Disx = All_Pos_x[tid] - Pt_ePos_x[tid]; Spring_Fx = -(*spr_k) * Spring_Disx; Pt_Fx = Atom_Fx + Spring_Fx; All_Acc_x[tid] = Pt_Fx / Mass[1]; Spring_Disy = All_Pos_y[tid] - Pt_ePos_y[tid]; Spring_Fy = -(*spr_k) * Spring_Disy; Pt_Fy = Atom_Fy + Spring_Fy; All_Acc_y[tid] = Pt_Fy / Mass[1]; Spring_Disz = All_Pos_z[tid] - Pt_ePos_z[tid]; Spring_Fz = -(*spr_k) * Spring_Disz; Pt_Fz = Atom_Fz + Spring_Fz; All_Acc_z[tid] = Pt_Fz / Mass[1]; } else { //Ar Ar_Fx = Atom_Fx; All_Acc_x[tid] = Ar_Fx / Mass[0]; Ar_Fy = Atom_Fy; All_Acc_y[tid] = Ar_Fy / Mass[0]; Ar_Fz = Atom_Fz; All_Acc_z[tid] = Ar_Fz / Mass[0]; } } } /******************************************************************************/ __global__ void Verlet_Pos(double *All_Pos_x, double *All_Pos_y, double *All_Pos_z, double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *All_Acc_x, double *All_Acc_y, double *All_Acc_z, double *dt) { int tid = threadIdx.x; if (tid<Pt_N + Ar_N) { All_Pos_x[tid] += All_Vel_x[tid] * (*dt) + 0.5*All_Acc_x[tid] * (*dt) * (*dt); All_Pos_y[tid] += All_Vel_y[tid] * (*dt) + 0.5*All_Acc_y[tid] * (*dt) * (*dt); All_Pos_z[tid] += All_Vel_z[tid] * (*dt) + 0.5*All_Acc_z[tid] * (*dt) * (*dt); } } /******************************************************************************/ __global__ void Verlet_Vel(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *All_Acc_temp_x, double *All_Acc_temp_y, double *All_Acc_temp_z, double *All_Acc_x, double *All_Acc_y, double *All_Acc_z, double *dt) { int tid = threadIdx.x; if (tid<Pt_N + Ar_N) { All_Vel_x[tid] += 0.5*(All_Acc_temp_x[tid] + All_Acc_x[tid])*(*dt); All_Vel_y[tid] += 0.5*(All_Acc_temp_y[tid] + All_Acc_y[tid])*(*dt); All_Vel_z[tid] += 0.5*(All_Acc_temp_z[tid] + All_Acc_z[tid])*(*dt); } } /******************************************************************************/ void Parameters::Dump(int All_type[], double All_Pos_x[], double All_Pos_y[], 
double All_Pos_z[], int timestep, int ds) { int i; if (timestep%ds == 0) { ofstream MD; MD.open("Kernel_MD_CUDA_C.dump", ios::app); MD << "ITEM: TIMESTEP\n"; MD << timestep << "\n"; MD << "ITEM: NUMBER OF ATOMS\n"; MD << Pt_N + Ar_N << "\n"; MD << "ITEM: BOX BOUNDS pp pp ff\n"; MD << Box_x[0] << " " << Box_x[1] << "\n"; MD << Box_y[0] << " " << Box_y[1] << "\n"; MD << Box_z[0] << " " << Box_z[1] << "\n"; MD << "ITEM: ATOMS id type x y z\n"; for (i = 0; i < Pt_N + Ar_N; i++) { MD << i + 1 << " " << All_type[i] + 1 << " " << All_Pos_x[i] << " " << All_Pos_y[i] << " " << All_Pos_z[i] << "\n"; } MD.close(); ofstream Zt; Zt.open("Kernel_MD_CUDA_C_Zt.dat", ios::app); Zt << timestep * dt << " " << All_Pos_z[Pt_N] << "\n"; Zt.close(); } } /******************************************************************************/ void Parameters::Exit(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], int timestep) { if (All_Pos_z[Pt_N] > d || timestep >= Tt) { state = false; Dump(All_type, All_Pos_x, All_Pos_y, All_Pos_z, timestep); } else { Dump(All_type, All_Pos_x, All_Pos_y, All_Pos_z, timestep, dumpstep); } } /******************************************************************************/ double Parameters::random() { double R; R = 0.; while (R == 0.) { R = rand() / double(RAND_MAX); } return R; } //////////////////////////////////////////////////////////////////////////////// /*************************************main*************************************/ //////////////////////////////////////////////////////////////////////////////// int main() { class Parameters Pars; clock_t start, finish; double tperl; int All_type[Pt_N + Ar_N]; double All_Pos_x[Pt_N + Ar_N], All_Pos_y[Pt_N + Ar_N], All_Pos_z[Pt_N + Ar_N], All_Vel_x[Pt_N + Ar_N], All_Vel_y[Pt_N + Ar_N], All_Vel_z[Pt_N + Ar_N], All_Acc_x[Pt_N + Ar_N], All_Acc_y[Pt_N + Ar_N], All_Acc_z[Pt_N + Ar_N]; double *d_All_Pos_x, *d_All_Pos_y, *d_All_Pos_z, *d_All_Vel_x, *d_All_Vel_y, *d_All_Vel_z, *d_All_Acc_x, *d_All_Acc_y, *d_All_Acc_z, *d_All_Acc_temp_x, *d_All_Acc_temp_y, *d_All_Acc_temp_z, *d_Box_x, *d_Box_y, *d_Box_z, *d_Pt_ePos_x, *d_Pt_ePos_y, *d_Pt_ePos_z; double *d_dt, *d_Mass, *d_T, *d_LJ_E, *d_LJ_S, *d_cutoff, *d_spr_k, *d_Pt_argVel, *d_Pt_T; int timestep=0; Pars.Init(); Pars.Initialization(All_type, All_Pos_x, All_Pos_y, All_Pos_z, All_Vel_x, All_Vel_y, All_Vel_z, All_Acc_x, All_Acc_y, All_Acc_z); Pars.Exit(All_type, All_Pos_x, All_Pos_y, All_Pos_z, timestep); // hipMalloc((void**)&d_All_Pos_x, sizeof(All_Pos_x)); hipMalloc((void**)&d_All_Pos_y, sizeof(All_Pos_y)); hipMalloc((void**)&d_All_Pos_z, sizeof(All_Pos_z)); hipMalloc((void**)&d_All_Vel_x, sizeof(All_Vel_x)); hipMalloc((void**)&d_All_Vel_y, sizeof(All_Vel_y)); hipMalloc((void**)&d_All_Vel_z, sizeof(All_Vel_z)); hipMalloc((void**)&d_All_Acc_x, sizeof(All_Acc_x)); hipMalloc((void**)&d_All_Acc_y, sizeof(All_Acc_y)); hipMalloc((void**)&d_All_Acc_z, sizeof(All_Acc_z)); hipMalloc((void**)&d_All_Acc_temp_x, sizeof(All_Acc_x)); hipMalloc((void**)&d_All_Acc_temp_y, sizeof(All_Acc_y)); hipMalloc((void**)&d_All_Acc_temp_z, sizeof(All_Acc_z)); hipMalloc((void**)&d_Box_x, sizeof(Pars.Box_x)); hipMalloc((void**)&d_Box_y, sizeof(Pars.Box_y)); hipMalloc((void**)&d_Box_z, sizeof(Pars.Box_z)); hipMalloc((void**)&d_Pt_ePos_x, sizeof(Pars.Pt_ePos_x)); hipMalloc((void**)&d_Pt_ePos_y, sizeof(Pars.Pt_ePos_y)); hipMalloc((void**)&d_Pt_ePos_z, sizeof(Pars.Pt_ePos_z)); hipMalloc((void**)&d_dt, sizeof(double)); hipMalloc((void**)&d_Mass, sizeof(Pars.Mass)); hipMalloc((void**)&d_T, 
sizeof(Pars.T)); hipMalloc((void**)&d_LJ_E, sizeof(Pars.LJ_E)); hipMalloc((void**)&d_LJ_S, sizeof(Pars.LJ_S)); hipMalloc((void**)&d_cutoff, sizeof(double)); hipMalloc((void**)&d_spr_k, sizeof(double)); hipMalloc((void**)&d_Pt_argVel, sizeof(double)); hipMalloc((void**)&d_Pt_T, sizeof(double)); // hipMemcpy(d_All_Pos_x, All_Pos_x, sizeof(All_Pos_x), hipMemcpyHostToDevice); hipMemcpy(d_All_Pos_y, All_Pos_y, sizeof(All_Pos_y), hipMemcpyHostToDevice); hipMemcpy(d_All_Pos_z, All_Pos_z, sizeof(All_Pos_z), hipMemcpyHostToDevice); hipMemcpy(d_All_Vel_x, All_Vel_x, sizeof(All_Vel_x), hipMemcpyHostToDevice); hipMemcpy(d_All_Vel_y, All_Vel_y, sizeof(All_Vel_y), hipMemcpyHostToDevice); hipMemcpy(d_All_Vel_z, All_Vel_z, sizeof(All_Vel_z), hipMemcpyHostToDevice); hipMemcpy(d_All_Acc_x, All_Acc_x, sizeof(All_Acc_x), hipMemcpyHostToDevice); hipMemcpy(d_All_Acc_y, All_Acc_y, sizeof(All_Acc_y), hipMemcpyHostToDevice); hipMemcpy(d_All_Acc_z, All_Acc_z, sizeof(All_Acc_z), hipMemcpyHostToDevice); hipMemcpy(d_Box_x, Pars.Box_x, sizeof(Pars.Box_x), hipMemcpyHostToDevice); hipMemcpy(d_Box_y, Pars.Box_y, sizeof(Pars.Box_y), hipMemcpyHostToDevice); hipMemcpy(d_Box_z, Pars.Box_z, sizeof(Pars.Box_z), hipMemcpyHostToDevice); hipMemcpy(d_Pt_ePos_x, Pars.Pt_ePos_x, sizeof(Pars.Pt_ePos_x), hipMemcpyHostToDevice); hipMemcpy(d_Pt_ePos_y, Pars.Pt_ePos_y, sizeof(Pars.Pt_ePos_y), hipMemcpyHostToDevice); hipMemcpy(d_Pt_ePos_z, Pars.Pt_ePos_z, sizeof(Pars.Pt_ePos_z), hipMemcpyHostToDevice); hipMemcpy(d_dt, &Pars.dt, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_Mass, Pars.Mass, sizeof(Pars.Mass), hipMemcpyHostToDevice); hipMemcpy(d_T, Pars.T, sizeof(Pars.T), hipMemcpyHostToDevice); hipMemcpy(d_LJ_E, Pars.LJ_E, sizeof(Pars.LJ_E), hipMemcpyHostToDevice); hipMemcpy(d_LJ_S, Pars.LJ_S, sizeof(Pars.LJ_S), hipMemcpyHostToDevice); hipMemcpy(d_cutoff, &Pars.cutoff, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_spr_k, &Pars.spr_k, sizeof(double), hipMemcpyHostToDevice); start = clock(); while (Pars.state) { // Verlet_Pos << <1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_All_Pos_z, d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_All_Acc_x, d_All_Acc_y, d_All_Acc_z, d_dt); hipDeviceSynchronize(); // Pos_period << <1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_Box_x, d_Box_y, d_Pt_ePos_x, d_Pt_ePos_y); hipDeviceSynchronize(); // d_All_Acc_temp_x = d_All_Acc_x; d_All_Acc_temp_y = d_All_Acc_y; d_All_Acc_temp_z = d_All_Acc_z; // Acceleration_period << <1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_All_Pos_z, d_All_Acc_x, d_All_Acc_y, d_All_Acc_z, d_LJ_E, d_LJ_S, d_Box_x, d_Box_y, d_Box_z, d_cutoff, d_Pt_ePos_x, d_Pt_ePos_y, d_Pt_ePos_z, d_spr_k, d_Mass); hipDeviceSynchronize(); // Verlet_Vel << <1, Pt_N + Ar_N >> >(d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_All_Acc_temp_x, d_All_Acc_temp_y, d_All_Acc_temp_z, d_All_Acc_x, d_All_Acc_y, d_All_Acc_z, d_dt); hipDeviceSynchronize(); // hipMemcpy(All_Vel_x, d_All_Vel_x, sizeof(All_Vel_x), hipMemcpyDeviceToHost); hipMemcpy(All_Vel_y, d_All_Vel_y, sizeof(All_Vel_y), hipMemcpyDeviceToHost); hipMemcpy(All_Vel_z, d_All_Vel_z, sizeof(All_Vel_z), hipMemcpyDeviceToHost); Pars.rescale_T1(All_Vel_x, All_Vel_y, All_Vel_z); hipMemcpy(d_Pt_argVel, Pars.Pt_argVel, sizeof(Pars.Pt_argVel), hipMemcpyHostToDevice); rescale_T2 << <1, Pt_N >> > (d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_Pt_argVel); hipDeviceSynchronize(); hipMemcpy(All_Vel_x, d_All_Vel_x, sizeof(All_Vel_x), hipMemcpyDeviceToHost); hipMemcpy(All_Vel_y, d_All_Vel_y, sizeof(All_Vel_y), hipMemcpyDeviceToHost); hipMemcpy(All_Vel_z, d_All_Vel_z, 
sizeof(All_Vel_z), hipMemcpyDeviceToHost); Pars.rescale_T3(All_Vel_x, All_Vel_y, All_Vel_z); hipMemcpy(d_Pt_T, &Pars.Pt_T, sizeof(double), hipMemcpyHostToDevice); rescale_T4 << <1, Pt_N >> > (d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_T, d_Pt_T); hipDeviceSynchronize(); // hipMemcpy(All_Pos_x, d_All_Pos_x, sizeof(All_Pos_x), hipMemcpyDeviceToHost); hipMemcpy(All_Pos_y, d_All_Pos_y, sizeof(All_Pos_y), hipMemcpyDeviceToHost); hipMemcpy(All_Pos_z, d_All_Pos_z, sizeof(All_Pos_z), hipMemcpyDeviceToHost); // Box_z[0]=All_Pos_z[0]; for(i=0;i<Pt_N;i++){ if(All_Pos_z[i]<Box_z[0]){ Box_z[0]=All_Pos_z[i]; } } // timestep += 1; Pars.Exit(All_type, All_Pos_x, All_Pos_y, All_Pos_z, timestep); finish = clock(); tperl = double(finish - start) / CLOCKS_PER_SEC / timestep; cout << timestep << " TimeSteps; ArgTime: " << tperl << " Seconds!\r"; } hipFree(d_All_Pos_x); hipFree(d_All_Pos_y); hipFree(d_All_Pos_z); hipFree(d_All_Vel_x); hipFree(d_All_Vel_y); hipFree(d_All_Vel_z); hipFree(d_All_Acc_x); hipFree(d_All_Acc_y); hipFree(d_All_Acc_z); hipFree(d_All_Acc_temp_x); hipFree(d_All_Acc_temp_y); hipFree(d_All_Acc_temp_z); hipFree(d_Box_x); hipFree(d_Box_y); hipFree(d_Box_z); hipFree(d_Pt_ePos_x); hipFree(d_Pt_ePos_y); hipFree(d_Pt_ePos_z); hipFree(d_dt); hipFree(d_Mass); hipFree(d_T); hipFree(d_LJ_E); hipFree(d_LJ_S); hipFree(d_cutoff); hipFree(d_spr_k); hipFree(d_Pt_argVel); hipFree(d_Pt_T); cout << "\n"; system("pause"); return 0; }
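The main loop of the file above is a velocity Verlet integrator: update positions, recompute accelerations, then update velocities with the average of the old and new accelerations, with a thermostat rescale in between. The sketch below shows the bare scheme for a single 1-D particle under a hypothetical constant force; it illustrates the update order only and is not a reduction of the MD code itself.

#include <cstdio>

int main()
{
    // Single 1-D particle with a hypothetical constant acceleration; the MD
    // code above performs the same update per atom and per axis on the GPU.
    double pos = 0.0, vel = 1.0, acc = -0.5, dt = 0.001;

    for (int step = 0; step < 1000; ++step) {
        // 1) position update with current velocity and acceleration (Verlet_Pos)
        pos += vel * dt + 0.5 * acc * dt * dt;

        // 2) recompute acceleration at the new position (constant here;
        //    Acceleration_period does this with LJ + spring forces)
        double acc_new = -0.5;

        // 3) velocity update with the average of old and new acceleration (Verlet_Vel)
        vel += 0.5 * (acc + acc_new) * dt;
        acc = acc_new;
    }
    std::printf("pos = %f, vel = %f\n", pos, vel);
    return 0;
}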
7250ab9d1a8bddd559e031b5eafdb7eb8b07b4a6.cu
#include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <fstream> #include <algorithm> using namespace std; //const int Lib_N = 100000; //const int SampleParallel_N = 1000; const int Atom_type_N = 2; const int Pair_type_N = 3; const int Pt_I = 6, Pt_J = 6, Pt_K = 3; const int Pt_N = 4 * Pt_I * Pt_J * Pt_K; const int Ar_N = 1; class Parameters { public: int Rt, Tt, dumpstep; double Mass[Atom_type_N], T[Atom_type_N], mp_V[Atom_type_N], LJ_E[Pair_type_N], LJ_S[Pair_type_N], Box_x[2], Box_y[2], Box_z[2], Pt_ePos_x[Pt_N], Pt_ePos_y[Pt_N], Pt_ePos_z[Pt_N], Pt_argVel[3]; double PI, kB, fcc_lattice, nd_Mass, nd_Energy, nd_Length, nd_Velocity, nd_Time, nd_Acceleration, cutoff, d, spr_k, dt, Pt_T; bool state; void Init(); void Initialization(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], double All_Vel_x[], double All_Vel_y[], double All_Vel_z[], double All_Acc_x[], double All_Acc_y[], double All_Acc_z[]); void Initialization_Kernel(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], double All_Vel_x[], double All_Vel_y[], double All_Vel_z[], double All_Acc_x[], double All_Acc_y[], double All_Acc_z[]); void rescale_T1(double All_Vel_x[], double All_Vel_y[], double All_Vel_z[]); void rescale_T3(double All_Vel_x[], double All_Vel_y[], double All_Vel_z[]); void Dump(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], int timestep, int ds = 1); void Exit(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], int timestep); double random(); }; void Parameters::Init() { // PI = 3.14159265; kB = 1.38E-23; Mass[0] = 39.95 / 6.02 * 1E-26;//kg Mass[1] = 195.08 / 6.02 * 1E-26; LJ_E[0] = 1.654E-21;//J LJ_E[1] = 5.207E-20; LJ_E[2] = 1.093E-21; LJ_S[0] = 3.40 * 1E-10;//m LJ_S[1] = 2.47 * 1E-10; LJ_S[2] = 2.94 * 1E-10; cutoff = 10 * 1E-10; fcc_lattice = 3.93E-10; T[0] = 300.; T[1] = 300.; mp_V[0] = sqrt(2 * kB*T[0] / Mass[0]);// mp_V[1] = sqrt(3 * kB*T[1] / Mass[1]);// // nd_Mass = Mass[1]; nd_Energy = LJ_E[1]; nd_Length = LJ_S[1]; nd_Velocity = sqrt(nd_Energy / nd_Mass); nd_Time = nd_Length / nd_Velocity; nd_Acceleration = nd_Energy / (nd_Mass * nd_Length); // Mass[0] /= nd_Mass; Mass[1] /= nd_Mass; LJ_E[0] /= nd_Energy; LJ_E[1] /= nd_Energy; LJ_E[2] /= nd_Energy; LJ_S[0] /= nd_Length; LJ_S[1] /= nd_Length; LJ_S[2] /= nd_Length; cutoff /= nd_Length; fcc_lattice /= nd_Length; mp_V[0] /= nd_Velocity; mp_V[1] /= nd_Velocity; d = 5.0; spr_k = 5000.; dt = 0.001; Rt = 100; Tt = 35; dumpstep = 1; // Box_x[0] = 0; Box_x[1] = Pt_I * fcc_lattice; Box_y[0] = 0; Box_y[1] = Pt_J * fcc_lattice; Box_z[0] = -(Pt_K - 0.5)*fcc_lattice; Box_z[1] = d; // state = true; cout << "*******Parameters Initialized!*******\n"; } /******************************************************************************/ void Parameters::Initialization(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], double All_Vel_x[], double All_Vel_y[], double All_Vel_z[], double All_Acc_x[], double All_Acc_y[], double All_Acc_z[]) { int i; double *d_Pt_argVel, *d_Pt_T, *d_Mass, *d_T, *d_LJ_E, *d_LJ_S, *d_cutoff, *d_spr_k; double *d_All_Pos_x, *d_All_Pos_y, *d_All_Pos_z, *d_All_Vel_x, *d_All_Vel_y, *d_All_Vel_z, *d_All_Acc_x, *d_All_Acc_y, *d_All_Acc_z, *d_Box_x, *d_Box_y, *d_Box_z, *d_Pt_ePos_x, *d_Pt_ePos_y, *d_Pt_ePos_z; __global__ void Pos_period(double *All_Pos_x, double *All_Pos_y, double *Box_x, double *Box_y, double *Pt_ePos_x, double 
*Pt_ePos_y); __global__ void rescale_T2(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *Pt_argVel); __global__ void rescale_T4(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *T, double *Pt_T); __global__ void Acceleration_period(double *All_Pos_x, double *All_Pos_y, double *All_Pos_z, double *All_Acc_x, double *All_Acc_y, double *All_Acc_z, double *LJ_E, double *LJ_S, double *Box_x, double *Box_y, double *Box_z, double *cutoff, double *Pt_ePos_x, double *Pt_ePos_y, double *Pt_ePos_z, double *spr_k, double *Mass); cout << "Box_ZoneX: " << Box_x[0] << ", " << Box_x[1] << "\n"; cout << "Box_ZoneY: " << Box_y[0] << ", " << Box_y[1] << "\n"; cout << "Box_ZoneZ: " << Box_z[0] << ", " << Box_z[1] << "\n"; // Initialization_Kernel(All_type, All_Pos_x, All_Pos_y, All_Pos_z, All_Vel_x, All_Vel_y, All_Vel_z, All_Acc_x, All_Acc_y, All_Acc_z); // cudaMalloc((void**)&d_All_Pos_x, sizeof(All_Pos_x)); cudaMalloc((void**)&d_All_Pos_y, sizeof(All_Pos_y)); cudaMalloc((void**)&d_All_Pos_z, sizeof(All_Pos_z)); cudaMalloc((void**)&d_All_Vel_x, sizeof(All_Vel_x)); cudaMalloc((void**)&d_All_Vel_y, sizeof(All_Vel_y)); cudaMalloc((void**)&d_All_Vel_z, sizeof(All_Vel_z)); cudaMalloc((void**)&d_All_Acc_x, sizeof(All_Acc_x)); cudaMalloc((void**)&d_All_Acc_y, sizeof(All_Acc_y)); cudaMalloc((void**)&d_All_Acc_z, sizeof(All_Acc_z)); cudaMalloc((void**)&d_Box_x, sizeof(Box_x)); cudaMalloc((void**)&d_Box_y, sizeof(Box_y)); cudaMalloc((void**)&d_Box_z, sizeof(Box_z)); cudaMalloc((void**)&d_Pt_ePos_x, sizeof(Pt_ePos_x)); cudaMalloc((void**)&d_Pt_ePos_y, sizeof(Pt_ePos_y)); cudaMalloc((void**)&d_Pt_ePos_z, sizeof(Pt_ePos_z)); cudaMalloc((void**)&d_Mass, sizeof(Mass)); cudaMalloc((void**)&d_T, sizeof(T)); cudaMalloc((void**)&d_LJ_E, sizeof(LJ_E)); cudaMalloc((void**)&d_LJ_S, sizeof(LJ_S)); cudaMalloc((void**)&d_cutoff, sizeof(double)); cudaMalloc((void**)&d_spr_k, sizeof(double)); cudaMalloc((void**)&d_Pt_argVel, sizeof(Pt_argVel)); cudaMalloc((void**)&d_Pt_T, sizeof(double)); // cudaMemcpy(d_All_Pos_x, All_Pos_x, sizeof(All_Pos_x), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Pos_y, All_Pos_y, sizeof(All_Pos_y), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Pos_z, All_Pos_z, sizeof(All_Pos_z), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Vel_x, All_Vel_x, sizeof(All_Vel_x), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Vel_y, All_Vel_y, sizeof(All_Vel_y), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Vel_z, All_Vel_z, sizeof(All_Vel_z), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Acc_x, All_Acc_x, sizeof(All_Acc_x), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Acc_y, All_Acc_y, sizeof(All_Acc_y), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Acc_z, All_Acc_z, sizeof(All_Acc_z), cudaMemcpyHostToDevice); cudaMemcpy(d_Box_x, Box_x, sizeof(Box_x), cudaMemcpyHostToDevice); cudaMemcpy(d_Box_y, Box_y, sizeof(Box_y), cudaMemcpyHostToDevice); cudaMemcpy(d_Box_z, Box_z, sizeof(Box_z), cudaMemcpyHostToDevice); cudaMemcpy(d_Pt_ePos_x, Pt_ePos_x, sizeof(Pt_ePos_x), cudaMemcpyHostToDevice); cudaMemcpy(d_Pt_ePos_y, Pt_ePos_y, sizeof(Pt_ePos_y), cudaMemcpyHostToDevice); cudaMemcpy(d_Pt_ePos_z, Pt_ePos_z, sizeof(Pt_ePos_z), cudaMemcpyHostToDevice); cudaMemcpy(d_Mass, Mass, sizeof(Mass), cudaMemcpyHostToDevice); cudaMemcpy(d_T, T, sizeof(T), cudaMemcpyHostToDevice); cudaMemcpy(d_LJ_E, LJ_E, sizeof(LJ_E), cudaMemcpyHostToDevice); cudaMemcpy(d_LJ_S, LJ_S, sizeof(LJ_S), cudaMemcpyHostToDevice); cudaMemcpy(d_cutoff, &cutoff, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_spr_k, &spr_k, sizeof(double), cudaMemcpyHostToDevice); // 
Pos_period << <1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_Box_x, d_Box_y, d_Pt_ePos_x, d_Pt_ePos_y); cudaDeviceSynchronize(); // Box_z[0]=All_Pos_z[0]; for(i=0;i<Pt_N;i++){ if(All_Pos_z[i]<Box_z[0]){ Box_z[0]=All_Pos_z[i]; } } cout<<Box_z[0]<<"\n"; cudaMemcpy(d_Box_z, Box_z, sizeof(Box_z), cudaMemcpyHostToDevice); // rescale_T1(All_Vel_x, All_Vel_y, All_Vel_z); cudaMemcpy(d_Pt_argVel, Pt_argVel, sizeof(Pt_argVel), cudaMemcpyHostToDevice); rescale_T2 << <1, Pt_N >> > (d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_Pt_argVel); cudaDeviceSynchronize(); cudaMemcpy(All_Vel_x, d_All_Vel_x, sizeof(All_Vel_x), cudaMemcpyDeviceToHost); cudaMemcpy(All_Vel_y, d_All_Vel_y, sizeof(All_Vel_y), cudaMemcpyDeviceToHost); cudaMemcpy(All_Vel_z, d_All_Vel_z, sizeof(All_Vel_z), cudaMemcpyDeviceToHost); rescale_T3(All_Vel_x, All_Vel_y, All_Vel_z); cudaMemcpy(d_Pt_T, &Pt_T, sizeof(double), cudaMemcpyHostToDevice); rescale_T4 << <1, Pt_N >> > (d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_T, d_Pt_T); cudaDeviceSynchronize(); // Acceleration_period << <1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_All_Pos_z, d_All_Acc_x, d_All_Acc_y, d_All_Acc_z, d_LJ_E, d_LJ_S, d_Box_x, d_Box_y, d_Box_z, d_cutoff, d_Pt_ePos_x, d_Pt_ePos_y, d_Pt_ePos_z, d_spr_k, d_Mass); cudaDeviceSynchronize(); cudaMemcpy(All_Pos_x, d_All_Pos_x, sizeof(All_Pos_x), cudaMemcpyDeviceToHost); cudaMemcpy(All_Pos_y, d_All_Pos_y, sizeof(All_Pos_y), cudaMemcpyDeviceToHost); cudaMemcpy(All_Pos_z, d_All_Pos_z, sizeof(All_Pos_z), cudaMemcpyDeviceToHost); cudaMemcpy(All_Vel_x, d_All_Vel_x, sizeof(All_Vel_x), cudaMemcpyDeviceToHost); cudaMemcpy(All_Vel_y, d_All_Vel_y, sizeof(All_Vel_y), cudaMemcpyDeviceToHost); cudaMemcpy(All_Vel_z, d_All_Vel_z, sizeof(All_Vel_z), cudaMemcpyDeviceToHost); cudaMemcpy(All_Acc_x, d_All_Acc_x, sizeof(All_Acc_x), cudaMemcpyDeviceToHost); cudaMemcpy(All_Acc_y, d_All_Acc_y, sizeof(All_Acc_y), cudaMemcpyDeviceToHost); cudaMemcpy(All_Acc_z, d_All_Acc_z, sizeof(All_Acc_z), cudaMemcpyDeviceToHost); cudaMemcpy(Pt_ePos_x, d_Pt_ePos_x, sizeof(Pt_ePos_x), cudaMemcpyDeviceToHost); cudaMemcpy(Pt_ePos_y, d_Pt_ePos_y, sizeof(Pt_ePos_y), cudaMemcpyDeviceToHost); cudaMemcpy(Pt_ePos_z, d_Pt_ePos_z, sizeof(Pt_ePos_z), cudaMemcpyDeviceToHost); // cout << "Created " << Pt_N << " Pt\n"; cout << "Created " << Ar_N << " Ar\n"; cout << "Pt Average Speed in X: " << Pt_argVelx << "\n"; cout << "Pt Average Speed in Y: " << Pt_argVely << "\n"; cout << "Pt Average Speed in Z: " << Pt_argVelz << "\n"; cout << "Pt Temperature: " << Pt_T << "\n"; cout << "Ar Incidence Speed: " << All_Vel_x[Pt_N] << "," << All_Vel_y[Pt_N] << "," << All_Vel_z[Pt_N] << "\n"; cout << "*******Model Initialization Done!*******\n"; cudaFree(d_All_Pos_x); cudaFree(d_All_Pos_y); cudaFree(d_All_Pos_z); cudaFree(d_All_Vel_x); cudaFree(d_All_Vel_y); cudaFree(d_All_Vel_z); cudaFree(d_All_Acc_x); cudaFree(d_All_Acc_y); cudaFree(d_All_Acc_z); cudaFree(d_Box_x); cudaFree(d_Box_y); cudaFree(d_Box_z); cudaFree(d_Pt_ePos_x); cudaFree(d_Pt_ePos_y); cudaFree(d_Pt_ePos_z); cudaFree(d_nd_Velocity); cudaFree(d_Mass); cudaFree(d_nd_Mass); cudaFree(d_kB); cudaFree(d_T); cudaFree(d_LJ_E); cudaFree(d_LJ_S); cudaFree(d_cutoff); cudaFree(d_spr_k); cudaFree(d_Pt_argVel); cudaFree(d_Pt_T); } /******************************************************************************/ void Parameters::Initialization_Kernel(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], double All_Vel_x[], double All_Vel_y[], double All_Vel_z[], double All_Acc_x[], double All_Acc_y[], double All_Acc_z[]) { int i, j, 
k, axis, count; double R1, R2, Rx, Ry; count = 0; srand((unsigned)time(NULL)); for (i = 0; i < 2 * Pt_I; i++) { for (j = 0; j < 2 * Pt_J; j++) { for (k = 0; k < 2 * Pt_K; k++) { if (i / 2. + j / 2. + k / 2. == int(i / 2. + j / 2. + k / 2.)) { All_type[count] = 1; All_Pos_x[count] = i / 2.*fcc_lattice; Pt_ePos_x[count] = All_Pos_x[count]; All_Pos_y[count] = j / 2.*fcc_lattice; Pt_ePos_y[count] = All_Pos_y[count]; All_Pos_z[count] = (k / 2. - 2.5)*fcc_lattice; Pt_ePos_z[count] = All_Pos_z[count]; R1 = random(); R2 = random(); All_Vel_x[count] = mp_V[1] / sqrt(3)*sqrt(-2 * log(R1))*cos(2 * PI*R2); R1 = random(); R2 = random(); All_Vel_y[count] = mp_V[1] / sqrt(3)*sqrt(-2 * log(R1))*cos(2 * PI*R2); R1 = random(); R2 = random(); All_Vel_z[count] = mp_V[1] / sqrt(3)*sqrt(-2 * log(R1))*cos(2 * PI*R2); All_Acc_x[count] = 0.0; All_Acc_y[count] = 0.0; All_Acc_z[count] = 0.0; count += 1; } } } } Rx = random(); Ry = random(); All_type[count] = 0; All_Pos_x[count] = Box_x[0] + (Box_x[1] - Box_x[0]) * Rx; All_Pos_y[count] = Box_y[0] + (Box_y[1] - Box_y[0]) * Ry; All_Pos_z[count] = Box_z[1]; R1 = random(); R2 = random(); All_Vel_x[count] = mp_V[0] * sqrt(-log(R1))*cos(2 * PI*R2);//Maxwell�ֲ� R1 = random(); R2 = random(); All_Vel_y[count] = mp_V[0] * sqrt(-log(R1))*sin(2 * PI*R2); R1 = random(); All_Vel_z[count] = -mp_V[0] * sqrt(-log(R1)); All_Acc_x[count] = 0.0; All_Acc_y[count] = 0.0; All_Acc_z[count] = 0.0; } /******************************************************************************/ __global__ void Pos_period(double *All_Pos_x, double *All_Pos_y, double *Box_x, double *Box_y, double *Pt_ePos_x, double *Pt_ePos_y) { int tid = threadIdx.x; if (tid<Pt_N + Ar_N) { //X if (All_Pos_x[tid]<Box_x[0]) { All_Pos_x[tid] += Box_x[1] - Box_x[0]; if (tid<Pt_N) { Pt_ePos_x[tid] += Box_x[1] - Box_x[0]; } } else if (All_Pos_x[tid] >= Box_x[1]) { All_Pos_x[tid] -= Box_x[1] - Box_x[0]; if (tid<Pt_N) { Pt_ePos_x[tid] -= Box_x[1] - Box_x[0]; } } //Y if (All_Pos_y[tid]<Box_y[0]) { All_Pos_y[tid] += Box_y[1] - Box_y[0]; if (tid<Pt_N) { Pt_ePos_y[tid] += Box_y[1] - Box_y[0]; } } else if (All_Pos_y[tid] >= Box_y[1]) { All_Pos_y[tid] -= Box_y[1] - Box_y[0]; if (tid<Pt_N) { Pt_ePos_y[tid] -= Box_y[1] - Box_y[0]; } } } } /******************************************************************************/ void Parameters::rescale_T1(double All_Vel_x[], double All_Vel_y[], double All_Vel_z[]) { int i; Pt_argVel[0] = 0.0; Pt_argVel[1] = 0.0; Pt_argVel[2] = 0.0; for (i = 0; i < Pt_N; i++) { Pt_argVel[0] += All_Vel_x[i] / Pt_N; Pt_argVel[1] += All_Vel_y[i] / Pt_N; Pt_argVel[2] += All_Vel_z[i] / Pt_N; } } /******************************************************************************/ __global__ void rescale_T2(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *Pt_argVel) { int tid = threadIdx.x; if (tid<Pt_N) { All_Vel_x[tid] -= Pt_argVel[0]; All_Vel_y[tid] -= Pt_argVel[1]; All_Vel_z[tid] -= Pt_argVel[2]; } } /******************************************************************************/ void Parameters::rescale_T3(double All_Vel_x[], double All_Vel_y[], double All_Vel_z[]) { int i; Pt_T = 0.0; for (i = 0; i < Pt_N; i++) { Pt_T += All_Vel_x[i] * All_Vel_x[i] + All_Vel_y[i] * All_Vel_y[i] + All_Vel_z[i] * All_Vel_z[i]; } Pt_T *= nd_Velocity * nd_Velocity * Mass[1] * nd_Mass / (3 * Pt_N * kB); } /******************************************************************************/ __global__ void rescale_T4(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *T, double *Pt_T) { int tid = threadIdx.x; if 
(tid<Pt_N) { All_Vel_x[tid] *= sqrt(T[1] / (*Pt_T)); All_Vel_y[tid] *= sqrt(T[1] / (*Pt_T)); All_Vel_z[tid] *= sqrt(T[1] / (*Pt_T)); } } /******************************************************************************/ __global__ void Acceleration_period(double *All_Pos_x, double *All_Pos_y, double *All_Pos_z, double *All_Acc_x, double *All_Acc_y, double *All_Acc_z, double *LJ_E, double *LJ_S, double *Box_x, double *Box_y, double *Box_z, double *cutoff, double *Pt_ePos_x, double *Pt_ePos_y, double *Pt_ePos_z, double *spr_k, double *Mass) { int i, LJ_pair; double Epair, Spair, Pairx, Pairy, Pairz, Dispair, Fpair, Atom_Fx, Atom_Fy, Atom_Fz; double Spring_Disx, Spring_Fx, Pt_Fx, Spring_Disy, Spring_Fy, Pt_Fy, Spring_Disz, Spring_Fz, Pt_Fz, Ar_Fx, Ar_Fy, Ar_Fz; int tid = threadIdx.x; if (tid<Pt_N + Ar_N) { Atom_Fx = 0.0; Atom_Fy = 0.0; Atom_Fz = 0.0; for (i = 0; i<Pt_N + Ar_N; i++) { if (tid<Pt_N && i<Pt_N) { LJ_pair = 1; } else if (tid >= Pt_N && i >= Pt_N) { LJ_pair = 0; } else { LJ_pair = 2; } Epair = LJ_E[LJ_pair]; Spair = LJ_S[LJ_pair]; // Pairx = All_Pos_x[tid] - All_Pos_x[i]; Pairy = All_Pos_y[tid] - All_Pos_y[i]; Pairz = All_Pos_z[tid] - All_Pos_z[i]; if (abs(Pairx) >= Box_x[1] - Box_x[0] - (*cutoff)) { Pairx -= (Box_x[1] - Box_x[0])*Pairx / abs(Pairx); } if (abs(Pairy) >= Box_y[1] - Box_y[0] - (*cutoff)) { Pairy -= (Box_y[1] - Box_y[0])*Pairy / abs(Pairy); } // Dispair = sqrt(Pairx * Pairx + Pairy * Pairy + Pairz * Pairz); if (Dispair > 0 && Dispair <= (*cutoff)) { Fpair = 48 * Epair*(pow(Spair, 12) / pow(Dispair, 13) - 0.5*pow(Spair, 6) / pow(Dispair, 7)); Atom_Fx += Pairx * Fpair / Dispair; Atom_Fy += Pairy * Fpair / Dispair; Atom_Fz += Pairz * Fpair / Dispair; } } if (tid<Pt_N) { //Pt Spring_Disx = All_Pos_x[tid] - Pt_ePos_x[tid]; Spring_Fx = -(*spr_k) * Spring_Disx; Pt_Fx = Atom_Fx + Spring_Fx; All_Acc_x[tid] = Pt_Fx / Mass[1]; Spring_Disy = All_Pos_y[tid] - Pt_ePos_y[tid]; Spring_Fy = -(*spr_k) * Spring_Disy; Pt_Fy = Atom_Fy + Spring_Fy; All_Acc_y[tid] = Pt_Fy / Mass[1]; Spring_Disz = All_Pos_z[tid] - Pt_ePos_z[tid]; Spring_Fz = -(*spr_k) * Spring_Disz; Pt_Fz = Atom_Fz + Spring_Fz; All_Acc_z[tid] = Pt_Fz / Mass[1]; } else { //Ar Ar_Fx = Atom_Fx; All_Acc_x[tid] = Ar_Fx / Mass[0]; Ar_Fy = Atom_Fy; All_Acc_y[tid] = Ar_Fy / Mass[0]; Ar_Fz = Atom_Fz; All_Acc_z[tid] = Ar_Fz / Mass[0]; } } } /******************************************************************************/ __global__ void Verlet_Pos(double *All_Pos_x, double *All_Pos_y, double *All_Pos_z, double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *All_Acc_x, double *All_Acc_y, double *All_Acc_z, double *dt) { int tid = threadIdx.x; if (tid<Pt_N + Ar_N) { All_Pos_x[tid] += All_Vel_x[tid] * (*dt) + 0.5*All_Acc_x[tid] * (*dt) * (*dt); All_Pos_y[tid] += All_Vel_y[tid] * (*dt) + 0.5*All_Acc_y[tid] * (*dt) * (*dt); All_Pos_z[tid] += All_Vel_z[tid] * (*dt) + 0.5*All_Acc_z[tid] * (*dt) * (*dt); } } /******************************************************************************/ __global__ void Verlet_Vel(double *All_Vel_x, double *All_Vel_y, double *All_Vel_z, double *All_Acc_temp_x, double *All_Acc_temp_y, double *All_Acc_temp_z, double *All_Acc_x, double *All_Acc_y, double *All_Acc_z, double *dt) { int tid = threadIdx.x; if (tid<Pt_N + Ar_N) { All_Vel_x[tid] += 0.5*(All_Acc_temp_x[tid] + All_Acc_x[tid])*(*dt); All_Vel_y[tid] += 0.5*(All_Acc_temp_y[tid] + All_Acc_y[tid])*(*dt); All_Vel_z[tid] += 0.5*(All_Acc_temp_z[tid] + All_Acc_z[tid])*(*dt); } } 
/******************************************************************************/ void Parameters::Dump(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], int timestep, int ds) { int i; if (timestep%ds == 0) { ofstream MD; MD.open("Kernel_MD_CUDA_C.dump", ios::app); MD << "ITEM: TIMESTEP\n"; MD << timestep << "\n"; MD << "ITEM: NUMBER OF ATOMS\n"; MD << Pt_N + Ar_N << "\n"; MD << "ITEM: BOX BOUNDS pp pp ff\n"; MD << Box_x[0] << " " << Box_x[1] << "\n"; MD << Box_y[0] << " " << Box_y[1] << "\n"; MD << Box_z[0] << " " << Box_z[1] << "\n"; MD << "ITEM: ATOMS id type x y z\n"; for (i = 0; i < Pt_N + Ar_N; i++) { MD << i + 1 << " " << All_type[i] + 1 << " " << All_Pos_x[i] << " " << All_Pos_y[i] << " " << All_Pos_z[i] << "\n"; } MD.close(); ofstream Zt; Zt.open("Kernel_MD_CUDA_C_Zt.dat", ios::app); Zt << timestep * dt << " " << All_Pos_z[Pt_N] << "\n"; Zt.close(); } } /******************************************************************************/ void Parameters::Exit(int All_type[], double All_Pos_x[], double All_Pos_y[], double All_Pos_z[], int timestep) { if (All_Pos_z[Pt_N] > d || timestep >= Tt) { state = false; Dump(All_type, All_Pos_x, All_Pos_y, All_Pos_z, timestep); } else { Dump(All_type, All_Pos_x, All_Pos_y, All_Pos_z, timestep, dumpstep); } } /******************************************************************************/ double Parameters::random() { double R; R = 0.; while (R == 0.) { R = rand() / double(RAND_MAX); } return R; } //////////////////////////////////////////////////////////////////////////////// /*************************************main*************************************/ //////////////////////////////////////////////////////////////////////////////// int main() { class Parameters Pars; clock_t start, finish; double tperl; int All_type[Pt_N + Ar_N]; double All_Pos_x[Pt_N + Ar_N], All_Pos_y[Pt_N + Ar_N], All_Pos_z[Pt_N + Ar_N], All_Vel_x[Pt_N + Ar_N], All_Vel_y[Pt_N + Ar_N], All_Vel_z[Pt_N + Ar_N], All_Acc_x[Pt_N + Ar_N], All_Acc_y[Pt_N + Ar_N], All_Acc_z[Pt_N + Ar_N]; double *d_All_Pos_x, *d_All_Pos_y, *d_All_Pos_z, *d_All_Vel_x, *d_All_Vel_y, *d_All_Vel_z, *d_All_Acc_x, *d_All_Acc_y, *d_All_Acc_z, *d_All_Acc_temp_x, *d_All_Acc_temp_y, *d_All_Acc_temp_z, *d_Box_x, *d_Box_y, *d_Box_z, *d_Pt_ePos_x, *d_Pt_ePos_y, *d_Pt_ePos_z; double *d_dt, *d_Mass, *d_T, *d_LJ_E, *d_LJ_S, *d_cutoff, *d_spr_k, *d_Pt_argVel, *d_Pt_T; int timestep=0; Pars.Init(); Pars.Initialization(All_type, All_Pos_x, All_Pos_y, All_Pos_z, All_Vel_x, All_Vel_y, All_Vel_z, All_Acc_x, All_Acc_y, All_Acc_z); Pars.Exit(All_type, All_Pos_x, All_Pos_y, All_Pos_z, timestep); // cudaMalloc((void**)&d_All_Pos_x, sizeof(All_Pos_x)); cudaMalloc((void**)&d_All_Pos_y, sizeof(All_Pos_y)); cudaMalloc((void**)&d_All_Pos_z, sizeof(All_Pos_z)); cudaMalloc((void**)&d_All_Vel_x, sizeof(All_Vel_x)); cudaMalloc((void**)&d_All_Vel_y, sizeof(All_Vel_y)); cudaMalloc((void**)&d_All_Vel_z, sizeof(All_Vel_z)); cudaMalloc((void**)&d_All_Acc_x, sizeof(All_Acc_x)); cudaMalloc((void**)&d_All_Acc_y, sizeof(All_Acc_y)); cudaMalloc((void**)&d_All_Acc_z, sizeof(All_Acc_z)); cudaMalloc((void**)&d_All_Acc_temp_x, sizeof(All_Acc_x)); cudaMalloc((void**)&d_All_Acc_temp_y, sizeof(All_Acc_y)); cudaMalloc((void**)&d_All_Acc_temp_z, sizeof(All_Acc_z)); cudaMalloc((void**)&d_Box_x, sizeof(Pars.Box_x)); cudaMalloc((void**)&d_Box_y, sizeof(Pars.Box_y)); cudaMalloc((void**)&d_Box_z, sizeof(Pars.Box_z)); cudaMalloc((void**)&d_Pt_ePos_x, sizeof(Pars.Pt_ePos_x)); cudaMalloc((void**)&d_Pt_ePos_y, sizeof(Pars.Pt_ePos_y)); 
cudaMalloc((void**)&d_Pt_ePos_z, sizeof(Pars.Pt_ePos_z)); cudaMalloc((void**)&d_dt, sizeof(double)); cudaMalloc((void**)&d_Mass, sizeof(Pars.Mass)); cudaMalloc((void**)&d_T, sizeof(Pars.T)); cudaMalloc((void**)&d_LJ_E, sizeof(Pars.LJ_E)); cudaMalloc((void**)&d_LJ_S, sizeof(Pars.LJ_S)); cudaMalloc((void**)&d_cutoff, sizeof(double)); cudaMalloc((void**)&d_spr_k, sizeof(double)); cudaMalloc((void**)&d_Pt_argVel, sizeof(double)); cudaMalloc((void**)&d_Pt_T, sizeof(double)); // cudaMemcpy(d_All_Pos_x, All_Pos_x, sizeof(All_Pos_x), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Pos_y, All_Pos_y, sizeof(All_Pos_y), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Pos_z, All_Pos_z, sizeof(All_Pos_z), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Vel_x, All_Vel_x, sizeof(All_Vel_x), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Vel_y, All_Vel_y, sizeof(All_Vel_y), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Vel_z, All_Vel_z, sizeof(All_Vel_z), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Acc_x, All_Acc_x, sizeof(All_Acc_x), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Acc_y, All_Acc_y, sizeof(All_Acc_y), cudaMemcpyHostToDevice); cudaMemcpy(d_All_Acc_z, All_Acc_z, sizeof(All_Acc_z), cudaMemcpyHostToDevice); cudaMemcpy(d_Box_x, Pars.Box_x, sizeof(Pars.Box_x), cudaMemcpyHostToDevice); cudaMemcpy(d_Box_y, Pars.Box_y, sizeof(Pars.Box_y), cudaMemcpyHostToDevice); cudaMemcpy(d_Box_z, Pars.Box_z, sizeof(Pars.Box_z), cudaMemcpyHostToDevice); cudaMemcpy(d_Pt_ePos_x, Pars.Pt_ePos_x, sizeof(Pars.Pt_ePos_x), cudaMemcpyHostToDevice); cudaMemcpy(d_Pt_ePos_y, Pars.Pt_ePos_y, sizeof(Pars.Pt_ePos_y), cudaMemcpyHostToDevice); cudaMemcpy(d_Pt_ePos_z, Pars.Pt_ePos_z, sizeof(Pars.Pt_ePos_z), cudaMemcpyHostToDevice); cudaMemcpy(d_dt, &Pars.dt, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_Mass, Pars.Mass, sizeof(Pars.Mass), cudaMemcpyHostToDevice); cudaMemcpy(d_T, Pars.T, sizeof(Pars.T), cudaMemcpyHostToDevice); cudaMemcpy(d_LJ_E, Pars.LJ_E, sizeof(Pars.LJ_E), cudaMemcpyHostToDevice); cudaMemcpy(d_LJ_S, Pars.LJ_S, sizeof(Pars.LJ_S), cudaMemcpyHostToDevice); cudaMemcpy(d_cutoff, &Pars.cutoff, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_spr_k, &Pars.spr_k, sizeof(double), cudaMemcpyHostToDevice); start = clock(); while (Pars.state) { // Verlet_Pos << <1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_All_Pos_z, d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_All_Acc_x, d_All_Acc_y, d_All_Acc_z, d_dt); cudaDeviceSynchronize(); // Pos_period << <1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_Box_x, d_Box_y, d_Pt_ePos_x, d_Pt_ePos_y); cudaDeviceSynchronize(); // d_All_Acc_temp_x = d_All_Acc_x; d_All_Acc_temp_y = d_All_Acc_y; d_All_Acc_temp_z = d_All_Acc_z; // Acceleration_period << <1, Pt_N + Ar_N >> >(d_All_Pos_x, d_All_Pos_y, d_All_Pos_z, d_All_Acc_x, d_All_Acc_y, d_All_Acc_z, d_LJ_E, d_LJ_S, d_Box_x, d_Box_y, d_Box_z, d_cutoff, d_Pt_ePos_x, d_Pt_ePos_y, d_Pt_ePos_z, d_spr_k, d_Mass); cudaDeviceSynchronize(); // Verlet_Vel << <1, Pt_N + Ar_N >> >(d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_All_Acc_temp_x, d_All_Acc_temp_y, d_All_Acc_temp_z, d_All_Acc_x, d_All_Acc_y, d_All_Acc_z, d_dt); cudaDeviceSynchronize(); // cudaMemcpy(All_Vel_x, d_All_Vel_x, sizeof(All_Vel_x), cudaMemcpyDeviceToHost); cudaMemcpy(All_Vel_y, d_All_Vel_y, sizeof(All_Vel_y), cudaMemcpyDeviceToHost); cudaMemcpy(All_Vel_z, d_All_Vel_z, sizeof(All_Vel_z), cudaMemcpyDeviceToHost); Pars.rescale_T1(All_Vel_x, All_Vel_y, All_Vel_z); cudaMemcpy(d_Pt_argVel, Pars.Pt_argVel, sizeof(Pars.Pt_argVel), cudaMemcpyHostToDevice); rescale_T2 << <1, Pt_N >> > (d_All_Vel_x, d_All_Vel_y, 
d_All_Vel_z, d_Pt_argVel); cudaDeviceSynchronize(); cudaMemcpy(All_Vel_x, d_All_Vel_x, sizeof(All_Vel_x), cudaMemcpyDeviceToHost); cudaMemcpy(All_Vel_y, d_All_Vel_y, sizeof(All_Vel_y), cudaMemcpyDeviceToHost); cudaMemcpy(All_Vel_z, d_All_Vel_z, sizeof(All_Vel_z), cudaMemcpyDeviceToHost); Pars.rescale_T3(All_Vel_x, All_Vel_y, All_Vel_z); cudaMemcpy(d_Pt_T, &Pars.Pt_T, sizeof(double), cudaMemcpyHostToDevice); rescale_T4 << <1, Pt_N >> > (d_All_Vel_x, d_All_Vel_y, d_All_Vel_z, d_T, d_Pt_T); cudaDeviceSynchronize(); // cudaMemcpy(All_Pos_x, d_All_Pos_x, sizeof(All_Pos_x), cudaMemcpyDeviceToHost); cudaMemcpy(All_Pos_y, d_All_Pos_y, sizeof(All_Pos_y), cudaMemcpyDeviceToHost); cudaMemcpy(All_Pos_z, d_All_Pos_z, sizeof(All_Pos_z), cudaMemcpyDeviceToHost); // Box_z[0]=All_Pos_z[0]; for(i=0;i<Pt_N;i++){ if(All_Pos_z[i]<Box_z[0]){ Box_z[0]=All_Pos_z[i]; } } // timestep += 1; Pars.Exit(All_type, All_Pos_x, All_Pos_y, All_Pos_z, timestep); finish = clock(); tperl = double(finish - start) / CLOCKS_PER_SEC / timestep; cout << timestep << " TimeSteps; ArgTime: " << tperl << " Seconds!\r"; } cudaFree(d_All_Pos_x); cudaFree(d_All_Pos_y); cudaFree(d_All_Pos_z); cudaFree(d_All_Vel_x); cudaFree(d_All_Vel_y); cudaFree(d_All_Vel_z); cudaFree(d_All_Acc_x); cudaFree(d_All_Acc_y); cudaFree(d_All_Acc_z); cudaFree(d_All_Acc_temp_x); cudaFree(d_All_Acc_temp_y); cudaFree(d_All_Acc_temp_z); cudaFree(d_Box_x); cudaFree(d_Box_y); cudaFree(d_Box_z); cudaFree(d_Pt_ePos_x); cudaFree(d_Pt_ePos_y); cudaFree(d_Pt_ePos_z); cudaFree(d_dt); cudaFree(d_Mass); cudaFree(d_T); cudaFree(d_LJ_E); cudaFree(d_LJ_S); cudaFree(d_cutoff); cudaFree(d_spr_k); cudaFree(d_Pt_argVel); cudaFree(d_Pt_T); cout << "\n"; system("pause"); return 0; }
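Every kernel in the file above is launched with a single block (<<<1, Pt_N + Ar_N>>> or <<<1, Pt_N>>>), so the simulation silently assumes that Pt_N + Ar_N fits inside one block, i.e. at most 1024 threads on current NVIDIA GPUs. A minimal sketch of the standard alternative, a bounds-checked grid-stride kernel launched over ceil(N / blockSize) blocks, is shown below; the kernel, the array, and the particle count in it are illustrative stand-ins, not code from the original file.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative stand-in kernel: the same periodic-wrap idea as Pos_period,
// but written as a grid-stride loop so any number of particles is handled.
__global__ void wrap_positions(double* pos, double lo, double hi, int n)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        if (pos[i] <  lo) pos[i] += hi - lo;
        else if (pos[i] >= hi) pos[i] -= hi - lo;
    }
}

int main()
{
    const int n = 5000;                       // more particles than one block can hold
    double* d_pos;
    cudaMalloc(&d_pos, n * sizeof(double));
    cudaMemset(d_pos, 0, n * sizeof(double)); // contents irrelevant for the launch pattern

    const int block = 256;
    const int grid  = (n + block - 1) / block;   // ceil(n / block)
    wrap_positions<<<grid, block>>>(d_pos, 0.0, 10.0, n);
    cudaDeviceSynchronize();

    printf("launched %d blocks of %d threads\n", grid, block);
    cudaFree(d_pos);
    return 0;
}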
c667171e5d70f02ecaf7c58c52e435c24fe97fd5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Demo code of Cuda programming lecture
 *
 * This programme is a simple implementation of vector addition in CUDA
 *
 *
 */
#include <sys/time.h>
#include <cstdlib>
#include <cstdio>

// Device code
__global__ void VecAdd(int* A, int* B, int* C)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    C[i] = A[i] + B[i];
}

// Host code
int main()
{
    int *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    int N = 33554432;
    size_t size = N * sizeof(int);
    int threadsPerBlock = 1024;
    int blocksPerGrid = N / threadsPerBlock;

    //Time measurement
    timeval kernel_start, kernel_end;
    timeval global_start, global_end;
    float kernel_elapsed_time, global_elapsed_time;

    // Allocate host memory
    h_A = (int*)malloc(size);
    h_B = (int*)malloc(size);
    h_C = (int*)malloc(size);

    //Initialization
    for (int i = 0; i < N; i++)
    {
        h_A[i] = i;
        h_B[i] = i;
    }

    // Allocate device memory
    hipMalloc((void**)&d_A, size);
    hipMalloc((void**)&d_B, size);
    hipMalloc((void**)&d_C, size);

    //Start global timer
    gettimeofday(&global_start, NULL);

    // Copy vectors from host memory to device memory
    hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);

    //Start kernel timer
    gettimeofday(&kernel_start, NULL);

    // Invoke kernel
    hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C);

    //Since kernel launch is asynchronized, block the host code until the kernel finishes
    hipDeviceSynchronize();

    //End kernel timer
    gettimeofday(&kernel_end, NULL);

    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    //hipMemcpy is synchronized, no barrier is needed here

    //Stop global timer
    gettimeofday(&global_end, NULL);

    //get kernel elapsed time
    kernel_elapsed_time = 1000*(kernel_end.tv_sec - kernel_start.tv_sec) + (float)(kernel_end.tv_usec - kernel_start.tv_usec)/1000;

    //get global elapsed time
    global_elapsed_time = 1000*(global_end.tv_sec - global_start.tv_sec) + (float)(global_end.tv_usec - global_start.tv_usec)/1000;

    printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is excluded): %.2f ms\n", kernel_elapsed_time);
    printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is included): %.2f ms\n", global_elapsed_time);

    //Free host memory
    free(h_A);
    free(h_B);
    free(h_C);

    //Free device memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}
c667171e5d70f02ecaf7c58c52e435c24fe97fd5.cu
/**
 * Demo code of Cuda programming lecture
 *
 * This programme is a simple implementation of vector addition in CUDA
 *
 *
 */
#include <sys/time.h>
#include <cstdlib>
#include <cstdio>

// Device code
__global__ void VecAdd(int* A, int* B, int* C)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    C[i] = A[i] + B[i];
}

// Host code
int main()
{
    int *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    int N = 33554432;
    size_t size = N * sizeof(int);
    int threadsPerBlock = 1024;
    int blocksPerGrid = N / threadsPerBlock;

    //Time measurement
    timeval kernel_start, kernel_end;
    timeval global_start, global_end;
    float kernel_elapsed_time, global_elapsed_time;

    // Allocate host memory
    h_A = (int*)malloc(size);
    h_B = (int*)malloc(size);
    h_C = (int*)malloc(size);

    //Initialization
    for (int i = 0; i < N; i++)
    {
        h_A[i] = i;
        h_B[i] = i;
    }

    // Allocate device memory
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);

    //Start global timer
    gettimeofday(&global_start, NULL);

    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    //Start kernel timer
    gettimeofday(&kernel_start, NULL);

    // Invoke kernel
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C);

    //Since kernel launch is asynchronized, block the host code until the kernel finishes
    cudaDeviceSynchronize();

    //End kernel timer
    gettimeofday(&kernel_end, NULL);

    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    //cudaMemcpy is synchronized, no barrier is needed here

    //Stop global timer
    gettimeofday(&global_end, NULL);

    //get kernel elapsed time
    kernel_elapsed_time = 1000*(kernel_end.tv_sec - kernel_start.tv_sec) + (float)(kernel_end.tv_usec - kernel_start.tv_usec)/1000;

    //get global elapsed time
    global_elapsed_time = 1000*(global_end.tv_sec - global_start.tv_sec) + (float)(global_end.tv_usec - global_start.tv_usec)/1000;

    printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is excluded): %.2f ms\n", kernel_elapsed_time);
    printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is included): %.2f ms\n", global_elapsed_time);

    //Free host memory
    free(h_A);
    free(h_B);
    free(h_C);

    //Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
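Both versions of the demo rely on N (33554432) being an exact multiple of threadsPerBlock, so the kernel can skip a bounds check and blocksPerGrid can be computed with plain integer division. For an arbitrary N the usual pattern adds a guard inside the kernel and rounds the grid size up; the sketch below shows that variant. The error-checking macro is an addition for illustration, not part of the lecture code.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                     \
    do {                                                                \
        cudaError_t err = (call);                                       \
        if (err != cudaSuccess) {                                       \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                 \
                    cudaGetErrorString(err), __FILE__, __LINE__);       \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

// Guarded kernel: safe for any N, not just multiples of the block size.
__global__ void VecAddGuarded(const int* A, const int* B, int* C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) C[i] = A[i] + B[i];
}

int main()
{
    const int N = 1000003;                       // deliberately not a multiple of the block size
    const size_t size = N * sizeof(int);
    int *d_A, *d_B, *d_C;
    CHECK(cudaMalloc(&d_A, size));
    CHECK(cudaMalloc(&d_B, size));
    CHECK(cudaMalloc(&d_C, size));
    CHECK(cudaMemset(d_A, 0, size));             // contents irrelevant for the launch pattern
    CHECK(cudaMemset(d_B, 0, size));

    const int threadsPerBlock = 256;
    const int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;  // round up
    VecAddGuarded<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());

    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    return 0;
}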
61e5b812b00751bb65d4a7894ec320940df50ef4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // modify from // https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from // https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // [email protected] #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename scalar_t> __global__ void SigmoidFocalLossForward(const int nthreads, const scalar_t *logits, const long *targets, const int num_classes, const float gamma, const float alpha, const int num, scalar_t *losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80]; // Decide it is positive or negative case. scalar_t c1 = (t == (d + 1)); scalar_t c2 = (t >= 0 & t != (d + 1)); scalar_t zn = (1.0 - alpha); scalar_t zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) scalar_t p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where scalar_t term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // added by Shengkai Wu: // log(p): don't compute focal loss for positive examples scalar_t term1_non_focal = logf(max(p, FLT_MIN)); // p**gamma * log(1-p) scalar_t term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename scalar_t> __global__ void SigmoidFocalLossBackward( const int nthreads, const scalar_t *logits, const long *targets, const scalar_t *d_losses, const int num_classes, const float gamma, const float alpha, const int num, scalar_t *d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80], 0 is background; // Decide it is positive or negative case. scalar_t c1 = (t == (d + 1)); scalar_t c2 = (t >= 0 & t != (d + 1)); scalar_t zn = (1.0 - alpha); scalar_t zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) scalar_t p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p)): this is the derivative to logits[i] scalar_t term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // added by Shengkai Wu // compute the derivative of log(p) relative to logits[i]; scalar_t term1_non_focal = (1. - p); // (p**g) * (g*(1-p)*log(1-p) - p) scalar_t term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. 
- p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits, const at::Tensor &targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); dim3 grid(::min(THCCeilDiv(losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(hipGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( logits.type(), "SigmoidFocalLoss_forward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossForward<scalar_t>), dim3(grid), dim3(block), 0, 0, losses_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<long>(), num_classes, gamma, alpha, num_samples, losses.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits, const at::Tensor &targets, const at::Tensor &d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = num_samples * logits.size(1); dim3 grid(::min(THCCeilDiv(d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(hipGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( logits.type(), "SigmoidFocalLoss_backward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossBackward<scalar_t>), dim3(grid), dim3(block), 0, 0, d_logits_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<long>(), d_losses.contiguous().data<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return d_logits; }
61e5b812b00751bb65d4a7894ec320940df50ef4.cu
// modify from // https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/csrc/cuda/SigmoidFocalLoss_cuda.cu // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from // https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // [email protected] #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename scalar_t> __global__ void SigmoidFocalLossForward(const int nthreads, const scalar_t *logits, const long *targets, const int num_classes, const float gamma, const float alpha, const int num, scalar_t *losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80]; // Decide it is positive or negative case. scalar_t c1 = (t == (d + 1)); scalar_t c2 = (t >= 0 & t != (d + 1)); scalar_t zn = (1.0 - alpha); scalar_t zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) scalar_t p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where scalar_t term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // added by Shengkai Wu: // log(p): don't compute focal loss for positive examples scalar_t term1_non_focal = logf(max(p, FLT_MIN)); // p**gamma * log(1-p) scalar_t term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename scalar_t> __global__ void SigmoidFocalLossBackward( const int nthreads, const scalar_t *logits, const long *targets, const scalar_t *d_losses, const int num_classes, const float gamma, const float alpha, const int num, scalar_t *d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80], 0 is background; // Decide it is positive or negative case. scalar_t c1 = (t == (d + 1)); scalar_t c2 = (t >= 0 & t != (d + 1)); scalar_t zn = (1.0 - alpha); scalar_t zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) scalar_t p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p)): this is the derivative to logits[i] scalar_t term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // added by Shengkai Wu // compute the derivative of log(p) relative to logits[i]; scalar_t term1_non_focal = (1. - p); // (p**g) * (g*(1-p)*log(1-p) - p) scalar_t term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. 
- p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward at::Tensor SigmoidFocalLoss_forward_cuda(const at::Tensor &logits, const at::Tensor &targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(cudaGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( logits.type(), "SigmoidFocalLoss_forward", [&] { SigmoidFocalLossForward<scalar_t><<<grid, block>>>( losses_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<long>(), num_classes, gamma, alpha, num_samples, losses.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda(const at::Tensor &logits, const at::Tensor &targets, const at::Tensor &d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = num_samples * logits.size(1); dim3 grid(std::min(THCCeilDiv(d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(cudaGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES_AND_HALF( logits.type(), "SigmoidFocalLoss_backward", [&] { SigmoidFocalLossBackward<scalar_t><<<grid, block>>>( d_logits_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<long>(), d_losses.contiguous().data<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return d_logits; }
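For reference, the per-element arithmetic in SigmoidFocalLossForward can be checked against a small host function: with p = sigmoid(x), a positive example contributes -alpha * (1-p)^gamma * log(p) and a negative one contributes -(1-alpha) * p^gamma * log(1-p), where log(1-p) is expanded in the same numerically stable form the kernel uses. The sketch below mirrors those comments; it is a standalone illustration, not part of the extension's API.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdio>

// Host-side reference of the per-element focal loss computed by the kernel above.
// x: raw logit; positive: whether the target class matches this logit's class.
float sigmoid_focal_loss(float x, bool positive, float gamma, float alpha)
{
    float p = 1.f / (1.f + std::exp(-x));
    // Numerically stable log(1 - p), matching the kernel's
    // -x*(x>=0) - log(1 + exp(x - 2*x*(x>=0))) expansion.
    float log_one_minus_p =
        -x * (x >= 0.f) - std::log(1.f + std::exp(x - 2.f * x * (x >= 0.f)));
    if (positive)
        return -alpha * std::pow(1.f - p, gamma) * std::log(std::max(p, FLT_MIN));
    return -(1.f - alpha) * std::pow(p, gamma) * log_one_minus_p;
}

int main()
{
    // With gamma = 0 the two branches reduce to alpha-weighted BCE terms.
    printf("positive, x = 2.0: %f\n", sigmoid_focal_loss(2.0f, true,  2.0f, 0.25f));
    printf("negative, x = 2.0: %f\n", sigmoid_focal_loss(2.0f, false, 2.0f, 0.25f));
    return 0;
}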
97341a22ef1933bfbab2f3a67a1d189c2218907e.hip
// !!! This is a file automatically generated by hipify!!! #include <unittest/unittest.h> #include <cusp/array2d.h> #include <cusp/blas/blas.h> #include <cusp/gallery/poisson.h> #include <cusp/system/cuda/detail/cublas/blas.h> template<typename ValueType> void TestCublasAmax(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array x(6); View view_x(x); x[0] = 7.0f; x[1] = -5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 1.0f; ASSERT_EQUAL(cusp::blas::amax(cusp::cuda::par.with(handle),x), 0); ASSERT_EQUAL(cusp::blas::amax(cusp::cuda::par.with(handle),view_x), 0); if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasAmax); template<typename ValueType> void TestCublasAsum(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array x(6); View view_x(x); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 1.0f; ASSERT_EQUAL(cusp::blas::asum(cusp::cuda::par.with(handle),x), 20.0f); ASSERT_EQUAL(cusp::blas::asum(cusp::cuda::par.with(handle),view_x), 20.0f); if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasAsum); template<typename ValueType> void TestCublasAxpy(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array x(4); Array y(4); x[0] = 7.0f; y[0] = 0.0f; x[1] = 5.0f; y[1] = -2.0f; x[2] = 4.0f; y[2] = 0.0f; x[3] = -3.0f; y[3] = 5.0f; cusp::blas::axpy(cusp::cuda::par.with(handle), x, y, 2.0f); ASSERT_EQUAL(y[0], 14.0); ASSERT_EQUAL(y[1], 8.0); ASSERT_EQUAL(y[2], 8.0); ASSERT_EQUAL(y[3], -1.0); View view_x(x); View view_y(y); cusp::blas::axpy(cusp::cuda::par.with(handle), view_x, view_y, 2.0f); ASSERT_EQUAL(y[0], 28.0); ASSERT_EQUAL(y[1], 18.0); ASSERT_EQUAL(y[2], 16.0); ASSERT_EQUAL(y[3], -7.0); if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasAxpy); template<typename ValueType> void TestCublasCopy(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array x(4); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; { Array y(4, -1); cusp::blas::copy(cusp::cuda::par.with(handle), x, y); ASSERT_EQUAL(x==y, true); } { Array y(4, -1); View view_x(x); View view_y(y); cusp::blas::copy(cusp::cuda::par.with(handle), view_x, view_y); ASSERT_EQUAL(x==y, true); } if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasCopy); template<typename ValueType> void TestCublasDot(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw 
cusp::runtime_exception("hipblasCreate failed"); } Array x(6); Array y(6); x[0] = 7.0f; y[0] = 0.0f; x[1] = 5.0f; y[1] = -2.0f; x[2] = 4.0f; y[2] = 0.0f; x[3] = -3.0f; y[3] = 5.0f; x[4] = 0.0f; y[4] = 6.0f; x[5] = 4.0f; y[5] = 1.0f; ASSERT_EQUAL(cusp::blas::dot(cusp::cuda::par.with(handle), x, y), -21.0f); View view_x(x); View view_y(y); ASSERT_EQUAL(cusp::blas::dot(cusp::cuda::par.with(handle), view_x, view_y), -21.0f); if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_REAL_UNITTEST(TestCublasDot); template<typename ValueType> void TestCublasNrm2(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array x(6); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 1.0f; ASSERT_EQUAL(cusp::blas::nrm2(cusp::cuda::par.with(handle), x), 10.0f); ASSERT_EQUAL(cusp::blas::nrm2(cusp::cuda::par.with(handle), View(x)), 10.0f); if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasNrm2); template<typename ValueType> void TestCublasScal(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array x(6); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; cusp::blas::scal(cusp::cuda::par.with(handle), x, 4.0f); ASSERT_EQUAL(x[0], 28.0); ASSERT_EQUAL(x[1], 20.0); ASSERT_EQUAL(x[2], 16.0); ASSERT_EQUAL(x[3], -12.0); ASSERT_EQUAL(x[4], 0.0); ASSERT_EQUAL(x[5], 16.0); View v(x); cusp::blas::scal(cusp::cuda::par.with(handle), v, 2.0f); ASSERT_EQUAL(x[0], 56.0); ASSERT_EQUAL(x[1], 40.0); ASSERT_EQUAL(x[2], 32.0); ASSERT_EQUAL(x[3], -24.0); ASSERT_EQUAL(x[4], 0.0); ASSERT_EQUAL(x[5], 32.0); if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasScal); template<typename ValueType> void TestCublasGemv(void) { typedef cusp::array2d<ValueType, cusp::device_memory> Array2d; typedef cusp::array1d<ValueType, cusp::device_memory> Array1d; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array2d A; Array1d x(9); Array1d y(9); cusp::gallery::poisson5pt(A, 3, 3); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; x[6] = -3.0f; x[7] = 0.0f; x[8] = 4.0f; cusp::blas::gemv(cusp::cuda::par.with(handle), A, x, y); ASSERT_EQUAL(y[0], 26.0); ASSERT_EQUAL(y[1], 9.0); ASSERT_EQUAL(y[2], 7.0); ASSERT_EQUAL(y[3], -16.0); ASSERT_EQUAL(y[4], -6.0); ASSERT_EQUAL(y[5], 8.0); ASSERT_EQUAL(y[6], -9.0); ASSERT_EQUAL(y[7], -1.0); ASSERT_EQUAL(y[8], 12.0); if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasGemv); template<typename ValueType> void TestCublasSymv(void) { typedef cusp::array2d<ValueType, cusp::device_memory> Array2d; typedef cusp::array1d<ValueType, cusp::device_memory> Array1d; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array2d A; Array1d x(9); Array1d 
y(9); cusp::gallery::poisson5pt(A, 3, 3); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; x[6] = -3.0f; x[7] = 0.0f; x[8] = 4.0f; cusp::blas::symv(cusp::cuda::par.with(handle), A, x, y); ASSERT_EQUAL(y[0], 26.0); ASSERT_EQUAL(y[1], 9.0); ASSERT_EQUAL(y[2], 7.0); ASSERT_EQUAL(y[3], -16.0); ASSERT_EQUAL(y[4], -6.0); ASSERT_EQUAL(y[5], 8.0); ASSERT_EQUAL(y[6], -9.0); ASSERT_EQUAL(y[7], -1.0); ASSERT_EQUAL(y[8], 12.0); if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_REAL_UNITTEST(TestCublasSymv); template<typename ValueType> void TestCublasTrmv(void) { typedef cusp::array2d<ValueType, cusp::device_memory> Array2d; typedef cusp::array1d<ValueType, cusp::device_memory> Array1d; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array2d A; Array1d x(9); Array1d expected(9); cusp::gallery::poisson5pt(A, 3, 3); // set lower diagonal entries to zero for(int j = 0; j < 9; j++) for(int i = j + 1; i < 9; i++) A(i,j) = ValueType(0); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; x[6] = -3.0f; x[7] = 0.0f; x[8] = 4.0f; cusp::blas::gemv(cusp::cuda::par.with(handle), A, x, expected); cusp::blas::trmv(cusp::cuda::par.with(handle), A, x); /* ASSERT_ALMOST_EQUAL(x, expected); */ if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } KNOWN_FAILURE; } DECLARE_NUMERIC_UNITTEST(TestCublasTrmv); template<typename ValueType> void TestCublasTrsv(void) { typedef cusp::array2d<ValueType, cusp::device_memory> Array2d; typedef cusp::array1d<ValueType, cusp::device_memory> Array1d; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array2d A; Array1d x(9); cusp::gallery::poisson5pt(A, 3, 3); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; x[6] = -3.0f; x[7] = 0.0f; x[8] = 4.0f; Array1d b(x); cusp::blas::trsv(cusp::cuda::par.with(handle), A, x); // check residual norm cusp::array1d<ValueType, cusp::device_memory> residual(x); cusp::blas::trmv(cusp::cuda::par.with(handle), A, residual); cusp::blas::axpby(residual, b, residual, -1.0f, 1.0f); ASSERT_EQUAL(cusp::blas::nrm2(residual) < 1e-7, true); if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasTrsv); template<typename ValueType, typename Orientation> void TestCublasGemmOrientation(void) { typedef cusp::array2d<ValueType, cusp::device_memory, Orientation> Array2dDev; typedef typename Array2dDev::rebind<cusp::host_memory>::type Array2dHost; hipblasHandle_t handle; if(hipblasCreate(&handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasCreate failed"); } Array2dDev A(3, 4); Array2dDev B(4, 3); cusp::counting_array<ValueType> init_values(A.num_entries, 1); A.values = init_values; B.values = init_values; Array2dHost A_h(A); Array2dHost B_h(B); { Array2dDev C(A.num_rows, B.num_cols); cusp::blas::gemm(cusp::cuda::par.with(handle), A, B, C); Array2dHost C_h(C.num_rows, C.num_cols); cusp::blas::gemm(A_h, B_h, C_h); ASSERT_EQUAL(C_h.values, C.values); } { Array2dDev C(A.T().num_rows, B.T().num_cols); cusp::blas::gemm(cusp::cuda::par.with(handle), A.T(), B.T(), C); Array2dHost C_h(C.num_rows, C.num_cols); cusp::blas::gemm(A_h.T(), B_h.T(), C_h); ASSERT_EQUAL(C_h.values, 
C.values); } { Array2dDev C(A.T().num_rows, A.num_cols); cusp::blas::gemm(cusp::cuda::par.with(handle), A.T(), A, C); Array2dHost C_h(C.num_rows, C.num_cols); cusp::blas::gemm(A_h.T(), A_h, C_h); ASSERT_EQUAL(C_h.values, C.values); } { Array2dDev C(A.num_rows, A.T().num_cols); cusp::blas::gemm(cusp::cuda::par.with(handle), A, A.T(), C); Array2dHost C_h(C.num_rows, C.num_cols); cusp::blas::gemm(A_h, A_h.T(), C_h); ASSERT_EQUAL(C_h.values, C.values); } if(hipblasDestroy(handle) != HIPBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("hipblasDestroy failed"); } } template<typename ValueType> void TestCublasGemm(void) { TestCublasGemmOrientation<ValueType,cusp::row_major>(); TestCublasGemmOrientation<ValueType,cusp::column_major>(); } DECLARE_REAL_UNITTEST(TestCublasGemm);
97341a22ef1933bfbab2f3a67a1d189c2218907e.cu
#include <unittest/unittest.h> #include <cusp/array2d.h> #include <cusp/blas/blas.h> #include <cusp/gallery/poisson.h> #include <cusp/system/cuda/detail/cublas/blas.h> template<typename ValueType> void TestCublasAmax(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array x(6); View view_x(x); x[0] = 7.0f; x[1] = -5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 1.0f; ASSERT_EQUAL(cusp::blas::amax(cusp::cuda::par.with(handle),x), 0); ASSERT_EQUAL(cusp::blas::amax(cusp::cuda::par.with(handle),view_x), 0); if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasAmax); template<typename ValueType> void TestCublasAsum(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array x(6); View view_x(x); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 1.0f; ASSERT_EQUAL(cusp::blas::asum(cusp::cuda::par.with(handle),x), 20.0f); ASSERT_EQUAL(cusp::blas::asum(cusp::cuda::par.with(handle),view_x), 20.0f); if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasAsum); template<typename ValueType> void TestCublasAxpy(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array x(4); Array y(4); x[0] = 7.0f; y[0] = 0.0f; x[1] = 5.0f; y[1] = -2.0f; x[2] = 4.0f; y[2] = 0.0f; x[3] = -3.0f; y[3] = 5.0f; cusp::blas::axpy(cusp::cuda::par.with(handle), x, y, 2.0f); ASSERT_EQUAL(y[0], 14.0); ASSERT_EQUAL(y[1], 8.0); ASSERT_EQUAL(y[2], 8.0); ASSERT_EQUAL(y[3], -1.0); View view_x(x); View view_y(y); cusp::blas::axpy(cusp::cuda::par.with(handle), view_x, view_y, 2.0f); ASSERT_EQUAL(y[0], 28.0); ASSERT_EQUAL(y[1], 18.0); ASSERT_EQUAL(y[2], 16.0); ASSERT_EQUAL(y[3], -7.0); if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasAxpy); template<typename ValueType> void TestCublasCopy(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array x(4); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; { Array y(4, -1); cusp::blas::copy(cusp::cuda::par.with(handle), x, y); ASSERT_EQUAL(x==y, true); } { Array y(4, -1); View view_x(x); View view_y(y); cusp::blas::copy(cusp::cuda::par.with(handle), view_x, view_y); ASSERT_EQUAL(x==y, true); } if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasCopy); template<typename ValueType> void TestCublasDot(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array x(6); Array y(6); x[0] = 7.0f; 
y[0] = 0.0f; x[1] = 5.0f; y[1] = -2.0f; x[2] = 4.0f; y[2] = 0.0f; x[3] = -3.0f; y[3] = 5.0f; x[4] = 0.0f; y[4] = 6.0f; x[5] = 4.0f; y[5] = 1.0f; ASSERT_EQUAL(cusp::blas::dot(cusp::cuda::par.with(handle), x, y), -21.0f); View view_x(x); View view_y(y); ASSERT_EQUAL(cusp::blas::dot(cusp::cuda::par.with(handle), view_x, view_y), -21.0f); if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_REAL_UNITTEST(TestCublasDot); template<typename ValueType> void TestCublasNrm2(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array x(6); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 1.0f; ASSERT_EQUAL(cusp::blas::nrm2(cusp::cuda::par.with(handle), x), 10.0f); ASSERT_EQUAL(cusp::blas::nrm2(cusp::cuda::par.with(handle), View(x)), 10.0f); if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasNrm2); template<typename ValueType> void TestCublasScal(void) { typedef cusp::array1d<ValueType, cusp::device_memory> Array; typedef typename Array::view View; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array x(6); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; cusp::blas::scal(cusp::cuda::par.with(handle), x, 4.0f); ASSERT_EQUAL(x[0], 28.0); ASSERT_EQUAL(x[1], 20.0); ASSERT_EQUAL(x[2], 16.0); ASSERT_EQUAL(x[3], -12.0); ASSERT_EQUAL(x[4], 0.0); ASSERT_EQUAL(x[5], 16.0); View v(x); cusp::blas::scal(cusp::cuda::par.with(handle), v, 2.0f); ASSERT_EQUAL(x[0], 56.0); ASSERT_EQUAL(x[1], 40.0); ASSERT_EQUAL(x[2], 32.0); ASSERT_EQUAL(x[3], -24.0); ASSERT_EQUAL(x[4], 0.0); ASSERT_EQUAL(x[5], 32.0); if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasScal); template<typename ValueType> void TestCublasGemv(void) { typedef cusp::array2d<ValueType, cusp::device_memory> Array2d; typedef cusp::array1d<ValueType, cusp::device_memory> Array1d; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array2d A; Array1d x(9); Array1d y(9); cusp::gallery::poisson5pt(A, 3, 3); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; x[6] = -3.0f; x[7] = 0.0f; x[8] = 4.0f; cusp::blas::gemv(cusp::cuda::par.with(handle), A, x, y); ASSERT_EQUAL(y[0], 26.0); ASSERT_EQUAL(y[1], 9.0); ASSERT_EQUAL(y[2], 7.0); ASSERT_EQUAL(y[3], -16.0); ASSERT_EQUAL(y[4], -6.0); ASSERT_EQUAL(y[5], 8.0); ASSERT_EQUAL(y[6], -9.0); ASSERT_EQUAL(y[7], -1.0); ASSERT_EQUAL(y[8], 12.0); if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasGemv); template<typename ValueType> void TestCublasSymv(void) { typedef cusp::array2d<ValueType, cusp::device_memory> Array2d; typedef cusp::array1d<ValueType, cusp::device_memory> Array1d; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array2d A; Array1d x(9); Array1d y(9); cusp::gallery::poisson5pt(A, 3, 3); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; 
x[6] = -3.0f; x[7] = 0.0f; x[8] = 4.0f; cusp::blas::symv(cusp::cuda::par.with(handle), A, x, y); ASSERT_EQUAL(y[0], 26.0); ASSERT_EQUAL(y[1], 9.0); ASSERT_EQUAL(y[2], 7.0); ASSERT_EQUAL(y[3], -16.0); ASSERT_EQUAL(y[4], -6.0); ASSERT_EQUAL(y[5], 8.0); ASSERT_EQUAL(y[6], -9.0); ASSERT_EQUAL(y[7], -1.0); ASSERT_EQUAL(y[8], 12.0); if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_REAL_UNITTEST(TestCublasSymv); template<typename ValueType> void TestCublasTrmv(void) { typedef cusp::array2d<ValueType, cusp::device_memory> Array2d; typedef cusp::array1d<ValueType, cusp::device_memory> Array1d; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array2d A; Array1d x(9); Array1d expected(9); cusp::gallery::poisson5pt(A, 3, 3); // set lower diagonal entries to zero for(int j = 0; j < 9; j++) for(int i = j + 1; i < 9; i++) A(i,j) = ValueType(0); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; x[6] = -3.0f; x[7] = 0.0f; x[8] = 4.0f; cusp::blas::gemv(cusp::cuda::par.with(handle), A, x, expected); cusp::blas::trmv(cusp::cuda::par.with(handle), A, x); /* ASSERT_ALMOST_EQUAL(x, expected); */ if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } KNOWN_FAILURE; } DECLARE_NUMERIC_UNITTEST(TestCublasTrmv); template<typename ValueType> void TestCublasTrsv(void) { typedef cusp::array2d<ValueType, cusp::device_memory> Array2d; typedef cusp::array1d<ValueType, cusp::device_memory> Array1d; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array2d A; Array1d x(9); cusp::gallery::poisson5pt(A, 3, 3); x[0] = 7.0f; x[1] = 5.0f; x[2] = 4.0f; x[3] = -3.0f; x[4] = 0.0f; x[5] = 4.0f; x[6] = -3.0f; x[7] = 0.0f; x[8] = 4.0f; Array1d b(x); cusp::blas::trsv(cusp::cuda::par.with(handle), A, x); // check residual norm cusp::array1d<ValueType, cusp::device_memory> residual(x); cusp::blas::trmv(cusp::cuda::par.with(handle), A, residual); cusp::blas::axpby(residual, b, residual, -1.0f, 1.0f); ASSERT_EQUAL(cusp::blas::nrm2(residual) < 1e-7, true); if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } DECLARE_NUMERIC_UNITTEST(TestCublasTrsv); template<typename ValueType, typename Orientation> void TestCublasGemmOrientation(void) { typedef cusp::array2d<ValueType, cusp::device_memory, Orientation> Array2dDev; typedef typename Array2dDev::rebind<cusp::host_memory>::type Array2dHost; cublasHandle_t handle; if(cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasCreate failed"); } Array2dDev A(3, 4); Array2dDev B(4, 3); cusp::counting_array<ValueType> init_values(A.num_entries, 1); A.values = init_values; B.values = init_values; Array2dHost A_h(A); Array2dHost B_h(B); { Array2dDev C(A.num_rows, B.num_cols); cusp::blas::gemm(cusp::cuda::par.with(handle), A, B, C); Array2dHost C_h(C.num_rows, C.num_cols); cusp::blas::gemm(A_h, B_h, C_h); ASSERT_EQUAL(C_h.values, C.values); } { Array2dDev C(A.T().num_rows, B.T().num_cols); cusp::blas::gemm(cusp::cuda::par.with(handle), A.T(), B.T(), C); Array2dHost C_h(C.num_rows, C.num_cols); cusp::blas::gemm(A_h.T(), B_h.T(), C_h); ASSERT_EQUAL(C_h.values, C.values); } { Array2dDev C(A.T().num_rows, A.num_cols); cusp::blas::gemm(cusp::cuda::par.with(handle), A.T(), A, C); Array2dHost C_h(C.num_rows, 
C.num_cols); cusp::blas::gemm(A_h.T(), A_h, C_h); ASSERT_EQUAL(C_h.values, C.values); } { Array2dDev C(A.num_rows, A.T().num_cols); cusp::blas::gemm(cusp::cuda::par.with(handle), A, A.T(), C); Array2dHost C_h(C.num_rows, C.num_cols); cusp::blas::gemm(A_h, A_h.T(), C_h); ASSERT_EQUAL(C_h.values, C.values); } if(cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) { throw cusp::runtime_exception("cublasDestroy failed"); } } template<typename ValueType> void TestCublasGemm(void) { TestCublasGemmOrientation<ValueType,cusp::row_major>(); TestCublasGemmOrientation<ValueType,cusp::column_major>(); } DECLARE_REAL_UNITTEST(TestCublasGemm);
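Each test above wraps a cusp::blas call in the same cublasCreate / cusp::cuda::par.with(handle) / cublasDestroy lifecycle. For comparison, the sketch below runs the AXPY case from TestCublasAxpy directly against raw cuBLAS, without cusp; it is a generic illustration of that handle lifecycle rather than part of the test suite.

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main()
{
    cublasHandle_t handle;
    if (cublasCreate(&handle) != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "cublasCreate failed\n");
        return 1;
    }

    const int n = 4;
    std::vector<float> x = {7.f, 5.f, 4.f, -3.f};
    std::vector<float> y = {0.f, -2.f, 0.f, 5.f};
    float *d_x, *d_y;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMalloc(&d_y, n * sizeof(float));
    cudaMemcpy(d_x, x.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    const float alpha = 2.f;                       // y <- alpha * x + y
    cublasSaxpy(handle, n, &alpha, d_x, 1, d_y, 1);

    cudaMemcpy(y.data(), d_y, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f %f %f %f\n", y[0], y[1], y[2], y[3]);  // 14 8 8 -1, as in TestCublasAxpy

    cudaFree(d_x);
    cudaFree(d_y);
    if (cublasDestroy(handle) != CUBLAS_STATUS_SUCCESS) {
        fprintf(stderr, "cublasDestroy failed\n");
        return 1;
    }
    return 0;
}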
1dd79f2fd6b3f72333bde02082f218e281efdf5d.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/kernels/conv_grad_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/fluid/framework/eigen.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/phi/kernels/funcs/padding.h" #include "paddle/phi/kernels/cpu/conv_util.h" #include "paddle/phi/kernels/funcs/batch_norm_utils.h" #include "paddle/phi/kernels/impl/conv_cudnn_impl.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { template <typename T, typename Context> void ConvCudnnGradGradKernel( const Context& ctx, paddle::optional<const DenseTensor&> input_grad_grad, paddle::optional<const DenseTensor&> filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* out_grad_grad, DenseTensor* input_grad, DenseTensor* filter_grad) { auto X = &input; auto W = &filter; auto dO = &out_grad; auto ddX = input_grad_grad.get_ptr(); auto ddW = filter_grad_grad.get_ptr(); auto ddO = out_grad_grad; auto dW = filter_grad; auto dX = input_grad; if (ddO) { ctx.template Alloc<T>(ddO); phi::funcs::SetConstant<Context, T> set_zero; set_zero(ctx, ddO, static_cast<T>(0)); } if (dW) { ctx.template Alloc<T>(dW); } if (dX) { ctx.template Alloc<T>(dX); } // const T* x = X->data<T>(); const T* dy = dO->data<T>(); const T* w = W->data<T>(); const T* ddx = nullptr; const T* ddw = nullptr; T *dw, *dx, *ddy; dw = dx = ddy = nullptr; T* transformed_dx = nullptr; std::vector<int> dilations = dilations_t; bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t; bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, phi::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); std::vector<int> paddings = paddings_t; const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); // transform Tensors to channel first----------- DenseTensor transformed_X_channel(X->type()); DenseTensor transformed_dO_channel(dO->type()); DenseTensor transformed_ddX_channel(X->type()); DenseTensor transformed_ddO_channel(dO->type()); DenseTensor 
transformed_dX_channel(X->type()); if (channel_last) { ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); if (ddX) { ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); } if (ddO) { ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel); } if (dX) { ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel); ctx.template Alloc<T>(&transformed_dX_channel); } } else { transformed_X_channel = *X; transformed_dO_channel = *dO; if (ddX) { transformed_ddX_channel = *ddX; } if (ddO) { transformed_ddO_channel.ShareDataWith(*ddO); } if (dX) { transformed_dX_channel.ShareDataWith(*dX); } } auto in_dims = transformed_X_channel.dims(); auto filter_dims = W->dims(); DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = vectorize<int>(filter_data_dims); UpdatePaddingAndDilation( &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim); DenseTensor transformed_X(X->type()); DenseTensor transformed_ddX(X->type()); DenseTensor transformed_dX(X->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(X->dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_X_channel.dims()[0]; new_input_shape_vec[1] = transformed_X_channel.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_X_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } DDim new_input_shape(make_ddim(new_input_shape_vec)); transformed_X.Resize(new_input_shape); transformed_ddX.Resize(new_input_shape); transformed_dX.Resize(new_input_shape); ctx.template Alloc<T>(&transformed_X); if (ddX) { ctx.template Alloc<T>(&transformed_ddX); } if (dX) { ctx.template Alloc<T>(&transformed_dX); } // pad for input const int rank = X->dims().size(); T pad_value(0.0); switch (rank) { case 4: { funcs::PadFunction<Context, T, 4>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 4>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; case 5: { funcs::PadFunction<Context, T, 5>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 5>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; default: PADDLE_THROW(phi::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_X.ShareDataWith(transformed_X_channel); if (ddX) { transformed_ddX.ShareDataWith(transformed_ddX_channel); } if (dX) { transformed_dX.ShareDataWith(transformed_dX_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i 
< data_dim; ++i) { padding_common[i] = paddings[2 * i]; } } } const T* x = transformed_X.data<T>(); int iwo_group = groups; int c_group = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_group = 1; c_group = groups; groups = 1; #endif auto dtype = paddle::platform::CudnnDataType<T>::type; auto handle = ctx.cudnn_handle(); paddle::operators::ConvArgs args1{&transformed_ddX, W, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; #ifdef PADDLE_WITH_HIP miopenConvFwdAlgorithm_t fwd_algo1 = static_cast<miopenConvFwdAlgorithm_t>(0); miopenConvFwdAlgorithm_t fwd_algo2 = static_cast<miopenConvFwdAlgorithm_t>(0); miopenConvBwdDataAlgorithm_t data_algo = static_cast<miopenConvBwdDataAlgorithm_t>(0); miopenConvBwdWeightsAlgorithm_t filter_algo = static_cast<miopenConvBwdWeightsAlgorithm_t>(0); #else cudnnConvolutionFwdAlgo_t fwd_algo1 = static_cast<cudnnConvolutionFwdAlgo_t>(0); cudnnConvolutionFwdAlgo_t fwd_algo2 = static_cast<cudnnConvolutionFwdAlgo_t>(0); cudnnConvolutionBwdDataAlgo_t data_algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(0); cudnnConvolutionBwdFilterAlgo_t filter_algo = static_cast<cudnnConvolutionBwdFilterAlgo_t>(0); #endif auto layout = paddle::platform::GetCudnnTensorFormat( paddle::platform::DataLayout::kNCHW); // ddo = conv(ddI, W) + conv(I, ddW) size_t workspace_size = 0; T* transformed_ddy_channel = nullptr; if (ddO) { ddy = ddO->data<T>(); transformed_ddy_channel = transformed_ddO_channel.data<T>(); if (ddX) { args1.handle = handle; args1.idesc.set(transformed_ddX, iwo_group); args1.wdesc.set(*W, layout, iwo_group); args1.odesc.set(transformed_ddO_channel, iwo_group); args1.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search1 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search1::GetWorkspaceSize(args1); fwd_algo1 = search1::Find<T>( args1, exhaustive_search, false, workspace_size, ctx); #else using search1 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, ctx); workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1); #endif } if (ddW) { ddw = ddW->data<T>(); args2.handle = handle; args2.idesc.set(transformed_X, iwo_group); args2.wdesc.set(*ddW, layout, iwo_group); args2.odesc.set(transformed_ddO_channel, iwo_group); args2.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search2 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = ::max(workspace_size, search2::GetWorkspaceSize(args2)); fwd_algo2 = search2::Find<T>( args2, exhaustive_search, false, workspace_size, ctx); #else using search2 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, ctx); workspace_size = ::max(workspace_size, search2::GetWorkspaceSize(args2, fwd_algo2)); #endif } } if (dW && ddX) { dw = dW->data<T>(); args3.handle = handle; args3.idesc.set(transformed_ddX, iwo_group); args3.wdesc.set(*dW, layout, iwo_group); 
args3.odesc.set(transformed_dO_channel, iwo_group); args3.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search3 = paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = ::max(workspace_size, search3::GetWorkspaceSize(args3)); filter_algo = search3::Find<T>( args3, exhaustive_search, deterministic, workspace_size, ctx); #else using search3 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_algo = search3::Find<T>(args3, exhaustive_search, deterministic, ctx); workspace_size = ::max(workspace_size, search3::GetWorkspaceSize(args3, filter_algo)); #endif } if (ddW && dX) { transformed_dx = transformed_dX.data<T>(); args4.handle = handle; args4.idesc.set(transformed_dX, iwo_group); args4.wdesc.set(*ddW, layout, iwo_group); args4.odesc.set(transformed_dO_channel, iwo_group); args4.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search4 = paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = ::max(workspace_size, search4::GetWorkspaceSize(args4)); data_algo = search4::Find<T>( args4, exhaustive_search, deterministic, workspace_size, ctx); #else using search4 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_algo = search4::Find<T>(args4, exhaustive_search, deterministic, ctx); workspace_size = ::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo)); #endif } int i_n, i_c, i_d, i_h, i_w; GetNCDHW( transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); int o_n, o_c, o_d, o_h, o_w; GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = W->numel() / groups; paddle::operators::ScalingParamType<T> alpha = 1.0f; paddle::operators::ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 
1.0f : // 0.0f; // VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto"); auto wkspace_handle = ctx.cudnn_workspace_handle(); if (ddO) { if (ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx, args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_algo1, &beta, args1.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), w + i * group_offset_filter, args1.cdesc.desc(), fwd_algo1, workspace_ptr, workspace_size, &beta, args1.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (ddW) { #ifdef PADDLE_WITH_HIP // MIOPEN ONLY support beta to be 0.0f wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(), ddw, args2.cdesc.desc(), fwd_algo2, &beta, args2.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), ddw + i * group_offset_filter, args2.cdesc.desc(), fwd_algo2, workspace_ptr, workspace_size, &alpha, args2.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO); } } T* transformed_dy_channel = transformed_dO_channel.data<T>(); if (dW && ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), transformed_dy_channel, args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_algo, &beta, args3.wdesc.desc(), dw, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, args3.odesc.desc(), transformed_dy_channel + i * group_offset_out, args3.cdesc.desc(), filter_algo, workspace_ptr, workspace_size, &beta, args3.wdesc.desc(), dw + i * group_offset_filter)); }, workspace_size); } #endif } if (dX && ddW) { ddw = ddW->data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args4.odesc.desc(), transformed_dy_channel, args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_algo, &beta, args4.idesc.desc(), transformed_dx, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args4.wdesc.desc(), ddw + i * 
group_offset_filter, args4.odesc.desc(), transformed_dy_channel + i * group_offset_out, args4.cdesc.desc(), data_algo, workspace_ptr, workspace_size, &beta, args4.idesc.desc(), transformed_dx + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { // reverse padded input std::vector<int> starts(X->dims().size(), 0); std::vector<int> axes(X->dims().size(), 0); for (size_t i = 0; i < X->dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } if (X->dims().size() == 4) { paddle::operators::RemovePaddingSlice<Context, T, 4>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } else { paddle::operators::RemovePaddingSlice<Context, T, 5>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX); } } } template <typename T, typename Context> void DepthwiseConvCudnnGradGradKernel( const Context& ctx, paddle::optional<const DenseTensor&> input_grad_grad, paddle::optional<const DenseTensor&> filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, bool fuse_relu, DenseTensor* out_grad_grad, DenseTensor* input_grad, DenseTensor* filter_grad) { ConvCudnnGradGradKernel<T>(ctx, input_grad_grad, filter_grad_grad, out_grad, input, filter, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, out_grad_grad, input_grad, filter_grad); } template <typename T, typename Context> void Conv3DCudnnGradGradKernel( const Context& ctx, paddle::optional<const DenseTensor&> input_grad_grad, paddle::optional<const DenseTensor&> filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* out_grad_grad, DenseTensor* input_grad, DenseTensor* filter_grad) { ConvCudnnGradGradKernel<T>(ctx, input_grad_grad, filter_grad_grad, out_grad, input, filter, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, out_grad_grad, input_grad, filter_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, phi::dtype::float16) {} #else #if CUDNN_VERSION_MIN(8, 1, 0) PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} #else 
PD_REGISTER_KERNEL(conv2d_grad_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::ConvCudnnGradGradKernel,
                   float,
                   double,
                   phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv3DCudnnGradGradKernel,
                   float,
                   double,
                   phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::DepthwiseConvCudnnGradGradKernel,
                   float,
                   double,
                   phi::dtype::float16) {}
#endif
#endif
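// --- Editorial sketch (not part of the .hip file above or the .cu file below) ---
// In this pair the HIP and CUDA sources are nearly identical, because the
// miopen-vs-cudnn differences already sit behind PADDLE_WITH_HIP preprocessor
// branches present in both; the visible changes are the hipify banner and small
// renames (e.g. std::min/std::max appearing as ::min/::max in the HIP copy).
// For readers new to the corpus, the self-contained program below shows the
// more typical hipify substitution on the runtime API itself
// (cudaMalloc/cudaMemcpy/cudaFree -> hipMalloc/hipMemcpy/hipFree); the buffer
// size N and the variable names are assumptions made for this example only.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

int main() {
    const int N = 1024;                        // assumed example size
    std::vector<float> h(N, 1.0f);             // host buffer
    float *d = nullptr;                        // device buffer
    // CUDA: cudaMalloc / cudaMemcpy / cudaFree  <->  HIP: hipMalloc / hipMemcpy / hipFree
    if (hipMalloc((void**)&d, N * sizeof(float)) != hipSuccess) return 1;
    hipMemcpy(d, h.data(), N * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(h.data(), d, N * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(d);
    printf("round-trip copy done, h[0] = %f\n", h[0]);
    return 0;
}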
1dd79f2fd6b3f72333bde02082f218e281efdf5d.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/kernels/conv_grad_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/fluid/framework/eigen.h" #ifdef PADDLE_WITH_HIP #include "paddle/fluid/operators/conv_miopen_helper.h" #else #include "paddle/fluid/operators/conv_cudnn_helper.h" #endif #include "paddle/fluid/platform/cudnn_workspace_helper.h" #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/phi/kernels/funcs/padding.h" #include "paddle/phi/kernels/cpu/conv_util.h" #include "paddle/phi/kernels/funcs/batch_norm_utils.h" #include "paddle/phi/kernels/impl/conv_cudnn_impl.h" #include "paddle/phi/common/bfloat16.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { template <typename T, typename Context> void ConvCudnnGradGradKernel( const Context& ctx, paddle::optional<const DenseTensor&> input_grad_grad, paddle::optional<const DenseTensor&> filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* out_grad_grad, DenseTensor* input_grad, DenseTensor* filter_grad) { auto X = &input; auto W = &filter; auto dO = &out_grad; auto ddX = input_grad_grad.get_ptr(); auto ddW = filter_grad_grad.get_ptr(); auto ddO = out_grad_grad; auto dW = filter_grad; auto dX = input_grad; if (ddO) { ctx.template Alloc<T>(ddO); phi::funcs::SetConstant<Context, T> set_zero; set_zero(ctx, ddO, static_cast<T>(0)); } if (dW) { ctx.template Alloc<T>(dW); } if (dX) { ctx.template Alloc<T>(dX); } // const T* x = X->data<T>(); const T* dy = dO->data<T>(); const T* w = W->data<T>(); const T* ddx = nullptr; const T* ddw = nullptr; T *dw, *dx, *ddy; dw = dx = ddy = nullptr; T* transformed_dx = nullptr; std::vector<int> dilations = dilations_t; bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t; bool deterministic = FLAGS_cudnn_deterministic; auto exhaustive_deterministic = exhaustive_search && deterministic; PADDLE_ENFORCE_EQ(exhaustive_deterministic, false, phi::errors::InvalidArgument( "Cann't set exhaustive_search True and " "FLAGS_cudnn_deterministic True at same time.")); std::vector<int> paddings = paddings_t; const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); // transform Tensors to channel first----------- DenseTensor transformed_X_channel(X->type()); DenseTensor transformed_dO_channel(dO->type()); DenseTensor transformed_ddX_channel(X->type()); DenseTensor transformed_ddO_channel(dO->type()); DenseTensor transformed_dX_channel(X->type()); if (channel_last) { 
ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel); ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel); if (ddX) { ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel); } if (ddO) { ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel); } if (dX) { ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel); ctx.template Alloc<T>(&transformed_dX_channel); } } else { transformed_X_channel = *X; transformed_dO_channel = *dO; if (ddX) { transformed_ddX_channel = *ddX; } if (ddO) { transformed_ddO_channel.ShareDataWith(*ddO); } if (dX) { transformed_dX_channel.ShareDataWith(*dX); } } auto in_dims = transformed_X_channel.dims(); auto filter_dims = W->dims(); DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size()); DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); std::vector<int> ksize = vectorize<int>(filter_data_dims); UpdatePaddingAndDilation( &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize); int data_dim = strides.size(); // 2d or 3d bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim); DenseTensor transformed_X(X->type()); DenseTensor transformed_ddX(X->type()); DenseTensor transformed_dX(X->type()); std::vector<int> padding_common(data_dim, 0); std::vector<int> input_pad(X->dims().size() * 2, 0); if (!is_sys_pad) { // get pad std::vector<int> padding_diff(data_dim); std::vector<int> new_input_shape_vec(data_dim + 2); new_input_shape_vec[0] = transformed_X_channel.dims()[0]; new_input_shape_vec[1] = transformed_X_channel.dims()[1]; for (size_t i = 0; i < data_dim; ++i) { padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); new_input_shape_vec[i + 2] = transformed_X_channel.dims()[i + 2] + padding_diff[i]; input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; } DDim new_input_shape(make_ddim(new_input_shape_vec)); transformed_X.Resize(new_input_shape); transformed_ddX.Resize(new_input_shape); transformed_dX.Resize(new_input_shape); ctx.template Alloc<T>(&transformed_X); if (ddX) { ctx.template Alloc<T>(&transformed_ddX); } if (dX) { ctx.template Alloc<T>(&transformed_dX); } // pad for input const int rank = X->dims().size(); T pad_value(0.0); switch (rank) { case 4: { funcs::PadFunction<Context, T, 4>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 4>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; case 5: { funcs::PadFunction<Context, T, 5>( ctx, input_pad, transformed_X_channel, pad_value, &transformed_X); if (ddX) { funcs::PadFunction<Context, T, 5>(ctx, input_pad, transformed_ddX_channel, pad_value, &transformed_ddX); } } break; default: PADDLE_THROW(phi::errors::InvalidArgument( "ConvOp only support tensors with 4 or 5 dimensions.")); } } else { transformed_X.ShareDataWith(transformed_X_channel); if (ddX) { transformed_ddX.ShareDataWith(transformed_ddX_channel); } if (dX) { transformed_dX.ShareDataWith(transformed_dX_channel); } if (paddings.size() == data_dim) { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[i]; } } else { for (size_t i = 0; i < data_dim; ++i) { padding_common[i] = paddings[2 * 
i]; } } } const T* x = transformed_X.data<T>(); int iwo_group = groups; int c_group = 1; #if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) iwo_group = 1; c_group = groups; groups = 1; #endif auto dtype = paddle::platform::CudnnDataType<T>::type; auto handle = ctx.cudnn_handle(); paddle::operators::ConvArgs args1{&transformed_ddX, W, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; paddle::operators::ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common, dilations, dtype}; #ifdef PADDLE_WITH_HIP miopenConvFwdAlgorithm_t fwd_algo1 = static_cast<miopenConvFwdAlgorithm_t>(0); miopenConvFwdAlgorithm_t fwd_algo2 = static_cast<miopenConvFwdAlgorithm_t>(0); miopenConvBwdDataAlgorithm_t data_algo = static_cast<miopenConvBwdDataAlgorithm_t>(0); miopenConvBwdWeightsAlgorithm_t filter_algo = static_cast<miopenConvBwdWeightsAlgorithm_t>(0); #else cudnnConvolutionFwdAlgo_t fwd_algo1 = static_cast<cudnnConvolutionFwdAlgo_t>(0); cudnnConvolutionFwdAlgo_t fwd_algo2 = static_cast<cudnnConvolutionFwdAlgo_t>(0); cudnnConvolutionBwdDataAlgo_t data_algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(0); cudnnConvolutionBwdFilterAlgo_t filter_algo = static_cast<cudnnConvolutionBwdFilterAlgo_t>(0); #endif auto layout = paddle::platform::GetCudnnTensorFormat( paddle::platform::DataLayout::kNCHW); // ddo = conv(ddI, W) + conv(I, ddW) size_t workspace_size = 0; T* transformed_ddy_channel = nullptr; if (ddO) { ddy = ddO->data<T>(); transformed_ddy_channel = transformed_ddO_channel.data<T>(); if (ddX) { args1.handle = handle; args1.idesc.set(transformed_ddX, iwo_group); args1.wdesc.set(*W, layout, iwo_group); args1.odesc.set(transformed_ddO_channel, iwo_group); args1.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search1 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = search1::GetWorkspaceSize(args1); fwd_algo1 = search1::Find<T>( args1, exhaustive_search, false, workspace_size, ctx); #else using search1 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, ctx); workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1); #endif } if (ddW) { ddw = ddW->data<T>(); args2.handle = handle; args2.idesc.set(transformed_X, iwo_group); args2.wdesc.set(*ddW, layout, iwo_group); args2.odesc.set(transformed_ddO_channel, iwo_group); args2.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search2 = paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>; workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2)); fwd_algo2 = search2::Find<T>( args2, exhaustive_search, false, workspace_size, ctx); #else using search2 = paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>; fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, ctx); workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2, fwd_algo2)); #endif } } if (dW && ddX) { dw = dW->data<T>(); args3.handle = handle; args3.idesc.set(transformed_ddX, iwo_group); args3.wdesc.set(*dW, layout, iwo_group); args3.odesc.set(transformed_dO_channel, 
iwo_group); args3.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search3 = paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>; workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3)); filter_algo = search3::Find<T>( args3, exhaustive_search, deterministic, workspace_size, ctx); #else using search3 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>; filter_algo = search3::Find<T>(args3, exhaustive_search, deterministic, ctx); workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3, filter_algo)); #endif } if (ddW && dX) { transformed_dx = transformed_dX.data<T>(); args4.handle = handle; args4.idesc.set(transformed_dX, iwo_group); args4.wdesc.set(*ddW, layout, iwo_group); args4.odesc.set(transformed_dO_channel, iwo_group); args4.cdesc.set(dtype, padding_common, strides, dilations, paddle::platform::AllowTF32Cudnn(), c_group); #ifdef PADDLE_WITH_HIP using search4 = paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>; workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4)); data_algo = search4::Find<T>( args4, exhaustive_search, deterministic, workspace_size, ctx); #else using search4 = paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>; data_algo = search4::Find<T>(args4, exhaustive_search, deterministic, ctx); workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo)); #endif } int i_n, i_c, i_d, i_h, i_w; GetNCDHW( transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); int o_n, o_c, o_d, o_h, o_w; GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); int group_offset_in = i_c / groups * i_h * i_w * i_d; int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = W->numel() / groups; paddle::operators::ScalingParamType<T> alpha = 1.0f; paddle::operators::ScalingParamType<T> beta = 0.0f; // NOTE(zhiqiu): inplace addto is not supportted in double grad yet. // ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 
1.0f : // 0.0f; // VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto"); auto wkspace_handle = ctx.cudnn_workspace_handle(); if (ddO) { if (ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx, args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_algo1, &beta, args1.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, args1.wdesc.desc(), w + i * group_offset_filter, args1.cdesc.desc(), fwd_algo1, workspace_ptr, workspace_size, &beta, args1.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (ddW) { #ifdef PADDLE_WITH_HIP // MIOPEN ONLY support beta to be 0.0f wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionForward( handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(), ddw, args2.cdesc.desc(), fwd_algo2, &beta, args2.odesc.desc(), transformed_ddy_channel, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionForward( handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, args2.wdesc.desc(), ddw + i * group_offset_filter, args2.cdesc.desc(), fwd_algo2, workspace_ptr, workspace_size, &alpha, args2.odesc.desc(), transformed_ddy_channel + i * group_offset_out)); }, workspace_size); } #endif } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO); } } T* transformed_dy_channel = transformed_dO_channel.data<T>(); if (dW && ddX) { ddx = transformed_ddX.data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardWeights( handle, &alpha, args3.odesc.desc(), transformed_dy_channel, args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_algo, &beta, args3.wdesc.desc(), dw, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, args3.odesc.desc(), transformed_dy_channel + i * group_offset_out, args3.cdesc.desc(), filter_algo, workspace_ptr, workspace_size, &beta, args3.wdesc.desc(), dw + i * group_offset_filter)); }, workspace_size); } #endif } if (dX && ddW) { ddw = ddW->data<T>(); #ifdef PADDLE_WITH_HIP wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::miopenConvolutionBackwardData( handle, &alpha, args4.odesc.desc(), transformed_dy_channel, args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_algo, &beta, args4.idesc.desc(), transformed_dx, workspace_ptr, workspace_size)); }, workspace_size); #else for (int i = 0; i < groups; i++) { wkspace_handle.RunFunc( [&](void* workspace_ptr) { PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, args4.wdesc.desc(), ddw + i * 
group_offset_filter, args4.odesc.desc(), transformed_dy_channel + i * group_offset_out, args4.cdesc.desc(), data_algo, workspace_ptr, workspace_size, &beta, args4.idesc.desc(), transformed_dx + i * group_offset_in)); }, workspace_size); } #endif if (!is_sys_pad) { // reverse padded input std::vector<int> starts(X->dims().size(), 0); std::vector<int> axes(X->dims().size(), 0); for (size_t i = 0; i < X->dims().size(); ++i) { starts[i] = input_pad[2 * i]; axes[i] = i; } if (X->dims().size() == 4) { paddle::operators::RemovePaddingSlice<Context, T, 4>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } else { paddle::operators::RemovePaddingSlice<Context, T, 5>( ctx, &transformed_dX, &transformed_dX_channel, starts, axes); } } if (channel_last) { TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX); } } } template <typename T, typename Context> void DepthwiseConvCudnnGradGradKernel( const Context& ctx, paddle::optional<const DenseTensor&> input_grad_grad, paddle::optional<const DenseTensor&> filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, bool fuse_relu, DenseTensor* out_grad_grad, DenseTensor* input_grad, DenseTensor* filter_grad) { ConvCudnnGradGradKernel<T>(ctx, input_grad_grad, filter_grad_grad, out_grad, input, filter, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, out_grad_grad, input_grad, filter_grad); } template <typename T, typename Context> void Conv3DCudnnGradGradKernel( const Context& ctx, paddle::optional<const DenseTensor&> input_grad_grad, paddle::optional<const DenseTensor&> filter_grad_grad, const DenseTensor& out_grad, const DenseTensor& input, const DenseTensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings_t, const std::string& padding_algorithm, int groups, const std::vector<int>& dilations_t, const std::string& data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search_t, DenseTensor* out_grad_grad, DenseTensor* input_grad, DenseTensor* filter_grad) { ConvCudnnGradGradKernel<T>(ctx, input_grad_grad, filter_grad_grad, out_grad, input, filter, strides, paddings_t, padding_algorithm, groups, dilations_t, data_format, use_addto, workspace_size_MB, exhaustive_search_t, out_grad_grad, input_grad, filter_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, phi::dtype::float16) {} #else #if CUDNN_VERSION_MIN(8, 1, 0) PD_REGISTER_KERNEL(conv2d_grad_grad, GPUDNN, ALL_LAYOUT, phi::ConvCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(conv3d_grad_grad, GPUDNN, ALL_LAYOUT, phi::Conv3DCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad, GPU, ALL_LAYOUT, phi::DepthwiseConvCudnnGradGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} #else 
PD_REGISTER_KERNEL(conv2d_grad_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::ConvCudnnGradGradKernel,
                   float,
                   double,
                   phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
                   GPUDNN,
                   ALL_LAYOUT,
                   phi::Conv3DCudnnGradGradKernel,
                   float,
                   double,
                   phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::DepthwiseConvCudnnGradGradKernel,
                   float,
                   double,
                   phi::dtype::float16) {}
#endif
#endif
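// --- Editorial sketch (not part of the files above/below) ---
// The four ConvArgs/algorithm searches in the kernel above implement the
// in-source comment "// ddo = conv(ddI, W) + conv(I, ddW)":
//   ddO = conv_fwd(ddX, W) + conv_fwd(X, ddW)   (args1, args2)
//   dW  = conv_bwd_filter(ddX, dO)              (args3)
//   dX  = conv_bwd_data(dO, ddW)                (args4)
// The host-only program below illustrates just the first identity for a 1-D
// "valid" cross-correlation, ignoring strides, padding, dilation and groups;
// the concrete numbers are arbitrary example values, not taken from the file.
#include <cstdio>
#include <vector>

// y[i] = sum_j x[i + j] * w[j]   (1-D valid cross-correlation)
static std::vector<double> conv1d(const std::vector<double> &x,
                                  const std::vector<double> &w) {
    std::vector<double> y(x.size() - w.size() + 1, 0.0);
    for (size_t i = 0; i < y.size(); ++i)
        for (size_t j = 0; j < w.size(); ++j) y[i] += x[i + j] * w[j];
    return y;
}

int main() {
    std::vector<double> x   = {1, 2, 3, 4, 5};            // example input
    std::vector<double> w   = {0.5, -1, 2};               // example filter
    std::vector<double> ddx = {0.1, 0, -0.2, 0.3, 0.05};  // perturbation of x
    std::vector<double> ddw = {0.01, 0.02, -0.03};        // perturbation of w

    // Because conv is bilinear, conv(x+ddx, w+ddw) - conv(x, w) - conv(ddx, ddw)
    // equals conv(ddx, w) + conv(x, ddw) exactly; that sum is the ddO term above.
    std::vector<double> xp(x), wp(w);
    for (size_t i = 0; i < x.size(); ++i) xp[i] += ddx[i];
    for (size_t j = 0; j < w.size(); ++j) wp[j] += ddw[j];
    std::vector<double> lhs = conv1d(xp, wp), y0 = conv1d(x, w), r2 = conv1d(ddx, ddw);
    std::vector<double> rhs1 = conv1d(ddx, w), rhs2 = conv1d(x, ddw);
    for (size_t i = 0; i < lhs.size(); ++i)
        printf("ddO[%zu]: %.6f vs %.6f\n", i,
               lhs[i] - y0[i] - r2[i], rhs1[i] + rhs2[i]);
    return 0;
}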
e55c8f0a8ca3886a23792ba3ca2340310e90d8b2.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------- // // C++ Interface: main // // Description: mainpart.cpp, main.h // The C++ program, mainpart.cpp will read the starting lon, lat, start_time, // end_time and the inputfilename. The geometry will be read off the netcdf file // and mesh parameters calculated and stored into RAM. The main line program will // loop over the time variable. The velocity fields will be read off the NetCDF // file only when necessary to update the time stepping. An inner loop will go over // all particles. Parallelization of the run can be done at this point by simply // calculating groups of particles on multiple processors. // // // Author: Tom Gross // // Copyright: See COPYING file that comes with this distribution // // ---------------------------------------------------------------------*/ #include "Main.h" /**/ double dt_sec; int num_P = NUM_PARTICLES; int node = NODE; int nsigma = NSIGMA; float time_now; /**/ #include "ChesPartGL.h" #include "FindElei.h" #ifndef SIMP_GLCU #define SIMP_GLCU #include "ChesPartGL.cu" #endif //#include "struct.h" // try to add a global struct for reference to internal routines // Struct these at end of ChesPartGL.h //PPart *host_P; //PPart *dev_P; //MMesh *dev_MM; //DData *dev_DD; extern struct CControl CC; int main( int argc, char** argv ) { printf("mainpart Arguments: %d\n",argc); for (size_t i{}; i<argc; i++) { printf(" argv[%zd] = %s\n",i,argv[i]); } printf("mainpart.cu Cuda based particle mover \n"); /*---------------------------------------------------------------------- // Read in all the time independent data and put them into MM struct. ----------------------------------------------------------------------*/ CControl CC; char st0[128]="dummydummydummy"; CC.filetemplate=st0; bool error; if(argc==1){ error = CC.read_control_data("CControl/CBOFS_ColorbyDepth.txt"); } else { error = CC.read_control_data(argv[1]); } cout << "error =" << error << endl; cout<<"CC.Pipell "<<CC.Pipell[0]<<" "<<CC.Pipell[1]<<endl; MMesh *MM; MM = (MMesh *)malloc(4*sizeof(MMesh)); BuildMM(MM, CC) ; // Little routine to find the node number of center of // range as set in CC. float lonpp=CC.LONmid+CC.LONwidth*0.; float latpp=CC.LATmid+CC.LAThieght; int iMM=0; int inode = FindElei(lonpp,latpp,MM, iMM ); printf("\n FindElei NEWEST Lon=%g Lat=%g inode=%d MM[%d].Lon=%g MM[%d].Lat=%g \n", lonpp,latpp, inode, iMM, MM[iMM].Lon[inode],iMM, MM[iMM].Lat[inode]); // No need to initialize DD here. ReadData will do that later followed by hipMemcpy printf("\n\nFour separate DData's for past present future and reading\n"); DData *DD; size_t DDSizeGeneral = sizeof(DData)*4; DD = (DData *)malloc(DDSizeGeneral); // Start of BuildDD(struct DData *DD, struct MMesh *MM, struct CControl CC); BuildDD(DD,MM,CC); /* Build the Particle Struct PPart host_P */ printf("\nInitialize the PPart Structs \n"); size_t PPSizeGeneral ; PPSizeGeneral = sizeof(PPart)*num_P; host_P = (PPart *)malloc(PPSizeGeneral); num_P = MM[0].node-MM[0].firstnodeborder +CC.NUM_PARTICLEs; if (num_P> NUM_PARTICLES) num_P=NUM_PARTICLES; PPartInit(host_P,MM,&CC,num_P); //MMesh *dev_MM; // no need to do this. 
// Space is hipMalloc'd and call is to (struct MMesh dev_MM)
    size_t MMSizeGeneral = 4*sizeof(MMesh);
    hipMalloc((void**)&dev_MM,MMSizeGeneral);
    hipMemcpy(dev_MM,MM,MMSizeGeneral,hipMemcpyHostToDevice);

    //PPart *dev_P;
    //size_t PPSizeGeneral = (sizeof(PPart)*num_P);
    hipMalloc((void**)&dev_P,PPSizeGeneral);
    hipMemcpy(dev_P,host_P,PPSizeGeneral,hipMemcpyHostToDevice);
    //printf("after hipMalloc for dev_P, PPSizeGeneral=%ld\n\n",PPSizeGeneral);

    //DData *dev_DD;
    hipMalloc((void**)&dev_DD,DDSizeGeneral);
    hipMemcpy(dev_DD,DD,DDSizeGeneral,hipMemcpyHostToDevice);

    printf("\n\nLaunch GLmoveparticle from mainpart.cu\n");

    #if defined(__linux__)
    setenv ("DISPLAY", ":0", 0);
    #endif

    cout <<" CC.shadervs = "<< CC.shadervs << endl;

    GLmoveparticle(host_P,MM,DD); //, &CC);

    /*----------------------------------------------------
    // End of particle movement calculation.
    ----------------------------------------------------*/

    printf("\n mainpart.cu END \n");

    hipFree(dev_DD);
    hipFree(dev_MM);
    hipFree(dev_P);

    return 0;
}  // end of main
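// --- Editorial sketch (not part of the file above) ---
// The hipMalloc/hipMemcpy calls in main() above are not checked for errors.
// A common remedy is a checking macro wrapped around every runtime call (a
// later file in this corpus, Shallow_Water_Eq, defines a similar gpuErrchk
// helper). Minimal sketch assuming only the HIP runtime; the macro name
// HIP_CHECK and the demo buffer are illustrative choices, not from the project.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

#define HIP_CHECK(call)                                                   \
    do {                                                                  \
        hipError_t err_ = (call);                                         \
        if (err_ != hipSuccess) {                                         \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",                  \
                    hipGetErrorString(err_), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

int main() {
    float *dev_buf = nullptr;
    HIP_CHECK(hipMalloc((void**)&dev_buf, 256 * sizeof(float)));  // checked allocation
    HIP_CHECK(hipMemset(dev_buf, 0, 256 * sizeof(float)));        // checked memset
    HIP_CHECK(hipFree(dev_buf));                                  // checked release
    return 0;
}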
e55c8f0a8ca3886a23792ba3ca2340310e90d8b2.cu
/*------------------------------------------------------------------- // // C++ Interface: main // // Description: mainpart.cpp, main.h // The C++ program, mainpart.cpp will read the starting lon, lat, start_time, // end_time and the inputfilename. The geometry will be read off the netcdf file // and mesh parameters calculated and stored into RAM. The main line program will // loop over the time variable. The velocity fields will be read off the NetCDF // file only when necessary to update the time stepping. An inner loop will go over // all particles. Parallelization of the run can be done at this point by simply // calculating groups of particles on multiple processors. // // // Author: Tom Gross // // Copyright: See COPYING file that comes with this distribution // // ---------------------------------------------------------------------*/ #include "Main.h" /**/ double dt_sec; int num_P = NUM_PARTICLES; int node = NODE; int nsigma = NSIGMA; float time_now; /**/ #include "ChesPartGL.h" #include "FindElei.h" #ifndef SIMP_GLCU #define SIMP_GLCU #include "ChesPartGL.cu" #endif //#include "struct.h" // try to add a global struct for reference to internal routines // Struct these at end of ChesPartGL.h //PPart *host_P; //PPart *dev_P; //MMesh *dev_MM; //DData *dev_DD; extern struct CControl CC; int main( int argc, char** argv ) { printf("mainpart Arguments: %d\n",argc); for (size_t i{}; i<argc; i++) { printf(" argv[%zd] = %s\n",i,argv[i]); } printf("mainpart.cu Cuda based particle mover \n"); /*---------------------------------------------------------------------- // Read in all the time independent data and put them into MM struct. ----------------------------------------------------------------------*/ CControl CC; char st0[128]="dummydummydummy"; CC.filetemplate=st0; bool error; if(argc==1){ error = CC.read_control_data("CControl/CBOFS_ColorbyDepth.txt"); } else { error = CC.read_control_data(argv[1]); } cout << "error =" << error << endl; cout<<"CC.Pipell "<<CC.Pipell[0]<<" "<<CC.Pipell[1]<<endl; MMesh *MM; MM = (MMesh *)malloc(4*sizeof(MMesh)); BuildMM(MM, CC) ; // Little routine to find the node number of center of // range as set in CC. float lonpp=CC.LONmid+CC.LONwidth*0.; float latpp=CC.LATmid+CC.LAThieght; int iMM=0; int inode = FindElei(lonpp,latpp,MM, iMM ); printf("\n FindElei NEWEST Lon=%g Lat=%g inode=%d MM[%d].Lon=%g MM[%d].Lat=%g \n", lonpp,latpp, inode, iMM, MM[iMM].Lon[inode],iMM, MM[iMM].Lat[inode]); // No need to initialize DD here. ReadData will do that later followed by cudaMemcpy printf("\n\nFour separate DData's for past present future and reading\n"); DData *DD; size_t DDSizeGeneral = sizeof(DData)*4; DD = (DData *)malloc(DDSizeGeneral); // Start of BuildDD(struct DData *DD, struct MMesh *MM, struct CControl CC); BuildDD(DD,MM,CC); /* Build the Particle Struct PPart host_P */ printf("\nInitialize the PPart Structs \n"); size_t PPSizeGeneral ; PPSizeGeneral = sizeof(PPart)*num_P; host_P = (PPart *)malloc(PPSizeGeneral); num_P = MM[0].node-MM[0].firstnodeborder +CC.NUM_PARTICLEs; if (num_P> NUM_PARTICLES) num_P=NUM_PARTICLES; PPartInit(host_P,MM,&CC,num_P); //MMesh *dev_MM; // no need to do this. 
// Space is cudaMalloc'd and call is to (struct MMesh dev_MM)
    size_t MMSizeGeneral = 4*sizeof(MMesh);
    cudaMalloc((void**)&dev_MM,MMSizeGeneral);
    cudaMemcpy(dev_MM,MM,MMSizeGeneral,cudaMemcpyHostToDevice);

    //PPart *dev_P;
    //size_t PPSizeGeneral = (sizeof(PPart)*num_P);
    cudaMalloc((void**)&dev_P,PPSizeGeneral);
    cudaMemcpy(dev_P,host_P,PPSizeGeneral,cudaMemcpyHostToDevice);
    //printf("after cudaMalloc for dev_P, PPSizeGeneral=%ld\n\n",PPSizeGeneral);

    //DData *dev_DD;
    cudaMalloc((void**)&dev_DD,DDSizeGeneral);
    cudaMemcpy(dev_DD,DD,DDSizeGeneral,cudaMemcpyHostToDevice);

    printf("\n\nLaunch GLmoveparticle from mainpart.cu\n");

    #if defined(__linux__)
    setenv ("DISPLAY", ":0", 0);
    #endif

    cout <<" CC.shadervs = "<< CC.shadervs << endl;

    GLmoveparticle(host_P,MM,DD); //, &CC);

    /*----------------------------------------------------
    // End of particle movement calculation.
    ----------------------------------------------------*/

    printf("\n mainpart.cu END \n");

    cudaFree(dev_DD);
    cudaFree(dev_MM);
    cudaFree(dev_P);

    return 0;
}  // end of main
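// --- Editorial sketch (not part of the file above) ---
// main() above repeats one pattern for MM, host_P and DD: malloc a host array,
// cudaMalloc a device mirror of the same byte size, then cudaMemcpy host ->
// device before handing the pointers to the GL/particle loop. The stand-alone
// sketch below shows that pattern with a stand-in struct; the struct Demo, the
// element count n and the field names are assumptions for illustration only.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

struct Demo { double x; double y; };   // stand-in for PPart / MMesh / DData

int main() {
    const int n = 4;                               // assumed example count
    size_t bytes = n * sizeof(Demo);
    Demo *host = (Demo*)malloc(bytes);             // host copy, like host_P
    for (int i = 0; i < n; ++i) { host[i].x = double(i); host[i].y = 0.0; }

    Demo *dev = nullptr;                           // device mirror, like dev_P
    cudaMalloc((void**)&dev, bytes);
    cudaMemcpy(dev, host, bytes, cudaMemcpyHostToDevice);

    /* ... kernels would operate on dev here ... */

    cudaMemcpy(host, dev, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev);
    free(host);
    printf("host[%d].x = %f\n", n - 1, host[n - 1].x);
    return 0;
}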
3f2d8d2afcc10182a3111097e8ab9c59313318ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Shallow_Water_Eq.h" #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/binary_search.h> #include <thrust/scan.h> #include <thrust/tuple.h> #include <thrust/iterator/zip_iterator.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // #define void NC_ERR(e) {fprintf(stderr,"Error: %s\n", nc_strerror(e));} Shallow_Water_Eq::Shallow_Water_Eq (int k) { // Read parameters from NETCDF file read_parameters(&Nx, &Ny, &num_particles, &parint, &cntr, &Hmax, &g, &f, &Lx, &Ly, &Ti, &Tf, &plot_interval); // Time parameters timestep = 0; tk = Ti; dtk = 0; c0 = sqrt(g*Hmax); next_output_time = plot_interval + Ti; // CUDA parameters tpb = 512; // Threads per Block nblks = (Nx*Ny)/tpb + (((Nx*Ny)%tpb) ? 1 : 0); // Number of Blocks // Build grid and related parameters dx = Lx/Nx; dy = Ly/Ny; xgrid = new double[Nx]; for (int ii=0; ii<Nx; ii++) { xgrid[ii] = dx*(ii); } ygrid = new double[Ny]; for (int ii=0; ii<Ny; ii++) { ygrid[ii] = dy*(ii); } // Wavenumber and filter parameters dk = 2*M_PI/(Lx); k_nq = M_PI/dx; dl = 2*M_PI/(Ly); l_nq = M_PI/dy; k_cut = 0.45 * k_nq; l_cut = 0.45 * l_nq; alpha = 20.0; beta = 2.0; // Allocate wavenumber arrays // Since FFTW only uses (N/2+1) entries for one dimension we pick it to be the x direction // thus k is only size (Nx/2+1) h_k = (double *)malloc(Nx*sizeof(double)); h_l = (double *)malloc(Ny*sizeof(double)); h_filter = (double *)malloc(Ny*(Nx/2+1)*sizeof(double)); hipMalloc(&d_k,Nx*sizeof(double)); hipMalloc(&d_l,Ny*sizeof(double)); hipMalloc(&d_filter,Ny*(Nx/2+1)*sizeof(double)); // Build wavenumber vectors for (int ii=0; ii<Nx/2; ii++) { h_k[ii] = ii * dk; h_k[ii+(Nx/2)] = ( ii - (Nx/2) ) * dk; } h_k[Nx/2] = 0; for (int ii=0; ii<Ny/2; ii++) { h_l[ii] = ii * dl; h_l[ii+(Ny/2)] = ( ii - (Ny/2) ) * dl; } h_l[Ny/2] = 0; // Build filter double fx; double fy; for (int jj=0; jj<Ny; jj++) { if (jj == Ny/2) { fy = 0.0; } else if (fabs(h_l[jj]) < l_cut) { fy = 1.0; } else { fy = exp(-alpha*(pow((fabs(h_l[jj]) - l_cut)/(l_nq - l_cut), beta))); } for (int ii=0; ii<(Nx/2+1); ii++) { if (ii == Nx/2) { fx = 0.0; } else if (fabs(h_k[ii]) < k_cut) { fx = 1.0; } else { fx = exp(-alpha*(pow((fabs(h_k[ii]) - k_cut)/(k_nq - k_cut), beta))); } h_filter[ii+jj*(Nx/2+1)] = fx*fy; } } hipMemcpy(d_k, h_k, Nx*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_l, h_l, Ny*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_filter, h_filter, Ny*(Nx/2+1)*sizeof(double), hipMemcpyHostToDevice); // Initialize the primary variables for the two timesteps e_j = new Variable (Nx, Ny); e_k = new Variable (Nx, Ny); u_j = new Variable (Nx, Ny); u_k = new Variable (Nx, Ny); v_j = new Variable (Nx, Ny); v_k = new Variable (Nx, Ny); tracer_j = new Variable (Nx, Ny); tracer_k = new Variable (Nx, Ny); H_xy = new Variable (Nx, Ny); // Initialize the particles trajectory = new double[num_particles]; for (int ii=0; ii<num_particles; ii++) { trajectory[ii] = ii; } particles = new Particle 
(num_particles); // Initialize the total energies to zero total_ke = 0.0; total_pe = 0.0; // Initialize the vort array vort = new double[Nx*Ny]; // Read in the initial conditions from NETCDF file read_variables(&(u_k->h_var), &(v_k->h_var), &(e_k->h_var), &(tracer_k->h_var), &(particles->h_part_pos_x), &(particles->h_part_pos_y), &(H_xy->h_var)); // Copy the initial conditions to the GPU hipMemcpy(e_k->d_var, e_k->h_var, Nx*Ny*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(u_k->d_var, u_k->h_var, Nx*Ny*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(v_k->d_var, v_k->h_var, Nx*Ny*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(tracer_k->d_var, tracer_k->h_var, Nx*Ny*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(particles->d_part_pos_x, particles->h_part_pos_x, num_particles*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(particles->d_part_pos_y, particles->h_part_pos_y, num_particles*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(H_xy->d_var, H_xy->h_var, Nx*Ny*sizeof(double), hipMemcpyHostToDevice); // // Initialize the particles on the GPU // ParticleTi<<<1+((particles->num_particles-1)/tpb),tpb>>>(particles->d_part_pos_x, particles->d_part_pos_y, particles->num_particles, Lx, Ly); // gpuErrchk( hipPeekAtLastError() ); // gpuErrchk( hipDeviceSynchronize() ); // Take derivatives of ICs e_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); u_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); v_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); tracer_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); H_xy->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); // Print Print_To_File(cntr); }; Shallow_Water_Eq::~Shallow_Water_Eq () { hipFree(d_k); hipFree(d_l); hipFree(d_filter); //free(h_k); //free(h_l); free(h_filter); }; struct grid_index : public thrust::binary_function<double,double,int> //Thrust function to calculate the bucket grid index based of a particle's positional coordinates { const double gs_x; const double gs_y; const int nx_intgrid; grid_index(double _gs_x,double _gs_y, int _nx_intgrid) : gs_x(_gs_x),gs_y(_gs_y),nx_intgrid(_nx_intgrid) {} __host__ __device__ int operator()(const double& x, const double& y) const { return int(x/gs_x) + nx_intgrid*int(y/gs_y); } }; typedef thrust::tuple<int, int> Tuple; struct TupleComp { __host__ __device__ bool operator()(const Tuple& t1, const Tuple& t2) { if (t1.get<0>() < t2.get<0>()) return true; if (t1.get<0>() > t2.get<0>()) return false; return t1.get<1>() < t2.get<1>(); } }; void Shallow_Water_Eq::writeInteractions (const int cntr) { //h represents host and d repesents device. double *h_box_size; double *d_box_size;// Domain size int *h_grid_dim; int *d_grid_dim;// Number of buckets double *d_part_pos_x_sorted,*d_part_pos_y_sorted; // copy of arrays for sorting int *d_grid_index,*d_atom_index,*d_num_interactions,*d_start_interactions; int *d_storepair1,*d_storepair2; int *h_storepair1,*h_storepair2;//Lists for storing the interaction pairs. size_t memsize_index,memsize_interactions,memsize_grid; int nx_intgrid,ny_intgrid; double gs_x,gs_y; double r; // temporary definition int blockSize, nBlocks; int ncid; char FILE_NAME[24]; int atomindid,pair1_id,pair2_id; int FLAG = NC_NETCDF4; memsize_index= particles->num_particles * sizeof(int); //The interaction distance read from the initial conditions file. 
r = parint; h_box_size = (double *)malloc(2*sizeof(double)); h_grid_dim = (int *)malloc(2*sizeof(int)); h_box_size[0]=Lx; h_box_size[1]=Ly; nx_intgrid=h_box_size[0]/r; ny_intgrid=h_box_size[1]/r; gs_x=h_box_size[0]/nx_intgrid; gs_y=h_box_size[1]/ny_intgrid; h_grid_dim[0]=nx_intgrid; h_grid_dim[1]=ny_intgrid; //Allocate device memory to variables and create thrust device pointers hipMalloc((void **) &d_part_pos_x_sorted, particles->num_particles*sizeof(double)); hipMalloc((void **) &d_part_pos_y_sorted, particles->num_particles*sizeof(double)); thrust::device_ptr<double> d_ptr_x_sorted(d_part_pos_x_sorted); thrust::device_ptr<double> d_ptr_y_sorted(d_part_pos_y_sorted); thrust::device_ptr<double> d_ptr_x_original(particles->d_part_pos_x); thrust::device_ptr<double> d_ptr_y_original(particles->d_part_pos_y); thrust::copy(thrust::device, d_ptr_x_original, d_ptr_x_original+particles->num_particles, d_ptr_x_sorted); thrust::copy(thrust::device, d_ptr_y_original, d_ptr_y_original+particles->num_particles, d_ptr_y_sorted); hipMalloc((void **) &d_box_size, 2*sizeof(double)); hipMalloc((void **) &d_grid_dim, 2*sizeof(int)); hipMalloc((void **) &d_grid_index, memsize_index); hipMalloc((void **) &d_atom_index, memsize_index); hipMalloc((void **) &d_num_interactions, memsize_index); hipMalloc((void **) &d_start_interactions, memsize_index); hipMemcpy(d_box_size, h_box_size, 2*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_grid_dim, h_grid_dim, 2*sizeof(int), hipMemcpyHostToDevice); thrust::device_ptr<int> d_ptr_grid_index(d_grid_index); thrust::device_ptr<int> d_ptr_atom_index(d_atom_index); thrust::device_ptr<int> d_ptr_num_interactions(d_num_interactions); thrust::device_ptr<int> d_ptr_start_interactions(d_start_interactions); // fill d_atom_index with 0,1,2,3,... thrust::sequence(thrust::device, d_ptr_atom_index, d_ptr_atom_index+particles->num_particles); // Find out the bucket index for each particle/atom by using the grid_index function. thrust::transform(d_ptr_x_sorted, d_ptr_x_sorted+particles->num_particles, d_ptr_y_sorted, d_ptr_grid_index, grid_index(gs_x,gs_y,nx_intgrid)); //Create temporary keys and copy the bucket grid index onto them. 
thrust::device_vector<int> tmpkey1(particles->num_particles); thrust::copy(thrust::device, d_ptr_grid_index, d_ptr_grid_index+particles->num_particles, tmpkey1.begin()); thrust::device_vector<int> tmpkey2(particles->num_particles); thrust::copy(thrust::device, d_ptr_grid_index, d_ptr_grid_index+particles->num_particles, tmpkey2.begin()); //Sort the x and y positions of the particles according to the bucket indices thrust::sort_by_key(tmpkey1.begin(),tmpkey1.end(),d_ptr_x_sorted); thrust::sort_by_key(tmpkey2.begin(),tmpkey2.end(),d_ptr_y_sorted); //Sort the bucket grid indices based on the particle indices thrust::sort_by_key(d_ptr_grid_index,d_ptr_grid_index+particles->num_particles,d_ptr_atom_index); // find the beginning of each bucket's list of points int *d_bucket_begin,*d_bucket_end; memsize_grid= nx_intgrid*ny_intgrid * sizeof(int); hipMalloc((void **) &d_bucket_begin, memsize_grid); hipMalloc((void **) &d_bucket_end, memsize_grid); thrust::device_ptr<int> d_ptr_bucket_begin(d_bucket_begin); thrust::device_ptr<int> d_ptr_bucket_end(d_bucket_end); thrust::counting_iterator<unsigned int> search_begin(0); // find the beginning of each bucket's list of points thrust::lower_bound(d_ptr_grid_index, d_ptr_grid_index+particles->num_particles, search_begin, search_begin + nx_intgrid*ny_intgrid, d_ptr_bucket_begin); // find the end of each bucket's list of points thrust::upper_bound(d_ptr_grid_index, d_ptr_grid_index+particles->num_particles, search_begin, search_begin + nx_intgrid*ny_intgrid, d_ptr_bucket_end); // detect number of interactions for each particle/atom, store in array d_storepair1=NULL; d_storepair2=NULL; blockSize = 512; nBlocks = particles->num_particles / blockSize + (particles->num_particles % blockSize > 0); //Call interactions_grid first time to detect the number of interactions per particle hipLaunchKernelGGL(( interactions_grid), dim3(nBlocks), dim3(blockSize), 0, 0, d_part_pos_x_sorted, d_part_pos_y_sorted, d_grid_dim, d_bucket_begin, d_bucket_end, d_box_size, d_atom_index, d_num_interactions, d_start_interactions, r ,particles->num_particles,0,d_storepair1,d_storepair2); // all atom kernel for debugging // interactions_n2<<<nBlocks, blockSize>>>(d_part_pos_x_sorted, d_part_pos_y_sorted, // d_box_size, d_atom_index, d_num_interactions, d_start_interactions, // r ,particles->num_particles,0,d_storepair1,d_storepair2); //exclusive_scan gives us position of each particle in the storing lists, based on the number of interactions each particle has. thrust::exclusive_scan(thrust::device, d_ptr_num_interactions, d_ptr_num_interactions + particles->num_particles, d_ptr_start_interactions); // obtain size of array to allocate for pair storage int n1,n2,ninteractions; hipMemcpy(&n1, d_num_interactions+particles->num_particles-1, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&n2, d_start_interactions+particles->num_particles-1, sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); //A way of determining the size of our storing list, obtained by summing the number of interactions of the last particle and the it's start position in the storing list. ninteractions=n1+n2; memsize_interactions=ninteractions*sizeof(int); // allocate array to store list of interactions // as the number of interactions unknown a priori, will check hipMalloc for errors gpuErrchk( hipMalloc((void **) &d_storepair1, memsize_interactions) ); gpuErrchk( hipMalloc((void **) &d_storepair2, memsize_interactions) ); //Call interactions_grid to compute the storing lists. 
hipLaunchKernelGGL(( interactions_grid), dim3(nBlocks), dim3(blockSize), 0, 0, d_part_pos_x_sorted, d_part_pos_y_sorted, d_grid_dim, d_bucket_begin, d_bucket_end, d_box_size, d_atom_index, d_num_interactions, d_start_interactions, r ,particles->num_particles,1,d_storepair1,d_storepair2); // all atom kernel for debugging // interactions_n2<<<nBlocks, blockSize>>>(d_part_pos_x_sorted, d_part_pos_y_sorted, // d_box_size, d_atom_index, d_num_interactions, d_start_interactions, // r ,particles->num_particles,1,d_storepair1,d_storepair2); // sort results, as tuple, by first index and then second index thrust::device_ptr<int> d_ptr_storepair1(d_storepair1); thrust::device_ptr<int> d_ptr_storepair2(d_storepair2); thrust::sort(thrust::make_zip_iterator(thrust::make_tuple(d_ptr_storepair1, d_ptr_storepair2)), thrust::make_zip_iterator(thrust::make_tuple(d_ptr_storepair1 + ninteractions, d_ptr_storepair2 + ninteractions)), TupleComp()); // allocate host arrays to store pair data h_storepair1 = (int *)malloc(memsize_interactions); h_storepair2 = (int *)malloc(memsize_interactions); hipMemcpy(h_storepair1, d_storepair1, memsize_interactions, hipMemcpyDeviceToHost); hipMemcpy(h_storepair2, d_storepair2, memsize_interactions, hipMemcpyDeviceToHost); hipDeviceSynchronize(); sprintf(FILE_NAME, "interactions_%d.nc",cntr); // Create file nc_create(FILE_NAME, FLAG, &ncid); nc_def_dim(ncid, "Atomindex", ninteractions, &atomindid); nc_def_var(ncid, "Pair1", NC_INT, 1, &atomindid, &pair1_id); nc_put_var_int(ncid, pair1_id, h_storepair1); nc_def_var(ncid, "Pair2", NC_INT, 1, &atomindid, &pair2_id); nc_put_var_int(ncid, pair2_id, h_storepair2); nc_close(ncid); hipFree(d_part_pos_x_sorted); hipFree(d_part_pos_y_sorted); hipFree(d_box_size); hipFree(d_grid_dim); hipFree(d_grid_index); hipFree(d_atom_index); hipFree(d_num_interactions); hipFree(d_start_interactions); hipFree(d_bucket_begin); hipFree(d_bucket_end); hipFree(d_storepair1); hipFree(d_storepair2); free(h_storepair1); free(h_storepair2); free(h_box_size); free(h_grid_dim); } void Shallow_Water_Eq::Print_To_File (const int cntr) { // Copy the variables that are going to be printed back to the host hipMemcpy(e_k->h_var, e_k->d_var, Nx*Ny*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(u_k->h_var, u_k->d_var, Nx*Ny*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(v_k->h_var, v_k->d_var, Nx*Ny*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(tracer_k->h_var, tracer_k->d_var, Nx*Ny*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(u_k->h_vary, u_k->d_vary, Nx*Ny*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(v_k->h_varx, v_k->d_varx, Nx*Ny*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(particles->h_part_pos_x, particles->d_part_pos_x, particles->num_particles*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(particles->h_part_pos_y, particles->d_part_pos_y, particles->num_particles*sizeof(double), hipMemcpyDeviceToHost); doEnergy(); doVort(); char FILE_NAME[24]; sprintf(FILE_NAME, "output_%d.nc",cntr); // int retval; int const ndims = 3; // ID variables int ncid, timedimid, xdimid, ydimid, trajdimid, timevarid, trajvarid, xvarid, yvarid, uvarid, vvarid, etavarid, tracervarid, vortvarid, particlexvarid, particleyvarid, kevarid, apevarid; int constid, output_varid, Nx_varid, Ny_varid, num_particles_varid, g_varid, H_varid, f_varid, Lx_varid, Ly_varid, Tf_varid, plot_interval_varid; int dimids[ndims]; int partdimids[2]; int FLAG = NC_NETCDF4; // Create file nc_create(FILE_NAME, FLAG, &ncid); // ERR(retval); // Define dimensions nc_def_dim(ncid, "time", 
1, &timedimid); nc_def_dim(ncid, "X", Nx, &xdimid); // ERR(retval); nc_def_dim(ncid, "Y", Ny, &ydimid); // ERR(retval); nc_def_dim(ncid, "trajectory", particles->num_particles, &trajdimid); nc_def_dim(ncid, "const", 1, &constid); // Fill dimensions and constants nc_def_var(ncid, "time", NC_DOUBLE, 0, &timedimid, &timevarid); nc_def_var(ncid, "X", NC_DOUBLE, 1, &xdimid, &xvarid); // ERR(retval); nc_def_var(ncid, "Y", NC_DOUBLE, 1, &ydimid, &yvarid); // ERR(retval); nc_def_var(ncid, "trajectory", NC_DOUBLE, 1, &trajdimid, &trajvarid); nc_def_var(ncid, "Nx", NC_INT, 0, &constid, &Nx_varid); nc_def_var(ncid, "Ny", NC_INT, 0, &constid, &Ny_varid); nc_def_var(ncid, "num_particles", NC_INT, 0, &constid, &num_particles_varid); nc_def_var(ncid, "output", NC_INT, 0, &constid, &output_varid); nc_def_var(ncid, "g", NC_DOUBLE, 0, &constid, &g_varid); nc_def_var(ncid, "Hmax", NC_DOUBLE, 0, &constid, &H_varid); nc_def_var(ncid, "f", NC_DOUBLE, 0, &constid, &f_varid); nc_def_var(ncid, "Lx", NC_DOUBLE, 0, &constid, &Lx_varid); nc_def_var(ncid, "Ly", NC_DOUBLE, 0, &constid, &Ly_varid); nc_def_var(ncid, "Tf", NC_DOUBLE, 0, &constid, &Tf_varid); nc_def_var(ncid, "plot_interval", NC_DOUBLE, 0, &constid, &plot_interval_varid); dimids[0] = timedimid; dimids[1] = xdimid; dimids[2] = ydimid; partdimids[0] = timedimid; partdimids[1] = trajdimid; // Fill variables nc_def_var(ncid, "u", NC_DOUBLE, ndims, dimids, &uvarid); // ERR(retval); nc_def_var(ncid, "v", NC_DOUBLE, ndims, dimids, &vvarid); // ERR(retval); nc_def_var(ncid, "eta", NC_DOUBLE, ndims, dimids, &etavarid); // ERR(retval); nc_def_var(ncid, "tracer", NC_DOUBLE, ndims, dimids, &tracervarid); nc_def_var(ncid, "vorticity", NC_DOUBLE, ndims, dimids, &vortvarid); // ERR(retval); nc_def_var(ncid, "particle_x_position", NC_DOUBLE, 2, partdimids, &particlexvarid); // ERR(retval); nc_def_var(ncid, "particle_y_position", NC_DOUBLE, 2, partdimids, &particleyvarid); // ERR(retval); nc_def_var(ncid, "total KE", NC_DOUBLE, 0, &timedimid, &kevarid); nc_def_var(ncid, "total APE", NC_DOUBLE, 0, &timedimid, &apevarid); nc_enddef(ncid); // ERR(retval); nc_put_var_double(ncid, timevarid, &tk); nc_put_var_double(ncid, xvarid, xgrid); // ERR(retval); nc_put_var_double(ncid, yvarid, ygrid); // ERR(retval); nc_put_var_double(ncid, trajvarid, trajectory); nc_put_var_double(ncid,uvarid,u_k->h_var); // ERR(retval); nc_put_var_double(ncid,vvarid,v_k->h_var); // ERR(retval); nc_put_var_double(ncid,etavarid,e_k->h_var); // ERR(retval); nc_put_var_double(ncid,tracervarid,tracer_k->h_var); nc_put_var_double(ncid,vortvarid,vort); // ERR(retval); nc_put_var_double(ncid,particlexvarid,particles->h_part_pos_x); // ERR(retval); nc_put_var_double(ncid,particleyvarid,particles->h_part_pos_y); // ERR(retval); nc_put_var_double(ncid,kevarid,&total_ke); nc_put_var_double(ncid,apevarid,&total_pe); nc_put_var_int(ncid, Nx_varid, &Nx); nc_put_var_int(ncid, Ny_varid, &Ny); nc_put_var_int(ncid, num_particles_varid, &num_particles); nc_put_var_int(ncid, output_varid, &cntr); nc_put_var_double(ncid, g_varid, &g); nc_put_var_double(ncid, H_varid, &Hmax); nc_put_var_double(ncid, f_varid, &f); nc_put_var_double(ncid, Lx_varid, &Lx); nc_put_var_double(ncid, Ly_varid, &Ly); nc_put_var_double(ncid, Tf_varid, &Tf); nc_put_var_double(ncid, plot_interval_varid, &plot_interval); nc_close(ncid); } void Shallow_Water_Eq::doDerivatives () { // Compute derivatives e_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); u_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); v_k->ComputeDerivatives(d_k, d_l, Nx, Ny, 
tpb, nblks); H_xy->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); tracer_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); } void Shallow_Water_Eq::doRK2() { // Start by filtering the variables e_k->Filter(d_filter, Nx, Ny, tpb, nblks); u_k->Filter(d_filter, Nx, Ny, tpb, nblks); v_k->Filter(d_filter, Nx, Ny, tpb, nblks); tracer_k->Filter(d_filter, Nx, Ny, tpb, nblks); // Take an Euler step hipLaunchKernelGGL(( EulerStep), dim3(nblks),dim3(tpb), 0, 0, e_j->d_var, e_k->d_var, e_k->d_varx, e_k->d_vary, u_j->d_var, u_k->d_var, u_k->d_varx, u_k->d_vary, v_j->d_var, v_k->d_var, v_k->d_varx, v_k->d_vary, tracer_j->d_var, tracer_k->d_var, tracer_k->d_varx, tracer_k->d_vary, H_xy->d_var, H_xy->d_varx, H_xy->d_vary, Nx, Ny, dtk, f, g); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Compute derivatives at the Euler step e_j->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); u_j->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); v_j->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); H_xy->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); tracer_j->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); // Take the RK2 step and write to original (k) variable hipLaunchKernelGGL(( RK2), dim3(nblks),dim3(tpb), 0, 0, e_j->d_var, e_j->d_varx, e_j->d_vary, e_k->d_var, e_k->d_varx, e_k->d_vary, u_j->d_var, u_j->d_varx, u_j->d_vary, u_k->d_var, u_k->d_varx, u_k->d_vary, v_j->d_var, v_j->d_varx, v_j->d_vary, v_k->d_var, v_k->d_varx, v_k->d_vary, tracer_j->d_var, tracer_j->d_varx, tracer_j->d_vary, tracer_k->d_var, tracer_k->d_varx, tracer_k->d_vary, H_xy->d_var,H_xy->d_varx,H_xy->d_vary, Nx, Ny, dtk, f, g); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); doDerivatives(); // Step time and calculate new dt tk = tk + dtk; dtk = adaptive_timestep(); } void Shallow_Water_Eq::doParticle () { // Execute the particle interpolation and timestepper hipLaunchKernelGGL(( particle_interp_evolve_better), dim3(1+((particles->num_particles-1)/tpb)),dim3(tpb), 0, 0, u_k->d_var, v_k->d_var, particles->d_part_pos_x, particles->d_part_pos_y, particles->interp_u, particles->interp_v, Nx, Ny, particles->num_particles, dtk, dx, dy); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); } double Shallow_Water_Eq::adaptive_timestep () { // Calculate the new dt based on 10% of the cfl condition return fmin(dx/max(u_k->MaxVar(Nx, Ny),c0), dy/max(v_k->MaxVar(Nx, Ny),c0) ) /10.; } void Shallow_Water_Eq::doEnergy () { double* tmp; tmp = new double[Nx*Ny]; for (int ii=0; ii<Nx*Ny; ii++) { tmp[ii] = H_xy->h_var[ii]*(u_k->h_var[ii]*u_k->h_var[ii] + v_k->h_var[ii]*v_k->h_var[ii]); } total_ke = 0.5 * std::accumulate(tmp, tmp + Nx*Ny, 0.0); for (int ii=0; ii<Nx*Ny; ii++) { tmp[ii] = e_k->h_var[ii]*e_k->h_var[ii]; } total_pe = 0.5 * g * std::accumulate(tmp, tmp + Nx*Ny, 0.0); } void Shallow_Water_Eq::doVort () { for (int ii=0; ii<Nx*Ny; ii++) { vort[ii] = v_k->h_varx[ii]-u_k->h_vary[ii]; } } void Shallow_Water_Eq::read_parameters(int* Nx, int* Ny, int* num_particles, double* parint, int* cntr, double* Hmax, double* g, double* f, double* Lx, double* Ly, double* Ti, double* Tf, double* plot_interval) { // Open the NETCDF file int FLAG = NC_NOWRITE; int ncid=0; nc_open("initial_conditions.nc", FLAG, &ncid); int Nx_varid; nc_inq_varid(ncid, "Nx", &Nx_varid); nc_get_var_int(ncid, Nx_varid, Nx); int Ny_varid; nc_inq_varid(ncid, "Ny", &Ny_varid); nc_get_var_int(ncid, Ny_varid, Ny); int num_particles_varid; nc_inq_varid(ncid, "num_particles", &num_particles_varid); nc_get_var_int(ncid, 
num_particles_varid, num_particles); int output_varid; nc_inq_varid(ncid, "output", &output_varid); nc_get_var_int(ncid, output_varid, cntr); int g_varid; nc_inq_varid(ncid, "g", &g_varid); nc_get_var_double(ncid, g_varid, g); int H_varid; nc_inq_varid(ncid, "Hmax", &H_varid); nc_get_var_double(ncid, H_varid, Hmax); int parint_varid; nc_inq_varid(ncid, "par_int", &parint_varid); nc_get_var_double(ncid, parint_varid, parint); int f_varid; nc_inq_varid(ncid, "f", &f_varid); nc_get_var_double(ncid, f_varid, f); int Lx_varid; nc_inq_varid(ncid, "Lx", &Lx_varid); nc_get_var_double(ncid, Lx_varid, Lx); int Ly_varid; nc_inq_varid(ncid, "Ly", &Ly_varid); nc_get_var_double(ncid, Ly_varid, Ly); int time_varid; nc_inq_varid(ncid, "time", &time_varid); nc_get_var_double(ncid, time_varid, Ti); int Tf_varid; nc_inq_varid(ncid, "Tf", &Tf_varid); nc_get_var_double(ncid, Tf_varid, Tf); int plot_interval_varid; nc_inq_varid(ncid, "plot_interval", &plot_interval_varid); nc_get_var_double(ncid, plot_interval_varid, plot_interval); } void Shallow_Water_Eq::read_variables(double** my_u, double** my_v, double** my_eta, double** my_t, double** my_xp, double** my_yp, double** my_Hxy) { // Open the NETCDF file int FLAG = NC_NOWRITE; int ncid=0; nc_open("initial_conditions.nc", FLAG, &ncid); // Declare variables int u_varid, v_varid, eta_varid, t_varid, xp_varid, yp_varid, Hxy_varid; nc_inq_varid(ncid, "u", &u_varid); nc_inq_varid(ncid, "v", &v_varid); nc_inq_varid(ncid, "eta", &eta_varid); nc_inq_varid(ncid, "tracer", &t_varid); nc_inq_varid(ncid, "H", &Hxy_varid); nc_inq_varid(ncid, "particle_x_position", &xp_varid); nc_inq_varid(ncid, "particle_y_position", &yp_varid); nc_get_var_double(ncid, u_varid, my_u[0]); nc_get_var_double(ncid, v_varid, my_v[0]); nc_get_var_double(ncid, eta_varid, my_eta[0]); nc_get_var_double(ncid, t_varid, my_t[0]); nc_get_var_double(ncid, Hxy_varid, my_Hxy[0]); nc_get_var_double(ncid, xp_varid, my_xp[0]); nc_get_var_double(ncid, yp_varid, my_yp[0]); }
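// A minimal host-side sketch of the two-pass pair-list sizing used in writeInteractions
// above: a first kernel pass counts interactions per particle, an exclusive scan of the
// counts gives each particle's write offset, and the total list length is the last offset
// plus the last count. Illustrative only; the names below are hypothetical and the block
// is kept out of the build with #if 0.
#if 0
#include <thrust/host_vector.h>
#include <thrust/scan.h>
int demo_pair_list_sizing() {
  thrust::host_vector<int> num_interactions;        // would be filled by the counting pass
  num_interactions.push_back(2);
  num_interactions.push_back(0);
  num_interactions.push_back(3);
  thrust::host_vector<int> start_interactions(num_interactions.size());
  // Exclusive scan of {2,0,3} gives write offsets {0,2,2}
  thrust::exclusive_scan(num_interactions.begin(), num_interactions.end(),
                         start_interactions.begin());
  // Total storage needed = last offset + last count = 2 + 3 = 5 pairs
  return start_interactions.back() + num_interactions.back();
}
#endif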
3f2d8d2afcc10182a3111097e8ab9c59313318ab.cu
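// The constructor below builds a smooth spectral low-pass filter: wavenumbers under
// k_cut (0.45 of the Nyquist wavenumber k_nq) pass unchanged, the Nyquist mode itself is
// zeroed, and the band in between is damped by exp(-alpha*((|k|-k_cut)/(k_nq-k_cut))^beta)
// with alpha = 20 and beta = 2. A scalar sketch of that profile follows; the helper name
// is hypothetical and the block is kept out of the build with #if 0.
#if 0
#include <cmath>
double demo_filter_profile(double k, double k_cut, double k_nq,
                           double alpha = 20.0, double beta = 2.0) {
  const double ak = std::fabs(k);
  if (ak >= k_nq) return 0.0;                        // Nyquist mode removed
  if (ak <  k_cut) return 1.0;                       // low wavenumbers untouched
  const double x = (ak - k_cut) / (k_nq - k_cut);
  return std::exp(-alpha * std::pow(x, beta));       // smooth exponential roll-off
}
#endif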
#include "Shallow_Water_Eq.h" #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/binary_search.h> #include <thrust/scan.h> #include <thrust/tuple.h> #include <thrust/iterator/zip_iterator.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // #define void NC_ERR(e) {fprintf(stderr,"Error: %s\n", nc_strerror(e));} Shallow_Water_Eq::Shallow_Water_Eq (int k) { // Read parameters from NETCDF file read_parameters(&Nx, &Ny, &num_particles, &parint, &cntr, &Hmax, &g, &f, &Lx, &Ly, &Ti, &Tf, &plot_interval); // Time parameters timestep = 0; tk = Ti; dtk = 0; c0 = sqrt(g*Hmax); next_output_time = plot_interval + Ti; // CUDA parameters tpb = 512; // Threads per Block nblks = (Nx*Ny)/tpb + (((Nx*Ny)%tpb) ? 1 : 0); // Number of Blocks // Build grid and related parameters dx = Lx/Nx; dy = Ly/Ny; xgrid = new double[Nx]; for (int ii=0; ii<Nx; ii++) { xgrid[ii] = dx*(ii); } ygrid = new double[Ny]; for (int ii=0; ii<Ny; ii++) { ygrid[ii] = dy*(ii); } // Wavenumber and filter parameters dk = 2*M_PI/(Lx); k_nq = M_PI/dx; dl = 2*M_PI/(Ly); l_nq = M_PI/dy; k_cut = 0.45 * k_nq; l_cut = 0.45 * l_nq; alpha = 20.0; beta = 2.0; // Allocate wavenumber arrays // Since FFTW only uses (N/2+1) entries for one dimension we pick it to be the x direction // thus k is only size (Nx/2+1) h_k = (double *)malloc(Nx*sizeof(double)); h_l = (double *)malloc(Ny*sizeof(double)); h_filter = (double *)malloc(Ny*(Nx/2+1)*sizeof(double)); cudaMalloc(&d_k,Nx*sizeof(double)); cudaMalloc(&d_l,Ny*sizeof(double)); cudaMalloc(&d_filter,Ny*(Nx/2+1)*sizeof(double)); // Build wavenumber vectors for (int ii=0; ii<Nx/2; ii++) { h_k[ii] = ii * dk; h_k[ii+(Nx/2)] = ( ii - (Nx/2) ) * dk; } h_k[Nx/2] = 0; for (int ii=0; ii<Ny/2; ii++) { h_l[ii] = ii * dl; h_l[ii+(Ny/2)] = ( ii - (Ny/2) ) * dl; } h_l[Ny/2] = 0; // Build filter double fx; double fy; for (int jj=0; jj<Ny; jj++) { if (jj == Ny/2) { fy = 0.0; } else if (fabs(h_l[jj]) < l_cut) { fy = 1.0; } else { fy = exp(-alpha*(pow((fabs(h_l[jj]) - l_cut)/(l_nq - l_cut), beta))); } for (int ii=0; ii<(Nx/2+1); ii++) { if (ii == Nx/2) { fx = 0.0; } else if (fabs(h_k[ii]) < k_cut) { fx = 1.0; } else { fx = exp(-alpha*(pow((fabs(h_k[ii]) - k_cut)/(k_nq - k_cut), beta))); } h_filter[ii+jj*(Nx/2+1)] = fx*fy; } } cudaMemcpy(d_k, h_k, Nx*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_l, h_l, Ny*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_filter, h_filter, Ny*(Nx/2+1)*sizeof(double), cudaMemcpyHostToDevice); // Initialize the primary variables for the two timesteps e_j = new Variable (Nx, Ny); e_k = new Variable (Nx, Ny); u_j = new Variable (Nx, Ny); u_k = new Variable (Nx, Ny); v_j = new Variable (Nx, Ny); v_k = new Variable (Nx, Ny); tracer_j = new Variable (Nx, Ny); tracer_k = new Variable (Nx, Ny); H_xy = new Variable (Nx, Ny); // Initialize the particles trajectory = new double[num_particles]; for (int ii=0; ii<num_particles; ii++) { trajectory[ii] = ii; } particles = new Particle (num_particles); // Initialize the total energies to zero total_ke = 0.0; total_pe = 
0.0; // Initialize the vort array vort = new double[Nx*Ny]; // Read in the initial conditions from NETCDF file read_variables(&(u_k->h_var), &(v_k->h_var), &(e_k->h_var), &(tracer_k->h_var), &(particles->h_part_pos_x), &(particles->h_part_pos_y), &(H_xy->h_var)); // Copy the initial conditions to the GPU cudaMemcpy(e_k->d_var, e_k->h_var, Nx*Ny*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(u_k->d_var, u_k->h_var, Nx*Ny*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(v_k->d_var, v_k->h_var, Nx*Ny*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(tracer_k->d_var, tracer_k->h_var, Nx*Ny*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(particles->d_part_pos_x, particles->h_part_pos_x, num_particles*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(particles->d_part_pos_y, particles->h_part_pos_y, num_particles*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(H_xy->d_var, H_xy->h_var, Nx*Ny*sizeof(double), cudaMemcpyHostToDevice); // // Initialize the particles on the GPU // ParticleTi<<<1+((particles->num_particles-1)/tpb),tpb>>>(particles->d_part_pos_x, particles->d_part_pos_y, particles->num_particles, Lx, Ly); // gpuErrchk( cudaPeekAtLastError() ); // gpuErrchk( cudaDeviceSynchronize() ); // Take derivatives of ICs e_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); u_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); v_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); tracer_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); H_xy->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); // Print Print_To_File(cntr); }; Shallow_Water_Eq::~Shallow_Water_Eq () { cudaFree(d_k); cudaFree(d_l); cudaFree(d_filter); //free(h_k); //free(h_l); free(h_filter); }; struct grid_index : public thrust::binary_function<double,double,int> //Thrust function to calculate the bucket grid index based of a particle's positional coordinates { const double gs_x; const double gs_y; const int nx_intgrid; grid_index(double _gs_x,double _gs_y, int _nx_intgrid) : gs_x(_gs_x),gs_y(_gs_y),nx_intgrid(_nx_intgrid) {} __host__ __device__ int operator()(const double& x, const double& y) const { return int(x/gs_x) + nx_intgrid*int(y/gs_y); } }; typedef thrust::tuple<int, int> Tuple; struct TupleComp { __host__ __device__ bool operator()(const Tuple& t1, const Tuple& t2) { if (t1.get<0>() < t2.get<0>()) return true; if (t1.get<0>() > t2.get<0>()) return false; return t1.get<1>() < t2.get<1>(); } }; void Shallow_Water_Eq::writeInteractions (const int cntr) { //h represents host and d repesents device. double *h_box_size; double *d_box_size;// Domain size int *h_grid_dim; int *d_grid_dim;// Number of buckets double *d_part_pos_x_sorted,*d_part_pos_y_sorted; // copy of arrays for sorting int *d_grid_index,*d_atom_index,*d_num_interactions,*d_start_interactions; int *d_storepair1,*d_storepair2; int *h_storepair1,*h_storepair2;//Lists for storing the interaction pairs. size_t memsize_index,memsize_interactions,memsize_grid; int nx_intgrid,ny_intgrid; double gs_x,gs_y; double r; // temporary definition int blockSize, nBlocks; int ncid; char FILE_NAME[24]; int atomindid,pair1_id,pair2_id; int FLAG = NC_NETCDF4; memsize_index= particles->num_particles * sizeof(int); //The interaction distance read from the initial conditions file. 
r = parint; h_box_size = (double *)malloc(2*sizeof(double)); h_grid_dim = (int *)malloc(2*sizeof(int)); h_box_size[0]=Lx; h_box_size[1]=Ly; nx_intgrid=h_box_size[0]/r; ny_intgrid=h_box_size[1]/r; gs_x=h_box_size[0]/nx_intgrid; gs_y=h_box_size[1]/ny_intgrid; h_grid_dim[0]=nx_intgrid; h_grid_dim[1]=ny_intgrid; //Allocate device memory to variables and create thrust device pointers cudaMalloc((void **) &d_part_pos_x_sorted, particles->num_particles*sizeof(double)); cudaMalloc((void **) &d_part_pos_y_sorted, particles->num_particles*sizeof(double)); thrust::device_ptr<double> d_ptr_x_sorted(d_part_pos_x_sorted); thrust::device_ptr<double> d_ptr_y_sorted(d_part_pos_y_sorted); thrust::device_ptr<double> d_ptr_x_original(particles->d_part_pos_x); thrust::device_ptr<double> d_ptr_y_original(particles->d_part_pos_y); thrust::copy(thrust::device, d_ptr_x_original, d_ptr_x_original+particles->num_particles, d_ptr_x_sorted); thrust::copy(thrust::device, d_ptr_y_original, d_ptr_y_original+particles->num_particles, d_ptr_y_sorted); cudaMalloc((void **) &d_box_size, 2*sizeof(double)); cudaMalloc((void **) &d_grid_dim, 2*sizeof(int)); cudaMalloc((void **) &d_grid_index, memsize_index); cudaMalloc((void **) &d_atom_index, memsize_index); cudaMalloc((void **) &d_num_interactions, memsize_index); cudaMalloc((void **) &d_start_interactions, memsize_index); cudaMemcpy(d_box_size, h_box_size, 2*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_grid_dim, h_grid_dim, 2*sizeof(int), cudaMemcpyHostToDevice); thrust::device_ptr<int> d_ptr_grid_index(d_grid_index); thrust::device_ptr<int> d_ptr_atom_index(d_atom_index); thrust::device_ptr<int> d_ptr_num_interactions(d_num_interactions); thrust::device_ptr<int> d_ptr_start_interactions(d_start_interactions); // fill d_atom_index with 0,1,2,3,... thrust::sequence(thrust::device, d_ptr_atom_index, d_ptr_atom_index+particles->num_particles); // Find out the bucket index for each particle/atom by using the grid_index function. thrust::transform(d_ptr_x_sorted, d_ptr_x_sorted+particles->num_particles, d_ptr_y_sorted, d_ptr_grid_index, grid_index(gs_x,gs_y,nx_intgrid)); //Create temporary keys and copy the bucket grid index onto them. 
thrust::device_vector<int> tmpkey1(particles->num_particles);
thrust::copy(thrust::device, d_ptr_grid_index, d_ptr_grid_index+particles->num_particles, tmpkey1.begin());
thrust::device_vector<int> tmpkey2(particles->num_particles);
thrust::copy(thrust::device, d_ptr_grid_index, d_ptr_grid_index+particles->num_particles, tmpkey2.begin());
//Sort the x and y positions of the particles according to the bucket indices
thrust::sort_by_key(tmpkey1.begin(),tmpkey1.end(),d_ptr_x_sorted);
thrust::sort_by_key(tmpkey2.begin(),tmpkey2.end(),d_ptr_y_sorted);
//Sort the particle indices based on the bucket grid indices
thrust::sort_by_key(d_ptr_grid_index,d_ptr_grid_index+particles->num_particles,d_ptr_atom_index);
// find the beginning of each bucket's list of points
int *d_bucket_begin,*d_bucket_end;
memsize_grid= nx_intgrid*ny_intgrid * sizeof(int);
cudaMalloc((void **) &d_bucket_begin, memsize_grid);
cudaMalloc((void **) &d_bucket_end, memsize_grid);
thrust::device_ptr<int> d_ptr_bucket_begin(d_bucket_begin);
thrust::device_ptr<int> d_ptr_bucket_end(d_bucket_end);
thrust::counting_iterator<unsigned int> search_begin(0);
// find the beginning of each bucket's list of points
thrust::lower_bound(d_ptr_grid_index, d_ptr_grid_index+particles->num_particles, search_begin, search_begin + nx_intgrid*ny_intgrid, d_ptr_bucket_begin);
// find the end of each bucket's list of points
thrust::upper_bound(d_ptr_grid_index, d_ptr_grid_index+particles->num_particles, search_begin, search_begin + nx_intgrid*ny_intgrid, d_ptr_bucket_end);
// detect number of interactions for each particle/atom, store in array
d_storepair1=NULL;
d_storepair2=NULL;
blockSize = 512;
nBlocks = particles->num_particles / blockSize + (particles->num_particles % blockSize > 0);
//Call interactions_grid first time to detect the number of interactions per particle
interactions_grid<<<nBlocks, blockSize>>>(d_part_pos_x_sorted, d_part_pos_y_sorted, d_grid_dim, d_bucket_begin, d_bucket_end, d_box_size, d_atom_index, d_num_interactions, d_start_interactions, r ,particles->num_particles,0,d_storepair1,d_storepair2);
// all atom kernel for debugging
// interactions_n2<<<nBlocks, blockSize>>>(d_part_pos_x_sorted, d_part_pos_y_sorted,
// d_box_size, d_atom_index, d_num_interactions, d_start_interactions,
// r ,particles->num_particles,0,d_storepair1,d_storepair2);
//exclusive_scan gives us the position of each particle in the storing lists, based on the number of interactions each particle has.
thrust::exclusive_scan(thrust::device, d_ptr_num_interactions, d_ptr_num_interactions + particles->num_particles, d_ptr_start_interactions);
// obtain size of array to allocate for pair storage
int n1,n2,ninteractions;
cudaMemcpy(&n1, d_num_interactions+particles->num_particles-1, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&n2, d_start_interactions+particles->num_particles-1, sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//The size of the storing list is obtained by summing the number of interactions of the last particle and its start position in the storing list.
ninteractions=n1+n2;
memsize_interactions=ninteractions*sizeof(int);
// allocate array to store list of interactions
// as the number of interactions is unknown a priori, check cudaMalloc for errors
gpuErrchk( cudaMalloc((void **) &d_storepair1, memsize_interactions) );
gpuErrchk( cudaMalloc((void **) &d_storepair2, memsize_interactions) );
//Call interactions_grid to compute the storing lists.
interactions_grid<<<nBlocks, blockSize>>>(d_part_pos_x_sorted, d_part_pos_y_sorted, d_grid_dim, d_bucket_begin, d_bucket_end, d_box_size, d_atom_index, d_num_interactions, d_start_interactions, r ,particles->num_particles,1,d_storepair1,d_storepair2); // all atom kernel for debugging // interactions_n2<<<nBlocks, blockSize>>>(d_part_pos_x_sorted, d_part_pos_y_sorted, // d_box_size, d_atom_index, d_num_interactions, d_start_interactions, // r ,particles->num_particles,1,d_storepair1,d_storepair2); // sort results, as tuple, by first index and then second index thrust::device_ptr<int> d_ptr_storepair1(d_storepair1); thrust::device_ptr<int> d_ptr_storepair2(d_storepair2); thrust::sort(thrust::make_zip_iterator(thrust::make_tuple(d_ptr_storepair1, d_ptr_storepair2)), thrust::make_zip_iterator(thrust::make_tuple(d_ptr_storepair1 + ninteractions, d_ptr_storepair2 + ninteractions)), TupleComp()); // allocate host arrays to store pair data h_storepair1 = (int *)malloc(memsize_interactions); h_storepair2 = (int *)malloc(memsize_interactions); cudaMemcpy(h_storepair1, d_storepair1, memsize_interactions, cudaMemcpyDeviceToHost); cudaMemcpy(h_storepair2, d_storepair2, memsize_interactions, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); sprintf(FILE_NAME, "interactions_%d.nc",cntr); // Create file nc_create(FILE_NAME, FLAG, &ncid); nc_def_dim(ncid, "Atomindex", ninteractions, &atomindid); nc_def_var(ncid, "Pair1", NC_INT, 1, &atomindid, &pair1_id); nc_put_var_int(ncid, pair1_id, h_storepair1); nc_def_var(ncid, "Pair2", NC_INT, 1, &atomindid, &pair2_id); nc_put_var_int(ncid, pair2_id, h_storepair2); nc_close(ncid); cudaFree(d_part_pos_x_sorted); cudaFree(d_part_pos_y_sorted); cudaFree(d_box_size); cudaFree(d_grid_dim); cudaFree(d_grid_index); cudaFree(d_atom_index); cudaFree(d_num_interactions); cudaFree(d_start_interactions); cudaFree(d_bucket_begin); cudaFree(d_bucket_end); cudaFree(d_storepair1); cudaFree(d_storepair2); free(h_storepair1); free(h_storepair2); free(h_box_size); free(h_grid_dim); } void Shallow_Water_Eq::Print_To_File (const int cntr) { // Copy the variables that are going to be printed back to the host cudaMemcpy(e_k->h_var, e_k->d_var, Nx*Ny*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(u_k->h_var, u_k->d_var, Nx*Ny*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(v_k->h_var, v_k->d_var, Nx*Ny*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(tracer_k->h_var, tracer_k->d_var, Nx*Ny*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(u_k->h_vary, u_k->d_vary, Nx*Ny*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(v_k->h_varx, v_k->d_varx, Nx*Ny*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(particles->h_part_pos_x, particles->d_part_pos_x, particles->num_particles*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(particles->h_part_pos_y, particles->d_part_pos_y, particles->num_particles*sizeof(double), cudaMemcpyDeviceToHost); doEnergy(); doVort(); char FILE_NAME[24]; sprintf(FILE_NAME, "output_%d.nc",cntr); // int retval; int const ndims = 3; // ID variables int ncid, timedimid, xdimid, ydimid, trajdimid, timevarid, trajvarid, xvarid, yvarid, uvarid, vvarid, etavarid, tracervarid, vortvarid, particlexvarid, particleyvarid, kevarid, apevarid; int constid, output_varid, Nx_varid, Ny_varid, num_particles_varid, g_varid, H_varid, f_varid, Lx_varid, Ly_varid, Tf_varid, plot_interval_varid; int dimids[ndims]; int partdimids[2]; int FLAG = NC_NETCDF4; // Create file nc_create(FILE_NAME, FLAG, &ncid); // ERR(retval); // Define dimensions nc_def_dim(ncid, "time", 1, 
&timedimid); nc_def_dim(ncid, "X", Nx, &xdimid); // ERR(retval); nc_def_dim(ncid, "Y", Ny, &ydimid); // ERR(retval); nc_def_dim(ncid, "trajectory", particles->num_particles, &trajdimid); nc_def_dim(ncid, "const", 1, &constid); // Fill dimensions and constants nc_def_var(ncid, "time", NC_DOUBLE, 0, &timedimid, &timevarid); nc_def_var(ncid, "X", NC_DOUBLE, 1, &xdimid, &xvarid); // ERR(retval); nc_def_var(ncid, "Y", NC_DOUBLE, 1, &ydimid, &yvarid); // ERR(retval); nc_def_var(ncid, "trajectory", NC_DOUBLE, 1, &trajdimid, &trajvarid); nc_def_var(ncid, "Nx", NC_INT, 0, &constid, &Nx_varid); nc_def_var(ncid, "Ny", NC_INT, 0, &constid, &Ny_varid); nc_def_var(ncid, "num_particles", NC_INT, 0, &constid, &num_particles_varid); nc_def_var(ncid, "output", NC_INT, 0, &constid, &output_varid); nc_def_var(ncid, "g", NC_DOUBLE, 0, &constid, &g_varid); nc_def_var(ncid, "Hmax", NC_DOUBLE, 0, &constid, &H_varid); nc_def_var(ncid, "f", NC_DOUBLE, 0, &constid, &f_varid); nc_def_var(ncid, "Lx", NC_DOUBLE, 0, &constid, &Lx_varid); nc_def_var(ncid, "Ly", NC_DOUBLE, 0, &constid, &Ly_varid); nc_def_var(ncid, "Tf", NC_DOUBLE, 0, &constid, &Tf_varid); nc_def_var(ncid, "plot_interval", NC_DOUBLE, 0, &constid, &plot_interval_varid); dimids[0] = timedimid; dimids[1] = xdimid; dimids[2] = ydimid; partdimids[0] = timedimid; partdimids[1] = trajdimid; // Fill variables nc_def_var(ncid, "u", NC_DOUBLE, ndims, dimids, &uvarid); // ERR(retval); nc_def_var(ncid, "v", NC_DOUBLE, ndims, dimids, &vvarid); // ERR(retval); nc_def_var(ncid, "eta", NC_DOUBLE, ndims, dimids, &etavarid); // ERR(retval); nc_def_var(ncid, "tracer", NC_DOUBLE, ndims, dimids, &tracervarid); nc_def_var(ncid, "vorticity", NC_DOUBLE, ndims, dimids, &vortvarid); // ERR(retval); nc_def_var(ncid, "particle_x_position", NC_DOUBLE, 2, partdimids, &particlexvarid); // ERR(retval); nc_def_var(ncid, "particle_y_position", NC_DOUBLE, 2, partdimids, &particleyvarid); // ERR(retval); nc_def_var(ncid, "total KE", NC_DOUBLE, 0, &timedimid, &kevarid); nc_def_var(ncid, "total APE", NC_DOUBLE, 0, &timedimid, &apevarid); nc_enddef(ncid); // ERR(retval); nc_put_var_double(ncid, timevarid, &tk); nc_put_var_double(ncid, xvarid, xgrid); // ERR(retval); nc_put_var_double(ncid, yvarid, ygrid); // ERR(retval); nc_put_var_double(ncid, trajvarid, trajectory); nc_put_var_double(ncid,uvarid,u_k->h_var); // ERR(retval); nc_put_var_double(ncid,vvarid,v_k->h_var); // ERR(retval); nc_put_var_double(ncid,etavarid,e_k->h_var); // ERR(retval); nc_put_var_double(ncid,tracervarid,tracer_k->h_var); nc_put_var_double(ncid,vortvarid,vort); // ERR(retval); nc_put_var_double(ncid,particlexvarid,particles->h_part_pos_x); // ERR(retval); nc_put_var_double(ncid,particleyvarid,particles->h_part_pos_y); // ERR(retval); nc_put_var_double(ncid,kevarid,&total_ke); nc_put_var_double(ncid,apevarid,&total_pe); nc_put_var_int(ncid, Nx_varid, &Nx); nc_put_var_int(ncid, Ny_varid, &Ny); nc_put_var_int(ncid, num_particles_varid, &num_particles); nc_put_var_int(ncid, output_varid, &cntr); nc_put_var_double(ncid, g_varid, &g); nc_put_var_double(ncid, H_varid, &Hmax); nc_put_var_double(ncid, f_varid, &f); nc_put_var_double(ncid, Lx_varid, &Lx); nc_put_var_double(ncid, Ly_varid, &Ly); nc_put_var_double(ncid, Tf_varid, &Tf); nc_put_var_double(ncid, plot_interval_varid, &plot_interval); nc_close(ncid); } void Shallow_Water_Eq::doDerivatives () { // Compute derivatives e_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); u_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); v_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, 
nblks); H_xy->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); tracer_k->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); } void Shallow_Water_Eq::doRK2() { // Start by filtering the variables e_k->Filter(d_filter, Nx, Ny, tpb, nblks); u_k->Filter(d_filter, Nx, Ny, tpb, nblks); v_k->Filter(d_filter, Nx, Ny, tpb, nblks); tracer_k->Filter(d_filter, Nx, Ny, tpb, nblks); // Take an Euler step EulerStep<<<nblks,tpb>>>(e_j->d_var, e_k->d_var, e_k->d_varx, e_k->d_vary, u_j->d_var, u_k->d_var, u_k->d_varx, u_k->d_vary, v_j->d_var, v_k->d_var, v_k->d_varx, v_k->d_vary, tracer_j->d_var, tracer_k->d_var, tracer_k->d_varx, tracer_k->d_vary, H_xy->d_var, H_xy->d_varx, H_xy->d_vary, Nx, Ny, dtk, f, g); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Compute derivatives at the Euler step e_j->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); u_j->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); v_j->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); H_xy->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); tracer_j->ComputeDerivatives(d_k, d_l, Nx, Ny, tpb, nblks); // Take the RK2 step and write to original (k) variable RK2<<<nblks,tpb>>>( e_j->d_var, e_j->d_varx, e_j->d_vary, e_k->d_var, e_k->d_varx, e_k->d_vary, u_j->d_var, u_j->d_varx, u_j->d_vary, u_k->d_var, u_k->d_varx, u_k->d_vary, v_j->d_var, v_j->d_varx, v_j->d_vary, v_k->d_var, v_k->d_varx, v_k->d_vary, tracer_j->d_var, tracer_j->d_varx, tracer_j->d_vary, tracer_k->d_var, tracer_k->d_varx, tracer_k->d_vary, H_xy->d_var,H_xy->d_varx,H_xy->d_vary, Nx, Ny, dtk, f, g); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); doDerivatives(); // Step time and calculate new dt tk = tk + dtk; dtk = adaptive_timestep(); } void Shallow_Water_Eq::doParticle () { // Execute the particle interpolation and timestepper particle_interp_evolve_better<<<1+((particles->num_particles-1)/tpb),tpb>>>(u_k->d_var, v_k->d_var, particles->d_part_pos_x, particles->d_part_pos_y, particles->interp_u, particles->interp_v, Nx, Ny, particles->num_particles, dtk, dx, dy); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); } double Shallow_Water_Eq::adaptive_timestep () { // Calculate the new dt based on 10% of the cfl condition return fmin(dx/max(u_k->MaxVar(Nx, Ny),c0), dy/max(v_k->MaxVar(Nx, Ny),c0) ) /10.; } void Shallow_Water_Eq::doEnergy () { double* tmp; tmp = new double[Nx*Ny]; for (int ii=0; ii<Nx*Ny; ii++) { tmp[ii] = H_xy->h_var[ii]*(u_k->h_var[ii]*u_k->h_var[ii] + v_k->h_var[ii]*v_k->h_var[ii]); } total_ke = 0.5 * std::accumulate(tmp, tmp + Nx*Ny, 0.0); for (int ii=0; ii<Nx*Ny; ii++) { tmp[ii] = e_k->h_var[ii]*e_k->h_var[ii]; } total_pe = 0.5 * g * std::accumulate(tmp, tmp + Nx*Ny, 0.0); } void Shallow_Water_Eq::doVort () { for (int ii=0; ii<Nx*Ny; ii++) { vort[ii] = v_k->h_varx[ii]-u_k->h_vary[ii]; } } void Shallow_Water_Eq::read_parameters(int* Nx, int* Ny, int* num_particles, double* parint, int* cntr, double* Hmax, double* g, double* f, double* Lx, double* Ly, double* Ti, double* Tf, double* plot_interval) { // Open the NETCDF file int FLAG = NC_NOWRITE; int ncid=0; nc_open("initial_conditions.nc", FLAG, &ncid); int Nx_varid; nc_inq_varid(ncid, "Nx", &Nx_varid); nc_get_var_int(ncid, Nx_varid, Nx); int Ny_varid; nc_inq_varid(ncid, "Ny", &Ny_varid); nc_get_var_int(ncid, Ny_varid, Ny); int num_particles_varid; nc_inq_varid(ncid, "num_particles", &num_particles_varid); nc_get_var_int(ncid, num_particles_varid, num_particles); int output_varid; nc_inq_varid(ncid, "output", &output_varid); 
nc_get_var_int(ncid, output_varid, cntr); int g_varid; nc_inq_varid(ncid, "g", &g_varid); nc_get_var_double(ncid, g_varid, g); int H_varid; nc_inq_varid(ncid, "Hmax", &H_varid); nc_get_var_double(ncid, H_varid, Hmax); int parint_varid; nc_inq_varid(ncid, "par_int", &parint_varid); nc_get_var_double(ncid, parint_varid, parint); int f_varid; nc_inq_varid(ncid, "f", &f_varid); nc_get_var_double(ncid, f_varid, f); int Lx_varid; nc_inq_varid(ncid, "Lx", &Lx_varid); nc_get_var_double(ncid, Lx_varid, Lx); int Ly_varid; nc_inq_varid(ncid, "Ly", &Ly_varid); nc_get_var_double(ncid, Ly_varid, Ly); int time_varid; nc_inq_varid(ncid, "time", &time_varid); nc_get_var_double(ncid, time_varid, Ti); int Tf_varid; nc_inq_varid(ncid, "Tf", &Tf_varid); nc_get_var_double(ncid, Tf_varid, Tf); int plot_interval_varid; nc_inq_varid(ncid, "plot_interval", &plot_interval_varid); nc_get_var_double(ncid, plot_interval_varid, plot_interval); } void Shallow_Water_Eq::read_variables(double** my_u, double** my_v, double** my_eta, double** my_t, double** my_xp, double** my_yp, double** my_Hxy) { // Open the NETCDF file int FLAG = NC_NOWRITE; int ncid=0; nc_open("initial_conditions.nc", FLAG, &ncid); // Declare variables int u_varid, v_varid, eta_varid, t_varid, xp_varid, yp_varid, Hxy_varid; nc_inq_varid(ncid, "u", &u_varid); nc_inq_varid(ncid, "v", &v_varid); nc_inq_varid(ncid, "eta", &eta_varid); nc_inq_varid(ncid, "tracer", &t_varid); nc_inq_varid(ncid, "H", &Hxy_varid); nc_inq_varid(ncid, "particle_x_position", &xp_varid); nc_inq_varid(ncid, "particle_y_position", &yp_varid); nc_get_var_double(ncid, u_varid, my_u[0]); nc_get_var_double(ncid, v_varid, my_v[0]); nc_get_var_double(ncid, eta_varid, my_eta[0]); nc_get_var_double(ncid, t_varid, my_t[0]); nc_get_var_double(ncid, Hxy_varid, my_Hxy[0]); nc_get_var_double(ncid, xp_varid, my_xp[0]); nc_get_var_double(ncid, yp_varid, my_yp[0]); }
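// adaptive_timestep() above limits dt to one tenth of the CFL estimate
// dt_cfl = min(dx/max(|u|_max, c0), dy/max(|v|_max, c0)), where c0 = sqrt(g*Hmax) is the
// linear gravity-wave speed computed in the constructor. A free-standing sketch of the
// same arithmetic; the helper name is hypothetical and the block is kept out of the
// build with #if 0.
#if 0
#include <cmath>
#include <algorithm>
double demo_cfl_dt(double dx, double dy, double u_max, double v_max,
                   double g, double Hmax) {
  const double c0 = std::sqrt(g * Hmax);             // fastest linear wave speed
  const double dt_x = dx / std::max(u_max, c0);      // x-direction CFL limit
  const double dt_y = dy / std::max(v_max, c0);      // y-direction CFL limit
  return std::min(dt_x, dt_y) / 10.0;                // 10% safety factor, as in the solver
}
#endif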
d70845a930ad10c3fd8568c6a275d70115426386.hip
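// The compressed-convolution layer below implements magnitude pruning with an explicit
// binary mask per parameter blob: the forward GEMM uses weight*mask (CCMaskApply), and
// Backward_gpu applies the same mask to the gradients so pruned weights stay at zero.
// A minimal elementwise sketch of that masking; the kernel name is hypothetical and the
// block is kept out of the build with #if 0.
#if 0
template <typename Dtype>
__global__ void demo_apply_mask(const int n, const Dtype* w, const Dtype* mask,
                                Dtype* w_masked) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    w_masked[i] = w[i] * mask[i];                    // pruned entries (mask == 0) contribute nothing
  }
}
#endif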
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #include <cmath> namespace caffe { // The constant NUM_THREADS should be equal to the value in CCMomentCalc template <typename Dtype> __global__ void CCMomentCollect(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* count ) { const int NUM_THREADS = 512; __shared__ Dtype param [4*NUM_THREADS]; __shared__ unsigned int tcount [2*NUM_THREADS]; unsigned int t = threadIdx.x; unsigned int s = 2 * blockIdx.x * NUM_THREADS; if (s+t < n){ param[t] = fabs(mask[s+t]*wb[s+t]); param[t+2*NUM_THREADS] = mask[s+t]*wb[s+t]*wb[s+t]; if(mask[s+t]*wb[s+t]!=0) tcount[t] = 1; else tcount[t] = 0; } else{ param[t] = 0;param[t+2*NUM_THREADS] = 0;tcount[t] = 0; } if (s+t+NUM_THREADS < n){ param[t+NUM_THREADS] = fabs(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]); param[t+3*NUM_THREADS] = mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]; if(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]!=0) tcount[t+NUM_THREADS] = 1; else tcount[t+NUM_THREADS] = 0; } else{ param[t+NUM_THREADS] = 0;param[t+3*NUM_THREADS] = 0;tcount[t+NUM_THREADS] = 0; } __syncthreads(); for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) { if (t < stride ){ param[t] += param[t+stride]; param[t+2*NUM_THREADS] += param[t+2*NUM_THREADS+stride]; tcount[t] += tcount[t+stride]; } __syncthreads(); } if (t == 0){ mu [blockIdx.x] = param[0]; std [blockIdx.x] = param[2*NUM_THREADS]; count[blockIdx.x] = tcount[0]; } } // The constant NUM_THREADS should be equal to the value in CCMomentCalc template <typename Dtype> __global__ void CCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) { const int NUM_THREADS = 512; __shared__ unsigned int tcount [2*NUM_THREADS]; unsigned int t = threadIdx.x; unsigned int s = 2 * blockIdx.x * NUM_THREADS; tcount[t] = 0; if (s+t < n && mask[s+t]!=0){ tcount[t] = 1; } tcount[t+NUM_THREADS] = 0; if (s+t+NUM_THREADS < n && mask[s+t+NUM_THREADS]!=0){ tcount[t+NUM_THREADS] = 1; } __syncthreads(); for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) { if (t < stride ){ tcount[t] += tcount[t+stride]; } __syncthreads(); } if (t == 0){ count[blockIdx.x] = tcount[0]; } } template <typename Dtype> __global__ void CCMaskCalc(const int n, const Dtype* wb, Dtype* mask, Dtype mu, Dtype std, Dtype r) { CUDA_KERNEL_LOOP(index, n) { if (mask[index]==1 && fabs(wb[index])<=0.9*max(mu+r*std,Dtype(0))) mask[index] = 0; else if (mask[index]==0 && fabs(wb[index])>1.1*max(mu+r*std,Dtype(0))) mask[index] = 1; } } template <typename Dtype> __global__ void CCMaskApply(const int n, const Dtype* wb, const Dtype* mask, Dtype* wb_t) { CUDA_KERNEL_LOOP(index, n) { wb_t[index] = wb[index] * mask[index]; } } template <typename Dtype> void CCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){ const unsigned int NUM_THREADS = 512; Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g; Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c; int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1); hipMalloc(&pmu_g, sizeof(Dtype) * num_p); hipMalloc(&pstd_g, sizeof(Dtype) * num_p); hipMalloc(&pncount_g, sizeof(unsigned int) * num_p); pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype)); pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype)); pncount_c = 
(unsigned int*) malloc(num_p * sizeof(unsigned int)); hipLaunchKernelGGL(( CCMomentCollect<Dtype>), dim3(num_p),dim3(NUM_THREADS), 0, 0, n, wb, mask, pmu_g, pstd_g, pncount_g); CUDA_POST_KERNEL_CHECK; hipMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost); hipMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost); hipMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, hipMemcpyDeviceToHost); for (int i = 0; i < num_p; i++) { *mu += pmu_c[i];*std += pstd_c[i];*ncount += pncount_c[i]; } hipFree(pmu_g);hipFree(pstd_g);hipFree(pncount_g); free(pmu_c);free(pstd_c);free(pncount_c); } template <typename Dtype> void CCNZeroCalc(const int n, const Dtype* mask, unsigned int* ncount ){ const unsigned int NUM_THREADS = 512; unsigned int* pncount_g; unsigned int* pncount_c; int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1); hipMalloc(&pncount_g, sizeof(unsigned int) * num_p); pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int)); hipLaunchKernelGGL(( CCNzeroCollect<Dtype>), dim3(num_p),dim3(NUM_THREADS), 0, 0, n, mask, pncount_g); CUDA_POST_KERNEL_CHECK; hipMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, hipMemcpyDeviceToHost); for (int i = 0; i < num_p; i++) { *ncount += pncount_c[i]; } hipFree(pncount_g); free(pncount_c); } template <typename Dtype> __global__ void absdata(const int n, Dtype* mask ) { CUDA_KERNEL_LOOP (index, n) { mask[index] = fabs(mask[index]); } } template <typename Dtype> __global__ void CUnewMaskCalc(const int n, const Dtype* wb, Dtype* mask, Dtype cutLeft, Dtype cutRight) { CUDA_KERNEL_LOOP(index, n) { if (mask[index]==1 && fabs(wb[index])<= max(cutLeft,Dtype(0))) mask[index] = 0; else if (mask[index]==0 && fabs(wb[index])> max(cutRight,Dtype(0))) mask[index] = 1; } } template <typename Dtype> __global__ void CUmaskCombine(const int n, const Dtype* lastMask, Dtype* mask) { CUDA_KERNEL_LOOP(index, n) { if (lastMask[index]== 0 ) mask[index] = 0; } } template <typename Dtype> __global__ void CCMaskAdjust(const int n, Dtype* mask) { CUDA_KERNEL_LOOP(index, n) { if (mask[index] > 0) { mask[index] = 1; } else { mask[index] = 0; } } } template <typename Dtype> int partition(Dtype * data, int p ,int r) { Dtype x = data[r]; Dtype temp; int i = p - 1; for(int j = p; j<r; j++) { if (data[j] <= x) { i = i + 1; temp = data[i]; data[i] = data[j]; data[j] = temp; } } temp = data[i+1]; data[i+1] = data[r]; data[r] = temp; return i+1; } template <typename Dtype> Dtype findMedian(Dtype * data, int p, int r, int i){ if (p == r) { return data[p]; } int q = partition(data, p, r); int k = q - p + 1; if (i == k) { return data[q]; } else if (i < k) { return findMedian(data, p, q - 1, i); } else { return findMedian(data, q+1, r, i-k); } } template <typename Dtype> void CConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->mutable_gpu_data(); Dtype* weightMask = this->blobs_[2]->mutable_gpu_data(); Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data(); const Dtype* bias = NULL; Dtype* biasMask = NULL; Dtype* biasTmp = NULL; if (this->bias_term_) { bias = this->blobs_[1]->mutable_gpu_data(); biasMask = this->blobs_[3]->mutable_gpu_data(); biasTmp = this->bias_tmp_.mutable_gpu_data(); } hipLaunchKernelGGL(( CCMaskAdjust<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[2]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[2]->count(), weightMask); CUDA_POST_KERNEL_CHECK; if (this->bias_term_) { hipLaunchKernelGGL(( CCMaskAdjust<Dtype>), 
dim3(CAFFE_GET_BLOCKS(this->blobs_[3]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[3]->count(), biasMask); CUDA_POST_KERNEL_CHECK; } if (this->phase_ == TRAIN){ // Calculate the mean and standard deviation of learnable parameters if (this->std==0 && this->iter_==0){ /* code */ this->rand_weight_m_.Reshape(this->blobs_[2]->shape()); this->rand_bias_m_.Reshape(this->blobs_[3]->shape()); this->rand_weight_m_.CopyFrom(*(this->blobs_[2])); this->rand_bias_m_.CopyFrom(*(this->blobs_[3])); // this->crate = 3; // if (false) // { Blob<Dtype> wsort(this->blobs_[0]->shape()); Blob<Dtype> bsort(this->blobs_[1]->shape()); wsort.CopyFrom(*(this->blobs_[0])); bsort.CopyFrom(*(this->blobs_[1])); hipLaunchKernelGGL(( absdata<Dtype>), dim3(CAFFE_GET_BLOCKS(wsort.count())),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, wsort.count(), wsort.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( absdata<Dtype>), dim3(CAFFE_GET_BLOCKS(bsort.count())),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bsort.count(), bsort.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; const Dtype* cpuWeightClass = this->blobs_[2]->cpu_data(); const Dtype* cpuBiasClass = this->blobs_[3]->cpu_data(); int paramnums = wsort.count() + bsort.count(); Dtype* allparams = (Dtype*) malloc(paramnums * sizeof(Dtype)); int temcount = 0; for (int i = 0; i < wsort.count(); ++i) { if (cpuWeightClass[i] > 0) { allparams[temcount] = wsort.mutable_cpu_data()[i]; temcount++; } } for (int i = 0; i < bsort.count(); ++i) { if (cpuBiasClass[i] > 0) { allparams[temcount] = bsort.mutable_cpu_data()[i]; temcount++; } } for (int i = temcount; i < paramnums; ++i) { allparams[i] = 0; } std::cout<<"temcount:"<<temcount<<std::endl; std::cout<<"paramnums:"<<paramnums<<std::endl; this->cutLeft = findMedian(allparams, 0, temcount - 1, int(temcount * this->crate)); this->cutRight = findMedian(allparams, 0, temcount - 1, int(temcount * (this->crate + 0.05))); free(allparams); std::cout<<"cutLeft:"<<this->cutLeft<<std::endl; std::cout<<"cutRight:"<<this->cutRight<<std::endl; // } // unsigned int ncount = 0; // CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu, &std, &ncount); // if (this->bias_term_) { // CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu, &std, &ncount); // } // this->mu /= ncount; this->std -= ncount*mu*mu; // this->std /= ncount; this->std = sqrt(std); // LOG(INFO)<<mu<<" "<<std<<" "<<ncount<<"\n"; } // Demonstrate the sparsity of compressed convolutional layer /********************************************************/ if(this->iter_%100==0){ unsigned int wNoneZero = 0; unsigned int wAll = 0; unsigned int bNoneZero = 0; unsigned int bAll = 0; CCNZeroCalc(this->blobs_[0]->count(), weightMask, &wNoneZero); wAll = this->blobs_[0]->count(); if (this->bias_term_) { CCNZeroCalc(this->blobs_[1]->count(), biasMask, &bNoneZero); bAll = this->blobs_[1]->count(); } //LOG(INFO)<<ncount<<"\n"; LOG(INFO)<<wNoneZero<<" "<<wAll<<" "<<bNoneZero<<" "<<bAll<<"\n"; } /********************************************************/ // Calculate the weight mask and bias mask with probability Dtype r = static_cast<Dtype>(rand())/static_cast<Dtype>(RAND_MAX); if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) { // CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), // CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, this->mu, this->std, this->crate); // CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( CUnewMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight, weightMask, this->cutLeft, this->cutRight); CUDA_POST_KERNEL_CHECK; if (this->bias_term_) { // CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()), // CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, this->mu, this->std, this->crate); // CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( CUnewMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(), bias, biasMask, this->cutLeft, this->cutRight); CUDA_POST_KERNEL_CHECK; } } hipLaunchKernelGGL(( CUmaskCombine<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), this->rand_weight_m_.gpu_data(), weightMask); CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( CUmaskCombine<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(), this->rand_bias_m_.gpu_data(), biasMask); CUDA_POST_KERNEL_CHECK; } // Calculate the current (masked) weight and bias hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[0]->count(), weight, weightMask, weightTmp); CUDA_POST_KERNEL_CHECK; if (this->bias_term_) { hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(), bias, biasMask, biasTmp); CUDA_POST_KERNEL_CHECK; } // Forward calculation with (masked) weight and bias for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weightTmp, top_data + top[i]->offset(n)); if (this->bias_term_) { this->forward_gpu_bias(top_data + top[i]->offset(n), biasTmp); } } } this->iter_++; } template <typename Dtype> void CConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weightTmp = this->weight_tmp_.gpu_data(); const Dtype* weightMask = this->blobs_[2]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { const Dtype* biasMask = this->blobs_[3]->gpu_data(); Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[3]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[3]->count(), bias_diff, biasMask, bias_diff); CUDA_POST_KERNEL_CHECK; for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n)); } } if (this->param_propagate_down_[0] || propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[2]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[2]->count(), weight_diff, weightMask, weight_diff); CUDA_POST_KERNEL_CHECK; for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. 
if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n), top_diff + top[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { this->backward_gpu_gemm(top_diff + top[i]->offset(n), weightTmp, bottom_diff + bottom[i]->offset(n)); } } } } } INSTANTIATE_LAYER_GPU_FUNCS(CConvolutionLayer); } // namespace caffe
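// findMedian() above is a textbook quickselect: partition() places a pivot, and the
// routine recurses into the side holding the i-th smallest element (i is 1-based). The
// layer uses it to pick the crate-quantile of the surviving |weights| as the pruning
// cut-offs cutLeft/cutRight. An equivalent check with std::nth_element follows; it is
// illustrative only, assumes the 1-based rank i >= 1, and is kept out of the build with #if 0.
#if 0
#include <algorithm>
#include <vector>
float demo_quantile_threshold(std::vector<float> absw, float crate) {
  // i-th smallest with i = int(n*crate), matching findMedian(data, 0, n-1, int(n*crate))
  const int i = static_cast<int>(absw.size() * crate);
  std::nth_element(absw.begin(), absw.begin() + (i - 1), absw.end());
  return absw[i - 1];                                // threshold below which weights are masked out
}
#endif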
d70845a930ad10c3fd8568c6a275d70115426386.cu
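// In Forward_gpu below the pruning masks are only recomputed with probability
// p(iter) = (1 + gamma*iter)^(-power), and only while iter < iter_stop_: a uniform
// random r in [0,1) is drawn and the mask-update kernels run when p(iter) > r, so mask
// updates become progressively rarer as training proceeds. A scalar sketch of that
// schedule; the helper name is hypothetical and the block is kept out of the build with #if 0.
#if 0
#include <cmath>
#include <cstdlib>
bool demo_should_update_mask(int iter, int iter_stop, double gamma, double power) {
  const double r = static_cast<double>(rand()) / static_cast<double>(RAND_MAX);
  const double p = std::pow(1.0 + gamma * iter, -power);   // decaying update probability
  return (p > r) && (iter < iter_stop);
}
#endif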
#include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #include <cmath> namespace caffe { // The constant NUM_THREADS should be equal to the value in CCMomentCalc template <typename Dtype> __global__ void CCMomentCollect(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* count ) { const int NUM_THREADS = 512; __shared__ Dtype param [4*NUM_THREADS]; __shared__ unsigned int tcount [2*NUM_THREADS]; unsigned int t = threadIdx.x; unsigned int s = 2 * blockIdx.x * NUM_THREADS; if (s+t < n){ param[t] = fabs(mask[s+t]*wb[s+t]); param[t+2*NUM_THREADS] = mask[s+t]*wb[s+t]*wb[s+t]; if(mask[s+t]*wb[s+t]!=0) tcount[t] = 1; else tcount[t] = 0; } else{ param[t] = 0;param[t+2*NUM_THREADS] = 0;tcount[t] = 0; } if (s+t+NUM_THREADS < n){ param[t+NUM_THREADS] = fabs(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]); param[t+3*NUM_THREADS] = mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]; if(mask[s+t+NUM_THREADS]*wb[s+t+NUM_THREADS]!=0) tcount[t+NUM_THREADS] = 1; else tcount[t+NUM_THREADS] = 0; } else{ param[t+NUM_THREADS] = 0;param[t+3*NUM_THREADS] = 0;tcount[t+NUM_THREADS] = 0; } __syncthreads(); for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) { if (t < stride ){ param[t] += param[t+stride]; param[t+2*NUM_THREADS] += param[t+2*NUM_THREADS+stride]; tcount[t] += tcount[t+stride]; } __syncthreads(); } if (t == 0){ mu [blockIdx.x] = param[0]; std [blockIdx.x] = param[2*NUM_THREADS]; count[blockIdx.x] = tcount[0]; } } // The constant NUM_THREADS should be equal to the value in CCMomentCalc template <typename Dtype> __global__ void CCNzeroCollect(const int n, const Dtype* mask, unsigned int* count ) { const int NUM_THREADS = 512; __shared__ unsigned int tcount [2*NUM_THREADS]; unsigned int t = threadIdx.x; unsigned int s = 2 * blockIdx.x * NUM_THREADS; tcount[t] = 0; if (s+t < n && mask[s+t]!=0){ tcount[t] = 1; } tcount[t+NUM_THREADS] = 0; if (s+t+NUM_THREADS < n && mask[s+t+NUM_THREADS]!=0){ tcount[t+NUM_THREADS] = 1; } __syncthreads(); for(unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) { if (t < stride ){ tcount[t] += tcount[t+stride]; } __syncthreads(); } if (t == 0){ count[blockIdx.x] = tcount[0]; } } template <typename Dtype> __global__ void CCMaskCalc(const int n, const Dtype* wb, Dtype* mask, Dtype mu, Dtype std, Dtype r) { CUDA_KERNEL_LOOP(index, n) { if (mask[index]==1 && fabs(wb[index])<=0.9*max(mu+r*std,Dtype(0))) mask[index] = 0; else if (mask[index]==0 && fabs(wb[index])>1.1*max(mu+r*std,Dtype(0))) mask[index] = 1; } } template <typename Dtype> __global__ void CCMaskApply(const int n, const Dtype* wb, const Dtype* mask, Dtype* wb_t) { CUDA_KERNEL_LOOP(index, n) { wb_t[index] = wb[index] * mask[index]; } } template <typename Dtype> void CCMomentCalc(const int n, const Dtype* wb, const Dtype* mask, Dtype* mu, Dtype* std, unsigned int* ncount){ const unsigned int NUM_THREADS = 512; Dtype* pmu_g; Dtype* pstd_g; unsigned int* pncount_g; Dtype* pmu_c; Dtype* pstd_c; unsigned int* pncount_c; int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1); cudaMalloc(&pmu_g, sizeof(Dtype) * num_p); cudaMalloc(&pstd_g, sizeof(Dtype) * num_p); cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p); pmu_c = (Dtype*) malloc(num_p * sizeof(Dtype)); pstd_c = (Dtype*) malloc(num_p * sizeof(Dtype)); pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int)); 
CCMomentCollect<Dtype><<<num_p,NUM_THREADS>>>(n, wb, mask, pmu_g, pstd_g, pncount_g); CUDA_POST_KERNEL_CHECK; cudaMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost); cudaMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost); cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost); for (int i = 0; i < num_p; i++) { *mu += pmu_c[i];*std += pstd_c[i];*ncount += pncount_c[i]; } cudaFree(pmu_g);cudaFree(pstd_g);cudaFree(pncount_g); free(pmu_c);free(pstd_c);free(pncount_c); } template <typename Dtype> void CCNZeroCalc(const int n, const Dtype* mask, unsigned int* ncount ){ const unsigned int NUM_THREADS = 512; unsigned int* pncount_g; unsigned int* pncount_c; int num_p = (n+(NUM_THREADS<<1)-1)/(NUM_THREADS<<1); cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p); pncount_c = (unsigned int*) malloc(num_p * sizeof(unsigned int)); CCNzeroCollect<Dtype><<<num_p,NUM_THREADS>>>(n, mask, pncount_g); CUDA_POST_KERNEL_CHECK; cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p, cudaMemcpyDeviceToHost); for (int i = 0; i < num_p; i++) { *ncount += pncount_c[i]; } cudaFree(pncount_g); free(pncount_c); } template <typename Dtype> __global__ void absdata(const int n, Dtype* mask ) { CUDA_KERNEL_LOOP (index, n) { mask[index] = fabs(mask[index]); } } template <typename Dtype> __global__ void CUnewMaskCalc(const int n, const Dtype* wb, Dtype* mask, Dtype cutLeft, Dtype cutRight) { CUDA_KERNEL_LOOP(index, n) { if (mask[index]==1 && fabs(wb[index])<= max(cutLeft,Dtype(0))) mask[index] = 0; else if (mask[index]==0 && fabs(wb[index])> max(cutRight,Dtype(0))) mask[index] = 1; } } template <typename Dtype> __global__ void CUmaskCombine(const int n, const Dtype* lastMask, Dtype* mask) { CUDA_KERNEL_LOOP(index, n) { if (lastMask[index]== 0 ) mask[index] = 0; } } template <typename Dtype> __global__ void CCMaskAdjust(const int n, Dtype* mask) { CUDA_KERNEL_LOOP(index, n) { if (mask[index] > 0) { mask[index] = 1; } else { mask[index] = 0; } } } template <typename Dtype> int partition(Dtype * data, int p ,int r) { Dtype x = data[r]; Dtype temp; int i = p - 1; for(int j = p; j<r; j++) { if (data[j] <= x) { i = i + 1; temp = data[i]; data[i] = data[j]; data[j] = temp; } } temp = data[i+1]; data[i+1] = data[r]; data[r] = temp; return i+1; } template <typename Dtype> Dtype findMedian(Dtype * data, int p, int r, int i){ if (p == r) { return data[p]; } int q = partition(data, p, r); int k = q - p + 1; if (i == k) { return data[q]; } else if (i < k) { return findMedian(data, p, q - 1, i); } else { return findMedian(data, q+1, r, i-k); } } template <typename Dtype> void CConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->mutable_gpu_data(); Dtype* weightMask = this->blobs_[2]->mutable_gpu_data(); Dtype* weightTmp = this->weight_tmp_.mutable_gpu_data(); const Dtype* bias = NULL; Dtype* biasMask = NULL; Dtype* biasTmp = NULL; if (this->bias_term_) { bias = this->blobs_[1]->mutable_gpu_data(); biasMask = this->blobs_[3]->mutable_gpu_data(); biasTmp = this->bias_tmp_.mutable_gpu_data(); } CCMaskAdjust<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[2]->count()), CAFFE_CUDA_NUM_THREADS>>>(this->blobs_[2]->count(), weightMask); CUDA_POST_KERNEL_CHECK; if (this->bias_term_) { CCMaskAdjust<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[3]->count()), CAFFE_CUDA_NUM_THREADS>>>(this->blobs_[3]->count(), biasMask); CUDA_POST_KERNEL_CHECK; } if (this->phase_ == TRAIN){ // Calculate the mean 
and standard deviation of learnable parameters if (this->std==0 && this->iter_==0){ /* code */ this->rand_weight_m_.Reshape(this->blobs_[2]->shape()); this->rand_bias_m_.Reshape(this->blobs_[3]->shape()); this->rand_weight_m_.CopyFrom(*(this->blobs_[2])); this->rand_bias_m_.CopyFrom(*(this->blobs_[3])); // this->crate = 3; // if (false) // { Blob<Dtype> wsort(this->blobs_[0]->shape()); Blob<Dtype> bsort(this->blobs_[1]->shape()); wsort.CopyFrom(*(this->blobs_[0])); bsort.CopyFrom(*(this->blobs_[1])); absdata<Dtype><<<CAFFE_GET_BLOCKS(wsort.count()),CAFFE_CUDA_NUM_THREADS>>>( wsort.count(), wsort.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; absdata<Dtype><<<CAFFE_GET_BLOCKS(bsort.count()),CAFFE_CUDA_NUM_THREADS>>>( bsort.count(), bsort.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; const Dtype* cpuWeightClass = this->blobs_[2]->cpu_data(); const Dtype* cpuBiasClass = this->blobs_[3]->cpu_data(); int paramnums = wsort.count() + bsort.count(); Dtype* allparams = (Dtype*) malloc(paramnums * sizeof(Dtype)); int temcount = 0; for (int i = 0; i < wsort.count(); ++i) { if (cpuWeightClass[i] > 0) { allparams[temcount] = wsort.mutable_cpu_data()[i]; temcount++; } } for (int i = 0; i < bsort.count(); ++i) { if (cpuBiasClass[i] > 0) { allparams[temcount] = bsort.mutable_cpu_data()[i]; temcount++; } } for (int i = temcount; i < paramnums; ++i) { allparams[i] = 0; } std::cout<<"temcount:"<<temcount<<std::endl; std::cout<<"paramnums:"<<paramnums<<std::endl; this->cutLeft = findMedian(allparams, 0, temcount - 1, int(temcount * this->crate)); this->cutRight = findMedian(allparams, 0, temcount - 1, int(temcount * (this->crate + 0.05))); free(allparams); std::cout<<"cutLeft:"<<this->cutLeft<<std::endl; std::cout<<"cutRight:"<<this->cutRight<<std::endl; // } // unsigned int ncount = 0; // CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu, &std, &ncount); // if (this->bias_term_) { // CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu, &std, &ncount); // } // this->mu /= ncount; this->std -= ncount*mu*mu; // this->std /= ncount; this->std = sqrt(std); // LOG(INFO)<<mu<<" "<<std<<" "<<ncount<<"\n"; } // Demonstrate the sparsity of compressed convolutional layer /********************************************************/ if(this->iter_%100==0){ unsigned int wNoneZero = 0; unsigned int wAll = 0; unsigned int bNoneZero = 0; unsigned int bAll = 0; CCNZeroCalc(this->blobs_[0]->count(), weightMask, &wNoneZero); wAll = this->blobs_[0]->count(); if (this->bias_term_) { CCNZeroCalc(this->blobs_[1]->count(), biasMask, &bNoneZero); bAll = this->blobs_[1]->count(); } //LOG(INFO)<<ncount<<"\n"; LOG(INFO)<<wNoneZero<<" "<<wAll<<" "<<bNoneZero<<" "<<bAll<<"\n"; } /********************************************************/ // Calculate the weight mask and bias mask with probability Dtype r = static_cast<Dtype>(rand())/static_cast<Dtype>(RAND_MAX); if (pow(1+(this->gamma)*(this->iter_),-(this->power))>r && (this->iter_)<(this->iter_stop_)) { // CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), // CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, this->mu, this->std, this->crate); // CUDA_POST_KERNEL_CHECK; CUnewMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, this->cutLeft, this->cutRight); CUDA_POST_KERNEL_CHECK; if (this->bias_term_) { // CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()), // CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, this->mu, 
this->std, this->crate); // CUDA_POST_KERNEL_CHECK; CUnewMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, this->cutLeft, this->cutRight); CUDA_POST_KERNEL_CHECK; } } CUmaskCombine<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), this->rand_weight_m_.gpu_data(), weightMask); CUDA_POST_KERNEL_CHECK; CUmaskCombine<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), this->rand_bias_m_.gpu_data(), biasMask); CUDA_POST_KERNEL_CHECK; } // Calculate the current (masked) weight and bias CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[0]->count(), weight, weightMask, weightTmp); CUDA_POST_KERNEL_CHECK; if (this->bias_term_) { CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[1]->count(), bias, biasMask, biasTmp); CUDA_POST_KERNEL_CHECK; } // Forward calculation with (masked) weight and bias for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; ++n) { this->forward_gpu_gemm(bottom_data + bottom[i]->offset(n), weightTmp, top_data + top[i]->offset(n)); if (this->bias_term_) { this->forward_gpu_bias(top_data + top[i]->offset(n), biasTmp); } } } this->iter_++; } template <typename Dtype> void CConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weightTmp = this->weight_tmp_.gpu_data(); const Dtype* weightMask = this->blobs_[2]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { const Dtype* biasMask = this->blobs_[3]->gpu_data(); Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff(); CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[3]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[3]->count(), bias_diff, biasMask, bias_diff); CUDA_POST_KERNEL_CHECK; for (int n = 0; n < this->num_; ++n) { this->backward_gpu_bias(bias_diff, top_diff + top[i]->offset(n)); } } if (this->param_propagate_down_[0] || propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[2]->count()), CAFFE_CUDA_NUM_THREADS>>>( this->blobs_[2]->count(), weight_diff, weightMask, weight_diff); CUDA_POST_KERNEL_CHECK; for (int n = 0; n < this->num_; ++n) { // gradient w.r.t. weight. Note that we will accumulate diffs. if (this->param_propagate_down_[0]) { this->weight_gpu_gemm(bottom_data + bottom[i]->offset(n), top_diff + top[i]->offset(n), weight_diff); } // gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { this->backward_gpu_gemm(top_diff + top[i]->offset(n), weightTmp, bottom_diff + bottom[i]->offset(n)); } } } } } INSTANTIATE_LAYER_GPU_FUNCS(CConvolutionLayer); } // namespace caffe
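/*
 * Minimal host-only sketch of the threshold selection used in Forward_gpu above.
 * findMedian() in the layer is a quickselect: it returns the i-th smallest element,
 * so cutLeft/cutRight are order statistics of the absolute values of the surviving
 * parameters. The toy data, the crate value of 0.2 and the use of std::nth_element
 * (the standard-library equivalent of the partition()/findMedian() pair above) are
 * illustrative assumptions, not taken from the layer.
 */
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // absolute values of the currently unmasked parameters (toy data)
  std::vector<float> absw = {0.02f, 0.90f, 0.15f, 0.40f, 0.07f,
                             0.66f, 0.31f, 0.55f, 0.11f, 0.24f};
  const float crate = 0.2f;                              // prune the smallest 20%
  const int k = static_cast<int>(absw.size() * crate);   // k-th smallest, k = 2 here
  std::nth_element(absw.begin(), absw.begin() + (k - 1), absw.end());
  const float cutLeft = absw[k - 1];                     // 0.07 for this toy data
  // cutRight is obtained the same way with fraction crate + 0.05; CUnewMaskCalc then
  // zeroes mask entries with |w| <= cutLeft and re-enables those with |w| > cutRight.
  printf("cutLeft = %.2f\n", cutLeft);
  return 0;
}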
50355aa151fb4a91d27f6de464cbca069bf78e57.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<hip/hip_runtime_api.h> #include<handler_cuda_error.h> #include<Common.h> #include <rocblas.h> #include<stdio.h> #include"dfp_cu.h" __device__ double newton_solver(LogCost * cost, double *tt,double *rho, double *x){ double er = 1; double tol=1e-12; int iter=0; double a; double xn; double grad, hess; while (er>tol){ grad = cost->grad(x,tt,rho); hess = cost->hess(x,tt,rho); a = grad/hess; xn= *x - a ; er = abs( a / *x); *x=xn; iter++; } return *x; } __global__ void my_newton(double *res, double *tt, double *rho,int n){ int x = blockIdx.x * blockDim.x + threadIdx.x; double x0 = 1e-5; if (x>=n) return; LogCost cost; res[x] = newton_solver( &cost, &tt[x], rho, &x0); return ; } __device__ double dfp_solver(Cost * cost, double *tt,double *rho, double *x0,double *w_last,double *C){ int maxk=100; double rho0=0.55; const double sigma=0.4; double epsilon=1e-10; int k=0; double Hk=1.0; double x= *x0; double gk = 0.0; double dk = 0.0; while(k < maxk){ gk = cost->grad(x0,w_last,tt,rho,C); // printf("gk = %.4f\n",gk); // printf(" k = %d\n",k); if( (gk*gk) < epsilon ) break; dk = -Hk * gk; int m=0; int mk=0; while( m < 15){ double p = pow(rho0,m); double temp = (*x0) + p * dk; double temp1 = cost->cost(&temp,w_last,tt,rho,C); double temp2 = cost->cost(x0,w_last,tt,rho,C) + sigma * p * gk * dk; if( temp1 < temp2){ mk=m;break; } m++; } double p = pow(rho0,mk); x = *x0 + p * dk; double sk= x - *x0; double yk=cost->grad(&x,w_last,tt,rho,C) - gk; Hk = sk / (yk); k++; *x0 = x; } return x; } __global__ void dfp_choose(double *res,int indss_s,int indss_e, int indsb_s,int indsb_e, int indsm_s, int indsm_e, double *tt, double *rho, double *lb,double *ub, Account * account,int n){ int x = blockIdx.x * blockDim.x + threadIdx.x; double x0 = 0.0; if (x>=n) return; if ((x < indss_e) && ( x>= indss_s) ){ Ss_cost cost_ss; res[x] = dfp_solver( &cost_ss, &tt[x], rho, &x0, &account->w_last[x], &account->C); if(res[x] < lb[x]) res[x] = lb[x]; if(res[x] > ub[x]) res[x] = ub[x]; return ; } if ( (x < indsb_e) && (x >= indsb_s)){ Sb_cost cost_sb; res[x] = dfp_solver( &cost_sb, &tt[x], rho, &x0, &account->w_last[x], &account->C); if(res[x] < lb[x]) res[x] = lb[x]; if(res[x] > ub[x]) res[x] = ub[x]; return ; } if ( (x < indsm_e) && (x >= indsm_s)){ Sm_cost cost_sm; res[x] = dfp_solver( &cost_sm, &tt[x], rho, &x0, &account->w_last[x], &account->C); if(res[x] < lb[x]) res[x] = lb[x]; if(res[x] > ub[x]) res[x] = ub[x]; return; } return ; } __global__ void callogcost(double *res,double *x_cu,int n){ int x = blockIdx.x * blockDim.x + threadIdx.x; if (x>=n) return; res[x] = log(x_cu[x]); } __global__ void calcost(double *res,double *x_cu,int indss_s,int indss_e, int indsb_s,int indsb_e, int indsm_s, int indsm_e, Account * account,int n){ int x = blockIdx.x * blockDim.x + threadIdx.x; if (x>=n) return; res[x] = 0.0; if ((x < indss_e) && ( x>= indss_s) ){ double temp = pow(( account->C * ( x_cu[x] - account->w_last[x] )+142.1)/1004.0,2.0); res[x] = 0.01477 * exp(-temp) * max(( x_cu[x] - account->w_last[x] ),0.0); return ; } if ( (x < indsb_e) && (x >= indsb_s)){ double temp = pow(( account->C * ( x_cu[x] - account->w_last[x] )+1621.0)/1627.0,2.0); res[x] = 0.02079 * exp(-temp) * max(( x_cu[x] - account->w_last[x] ),0.0); return ; } if ( (x < indsm_e) && (x >= indsm_s)){ double temp = pow(( account->C * ( x_cu[x] - account->w_last[x] )+198.9)/648.0, 2.0); res[x] = 0.02079 * exp(-temp) * max(( x_cu[x] - account->w_last[x] ),0.0); return; } return 
; }
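/*
 * Host-side launch sketch for the my_newton kernel defined above (HIP flavour).
 * Only the kernel signature is taken from this file; the pointer names, the
 * 256-thread block size and the error handling are illustrative assumptions,
 * and the snippet assumes it is compiled together with the kernel definitions.
 */
#include <hip/hip_runtime.h>
#include <cstdio>

void run_my_newton(double *res_d, double *tt_d, double *rho_d, int n) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;  // one thread per element, as the kernel expects
  hipLaunchKernelGGL(my_newton, dim3(blocks), dim3(threads), 0, 0,
                     res_d, tt_d, rho_d, n);
  hipError_t err = hipDeviceSynchronize();
  if (err != hipSuccess)
    fprintf(stderr, "my_newton failed: %s\n", hipGetErrorString(err));
}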
50355aa151fb4a91d27f6de464cbca069bf78e57.cu
#include<cuda_runtime_api.h> #include<handler_cuda_error.h> #include<Common.h> #include <cublas_v2.h> #include<stdio.h> #include"dfp_cu.h" __device__ double newton_solver(LogCost * cost, double *tt,double *rho, double *x){ double er = 1; double tol=1e-12; int iter=0; double a; double xn; double grad, hess; while (er>tol){ grad = cost->grad(x,tt,rho); hess = cost->hess(x,tt,rho); a = grad/hess; xn= *x - a ; er = abs( a / *x); *x=xn; iter++; } return *x; } __global__ void my_newton(double *res, double *tt, double *rho,int n){ int x = blockIdx.x * blockDim.x + threadIdx.x; double x0 = 1e-5; if (x>=n) return; LogCost cost; res[x] = newton_solver( &cost, &tt[x], rho, &x0); return ; } __device__ double dfp_solver(Cost * cost, double *tt,double *rho, double *x0,double *w_last,double *C){ int maxk=100; double rho0=0.55; const double sigma=0.4; double epsilon=1e-10; int k=0; double Hk=1.0; double x= *x0; double gk = 0.0; double dk = 0.0; while(k < maxk){ gk = cost->grad(x0,w_last,tt,rho,C); // printf("gk = %.4f\n",gk); // printf(" k = %d\n",k); if( (gk*gk) < epsilon ) break; dk = -Hk * gk; int m=0; int mk=0; while( m < 15){ double p = pow(rho0,m); double temp = (*x0) + p * dk; double temp1 = cost->cost(&temp,w_last,tt,rho,C); double temp2 = cost->cost(x0,w_last,tt,rho,C) + sigma * p * gk * dk; if( temp1 < temp2){ mk=m;break; } m++; } double p = pow(rho0,mk); x = *x0 + p * dk; double sk= x - *x0; double yk=cost->grad(&x,w_last,tt,rho,C) - gk; Hk = sk / (yk); k++; *x0 = x; } return x; } __global__ void dfp_choose(double *res,int indss_s,int indss_e, int indsb_s,int indsb_e, int indsm_s, int indsm_e, double *tt, double *rho, double *lb,double *ub, Account * account,int n){ int x = blockIdx.x * blockDim.x + threadIdx.x; double x0 = 0.0; if (x>=n) return; if ((x < indss_e) && ( x>= indss_s) ){ Ss_cost cost_ss; res[x] = dfp_solver( &cost_ss, &tt[x], rho, &x0, &account->w_last[x], &account->C); if(res[x] < lb[x]) res[x] = lb[x]; if(res[x] > ub[x]) res[x] = ub[x]; return ; } if ( (x < indsb_e) && (x >= indsb_s)){ Sb_cost cost_sb; res[x] = dfp_solver( &cost_sb, &tt[x], rho, &x0, &account->w_last[x], &account->C); if(res[x] < lb[x]) res[x] = lb[x]; if(res[x] > ub[x]) res[x] = ub[x]; return ; } if ( (x < indsm_e) && (x >= indsm_s)){ Sm_cost cost_sm; res[x] = dfp_solver( &cost_sm, &tt[x], rho, &x0, &account->w_last[x], &account->C); if(res[x] < lb[x]) res[x] = lb[x]; if(res[x] > ub[x]) res[x] = ub[x]; return; } return ; } __global__ void callogcost(double *res,double *x_cu,int n){ int x = blockIdx.x * blockDim.x + threadIdx.x; if (x>=n) return; res[x] = log(x_cu[x]); } __global__ void calcost(double *res,double *x_cu,int indss_s,int indss_e, int indsb_s,int indsb_e, int indsm_s, int indsm_e, Account * account,int n){ int x = blockIdx.x * blockDim.x + threadIdx.x; if (x>=n) return; res[x] = 0.0; if ((x < indss_e) && ( x>= indss_s) ){ double temp = pow(( account->C * ( x_cu[x] - account->w_last[x] )+142.1)/1004.0,2.0); res[x] = 0.01477 * exp(-temp) * max(( x_cu[x] - account->w_last[x] ),0.0); return ; } if ( (x < indsb_e) && (x >= indsb_s)){ double temp = pow(( account->C * ( x_cu[x] - account->w_last[x] )+1621.0)/1627.0,2.0); res[x] = 0.02079 * exp(-temp) * max(( x_cu[x] - account->w_last[x] ),0.0); return ; } if ( (x < indsm_e) && (x >= indsm_s)){ double temp = pow(( account->C * ( x_cu[x] - account->w_last[x] )+198.9)/648.0, 2.0); res[x] = 0.02079 * exp(-temp) * max(( x_cu[x] - account->w_last[x] ),0.0); return; } return ; }
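/*
 * CUDA counterpart of the HIP launch sketch given after the .hip copy of this
 * file: the triple-chevron launch below is what hipify rewrites into
 * hipLaunchKernelGGL. Pointer names, block size and error handling are again
 * illustrative assumptions, and the snippet assumes it is compiled together
 * with the kernel definitions above.
 */
#include <cuda_runtime.h>
#include <cstdio>

void run_my_newton(double *res_d, double *tt_d, double *rho_d, int n) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  my_newton<<<blocks, threads>>>(res_d, tt_d, rho_d, n);
  cudaError_t err = cudaDeviceSynchronize();
  if (err != cudaSuccess)
    fprintf(stderr, "my_newton failed: %s\n", cudaGetErrorString(err));
}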
7121be29cd02d1bcb47b91cddf6bcaa79987baa4.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ //KC_FP_TYPE can be assumed to mean "double", but originally //this definition could also work with "float" for faster speed. //float compatability is no longer supported in this function. #include "kcArrayFunctions.h" #define MAX_P 1e25 #define MIN_P 1e-25 __device__ KC_FP_TYPE positiveBound(KC_FP_TYPE a) { //return a; if(isinf(a)) return MAX_P; else return fmin(fmax(a,MIN_P),MAX_P); } __device__ KC_FP_TYPE h(KC_FP_TYPE z, KC_FP_TYPE gamma, KC_FP_TYPE dt) { KC_FP_TYPE ex = KC_MAX(KC_MIN(KC_EXP(gamma*z),KC_MAXN),KC_MINN); return ex*dt; } __device__ KC_FP_TYPE hinv(KC_FP_TYPE y, KC_FP_TYPE gamma, KC_FP_TYPE dt) { return log(y/dt)/gamma; } //one thread per particle <<< nTrials,nParticles >>> __global__ void kcMoveParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * b, int * betaIdxVector, KC_FP_TYPE l_0, KC_FP_TYPE g, KC_FP_TYPE w, KC_FP_TYPE dt, KC_FP_TYPE * randN, KC_FP_TYPE sigMult, KC_FP_TYPE * log_li, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * ncdf, KC_FP_TYPE * posc, int * trIdx, int NT, int TT, int numParticles, int t) { int threadNum = blockIdx.x*blockDim.x + threadIdx.x; int tr_num = (int)threadNum / (int)numParticles; int p_num = threadNum % numParticles; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; KC_FP_TYPE cb = b[betaIdxVector[row]]; KC_FP_TYPE sw = sqrt(w); KC_FP_TYPE mup = (t==0)?(l_0):(pos[idx-1]+cb); KC_FP_TYPE mu = mup; KC_FP_TYPE sig2 = sigMult*w; KC_FP_TYPE sig = sqrt(sig2); KC_FP_TYPE maxI = fmin(1.0-1e-20, fmax( normcdf((1.0-mu)/sig),1e-20 )); pos[idx] = fmin(1.0-1e-20, normcdfinv(maxI*randN[pidx])*sig + mu); posc[pidx] = pos[idx]; KC_FP_TYPE dpos = pos[idx]-mu; KC_FP_TYPE log_pi_k = -log(maxI)-0.5*log(2.0*M_PI*sig2) - 0.5/sig2*(dpos*dpos); //to be stored for each particle: ncdf, lw, lw2 ncdf[idx] = normcdf((1-mup)/sw); KC_FP_TYPE dposp = pos[idx]-mup; KC_FP_TYPE log_p = -0*log(maxI) -0.5*log(2*M_PI*w)- 0.5/w*(dposp*dposp); log_li[pidx] = -h(pos[idx],g,dt)+y[row]*(log(fmax(h(pos[idx],g,1.0),1e-30))+log(dt))-lgamma(y[row]+1); KC_FP_TYPE pw = (t==0)?(log(1/(KC_FP_TYPE)numParticles) ):( log(fmax(wt[idx-1], 1e-30)) ); lw[pidx] = exp(pw+log_p+log_li[pidx]-log_pi_k); lw2[pidx] = exp(pw+log_p -log_pi_k); //safety checks for numerical errors if(isnan(lw[pidx]) || isinf(lw[pidx]) || isnan(pos[idx]) || isinf(pos[idx]) || isnan(lw2[pidx]) || isinf(lw2[pidx])) { lw[pidx] = 0; lw2[pidx] = 0; pos[idx] = mup; posc[pidx] = mup; } } } } //one thread per trial <<< nTrials,1 >>> __global__ void kcNormalizeWeights(KC_FP_TYPE * y, KC_FP_TYPE * wt, KC_FP_TYPE * wt_p, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * nEff, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; //sum up and normalize weights KC_FP_TYPE weightSum = 0; KC_FP_TYPE weightSum2 = 0; for(int p_num = 0; p_num < numParticles; p_num++) { int pidx = tr_num*numParticles+p_num; weightSum += lw[pidx]; weightSum2 += 
lw2[pidx]; } KC_FP_TYPE n_eff_den = 0; weightSum = fmax(weightSum,1e-20); weightSum2 = fmax(weightSum2,1e-20); for(int p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt[idx] = lw[pidx] /weightSum; wt_p[pidx] = lw2[pidx]/weightSum2; n_eff_den += wt[idx]*wt[idx]; cumsum[pidx] = (p_num>0)?(cumsum[pidx-1]+wt[idx]):(wt[idx]);//for resampling } nEff[tr_num] = 1/n_eff_den; } } } //initial calculation - probability of each spike count coming from a rate at the bound __global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < TT) { lg[idx] = exp( -h(1,g, dt) + y[idx]*log(fmax(h(1,g,dt),1e-30)) - lgamma(y[idx]+1)); } } //one thread per particle <<< nTrials,nParticles >>> // if particles look bad, resamples them from the distribution before the next step __global__ void kcResampleParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * posc, KC_FP_TYPE * wt, KC_FP_TYPE * log_li, KC_FP_TYPE * wt_p, int minEffParticles, KC_FP_TYPE * cumsum, KC_FP_TYPE * nEff, KC_FP_TYPE * randU, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * ncdf, int * trIdx, int NT, int TT, int numParticles, int t) { int threadNum = blockIdx.x*blockDim.x + threadIdx.x; int tr_num = (int)threadNum / (int)numParticles; int p_num = threadNum % numParticles; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int pidx = tr_num*numParticles+p_num; int row = trIdx[tr_num] + t; int idx = TT*p_num + row; int pidx_new = pidx; if(nEff[tr_num] < minEffParticles) { int p_num_new; for(p_num_new = 0; p_num_new < numParticles-1 && randU[pidx] > cumsum[numParticles*tr_num+p_num_new]; p_num_new++) { //everything taken care of in loop statement } pidx_new = tr_num*numParticles+p_num_new; wt[idx] = 1.0/(KC_FP_TYPE)numParticles; //weights are now uniform again pos[idx] = posc[pidx_new]; } KC_FP_TYPE wt_old = (t==0)?(1.0/(KC_FP_TYPE)numParticles):(wt[idx-1]); p_cet_0[pidx] = (1.0-ncdf[idx])*wt_old; p_cgt_0a[pidx] = exp(log_li[pidx])*wt_p[pidx]; //or pidx_new? 
p_cgt_0b[pidx] = ncdf[idx]*wt_old; } } } //one thread per trial <<< nTrials,1 >>> //move bound crossing probabilities forward in time __global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; KC_FP_TYPE p_cet_s = 0; KC_FP_TYPE p_cgt_sa = 0; KC_FP_TYPE p_cgt_sb = 0; for(int p_num = 0; p_num < numParticles; p_num++) { int pidx = tr_num*numParticles+p_num; //int idx = TT*p_num + row; p_cet_s += p_cet_0[pidx]; p_cgt_sa += p_cgt_0a[pidx]; p_cgt_sb += p_cgt_0b[pidx]; //finished a bit of the resampler that must run post-sampling for parallelization not to screw up, this will only be used again if this is last timestep in trial if(nEff[tr_num] < minEffParticles && t-1==trLength) { cumsum[pidx] = 1/(KC_FP_TYPE)numParticles*(1+p_num); } } KC_FP_TYPE p_clte_old = ((t==0)?(0):(p_clte[row-1])); KC_FP_TYPE p_cgt_old = ((t==0)?(1):(p_cgt[row-1])); KC_FP_TYPE p_clt_1 = lg[row]*p_clte_old; KC_FP_TYPE p_cet_1 = lg[row]*(1.0-p_clte_old)*p_cet_s; KC_FP_TYPE p_cgt_1 = (1.0-p_clte_old)*p_cgt_sa*p_cgt_sb; p_cet[row] = p_cet_1/(p_clt_1+p_cet_1+p_cgt_1); p_clte[row] = (p_cet_1+p_clt_1)/(p_clt_1+p_cet_1+p_cgt_1); //this is a little redudant, but I think it is convenient later? p_clt[row] = p_clt_1/(p_clt_1+p_cet_1+p_cgt_1); p_cgt[row] = p_cgt_1/(p_clt_1+p_cet_1+p_cgt_1); p_cpr[row] = p_cgt_old*p_cet_s; //compare this index in MATLAB code } } } //Finally do that backwards sampling, <<< NT, 1 >>> __global__ void kcBackwardsSample(KC_FP_TYPE * sample, int * crossingTimes, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * ncdf, KC_FP_TYPE * b, int * betaIdx, KC_FP_TYPE l_0, KC_FP_TYPE w, KC_FP_TYPE g, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_clte, KC_FP_TYPE * randUp, KC_FP_TYPE * randUb, KC_FP_TYPE * wt_p, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; int row = trIdx[tr_num] + t; if(t == trLength-1) { //if t=end of trial, start off the backwards sampling crossingTimes[tr_num] = trLength; //decide whether end trial has hit boundary if(randUb[tr_num] < p_clte[row]) { sample[row] = 1; crossingTimes[tr_num] = t; } //else select a particle to be end of trial (cumsum holds the CDF of the distribution over particles) else { int p_num; for(p_num = 0; p_num < numParticles-1 && randUp[tr_num] > cumsum[numParticles*tr_num+p_num]; p_num++) { } int idx = TT*p_num + row; sample[row] = pos[idx]; } } else if(t < trLength-1 && t >= 0) { //else, propagate backwards //if previous sample had hit threshold if(sample[row+1] >= 1) { //if boundary already reached if(randUb[tr_num] < p_clte[row]/(p_cpr[row+1] + p_clte[row])) { crossingTimes[tr_num] = t; sample[row] = 1; } //gets pre-crossing particle else { KC_FP_TYPE wtSum = 0; int p_num; for(p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt_p[pidx] = wt[idx]*fmax(1.0-ncdf[idx+1],1e-25); wtSum += wt_p[pidx]; } wtSum = fmax(wtSum,1e-30); KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum; for(p_num = 0; p_num < numParticles-1 && csum < 
randUp[tr_num]; p_num++) { int pidx = tr_num*numParticles+p_num+1; csum += wt_p[pidx]/wtSum; } int idx = TT*p_num + row; sample[row] = pos[idx]; } } //else, samples a particle else { KC_FP_TYPE wtSum = 0; int p_num; for(p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt_p[pidx] = wt[idx]*exp(-0.5/w*pow( sample[row+1] - (pos[idx] + b[betaIdx[row]]),2 )); wtSum += wt_p[pidx]; } wtSum = fmax(wtSum,1e-30); KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum; for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) { int pidx = tr_num*numParticles+p_num+1; csum += wt_p[pidx]/wtSum; } int idx = TT*p_num + row; sample[row] = pos[idx]; } } } } /* Performs a forward sweep of the path after backwards sampling Draws from prior for steps post-threshold crossing (for conjugate sampling of parameters) Calculates som statistics for later sampling trial number given by CUDA thread */ __global__ void kcForwardFinalPass( KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * randUni, const KC_FP_TYPE* b, const int * betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx,const int NT, KC_FP_TYPE * beta_sum) { int tr_num = blockIdx.x*blockDim.x+threadIdx.x; if(tr_num < NT) { int t_0 = trIdx[tr_num]; beta_sum[tr_num] = 0; int trLength = trIdx[tr_num+1] - trIdx[tr_num]; KC_FP_TYPE cb = b[betaIndVec[t_0]]; for(int t = 0; t < trLength; t++) { if(t == crossingTimes[tr_num]) { //samples the first value of lambda to cross the bound (truncated normal, > 1) KC_FP_TYPE mu = (t > 0)?(lambda[t_0 + t-1]+cb):l_0; KC_FP_TYPE minS = normcdf((1-mu)/sqrt(w)); if(minS >= 1.0-1e-5) { lambda[t_0 + t] = 1; } else { lambda[t_0 + t] = mu+sqrt(w)*normcdfinv( minS + (1-minS)*randUni[t_0+t]); } } else if(t > crossingTimes[tr_num]) { lambda[t_0 + t] = lambda[t_0 + t - 1] + cb + KC_SQRT(w)*normcdfinv( randUni[t_0+t]); } beta_sum[tr_num] += (t>0 && t <= crossingTimes[tr_num])?(lambda[t_0 + t] - lambda[t_0 + t-1]):0; //only include lambdas up until first threshold crossing to look at drift rates } } } //single thread kernel to assemble stats of the ramps across trials for sampling beta,l_0 __global__ void kcAssembleSamplingStatistics(KC_FP_TYPE * sigMat, KC_FP_TYPE * muVec, const KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * beta_sum,const int*betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx, const int NT, const int numBetas) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx == 0) { for(int trNum = 0; trNum < NT; trNum++) { int t_0 = trIdx[trNum]; int cb = betaIndVec[t_0]; int trLength = trIdx[trNum+1] - trIdx[trNum]; sigMat[(cb)*(numBetas+1) + cb] += fmin(1.0*crossingTimes[trNum],trLength-1.0)/w; sigMat[(numBetas)*(numBetas+1) + numBetas] += 1.0/w; muVec[cb] += beta_sum[trNum]/w; muVec[numBetas] += lambda[t_0]/w; } } } //Samples a single set of latent paths from the ramping model for a set of trials given fixed parameters //args // 0 = new lambda (output, should be pre-allocated on GPU, same size as y) // 1 = new auxiliary variable for threshold crossing (output, should be pre-allocated on GPU, vector of length number of trials) // 2 = y (observations) // 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y) // 4 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. 
values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) // 5 = betas (the beta values) // 6 = w (variance of diffusion process) // 7 = l_0 (starting lambda value) // 8 = g (absorbing boundary effective height) // 9 = dt (bin/timestep size) // 10 = numParticles // 11 = minEffParticles (how many effective particles per trial to keep around) // 12 = sigMult (used for particle proposals, proposal variance is sigMult*w) // 13 = maxTrialLength // 14 = beta/l_0 sampling vec param c (uses this as output for sampling betas, l_0) // 15 = beta/l_0 sampling vec param p uses this as output for sampling betas, l_0) void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { hipError_t ce; hiprandStatus_t cre; /*ce = hipSetDevice(KC_GPU_DEVICE); if(ce != hipSuccess) { mexPrintf("Error initializing device (kcParticleFilterProp.cu) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); }*/ //init data unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * lambdaTarget = kcGetArrayData(prhs[0]); int * auxiliaryTarget = kcGetArrayDataInt(prhs[1]); KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT); int * trIdx = kcGetArrayDataInt(prhs[3]); unsigned int NT = kcGetArrayNumEl(prhs[3])-1; int * betaIdxVector = kcGetArrayDataInt(prhs[4]); KC_FP_TYPE * b = mxGetPr(prhs[5]); int numBetas = mxGetNumberOfElements(prhs[5]); KC_FP_TYPE * b_gpu; ce = hipMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); if(ce != hipSuccess) { mexPrintf("Error allocating space for betas on GPU - first allocation in function (particle filter) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } ce = hipMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,hipMemcpyHostToDevice); if(ce != hipSuccess) { mexPrintf("Error moving betas to GPU (particle filter) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } KC_FP_TYPE w = mxGetScalar(prhs[6]); KC_FP_TYPE l_0 = mxGetScalar(prhs[7]); KC_FP_TYPE g = mxGetScalar(prhs[8]); KC_FP_TYPE dt = mxGetScalar(prhs[9]); int numParticles = mxGetScalar(prhs[10]); int minEffParticles = mxGetScalar(prhs[11]); int sigMult = mxGetScalar(prhs[12]); int maxTrialLength = mxGetScalar(prhs[13]); //particle weights/probabilities of hitting the bound KC_FP_TYPE * p_clte; KC_FP_TYPE * p_cet; KC_FP_TYPE * p_cgt; KC_FP_TYPE * p_clt; KC_FP_TYPE * p_cpr; checkCudaErrors(hipMalloc((void**)&p_clte, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cet, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cgt, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_clt, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cpr, TT*sizeof(KC_FP_TYPE))); KC_FP_TYPE * wt; KC_FP_TYPE * wt_p; KC_FP_TYPE * pos;//particle positions checkCudaErrors(hipMalloc((void**)&wt, (TT)*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&wt_p, (NT)*numParticles*sizeof(KC_FP_TYPE))); ce = hipMalloc((void**)&pos, (TT)*numParticles*sizeof(KC_FP_TYPE)); if(ce != hipSuccess) { mexPrintf("Error allocating pos "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } KC_FP_TYPE * log_li; KC_FP_TYPE * posc; //for resampling KC_FP_TYPE * lw; //unnormalized weights KC_FP_TYPE * lw2; KC_FP_TYPE * ncdf; KC_FP_TYPE * p_cet_0; KC_FP_TYPE * p_cgt_0a; KC_FP_TYPE * p_cgt_0b; KC_FP_TYPE * lg; //log p(y|at boundary) KC_FP_TYPE * cumsum; KC_FP_TYPE * beta_sum; checkCudaErrors(hipMalloc((void**)&log_li, 
NT*numParticles*sizeof(KC_FP_TYPE))); //checkCudaErrors(hipMalloc((void**)&log_lic, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&posc, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&lw, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&lw2, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&ncdf, TT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cet_0, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cgt_0a, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&p_cgt_0b, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&cumsum, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&beta_sum, NT*sizeof(KC_FP_TYPE))); checkCudaErrors(hipMalloc((void**)&lg, TT*sizeof(KC_FP_TYPE))); KC_FP_TYPE * nEff; checkCudaErrors(hipMalloc((void**)&nEff, NT*sizeof(KC_FP_TYPE))); int randSize = (NT*numParticles) + ((NT*numParticles)%2==0?0:1); int randSizeS = (NT) + (NT%2==0?0:1); int randSizeT = (TT) + (TT%2==0?0:1); KC_FP_TYPE * randN; KC_FP_TYPE * randNs; KC_FP_TYPE * randTs; ce = hipMalloc((void**)&randN, randSize *sizeof(KC_FP_TYPE)); if(ce != hipSuccess) { mexPrintf("Error allocating randN "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } ce = hipMalloc((void**)&randNs, randSizeS*sizeof(KC_FP_TYPE)); if(ce != hipSuccess) { mexPrintf("Error allocating randNs "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } ce = hipMalloc((void**)&randTs, randSizeT*sizeof(KC_FP_TYPE)); if(ce != hipSuccess) { mexPrintf("Error allocating randTs "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } //setup the random number generator hiprandGenerator_t curandGen = 0; hiprandStatus_t hiprandStatus_t; hiprandStatus_t = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error initializing random number generator (%d).\n",(int)hiprandStatus_t); mexErrMsgTxt(buffer); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed); //hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, (unsigned int)time(NULL)); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error random number seed (%d).\n",(int)hiprandStatus_t); mexErrMsgTxt(buffer); } hiprandStatus_t = hiprandGenerateSeeds(curandGen); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error random number generating seed (%d).\n",(int)hiprandStatus_t); mexErrMsgTxt(buffer); } //hipThreadSetLimit(hipLimitStackSize, 1024); //setup initial particle positions int blockSize , nBlocks; int blockSizeT, nBlocksT; int blockSizeN, nBlocksN; blockSizeT = 4; nBlocksT = TT/blockSizeT + ((TT%blockSizeT==0)?0:1); blockSizeN = 1; nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error before kcSetupLG "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) { hipLaunchKernelGGL(( kcSetupLG) , dim3(nBlocksT), dim3(blockSizeT) , 0, 0, y,lg,g,dt,TT); ce = hipDeviceSynchronize(); if(ce != 
hipSuccess) { mexPrintf("Error after kcSetupLG<<<%d,%d>>> ",nBlocksT,blockSizeT); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } blockSize = 8; int totalThreads = numParticles*NT; nBlocks = totalThreads/blockSize + ((totalThreads%blockSize==0)?0:1); //mexPrintf("Max trial length = %d, blockSizes = %d,%d, nBlocks = %d,%d\n", maxTrialLength,blockSize,blockSizeN,nBlocks,nBlocksN); //forward pass loop for (int ii = 0; ii < maxTrialLength;ii++) { //move all particles foward cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN,randSize); //random sample steps for all particles ce = hipDeviceSynchronize(); if(ce != hipSuccess) { int currDev; hipGetDevice(&currDev); mexPrintf("Error synchronizing post-rand draw 1 Size=%d ii=%d, current device=%d ",randSize,ii,currDev); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in particle propogation. Size=%d ii=%d ",randSize,ii); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } hipLaunchKernelGGL(( kcMoveParticles) , dim3(nBlocks), dim3(blockSize) , 0, 0, y,pos,wt, b_gpu,betaIdxVector,l_0,g,w,dt,randN, sigMult,log_li,lw,lw2,ncdf, posc, trIdx, NT, TT, numParticles, ii); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { int currDev; hipGetDevice(&currDev); mexPrintf("Error after kcMoveParticles<<<%d,%d>>> ii=%d/%d, dev=%d ",nBlocks,blockSize,ii,maxTrialLength,currDev); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //normalize weights hipLaunchKernelGGL(( kcNormalizeWeights) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, y,wt,wt_p, lw, lw2, nEff, cumsum, trIdx, NT, TT, numParticles, ii); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcNormalizeWeights<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //check effective num particles, resample when necessary cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSize); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in resampler. 
ii=%d/%d ",ii,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } hipLaunchKernelGGL(( kcResampleParticles) , dim3(nBlocks), dim3(blockSize) , 0, 0, y,pos,posc,wt,log_li,wt_p, minEffParticles,cumsum,nEff,randN,p_cet_0,p_cgt_0a,p_cgt_0b,ncdf,trIdx, NT, TT, numParticles, ii); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcResampleParticles<<<%d,%d>>> ii=%d/%d ",nBlocks,blockSize,ii,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //move passage density foward //__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, int * trIdx, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int t, int NT, int TT, int numParticles) { hipLaunchKernelGGL(( kcPropogateBoundaryDensity) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, y,p_clt,p_cet,p_cgt,p_clte,p_cpr,p_cet_0,p_cgt_0a, p_cgt_0b, lg, nEff, minEffParticles, cumsum,trIdx, NT, TT, numParticles, ii); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcPropogateBoundaryDensity<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } } //backwards sample the particles for (int jj = maxTrialLength-1; jj >= 0; jj--) { cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSizeS); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in backwards sampler (1). jj=%d/%d ",jj,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randNs,randSizeS); //ce = hipDeviceSynchronize(); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in backwards sampler (2). jj=%d/%d ",jj,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error synchronizing before kcBackwardsSample (post random generation) jj=%d/%d ",jj,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } hipLaunchKernelGGL(( kcBackwardsSample) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, lambdaTarget, auxiliaryTarget, pos, wt, ncdf, b_gpu, betaIdxVector, l_0, w, g, p_cpr, p_clte, randN, randNs, wt_p, cumsum, trIdx, NT, TT, numParticles, jj); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcBackwardsSample<<<%d,%d>>> jj=%d/%d ",nBlocksN,blockSizeN,jj,maxTrialLength); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } } cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randTs, randSizeT); //ce = hipDeviceSynchronize(); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in final sampler (2). 
"); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error synchronizing before kcForwardFinalPass (post random generation) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //samples all latent variables beyond bound hit time hipLaunchKernelGGL(( kcForwardFinalPass) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, lambdaTarget, auxiliaryTarget, randTs, b_gpu, betaIdxVector, l_0, w, trIdx, NT, beta_sum); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error after kcForwardFinalPass "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //gets some statistics about the latent variables put together to be able to sample the drift rates KC_FP_TYPE * sampling_c; KC_FP_TYPE * sampling_p; checkCudaErrors(hipMalloc((void**)&sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1))); checkCudaErrors(hipMalloc((void**)&sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1))); checkCudaErrors(hipMemcpy(sampling_c,(KC_FP_TYPE*)mxGetPr(prhs[14]), sizeof(KC_FP_TYPE)*(numBetas+1),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(sampling_p,(KC_FP_TYPE*)mxGetPr(prhs[15]), sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kcAssembleSamplingStatistics), dim3(1),dim3(1), 0, 0, sampling_p, sampling_c, lambdaTarget, auxiliaryTarget, beta_sum,betaIdxVector,l_0, w, trIdx, NT, numBetas); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(prhs[14]),sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1),hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(prhs[15]),sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),hipMemcpyDeviceToHost)); //free up memory cre = hiprandDestroyGenerator(curandGen); if(cre != HIPRAND_STATUS_SUCCESS) { mexPrintf("Error destroying rand generator (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error synchronizing post-rand generator destruction (particleFilter) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } checkCudaErrors(hipFree(b_gpu)); checkCudaErrors(hipFree(p_clte)); checkCudaErrors(hipFree(p_cet)); checkCudaErrors(hipFree(p_cgt)); checkCudaErrors(hipFree(p_clt)); checkCudaErrors(hipFree(p_cpr)); checkCudaErrors(hipFree(pos)); checkCudaErrors(hipFree(wt)); ce = hipFree(wt_p); if(ce != hipSuccess) { mexPrintf("Error freeing memory in particle filter (wt_p) "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } checkCudaErrors(hipFree(log_li)); checkCudaErrors(hipFree(posc)); checkCudaErrors(hipFree(lw)); checkCudaErrors(hipFree(lw2)); checkCudaErrors(hipFree(ncdf)); checkCudaErrors(hipFree(p_cet_0)); checkCudaErrors(hipFree(p_cgt_0a)); checkCudaErrors(hipFree(p_cgt_0b)); checkCudaErrors(hipFree(lg)); checkCudaErrors(hipFree(cumsum)); checkCudaErrors(hipFree(beta_sum)); checkCudaErrors(hipFree(sampling_c)); checkCudaErrors(hipFree(sampling_p)); checkCudaErrors(hipFree(nEff)); checkCudaErrors(hipFree(randN)); checkCudaErrors(hipFree(randNs)); checkCudaErrors(hipFree(randTs)); ce = hipDeviceSynchronize(); if(ce != hipSuccess) { mexPrintf("Error at the end ofthe particle filter "); mexPrintf(hipGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } }
7121be29cd02d1bcb47b91cddf6bcaa79987baa4.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <cusparse_v2.h> #include "cublas_v2.h" #include <curand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ //KC_FP_TYPE can be assumed to mean "double", but originally //this definition could also work with "float" for faster speed. //float compatability is no longer supported in this function. #include "kcArrayFunctions.h" #define MAX_P 1e25 #define MIN_P 1e-25 __device__ KC_FP_TYPE positiveBound(KC_FP_TYPE a) { //return a; if(isinf(a)) return MAX_P; else return fmin(fmax(a,MIN_P),MAX_P); } __device__ KC_FP_TYPE h(KC_FP_TYPE z, KC_FP_TYPE gamma, KC_FP_TYPE dt) { KC_FP_TYPE ex = KC_MAX(KC_MIN(KC_EXP(gamma*z),KC_MAXN),KC_MINN); return ex*dt; } __device__ KC_FP_TYPE hinv(KC_FP_TYPE y, KC_FP_TYPE gamma, KC_FP_TYPE dt) { return log(y/dt)/gamma; } //one thread per particle <<< nTrials,nParticles >>> __global__ void kcMoveParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * b, int * betaIdxVector, KC_FP_TYPE l_0, KC_FP_TYPE g, KC_FP_TYPE w, KC_FP_TYPE dt, KC_FP_TYPE * randN, KC_FP_TYPE sigMult, KC_FP_TYPE * log_li, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * ncdf, KC_FP_TYPE * posc, int * trIdx, int NT, int TT, int numParticles, int t) { int threadNum = blockIdx.x*blockDim.x + threadIdx.x; int tr_num = (int)threadNum / (int)numParticles; int p_num = threadNum % numParticles; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; KC_FP_TYPE cb = b[betaIdxVector[row]]; KC_FP_TYPE sw = sqrt(w); KC_FP_TYPE mup = (t==0)?(l_0):(pos[idx-1]+cb); KC_FP_TYPE mu = mup; KC_FP_TYPE sig2 = sigMult*w; KC_FP_TYPE sig = sqrt(sig2); KC_FP_TYPE maxI = fmin(1.0-1e-20, fmax( normcdf((1.0-mu)/sig),1e-20 )); pos[idx] = fmin(1.0-1e-20, normcdfinv(maxI*randN[pidx])*sig + mu); posc[pidx] = pos[idx]; KC_FP_TYPE dpos = pos[idx]-mu; KC_FP_TYPE log_pi_k = -log(maxI)-0.5*log(2.0*M_PI*sig2) - 0.5/sig2*(dpos*dpos); //to be stored for each particle: ncdf, lw, lw2 ncdf[idx] = normcdf((1-mup)/sw); KC_FP_TYPE dposp = pos[idx]-mup; KC_FP_TYPE log_p = -0*log(maxI) -0.5*log(2*M_PI*w)- 0.5/w*(dposp*dposp); log_li[pidx] = -h(pos[idx],g,dt)+y[row]*(log(fmax(h(pos[idx],g,1.0),1e-30))+log(dt))-lgamma(y[row]+1); KC_FP_TYPE pw = (t==0)?(log(1/(KC_FP_TYPE)numParticles) ):( log(fmax(wt[idx-1], 1e-30)) ); lw[pidx] = exp(pw+log_p+log_li[pidx]-log_pi_k); lw2[pidx] = exp(pw+log_p -log_pi_k); //safety checks for numerical errors if(isnan(lw[pidx]) || isinf(lw[pidx]) || isnan(pos[idx]) || isinf(pos[idx]) || isnan(lw2[pidx]) || isinf(lw2[pidx])) { lw[pidx] = 0; lw2[pidx] = 0; pos[idx] = mup; posc[pidx] = mup; } } } } //one thread per trial <<< nTrials,1 >>> __global__ void kcNormalizeWeights(KC_FP_TYPE * y, KC_FP_TYPE * wt, KC_FP_TYPE * wt_p, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * nEff, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; //sum up and normalize weights KC_FP_TYPE weightSum = 0; KC_FP_TYPE weightSum2 = 0; for(int p_num = 0; p_num < numParticles; p_num++) { int pidx = tr_num*numParticles+p_num; weightSum += lw[pidx]; weightSum2 += lw2[pidx]; } KC_FP_TYPE n_eff_den = 0; weightSum = 
fmax(weightSum,1e-20); weightSum2 = fmax(weightSum2,1e-20); for(int p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt[idx] = lw[pidx] /weightSum; wt_p[pidx] = lw2[pidx]/weightSum2; n_eff_den += wt[idx]*wt[idx]; cumsum[pidx] = (p_num>0)?(cumsum[pidx-1]+wt[idx]):(wt[idx]);//for resampling } nEff[tr_num] = 1/n_eff_den; } } } //initial calculation - probability of each spike count coming from a rate at the bound __global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx < TT) { lg[idx] = exp( -h(1,g, dt) + y[idx]*log(fmax(h(1,g,dt),1e-30)) - lgamma(y[idx]+1)); } } //one thread per particle <<< nTrials,nParticles >>> // if particles look bad, resamples them from the distribution before the next step __global__ void kcResampleParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * posc, KC_FP_TYPE * wt, KC_FP_TYPE * log_li, KC_FP_TYPE * wt_p, int minEffParticles, KC_FP_TYPE * cumsum, KC_FP_TYPE * nEff, KC_FP_TYPE * randU, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * ncdf, int * trIdx, int NT, int TT, int numParticles, int t) { int threadNum = blockIdx.x*blockDim.x + threadIdx.x; int tr_num = (int)threadNum / (int)numParticles; int p_num = threadNum % numParticles; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int pidx = tr_num*numParticles+p_num; int row = trIdx[tr_num] + t; int idx = TT*p_num + row; int pidx_new = pidx; if(nEff[tr_num] < minEffParticles) { int p_num_new; for(p_num_new = 0; p_num_new < numParticles-1 && randU[pidx] > cumsum[numParticles*tr_num+p_num_new]; p_num_new++) { //everything taken care of in loop statement } pidx_new = tr_num*numParticles+p_num_new; wt[idx] = 1.0/(KC_FP_TYPE)numParticles; //weights are now uniform again pos[idx] = posc[pidx_new]; } KC_FP_TYPE wt_old = (t==0)?(1.0/(KC_FP_TYPE)numParticles):(wt[idx-1]); p_cet_0[pidx] = (1.0-ncdf[idx])*wt_old; p_cgt_0a[pidx] = exp(log_li[pidx])*wt_p[pidx]; //or pidx_new? 
p_cgt_0b[pidx] = ncdf[idx]*wt_old; } } } //one thread per trial <<< nTrials,1 >>> //move bound crossing probabilities forward in time __global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; if(t < trLength) { int row = trIdx[tr_num] + t; KC_FP_TYPE p_cet_s = 0; KC_FP_TYPE p_cgt_sa = 0; KC_FP_TYPE p_cgt_sb = 0; for(int p_num = 0; p_num < numParticles; p_num++) { int pidx = tr_num*numParticles+p_num; //int idx = TT*p_num + row; p_cet_s += p_cet_0[pidx]; p_cgt_sa += p_cgt_0a[pidx]; p_cgt_sb += p_cgt_0b[pidx]; //finished a bit of the resampler that must run post-sampling for parallelization not to screw up, this will only be used again if this is last timestep in trial if(nEff[tr_num] < minEffParticles && t-1==trLength) { cumsum[pidx] = 1/(KC_FP_TYPE)numParticles*(1+p_num); } } KC_FP_TYPE p_clte_old = ((t==0)?(0):(p_clte[row-1])); KC_FP_TYPE p_cgt_old = ((t==0)?(1):(p_cgt[row-1])); KC_FP_TYPE p_clt_1 = lg[row]*p_clte_old; KC_FP_TYPE p_cet_1 = lg[row]*(1.0-p_clte_old)*p_cet_s; KC_FP_TYPE p_cgt_1 = (1.0-p_clte_old)*p_cgt_sa*p_cgt_sb; p_cet[row] = p_cet_1/(p_clt_1+p_cet_1+p_cgt_1); p_clte[row] = (p_cet_1+p_clt_1)/(p_clt_1+p_cet_1+p_cgt_1); //this is a little redudant, but I think it is convenient later? p_clt[row] = p_clt_1/(p_clt_1+p_cet_1+p_cgt_1); p_cgt[row] = p_cgt_1/(p_clt_1+p_cet_1+p_cgt_1); p_cpr[row] = p_cgt_old*p_cet_s; //compare this index in MATLAB code } } } //Finally do that backwards sampling, <<< NT, 1 >>> __global__ void kcBackwardsSample(KC_FP_TYPE * sample, int * crossingTimes, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * ncdf, KC_FP_TYPE * b, int * betaIdx, KC_FP_TYPE l_0, KC_FP_TYPE w, KC_FP_TYPE g, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_clte, KC_FP_TYPE * randUp, KC_FP_TYPE * randUb, KC_FP_TYPE * wt_p, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) { int tr_num = blockIdx.x*blockDim.x + threadIdx.x; if(tr_num < NT) { int trLength = trIdx[tr_num+1] - trIdx[tr_num]; int row = trIdx[tr_num] + t; if(t == trLength-1) { //if t=end of trial, start off the backwards sampling crossingTimes[tr_num] = trLength; //decide whether end trial has hit boundary if(randUb[tr_num] < p_clte[row]) { sample[row] = 1; crossingTimes[tr_num] = t; } //else select a particle to be end of trial (cumsum holds the CDF of the distribution over particles) else { int p_num; for(p_num = 0; p_num < numParticles-1 && randUp[tr_num] > cumsum[numParticles*tr_num+p_num]; p_num++) { } int idx = TT*p_num + row; sample[row] = pos[idx]; } } else if(t < trLength-1 && t >= 0) { //else, propagate backwards //if previous sample had hit threshold if(sample[row+1] >= 1) { //if boundary already reached if(randUb[tr_num] < p_clte[row]/(p_cpr[row+1] + p_clte[row])) { crossingTimes[tr_num] = t; sample[row] = 1; } //gets pre-crossing particle else { KC_FP_TYPE wtSum = 0; int p_num; for(p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt_p[pidx] = wt[idx]*fmax(1.0-ncdf[idx+1],1e-25); wtSum += wt_p[pidx]; } wtSum = fmax(wtSum,1e-30); KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum; for(p_num = 0; p_num < numParticles-1 && csum < 
randUp[tr_num]; p_num++) { int pidx = tr_num*numParticles+p_num+1; csum += wt_p[pidx]/wtSum; } int idx = TT*p_num + row; sample[row] = pos[idx]; } } //else, samples a particle else { KC_FP_TYPE wtSum = 0; int p_num; for(p_num = 0; p_num < numParticles; p_num++) { int idx = TT*p_num + row; int pidx = tr_num*numParticles+p_num; wt_p[pidx] = wt[idx]*exp(-0.5/w*pow( sample[row+1] - (pos[idx] + b[betaIdx[row]]),2 )); wtSum += wt_p[pidx]; } wtSum = fmax(wtSum,1e-30); KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum; for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) { int pidx = tr_num*numParticles+p_num+1; csum += wt_p[pidx]/wtSum; } int idx = TT*p_num + row; sample[row] = pos[idx]; } } } } /* Performs a forward sweep of the path after backwards sampling Draws from prior for steps post-threshold crossing (for conjugate sampling of parameters) Calculates som statistics for later sampling trial number given by CUDA thread */ __global__ void kcForwardFinalPass( KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * randUni, const KC_FP_TYPE* b, const int * betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx,const int NT, KC_FP_TYPE * beta_sum) { int tr_num = blockIdx.x*blockDim.x+threadIdx.x; if(tr_num < NT) { int t_0 = trIdx[tr_num]; beta_sum[tr_num] = 0; int trLength = trIdx[tr_num+1] - trIdx[tr_num]; KC_FP_TYPE cb = b[betaIndVec[t_0]]; for(int t = 0; t < trLength; t++) { if(t == crossingTimes[tr_num]) { //samples the first value of lambda to cross the bound (truncated normal, > 1) KC_FP_TYPE mu = (t > 0)?(lambda[t_0 + t-1]+cb):l_0; KC_FP_TYPE minS = normcdf((1-mu)/sqrt(w)); if(minS >= 1.0-1e-5) { lambda[t_0 + t] = 1; } else { lambda[t_0 + t] = mu+sqrt(w)*normcdfinv( minS + (1-minS)*randUni[t_0+t]); } } else if(t > crossingTimes[tr_num]) { lambda[t_0 + t] = lambda[t_0 + t - 1] + cb + KC_SQRT(w)*normcdfinv( randUni[t_0+t]); } beta_sum[tr_num] += (t>0 && t <= crossingTimes[tr_num])?(lambda[t_0 + t] - lambda[t_0 + t-1]):0; //only include lambdas up until first threshold crossing to look at drift rates } } } //single thread kernel to assemble stats of the ramps across trials for sampling beta,l_0 __global__ void kcAssembleSamplingStatistics(KC_FP_TYPE * sigMat, KC_FP_TYPE * muVec, const KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * beta_sum,const int*betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx, const int NT, const int numBetas) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx == 0) { for(int trNum = 0; trNum < NT; trNum++) { int t_0 = trIdx[trNum]; int cb = betaIndVec[t_0]; int trLength = trIdx[trNum+1] - trIdx[trNum]; sigMat[(cb)*(numBetas+1) + cb] += fmin(1.0*crossingTimes[trNum],trLength-1.0)/w; sigMat[(numBetas)*(numBetas+1) + numBetas] += 1.0/w; muVec[cb] += beta_sum[trNum]/w; muVec[numBetas] += lambda[t_0]/w; } } } //Samples a single set of latent paths from the ramping model for a set of trials given fixed parameters //args // 0 = new lambda (output, should be pre-allocated on GPU, same size as y) // 1 = new auxiliary variable for threshold crossing (output, should be pre-allocated on GPU, vector of length number of trials) // 2 = y (observations) // 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y) // 4 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. 
values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) // 5 = betas (the beta values) // 6 = w (variance of diffusion process) // 7 = l_0 (starting lambda value) // 8 = g (absorbing boundary effective height) // 9 = dt (bin/timestep size) // 10 = numParticles // 11 = minEffParticles (how many effective particles per trial to keep around) // 12 = sigMult (used for particle proposals, proposal variance is sigMult*w) // 13 = maxTrialLength // 14 = beta/l_0 sampling vec param c (uses this as output for sampling betas, l_0) // 15 = beta/l_0 sampling vec param p uses this as output for sampling betas, l_0) void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { cudaError_t ce; curandStatus_t cre; /*ce = cudaSetDevice(KC_GPU_DEVICE); if(ce != cudaSuccess) { mexPrintf("Error initializing device (kcParticleFilterProp.cu) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); }*/ //init data unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * lambdaTarget = kcGetArrayData(prhs[0]); int * auxiliaryTarget = kcGetArrayDataInt(prhs[1]); KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT); int * trIdx = kcGetArrayDataInt(prhs[3]); unsigned int NT = kcGetArrayNumEl(prhs[3])-1; int * betaIdxVector = kcGetArrayDataInt(prhs[4]); KC_FP_TYPE * b = mxGetPr(prhs[5]); int numBetas = mxGetNumberOfElements(prhs[5]); KC_FP_TYPE * b_gpu; ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); if(ce != cudaSuccess) { mexPrintf("Error allocating space for betas on GPU - first allocation in function (particle filter) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } ce = cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice); if(ce != cudaSuccess) { mexPrintf("Error moving betas to GPU (particle filter) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } KC_FP_TYPE w = mxGetScalar(prhs[6]); KC_FP_TYPE l_0 = mxGetScalar(prhs[7]); KC_FP_TYPE g = mxGetScalar(prhs[8]); KC_FP_TYPE dt = mxGetScalar(prhs[9]); int numParticles = mxGetScalar(prhs[10]); int minEffParticles = mxGetScalar(prhs[11]); int sigMult = mxGetScalar(prhs[12]); int maxTrialLength = mxGetScalar(prhs[13]); //particle weights/probabilities of hitting the bound KC_FP_TYPE * p_clte; KC_FP_TYPE * p_cet; KC_FP_TYPE * p_cgt; KC_FP_TYPE * p_clt; KC_FP_TYPE * p_cpr; checkCudaErrors(cudaMalloc((void**)&p_clte, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cet, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cgt, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_clt, TT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cpr, TT*sizeof(KC_FP_TYPE))); KC_FP_TYPE * wt; KC_FP_TYPE * wt_p; KC_FP_TYPE * pos;//particle positions checkCudaErrors(cudaMalloc((void**)&wt, (TT)*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&wt_p, (NT)*numParticles*sizeof(KC_FP_TYPE))); ce = cudaMalloc((void**)&pos, (TT)*numParticles*sizeof(KC_FP_TYPE)); if(ce != cudaSuccess) { mexPrintf("Error allocating pos "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } KC_FP_TYPE * log_li; KC_FP_TYPE * posc; //for resampling KC_FP_TYPE * lw; //unnormalized weights KC_FP_TYPE * lw2; KC_FP_TYPE * ncdf; KC_FP_TYPE * p_cet_0; KC_FP_TYPE * p_cgt_0a; KC_FP_TYPE * p_cgt_0b; KC_FP_TYPE * lg; //log p(y|at boundary) KC_FP_TYPE * cumsum; KC_FP_TYPE * beta_sum; 
checkCudaErrors(cudaMalloc((void**)&log_li, NT*numParticles*sizeof(KC_FP_TYPE))); //checkCudaErrors(cudaMalloc((void**)&log_lic, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&posc, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&lw, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&lw2, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&ncdf, TT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cet_0, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cgt_0a, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&p_cgt_0b, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&cumsum, NT*numParticles*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&beta_sum, NT*sizeof(KC_FP_TYPE))); checkCudaErrors(cudaMalloc((void**)&lg, TT*sizeof(KC_FP_TYPE))); KC_FP_TYPE * nEff; checkCudaErrors(cudaMalloc((void**)&nEff, NT*sizeof(KC_FP_TYPE))); int randSize = (NT*numParticles) + ((NT*numParticles)%2==0?0:1); int randSizeS = (NT) + (NT%2==0?0:1); int randSizeT = (TT) + (TT%2==0?0:1); KC_FP_TYPE * randN; KC_FP_TYPE * randNs; KC_FP_TYPE * randTs; ce = cudaMalloc((void**)&randN, randSize *sizeof(KC_FP_TYPE)); if(ce != cudaSuccess) { mexPrintf("Error allocating randN "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } ce = cudaMalloc((void**)&randNs, randSizeS*sizeof(KC_FP_TYPE)); if(ce != cudaSuccess) { mexPrintf("Error allocating randNs "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } ce = cudaMalloc((void**)&randTs, randSizeT*sizeof(KC_FP_TYPE)); if(ce != cudaSuccess) { mexPrintf("Error allocating randTs "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); } //setup the random number generator curandGenerator_t curandGen = 0; curandStatus_t curandStatus; curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT); if(curandStatus != CURAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error initializing random number generator (%d).\n",(int)curandStatus); mexErrMsgTxt(buffer); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed); //curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, (unsigned int)time(NULL)); if(curandStatus != CURAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error random number seed (%d).\n",(int)curandStatus); mexErrMsgTxt(buffer); } curandStatus = curandGenerateSeeds(curandGen); if(curandStatus != CURAND_STATUS_SUCCESS) { char buffer [50]; sprintf(buffer, "Error random number generating seed (%d).\n",(int)curandStatus); mexErrMsgTxt(buffer); } //cudaThreadSetLimit(cudaLimitStackSize, 1024); //setup initial particle positions int blockSize , nBlocks; int blockSizeT, nBlocksT; int blockSizeN, nBlocksN; blockSizeT = 4; nBlocksT = TT/blockSizeT + ((TT%blockSizeT==0)?0:1); blockSizeN = 1; nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error before kcSetupLG "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) { kcSetupLG <<< nBlocksT, blockSizeT >>> (y,lg,g,dt,TT); ce = cudaDeviceSynchronize(); if(ce != 
cudaSuccess) { mexPrintf("Error after kcSetupLG<<<%d,%d>>> ",nBlocksT,blockSizeT); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } blockSize = 8; int totalThreads = numParticles*NT; nBlocks = totalThreads/blockSize + ((totalThreads%blockSize==0)?0:1); //mexPrintf("Max trial length = %d, blockSizes = %d,%d, nBlocks = %d,%d\n", maxTrialLength,blockSize,blockSizeN,nBlocks,nBlocksN); //forward pass loop for (int ii = 0; ii < maxTrialLength;ii++) { //move all particles foward cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN,randSize); //random sample steps for all particles ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { int currDev; cudaGetDevice(&currDev); mexPrintf("Error synchronizing post-rand draw 1 Size=%d ii=%d, current device=%d ",randSize,ii,currDev); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in particle propogation. Size=%d ii=%d ",randSize,ii); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } kcMoveParticles <<< nBlocks, blockSize >>> (y,pos,wt, b_gpu,betaIdxVector,l_0,g,w,dt,randN, sigMult,log_li,lw,lw2,ncdf, posc, trIdx, NT, TT, numParticles, ii); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { int currDev; cudaGetDevice(&currDev); mexPrintf("Error after kcMoveParticles<<<%d,%d>>> ii=%d/%d, dev=%d ",nBlocks,blockSize,ii,maxTrialLength,currDev); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //normalize weights kcNormalizeWeights <<< nBlocksN,blockSizeN >>> (y,wt,wt_p, lw, lw2, nEff, cumsum, trIdx, NT, TT, numParticles, ii); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcNormalizeWeights<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //check effective num particles, resample when necessary cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSize); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in resampler. 
ii=%d/%d ",ii,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } kcResampleParticles <<< nBlocks, blockSize >>> (y,pos,posc,wt,log_li,wt_p, minEffParticles,cumsum,nEff,randN,p_cet_0,p_cgt_0a,p_cgt_0b,ncdf,trIdx, NT, TT, numParticles, ii); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcResampleParticles<<<%d,%d>>> ii=%d/%d ",nBlocks,blockSize,ii,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //move passage density foward //__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, int * trIdx, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int t, int NT, int TT, int numParticles) { kcPropogateBoundaryDensity <<< nBlocksN,blockSizeN >>> (y,p_clt,p_cet,p_cgt,p_clte,p_cpr,p_cet_0,p_cgt_0a, p_cgt_0b, lg, nEff, minEffParticles, cumsum,trIdx, NT, TT, numParticles, ii); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcPropogateBoundaryDensity<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } } //backwards sample the particles for (int jj = maxTrialLength-1; jj >= 0; jj--) { cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSizeS); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in backwards sampler (1). jj=%d/%d ",jj,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randNs,randSizeS); //ce = cudaDeviceSynchronize(); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in backwards sampler (2). jj=%d/%d ",jj,maxTrialLength); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error synchronizing before kcBackwardsSample (post random generation) jj=%d/%d ",jj,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } kcBackwardsSample <<< nBlocksN,blockSizeN >>> (lambdaTarget, auxiliaryTarget, pos, wt, ncdf, b_gpu, betaIdxVector, l_0, w, g, p_cpr, p_clte, randN, randNs, wt_p, cumsum, trIdx, NT, TT, numParticles, jj); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcBackwardsSample<<<%d,%d>>> jj=%d/%d ",nBlocksN,blockSizeN,jj,maxTrialLength); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } } cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randTs, randSizeT); //ce = cudaDeviceSynchronize(); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error after rand generation in final sampler (2). 
"); mexPrintf(" (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error synchronizing before kcForwardFinalPass (post random generation) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //samples all latent variables beyond bound hit time kcForwardFinalPass <<< nBlocksN,blockSizeN >>> (lambdaTarget, auxiliaryTarget, randTs, b_gpu, betaIdxVector, l_0, w, trIdx, NT, beta_sum); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error after kcForwardFinalPass "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } //gets some statistics about the latent variables put together to be able to sample the drift rates KC_FP_TYPE * sampling_c; KC_FP_TYPE * sampling_p; checkCudaErrors(cudaMalloc((void**)&sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1))); checkCudaErrors(cudaMalloc((void**)&sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1))); checkCudaErrors(cudaMemcpy(sampling_c,(KC_FP_TYPE*)mxGetPr(prhs[14]), sizeof(KC_FP_TYPE)*(numBetas+1),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(sampling_p,(KC_FP_TYPE*)mxGetPr(prhs[15]), sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),cudaMemcpyHostToDevice)); kcAssembleSamplingStatistics<<<1,1>>>(sampling_p, sampling_c, lambdaTarget, auxiliaryTarget, beta_sum,betaIdxVector,l_0, w, trIdx, NT, numBetas); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(prhs[14]),sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(prhs[15]),sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),cudaMemcpyDeviceToHost)); //free up memory cre = curandDestroyGenerator(curandGen); if(cre != CURAND_STATUS_SUCCESS) { mexPrintf("Error destroying rand generator (%d)\n", (int)cre); mexErrMsgTxt("CUDA Errors"); } ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error synchronizing post-rand generator destruction (particleFilter) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } checkCudaErrors(cudaFree(b_gpu)); checkCudaErrors(cudaFree(p_clte)); checkCudaErrors(cudaFree(p_cet)); checkCudaErrors(cudaFree(p_cgt)); checkCudaErrors(cudaFree(p_clt)); checkCudaErrors(cudaFree(p_cpr)); checkCudaErrors(cudaFree(pos)); checkCudaErrors(cudaFree(wt)); ce = cudaFree(wt_p); if(ce != cudaSuccess) { mexPrintf("Error freeing memory in particle filter (wt_p) "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } checkCudaErrors(cudaFree(log_li)); checkCudaErrors(cudaFree(posc)); checkCudaErrors(cudaFree(lw)); checkCudaErrors(cudaFree(lw2)); checkCudaErrors(cudaFree(ncdf)); checkCudaErrors(cudaFree(p_cet_0)); checkCudaErrors(cudaFree(p_cgt_0a)); checkCudaErrors(cudaFree(p_cgt_0b)); checkCudaErrors(cudaFree(lg)); checkCudaErrors(cudaFree(cumsum)); checkCudaErrors(cudaFree(beta_sum)); checkCudaErrors(cudaFree(sampling_c)); checkCudaErrors(cudaFree(sampling_p)); checkCudaErrors(cudaFree(nEff)); checkCudaErrors(cudaFree(randN)); checkCudaErrors(cudaFree(randNs)); checkCudaErrors(cudaFree(randTs)); ce = cudaDeviceSynchronize(); if(ce != cudaSuccess) { mexPrintf("Error at the end ofthe particle filter "); mexPrintf(cudaGetErrorString(ce)); mexPrintf(" (%d)\n", (int)ce); mexErrMsgTxt("CUDA Errors"); } }
59a2b814ee901570ce937ae1d18b964a3347658b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstdio>
#include <iostream>

#include "util.hpp"

// TODO : implement a kernel that reverses a string of length n in place
__global__ void reverse_string(char* str, int n) {
    int i = threadIdx.x;
    __shared__ char buf[1024];
    buf[i] = str[i];
    __syncthreads();
    str[i] = buf[n-1-i];
}

int main(int argc, char** argv) {
    // check that the user has passed a string to reverse
    if(argc<2) {
        std::cout << "usage : ./string_reverse \"string to reverse\"\n" << std::endl;
        exit(0);
    }

    // determine the length of the string, and copy into a buffer
    auto n = strlen(argv[1]);
    auto string = malloc_managed<char>(n+1);
    std::copy(argv[1], argv[1]+n, string);
    string[n] = 0; // add null terminator

    std::cout << "string to reverse:\n" << string << "\n";

    // TODO : call the string reverse function
    hipLaunchKernelGGL(( reverse_string), dim3(1), dim3(n), 0, 0, string, n);

    // print reversed string
    hipDeviceSynchronize();
    std::cout << "reversed string:\n" << string << "\n";

    // free memory
    hipFree(string);

    return 0;
}
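For reference, the only substantive differences hipify introduced in this file relative to its CUDA counterpart below are the launch syntax and the runtime calls; a comment-only sketch of the mapping:

// Launch-syntax mapping applied by hipify in this file (reference sketch):
//
//   CUDA:  reverse_string<<<1, n>>>(string, n);
//   HIP :  hipLaunchKernelGGL((reverse_string), dim3(1), dim3(n), 0, 0, string, n);
//
// The extra HIP arguments are, in order: kernel, grid dim, block dim, dynamic
// shared-memory bytes, stream (0 = default), then the kernel's own arguments.
// Runtime calls map one-to-one here: cudaDeviceSynchronize -> hipDeviceSynchronize,
// cudaFree -> hipFree.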
59a2b814ee901570ce937ae1d18b964a3347658b.cu
#include <cstdlib>
#include <cstdio>
#include <iostream>

#include "util.hpp"

// TODO : implement a kernel that reverses a string of length n in place
__global__ void reverse_string(char* str, int n) {
    int i = threadIdx.x;
    __shared__ char buf[1024];
    buf[i] = str[i];
    __syncthreads();
    str[i] = buf[n-1-i];
}

int main(int argc, char** argv) {
    // check that the user has passed a string to reverse
    if(argc<2) {
        std::cout << "usage : ./string_reverse \"string to reverse\"\n" << std::endl;
        exit(0);
    }

    // determine the length of the string, and copy into a buffer
    auto n = strlen(argv[1]);
    auto string = malloc_managed<char>(n+1);
    std::copy(argv[1], argv[1]+n, string);
    string[n] = 0; // add null terminator

    std::cout << "string to reverse:\n" << string << "\n";

    // TODO : call the string reverse function
    reverse_string<<<1, n>>>(string, n);

    // print reversed string
    cudaDeviceSynchronize();
    std::cout << "reversed string:\n" << string << "\n";

    // free memory
    cudaFree(string);

    return 0;
}
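The kernel in this exercise only works for strings that fit in a single block and its 1024-byte shared buffer. A sketch (not from the original exercise) of an in-place variant without that limit, swapping characters from both ends with one thread per swap:

// Sketch: in-place reversal for strings longer than one block / 1024 chars.
// Each thread swaps one pair (i, n-1-i); the middle character of an odd-length
// string is left untouched.
__global__ void reverse_string_long(char* str, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n / 2) {
        char tmp       = str[i];
        str[i]         = str[n - 1 - i];
        str[n - 1 - i] = tmp;
    }
}

// hypothetical launch for a managed buffer `string` of length n:
//   int threads = 256;
//   int blocks  = (n / 2 + threads - 1) / threads;
//   reverse_string_long<<<blocks, threads>>>(string, n);
//   cudaDeviceSynchronize();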
b8c90280f3a3c1e6f834d3d0b8f4987367056d2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "Header.h" hipError_t classifiedPointsMemoryWithCuda(Cluster* clusters, Point *points, unsigned int n, unsigned int k, bool* isPointChangedCluster); __device__ double Distance(double x1, double y1, double x2, double y2) { return sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2)); } __global__ void classifiedPointsKernel(Cluster* clusters, Point *points, bool *flags, unsigned int k, unsigned int n) { int idx = blockIdx.x * NUM_THREADS_IN_BLOCK + threadIdx.x; if (idx < n) { int minIndex = -1; double minDistance = DBL_MAX; for (int i = 0; i < k; i++) { double distanceTmp = Distance(points[idx].x, points[idx].y, clusters[i].centerX, clusters[i].centerY); if (distanceTmp < minDistance) { minDistance = distanceTmp; minIndex = i; } } if (points[idx].clusterID != minIndex) flags[idx] = true; points[idx].clusterID = minIndex; } } int cudaClassifiedPoints(Cluster* clusters, Point* points, int n, int k, bool* isPointChangedCluster) { *isPointChangedCluster = false; hipError_t cudaStatus = classifiedPointsMemoryWithCuda(clusters, points, n, k, isPointChangedCluster); if (cudaStatus != hipSuccess) { fprintf(stderr, "groupPointsMemoryWithCuda failed!"); return 1; } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t classifiedPointsMemoryWithCuda(Cluster* clusters, Point *points, unsigned int n, unsigned int k, bool* isPointChangedCluster) { Cluster *dev_clusters; Point *dev_points; hipError_t cudaStatus; bool* dev_flags; bool* flags = (bool*)malloc(n * sizeof(bool)); int numBlocks; initFlagsArrOmp(&flags, n); /*for (int i = 0; i < n; i++) flags[i] = false;*/ //dim3 dimGrid(numBlocks, 4, 4); if (n % NUM_THREADS_IN_BLOCK == 0) numBlocks = n / NUM_THREADS_IN_BLOCK; else numBlocks = (n / NUM_THREADS_IN_BLOCK) + 1; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); return cudaStatus; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_clusters, k * sizeof(Cluster)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc clusters failed!"); freeResources(1, dev_clusters); return cudaStatus; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_points, n * sizeof(Point)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc points failed!"); freeResources(2, dev_points, dev_clusters); return cudaStatus; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_flags, n * sizeof(bool)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc flags failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(dev_points, points, n * sizeof(Point), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy points failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_clusters, clusters, k * sizeof(Cluster), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_flags, flags, n * sizeof(bool), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy flags failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Launch a kernel on the GPU with one thread for each point. classifiedPointsKernel << <numBlocks, NUM_THREADS_IN_BLOCK >> >(dev_clusters, dev_points, dev_flags, k, n); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "classifiedPointsKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching classifiedPointsKernel!\n", cudaStatus); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(points, dev_points, n * sizeof(Point), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy points failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } cudaStatus = hipMemcpy(flags, dev_flags, n * sizeof(bool), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy flags failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } *isPointChangedCluster = mergeFlagsArrOmp(flags, n); free(flags); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } int freeResources(int size, ...) { hipError_t cudaStatus; va_list list; va_start(list, size); for (int i = 0; i < size; i++) { cudaStatus = hipFree(va_arg(list, void*)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipFree failed!"); return 1; } } va_end(list); return 0; }
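initFlagsArrOmp and mergeFlagsArrOmp are declared in Header.h and their definitions are not part of this file. Judging only from how they are called here (clear the per-point flags before the kernel, then collapse them into a single "did any point change cluster" result), they presumably look roughly like the OpenMP sketch below; this is an assumption, not the real implementation.

#include <omp.h>

// Assumed shape of the two helpers from Header.h (reconstructed from usage only).
void initFlagsArrOmp(bool** flags, int n) {          // clear all per-point flags
    bool* f = *flags;
    #pragma omp parallel for
    for (int i = 0; i < n; i++)
        f[i] = false;
}

bool mergeFlagsArrOmp(bool* flags, int n) {          // true if any flag was set
    bool changed = false;
    #pragma omp parallel for reduction(||:changed)
    for (int i = 0; i < n; i++)
        changed = changed || flags[i];
    return changed;
}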
b8c90280f3a3c1e6f834d3d0b8f4987367056d2a.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "Header.h" cudaError_t classifiedPointsMemoryWithCuda(Cluster* clusters, Point *points, unsigned int n, unsigned int k, bool* isPointChangedCluster); __device__ double Distance(double x1, double y1, double x2, double y2) { return sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2)); } __global__ void classifiedPointsKernel(Cluster* clusters, Point *points, bool *flags, unsigned int k, unsigned int n) { int idx = blockIdx.x * NUM_THREADS_IN_BLOCK + threadIdx.x; if (idx < n) { int minIndex = -1; double minDistance = DBL_MAX; for (int i = 0; i < k; i++) { double distanceTmp = Distance(points[idx].x, points[idx].y, clusters[i].centerX, clusters[i].centerY); if (distanceTmp < minDistance) { minDistance = distanceTmp; minIndex = i; } } if (points[idx].clusterID != minIndex) flags[idx] = true; points[idx].clusterID = minIndex; } } int cudaClassifiedPoints(Cluster* clusters, Point* points, int n, int k, bool* isPointChangedCluster) { *isPointChangedCluster = false; cudaError_t cudaStatus = classifiedPointsMemoryWithCuda(clusters, points, n, k, isPointChangedCluster); if (cudaStatus != cudaSuccess) { fprintf(stderr, "groupPointsMemoryWithCuda failed!"); return 1; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t classifiedPointsMemoryWithCuda(Cluster* clusters, Point *points, unsigned int n, unsigned int k, bool* isPointChangedCluster) { Cluster *dev_clusters; Point *dev_points; cudaError_t cudaStatus; bool* dev_flags; bool* flags = (bool*)malloc(n * sizeof(bool)); int numBlocks; initFlagsArrOmp(&flags, n); /*for (int i = 0; i < n; i++) flags[i] = false;*/ //dim3 dimGrid(numBlocks, 4, 4); if (n % NUM_THREADS_IN_BLOCK == 0) numBlocks = n / NUM_THREADS_IN_BLOCK; else numBlocks = (n / NUM_THREADS_IN_BLOCK) + 1; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); return cudaStatus; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_clusters, k * sizeof(Cluster)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc clusters failed!"); freeResources(1, dev_clusters); return cudaStatus; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_points, n * sizeof(Point)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc points failed!"); freeResources(2, dev_points, dev_clusters); return cudaStatus; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_flags, n * sizeof(bool)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc flags failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_points, points, n * sizeof(Point), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy points failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(dev_clusters, clusters, k * sizeof(Cluster), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_flags, flags, n * sizeof(bool), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy flags failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Launch a kernel on the GPU with one thread for each point. classifiedPointsKernel << <numBlocks, NUM_THREADS_IN_BLOCK >> >(dev_clusters, dev_points, dev_flags, k, n); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "classifiedPointsKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching classifiedPointsKernel!\n", cudaStatus); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(points, dev_points, n * sizeof(Point), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy points failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } cudaStatus = cudaMemcpy(flags, dev_flags, n * sizeof(bool), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy flags failed!"); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } *isPointChangedCluster = mergeFlagsArrOmp(flags, n); free(flags); freeResources(3, dev_points, dev_clusters, dev_flags); return cudaStatus; } int freeResources(int size, ...) { cudaError cudaStatus; va_list list; va_start(list, size); for (int i = 0; i < size; i++) { cudaStatus = cudaFree(va_arg(list, void*)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaFree failed!"); return 1; } } va_end(list); return 0; }
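Point and Cluster are defined in Header.h, which is not shown; from the kernel's accesses they must contain at least x, y, clusterID and centerX, centerY respectively. A minimal host-side usage sketch under those assumed layouts (NUM_THREADS_IN_BLOCK also comes from Header.h):

#include <cstdio>

// Assumed layouts -- the real definitions live in Header.h.
struct Point   { double x, y; int clusterID; };
struct Cluster { double centerX, centerY; };

int cudaClassifiedPoints(Cluster* clusters, Point* points, int n, int k, bool* changed);

int main() {
    const int n = 4, k = 2;
    Point   points[n]   = { {0,0,-1}, {0,1,-1}, {10,10,-1}, {10,11,-1} };
    Cluster clusters[k] = { {0,0}, {10,10} };

    bool changed = false;
    // assign every point to its nearest centre on the GPU; `changed` reports
    // whether any point switched cluster relative to its previous clusterID
    cudaClassifiedPoints(clusters, points, n, k, &changed);

    for (int i = 0; i < n; i++)
        printf("point %d -> cluster %d\n", i, points[i].clusterID);
    printf("any point changed cluster: %s\n", changed ? "yes" : "no");
    return 0;
}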
e70fa76377c047db70da57fbede9e0f0e3fd640f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <af/defines.h> #include <backend.hpp> #include <dispatch.hpp> #include <Param.hpp> #include <debug_cuda.hpp> #include <math.hpp> #include "shared.hpp" #include <convolve.hpp> namespace cuda { namespace kernel { static const int THREADS = 256; static const int THREADS_X = 16; static const int THREADS_Y = 16; static const int CUBE_X = 8; static const int CUBE_Y = 8; static const int CUBE_Z = 4; // below shared MAX_*_LEN's are calculated based on // a maximum shared memory configuration of 48KB per block // considering complex types as well static const int MAX_CONV1_FILTER_LEN = 129; static const int MAX_CONV2_FILTER_LEN = 17; static const int MAX_CONV3_FILTER_LEN = 5; // we shall declare the maximum size required of above all three cases // and re-use the same constant memory locations for every case __constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)]; template<typename T, typename aT, bool expand> __global__ void convolve1(Param<T> out, CParam<T> signal, int fLen, int nBBS0, int nBBS1, int o1, int o2, int o3, int s1, int s2, int s3) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); const int padding = fLen-1; const int shrdLen = blockDim.x + 2*padding; const unsigned b1 = blockIdx.x/nBBS0; /* [0 {1} 2 3] */ const unsigned b3 = blockIdx.y/nBBS1; /* [0 1 2 {3}] */ const unsigned b2 = blockIdx.y-nBBS1*b3;/* [0 1 {2} 3] */ T *dst = (T *)out.ptr + (b1 * out.strides[1] + /* activated with batched input signal */ o1 * out.strides[1] + /* activated with batched input filter */ b2 * out.strides[2] + /* activated with batched input signal */ o2 * out.strides[2] + /* activated with batched input filter */ b3 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b1 * signal.strides[1] + /* activated with batched input signal */ s1 * signal.strides[1] + /* activated with batched input filter */ b2 * signal.strides[2] + /* activated with batched input signal */ s2 * signal.strides[2] + /* activated with batched input filter */ b3 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; int gx = blockDim.x*(blockIdx.x-b1*nBBS0); int s0 = signal.strides[0]; int d0 = signal.dims[0]; for (int i=threadIdx.x; i<shrdLen; i+=blockDim.x) { int idx= gx-padding + i; shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0); } __syncthreads(); gx += threadIdx.x; if (gx<out.dims[0]) { int lx = threadIdx.x + padding + (expand ? 
0 : fLen>>1); aT accum = scalar<aT>(0); for(int f=0; f<fLen; ++f) { accum = accum + (shrdMem[lx-f]*impulse[f]); } dst[gx] = (T)accum; } } template<typename T, typename aT, bool expand, int fLen0, int fLen1> __global__ void convolve2(Param<T> out, CParam<T> signal, int nBBS0, int nBBS1, int o2, int o3, int s2, int s3) { const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1)); __shared__ T shrdMem[C_SIZE]; const int radius0 = fLen0-1; const int radius1 = fLen1-1; const int padding0 = 2*radius0; const int padding1 = 2*radius1; const int shrdLen0 = THREADS_X + padding0; const int shrdLen1 = THREADS_Y + padding1; unsigned b0 = blockIdx.x/nBBS0; unsigned b1 = blockIdx.y/nBBS1; T *dst = (T *)out.ptr + (b0 * out.strides[2] + /* activated with batched input signal */ o2 * out.strides[2] + /* activated with batched input filter */ b1 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b0 * signal.strides[2] + /* activated with batched input signal */ s2 * signal.strides[2] + /* activated with batched input filter */ b1 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; int lx = threadIdx.x; int ly = threadIdx.y; int gx = THREADS_X * (blockIdx.x-b0*nBBS0) + lx; int gy = THREADS_Y * (blockIdx.y-b1*nBBS1) + ly; int s0 = signal.strides[0]; int s1 = signal.strides[1]; int d0 = signal.dims[0]; int d1 = signal.dims[1]; // below loops are traditional loops, they only run multiple // times filter length is more than launch size #pragma unroll for (int b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) { int j = gy2-radius1; bool is_j = j>=0 && j<d1; // move row_set THREADS_Y along coloumns #pragma unroll for (int a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) { int i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0)); } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1]) { int ci = lx + radius0 + (expand ? 0 : fLen0>>1); int cj = ly + radius1 + (expand ? 
0 : fLen1>>1); aT accum = scalar<aT>(0); #pragma unroll for(int fj=0; fj<fLen1; ++fj) { #pragma unroll for(int fi=0; fi<fLen0; ++fi) { aT f_val = impulse[fj*fLen0+fi]; T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)]; accum = accum + s_val*f_val; } } dst[gy*out.strides[1]+gx] = (T)accum; } } __inline__ __device__ int index(int i, int j, int k, int jstride, int kstride) { return i+j*jstride+k*kstride; } template<typename T, typename aT, bool expand> __global__ void convolve3(Param<T> out, CParam<T> signal, int fLen0, int fLen1, int fLen2, int nBBS, int o3, int s3) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); int radius0 = fLen0-1; int radius1 = fLen1-1; int radius2 = fLen2-1; int shrdLen0 = blockDim.x + 2*radius0; int shrdLen1 = blockDim.y + 2*radius1; int shrdLen2 = blockDim.z + 2*radius2; int skStride = shrdLen0 * shrdLen1; int fStride = fLen0 * fLen1; unsigned b2 = blockIdx.x/nBBS; T *dst = (T *)out.ptr + (b2 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b2 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; int lx = threadIdx.x; int ly = threadIdx.y; int lz = threadIdx.z; int gx = blockDim.x * (blockIdx.x-b2*nBBS) + lx; int gy = blockDim.y * blockIdx.y + ly; int gz = blockDim.z * blockIdx.z + lz; int s0 = signal.strides[0]; int s1 = signal.strides[1]; int s2 = signal.strides[2]; int d0 = signal.dims[0]; int d1 = signal.dims[1]; int d2 = signal.dims[2]; #pragma unroll for (int c=lz, gz2=gz; c<shrdLen2; c+=CUBE_Z, gz2+=CUBE_Z) { int k = gz2-radius2; bool is_k = k>=0 && k<d2; #pragma unroll for (int b=ly, gy2=gy; b<shrdLen1; b+=CUBE_Y, gy2+=CUBE_Y) { int j = gy2-radius1; bool is_j = j>=0 && j<d1; #pragma unroll for (int a=lx, gx2=gx; a<shrdLen0; a+=CUBE_X, gx2+=CUBE_X) { int i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[c*skStride+b*shrdLen0+a] = (is_i && is_j && is_k ? src[i*s0+j*s1+k*s2] : scalar<T>(0)); } } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) { int ci = lx + radius0 + (expand ? 0 : fLen0>>1); int cj = ly + radius1 + (expand ? 0 : fLen1>>1); int ck = lz + radius2 + (expand ? 0 : fLen2>>1); aT accum = scalar<aT>(0); #pragma unroll for(int fk=0; fk<fLen2; ++fk) { #pragma unroll for(int fj=0; fj<fLen1; ++fj) { #pragma unroll for(int fi=0; fi<fLen0; ++fi) { aT f_val = impulse[index(fi, fj, fk, fLen0, fStride)]; T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)]; accum = accum + s_val*f_val; } } } dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum; } } struct conv_kparam_t { dim3 mBlocks; dim3 mThreads; size_t mSharedSize; int mBlk_x; int mBlk_y; bool outHasNoOffset; bool inHasNoOffset; bool launchMoreBlocks; int o[3]; int s[3]; }; template<typename T> void prepareKernelArgs(conv_kparam_t &params, dim_t oDims[], dim_t fDims[], int baseDim) { int batchDims[4] = {1, 1, 1, 1}; for(int i=baseDim; i<4; ++i) { batchDims[i] = (params.launchMoreBlocks ? 
1 : oDims[i]); } if (baseDim==1) { params.mThreads = dim3(THREADS, 1); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = batchDims[2]; params.mBlocks = dim3(params.mBlk_x * batchDims[1], params.mBlk_y * batchDims[3]); params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * sizeof(T); } else if (baseDim==2) { params.mThreads = dim3(THREADS_X, THREADS_Y); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = divup(oDims[1], params.mThreads.y); params.mBlocks = dim3(params.mBlk_x * batchDims[2], params.mBlk_y * batchDims[3]); } else if (baseDim==3) { params.mThreads = dim3(CUBE_X, CUBE_Y, CUBE_Z); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = divup(oDims[1], params.mThreads.y); int blk_z = divup(oDims[2], params.mThreads.z); params.mBlocks = dim3(params.mBlk_x * batchDims[3], params.mBlk_y, blk_z); params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * (params.mThreads.y+2*(fDims[1]-1)) * (params.mThreads.z+2*(fDims[2]-1)) * sizeof(T); } } template<typename T, typename aT, bool expand, int f0, int f1> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig) { CUDA_LAUNCH((convolve2<T, aT, expand, f0, f1>), p.mBlocks, p.mThreads, out, sig, p.mBlk_x, p.mBlk_y, p.o[1], p.o[2], p.s[1], p.s[2]); POST_LAUNCH_CHECK(); } template<typename T, typename aT, bool expand, int f0> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, int f1) { switch(f1) { case 1: conv2Helper<T, aT, expand, f0, 1>(p, out, sig); break; case 2: conv2Helper<T, aT, expand, f0, 2>(p, out, sig); break; case 3: conv2Helper<T, aT, expand, f0, 3>(p, out, sig); break; case 4: conv2Helper<T, aT, expand, f0, 4>(p, out, sig); break; case 5: conv2Helper<T, aT, expand, f0, 5>(p, out, sig); break; default: CUDA_NOT_SUPPORTED(); } } template<typename T, typename aT, bool expand> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, int f0, int f1) { switch(f0) { case 1: conv2Helper<T, aT, expand, 1>(p, out, sig, f1); break; case 2: conv2Helper<T, aT, expand, 2>(p, out, sig, f1); break; case 3: conv2Helper<T, aT, expand, 3>(p, out, sig, f1); break; case 4: conv2Helper<T, aT, expand, 4>(p, out, sig, f1); break; case 5: conv2Helper<T, aT, expand, 5>(p, out, sig, f1); break; default: { if (f0==f1) { switch(f1) { case 6: conv2Helper<T, aT, expand, 6, 6>(p, out, sig); break; case 7: conv2Helper<T, aT, expand, 7, 7>(p, out, sig); break; case 8: conv2Helper<T, aT, expand, 8, 8>(p, out, sig); break; case 9: conv2Helper<T, aT, expand, 9, 9>(p, out, sig); break; case 10: conv2Helper<T, aT, expand, 10, 10>(p, out, sig); break; case 11: conv2Helper<T, aT, expand, 11, 11>(p, out, sig); break; case 12: conv2Helper<T, aT, expand, 12, 12>(p, out, sig); break; case 13: conv2Helper<T, aT, expand, 13, 13>(p, out, sig); break; case 14: conv2Helper<T, aT, expand, 14, 14>(p, out, sig); break; case 15: conv2Helper<T, aT, expand, 15, 15>(p, out, sig); break; case 16: conv2Helper<T, aT, expand, 16, 16>(p, out, sig); break; case 17: conv2Helper<T, aT, expand, 17, 17>(p, out, sig); break; default: CUDA_NOT_SUPPORTED(); } } else CUDA_NOT_SUPPORTED(); } break; } } template<typename T, typename aT, bool expand> void convolve_1d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 1); int filterLen = filt.dims[0]; for (int b3=0; b3<filt.dims[3]; ++b3) { int f3Off = b3 * filt.strides[3]; for (int b2=0; b2<filt.dims[2]; ++b2) { int f2Off = b2 * filt.strides[2]; for (int b1=0; b1<filt.dims[1]; ++b1) { int f1Off = 
b1 * filt.strides[1]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(hipMemcpyToSymbolAsync(kernel::cFilter, filt.ptr+(f1Off+f2Off+f3Off), filterLen*sizeof(aT), 0, hipMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId()))); p.o[0] = (p.outHasNoOffset ? 0 : b1); p.o[1] = (p.outHasNoOffset ? 0 : b2); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[0] = (p.inHasNoOffset ? 0 : b1); p.s[1] = (p.inHasNoOffset ? 0 : b2); p.s[2] = (p.inHasNoOffset ? 0 : b3); CUDA_LAUNCH_SMEM((convolve1<T, aT, expand>), p.mBlocks, p.mThreads, p.mSharedSize, out, sig, filt.dims[0], p.mBlk_x, p.mBlk_y, p.o[0], p.o[1], p.o[2], p.s[0], p.s[1], p.s[2]); POST_LAUNCH_CHECK(); } } } } template<typename T, typename aT, bool expand> void convolve_2d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 2); int filterLen = filt.dims[0] * filt.dims[1]; for (int b3=0; b3<filt.dims[3]; ++b3) { int f3Off = b3 * filt.strides[3]; for (int b2=0; b2<filt.dims[2]; ++b2) { int f2Off = b2 * filt.strides[2]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(hipMemcpyToSymbolAsync(kernel::cFilter, filt.ptr+(f2Off+f3Off), filterLen*sizeof(aT), 0, hipMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId()))); p.o[1] = (p.outHasNoOffset ? 0 : b2); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[1] = (p.inHasNoOffset ? 0 : b2); p.s[2] = (p.inHasNoOffset ? 0 : b3); conv2Helper<T, aT, expand>(p, out, sig, filt.dims[0], filt.dims[1]); } } } template<typename T, typename aT, bool expand> void convolve_3d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 3); int filterLen = filt.dims[0] * filt.dims[1] * filt.dims[2]; for (int b3=0; b3<filt.dims[3]; ++b3) { int f3Off = b3 * filt.strides[3]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(hipMemcpyToSymbolAsync(kernel::cFilter, filt.ptr+f3Off, filterLen*sizeof(aT), 0, hipMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId()))); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[2] = (p.inHasNoOffset ? 
0 : b3); CUDA_LAUNCH_SMEM((convolve3<T, aT, expand>), p.mBlocks, p.mThreads, p.mSharedSize, out, sig, filt.dims[0], filt.dims[1], filt.dims[2], p.mBlk_x, p.o[2], p.s[2]); POST_LAUNCH_CHECK(); } } template<typename T, typename aT, int baseDim, bool expand> void convolve_nd(Param<T> out, CParam<T> signal, CParam<aT> filt, ConvolveBatchKind kind) { bool callKernel = true; int MCFL2 = kernel::MAX_CONV2_FILTER_LEN; int MCFL3 = kernel::MAX_CONV3_FILTER_LEN; switch(baseDim) { case 1: if (filt.dims[0]>kernel::MAX_CONV1_FILTER_LEN) callKernel = false; break; case 2: if ((filt.dims[0]*filt.dims[1]) > (MCFL2 * MCFL2)) callKernel = false; break; case 3: if ((filt.dims[0]*filt.dims[1]*filt.dims[2]) > (MCFL3 * MCFL3 * MCFL3)) callKernel = false; break; } if (!callKernel) { CUDA_NOT_SUPPORTED(); } conv_kparam_t param; for (int i=0; i<3; ++i) { param.o[i] = 0; param.s[i] = 0; } param.launchMoreBlocks = kind==CONVOLVE_BATCH_SAME || kind==CONVOLVE_BATCH_KERNEL; param.outHasNoOffset = kind==CONVOLVE_BATCH_SIGNAL || kind==CONVOLVE_BATCH_NONE; param.inHasNoOffset = kind!=CONVOLVE_BATCH_SAME; switch(baseDim) { case 1: convolve_1d<T, aT, expand>(param, out, signal, filt); break; case 2: convolve_2d<T, aT, expand>(param, out, signal, filt); break; case 3: convolve_3d<T, aT, expand>(param, out, signal, filt); break; } POST_LAUNCH_CHECK(); } #define INSTANTIATE(T, aT) \ template void convolve_nd<T, aT, 1, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 1, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 2, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 2, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 3, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 3, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ INSTANTIATE(cdouble, cdouble) INSTANTIATE(cfloat , cfloat) INSTANTIATE(double , double) INSTANTIATE(float , float) INSTANTIATE(uint , float) INSTANTIATE(int , float) INSTANTIATE(uchar , float) INSTANTIATE(char , float) INSTANTIATE(ushort , float) INSTANTIATE(short , float) } }
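conv2Helper above exists to turn the runtime filter size into compile-time template parameters so the accumulation loops can be fully unrolled. A stripped-down, host-side sketch of the same dispatch pattern (names are illustrative, not ArrayFire's):

#include <cstdio>

// FLEN is a compile-time constant here, so the compiler can unroll the inner
// loop -- the same reason convolve2 takes fLen0/fLen1 as template parameters.
template<int FLEN>
void boxFilterFixed(const float* in, float* out, int n) {
    for (int i = FLEN / 2; i < n - FLEN / 2; ++i) {
        float acc = 0.f;
        for (int f = 0; f < FLEN; ++f)
            acc += in[i + f - FLEN / 2];
        out[i] = acc / FLEN;
    }
}

// Runtime length -> template instantiation, mirroring the switch in conv2Helper.
void boxFilterDispatch(const float* in, float* out, int n, int flen) {
    switch (flen) {
        case 3: boxFilterFixed<3>(in, out, n); break;
        case 5: boxFilterFixed<5>(in, out, n); break;
        case 7: boxFilterFixed<7>(in, out, n); break;
        default: std::printf("filter length %d not supported\n", flen);
    }
}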
e70fa76377c047db70da57fbede9e0f0e3fd640f.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <af/defines.h> #include <backend.hpp> #include <dispatch.hpp> #include <Param.hpp> #include <debug_cuda.hpp> #include <math.hpp> #include "shared.hpp" #include <convolve.hpp> namespace cuda { namespace kernel { static const int THREADS = 256; static const int THREADS_X = 16; static const int THREADS_Y = 16; static const int CUBE_X = 8; static const int CUBE_Y = 8; static const int CUBE_Z = 4; // below shared MAX_*_LEN's are calculated based on // a maximum shared memory configuration of 48KB per block // considering complex types as well static const int MAX_CONV1_FILTER_LEN = 129; static const int MAX_CONV2_FILTER_LEN = 17; static const int MAX_CONV3_FILTER_LEN = 5; // we shall declare the maximum size required of above all three cases // and re-use the same constant memory locations for every case __constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)]; template<typename T, typename aT, bool expand> __global__ void convolve1(Param<T> out, CParam<T> signal, int fLen, int nBBS0, int nBBS1, int o1, int o2, int o3, int s1, int s2, int s3) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); const int padding = fLen-1; const int shrdLen = blockDim.x + 2*padding; const unsigned b1 = blockIdx.x/nBBS0; /* [0 {1} 2 3] */ const unsigned b3 = blockIdx.y/nBBS1; /* [0 1 2 {3}] */ const unsigned b2 = blockIdx.y-nBBS1*b3;/* [0 1 {2} 3] */ T *dst = (T *)out.ptr + (b1 * out.strides[1] + /* activated with batched input signal */ o1 * out.strides[1] + /* activated with batched input filter */ b2 * out.strides[2] + /* activated with batched input signal */ o2 * out.strides[2] + /* activated with batched input filter */ b3 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b1 * signal.strides[1] + /* activated with batched input signal */ s1 * signal.strides[1] + /* activated with batched input filter */ b2 * signal.strides[2] + /* activated with batched input signal */ s2 * signal.strides[2] + /* activated with batched input filter */ b3 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; int gx = blockDim.x*(blockIdx.x-b1*nBBS0); int s0 = signal.strides[0]; int d0 = signal.dims[0]; for (int i=threadIdx.x; i<shrdLen; i+=blockDim.x) { int idx= gx-padding + i; shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0); } __syncthreads(); gx += threadIdx.x; if (gx<out.dims[0]) { int lx = threadIdx.x + padding + (expand ? 
0 : fLen>>1); aT accum = scalar<aT>(0); for(int f=0; f<fLen; ++f) { accum = accum + (shrdMem[lx-f]*impulse[f]); } dst[gx] = (T)accum; } } template<typename T, typename aT, bool expand, int fLen0, int fLen1> __global__ void convolve2(Param<T> out, CParam<T> signal, int nBBS0, int nBBS1, int o2, int o3, int s2, int s3) { const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1)); __shared__ T shrdMem[C_SIZE]; const int radius0 = fLen0-1; const int radius1 = fLen1-1; const int padding0 = 2*radius0; const int padding1 = 2*radius1; const int shrdLen0 = THREADS_X + padding0; const int shrdLen1 = THREADS_Y + padding1; unsigned b0 = blockIdx.x/nBBS0; unsigned b1 = blockIdx.y/nBBS1; T *dst = (T *)out.ptr + (b0 * out.strides[2] + /* activated with batched input signal */ o2 * out.strides[2] + /* activated with batched input filter */ b1 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b0 * signal.strides[2] + /* activated with batched input signal */ s2 * signal.strides[2] + /* activated with batched input filter */ b1 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; int lx = threadIdx.x; int ly = threadIdx.y; int gx = THREADS_X * (blockIdx.x-b0*nBBS0) + lx; int gy = THREADS_Y * (blockIdx.y-b1*nBBS1) + ly; int s0 = signal.strides[0]; int s1 = signal.strides[1]; int d0 = signal.dims[0]; int d1 = signal.dims[1]; // below loops are traditional loops, they only run multiple // times filter length is more than launch size #pragma unroll for (int b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) { int j = gy2-radius1; bool is_j = j>=0 && j<d1; // move row_set THREADS_Y along coloumns #pragma unroll for (int a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) { int i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0)); } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1]) { int ci = lx + radius0 + (expand ? 0 : fLen0>>1); int cj = ly + radius1 + (expand ? 
0 : fLen1>>1); aT accum = scalar<aT>(0); #pragma unroll for(int fj=0; fj<fLen1; ++fj) { #pragma unroll for(int fi=0; fi<fLen0; ++fi) { aT f_val = impulse[fj*fLen0+fi]; T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)]; accum = accum + s_val*f_val; } } dst[gy*out.strides[1]+gx] = (T)accum; } } __inline__ __device__ int index(int i, int j, int k, int jstride, int kstride) { return i+j*jstride+k*kstride; } template<typename T, typename aT, bool expand> __global__ void convolve3(Param<T> out, CParam<T> signal, int fLen0, int fLen1, int fLen2, int nBBS, int o3, int s3) { SharedMemory<T> shared; T * shrdMem = shared.getPointer(); int radius0 = fLen0-1; int radius1 = fLen1-1; int radius2 = fLen2-1; int shrdLen0 = blockDim.x + 2*radius0; int shrdLen1 = blockDim.y + 2*radius1; int shrdLen2 = blockDim.z + 2*radius2; int skStride = shrdLen0 * shrdLen1; int fStride = fLen0 * fLen1; unsigned b2 = blockIdx.x/nBBS; T *dst = (T *)out.ptr + (b2 * out.strides[3] + /* activated with batched input signal */ o3 * out.strides[3]); /* activated with batched input filter */ const T *src = (const T *)signal.ptr + (b2 * signal.strides[3] + /* activated with batched input signal */ s3 * signal.strides[3]); /* activated with batched input filter */ const aT *impulse = (const aT *)cFilter; int lx = threadIdx.x; int ly = threadIdx.y; int lz = threadIdx.z; int gx = blockDim.x * (blockIdx.x-b2*nBBS) + lx; int gy = blockDim.y * blockIdx.y + ly; int gz = blockDim.z * blockIdx.z + lz; int s0 = signal.strides[0]; int s1 = signal.strides[1]; int s2 = signal.strides[2]; int d0 = signal.dims[0]; int d1 = signal.dims[1]; int d2 = signal.dims[2]; #pragma unroll for (int c=lz, gz2=gz; c<shrdLen2; c+=CUBE_Z, gz2+=CUBE_Z) { int k = gz2-radius2; bool is_k = k>=0 && k<d2; #pragma unroll for (int b=ly, gy2=gy; b<shrdLen1; b+=CUBE_Y, gy2+=CUBE_Y) { int j = gy2-radius1; bool is_j = j>=0 && j<d1; #pragma unroll for (int a=lx, gx2=gx; a<shrdLen0; a+=CUBE_X, gx2+=CUBE_X) { int i = gx2-radius0; bool is_i = i>=0 && i<d0; shrdMem[c*skStride+b*shrdLen0+a] = (is_i && is_j && is_k ? src[i*s0+j*s1+k*s2] : scalar<T>(0)); } } } __syncthreads(); if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) { int ci = lx + radius0 + (expand ? 0 : fLen0>>1); int cj = ly + radius1 + (expand ? 0 : fLen1>>1); int ck = lz + radius2 + (expand ? 0 : fLen2>>1); aT accum = scalar<aT>(0); #pragma unroll for(int fk=0; fk<fLen2; ++fk) { #pragma unroll for(int fj=0; fj<fLen1; ++fj) { #pragma unroll for(int fi=0; fi<fLen0; ++fi) { aT f_val = impulse[index(fi, fj, fk, fLen0, fStride)]; T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)]; accum = accum + s_val*f_val; } } } dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum; } } struct conv_kparam_t { dim3 mBlocks; dim3 mThreads; size_t mSharedSize; int mBlk_x; int mBlk_y; bool outHasNoOffset; bool inHasNoOffset; bool launchMoreBlocks; int o[3]; int s[3]; }; template<typename T> void prepareKernelArgs(conv_kparam_t &params, dim_t oDims[], dim_t fDims[], int baseDim) { int batchDims[4] = {1, 1, 1, 1}; for(int i=baseDim; i<4; ++i) { batchDims[i] = (params.launchMoreBlocks ? 
1 : oDims[i]); } if (baseDim==1) { params.mThreads = dim3(THREADS, 1); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = batchDims[2]; params.mBlocks = dim3(params.mBlk_x * batchDims[1], params.mBlk_y * batchDims[3]); params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * sizeof(T); } else if (baseDim==2) { params.mThreads = dim3(THREADS_X, THREADS_Y); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = divup(oDims[1], params.mThreads.y); params.mBlocks = dim3(params.mBlk_x * batchDims[2], params.mBlk_y * batchDims[3]); } else if (baseDim==3) { params.mThreads = dim3(CUBE_X, CUBE_Y, CUBE_Z); params.mBlk_x = divup(oDims[0], params.mThreads.x); params.mBlk_y = divup(oDims[1], params.mThreads.y); int blk_z = divup(oDims[2], params.mThreads.z); params.mBlocks = dim3(params.mBlk_x * batchDims[3], params.mBlk_y, blk_z); params.mSharedSize = (params.mThreads.x+2*(fDims[0]-1)) * (params.mThreads.y+2*(fDims[1]-1)) * (params.mThreads.z+2*(fDims[2]-1)) * sizeof(T); } } template<typename T, typename aT, bool expand, int f0, int f1> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig) { CUDA_LAUNCH((convolve2<T, aT, expand, f0, f1>), p.mBlocks, p.mThreads, out, sig, p.mBlk_x, p.mBlk_y, p.o[1], p.o[2], p.s[1], p.s[2]); POST_LAUNCH_CHECK(); } template<typename T, typename aT, bool expand, int f0> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, int f1) { switch(f1) { case 1: conv2Helper<T, aT, expand, f0, 1>(p, out, sig); break; case 2: conv2Helper<T, aT, expand, f0, 2>(p, out, sig); break; case 3: conv2Helper<T, aT, expand, f0, 3>(p, out, sig); break; case 4: conv2Helper<T, aT, expand, f0, 4>(p, out, sig); break; case 5: conv2Helper<T, aT, expand, f0, 5>(p, out, sig); break; default: CUDA_NOT_SUPPORTED(); } } template<typename T, typename aT, bool expand> void conv2Helper(const conv_kparam_t &p, Param<T> out, CParam<T> sig, int f0, int f1) { switch(f0) { case 1: conv2Helper<T, aT, expand, 1>(p, out, sig, f1); break; case 2: conv2Helper<T, aT, expand, 2>(p, out, sig, f1); break; case 3: conv2Helper<T, aT, expand, 3>(p, out, sig, f1); break; case 4: conv2Helper<T, aT, expand, 4>(p, out, sig, f1); break; case 5: conv2Helper<T, aT, expand, 5>(p, out, sig, f1); break; default: { if (f0==f1) { switch(f1) { case 6: conv2Helper<T, aT, expand, 6, 6>(p, out, sig); break; case 7: conv2Helper<T, aT, expand, 7, 7>(p, out, sig); break; case 8: conv2Helper<T, aT, expand, 8, 8>(p, out, sig); break; case 9: conv2Helper<T, aT, expand, 9, 9>(p, out, sig); break; case 10: conv2Helper<T, aT, expand, 10, 10>(p, out, sig); break; case 11: conv2Helper<T, aT, expand, 11, 11>(p, out, sig); break; case 12: conv2Helper<T, aT, expand, 12, 12>(p, out, sig); break; case 13: conv2Helper<T, aT, expand, 13, 13>(p, out, sig); break; case 14: conv2Helper<T, aT, expand, 14, 14>(p, out, sig); break; case 15: conv2Helper<T, aT, expand, 15, 15>(p, out, sig); break; case 16: conv2Helper<T, aT, expand, 16, 16>(p, out, sig); break; case 17: conv2Helper<T, aT, expand, 17, 17>(p, out, sig); break; default: CUDA_NOT_SUPPORTED(); } } else CUDA_NOT_SUPPORTED(); } break; } } template<typename T, typename aT, bool expand> void convolve_1d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 1); int filterLen = filt.dims[0]; for (int b3=0; b3<filt.dims[3]; ++b3) { int f3Off = b3 * filt.strides[3]; for (int b2=0; b2<filt.dims[2]; ++b2) { int f2Off = b2 * filt.strides[2]; for (int b1=0; b1<filt.dims[1]; ++b1) { int f1Off = 
b1 * filt.strides[1]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(cudaMemcpyToSymbolAsync(kernel::cFilter, filt.ptr+(f1Off+f2Off+f3Off), filterLen*sizeof(aT), 0, cudaMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId()))); p.o[0] = (p.outHasNoOffset ? 0 : b1); p.o[1] = (p.outHasNoOffset ? 0 : b2); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[0] = (p.inHasNoOffset ? 0 : b1); p.s[1] = (p.inHasNoOffset ? 0 : b2); p.s[2] = (p.inHasNoOffset ? 0 : b3); CUDA_LAUNCH_SMEM((convolve1<T, aT, expand>), p.mBlocks, p.mThreads, p.mSharedSize, out, sig, filt.dims[0], p.mBlk_x, p.mBlk_y, p.o[0], p.o[1], p.o[2], p.s[0], p.s[1], p.s[2]); POST_LAUNCH_CHECK(); } } } } template<typename T, typename aT, bool expand> void convolve_2d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 2); int filterLen = filt.dims[0] * filt.dims[1]; for (int b3=0; b3<filt.dims[3]; ++b3) { int f3Off = b3 * filt.strides[3]; for (int b2=0; b2<filt.dims[2]; ++b2) { int f2Off = b2 * filt.strides[2]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(cudaMemcpyToSymbolAsync(kernel::cFilter, filt.ptr+(f2Off+f3Off), filterLen*sizeof(aT), 0, cudaMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId()))); p.o[1] = (p.outHasNoOffset ? 0 : b2); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[1] = (p.inHasNoOffset ? 0 : b2); p.s[2] = (p.inHasNoOffset ? 0 : b3); conv2Helper<T, aT, expand>(p, out, sig, filt.dims[0], filt.dims[1]); } } } template<typename T, typename aT, bool expand> void convolve_3d(conv_kparam_t &p, Param<T> out, CParam<T> sig, CParam<aT> filt) { prepareKernelArgs<T>(p, out.dims, filt.dims, 3); int filterLen = filt.dims[0] * filt.dims[1] * filt.dims[2]; for (int b3=0; b3<filt.dims[3]; ++b3) { int f3Off = b3 * filt.strides[3]; // FIXME: if the filter array is strided, direct copy of symbols // might cause issues CUDA_CHECK(cudaMemcpyToSymbolAsync(kernel::cFilter, filt.ptr+f3Off, filterLen*sizeof(aT), 0, cudaMemcpyDeviceToDevice, cuda::getStream(cuda::getActiveDeviceId()))); p.o[2] = (p.outHasNoOffset ? 0 : b3); p.s[2] = (p.inHasNoOffset ? 
0 : b3); CUDA_LAUNCH_SMEM((convolve3<T, aT, expand>), p.mBlocks, p.mThreads, p.mSharedSize, out, sig, filt.dims[0], filt.dims[1], filt.dims[2], p.mBlk_x, p.o[2], p.s[2]); POST_LAUNCH_CHECK(); } } template<typename T, typename aT, int baseDim, bool expand> void convolve_nd(Param<T> out, CParam<T> signal, CParam<aT> filt, ConvolveBatchKind kind) { bool callKernel = true; int MCFL2 = kernel::MAX_CONV2_FILTER_LEN; int MCFL3 = kernel::MAX_CONV3_FILTER_LEN; switch(baseDim) { case 1: if (filt.dims[0]>kernel::MAX_CONV1_FILTER_LEN) callKernel = false; break; case 2: if ((filt.dims[0]*filt.dims[1]) > (MCFL2 * MCFL2)) callKernel = false; break; case 3: if ((filt.dims[0]*filt.dims[1]*filt.dims[2]) > (MCFL3 * MCFL3 * MCFL3)) callKernel = false; break; } if (!callKernel) { CUDA_NOT_SUPPORTED(); } conv_kparam_t param; for (int i=0; i<3; ++i) { param.o[i] = 0; param.s[i] = 0; } param.launchMoreBlocks = kind==CONVOLVE_BATCH_SAME || kind==CONVOLVE_BATCH_KERNEL; param.outHasNoOffset = kind==CONVOLVE_BATCH_SIGNAL || kind==CONVOLVE_BATCH_NONE; param.inHasNoOffset = kind!=CONVOLVE_BATCH_SAME; switch(baseDim) { case 1: convolve_1d<T, aT, expand>(param, out, signal, filt); break; case 2: convolve_2d<T, aT, expand>(param, out, signal, filt); break; case 3: convolve_3d<T, aT, expand>(param, out, signal, filt); break; } POST_LAUNCH_CHECK(); } #define INSTANTIATE(T, aT) \ template void convolve_nd<T, aT, 1, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 1, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 2, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 2, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 3, true >(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ template void convolve_nd<T, aT, 3, false>(Param<T> out, CParam<T> signal, CParam<aT> filter, ConvolveBatchKind kind);\ INSTANTIATE(cdouble, cdouble) INSTANTIATE(cfloat , cfloat) INSTANTIATE(double , double) INSTANTIATE(float , float) INSTANTIATE(uint , float) INSTANTIATE(int , float) INSTANTIATE(uchar , float) INSTANTIATE(char , float) INSTANTIATE(ushort , float) INSTANTIATE(short , float) } }
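A quick check (not in the source) that the MAX_CONV*_FILTER_LEN caps respect the 48 KB per-block shared-memory budget the header comment cites, taking the largest element type (cdouble, 16 bytes):

//   2D: tile side = THREADS_X + 2*(17-1) = 16 + 32 = 48
//       48 * 48 elements * 16 B              = 36,864 B   (inside 49,152 B)
//   3D: tile = (8 + 2*4) * (8 + 2*4) * (4 + 2*4) = 16 * 16 * 12 = 3,072 elements
//       3,072 * 16 B                          = 49,152 B   (exactly the 48 KB limit)
//
// So the 3D cap of 5 lands exactly on the 48 KB limit, while the 1D and 2D caps
// are conservative choices inside that budget.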
34b316479113b66e2f0f3e5f0dc6d8bbcecc2c76.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <assert.h>
#include <algorithm>
#include <cmath>

#include <hip/hip_runtime.h>
#include <hipblas.h>

#include "integral-strided-cuda.hpp"

#define BLOCK_SIZE 32
#define BLOCK_CHANNELS (1024 / (BLOCK_SIZE * BLOCK_SIZE))

using std::max;
using std::min;
using std::floor;
using std::ceil;

hipblasHandle_t cublasHandle;
float *CUDA_ZERO_FLOAT, *CUDA_ONE_FLOAT; // for cublas in device pointer mode

extern "C"
void _initCublasHandle() {
    if (hipblasCreate(&cublasHandle) != HIPBLAS_STATUS_SUCCESS) {
        printf ("CUBLAS initialization failed\n");
    }
    hipblasSetPointerMode(cublasHandle, HIPBLAS_POINTER_MODE_DEVICE);
    // TODO: at shutdown, `hipblasDestroy(handle);`

    // TODO: deallocate this!
    float zeroOne[] = {0, 1};
    hipMalloc((void**)&CUDA_ZERO_FLOAT, sizeof(zeroOne));
    CUDA_ONE_FLOAT = CUDA_ZERO_FLOAT + 1;
    hipMemcpy(CUDA_ZERO_FLOAT, zeroOne, sizeof(zeroOne), hipMemcpyHostToDevice);
}

// TODO remove this code
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError()    __cudaCheckError( __FILE__, __LINE__ )

inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( hipSuccess != err )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( err ) );
        exit( -1 );
    }
#endif

    return;
}

inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    hipError_t err = hipGetLastError();
    if ( hipSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, hipGetErrorString( err ) );
        exit( -1 );
    }

    // More careful checking. However, this will affect performance.
    // Comment away if needed.
err = hipDeviceSynchronize(); if( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } /************************ Integral image computation ************************/ __global__ void accumulateRowsKernel( float *input, float *output, int channels, int totalRows, int w); __global__ void accumulateColsKernel( float *input, float *output, int channels, int h, int w); __global__ void accumulateColsInplaceKernel( float *input, int channels, int h, int w); __global__ void accumulateColsInplaceTransposedKernel( float *input, int channels, int h, int w); extern "C" void integralImageCuda(float *input, float *output, int channels, int h, int w, float *tmp) { int blockSize1D, gridSize1D; int totalCols = channels * w; blockSize1D = BLOCK_SIZE * BLOCK_SIZE; gridSize1D = (totalCols + blockSize1D - 1) / blockSize1D; hipLaunchKernelGGL(( accumulateColsKernel) , dim3(gridSize1D), dim3(blockSize1D), 0, 0, input, output, channels, h, w); hipblasSgeam( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, channels * (h+1), w+1, CUDA_ONE_FLOAT, output, w+1, CUDA_ZERO_FLOAT, tmp, channels * (h+1), tmp, channels * (h+1)); int totalRows = channels * h; blockSize1D = BLOCK_SIZE * BLOCK_SIZE; gridSize1D = (totalRows + blockSize1D - 1) / blockSize1D; hipLaunchKernelGGL(( accumulateColsInplaceTransposedKernel) , dim3(gridSize1D), dim3(blockSize1D), 0, 0, tmp, channels, h, w); hipblasSgeam( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, w+1, channels * (h+1), CUDA_ONE_FLOAT, tmp, channels * (h+1), CUDA_ZERO_FLOAT, output, w+1, output, w+1); } /* extern "C" void integralImageInplaceCuda(float *input, float *output, int channels, int h, int w) { int blockSize1D, gridSize1D; int totalCols = channels * w; blockSize1D = BLOCK_SIZE * BLOCK_SIZE; gridSize1D = (totalCols + blockSize1D - 1) / blockSize1D; hipLaunchKernelGGL(( accumulateColsKernel) , dim3(gridSize1D), dim3(blockSize1D), 0, 0, input, output, channels, h, w); inplace::transpose(true, output, channels * (h+1), w+1); int totalRows = channels * h; blockSize1D = BLOCK_SIZE * BLOCK_SIZE; gridSize1D = (totalRows + blockSize1D - 1) / blockSize1D; hipLaunchKernelGGL(( accumulateColsInplaceTransposedKernel) , dim3(gridSize1D), dim3(blockSize1D), 0, 0, output, channels, h, w); inplace::transpose(true, output, w+1, channels * (h+1)); } */ __global__ void accumulateRowsKernel( float *input, float *output, int channels, int h, int w) { // view multichannel image as a multiline single-channel image int globalRowIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (globalRowIdx < channels * h) { float *outputRow = output + (globalRowIdx + globalRowIdx / h + 1) * (w+1) + 1; outputRow[-1] = 0; double sum = 0; for (int i = 0; i < w; ++i) { sum += input[globalRowIdx * w + i]; outputRow[i] = static_cast<float>(sum); } // need to zero the (0,0) corner of the output separately >:( output[(globalRowIdx / h) * (w+1) * (h+1)] = 0; } } __global__ void accumulateColsKernel(float *input, float *output, int channels, int h, int w) { // global column index (of all `channels * w` columns in this image) int colIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (colIdx < channels * w) { // jump to current channel input += (colIdx / w) * h * w; output += (colIdx / w) * (h+1) * (w+1); colIdx %= w; // switch to local column index, ++colIdx; // it's 1-indexed because first output column is always zero output[colIdx] = 0; // first element of every column is always zero double sum = 0; for 
(int i = 1; i <= h; ++i) { sum += static_cast<double>(input[(i-1) * w + colIdx - 1]); output[i * (w+1) + colIdx] = static_cast<float>(sum); } } } __global__ void accumulateColsInplaceTransposedKernel(float *input, int channels, int h, int w) { // in-place. // input is a `(w+1) x channels * (h+1)` array // global column index (of all `channels * w` columns in this image) int colIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (colIdx < channels * h) { // need to zero the (0,0) corner of the output separately >:( input[(colIdx / h) * (h+1)] = 0; colIdx += colIdx / h + 1; // make `colIdx` the (h+1)-array indexer input[colIdx] = 0; // first element of every column is always zero double sum = 0; for (int i = 1; i <= w; ++i) { float *currentElement = &input[i * channels * (h+1) + colIdx]; sum += static_cast<double>(*currentElement); *currentElement = static_cast<float>(sum); } } } __global__ void accumulateColsInplaceKernel(float *input, int channels, int h, int w) { // in-place. // input is already a `channels * (h+1) x (w+1)` array // global column index (of all `channels * w` columns in this image) int colIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (colIdx < channels * w) { input += (colIdx / w) * (h+1) * (w+1); // jump to current channel colIdx %= w; // switch to local column index, ++colIdx; // it's 1-indexed because first output column is always zero input[colIdx] = 0; // first element of every column is always zero double sum = 0; for (int i = 1; i <= h; ++i) { float *currentElement = &input[i * (w+1) + colIdx]; sum += static_cast<double>(*currentElement); *currentElement = static_cast<float>(sum); } } } /************************ updateOutput ************************/ __global__ void forwardKernel( float *intData, float *outData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, float *areaCoeff) { int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; int z = BLOCK_CHANNELS * blockIdx.z + threadIdx.z; if (x < h and y < w and z < nWindows) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at indices // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. int t = max(0, min(x+(int)ceil (xMin[z] ), h) ); int b = max(0, min(x+(int)floor(xMax[z]+1), h) ); int l = max(0, min(y+(int)ceil (yMin[z] ), w) ); int r = max(0, min(y+(int)floor(yMax[z]+1), w) ); outData[z*w*h + x*w + y] = areaCoeff[z] * ( intData[b*(w+1) + r] - intData[t*(w+1) + r] - intData[b*(w+1) + l] + intData[t*(w+1) + l]); } } __global__ void forwardNoNormReplicateKernel( float *intData, int intDataStrideChannel, float *outData, int h, int w, int nInputPlane, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; const int z = BLOCK_CHANNELS * blockIdx.z + threadIdx.z; const int inPlaneIdx = z / nWindows; intData += intDataStrideChannel * inPlaneIdx; if (x < h and y < w and z < nInputPlane*nWindows) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). 
// However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. const int t = max(0, min(x+(int) ceil(xMin[z]) , h-1) ); const int b = max(1, min(x+(int)floor(xMax[z])+1, h ) ); const int l = max(0, min(y+(int) ceil(yMin[z]) , w-1) ); const int r = max(1, min(y+(int)floor(yMax[z])+1, w ) ); double outValue = 0; outValue += intData[b*(w+1) + r]; outValue -= intData[t*(w+1) + r]; outValue -= intData[b*(w+1) + l]; outValue += intData[t*(w+1) + l]; outData[z*w*h + x*w + y] = outValue; } } __global__ void forwardNoNormReplicateFracKernel( const float *intData, const int intDataStrideChannel, float *const outData, const int h, const int w, const int nInputPlane, const int nWindows, const float *xMin, const float *xMax, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow, const int inDataStrideChannel) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w; id /= w; const int x = id % h; id /= h; const int windowIdx = id % nWindows; id /= nWindows; const int & inPlaneIdx = id; intData += intDataStrideChannel * inPlaneIdx; inData += inDataStrideChannel * inPlaneIdx; if (x < h and y < w and windowIdx < nWindows and inPlaneIdx < nInputPlane) { const int rem = windowIdx % 4; const float xMinStretched = rem == 0 ? -h : xMin[inPlaneIdx*(nWindows-(nWindows-0+3) / 4) + 3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[inPlaneIdx*(nWindows-(nWindows-1+3) / 4) + 3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[inPlaneIdx*(nWindows-(nWindows-2+3) / 4) + 3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[inPlaneIdx*(nWindows-(nWindows-3+3) / 4) + 3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int xMinCurr = (int)ceil(xMinStretched); const float xMinCurrFrac = (float)xMinCurr - xMinStretched; const int yMinCurr = (int)ceil(yMinStretched); const float yMinCurrFrac = (float)yMinCurr - yMinStretched; const float xMaxCurrFrac = xMaxStretched - floor(xMaxStretched); const int xMaxCurr = (int)floor(xMaxStretched) + 1; const float yMaxCurrFrac = yMaxStretched - floor(yMaxStretched); const int yMaxCurr = (int)floor(yMaxStretched) + 1; const int t = max(0, min(x+xMinCurr, h-1) ); const int b = max(1, min(x+xMaxCurr, h) ); const int l = max(0, min(y+yMinCurr, w-1) ); const int r = max(1, min(y+yMaxCurr, w) ); double outValue = 0; outValue += intData[b*(w+1) + r]; outValue -= intData[t*(w+1) + r]; outValue -= intData[b*(w+1) + l]; outValue += intData[t*(w+1) + l]; // -- xMax border outValue += ( intData[max(1,min(x+xMaxCurr+1,h))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(1,min(x+xMaxCurr+1,h))*(w+1) + max(0,min(y+yMinCurr,w-1))] + intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(0,min(y+yMinCurr,w-1))] ) * xMaxCurrFrac; // -- yMax border outValue += ( intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(1,min(y+yMaxCurr+1,w))] - intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(1,min(y+yMaxCurr+1,w))] + intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(1,min(y+yMaxCurr,w))] ) * yMaxCurrFrac; // -- xMin border outValue += ( intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(0,min(x+xMinCurr-1,h-1))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(0,min(y+yMinCurr,w-1))] + intData[max(0,min(x+xMinCurr-1,h-1))*(w+1) + max(0,min(y+yMinCurr,w-1))] ) * xMinCurrFrac; // -- yMin border outValue += ( intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(0,min(y+yMinCurr,w-1))] - intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(0,min(y+yMinCurr-1,w-1))] - intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(0,min(y+yMinCurr,w-1))] + intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(0,min(y+yMinCurr-1,w-1))] ) * yMinCurrFrac; // -- corner pixels outValue += xMaxCurrFrac*yMaxCurrFrac * ( (x+xMaxCurr > h-1 or y+yMaxCurr > w-1 or x+xMaxCurr <= 0 or y+yMaxCurr <= 0) ? 0 : inData[(x+xMaxCurr)*inDataStrideRow + (y+yMaxCurr)]); outValue += xMinCurrFrac*yMaxCurrFrac * ( (x+xMinCurr-1 >= h-1 or y+yMaxCurr > w-1 or x+xMinCurr-1 < 0 or y+yMaxCurr <= 0) ? 0 : inData[(x+xMinCurr-1)*inDataStrideRow + (y+yMaxCurr)]); outValue += xMaxCurrFrac*yMinCurrFrac * ( (x+xMaxCurr > h-1 or y+yMinCurr-1 >= w-1 or x+xMaxCurr <= 0 or y+yMinCurr-1 < 0) ? 0 : inData[(x+xMaxCurr)*inDataStrideRow + (y+yMinCurr-1)]); outValue += xMinCurrFrac*yMinCurrFrac * ( (x+xMinCurr-1 >= h-1 or y+yMinCurr-1 >= w-1 or x+xMinCurr-1 < 0 or y+yMinCurr-1 < 0) ? 
0 : inData[(x+xMinCurr-1)*inDataStrideRow + (y+yMinCurr-1)]); outData[(inPlaneIdx*nWindows+windowIdx)*w*h + x*w + y] = outValue; } } extern "C" { void forwardCuda( float *intData, int h, int w, int nWindows, float *outData, float *xMin, float *xMax, float *yMin, float *yMax, float *areaCoeff) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid((h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y, (nWindows + dimBlock.z - 1) / dimBlock.z); hipLaunchKernelGGL(( forwardKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, outData, h, w, nWindows, xMin, xMax, yMin, yMax, areaCoeff); } void forwardNoNormReplicateCuda(THCState *state, float *intData, int intDataStrideChannel, float *outData, int h, int w, int nInputPlane, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::forwardNoNormReplicateCuda(state, intData, intDataStrideChannel, outData, 1, nInputPlane, nWindows, h, w, xMin, xMax, yMin, yMax, strideH, strideW); return; } // TODO: 1D grid dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y, (nInputPlane*nWindows + dimBlock.z - 1) / dimBlock.z); hipLaunchKernelGGL(( forwardNoNormReplicateKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, intDataStrideChannel, outData, h, w, nInputPlane, nWindows, xMin, xMax, yMin, yMax); } void forwardNoNormReplicateFracCuda(THCState *state, float *intData, int intDataStrideChannel, float *outData, int h, int w, int nInputPlane, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, float *inData, int inDataStrideRow, int inDataStrideChannel, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::forwardNoNormReplicateFracCuda(state, intData, intDataStrideChannel, outData, 1, nInputPlane, nWindows, h, w, xMin, xMax, yMin, yMax, inData, inDataStrideRow, inDataStrideChannel, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nInputPlane*nWindows*h*w + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( forwardNoNormReplicateFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, intDataStrideChannel, outData, h, w, nInputPlane, nWindows, xMin, xMax, yMin, yMax, inData, inDataStrideRow, inDataStrideChannel); } /************************ updateGradInput ************************/ __global__ void updateGradInputPlanewiseKernel( float *gradOutputIntData, float *gradInputData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; if (x < h and y < w) { int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr; double outValue = 0; for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) { xMinCurr = (int)ceil(-xMax[windowIdx]); yMinCurr = (int)ceil(-yMax[windowIdx]); xMaxCurr = (int)floor(-xMin[windowIdx]) + 1; yMaxCurr = (int)floor(-yMin[windowIdx]) + 1; // The following code block implements these lines // as if they were executed simultaneously (see `void updateGradInputFrac()`): // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr); // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr); // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr); // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? 
w+66 : yMaxCurr); bool needToChangeMin, needToChangeMax; needToChangeMin = x == 0 and xMaxCurr >= 0; needToChangeMax = x == h-1 and xMinCurr <= 0; if (needToChangeMin) xMinCurr = 0; if (needToChangeMax) xMaxCurr = h+66; needToChangeMin = y == 0 and yMaxCurr >= 0; needToChangeMax = y == w-1 and yMinCurr <= 0; if (needToChangeMin) yMinCurr = 0; if (needToChangeMax) yMaxCurr = w+66; const int t = max(0, min(x+xMinCurr, h) ); const int b = max(0, min(x+xMaxCurr, h) ); const int l = max(0, min(y+yMinCurr, w) ); const int r = max(0, min(y+yMaxCurr, w) ); outValue += gradOutputIntData[b*(w+1) + r]; outValue -= gradOutputIntData[t*(w+1) + r]; outValue -= gradOutputIntData[b*(w+1) + l]; outValue += gradOutputIntData[t*(w+1) + l]; // go to the next channel gradOutputIntData += (h+1)*(w+1); } gradInputData[x*w + y] = outValue; } } __global__ void updateGradInputPlanewiseFracKernel( const float *gradOutputIntData, float *const gradInputData, const int h, const int w, const int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, const float *gradOutputData, const int gradOutputStrideRow, const int gradOutputStrideChannel) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; if (x < h and y < w) { int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr; double outValue = 0; for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) { const int rem = windowIdx % 4; const float xMinStretched = rem == 0 ? -h : xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. xMinCurr = (int)ceil(-xMaxStretched); yMinCurr = (int)ceil(-yMaxStretched); const float xMinCurrFrac = (float)xMinCurr + xMaxStretched; const float yMinCurrFrac = (float)yMinCurr + yMaxStretched; xMaxCurr = (int)floor(-xMinStretched) + 1; yMaxCurr = (int)floor(-yMinStretched) + 1; const float xMaxCurrFrac = -xMinStretched + 1 - xMaxCurr; const float yMaxCurrFrac = -yMinStretched + 1 - yMaxCurr; // The following code block implements these lines // as if they were executed simultaneously (see `void updateGradInputFrac()`): // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr); // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr); // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr); // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr); bool needToChangeMin, needToChangeMax; needToChangeMin = x == 0 and xMaxCurr >= 0; needToChangeMax = x == h-1 and xMinCurr <= 0; if (needToChangeMin) xMinCurr = 0; if (needToChangeMax) xMaxCurr = h+66; needToChangeMin = y == 0 and yMaxCurr >= 0; needToChangeMax = y == w-1 and yMinCurr <= 0; if (needToChangeMin) yMinCurr = 0; if (needToChangeMax) yMaxCurr = w+66; const int t = max(0, min(x+xMinCurr, h) ); const int b = max(0, min(x+xMaxCurr, h) ); const int l = max(0, min(y+yMinCurr, w) ); const int r = max(0, min(y+yMaxCurr, w) ); const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t; const int bAdv = x+xMaxCurr >= 0 ? 
max(0, min(b+1, h)) : b; const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l; const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r; // TODO: 1D grid outValue += gradOutputIntData[b*(w+1) + r]; outValue -= gradOutputIntData[t*(w+1) + r]; outValue -= gradOutputIntData[b*(w+1) + l]; outValue += gradOutputIntData[t*(w+1) + l]; // -- xMax border outValue += ( gradOutputIntData[bAdv*(w+1) + r] - gradOutputIntData[b *(w+1) + r] - gradOutputIntData[bAdv*(w+1) + l] + gradOutputIntData[b *(w+1) + l] ) * xMaxCurrFrac; // -- yMax border outValue += ( gradOutputIntData[b*(w+1) + rAdv] - gradOutputIntData[b*(w+1) + r ] - gradOutputIntData[t*(w+1) + rAdv] + gradOutputIntData[t*(w+1) + r ] ) * yMaxCurrFrac; // -- xMin border outValue += ( gradOutputIntData[t *(w+1) + r] - gradOutputIntData[tAdv*(w+1) + r] - gradOutputIntData[t *(w+1) + l] + gradOutputIntData[tAdv*(w+1) + l] ) * xMinCurrFrac; // -- yMin border outValue += ( gradOutputIntData[b*(w+1) + l ] - gradOutputIntData[b*(w+1) + lAdv] - gradOutputIntData[t*(w+1) + l ] + gradOutputIntData[t*(w+1) + lAdv] ) * yMinCurrFrac; // -- corner pixels outValue += xMaxCurrFrac*yMaxCurrFrac * ( (x+xMaxCurr > h-1 or y+yMaxCurr > w-1 or x+xMaxCurr < 0 or y+yMaxCurr < 0 or b == bAdv or r == rAdv) ? 0 : gradOutputData[b*gradOutputStrideRow + r]); outValue += xMinCurrFrac*yMaxCurrFrac * ( (x+xMinCurr-1 > h-1 or y+yMaxCurr > w-1 or x+xMinCurr-1 < 0 or y+yMaxCurr < 0 or t == tAdv or r == rAdv) ? 0 : gradOutputData[tAdv*gradOutputStrideRow + r]); outValue += xMaxCurrFrac*yMinCurrFrac * ( (x+xMaxCurr > h-1 or y+yMinCurr-1 > w-1 or x+xMaxCurr < 0 or y+yMinCurr-1 < 0 or b == bAdv or l == lAdv) ? 0 : gradOutputData[b*gradOutputStrideRow + lAdv]); outValue += xMinCurrFrac*yMinCurrFrac * ( (x+xMinCurr-1 > h-1 or y+yMinCurr-1 > w-1 or x+xMinCurr-1 < 0 or y+yMinCurr-1 < 0 or t == tAdv or l == lAdv) ? 
0 : gradOutputData[tAdv*gradOutputStrideRow + lAdv]); // go to the next channel gradOutputIntData += (h+1)*(w+1); gradOutputData += gradOutputStrideChannel; } gradInputData[x*w + y] = outValue; } } void updateGradInputPlanewiseCuda( float *gradOutputIntData, float *gradInputData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::updateGradInputReplicatePlanewiseCuda( gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( updateGradInputPlanewiseKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax); } void updateGradInputPlanewiseFracCuda( float *gradOutputIntData, float *gradInputData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::updateGradInputReplicatePlanewiseFracCuda( gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, gradOutputData, gradOutputStrideRow, gradOutputStrideChannel, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y); hipLaunchKernelGGL(( updateGradInputPlanewiseFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, gradOutputData, gradOutputStrideRow, gradOutputStrideChannel); } /************************ accGradParameters ************************/ __global__ void xMaxDeltaIntegralFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int rem = windowIdx % 4; if (rem == 1) { tmpArray[(x-1)*w + (y-1)] = 0; } else { // const float xMinStretched = rem == 0 ? -h : // xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; // const int xMinInt = (int)ceil(xMinStretched-1); // const float xMinFrac = xMinInt-xMinStretched+1; const int yMinInt = (int)ceil(yMinStretched-1); const float yMinFrac = yMinInt-yMinStretched+1; const int xMaxInt = (int)floor(xMaxStretched); // const float xMaxFrac = xMaxStretched-xMaxInt; const int yMaxInt = (int)floor(yMaxStretched); const float yMaxFrac = yMaxStretched-yMaxInt; // const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 
0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac); delta += blCorner * (y+yMinInt >= w ? 1.0f : yMinFrac); delta += intData[max(0,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMaxInt >= 1 and x+xMaxInt < h); tmpArray[(x-1)*w + (y-1)] = delta; } } } __global__ void xMinDeltaIntegralFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int rem = windowIdx % 4; if (rem == 0) { tmpArray[(x-1)*w + (y-1)] = 0; } else { const float xMinStretched = rem == 0 ? -h : xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; // const float xMaxStretched = rem == 1 ? h : // xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; const int xMinInt = (int)ceil(xMinStretched-1); // const float xMinFrac = xMinInt-xMinStretched+1; const int yMinInt = (int)ceil(yMinStretched-1); const float yMinFrac = yMinInt-yMinStretched+1; // const int xMaxInt = (int)floor(xMaxStretched); // const float xMaxFrac = xMaxStretched-xMaxInt; const int yMaxInt = (int)floor(yMaxStretched); const float yMaxFrac = yMaxStretched-yMaxInt; const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac); delta += tlCorner * (y+yMinInt >= w ? 
1.0f : yMinFrac); delta += intData[max(0,min(x+xMinInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt-1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMinInt-1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMinInt >= 1 and x+xMinInt < h); tmpArray[(x-1)*w + (y-1)] *= -delta; } } } __global__ void yMaxDeltaIntegralFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMax, const float *inData, const int inDataStrideRow) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int rem = windowIdx % 4; if (rem == 3) { tmpArray[(x-1)*w + (y-1)] = 0; } else { const float xMinStretched = rem == 0 ? -h : xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; // const float yMinStretched = rem == 2 ? -w : // yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; const int xMinInt = (int)ceil(xMinStretched-1); const float xMinFrac = xMinInt-xMinStretched+1; // const int yMinInt = (int)ceil(yMinStretched-1); // const float yMinFrac = yMinInt-yMinStretched+1; const int xMaxInt = (int)floor(xMaxStretched); const float xMaxFrac = xMaxStretched-xMaxInt; const int yMaxInt = (int)floor(yMaxStretched); // const float yMaxFrac = yMaxStretched-yMaxInt; // const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; // const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac); delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac); delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt+1, w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt+1, w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta *= (y+yMaxInt >= 1 and y+yMaxInt < w); tmpArray[(x-1)*w + (y-1)] = delta; } } } __global__ void yMinDeltaIntegralFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const float *inData, const int inDataStrideRow) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int rem = windowIdx % 4; if (rem == 2) { tmpArray[(x-1)*w + (y-1)] = 0; } else { const float xMinStretched = rem == 0 ? 
-h : xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; // const float yMaxStretched = rem == 3 ? w : // yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; const int xMinInt = (int)ceil(xMinStretched-1); const float xMinFrac = xMinInt-xMinStretched+1; const int yMinInt = (int)ceil(yMinStretched-1); // const float yMinFrac = yMinInt-yMinStretched+1; const int xMaxInt = (int)floor(xMaxStretched); const float xMaxFrac = xMaxStretched-xMaxInt; // const int yMaxInt = (int)floor(yMaxStretched); // const float yMaxFrac = yMaxStretched-yMaxInt; const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac); delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac); delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt , w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt-1, w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt , w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt-1, w))]; delta *= (y+yMinInt >= 1 and y+yMinInt < w); tmpArray[(x-1)*w + (y-1)] *= -delta; } } } void backwardFracCuda( float *intData, float *tmpArray, int nWindows, int h, int w, float *xMin, float *xMax, float *yMin, float *yMax, float *inData, int inDataStrideRow, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::backwardReplicateFracCuda( intData, tmpArray, nWindows, h, w, xMin, xMax, yMin, yMax, inData, inDataStrideRow, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( xMaxDeltaIntegralFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 0*nWindows*h*w, nWindows, h, w, xMax, yMin, yMax, inData, inDataStrideRow); hipLaunchKernelGGL(( xMinDeltaIntegralFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 1*nWindows*h*w, nWindows, h, w, xMin, yMin, yMax, inData, inDataStrideRow); hipLaunchKernelGGL(( yMaxDeltaIntegralFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 2*nWindows*h*w, nWindows, h, w, xMin, xMax, yMax, inData, inDataStrideRow); hipLaunchKernelGGL(( yMinDeltaIntegralFracKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 3*nWindows*h*w, nWindows, h, w, xMin, xMax, yMin, inData, inDataStrideRow); } __global__ void xMaxDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx 
* h * w; // const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(1,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(1,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMaxInt >= 1 and x+xMaxInt < h); tmpArray[(x-1)*w + (y-1)] = delta; } } __global__ void xMinDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); // const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMinInt , h-1))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt-1, h ))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt , h-1))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMinInt-1, h ))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMinInt >= 1 and x+xMinInt < h); tmpArray[(x-1)*w + (y-1)] *= -delta; } } __global__ void yMaxDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMax) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int xMinInt = (int)ceil(xMin[windowIdx]-1); // const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(1,min(y+yMaxInt+1, w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(1,min(y+yMaxInt+1, w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta *= (y+yMaxInt >= 1 and y+yMaxInt < w); tmpArray[(x-1)*w + (y-1)] = delta; } } __global__ void yMinDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); // const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt , w ))]; delta -= intData[max(0,min(x+xMaxInt, 
h))*(w+1) + max(0,min(y+yMinInt-1, w-1))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt , w ))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt-1, w-1))]; delta *= (y+yMinInt >= 1 and y+yMinInt < w); tmpArray[(x-1)*w + (y-1)] *= -delta; } } void backwardCuda( float *intData, float *tmpArray, int nWindows, int h, int w, float *xMin, float *xMax, float *yMin, float *yMax, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::backwardReplicateCuda( intData, tmpArray, nWindows, h, w, xMin, xMax, yMin, yMax, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( xMaxDeltaIntegralKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 0*nWindows*h*w, nWindows, h, w, xMax, yMin, yMax); hipLaunchKernelGGL(( xMinDeltaIntegralKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 1*nWindows*h*w, nWindows, h, w, xMin, yMin, yMax); hipLaunchKernelGGL(( yMaxDeltaIntegralKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 2*nWindows*h*w, nWindows, h, w, xMin, xMax, yMax); hipLaunchKernelGGL(( yMinDeltaIntegralKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, intData, tmpArray + 3*nWindows*h*w, nWindows, h, w, xMin, xMax, yMin); } __global__ void toBorderAddGradParamsKernel( const int nWindows, float *const gradXMax, float *const gradXMin, float *const gradYMax, float *const gradYMin, const float scale, const float *tmpArraySumGPU) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (id < 4*nWindows) { int paramIdx = id / nWindows; float *const gradParam = (float*[]){gradXMax, gradXMin, gradYMax, gradYMin}[paramIdx]; const int windowIdx = id % nWindows; const int rem = windowIdx % 4; // use streams, not this arithmetic insanity if ((5-rem) % 4 != paramIdx) { gradParam[3*(windowIdx/4) + (rem > (5-paramIdx) % 4 ? (rem-1) : rem)] += scale * tmpArraySumGPU[id]; } } } // TODO: hey...use streams...would you be USING STREAMS INSTEAD please! void toBorderAddGradParams( const int nWindows, float *const gradXMax, float *const gradXMin, float *const gradYMax, float *const gradYMin, const float scale, const float *const tmpArraySumGPU) { dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((4*nWindows + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( toBorderAddGradParamsKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, nWindows, gradXMax, gradXMin, gradYMax, gradYMin, scale, tmpArraySumGPU); } /************************ Other stuff ************************/ __global__ void dirtyFixWindowsKernel( float *const xMin, float *const xMax, float *const yMin, float *const yMax, const int nInputPlane, const int nWindows, const float h, const float w, const float minWidth) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const bool correctingY = id >= nInputPlane*nWindows; if (correctingY) { id -= nInputPlane*nWindows; } const int windowIdx = id % nWindows; id /= nWindows; const int & inPlaneIdx = id; const int rem = windowIdx % 4; const int xMinIdx = inPlaneIdx*(nWindows-(nWindows-0+3) / 4) + 3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem); const int xMaxIdx = inPlaneIdx*(nWindows-(nWindows-1+3) / 4) + 3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem); const int yMinIdx = inPlaneIdx*(nWindows-(nWindows-2+3) / 4) + 3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem); const int yMaxIdx = inPlaneIdx*(nWindows-(nWindows-3+3) / 4) + 3*(windowIdx/4) + (rem > 3 ? 
(rem-1) : rem); if (inPlaneIdx < nInputPlane and windowIdx < nWindows) { float paramMin, paramMax; if (not correctingY) { if (rem == 2 or rem == 3) { paramMin = max(-h+1, min(h-1, xMin[xMinIdx])); paramMax = max(-h+1, min(h-1, xMax[xMaxIdx])); if (paramMin + minWidth - 0.99 > paramMax) { const float mean = 0.5 * (paramMin + paramMax); paramMin = mean - 0.5 * (minWidth - 0.9); paramMax = mean + 0.5 * (minWidth - 0.9); } xMin[xMinIdx] = paramMin; xMax[xMaxIdx] = paramMax; } else if (rem == 0) { xMax[xMaxIdx] = max(-h+1, min(h-1, xMax[xMaxIdx])); } else if (rem == 1) { xMin[xMinIdx] = max(-h+1, min(h-1, xMin[xMinIdx])); } } else { if (rem == 0 or rem == 1) { paramMin = max(-w+1, min(w-1, yMin[yMinIdx])); paramMax = max(-w+1, min(w-1, yMax[yMaxIdx])); if (paramMin + minWidth - 0.99 > paramMax) { const float mean = 0.5 * (paramMin + paramMax); paramMin = mean - 0.5 * (minWidth - 0.9); paramMax = mean + 0.5 * (minWidth - 0.9); } yMin[yMinIdx] = paramMin; yMax[yMaxIdx] = paramMax; } else if (rem == 2) { yMax[yMaxIdx] = max(-w+1, min(w-1, yMax[yMaxIdx])); } else if (rem == 3) { yMin[yMinIdx] = max(-w+1, min(w-1, yMin[yMinIdx])); } } } } void dirtyFixWindows( float *const xMin, float *const xMax, float *const yMin, float *const yMax, const int nInputPlane, const int nWindows, const int h, const int w, const float minWidth) { dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((2*nInputPlane*nWindows + dimBlock.x - 1) / dimBlock.x); hipLaunchKernelGGL(( dirtyFixWindowsKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, xMin, xMax, yMin, yMax, nInputPlane, nWindows, (float)h, (float)w, minWidth); } } // extern "C"
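Editor's aside (illustrative sketch only, not part of the files in this pair): integralImageCuda above builds, per channel, a zero-padded (h+1) x (w+1) summed-area table on the GPU — column-wise prefix sums, an Sgeam transpose, another pass of column sums in the transposed layout, and a transpose back. The CPU reference below is a minimal sketch of the same output layout using the standard recurrence out[i][j] = in[i-1][j-1] + out[i-1][j] + out[i][j-1] - out[i-1][j-1]; integralImageCPU is a hypothetical name for illustration, not a function from the file.

#include <cstdio>
#include <vector>

// Reference summed-area table with the same zero-padded layout as the GPU code:
// per channel the output is (h+1) x (w+1), row 0 and column 0 are zero, and
// out[i][j] holds the sum of in[0..i-1][0..j-1].
std::vector<float> integralImageCPU(const std::vector<float>& in,
                                    int channels, int h, int w) {
    std::vector<float> out((size_t)channels * (h + 1) * (w + 1), 0.0f);
    for (int c = 0; c < channels; ++c) {
        const float* src = in.data()  + (size_t)c * h * w;
        float*       dst = out.data() + (size_t)c * (h + 1) * (w + 1);
        for (int i = 1; i <= h; ++i)
            for (int j = 1; j <= w; ++j)
                dst[i * (w + 1) + j] = src[(i - 1) * w + (j - 1)]
                                     + dst[(i - 1) * (w + 1) + j]
                                     + dst[i * (w + 1) + (j - 1)]
                                     - dst[(i - 1) * (w + 1) + (j - 1)];
    }
    return out;
}

int main() {
    const std::vector<float> img = {1, 2, 3, 4};        // one 2x2 channel
    const std::vector<float> ii  = integralImageCPU(img, 1, 2, 2);
    std::printf("total sum = %g\n", ii[2 * 3 + 2]);     // expect 10
    return 0;
}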
34b316479113b66e2f0f3e5f0dc6d8bbcecc2c76.cu
#include <iostream> #include <stdio.h> #include <assert.h> #include <algorithm> #include <cmath> #include <cuda_runtime.h> #include <cublas_v2.h> #include "integral-strided-cuda.hpp" #define BLOCK_SIZE 32 #define BLOCK_CHANNELS (1024 / (BLOCK_SIZE * BLOCK_SIZE)) using std::max; using std::min; using std::floor; using std::ceil; cublasHandle_t cublasHandle; float *CUDA_ZERO_FLOAT, *CUDA_ONE_FLOAT; // for cublas in device pointer mode extern "C" void _initCublasHandle() { if (cublasCreate(&cublasHandle) != CUBLAS_STATUS_SUCCESS) { printf ("CUBLAS initialization failed\n"); } cublasSetPointerMode(cublasHandle, CUBLAS_POINTER_MODE_DEVICE); // TODO: at shutdown, `cublasDestroy(handle);` // TODO: deallocate this! float zeroOne[] = {0, 1}; cudaMalloc((void**)&CUDA_ZERO_FLOAT, sizeof(zeroOne)); CUDA_ONE_FLOAT = CUDA_ZERO_FLOAT + 1; cudaMemcpy(CUDA_ZERO_FLOAT, zeroOne, sizeof(zeroOne), cudaMemcpyHostToDevice); } // TODO remove this code #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) inline void __cudaSafeCall( cudaError err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( cudaSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } inline void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } // More careful checking. However, this will affect performance. // Comment away if needed. err = cudaDeviceSynchronize(); if( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } /************************ Integral image computation ************************/ __global__ void accumulateRowsKernel( float *input, float *output, int channels, int totalRows, int w); __global__ void accumulateColsKernel( float *input, float *output, int channels, int h, int w); __global__ void accumulateColsInplaceKernel( float *input, int channels, int h, int w); __global__ void accumulateColsInplaceTransposedKernel( float *input, int channels, int h, int w); extern "C" void integralImageCuda(float *input, float *output, int channels, int h, int w, float *tmp) { int blockSize1D, gridSize1D; int totalCols = channels * w; blockSize1D = BLOCK_SIZE * BLOCK_SIZE; gridSize1D = (totalCols + blockSize1D - 1) / blockSize1D; accumulateColsKernel <<<gridSize1D, blockSize1D>>> (input, output, channels, h, w); cublasSgeam( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, channels * (h+1), w+1, CUDA_ONE_FLOAT, output, w+1, CUDA_ZERO_FLOAT, tmp, channels * (h+1), tmp, channels * (h+1)); int totalRows = channels * h; blockSize1D = BLOCK_SIZE * BLOCK_SIZE; gridSize1D = (totalRows + blockSize1D - 1) / blockSize1D; accumulateColsInplaceTransposedKernel <<<gridSize1D, blockSize1D>>> (tmp, channels, h, w); cublasSgeam( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, w+1, channels * (h+1), CUDA_ONE_FLOAT, tmp, channels * (h+1), CUDA_ZERO_FLOAT, output, w+1, output, w+1); } /* extern "C" void integralImageInplaceCuda(float *input, float *output, int channels, int h, int w) { int blockSize1D, gridSize1D; int totalCols = channels * w; blockSize1D = BLOCK_SIZE * BLOCK_SIZE; gridSize1D = (totalCols + blockSize1D - 1) / blockSize1D; 
accumulateColsKernel <<<gridSize1D, blockSize1D>>> (input, output, channels, h, w); inplace::transpose(true, output, channels * (h+1), w+1); int totalRows = channels * h; blockSize1D = BLOCK_SIZE * BLOCK_SIZE; gridSize1D = (totalRows + blockSize1D - 1) / blockSize1D; accumulateColsInplaceTransposedKernel <<<gridSize1D, blockSize1D>>> (output, channels, h, w); inplace::transpose(true, output, w+1, channels * (h+1)); } */ __global__ void accumulateRowsKernel( float *input, float *output, int channels, int h, int w) { // view multichannel image as a multiline single-channel image int globalRowIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (globalRowIdx < channels * h) { float *outputRow = output + (globalRowIdx + globalRowIdx / h + 1) * (w+1) + 1; outputRow[-1] = 0; double sum = 0; for (int i = 0; i < w; ++i) { sum += input[globalRowIdx * w + i]; outputRow[i] = static_cast<float>(sum); } // need to zero the (0,0) corner of the output separately >:( output[(globalRowIdx / h) * (w+1) * (h+1)] = 0; } } __global__ void accumulateColsKernel(float *input, float *output, int channels, int h, int w) { // global column index (of all `channels * w` columns in this image) int colIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (colIdx < channels * w) { // jump to current channel input += (colIdx / w) * h * w; output += (colIdx / w) * (h+1) * (w+1); colIdx %= w; // switch to local column index, ++colIdx; // it's 1-indexed because first output column is always zero output[colIdx] = 0; // first element of every column is always zero double sum = 0; for (int i = 1; i <= h; ++i) { sum += static_cast<double>(input[(i-1) * w + colIdx - 1]); output[i * (w+1) + colIdx] = static_cast<float>(sum); } } } __global__ void accumulateColsInplaceTransposedKernel(float *input, int channels, int h, int w) { // in-place. // input is a `(w+1) x channels * (h+1)` array // global column index (of all `channels * w` columns in this image) int colIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (colIdx < channels * h) { // need to zero the (0,0) corner of the output separately >:( input[(colIdx / h) * (h+1)] = 0; colIdx += colIdx / h + 1; // make `colIdx` the (h+1)-array indexer input[colIdx] = 0; // first element of every column is always zero double sum = 0; for (int i = 1; i <= w; ++i) { float *currentElement = &input[i * channels * (h+1) + colIdx]; sum += static_cast<double>(*currentElement); *currentElement = static_cast<float>(sum); } } } __global__ void accumulateColsInplaceKernel(float *input, int channels, int h, int w) { // in-place. 
// input is already a `channels * (h+1) x (w+1)` array // global column index (of all `channels * w` columns in this image) int colIdx = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (colIdx < channels * w) { input += (colIdx / w) * (h+1) * (w+1); // jump to current channel colIdx %= w; // switch to local column index, ++colIdx; // it's 1-indexed because first output column is always zero input[colIdx] = 0; // first element of every column is always zero double sum = 0; for (int i = 1; i <= h; ++i) { float *currentElement = &input[i * (w+1) + colIdx]; sum += static_cast<double>(*currentElement); *currentElement = static_cast<float>(sum); } } } /************************ updateOutput ************************/ __global__ void forwardKernel( float *intData, float *outData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, float *areaCoeff) { int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; int z = BLOCK_CHANNELS * blockIdx.z + threadIdx.z; if (x < h and y < w and z < nWindows) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at indices // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. int t = max(0, min(x+(int)ceil (xMin[z] ), h) ); int b = max(0, min(x+(int)floor(xMax[z]+1), h) ); int l = max(0, min(y+(int)ceil (yMin[z] ), w) ); int r = max(0, min(y+(int)floor(yMax[z]+1), w) ); outData[z*w*h + x*w + y] = areaCoeff[z] * ( intData[b*(w+1) + r] - intData[t*(w+1) + r] - intData[b*(w+1) + l] + intData[t*(w+1) + l]); } } __global__ void forwardNoNormReplicateKernel( float *intData, int intDataStrideChannel, float *outData, int h, int w, int nInputPlane, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; const int z = BLOCK_CHANNELS * blockIdx.z + threadIdx.z; const int inPlaneIdx = z / nWindows; intData += intDataStrideChannel * inPlaneIdx; if (x < h and y < w and z < nInputPlane*nWindows) { // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int t = max(0, min(x+(int) ceil(xMin[z]) , h-1) ); const int b = max(1, min(x+(int)floor(xMax[z])+1, h ) ); const int l = max(0, min(y+(int) ceil(yMin[z]) , w-1) ); const int r = max(1, min(y+(int)floor(yMax[z])+1, w ) ); double outValue = 0; outValue += intData[b*(w+1) + r]; outValue -= intData[t*(w+1) + r]; outValue -= intData[b*(w+1) + l]; outValue += intData[t*(w+1) + l]; outData[z*w*h + x*w + y] = outValue; } } __global__ void forwardNoNormReplicateFracKernel( const float *intData, const int intDataStrideChannel, float *const outData, const int h, const int w, const int nInputPlane, const int nWindows, const float *xMin, const float *xMax, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow, const int inDataStrideChannel) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w; id /= w; const int x = id % h; id /= h; const int windowIdx = id % nWindows; id /= nWindows; const int & inPlaneIdx = id; intData += intDataStrideChannel * inPlaneIdx; inData += inDataStrideChannel * inPlaneIdx; if (x < h and y < w and windowIdx < nWindows and inPlaneIdx < nInputPlane) { const int rem = windowIdx % 4; const float xMinStretched = rem == 0 ? -h : xMin[inPlaneIdx*(nWindows-(nWindows-0+3) / 4) + 3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[inPlaneIdx*(nWindows-(nWindows-1+3) / 4) + 3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[inPlaneIdx*(nWindows-(nWindows-2+3) / 4) + 3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[inPlaneIdx*(nWindows-(nWindows-3+3) / 4) + 3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. 
const int xMinCurr = (int)ceil(xMinStretched); const float xMinCurrFrac = (float)xMinCurr - xMinStretched; const int yMinCurr = (int)ceil(yMinStretched); const float yMinCurrFrac = (float)yMinCurr - yMinStretched; const float xMaxCurrFrac = xMaxStretched - floor(xMaxStretched); const int xMaxCurr = (int)floor(xMaxStretched) + 1; const float yMaxCurrFrac = yMaxStretched - floor(yMaxStretched); const int yMaxCurr = (int)floor(yMaxStretched) + 1; const int t = max(0, min(x+xMinCurr, h-1) ); const int b = max(1, min(x+xMaxCurr, h) ); const int l = max(0, min(y+yMinCurr, w-1) ); const int r = max(1, min(y+yMaxCurr, w) ); double outValue = 0; outValue += intData[b*(w+1) + r]; outValue -= intData[t*(w+1) + r]; outValue -= intData[b*(w+1) + l]; outValue += intData[t*(w+1) + l]; // -- xMax border outValue += ( intData[max(1,min(x+xMaxCurr+1,h))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(1,min(x+xMaxCurr+1,h))*(w+1) + max(0,min(y+yMinCurr,w-1))] + intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(0,min(y+yMinCurr,w-1))] ) * xMaxCurrFrac; // -- yMax border outValue += ( intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(1,min(y+yMaxCurr+1,w))] - intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(1,min(y+yMaxCurr+1,w))] + intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(1,min(y+yMaxCurr,w))] ) * yMaxCurrFrac; // -- xMin border outValue += ( intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(0,min(x+xMinCurr-1,h-1))*(w+1) + max(1,min(y+yMaxCurr,w))] - intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(0,min(y+yMinCurr,w-1))] + intData[max(0,min(x+xMinCurr-1,h-1))*(w+1) + max(0,min(y+yMinCurr,w-1))] ) * xMinCurrFrac; // -- yMin border outValue += ( intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(0,min(y+yMinCurr,w-1))] - intData[max(1,min(x+xMaxCurr,h))*(w+1) + max(0,min(y+yMinCurr-1,w-1))] - intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(0,min(y+yMinCurr,w-1))] + intData[max(0,min(x+xMinCurr,h-1))*(w+1) + max(0,min(y+yMinCurr-1,w-1))] ) * yMinCurrFrac; // -- corner pixels outValue += xMaxCurrFrac*yMaxCurrFrac * ( (x+xMaxCurr > h-1 or y+yMaxCurr > w-1 or x+xMaxCurr <= 0 or y+yMaxCurr <= 0) ? 0 : inData[(x+xMaxCurr)*inDataStrideRow + (y+yMaxCurr)]); outValue += xMinCurrFrac*yMaxCurrFrac * ( (x+xMinCurr-1 >= h-1 or y+yMaxCurr > w-1 or x+xMinCurr-1 < 0 or y+yMaxCurr <= 0) ? 0 : inData[(x+xMinCurr-1)*inDataStrideRow + (y+yMaxCurr)]); outValue += xMaxCurrFrac*yMinCurrFrac * ( (x+xMaxCurr > h-1 or y+yMinCurr-1 >= w-1 or x+xMaxCurr <= 0 or y+yMinCurr-1 < 0) ? 0 : inData[(x+xMaxCurr)*inDataStrideRow + (y+yMinCurr-1)]); outValue += xMinCurrFrac*yMinCurrFrac * ( (x+xMinCurr-1 >= h-1 or y+yMinCurr-1 >= w-1 or x+xMinCurr-1 < 0 or y+yMinCurr-1 < 0) ? 
0 : inData[(x+xMinCurr-1)*inDataStrideRow + (y+yMinCurr-1)]); outData[(inPlaneIdx*nWindows+windowIdx)*w*h + x*w + y] = outValue; } } extern "C" { void forwardCuda( float *intData, int h, int w, int nWindows, float *outData, float *xMin, float *xMax, float *yMin, float *yMax, float *areaCoeff) { dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid((h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y, (nWindows + dimBlock.z - 1) / dimBlock.z); forwardKernel <<<dimGrid, dimBlock>>> (intData, outData, h, w, nWindows, xMin, xMax, yMin, yMax, areaCoeff); } void forwardNoNormReplicateCuda(THCState *state, float *intData, int intDataStrideChannel, float *outData, int h, int w, int nInputPlane, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::forwardNoNormReplicateCuda(state, intData, intDataStrideChannel, outData, 1, nInputPlane, nWindows, h, w, xMin, xMax, yMin, yMax, strideH, strideW); return; } // TODO: 1D grid dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y, (nInputPlane*nWindows + dimBlock.z - 1) / dimBlock.z); forwardNoNormReplicateKernel <<<dimGrid, dimBlock>>> ( intData, intDataStrideChannel, outData, h, w, nInputPlane, nWindows, xMin, xMax, yMin, yMax); } void forwardNoNormReplicateFracCuda(THCState *state, float *intData, int intDataStrideChannel, float *outData, int h, int w, int nInputPlane, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, float *inData, int inDataStrideRow, int inDataStrideChannel, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::forwardNoNormReplicateFracCuda(state, intData, intDataStrideChannel, outData, 1, nInputPlane, nWindows, h, w, xMin, xMax, yMin, yMax, inData, inDataStrideRow, inDataStrideChannel, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nInputPlane*nWindows*h*w + dimBlock.x - 1) / dimBlock.x); forwardNoNormReplicateFracKernel <<<dimGrid, dimBlock>>> ( intData, intDataStrideChannel, outData, h, w, nInputPlane, nWindows, xMin, xMax, yMin, yMax, inData, inDataStrideRow, inDataStrideChannel); } /************************ updateGradInput ************************/ __global__ void updateGradInputPlanewiseKernel( float *gradOutputIntData, float *gradInputData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; if (x < h and y < w) { int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr; double outValue = 0; for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) { xMinCurr = (int)ceil(-xMax[windowIdx]); yMinCurr = (int)ceil(-yMax[windowIdx]); xMaxCurr = (int)floor(-xMin[windowIdx]) + 1; yMaxCurr = (int)floor(-yMin[windowIdx]) + 1; // The following code block implements these lines // as if they were executed simultaneously (see `void updateGradInputFrac()`): // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr); // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr); // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr); // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? 
w+66 : yMaxCurr); bool needToChangeMin, needToChangeMax; needToChangeMin = x == 0 and xMaxCurr >= 0; needToChangeMax = x == h-1 and xMinCurr <= 0; if (needToChangeMin) xMinCurr = 0; if (needToChangeMax) xMaxCurr = h+66; needToChangeMin = y == 0 and yMaxCurr >= 0; needToChangeMax = y == w-1 and yMinCurr <= 0; if (needToChangeMin) yMinCurr = 0; if (needToChangeMax) yMaxCurr = w+66; const int t = max(0, min(x+xMinCurr, h) ); const int b = max(0, min(x+xMaxCurr, h) ); const int l = max(0, min(y+yMinCurr, w) ); const int r = max(0, min(y+yMaxCurr, w) ); outValue += gradOutputIntData[b*(w+1) + r]; outValue -= gradOutputIntData[t*(w+1) + r]; outValue -= gradOutputIntData[b*(w+1) + l]; outValue += gradOutputIntData[t*(w+1) + l]; // go to the next channel gradOutputIntData += (h+1)*(w+1); } gradInputData[x*w + y] = outValue; } } __global__ void updateGradInputPlanewiseFracKernel( const float *gradOutputIntData, float *const gradInputData, const int h, const int w, const int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, const float *gradOutputData, const int gradOutputStrideRow, const int gradOutputStrideChannel) { const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y; if (x < h and y < w) { int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr; double outValue = 0; for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) { const int rem = windowIdx % 4; const float xMinStretched = rem == 0 ? -h : xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; // Must add 1 to xMax/yMax/xMin/yMin due to OpenCV's // `integral()` behavior. Namely, I(x,0) and I(0,y) are // always 0 (so it's a C-style array sum). // However, when computing sums, we subtract values at points // like y+yMin-1 and x+xMin-1, so we also SUBTRACT 1 from xMin // and yMin, and thus finally they are not affected. xMinCurr = (int)ceil(-xMaxStretched); yMinCurr = (int)ceil(-yMaxStretched); const float xMinCurrFrac = (float)xMinCurr + xMaxStretched; const float yMinCurrFrac = (float)yMinCurr + yMaxStretched; xMaxCurr = (int)floor(-xMinStretched) + 1; yMaxCurr = (int)floor(-yMinStretched) + 1; const float xMaxCurrFrac = -xMinStretched + 1 - xMaxCurr; const float yMaxCurrFrac = -yMinStretched + 1 - yMaxCurr; // The following code block implements these lines // as if they were executed simultaneously (see `void updateGradInputFrac()`): // xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr); // xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr); // yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr); // yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr); bool needToChangeMin, needToChangeMax; needToChangeMin = x == 0 and xMaxCurr >= 0; needToChangeMax = x == h-1 and xMinCurr <= 0; if (needToChangeMin) xMinCurr = 0; if (needToChangeMax) xMaxCurr = h+66; needToChangeMin = y == 0 and yMaxCurr >= 0; needToChangeMax = y == w-1 and yMinCurr <= 0; if (needToChangeMin) yMinCurr = 0; if (needToChangeMax) yMaxCurr = w+66; const int t = max(0, min(x+xMinCurr, h) ); const int b = max(0, min(x+xMaxCurr, h) ); const int l = max(0, min(y+yMinCurr, w) ); const int r = max(0, min(y+yMaxCurr, w) ); const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t; const int bAdv = x+xMaxCurr >= 0 ? 
max(0, min(b+1, h)) : b; const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l; const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r; // TODO: 1D grid outValue += gradOutputIntData[b*(w+1) + r]; outValue -= gradOutputIntData[t*(w+1) + r]; outValue -= gradOutputIntData[b*(w+1) + l]; outValue += gradOutputIntData[t*(w+1) + l]; // -- xMax border outValue += ( gradOutputIntData[bAdv*(w+1) + r] - gradOutputIntData[b *(w+1) + r] - gradOutputIntData[bAdv*(w+1) + l] + gradOutputIntData[b *(w+1) + l] ) * xMaxCurrFrac; // -- yMax border outValue += ( gradOutputIntData[b*(w+1) + rAdv] - gradOutputIntData[b*(w+1) + r ] - gradOutputIntData[t*(w+1) + rAdv] + gradOutputIntData[t*(w+1) + r ] ) * yMaxCurrFrac; // -- xMin border outValue += ( gradOutputIntData[t *(w+1) + r] - gradOutputIntData[tAdv*(w+1) + r] - gradOutputIntData[t *(w+1) + l] + gradOutputIntData[tAdv*(w+1) + l] ) * xMinCurrFrac; // -- yMin border outValue += ( gradOutputIntData[b*(w+1) + l ] - gradOutputIntData[b*(w+1) + lAdv] - gradOutputIntData[t*(w+1) + l ] + gradOutputIntData[t*(w+1) + lAdv] ) * yMinCurrFrac; // -- corner pixels outValue += xMaxCurrFrac*yMaxCurrFrac * ( (x+xMaxCurr > h-1 or y+yMaxCurr > w-1 or x+xMaxCurr < 0 or y+yMaxCurr < 0 or b == bAdv or r == rAdv) ? 0 : gradOutputData[b*gradOutputStrideRow + r]); outValue += xMinCurrFrac*yMaxCurrFrac * ( (x+xMinCurr-1 > h-1 or y+yMaxCurr > w-1 or x+xMinCurr-1 < 0 or y+yMaxCurr < 0 or t == tAdv or r == rAdv) ? 0 : gradOutputData[tAdv*gradOutputStrideRow + r]); outValue += xMaxCurrFrac*yMinCurrFrac * ( (x+xMaxCurr > h-1 or y+yMinCurr-1 > w-1 or x+xMaxCurr < 0 or y+yMinCurr-1 < 0 or b == bAdv or l == lAdv) ? 0 : gradOutputData[b*gradOutputStrideRow + lAdv]); outValue += xMinCurrFrac*yMinCurrFrac * ( (x+xMinCurr-1 > h-1 or y+yMinCurr-1 > w-1 or x+xMinCurr-1 < 0 or y+yMinCurr-1 < 0 or t == tAdv or l == lAdv) ? 
0 : gradOutputData[tAdv*gradOutputStrideRow + lAdv]); // go to the next channel gradOutputIntData += (h+1)*(w+1); gradOutputData += gradOutputStrideChannel; } gradInputData[x*w + y] = outValue; } } void updateGradInputPlanewiseCuda( float *gradOutputIntData, float *gradInputData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::updateGradInputReplicatePlanewiseCuda( gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y); updateGradInputPlanewiseKernel <<<dimGrid, dimBlock>>> ( gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax); } void updateGradInputPlanewiseFracCuda( float *gradOutputIntData, float *gradInputData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax, float *gradOutputData, int gradOutputStrideRow, int gradOutputStrideChannel, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::updateGradInputReplicatePlanewiseFracCuda( gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, gradOutputData, gradOutputStrideRow, gradOutputStrideChannel, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, BLOCK_CHANNELS); dim3 dimGrid( (h + dimBlock.x - 1) / dimBlock.x, (w + dimBlock.y - 1) / dimBlock.y); updateGradInputPlanewiseFracKernel <<<dimGrid, dimBlock>>> ( gradOutputIntData, gradInputData, h, w, nWindows, xMin, xMax, yMin, yMax, gradOutputData, gradOutputStrideRow, gradOutputStrideChannel); } /************************ accGradParameters ************************/ __global__ void xMaxDeltaIntegralFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int rem = windowIdx % 4; if (rem == 1) { tmpArray[(x-1)*w + (y-1)] = 0; } else { // const float xMinStretched = rem == 0 ? -h : // xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; // const int xMinInt = (int)ceil(xMinStretched-1); // const float xMinFrac = xMinInt-xMinStretched+1; const int yMinInt = (int)ceil(yMinStretched-1); const float yMinFrac = yMinInt-yMinStretched+1; const int xMaxInt = (int)floor(xMaxStretched); // const float xMaxFrac = xMaxStretched-xMaxInt; const int yMaxInt = (int)floor(yMaxStretched); const float yMaxFrac = yMaxStretched-yMaxInt; // const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 
0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += brCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac); delta += blCorner * (y+yMinInt >= w ? 1.0f : yMinFrac); delta += intData[max(0,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMaxInt >= 1 and x+xMaxInt < h); tmpArray[(x-1)*w + (y-1)] = delta; } } } __global__ void xMinDeltaIntegralFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax, const float *inData, const int inDataStrideRow) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int rem = windowIdx % 4; if (rem == 0) { tmpArray[(x-1)*w + (y-1)] = 0; } else { const float xMinStretched = rem == 0 ? -h : xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; // const float xMaxStretched = rem == 1 ? h : // xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; const int xMinInt = (int)ceil(xMinStretched-1); // const float xMinFrac = xMinInt-xMinStretched+1; const int yMinInt = (int)ceil(yMinStretched-1); const float yMinFrac = yMinInt-yMinStretched+1; // const int xMaxInt = (int)floor(xMaxStretched); // const float xMaxFrac = xMaxStretched-xMaxInt; const int yMaxInt = (int)floor(yMaxStretched); const float yMaxFrac = yMaxStretched-yMaxInt; const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += trCorner * (y+yMaxInt < 1 ? 1.0f : yMaxFrac); delta += tlCorner * (y+yMinInt >= w ? 
1.0f : yMinFrac); delta += intData[max(0,min(x+xMinInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt-1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMinInt-1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMinInt >= 1 and x+xMinInt < h); tmpArray[(x-1)*w + (y-1)] *= -delta; } } } __global__ void yMaxDeltaIntegralFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMax, const float *inData, const int inDataStrideRow) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int rem = windowIdx % 4; if (rem == 3) { tmpArray[(x-1)*w + (y-1)] = 0; } else { const float xMinStretched = rem == 0 ? -h : xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; // const float yMinStretched = rem == 2 ? -w : // yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; const float yMaxStretched = rem == 3 ? w : yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; const int xMinInt = (int)ceil(xMinStretched-1); const float xMinFrac = xMinInt-xMinStretched+1; // const int yMinInt = (int)ceil(yMinStretched-1); // const float yMinFrac = yMinInt-yMinStretched+1; const int xMaxInt = (int)floor(xMaxStretched); const float xMaxFrac = xMaxStretched-xMaxInt; const int yMaxInt = (int)floor(yMaxStretched); // const float yMaxFrac = yMaxStretched-yMaxInt; // const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; // const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMinInt-1))]; const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += trCorner * (x+xMinInt >= h ? 1.0f : xMinFrac); delta += brCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac); delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt+1, w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt+1, w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta *= (y+yMaxInt >= 1 and y+yMaxInt < w); tmpArray[(x-1)*w + (y-1)] = delta; } } } __global__ void yMinDeltaIntegralFracKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin, const float *inData, const int inDataStrideRow) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int rem = windowIdx % 4; if (rem == 2) { tmpArray[(x-1)*w + (y-1)] = 0; } else { const float xMinStretched = rem == 0 ? 
-h : xMin[3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem)]; const float xMaxStretched = rem == 1 ? h : xMax[3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem)]; const float yMinStretched = rem == 2 ? -w : yMin[3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem)]; // const float yMaxStretched = rem == 3 ? w : // yMax[3*(windowIdx/4) + (rem > 3 ? (rem-1) : rem)]; const int xMinInt = (int)ceil(xMinStretched-1); const float xMinFrac = xMinInt-xMinStretched+1; const int yMinInt = (int)ceil(yMinStretched-1); // const float yMinFrac = yMinInt-yMinStretched+1; const int xMaxInt = (int)floor(xMaxStretched); const float xMaxFrac = xMaxStretched-xMaxInt; // const int yMaxInt = (int)floor(yMaxStretched); // const float yMaxFrac = yMaxStretched-yMaxInt; const float tlCorner = y+yMinInt < 1 or x+xMinInt < 1 ? 0 : inData[ max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; const float blCorner = y+yMinInt < 1 or x+xMaxInt >= h ? 0 : inData[ max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + max(0,min(w-1,y+yMinInt-1))]; // const float trCorner = y+yMaxInt >= w or x+xMinInt < 1 ? 0 : // inData[ // max(0,min(h-1,x+xMinInt-1)) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; // const float brCorner = y+yMaxInt >= w or x+xMaxInt >= h ? 0 : // inData[ // max(0,min(h-1,x+xMaxInt )) * inDataStrideRow + // max(0,min(w-1,y+yMaxInt ))]; float delta = 0; delta += tlCorner * (x+xMinInt >= h ? 1.0f : xMinFrac); delta += blCorner * (x+xMaxInt < 1 ? 1.0f : xMaxFrac); delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt , w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt-1, w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt , w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt-1, w))]; delta *= (y+yMinInt >= 1 and y+yMinInt < w); tmpArray[(x-1)*w + (y-1)] *= -delta; } } } void backwardFracCuda( float *intData, float *tmpArray, int nWindows, int h, int w, float *xMin, float *xMax, float *yMin, float *yMax, float *inData, int inDataStrideRow, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::backwardReplicateFracCuda( intData, tmpArray, nWindows, h, w, xMin, xMax, yMin, yMax, inData, inDataStrideRow, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x); xMaxDeltaIntegralFracKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 0*nWindows*h*w, nWindows, h, w, xMax, yMin, yMax, inData, inDataStrideRow); xMinDeltaIntegralFracKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 1*nWindows*h*w, nWindows, h, w, xMin, yMin, yMax, inData, inDataStrideRow); yMaxDeltaIntegralFracKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 2*nWindows*h*w, nWindows, h, w, xMin, xMax, yMax, inData, inDataStrideRow); yMinDeltaIntegralFracKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 3*nWindows*h*w, nWindows, h, w, xMin, xMax, yMin, inData, inDataStrideRow); } __global__ void xMaxDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMax, const float *yMin, const float *yMax) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; // const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = 
(int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(1,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(1,min(x+xMaxInt+1, h))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMaxInt , h))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMaxInt >= 1 and x+xMaxInt < h); tmpArray[(x-1)*w + (y-1)] = delta; } } __global__ void xMinDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); // const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMinInt , h-1))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt-1, h ))*(w+1) + max(0,min(y+yMaxInt, w))]; delta -= intData[max(0,min(x+xMinInt , h-1))*(w+1) + max(0,min(y+yMinInt, w))]; delta += intData[max(0,min(x+xMinInt-1, h ))*(w+1) + max(0,min(y+yMinInt, w))]; delta *= (x+xMinInt >= 1 and x+xMinInt < h); tmpArray[(x-1)*w + (y-1)] *= -delta; } } __global__ void yMaxDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMax) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int xMinInt = (int)ceil(xMin[windowIdx]-1); // const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(1,min(y+yMaxInt+1, w))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(1,min(y+yMaxInt+1, w))]; delta += intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMaxInt , w))]; delta *= (y+yMaxInt >= 1 and y+yMaxInt < w); tmpArray[(x-1)*w + (y-1)] = delta; } } __global__ void yMinDeltaIntegralKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMin) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const int y = id % w + 1; id /= w; // 1-indexed const int x = id % h + 1; id /= h; // 1-indexed const int & windowIdx = id; if (windowIdx < nWindows and x <= h and y <= w) { tmpArray += windowIdx * h * w; const int xMinInt = (int)ceil(xMin[windowIdx]-1); const int yMinInt = (int)ceil(yMin[windowIdx]-1); const int xMaxInt = (int)floor(xMax[windowIdx]); // const int yMaxInt = (int)floor(yMax[windowIdx]); float delta = 0; delta += intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt , w ))]; delta -= intData[max(0,min(x+xMaxInt, h))*(w+1) + max(0,min(y+yMinInt-1, w-1))]; delta -= intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt , w ))]; delta += 
intData[max(0,min(x+xMinInt, h))*(w+1) + max(0,min(y+yMinInt-1, w-1))]; delta *= (y+yMinInt >= 1 and y+yMinInt < w); tmpArray[(x-1)*w + (y-1)] *= -delta; } } void backwardCuda( float *intData, float *tmpArray, int nWindows, int h, int w, float *xMin, float *xMax, float *yMin, float *yMax, const int strideH, const int strideW) { if (strideH != 1 or strideW != 1) { strided::backwardReplicateCuda( intData, tmpArray, nWindows, h, w, xMin, xMax, yMin, yMax, strideH, strideW); return; } dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((nWindows * h * w + dimBlock.x - 1) / dimBlock.x); xMaxDeltaIntegralKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 0*nWindows*h*w, nWindows, h, w, xMax, yMin, yMax); xMinDeltaIntegralKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 1*nWindows*h*w, nWindows, h, w, xMin, yMin, yMax); yMaxDeltaIntegralKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 2*nWindows*h*w, nWindows, h, w, xMin, xMax, yMax); yMinDeltaIntegralKernel <<<dimGrid, dimBlock>>> ( intData, tmpArray + 3*nWindows*h*w, nWindows, h, w, xMin, xMax, yMin); } __global__ void toBorderAddGradParamsKernel( const int nWindows, float *const gradXMax, float *const gradXMin, float *const gradYMax, float *const gradYMin, const float scale, const float *tmpArraySumGPU) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; if (id < 4*nWindows) { int paramIdx = id / nWindows; float *const gradParam = (float*[]){gradXMax, gradXMin, gradYMax, gradYMin}[paramIdx]; const int windowIdx = id % nWindows; const int rem = windowIdx % 4; // use streams, not this arithmetic insanity if ((5-rem) % 4 != paramIdx) { gradParam[3*(windowIdx/4) + (rem > (5-paramIdx) % 4 ? (rem-1) : rem)] += scale * tmpArraySumGPU[id]; } } } // TODO: hey...use streams...would you be USING STREAMS INSTEAD please! void toBorderAddGradParams( const int nWindows, float *const gradXMax, float *const gradXMin, float *const gradYMax, float *const gradYMin, const float scale, const float *const tmpArraySumGPU) { dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((4*nWindows + dimBlock.x - 1) / dimBlock.x); toBorderAddGradParamsKernel <<<dimGrid, dimBlock>>> ( nWindows, gradXMax, gradXMin, gradYMax, gradYMin, scale, tmpArraySumGPU); } /************************ Other stuff ************************/ __global__ void dirtyFixWindowsKernel( float *const xMin, float *const xMax, float *const yMin, float *const yMax, const int nInputPlane, const int nWindows, const float h, const float w, const float minWidth) { int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x; const bool correctingY = id >= nInputPlane*nWindows; if (correctingY) { id -= nInputPlane*nWindows; } const int windowIdx = id % nWindows; id /= nWindows; const int & inPlaneIdx = id; const int rem = windowIdx % 4; const int xMinIdx = inPlaneIdx*(nWindows-(nWindows-0+3) / 4) + 3*(windowIdx/4) + (rem > 0 ? (rem-1) : rem); const int xMaxIdx = inPlaneIdx*(nWindows-(nWindows-1+3) / 4) + 3*(windowIdx/4) + (rem > 1 ? (rem-1) : rem); const int yMinIdx = inPlaneIdx*(nWindows-(nWindows-2+3) / 4) + 3*(windowIdx/4) + (rem > 2 ? (rem-1) : rem); const int yMaxIdx = inPlaneIdx*(nWindows-(nWindows-3+3) / 4) + 3*(windowIdx/4) + (rem > 3 ? 
(rem-1) : rem); if (inPlaneIdx < nInputPlane and windowIdx < nWindows) { float paramMin, paramMax; if (not correctingY) { if (rem == 2 or rem == 3) { paramMin = max(-h+1, min(h-1, xMin[xMinIdx])); paramMax = max(-h+1, min(h-1, xMax[xMaxIdx])); if (paramMin + minWidth - 0.99 > paramMax) { const float mean = 0.5 * (paramMin + paramMax); paramMin = mean - 0.5 * (minWidth - 0.9); paramMax = mean + 0.5 * (minWidth - 0.9); } xMin[xMinIdx] = paramMin; xMax[xMaxIdx] = paramMax; } else if (rem == 0) { xMax[xMaxIdx] = max(-h+1, min(h-1, xMax[xMaxIdx])); } else if (rem == 1) { xMin[xMinIdx] = max(-h+1, min(h-1, xMin[xMinIdx])); } } else { if (rem == 0 or rem == 1) { paramMin = max(-w+1, min(w-1, yMin[yMinIdx])); paramMax = max(-w+1, min(w-1, yMax[yMaxIdx])); if (paramMin + minWidth - 0.99 > paramMax) { const float mean = 0.5 * (paramMin + paramMax); paramMin = mean - 0.5 * (minWidth - 0.9); paramMax = mean + 0.5 * (minWidth - 0.9); } yMin[yMinIdx] = paramMin; yMax[yMaxIdx] = paramMax; } else if (rem == 2) { yMax[yMaxIdx] = max(-w+1, min(w-1, yMax[yMaxIdx])); } else if (rem == 3) { yMin[yMinIdx] = max(-w+1, min(w-1, yMin[yMinIdx])); } } } } void dirtyFixWindows( float *const xMin, float *const xMax, float *const yMin, float *const yMax, const int nInputPlane, const int nWindows, const int h, const int w, const float minWidth) { dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE); dim3 dimGrid((2*nInputPlane*nWindows + dimBlock.x - 1) / dimBlock.x); dirtyFixWindowsKernel <<<dimGrid, dimBlock>>> ( xMin, xMax, yMin, yMax, nInputPlane, nWindows, (float)h, (float)w, minWidth); } } // extern "C"
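// Illustrative helper (not part of the original kernels above; the name is
// made up): windows are processed in groups of four. For window number
// `windowIdx`, the parameter with index `rem = windowIdx % 4` (0 = xMin,
// 1 = xMax, 2 = yMin, 3 = yMax) is pinned to the image border (-h, h, -w or w)
// and is not stored, so each full group of four windows occupies only three
// slots in the corresponding parameter array. This reproduces the in-kernel
// index arithmetic for a single input plane.
static inline int packedWindowParamIndex(const int windowIdx, const int paramIdx)
{
    const int rem = windowIdx % 4;
    // When rem == paramIdx there is no stored value; callers use the border.
    return 3 * (windowIdx / 4) + (rem > paramIdx ? (rem - 1) : rem);
}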
37071e9412bea1801128ecfa046ed0d5810301b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. 
extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_rcbrtf (size_t n, float *result, float *x) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = rcbrtf(x[id]); } }
37071e9412bea1801128ecfa046ed0d5810301b8.cu
#include "includes.h" /* * JCudaVec - Vector operations for JCuda * http://www.jcuda.org * * Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org */ extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. 
extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument × p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" __global__ void vec_rcbrtf (size_t n, float *result, float *x) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < n) { result[id] = rcbrtf(x[id]); } }
9fbf361abc051d9ac5fc351467858ba58c6dc0ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <float.h> #include <math.h> #include <thrust/tuple.h> #include <cstdio> #include <tuple> #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" #include "utils/float_math.cuh" #include "utils/geometry_utils.cuh" namespace { // A structure for holding details about a pixel. struct Pixel { float z; int64_t idx; float dist; float3 bary; }; __device__ bool operator<(const Pixel& a, const Pixel& b) { return a.z < b.z; } __device__ float FloatMin3(const float p1, const float p2, const float p3) { return fminf(p1, fminf(p2, p3)); } __device__ float FloatMax3(const float p1, const float p2, const float p3) { return fmaxf(p1, fmaxf(p2, p3)); } // Get the xyz coordinates of the three vertices for the face given by the // index face_idx into face_verts. __device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts( const float* face_verts, int face_idx) { const float x0 = face_verts[face_idx * 9 + 0]; const float y0 = face_verts[face_idx * 9 + 1]; const float z0 = face_verts[face_idx * 9 + 2]; const float x1 = face_verts[face_idx * 9 + 3]; const float y1 = face_verts[face_idx * 9 + 4]; const float z1 = face_verts[face_idx * 9 + 5]; const float x2 = face_verts[face_idx * 9 + 6]; const float y2 = face_verts[face_idx * 9 + 7]; const float z2 = face_verts[face_idx * 9 + 8]; const float3 v0xyz = make_float3(x0, y0, z0); const float3 v1xyz = make_float3(x1, y1, z1); const float3 v2xyz = make_float3(x2, y2, z2); return thrust::make_tuple(v0xyz, v1xyz, v2xyz); } // Get the min/max x/y/z values for the face given by vertices v0, v1, v2. __device__ thrust::tuple<float2, float2, float2> GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { const float xmin = FloatMin3(v0.x, v1.x, v2.x); const float ymin = FloatMin3(v0.y, v1.y, v2.y); const float zmin = FloatMin3(v0.z, v1.z, v2.z); const float xmax = FloatMax3(v0.x, v1.x, v2.x); const float ymax = FloatMax3(v0.y, v1.y, v2.y); const float zmax = FloatMax3(v0.z, v1.z, v2.z); return thrust::make_tuple( make_float2(xmin, xmax), make_float2(ymin, ymax), make_float2(zmin, zmax)); } // Check if the point (px, py) lies outside the face bounding box face_bbox. // Return true if the point is outside. __device__ bool CheckPointOutsideBoundingBox( float3 v0, float3 v1, float3 v2, float blur_radius, float2 pxy) { const auto bbox = GetFaceBoundingBox(v0, v1, v2); const float2 xlims = thrust::get<0>(bbox); const float2 ylims = thrust::get<1>(bbox); const float2 zlims = thrust::get<2>(bbox); const float x_min = xlims.x - blur_radius; const float y_min = ylims.x - blur_radius; const float x_max = xlims.y + blur_radius; const float y_max = ylims.y + blur_radius; // Check if the current point is oustside the triangle bounding box. return (pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min); } // This function checks if a pixel given by xy location pxy lies within the // face with index face_idx in face_verts. One of the inputs is a list (q) // which contains Pixel structs with the indices of the faces which intersect // with this pixel sorted by closest z distance. If the point pxy lies in the // face, the list (q) is updated and re-orderered in place. In addition // the auxillary variables q_size, q_max_z and q_max_idx are also modified. 
// This code is shared between RasterizeMeshesNaiveCudaKernel and // RasterizeMeshesFineCudaKernel. template <typename FaceQ> __device__ void CheckPixelInsideFace( const float* face_verts, // (F, 3, 3) const int face_idx, int& q_size, float& q_max_z, int& q_max_idx, FaceQ& q, const float blur_radius, const float2 pxy, // Coordinates of the pixel const int K, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { const auto v012 = GetSingleFaceVerts(face_verts, face_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only need xy for barycentric coordinates and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Perform checks and skip if: // 1. the face is behind the camera // 2. the face is facing away from the camera // 3. the face has very small face area // 4. the pixel is outside the face bbox const float zmax = FloatMax3(v0.z, v1.z, v2.z); const bool outside_bbox = CheckPointOutsideBoundingBox( v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy); // Check if the face is visible to the camera. const bool back_face = face_area < 0.0; const bool zero_face_area = (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon); if (zmax < 0 || cull_backfaces && back_face || outside_bbox || zero_face_area) { return; } // Calculate barycentric coords and euclidean dist to triangle. const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 p_bary = !perspective_correct ? p_bary0 : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z); const float3 p_bary_clip = !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary); const float pz = p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z; if (pz < 0) { return; // Face is behind the image plane. } // Get abs squared distance const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy); // Use the unclipped bary coordinates to determine if the point is inside the // face. const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f; const float signed_dist = inside ? -dist : dist; // Check if pixel is outside blur region if (!inside && dist >= blur_radius) { return; } if (q_size < K) { // Just insert it. q[q_size] = {pz, face_idx, signed_dist, p_bary_clip}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max. 
q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesNaiveCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int H, const int W, const int K, int64_t* face_idxs, float* zbuf, float* pix_dists, float* bary) { // Simple version: One thread per output pixel int num_threads = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // batch index. const int pix_idx = i % (H * W); // Reverse ordering of X and Y axes const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordiantes of pixel. const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the faces. const int64_t face_start_idx = mesh_to_face_first_idx[n]; const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n]; // Loop through the faces in the mesh. for (int f = face_start_idx; f < face_stop_idx; ++f) { // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // TODO: make sorting an option as only top k is needed, not sorted values. 
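// Worked example of the K-buffer filled above: with K = 3 and candidate
// faces passing the checks with z = 0.9, 0.4, 0.7, 0.2, the first three fill
// q unsorted while the running maximum (z = 0.9) is tracked; the fourth
// candidate (z = 0.2) then overwrites that tracked maximum, leaving
// {0.2, 0.4, 0.7}. BubbleSort below orders the surviving entries by z so
// that q[0] holds the closest face for this pixel.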
BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { face_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist; bary[(idx + k) * 3 + 0] = q[k].bary.x; bary[(idx + k) * 3 + 1] = q[k].bary.y; bary[(idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesNaiveCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_faces_packed_first_idx, const at::Tensor& num_faces_per_mesh, const int image_size, const float blur_radius, const int num_closest, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK( num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0), "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx"); if (num_closest > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_faces_packed_first_idx_t{ mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; at::CheckedFrom c = "RasterizeMeshesNaiveCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_faces_packed_first_idx_t, num_faces_per_mesh_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int N = num_faces_per_mesh.size(0); // batch size. const int H = image_size; // Assume square images. const int W = image_size; const int K = num_closest; auto long_opts = num_faces_per_mesh.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesNaiveCudaKernel), dim3(blocks), dim3(threads), 0, stream, face_verts.contiguous().data_ptr<float>(), mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), blur_radius, perspective_correct, clip_barycentric_coords, cull_backfaces, N, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO: benchmark parallelizing over faces_verts instead of over pixels. 
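// Note on the TODO above: because the backward kernel below parallelizes
// over pixels, many pixels (and several of their K entries) may refer to the
// same face, so per-face gradients are accumulated into grad_face_verts with
// atomicAdd rather than plain stores.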
__global__ void RasterizeMeshesBackwardCudaKernel( const float* face_verts, // (F, 3, 3) const int64_t* pix_to_face, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords, const int N, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_bary, // (N, H, W, K, 3) const float* grad_dists, // (N, H, W, K) float* grad_face_verts) { // (F, 3, 3) // Parallelize over each pixel in images of // size H * W, for each image in the batch of size N. const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { // Convert linear index to 3D index const int n = t_i / (H * W); // batch index. const int pix_idx = t_i % (H * W); // Reverse ordering of X and Y axes. const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // Loop over all the faces for this pixel. for (int k = 0; k < K; k++) { // Index into (N, H, W, K, :) grad tensors // pixel index + top k index int i = n * H * W * K + pix_idx * K + k; const int f = pix_to_face[i]; if (f < 0) { continue; // padded face. } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only neex xy for barycentric coordinate and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Get upstream gradients for the face. const float grad_dist_upstream = grad_dists[i]; const float grad_zbuf_upstream = grad_zbuf[i]; const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0]; const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1]; const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2]; const float3 grad_bary_upstream = make_float3( grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2); const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 b_pp = !perspective_correct ? b_w : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z); const float3 b_w_clip = !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp); const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f; const float sign = inside ? -1.0f : 1.0f; // TODO(T52813608) Add support for non-square images. auto grad_dist_f = PointTriangleDistanceBackward( pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f); const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f); const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f); // Upstream gradient for barycentric coords from zbuf calculation: // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 // Therefore // d_zbuf/d_bary_w0 = z0 // d_zbuf/d_bary_w1 = z1 // d_zbuf/d_bary_w2 = z2 const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z); // Total upstream barycentric gradients are the sum of // external upstream gradients and contribution from zbuf. 
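// In equation form, for each barycentric weight k in {0, 1, 2}:
//   grad_bary_f_sum[k] = grad_bary_upstream[k] + grad_zbuf_upstream * z_k,
// since zbuf = w0 * z0 + w1 * z1 + w2 * z2 implies d_zbuf / d_w_k = z_k.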
const float3 grad_bary_f_sum = (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip); float3 grad_bary0 = grad_bary_f_sum; if (clip_barycentric_coords) { grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum); } float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f; if (perspective_correct) { auto perspective_grads = BarycentricPerspectiveCorrectionBackward( b_w, v0.z, v1.z, v2.z, grad_bary0); grad_bary0 = thrust::get<0>(perspective_grads); dz0_persp = thrust::get<1>(perspective_grads); dz1_persp = thrust::get<2>(perspective_grads); dz2_persp = thrust::get<3>(perspective_grads); } auto grad_bary_f = BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f); const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f); const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f); atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x); atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y); atomicAdd( grad_face_verts + f * 9 + 2, grad_zbuf_upstream * b_w_clip.x + dz0_persp); atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x); atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y); atomicAdd( grad_face_verts + f * 9 + 5, grad_zbuf_upstream * b_w_clip.y + dz1_persp); atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x); atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y); atomicAdd( grad_face_verts + f * 9 + 8, grad_zbuf_upstream * b_w_clip.z + dz2_persp); } } } at::Tensor RasterizeMeshesBackwardCuda( const at::Tensor& face_verts, // (F, 3, 3) const at::Tensor& pix_to_face, // (N, H, W, K) const at::Tensor& grad_zbuf, // (N, H, W, K) const at::Tensor& grad_bary, // (N, H, W, K, 3) const at::Tensor& grad_dists, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords) { // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, pix_to_face_t{pix_to_face, "pix_to_face", 2}, grad_zbuf_t{grad_zbuf, "grad_zbuf", 3}, grad_bary_t{grad_bary, "grad_bary", 4}, grad_dists_t{grad_dists, "grad_dists", 5}; at::CheckedFrom c = "RasterizeMeshesBackwardCuda"; at::checkAllSameGPU( c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); at::checkAllSameType( c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int F = face_verts.size(0); const int N = pix_to_face.size(0); const int H = pix_to_face.size(1); const int W = pix_to_face.size(2); const int K = pix_to_face.size(3); at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options()); if (grad_face_verts.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return grad_face_verts; } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesBackwardCudaKernel), dim3(blocks), dim3(threads), 0, stream, face_verts.contiguous().data_ptr<float>(), pix_to_face.contiguous().data_ptr<int64_t>(), perspective_correct, clip_barycentric_coords, N, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_bary.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_face_verts.data_ptr<float>()); AT_CUDA_CHECK(hipGetLastError()); return grad_face_verts; } // **************************************************************************** // * COARSE RASTERIZATION * // 
**************************************************************************** __global__ void RasterizeMeshesCoarseCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const int N, const int F, const int H, const int W, const int bin_size, const int chunk_size, const int max_faces_per_bin, int* faces_per_bin, int* bin_faces) { extern __shared__ char sbuf[]; const int M = max_faces_per_bin; const int num_bins = 1 + (W - 1) / bin_size; // Integer divide round up const float half_pix = 1.0f / W; // Size of half a pixel in NDC units // This is a boolean array of shape (num_bins, num_bins, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the image. BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size); // Have each block handle a chunk of faces const int chunks_per_batch = 1 + (F - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; // batch index const int chunk_idx = chunk % chunks_per_batch; const int face_start_idx = chunk_idx * chunk_size; binmask.block_clear(); const int64_t mesh_face_start_idx = mesh_to_face_first_idx[batch_idx]; const int64_t mesh_face_stop_idx = mesh_face_start_idx + num_faces_per_mesh[batch_idx]; // Have each thread handle a different face within the chunk for (int f = threadIdx.x; f < chunk_size; f += blockDim.x) { const int f_idx = face_start_idx + f; // Check if face index corresponds to the mesh in the batch given by // batch_idx if (f_idx >= mesh_face_stop_idx || f_idx < mesh_face_start_idx) { continue; } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Compute screen-space bbox for the triangle expanded by blur. float xmin = FloatMin3(v0.x, v1.x, v2.x) - sqrt(blur_radius); float ymin = FloatMin3(v0.y, v1.y, v2.y) - sqrt(blur_radius); float xmax = FloatMax3(v0.x, v1.x, v2.x) + sqrt(blur_radius); float ymax = FloatMax3(v0.y, v1.y, v2.y) + sqrt(blur_radius); float zmax = FloatMax3(v0.z, v1.z, v2.z); if (zmax < 0) { continue; // Face is behind the camera. } // Brute-force search over all bins; TODO(T54294966) something smarter. for (int by = 0; by < num_bins; ++by) { // Y coordinate of the top and bottom of the bin. // PixToNdc gives the location of the center of each pixel, so we // need to add/subtract a half pixel to get the true extent of the bin. // Reverse ordering of Y axis so that +Y is upwards in the image. const float bin_y_min = PixToNdc(by * bin_size, H) - half_pix; const float bin_y_max = PixToNdc((by + 1) * bin_size - 1, H) + half_pix; const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax); for (int bx = 0; bx < num_bins; ++bx) { // X coordinate of the left and right of the bin. // Reverse ordering of x axis so that +X is left. const float bin_x_max = PixToNdc((bx + 1) * bin_size - 1, W) + half_pix; const float bin_x_min = PixToNdc(bx * bin_size, W) - half_pix; const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax); if (y_overlap && x_overlap) { binmask.set(by, bx, f); } } } } __syncthreads(); // Now we have processed every face in the current chunk. We need to // count the number of faces in each bin so we can write the indices // out to global memory. 
We have each thread handle a different bin. for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) { const int by = byx / num_bins; const int bx = byx % num_bins; const int count = binmask.count(by, bx); const int faces_per_bin_idx = batch_idx * num_bins * num_bins + by * num_bins + bx; // This atomically increments the (global) number of faces found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_faces array for the // faces in the current chunk that fall into this bin. const int start = atomicAdd(faces_per_bin + faces_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_faces. int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M + bx * M + start; for (int f = 0; f < chunk_size; ++f) { if (binmask.get(by, bx, f)) { // TODO(T54296346) find the correct method for handling errors in // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin. // Either decrease bin size or increase max_faces_per_bin bin_faces[next_idx] = face_start_idx + f; next_idx++; } } } __syncthreads(); } } at::Tensor RasterizeMeshesCoarseCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_face_first_idx, const at::Tensor& num_faces_per_mesh, const int image_size, const float blur_radius, const int bin_size, const int max_faces_per_bin) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_face_first_idx_t{ mesh_to_face_first_idx, "mesh_to_face_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; at::CheckedFrom c = "RasterizeMeshesCoarseCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int W = image_size; const int H = image_size; const int F = face_verts.size(0); const int N = num_faces_per_mesh.size(0); const int num_bins = 1 + (image_size - 1) / bin_size; // Divide round up. 
const int M = max_faces_per_bin; if (num_bins >= kMaxFacesPerBin) { std::stringstream ss; ss << "Got " << num_bins << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = num_faces_per_mesh.options().dtype(at::kInt); at::Tensor faces_per_bin = at::zeros({N, num_bins, num_bins}, opts); at::Tensor bin_faces = at::full({N, num_bins, num_bins, M}, -1, opts); if (bin_faces.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return bin_faces; } const int chunk_size = 512; const size_t shared_size = num_bins * num_bins * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; hipLaunchKernelGGL(( RasterizeMeshesCoarseCudaKernel), dim3(blocks), dim3(threads), shared_size, stream, face_verts.contiguous().data_ptr<float>(), mesh_to_face_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), blur_radius, N, F, H, W, bin_size, chunk_size, M, faces_per_bin.data_ptr<int32_t>(), bin_faces.data_ptr<int32_t>()); AT_CUDA_CHECK(hipGetLastError()); return bin_faces; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesFineCudaKernel( const float* face_verts, // (F, 3, 3) const int32_t* bin_faces, // (N, B, B, T) const float blur_radius, const int bin_size, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int B, const int M, const int H, const int W, const int K, int64_t* face_idxs, // (N, S, S, K) float* zbuf, // (N, S, S, K) float* pix_dists, // (N, S, S, K) float* bary // (N, S, S, K, 3) ) { // This can be more than S^2 if S % bin_size != 0 int num_pixels = N * B * B * bin_size * bin_size; int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from faces and bin_faces. int i = pid; const int n = i / (B * B * bin_size * bin_size); i %= B * B * bin_size * bin_size; const int by = i / (B * bin_size * bin_size); i %= B * bin_size * bin_size; const int bx = i / (bin_size * bin_size); i %= bin_size * bin_size; const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // This part looks like the naive rasterization kernel, except we use // bin_faces to only look at a subset of faces already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; m++) { const int f = bin_faces[n * B * B * M + by * B * M + bx * M + m]; if (f < 0) { continue; // bin_faces uses -1 as a sentinal value. } // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // Now we've looked at all the faces for this bin, so we can write // output for the current pixel. 
// TODO: make sorting an option as only top k is needed, not sorted values. BubbleSort(q, q_size); // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const int pix_idx = n * H * W * K + yidx * H * K + xidx * K; for (int k = 0; k < q_size; k++) { face_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist; bary[(pix_idx + k) * 3 + 0] = q[k].bary.x; bary[(pix_idx + k) * 3 + 1] = q[k].bary.y; bary[(pix_idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesFineCuda( const at::Tensor& face_verts, const at::Tensor& bin_faces, const int image_size, const float blur_radius, const int bin_size, const int faces_per_pixel, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, bin_faces_t{bin_faces, "bin_faces", 2}; at::CheckedFrom c = "RasterizeMeshesFineCuda"; at::checkAllSameGPU(c, {face_verts_t, bin_faces_t}); // Set the device for the kernel launch based on the device of the input at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); const int N = bin_faces.size(0); const int B = bin_faces.size(1); const int M = bin_faces.size(3); const int K = faces_per_pixel; const int H = image_size; // Assume square images only. const int W = image_size; if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 150"); } auto long_opts = bin_faces.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesFineCudaKernel), dim3(blocks), dim3(threads), 0, stream, face_verts.contiguous().data_ptr<float>(), bin_faces.contiguous().data_ptr<int32_t>(), blur_radius, bin_size, perspective_correct, clip_barycentric_coords, cull_backfaces, N, B, M, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); }
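The naive, backward, and fine kernels above all share one launch and indexing pattern: a fixed grid of 1024 blocks of 64 threads, where each thread walks a grid-stride loop over the N * H * W (or N * B * B * bin_size * bin_size) work items. The following is a minimal, self-contained sketch of that pattern; the kernel name and output buffer are hypothetical, and only the index arithmetic mirrors the code above.

__global__ void DemoPerPixelKernel(float* demo_out, int N, int H, int W) {
  // One thread per pixel, with a grid-stride loop so a fixed-size grid can
  // cover any N * H * W, as in RasterizeMeshesNaiveCudaKernel above.
  const int num_threads = gridDim.x * blockDim.x;
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  for (int i = tid; i < N * H * W; i += num_threads) {
    const int n = i / (H * W);        // batch index
    const int pix_idx = i % (H * W);  // pixel index within one image
    const int yi = pix_idx / W;
    const int xi = pix_idx % W;
    demo_out[i] = static_cast<float>(n + yi + xi);  // placeholder work
  }
}

// Host side, mirroring the fixed launch configuration used above:
//   DemoPerPixelKernel<<<1024, 64, 0, stream>>>(out_ptr, N, H, W);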
9fbf361abc051d9ac5fc351467858ba58c6dc0ee.cu
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <float.h> #include <math.h> #include <thrust/tuple.h> #include <cstdio> #include <tuple> #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" #include "utils/float_math.cuh" #include "utils/geometry_utils.cuh" namespace { // A structure for holding details about a pixel. struct Pixel { float z; int64_t idx; float dist; float3 bary; }; __device__ bool operator<(const Pixel& a, const Pixel& b) { return a.z < b.z; } __device__ float FloatMin3(const float p1, const float p2, const float p3) { return fminf(p1, fminf(p2, p3)); } __device__ float FloatMax3(const float p1, const float p2, const float p3) { return fmaxf(p1, fmaxf(p2, p3)); } // Get the xyz coordinates of the three vertices for the face given by the // index face_idx into face_verts. __device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts( const float* face_verts, int face_idx) { const float x0 = face_verts[face_idx * 9 + 0]; const float y0 = face_verts[face_idx * 9 + 1]; const float z0 = face_verts[face_idx * 9 + 2]; const float x1 = face_verts[face_idx * 9 + 3]; const float y1 = face_verts[face_idx * 9 + 4]; const float z1 = face_verts[face_idx * 9 + 5]; const float x2 = face_verts[face_idx * 9 + 6]; const float y2 = face_verts[face_idx * 9 + 7]; const float z2 = face_verts[face_idx * 9 + 8]; const float3 v0xyz = make_float3(x0, y0, z0); const float3 v1xyz = make_float3(x1, y1, z1); const float3 v2xyz = make_float3(x2, y2, z2); return thrust::make_tuple(v0xyz, v1xyz, v2xyz); } // Get the min/max x/y/z values for the face given by vertices v0, v1, v2. __device__ thrust::tuple<float2, float2, float2> GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { const float xmin = FloatMin3(v0.x, v1.x, v2.x); const float ymin = FloatMin3(v0.y, v1.y, v2.y); const float zmin = FloatMin3(v0.z, v1.z, v2.z); const float xmax = FloatMax3(v0.x, v1.x, v2.x); const float ymax = FloatMax3(v0.y, v1.y, v2.y); const float zmax = FloatMax3(v0.z, v1.z, v2.z); return thrust::make_tuple( make_float2(xmin, xmax), make_float2(ymin, ymax), make_float2(zmin, zmax)); } // Check if the point (px, py) lies outside the face bounding box face_bbox. // Return true if the point is outside. __device__ bool CheckPointOutsideBoundingBox( float3 v0, float3 v1, float3 v2, float blur_radius, float2 pxy) { const auto bbox = GetFaceBoundingBox(v0, v1, v2); const float2 xlims = thrust::get<0>(bbox); const float2 ylims = thrust::get<1>(bbox); const float2 zlims = thrust::get<2>(bbox); const float x_min = xlims.x - blur_radius; const float y_min = ylims.x - blur_radius; const float x_max = xlims.y + blur_radius; const float y_max = ylims.y + blur_radius; // Check if the current point is oustside the triangle bounding box. return (pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min); } // This function checks if a pixel given by xy location pxy lies within the // face with index face_idx in face_verts. One of the inputs is a list (q) // which contains Pixel structs with the indices of the faces which intersect // with this pixel sorted by closest z distance. If the point pxy lies in the // face, the list (q) is updated and re-orderered in place. In addition // the auxillary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizeMeshesNaiveCudaKernel and // RasterizeMeshesFineCudaKernel. 
template <typename FaceQ> __device__ void CheckPixelInsideFace( const float* face_verts, // (F, 3, 3) const int face_idx, int& q_size, float& q_max_z, int& q_max_idx, FaceQ& q, const float blur_radius, const float2 pxy, // Coordinates of the pixel const int K, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { const auto v012 = GetSingleFaceVerts(face_verts, face_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only need xy for barycentric coordinates and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Perform checks and skip if: // 1. the face is behind the camera // 2. the face is facing away from the camera // 3. the face has very small face area // 4. the pixel is outside the face bbox const float zmax = FloatMax3(v0.z, v1.z, v2.z); const bool outside_bbox = CheckPointOutsideBoundingBox( v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy); // Check if the face is visible to the camera. const bool back_face = face_area < 0.0; const bool zero_face_area = (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon); if (zmax < 0 || cull_backfaces && back_face || outside_bbox || zero_face_area) { return; } // Calculate barycentric coords and euclidean dist to triangle. const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 p_bary = !perspective_correct ? p_bary0 : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z); const float3 p_bary_clip = !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary); const float pz = p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z; if (pz < 0) { return; // Face is behind the image plane. } // Get abs squared distance const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy); // Use the unclipped bary coordinates to determine if the point is inside the // face. const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f; const float signed_dist = inside ? -dist : dist; // Check if pixel is outside blur region if (!inside && dist >= blur_radius) { return; } if (q_size < K) { // Just insert it. q[q_size] = {pz, face_idx, signed_dist, p_bary_clip}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max. 
q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesNaiveCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int H, const int W, const int K, int64_t* face_idxs, float* zbuf, float* pix_dists, float* bary) { // Simple version: One thread per output pixel int num_threads = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // batch index. const int pix_idx = i % (H * W); // Reverse ordering of X and Y axes const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordiantes of pixel. const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the faces. const int64_t face_start_idx = mesh_to_face_first_idx[n]; const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n]; // Loop through the faces in the mesh. for (int f = face_start_idx; f < face_stop_idx; ++f) { // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // TODO: make sorting an option as only top k is needed, not sorted values. 
BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { face_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist; bary[(idx + k) * 3 + 0] = q[k].bary.x; bary[(idx + k) * 3 + 1] = q[k].bary.y; bary[(idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesNaiveCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_faces_packed_first_idx, const at::Tensor& num_faces_per_mesh, const int image_size, const float blur_radius, const int num_closest, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK( num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0), "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx"); if (num_closest > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_faces_packed_first_idx_t{ mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; at::CheckedFrom c = "RasterizeMeshesNaiveCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_faces_packed_first_idx_t, num_faces_per_mesh_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int N = num_faces_per_mesh.size(0); // batch size. const int H = image_size; // Assume square images. const int W = image_size; const int K = num_closest; auto long_opts = num_faces_per_mesh.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesNaiveCudaKernel<<<blocks, threads, 0, stream>>>( face_verts.contiguous().data_ptr<float>(), mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), blur_radius, perspective_correct, clip_barycentric_coords, cull_backfaces, N, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO: benchmark parallelizing over faces_verts instead of over pixels. 
__global__ void RasterizeMeshesBackwardCudaKernel( const float* face_verts, // (F, 3, 3) const int64_t* pix_to_face, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords, const int N, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_bary, // (N, H, W, K, 3) const float* grad_dists, // (N, H, W, K) float* grad_face_verts) { // (F, 3, 3) // Parallelize over each pixel in images of // size H * W, for each image in the batch of size N. const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { // Convert linear index to 3D index const int n = t_i / (H * W); // batch index. const int pix_idx = t_i % (H * W); // Reverse ordering of X and Y axes. const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // Loop over all the faces for this pixel. for (int k = 0; k < K; k++) { // Index into (N, H, W, K, :) grad tensors // pixel index + top k index int i = n * H * W * K + pix_idx * K + k; const int f = pix_to_face[i]; if (f < 0) { continue; // padded face. } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only neex xy for barycentric coordinate and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Get upstream gradients for the face. const float grad_dist_upstream = grad_dists[i]; const float grad_zbuf_upstream = grad_zbuf[i]; const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0]; const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1]; const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2]; const float3 grad_bary_upstream = make_float3( grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2); const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 b_pp = !perspective_correct ? b_w : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z); const float3 b_w_clip = !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp); const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f; const float sign = inside ? -1.0f : 1.0f; // TODO(T52813608) Add support for non-square images. auto grad_dist_f = PointTriangleDistanceBackward( pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f); const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f); const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f); // Upstream gradient for barycentric coords from zbuf calculation: // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 // Therefore // d_zbuf/d_bary_w0 = z0 // d_zbuf/d_bary_w1 = z1 // d_zbuf/d_bary_w2 = z2 const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z); // Total upstream barycentric gradients are the sum of // external upstream gradients and contribution from zbuf. 
const float3 grad_bary_f_sum = (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip); float3 grad_bary0 = grad_bary_f_sum; if (clip_barycentric_coords) { grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum); } float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f; if (perspective_correct) { auto perspective_grads = BarycentricPerspectiveCorrectionBackward( b_w, v0.z, v1.z, v2.z, grad_bary0); grad_bary0 = thrust::get<0>(perspective_grads); dz0_persp = thrust::get<1>(perspective_grads); dz1_persp = thrust::get<2>(perspective_grads); dz2_persp = thrust::get<3>(perspective_grads); } auto grad_bary_f = BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f); const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f); const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f); atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x); atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y); atomicAdd( grad_face_verts + f * 9 + 2, grad_zbuf_upstream * b_w_clip.x + dz0_persp); atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x); atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y); atomicAdd( grad_face_verts + f * 9 + 5, grad_zbuf_upstream * b_w_clip.y + dz1_persp); atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x); atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y); atomicAdd( grad_face_verts + f * 9 + 8, grad_zbuf_upstream * b_w_clip.z + dz2_persp); } } } at::Tensor RasterizeMeshesBackwardCuda( const at::Tensor& face_verts, // (F, 3, 3) const at::Tensor& pix_to_face, // (N, H, W, K) const at::Tensor& grad_zbuf, // (N, H, W, K) const at::Tensor& grad_bary, // (N, H, W, K, 3) const at::Tensor& grad_dists, // (N, H, W, K) const bool perspective_correct, const bool clip_barycentric_coords) { // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, pix_to_face_t{pix_to_face, "pix_to_face", 2}, grad_zbuf_t{grad_zbuf, "grad_zbuf", 3}, grad_bary_t{grad_bary, "grad_bary", 4}, grad_dists_t{grad_dists, "grad_dists", 5}; at::CheckedFrom c = "RasterizeMeshesBackwardCuda"; at::checkAllSameGPU( c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); at::checkAllSameType( c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int F = face_verts.size(0); const int N = pix_to_face.size(0); const int H = pix_to_face.size(1); const int W = pix_to_face.size(2); const int K = pix_to_face.size(3); at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options()); if (grad_face_verts.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_face_verts; } const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesBackwardCudaKernel<<<blocks, threads, 0, stream>>>( face_verts.contiguous().data_ptr<float>(), pix_to_face.contiguous().data_ptr<int64_t>(), perspective_correct, clip_barycentric_coords, N, H, W, K, grad_zbuf.contiguous().data_ptr<float>(), grad_bary.contiguous().data_ptr<float>(), grad_dists.contiguous().data_ptr<float>(), grad_face_verts.data_ptr<float>()); AT_CUDA_CHECK(cudaGetLastError()); return grad_face_verts; } // **************************************************************************** // * COARSE RASTERIZATION * // 
**************************************************************************** __global__ void RasterizeMeshesCoarseCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const int N, const int F, const int H, const int W, const int bin_size, const int chunk_size, const int max_faces_per_bin, int* faces_per_bin, int* bin_faces) { extern __shared__ char sbuf[]; const int M = max_faces_per_bin; const int num_bins = 1 + (W - 1) / bin_size; // Integer divide round up const float half_pix = 1.0f / W; // Size of half a pixel in NDC units // This is a boolean array of shape (num_bins, num_bins, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the image. BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size); // Have each block handle a chunk of faces const int chunks_per_batch = 1 + (F - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; // batch index const int chunk_idx = chunk % chunks_per_batch; const int face_start_idx = chunk_idx * chunk_size; binmask.block_clear(); const int64_t mesh_face_start_idx = mesh_to_face_first_idx[batch_idx]; const int64_t mesh_face_stop_idx = mesh_face_start_idx + num_faces_per_mesh[batch_idx]; // Have each thread handle a different face within the chunk for (int f = threadIdx.x; f < chunk_size; f += blockDim.x) { const int f_idx = face_start_idx + f; // Check if face index corresponds to the mesh in the batch given by // batch_idx if (f_idx >= mesh_face_stop_idx || f_idx < mesh_face_start_idx) { continue; } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Compute screen-space bbox for the triangle expanded by blur. float xmin = FloatMin3(v0.x, v1.x, v2.x) - sqrt(blur_radius); float ymin = FloatMin3(v0.y, v1.y, v2.y) - sqrt(blur_radius); float xmax = FloatMax3(v0.x, v1.x, v2.x) + sqrt(blur_radius); float ymax = FloatMax3(v0.y, v1.y, v2.y) + sqrt(blur_radius); float zmax = FloatMax3(v0.z, v1.z, v2.z); if (zmax < 0) { continue; // Face is behind the camera. } // Brute-force search over all bins; TODO(T54294966) something smarter. for (int by = 0; by < num_bins; ++by) { // Y coordinate of the top and bottom of the bin. // PixToNdc gives the location of the center of each pixel, so we // need to add/subtract a half pixel to get the true extent of the bin. // Reverse ordering of Y axis so that +Y is upwards in the image. const float bin_y_min = PixToNdc(by * bin_size, H) - half_pix; const float bin_y_max = PixToNdc((by + 1) * bin_size - 1, H) + half_pix; const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax); for (int bx = 0; bx < num_bins; ++bx) { // X coordinate of the left and right of the bin. // Reverse ordering of x axis so that +X is left. const float bin_x_max = PixToNdc((bx + 1) * bin_size - 1, W) + half_pix; const float bin_x_min = PixToNdc(bx * bin_size, W) - half_pix; const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax); if (y_overlap && x_overlap) { binmask.set(by, bx, f); } } } } __syncthreads(); // Now we have processed every face in the current chunk. We need to // count the number of faces in each bin so we can write the indices // out to global memory. 
We have each thread handle a different bin. for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) { const int by = byx / num_bins; const int bx = byx % num_bins; const int count = binmask.count(by, bx); const int faces_per_bin_idx = batch_idx * num_bins * num_bins + by * num_bins + bx; // This atomically increments the (global) number of faces found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_faces array for the // faces in the current chunk that fall into this bin. const int start = atomicAdd(faces_per_bin + faces_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_faces. int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M + bx * M + start; for (int f = 0; f < chunk_size; ++f) { if (binmask.get(by, bx, f)) { // TODO(T54296346) find the correct method for handling errors in // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin. // Either decrease bin size or increase max_faces_per_bin bin_faces[next_idx] = face_start_idx + f; next_idx++; } } } __syncthreads(); } } at::Tensor RasterizeMeshesCoarseCuda( const at::Tensor& face_verts, const at::Tensor& mesh_to_face_first_idx, const at::Tensor& num_faces_per_mesh, const int image_size, const float blur_radius, const int bin_size, const int max_faces_per_bin) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, mesh_to_face_first_idx_t{ mesh_to_face_first_idx, "mesh_to_face_first_idx", 2}, num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; at::CheckedFrom c = "RasterizeMeshesCoarseCuda"; at::checkAllSameGPU( c, {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int W = image_size; const int H = image_size; const int F = face_verts.size(0); const int N = num_faces_per_mesh.size(0); const int num_bins = 1 + (image_size - 1) / bin_size; // Divide round up. 
const int M = max_faces_per_bin; if (num_bins >= kMaxFacesPerBin) { std::stringstream ss; ss << "Got " << num_bins << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = num_faces_per_mesh.options().dtype(at::kInt); at::Tensor faces_per_bin = at::zeros({N, num_bins, num_bins}, opts); at::Tensor bin_faces = at::full({N, num_bins, num_bins, M}, -1, opts); if (bin_faces.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return bin_faces; } const int chunk_size = 512; const size_t shared_size = num_bins * num_bins * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; RasterizeMeshesCoarseCudaKernel<<<blocks, threads, shared_size, stream>>>( face_verts.contiguous().data_ptr<float>(), mesh_to_face_first_idx.contiguous().data_ptr<int64_t>(), num_faces_per_mesh.contiguous().data_ptr<int64_t>(), blur_radius, N, F, H, W, bin_size, chunk_size, M, faces_per_bin.data_ptr<int32_t>(), bin_faces.data_ptr<int32_t>()); AT_CUDA_CHECK(cudaGetLastError()); return bin_faces; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesFineCudaKernel( const float* face_verts, // (F, 3, 3) const int32_t* bin_faces, // (N, B, B, T) const float blur_radius, const int bin_size, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces, const int N, const int B, const int M, const int H, const int W, const int K, int64_t* face_idxs, // (N, S, S, K) float* zbuf, // (N, S, S, K) float* pix_dists, // (N, S, S, K) float* bary // (N, S, S, K, 3) ) { // This can be more than S^2 if S % bin_size != 0 int num_pixels = N * B * B * bin_size * bin_size; int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from faces and bin_faces. int i = pid; const int n = i / (B * B * bin_size * bin_size); i %= B * B * bin_size * bin_size; const int by = i / (B * bin_size * bin_size); i %= B * bin_size * bin_size; const int bx = i / (bin_size * bin_size); i %= bin_size * bin_size; const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // This part looks like the naive rasterization kernel, except we use // bin_faces to only look at a subset of faces already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; m++) { const int f = bin_faces[n * B * B * M + by * B * M + bx * M + m]; if (f < 0) { continue; // bin_faces uses -1 as a sentinal value. } // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct, clip_barycentric_coords, cull_backfaces); } // Now we've looked at all the faces for this bin, so we can write // output for the current pixel. 
// TODO: make sorting an option as only top k is needed, not sorted values. BubbleSort(q, q_size); // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const int pix_idx = n * H * W * K + yidx * H * K + xidx * K; for (int k = 0; k < q_size; k++) { face_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist; bary[(pix_idx + k) * 3 + 0] = q[k].bary.x; bary[(pix_idx + k) * 3 + 1] = q[k].bary.y; bary[(pix_idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> RasterizeMeshesFineCuda( const at::Tensor& face_verts, const at::Tensor& bin_faces, const int image_size, const float blur_radius, const int bin_size, const int faces_per_pixel, const bool perspective_correct, const bool clip_barycentric_coords, const bool cull_backfaces) { TORCH_CHECK( face_verts.ndimension() == 3 && face_verts.size(1) == 3 && face_verts.size(2) == 3, "face_verts must have dimensions (num_faces, 3, 3)"); TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions"); // Check inputs are on the same device at::TensorArg face_verts_t{face_verts, "face_verts", 1}, bin_faces_t{bin_faces, "bin_faces", 2}; at::CheckedFrom c = "RasterizeMeshesFineCuda"; at::checkAllSameGPU(c, {face_verts_t, bin_faces_t}); // Set the device for the kernel launch based on the device of the input at::cuda::CUDAGuard device_guard(face_verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const int N = bin_faces.size(0); const int B = bin_faces.size(1); const int M = bin_faces.size(3); const int K = faces_per_pixel; const int H = image_size; // Assume square images only. const int W = image_size; if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 150"); } auto long_opts = bin_faces.options().dtype(at::kLong); auto float_opts = face_verts.options().dtype(at::kFloat); at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); if (face_idxs.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesFineCudaKernel<<<blocks, threads, 0, stream>>>( face_verts.contiguous().data_ptr<float>(), bin_faces.contiguous().data_ptr<int32_t>(), blur_radius, bin_size, perspective_correct, clip_barycentric_coords, cull_backfaces, N, B, M, H, W, K, face_idxs.data_ptr<int64_t>(), zbuf.data_ptr<float>(), pix_dists.data_ptr<float>(), bary.data_ptr<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); }
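The coarse kernel above reserves num_bins * num_bins * chunk_size / 8 bytes of dynamic shared memory for its BitMask, which reads as one bit per (bin_y, bin_x, face-in-chunk) entry. Below is a small host-side sketch of that sizing; the helper name is hypothetical and the one-bit-per-entry reading is an assumption based on the division by 8.

#include <cstddef>

// Hypothetical helper mirroring the shared_size computation in
// RasterizeMeshesCoarseCuda; assumes the BitMask stores one bit per
// (bin_y, bin_x, face) entry, which is what the "/ 8" suggests.
inline size_t CoarseSharedBytes(int image_size, int bin_size, int chunk_size) {
  const int num_bins = 1 + (image_size - 1) / bin_size;  // integer divide, round up
  return static_cast<size_t>(num_bins) * num_bins * chunk_size / 8;
}

// Example: image_size = 64, bin_size = 8, chunk_size = 512 gives num_bins = 8
// and 8 * 8 * 512 / 8 = 4096 bytes per block, well under the default 48 KB
// dynamic shared memory limit.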
6ac8d18109244b324071278e20ae7425756a8906.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <time.h> #include <unistd.h> #include <stdlib.h> #include <math.h> #include <iostream> #include <string> /* Instrues COMPILAR --> nvcc 2DstencilGPUSharedMemoryBlankBorderTimeSpaceSharingOpencvKarma.cu -o go `pkg-config --cflags --libs opencv` -w EXECUTAR --> ./go DOMAIN_DIMS STENCIL_ORDER SPACE_TIME_BLOCK_TIMES BLOCK_DIM_X BLOCK_DIM_Y */ using namespace std; //===> CONSTANTES karma model <===// #ifndef MODEL_WIDTH #define MODEL_WIDTH 0 #endif #define Eh 3.0f #define En 1.0f #define Re 0.6f #define tauE 5.0f #define tauN 250.0f #define gam 0.001f #define East 1.5415f #define DT 0.05f #define DX (12.0f / MODEL_WIDTH) /* Funo somente da GPU que recebe os parametros para o calculo de um stencil d_e - dado de entrada d_r - dado de saida d_v - campo que deve ser atualizado c_coeff - varivel utilizada para armazenar o valores dos coeficcientes do stencil (utilizada apenas na verso com stencil simples usado anteriormente) X - Y - Dimenses das estruturas de entrada k - ordem do stencil x -y - posio do centro do stencil na estrutura de entrada GX - Dimenso horizontal da estrutura do dado de sada Gx - Gy posio do centro do stencil na estrutura de saida */ __device__ void _2Dstencil_(float *d_e, float *d_r, float *d_v, int X, int x, int y, int GX, int Gx, int Gy) { int h_e_i = x + (y * (X)); float temp = d_e[h_e_i]; float rv = d_v[h_e_i]; float Rn = (1.0f / (1.0f - expf(-Re))) - rv; float p = (temp > En) * 1.0f; float dv = (Rn * p - (1.0f - p) * rv) / tauN; float Dn = rv * rv; float hE = (1.0f - tanh(temp - Eh)) * temp * temp / 2.0f; float du = (((East - Dn) * hE) - temp) / tauE; float xlapr = d_e[(x + 1) + ((y) * (X))] - temp; float xlapl = temp - d_e[(x - 1) + ((y) * (X))]; float xlapf = d_e[(x) + ((y + 1) * (X))] - temp; float xlapb = temp - d_e[(x) + ((y - 1) * (X))]; float lap = xlapr - xlapl + xlapf - xlapb; temp = (temp + (du * DT) + (lap * DT * gam / (DX * DX))); d_v[h_e_i] = rv + dv * DT; h_e_i = Gx + ((Gy) * (GX)); d_r[h_e_i] = temp; } /* funo chamada pelo host que controla as cpias e a ordem do calculo dos stencils bem como a carga para cada thread */ __global__ void _2Dstencil_global(float *d_e, float *d_r, float *d_v, int X, int Y, int times) { int x, y; //,h_e_i,h_r_i,Xs,Ys,Dx,Dy; x = threadIdx.x + (blockIdx.x * blockDim.x); y = threadIdx.y + (blockIdx.y * blockDim.y); extern __shared__ float sharedOrig[]; int blockThreadIndex = threadIdx.x + threadIdx.y * blockDim.x; // Xs = threadIdx.x; // Ys = threadIdx.y; int Dx = blockDim.x + (2 * times); int Dy = blockDim.y + (2 * times); int sharedTam = Dx * Dy; float * shared = sharedOrig; float * sharedRes = shared + sharedTam; float * sharedV = sharedRes + sharedTam; //float * sharedRes = &shared[sharedTam]; //float *sharedV = &sharedRes[sharedTam]; /* Copia o Tile de memria compartilhada necessria para a configurao de tempo desejada Stride utilizado pois a quantidade de elementos a serem copiados sempre maior que a quantidade de threads As bordas */ for (int stride = blockThreadIndex; stride < sharedTam; stride += (blockDim.x * blockDim.y)) { int sharedIdxX = stride % Dx; int sharedIdxY = int(stride / Dx); int globalIdxX =(blockIdx.x * blockDim.x) + sharedIdxX - times; int globalIdxY =(blockIdx.y * blockDim.y) + sharedIdxY - times; //int globalIdx = globalIdxX + (globalIdxX < 0) - (globalIdxX >= X) + (globalIdxY + (globalIdxY < 0) - (globalIdxY >= Y)) * X; int globalIdx = globalIdxX*(!(globalIdxX < 0 || globalIdxX >= X)) + 
(globalIdxX + (globalIdxX < 0) - (globalIdxX >= X))*((globalIdxX < 0 || globalIdxX >= X)) + (globalIdxY*(!(globalIdxY < 0 || globalIdxY >= Y)) + (globalIdxY + (globalIdxY < 0) - (globalIdxY >= Y))*((globalIdxY < 0 || globalIdxY >= Y))) * X; shared[stride] = d_e[globalIdx]; sharedV[stride] = d_v[globalIdx]; } __syncthreads(); /* Envia pra ser calculado todos os elementos alm do ultimo instante de tempo */ for (int t = 1; t < times; t++) { //_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,Dx,threadIdx.x+k2,threadIdx.y+k2); int tDx = blockDim.x + ((times - t) * 2); int tDy = blockDim.y + ((times - t) * 2); int tk2 = (t); int tSharedTam = tDx * tDy; for (int stride = blockThreadIndex; stride < tSharedTam; stride += (blockDim.x * blockDim.y)) { _2Dstencil_(shared, sharedRes, sharedV, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2); } float * temp = shared; shared = sharedRes; sharedRes = temp; __syncthreads(); } /* Envia pra ser calculado todos os elementos do ultimo instante de tempo */ _2Dstencil_(shared, d_r, sharedV, Dx, ((x%(blockDim.x))+times), ((y%(blockDim.y))+times), X, x, y); __syncthreads(); int sharedIdx = ((x%(blockDim.x))+times) + ((y%(blockDim.y))+times)*Dx; int globalIdx = x + y * X; d_v[globalIdx] = sharedV[sharedIdx]; } int main(int argc, char *argv[]) { /* Declaraes e valores padroes */ float *h_e, *h_r, *h_v; float *d_e, *d_r, *d_v; int size, sharedSize; int X = 32; int Y = 32; int times = 1,globalTimes = 1; int BX = 32; int BY = 32; int GX = 1; int GY = 1; /* Obteno dos parmetros de entrada */ if (argc > 1) { X = atoi(argv[1]); Y = X; } if (argc > 2) { times = atoi(argv[2]); } if (argc > 3) { globalTimes = atoi(argv[3]); } if (X > 32) { if (argc > 4) BX = atoi(argv[4]); GX = ceil((float)X / (float)BX); BX = 32; } if (Y > 32) { if (argc > 5) BY = atoi(argv[5]); GY = ceil((float)Y / (float)BY); BY = 32; } /* Allocaes de memria e configurao dos blocos e grid */ dim3 block_dim(BX, BY, 1); dim3 grid_dim(GX, GY, 1); //sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(int); sharedSize = ((block_dim.x + (2 * times)) * (block_dim.y + (2 * times))) * sizeof(float) * 3; //sharedTam = ((block_dim.x+(k*2))*(block_dim.y+(k*2))); size = X * Y * sizeof(float); //tam = X * Y; h_e = (float *)malloc(size); h_r = (float *)malloc(size); h_v = (float *)malloc(size); hipMalloc(&d_e, size); hipMalloc(&d_r, size); hipMalloc(&d_v, size); //Copia os dados do campo e envia para a GPU e inicializa o dominio de entrada FILE *arq; arq = fopen("entrada.txt", "rt"); for (int i = 0; i < X; i++) for (int j = 0; j < Y; j++) { h_v[i + j * X] =0.5f; int temp; fscanf(arq," %d",&temp); h_e[i + j * X] = temp; } fclose(arq); hipMemcpy(d_v, h_v, size, hipMemcpyHostToDevice); /* Copy vectors from host memory to device memory Copia os dados da entrada de volta a GPU */ hipMemcpy(d_e, h_e, size, hipMemcpyHostToDevice); /* Comea o Timer */ hipDeviceSynchronize(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); /****************** *** Kernel Call *** *******************/ //_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z); /* Executa o kernel */ for(int i=0; i<globalTimes/times; i ++) { hipLaunchKernelGGL(( _2Dstencil_global), dim3(grid_dim), dim3(block_dim), sharedSize, 0, d_e, d_r, d_v, X, Y, times); float * temp = d_e; d_e = d_r; d_r = temp; } /* Identifica possveis erros */ hipError_t err = hipSuccess; err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to 
launch _3Dstencil_global kernel (error code %s)!\n", hipGetErrorString(err)); } /****************** *** Kernel Call *** *******************/ hipDeviceSynchronize(); /* Para o Timer */ hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf ("[%d,%.5f]",times,elapsedTime); // arq = fopen("TempoExecucaoBlocking12000VariandoTimes.txt", "a"); // //printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY); // // float sharedTime = 0.0; // // if(MODEL_WIDTH == 64) // // sharedTime = 108.41396; // // if(MODEL_WIDTH == 96) // // sharedTime = 89.01120; // // if(MODEL_WIDTH == 128) // // sharedTime = 95.11117; // // if(MODEL_WIDTH == 160) // // sharedTime = 113.37702; // // if(MODEL_WIDTH == 192) // // sharedTime = 101.13689; // // if(MODEL_WIDTH == 224) // // sharedTime = 154.31091; // // if(MODEL_WIDTH == 256) // // sharedTime = 186.73097; // // if(MODEL_WIDTH == 288) // // sharedTime = 218.92052; // // if(MODEL_WIDTH == 320) // // sharedTime = 232.28406; // // if(MODEL_WIDTH == 352) // // sharedTime = 295.31876; // // if(MODEL_WIDTH == 384) // // sharedTime = 304.94522; // // if(MODEL_WIDTH == 416) // // sharedTime = 385.76855; // // if(MODEL_WIDTH == 448) // // sharedTime = 570.88287; // // if(MODEL_WIDTH == 480) // // sharedTime = 701.02271; // // if(MODEL_WIDTH == 512) // // sharedTime = 768.65991; // // if(MODEL_WIDTH == 544) // // sharedTime = 881.91882; // // if(MODEL_WIDTH == 576) // // sharedTime = 979.11212; // // if(MODEL_WIDTH == 608) // // sharedTime = 1082.10193; // // if(MODEL_WIDTH == 640) // // sharedTime = 1188.77576; // // if(MODEL_WIDTH == 672) // // sharedTime = 1316.50024; // // if(MODEL_WIDTH == 704) // // sharedTime = 1436.11035; // // if(MODEL_WIDTH == 736) // // sharedTime = 1532.38489; // // if(MODEL_WIDTH == 768) // // sharedTime = 1576.36401; // fprintf (arq,"(%d,%.5f),\n",times,elapsedTime);//,sharedTime); // fclose(arq); /* Copia o resultado para a imagem de visualizao */ hipMemcpy(h_r, d_e, size, hipMemcpyDeviceToHost); arq = fopen("resultado.txt", "wt"); for (int i = 0; i < X; i++) { for (int j = 0; j < Y; j++) { fprintf(arq," %6.4f",h_r[i+j*X]); } fprintf(arq,"\n"); } fclose(arq); hipFree(d_e); hipFree(d_r); std::free(h_e); std::free(h_r); return 0; } /* main */
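The host code above sizes its dynamic shared memory as three float tiles of (block_dim.x + 2*times) by (block_dim.y + 2*times) elements: the current field, the result buffer, and the v field, each padded by a halo of width times so that times time steps can be advanced before returning to global memory. A standalone sketch of that computation follows; the helper name is hypothetical and the arithmetic simply restates the sharedSize expression above.

#include <cstddef>

// Hypothetical helper reproducing the sharedSize expression above: three
// float tiles of (bx + 2*t) x (by + 2*t) elements each (field, result, v).
inline size_t TimeBlockedTileBytes(int bx, int by, int times) {
  const size_t tile = static_cast<size_t>(bx + 2 * times) * (by + 2 * times);
  return tile * 3 * sizeof(float);
}

// Example: a 32x32 block advancing times = 4 steps per launch needs
// (32 + 8) * (32 + 8) * 3 * 4 bytes = 19200 bytes of shared memory; the halo
// of width `times` is what keeps interior threads valid for all 4 updates.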
6ac8d18109244b324071278e20ae7425756a8906.cu
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <string>

/*
Instructions

COMPILE --> nvcc 2DstencilGPUSharedMemoryBlankBorderTimeSpaceSharingOpencvKarma.cu -o go `pkg-config --cflags --libs opencv` -w

RUN     --> ./go DOMAIN_DIM SPACE_TIME_BLOCK_TIMES TOTAL_TIMESTEPS BLOCK_DIM_X BLOCK_DIM_Y
            (argv: domain size, time-block size, total number of time steps, block dim X, block dim Y)
*/

using namespace std;

//===> Karma model constants <===//

#ifndef MODEL_WIDTH
#define MODEL_WIDTH 0
#endif
#define Eh 3.0f
#define En 1.0f
#define Re 0.6f
#define tauE 5.0f
#define tauN 250.0f
#define gam 0.001f
#define East 1.5415f
#define DT 0.05f
#define DX (12.0f / MODEL_WIDTH)

/*
Device-only function that receives the parameters needed to compute one stencil
    d_e     - input data
    d_r     - output data
    d_v     - field that must be updated
    c_coeff - stores the stencil coefficients (only used by the earlier simple-stencil version)
    X - Y   - dimensions of the input structures
    k       - stencil order
    x - y   - position of the stencil centre in the input structure
    GX      - horizontal dimension of the output structure
    Gx - Gy - position of the stencil centre in the output structure
*/
__device__ void _2Dstencil_(float *d_e, float *d_r, float *d_v, int X, int x, int y, int GX, int Gx, int Gy)
{
    int h_e_i = x + (y * (X));

    float temp = d_e[h_e_i];
    float rv = d_v[h_e_i];

    float Rn = (1.0f / (1.0f - expf(-Re))) - rv;
    float p = (temp > En) * 1.0f;
    float dv = (Rn * p - (1.0f - p) * rv) / tauN;
    float Dn = rv * rv;
    float hE = (1.0f - tanh(temp - Eh)) * temp * temp / 2.0f;
    float du = (((East - Dn) * hE) - temp) / tauE;

    float xlapr = d_e[(x + 1) + ((y) * (X))] - temp;
    float xlapl = temp - d_e[(x - 1) + ((y) * (X))];
    float xlapf = d_e[(x) + ((y + 1) * (X))] - temp;
    float xlapb = temp - d_e[(x) + ((y - 1) * (X))];
    float lap = xlapr - xlapl + xlapf - xlapb;

    temp = (temp + (du * DT) + (lap * DT * gam / (DX * DX)));

    d_v[h_e_i] = rv + dv * DT;

    h_e_i = Gx + ((Gy) * (GX));
    d_r[h_e_i] = temp;
}

/*
Kernel called from the host; it controls the copies and the order in which the stencils
are computed, as well as the workload assigned to each thread.
*/
__global__ void _2Dstencil_global(float *d_e, float *d_r, float *d_v, int X, int Y, int times)
{
    int x, y; //,h_e_i,h_r_i,Xs,Ys,Dx,Dy;
    x = threadIdx.x + (blockIdx.x * blockDim.x);
    y = threadIdx.y + (blockIdx.y * blockDim.y);

    extern __shared__ float sharedOrig[];
    int blockThreadIndex = threadIdx.x + threadIdx.y * blockDim.x;
    // Xs = threadIdx.x;
    // Ys = threadIdx.y;
    int Dx = blockDim.x + (2 * times);
    int Dy = blockDim.y + (2 * times);
    int sharedTam = Dx * Dy;
    float * shared = sharedOrig;
    float * sharedRes = shared + sharedTam;
    float * sharedV = sharedRes + sharedTam;
    //float * sharedRes = &shared[sharedTam];
    //float *sharedV = &sharedRes[sharedTam];

    /*
    Copies the shared-memory tile required by the desired time-blocking configuration.
    A stride is used because the number of elements to copy is always larger than the
    number of threads. Border elements are clamped back into the domain.
    */
    for (int stride = blockThreadIndex; stride < sharedTam; stride += (blockDim.x * blockDim.y))
    {
        int sharedIdxX = stride % Dx;
        int sharedIdxY = int(stride / Dx);
        int globalIdxX = (blockIdx.x * blockDim.x) + sharedIdxX - times;
        int globalIdxY = (blockIdx.y * blockDim.y) + sharedIdxY - times;
        //int globalIdx = globalIdxX + (globalIdxX < 0) - (globalIdxX >= X) + (globalIdxY + (globalIdxY < 0) - (globalIdxY >= Y)) * X;
        int globalIdx = globalIdxX*(!(globalIdxX < 0 || globalIdxX >= X)) + (globalIdxX + (globalIdxX < 0) - (globalIdxX >= X))*((globalIdxX < 0 || globalIdxX >= X)) + (globalIdxY*(!(globalIdxY < 0 || globalIdxY >= Y)) + (globalIdxY + (globalIdxY < 0) - (globalIdxY >= Y))*((globalIdxY < 0 || globalIdxY >= Y))) * X;

        shared[stride] = d_e[globalIdx];
        sharedV[stride] = d_v[globalIdx];
    }
    __syncthreads();

    /*
    Dispatches the computation of every element for all time levels except the last one.
    */
    for (int t = 1; t < times; t++)
    {
        //_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,Dx,threadIdx.x+k2,threadIdx.y+k2);
        int tDx = blockDim.x + ((times - t) * 2);
        int tDy = blockDim.y + ((times - t) * 2);
        int tk2 = (t);
        int tSharedTam = tDx * tDy;
        for (int stride = blockThreadIndex; stride < tSharedTam; stride += (blockDim.x * blockDim.y))
        {
            _2Dstencil_(shared, sharedRes, sharedV, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2);
        }
        float * temp = shared;
        shared = sharedRes;
        sharedRes = temp;
        __syncthreads();
    }

    /*
    Dispatches the computation of every element of the last time level.
    */
    _2Dstencil_(shared, d_r, sharedV, Dx, ((x%(blockDim.x))+times), ((y%(blockDim.y))+times), X, x, y);
    __syncthreads();

    int sharedIdx = ((x%(blockDim.x))+times) + ((y%(blockDim.y))+times)*Dx;
    int globalIdx = x + y * X;
    d_v[globalIdx] = sharedV[sharedIdx];
}

int main(int argc, char *argv[])
{
    /*
    Declarations and default values
    */
    float *h_e, *h_r, *h_v;
    float *d_e, *d_r, *d_v;
    int size, sharedSize;
    int X = 32;
    int Y = 32;
    int times = 1, globalTimes = 1;
    int BX = 32;
    int BY = 32;
    int GX = 1;
    int GY = 1;

    /*
    Read the input parameters
    */
    if (argc > 1)
    {
        X = atoi(argv[1]);
        Y = X;
    }
    if (argc > 2)
    {
        times = atoi(argv[2]);
    }
    if (argc > 3)
    {
        globalTimes = atoi(argv[3]);
    }
    if (X > 32)
    {
        if (argc > 4)
            BX = atoi(argv[4]);
        GX = ceil((float)X / (float)BX);
        BX = 32;
    }
    if (Y > 32)
    {
        if (argc > 5)
            BY = atoi(argv[5]);
        GY = ceil((float)Y / (float)BY);
        BY = 32;
    }

    /*
    Memory allocations and block/grid configuration
    */
    dim3 block_dim(BX, BY, 1);
    dim3 grid_dim(GX, GY, 1);

    //sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(int);
    sharedSize = ((block_dim.x + (2 * times)) * (block_dim.y + (2 * times))) * sizeof(float) * 3;
    //sharedTam = ((block_dim.x+(k*2))*(block_dim.y+(k*2)));
    size = X * Y * sizeof(float);
    //tam = X * Y;
    h_e = (float *)malloc(size);
    h_r = (float *)malloc(size);
    h_v = (float *)malloc(size);

    cudaMalloc(&d_e, size);
    cudaMalloc(&d_r, size);
    cudaMalloc(&d_v, size);

    // Reads the field data, sends it to the GPU and initializes the input domain
    FILE *arq;
    arq = fopen("entrada.txt", "rt");
    for (int i = 0; i < X; i++)
        for (int j = 0; j < Y; j++)
        {
            h_v[i + j * X] = 0.5f;
            int temp;
            fscanf(arq, " %d", &temp);
            h_e[i + j * X] = temp;
        }
    fclose(arq);

    cudaMemcpy(d_v, h_v, size, cudaMemcpyHostToDevice);

    /* Copy vectors from host memory to device memory
       (sends the input data to the GPU) */
    cudaMemcpy(d_e, h_e, size, cudaMemcpyHostToDevice);

    /*
    Start the timer
    */
    cudaDeviceSynchronize();
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    /******************
    *** Kernel Call ***
    *******************/
    //_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);

    /*
    Run the kernel
    */
    for (int i = 0; i < globalTimes / times; i++)
    {
        _2Dstencil_global<<<grid_dim, block_dim, sharedSize>>>(d_e, d_r, d_v, X, Y, times);
        float * temp = d_e;
        d_e = d_r;
        d_r = temp;
    }

    /*
    Check for possible errors
    */
    cudaError_t err = cudaSuccess;
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch _2Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
    }

    /******************
    *** Kernel Call ***
    *******************/

    cudaDeviceSynchronize();

    /*
    Stop the timer
    */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    printf ("[%d,%.5f]",times,elapsedTime);

    // arq = fopen("TempoExecucaoBlocking12000VariandoTimes.txt", "a");
    // //printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
    // // float sharedTime = 0.0;
    // // if(MODEL_WIDTH == 64)
    // //     sharedTime = 108.41396;
    // // if(MODEL_WIDTH == 96)
    // //     sharedTime = 89.01120;
    // // if(MODEL_WIDTH == 128)
    // //     sharedTime = 95.11117;
    // // if(MODEL_WIDTH == 160)
    // //     sharedTime = 113.37702;
    // // if(MODEL_WIDTH == 192)
    // //     sharedTime = 101.13689;
    // // if(MODEL_WIDTH == 224)
    // //     sharedTime = 154.31091;
    // // if(MODEL_WIDTH == 256)
    // //     sharedTime = 186.73097;
    // // if(MODEL_WIDTH == 288)
    // //     sharedTime = 218.92052;
    // // if(MODEL_WIDTH == 320)
    // //     sharedTime = 232.28406;
    // // if(MODEL_WIDTH == 352)
    // //     sharedTime = 295.31876;
    // // if(MODEL_WIDTH == 384)
    // //     sharedTime = 304.94522;
    // // if(MODEL_WIDTH == 416)
    // //     sharedTime = 385.76855;
    // // if(MODEL_WIDTH == 448)
    // //     sharedTime = 570.88287;
    // // if(MODEL_WIDTH == 480)
    // //     sharedTime = 701.02271;
    // // if(MODEL_WIDTH == 512)
    // //     sharedTime = 768.65991;
    // // if(MODEL_WIDTH == 544)
    // //     sharedTime = 881.91882;
    // // if(MODEL_WIDTH == 576)
    // //     sharedTime = 979.11212;
    // // if(MODEL_WIDTH == 608)
    // //     sharedTime = 1082.10193;
    // // if(MODEL_WIDTH == 640)
    // //     sharedTime = 1188.77576;
    // // if(MODEL_WIDTH == 672)
    // //     sharedTime = 1316.50024;
    // // if(MODEL_WIDTH == 704)
    // //     sharedTime = 1436.11035;
    // // if(MODEL_WIDTH == 736)
    // //     sharedTime = 1532.38489;
    // // if(MODEL_WIDTH == 768)
    // //     sharedTime = 1576.36401;
    // fprintf (arq,"(%d,%.5f),\n",times,elapsedTime);//,sharedTime);
    // fclose(arq);

    /*
    Copy the result back for visualization
    */
    cudaMemcpy(h_r, d_e, size, cudaMemcpyDeviceToHost);

    arq = fopen("resultado.txt", "wt");
    for (int i = 0; i < X; i++)
    {
        for (int j = 0; j < Y; j++)
        {
            fprintf(arq, " %6.4f", h_r[i + j * X]);
        }
        fprintf(arq, "\n");
    }
    fclose(arq);

    cudaFree(d_e);
    cudaFree(d_r);
    cudaFree(d_v);

    std::free(h_e);
    std::free(h_r);
    std::free(h_v);

    return 0;
} /* main */
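/*
Illustration only (not called anywhere in this program): a host-side restatement of the
Karma reaction step from _2Dstencil_ for a single grid point, assuming the same Eh, En,
Re, tauE, tauN, East and DT constants defined above. The diffusion (Laplacian) term is
omitted because it needs the four neighbouring points; this is meant purely as a reading
aid for the device function.
*/
static void karmaPointUpdateHost(float &E, float &n)
{
    float Rn = (1.0f / (1.0f - expf(-Re))) - n;        // target of the slow variable
    float p  = (E > En) ? 1.0f : 0.0f;                 // activation gate
    float dn = (Rn * p - (1.0f - p) * n) / tauN;       // dn/dt
    float hE = (1.0f - tanhf(E - Eh)) * E * E / 2.0f;  // nonlinear response h(E)
    float dE = (((East - n * n) * hE) - E) / tauE;     // dE/dt without diffusion
    E += dE * DT;                                      // forward-Euler step
    n += dn * DT;
}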
5f1f38fc39ea90ad25f67f566de0bfa4ed110a8c.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _FLATTERN_CUDA_ #define _FLATTERN_CUDA_ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #include "../Source/CUDA/cuda_header.h" __global__ void cuda_kernel_rain(float *output, float *input, Size size, Size size_two){ int x_iter = blockIdx.x*blockDim.x + threadIdx.x; int y_iter = blockIdx.y*blockDim.y + threadIdx.y; int z_iter = 0; float rain_sum = 0.f; for(z_iter = 0; z_iter < size_two.depth_; z_iter++){ if(x_iter +1 < size_two.width_ && x_iter - 1 >= 0){ if(y_iter + 1 < size_two.height_ && y_iter - 1 >= 0){ if(z_iter + 1 < size_two.depth_ && z_iter - 1 >= 0){ float* cell_rain = input + (z_iter*size_two.pitch_slice_) + (y_iter*size_two.pitch_) + (PIXEL_FMT_SIZE_RG * x_iter); rain_sum += cell_rain[F_identifier_]; } } } } int xIter = x_iter; int yIter = y_iter; if(x_iter%2 != 0 && y_iter%2 != 0){ xIter--; yIter--; }else if(x_iter%2 != 0){ xIter--; }else if(y_iter%2 != 0){ yIter--; } xIter /= 2; yIter /= 2; float* rain = output + (yIter*size.pitch_) + (PIXEL_FMT_SIZE_RGBA * xIter); if(rain_sum < 0){ rain_sum *= -100000.f; } rain[0] = rain_sum; } extern "C" void cuda_fluid_rain(void *output, void *input, Size size, Size size_two){ hipError_t error = hipSuccess; dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads //dim3 Dg = dim3((size.width_+Db.x-1)/Db.x, (size.height_+Db.y-1)/Db.y); dim3 Dg = dim3((size_two.width_+Db.x-1)/Db.x, (size_two.height_+Db.y-1)/Db.y); hipLaunchKernelGGL(( cuda_kernel_rain), dim3(Dg),dim3(Db), 0, 0, (float *)output, (float *)input, size, size_two); error = hipGetLastError(); if (error != hipSuccess){ printf("cuda_kernel_jacobi() failed to launch error = %d\n", error); } } #endif
5f1f38fc39ea90ad25f67f566de0bfa4ed110a8c.cu
#ifndef _FLATTERN_CUDA_ #define _FLATTERN_CUDA_ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda_runtime.h> #include "device_launch_parameters.h" #include "../Source/CUDA/cuda_header.h" __global__ void cuda_kernel_rain(float *output, float *input, Size size, Size size_two){ int x_iter = blockIdx.x*blockDim.x + threadIdx.x; int y_iter = blockIdx.y*blockDim.y + threadIdx.y; int z_iter = 0; float rain_sum = 0.f; for(z_iter = 0; z_iter < size_two.depth_; z_iter++){ if(x_iter +1 < size_two.width_ && x_iter - 1 >= 0){ if(y_iter + 1 < size_two.height_ && y_iter - 1 >= 0){ if(z_iter + 1 < size_two.depth_ && z_iter - 1 >= 0){ float* cell_rain = input + (z_iter*size_two.pitch_slice_) + (y_iter*size_two.pitch_) + (PIXEL_FMT_SIZE_RG * x_iter); rain_sum += cell_rain[F_identifier_]; } } } } int xIter = x_iter; int yIter = y_iter; if(x_iter%2 != 0 && y_iter%2 != 0){ xIter--; yIter--; }else if(x_iter%2 != 0){ xIter--; }else if(y_iter%2 != 0){ yIter--; } xIter /= 2; yIter /= 2; float* rain = output + (yIter*size.pitch_) + (PIXEL_FMT_SIZE_RGBA * xIter); if(rain_sum < 0){ rain_sum *= -100000.f; } rain[0] = rain_sum; } extern "C" void cuda_fluid_rain(void *output, void *input, Size size, Size size_two){ cudaError_t error = cudaSuccess; dim3 Db = dim3(16, 16); // block dimensions are fixed to be 256 threads //dim3 Dg = dim3((size.width_+Db.x-1)/Db.x, (size.height_+Db.y-1)/Db.y); dim3 Dg = dim3((size_two.width_+Db.x-1)/Db.x, (size_two.height_+Db.y-1)/Db.y); cuda_kernel_rain<<<Dg,Db>>>((float *)output, (float *)input, size, size_two); error = cudaGetLastError(); if (error != cudaSuccess){ printf("cuda_kernel_jacobi() failed to launch error = %d\n", error); } } #endif
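// Reading notes for cuda_kernel_rain above: each thread handles one texel (x, y) of the
// finer size_two grid, accumulates the F_identifier_ component of that column over every
// interior depth slice, and then collapses (x, y) onto the coarser output texel
// (x/2, y/2). Four fine-grid threads therefore map onto the same coarse texel, each with
// its own column sum, and the value left in rain[0] is the one stored last.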
005fc410a978c1ff30ef9eb2e6d31d52150e036c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_utils.h> #include <gtest/gtest.h> #include <test_utils.h> #include <treelite/frontend.h> #include <treelite/tree.h> #include <cmath> #include <limits> #include <memory> #include <utility> #include "fil/fil.h" #include "ml_utils.h" #include "random/rng.h" #include "test_utils.h" #define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error") namespace ML { using namespace MLCommon; namespace tl = treelite; namespace tlf = treelite::frontend; struct FilTestParams { // input data parameters int rows; int cols; float nan_prob; // forest parameters int depth; int num_trees; float leaf_prob; // output parameters fil::output_t output; float threshold; // runtime parameters fil::algo_t algo; int seed; float tolerance; // treelite parameters, only used for treelite tests tl::Operator op; }; std::ostream& operator<<(std::ostream& os, const FilTestParams& ps) { os << "rows = " << ps.rows << ", cols = " << ps.cols << ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth << ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob << ", output = " << ps.output << ", threshold = " << ps.threshold << ", algo = " << ps.algo << ", seed = " << ps.seed << ", tolerance = " << ps.tolerance << ", op = " << tl::OpName(ps.op); return os; } __global__ void nan_kernel(float* data, const bool* mask, int len, float nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } class BaseFilTest : public testing::TestWithParam<FilTestParams> { protected: void SetUp() override { // setup ps = testing::TestWithParam<FilTestParams>::GetParam(); CUDA_CHECK(hipStreamCreate(&stream)); handle.setStream(stream); generate_forest(); generate_data(); predict_on_cpu(); predict_on_gpu(); } void TearDown() override { CUDA_CHECK(hipFree(preds_d)); CUDA_CHECK(hipFree(want_preds_d)); CUDA_CHECK(hipFree(data_d)); } void generate_forest() { size_t num_nodes = forest_num_nodes(); // helper data float* weights_d = nullptr; float* thresholds_d = nullptr; int* fids_d = nullptr; bool* def_lefts_d = nullptr; bool* is_leafs_d = nullptr; bool* def_lefts_h = nullptr; bool* is_leafs_h = nullptr; // allocate GPU data allocate(weights_d, num_nodes); allocate(thresholds_d, num_nodes); allocate(fids_d, num_nodes); allocate(def_lefts_d, num_nodes); allocate(is_leafs_d, num_nodes); // generate on-GPU random data Random::Rng r(ps.seed); r.uniform(weights_d, num_nodes, -1.0f, 1.0f, stream); r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream); r.uniformInt(fids_d, num_nodes, 0, ps.cols, stream); r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream); r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream); // copy data to host std::vector<float> weights_h(num_nodes), thresholds_h(num_nodes); std::vector<int> fids_h(num_nodes); 
def_lefts_h = new bool[num_nodes]; is_leafs_h = new bool[num_nodes]; updateHost(weights_h.data(), weights_d, num_nodes, stream); updateHost(thresholds_h.data(), thresholds_d, num_nodes, stream); updateHost(fids_h.data(), fids_d, num_nodes, stream); updateHost(def_lefts_h, def_lefts_d, num_nodes, stream); updateHost(is_leafs_h, is_leafs_d, num_nodes, stream); CUDA_CHECK(hipStreamSynchronize(stream)); // mark leaves for (size_t i = 0; i < ps.num_trees; ++i) { int num_tree_nodes = tree_num_nodes(); size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2; size_t leaf_end = num_tree_nodes * (i + 1); for (size_t j = leaf_start; j < leaf_end; ++j) { is_leafs_h[j] = true; } } // initialize nodes nodes.resize(num_nodes); for (size_t i = 0; i < num_nodes; ++i) { fil::dense_node_init(&nodes[i], weights_h[i], thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]); } // clean up delete[] def_lefts_h; delete[] is_leafs_h; CUDA_CHECK(hipFree(is_leafs_d)); CUDA_CHECK(hipFree(def_lefts_d)); CUDA_CHECK(hipFree(fids_d)); CUDA_CHECK(hipFree(thresholds_d)); CUDA_CHECK(hipFree(weights_d)); } void generate_data() { // allocate arrays size_t num_data = ps.rows * ps.cols; allocate(data_d, num_data); bool* mask_d = nullptr; allocate(mask_d, num_data); // generate random data Random::Rng r(ps.seed); r.uniform(data_d, num_data, -1.0f, 1.0f, stream); r.bernoulli(mask_d, num_data, ps.nan_prob, stream); int tpb = 256; hipLaunchKernelGGL(( nan_kernel), dim3(ceildiv(int(num_data), tpb)), dim3(tpb), 0, stream, data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN()); CUDA_CHECK(hipPeekAtLastError()); // copy to host data_h.resize(num_data); updateHost(data_h.data(), data_d, num_data, stream); CUDA_CHECK(hipStreamSynchronize(stream)); // clean up CUDA_CHECK(hipFree(mask_d)); } void predict_on_cpu() { // predict on host std::vector<float> want_preds_h(ps.rows); int num_nodes = tree_num_nodes(); for (int i = 0; i < ps.rows; ++i) { float pred = 0.0f; for (int j = 0; j < ps.num_trees; ++j) { pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.cols]); } if ((ps.output & fil::output_t::AVG) != 0) pred = pred / ps.num_trees; if ((ps.output & fil::output_t::SIGMOID) != 0) pred = sigmoid(pred); if ((ps.output & fil::output_t::THRESHOLD) != 0) { pred = pred > ps.threshold ? 1.0f : 0.0f; } want_preds_h[i] = pred; } // copy to GPU allocate(want_preds_d, ps.rows); updateDevice(want_preds_d, want_preds_h.data(), ps.rows, stream); CUDA_CHECK(hipStreamSynchronize(stream)); } virtual void init_forest(fil::forest_t* pforest) = 0; void predict_on_gpu() { fil::forest_t forest = nullptr; init_forest(&forest); // predict allocate(preds_d, ps.rows); fil::predict(handle, forest, preds_d, data_d, ps.rows); CUDA_CHECK(hipStreamSynchronize(stream)); // cleanup fil::free(handle, forest); } void compare() { ASSERT_TRUE(devArrMatch(want_preds_d, preds_d, ps.rows, CompareApprox<float>(ps.tolerance), stream)); } float infer_one_tree(fil::dense_node_t* root, float* data) { int curr = 0; float output = 0.0f, threshold = 0.0f; int fid = 0; bool def_left = false, is_leaf = false; for (;;) { fil::dense_node_decode(&root[curr], &output, &threshold, &fid, &def_left, &is_leaf); if (is_leaf) break; float val = data[fid]; bool cond = isnan(val) ? !def_left : val >= threshold; curr = (curr << 1) + 1 + (cond ? 
1 : 0); } return output; } int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; } int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; } // predictions float* preds_d = nullptr; float* want_preds_d = nullptr; // input data float* data_d = nullptr; std::vector<float> data_h; // forest data std::vector<fil::dense_node_t> nodes; // parameters hipStream_t stream; cumlHandle handle; FilTestParams ps; }; class PredictFilTest : public BaseFilTest { protected: void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_ps; fil_ps.nodes = nodes.data(); fil_ps.depth = ps.depth; fil_ps.ntrees = ps.num_trees; fil_ps.cols = ps.cols; fil_ps.algo = ps.algo; fil_ps.output = ps.output; fil_ps.threshold = ps.threshold; fil::init_dense(handle, pforest, &fil_ps); } }; class TreeliteFilTest : public BaseFilTest { protected: /** adds nodes[node] of tree starting at index root to builder at index at *pkey, increments *pkey, and returns the treelite key of the node */ int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node) { int key = (*pkey)++; TL_CPP_CHECK(builder->CreateNode(key)); int feature; float threshold, output; bool is_leaf, default_left; fil::dense_node_decode(&nodes[node], &output, &threshold, &feature, &default_left, &is_leaf); if (is_leaf) { TL_CPP_CHECK(builder->SetLeafNode(key, output)); } else { int left = root + 2 * (node - root) + 1; int right = root + 2 * (node - root) + 2; switch (ps.op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // adjust the threshold threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // adjust the threshold; left and right still need to be swapped threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(left, right); default_left = !default_left; break; default: ASSERT(false, "comparison operator must be <, >, <= or >="); } int left_key = node_to_treelite(builder, pkey, root, left); int right_key = node_to_treelite(builder, pkey, root, right); TL_CPP_CHECK(builder->SetNumericalTestNode( key, feature, ps.op, threshold, default_left, left_key, right_key)); } return key; } void init_forest(fil::forest_t* pforest) override { bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0; std::unique_ptr<tlf::ModelBuilder> model_builder( new tlf::ModelBuilder(ps.cols, 1, random_forest_flag)); // model metadata if ((ps.output & fil::output_t::SIGMOID) != 0) { model_builder->SetModelParam("pred_transform", "sigmoid"); } // build the trees for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) { tlf::TreeBuilder* tree_builder = new tlf::TreeBuilder(); int key_counter = 0; int root = i_tree * tree_num_nodes(); int root_key = node_to_treelite(tree_builder, &key_counter, root, root); TL_CPP_CHECK(tree_builder->SetRootNode(root_key)); // InsertTree() consumes tree_builder TL_CPP_CHECK(model_builder->InsertTree(tree_builder)); } // commit the model std::unique_ptr<tl::Model> model(new tl::Model); TL_CPP_CHECK(model_builder->CommitModel(model.get())); // init FIL forest with the model fil::treelite_params_t params; params.algo = ps.algo; params.threshold = ps.threshold; params.output_class = (ps.output & fil::output_t::THRESHOLD) != 0; fil::from_treelite(handle, pforest, (ModelHandle)model.get(), &params); CUDA_CHECK(hipStreamSynchronize(stream)); } }; // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, algo, // seed, tolerance 
std::vector<FilTestParams> predict_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, }; TEST_P(PredictFilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictFilTest, testing::ValuesIn(predict_inputs)); std::vector<FilTestParams> import_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, }; TEST_P(TreeliteFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteFilTest, testing::ValuesIn(import_inputs)); } // namespace ML
005fc410a978c1ff30ef9eb2e6d31d52150e036c.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_utils.h> #include <gtest/gtest.h> #include <test_utils.h> #include <treelite/frontend.h> #include <treelite/tree.h> #include <cmath> #include <limits> #include <memory> #include <utility> #include "fil/fil.h" #include "ml_utils.h" #include "random/rng.h" #include "test_utils.h" #define TL_CPP_CHECK(call) ASSERT(int(call) >= 0, "treelite call error") namespace ML { using namespace MLCommon; namespace tl = treelite; namespace tlf = treelite::frontend; struct FilTestParams { // input data parameters int rows; int cols; float nan_prob; // forest parameters int depth; int num_trees; float leaf_prob; // output parameters fil::output_t output; float threshold; // runtime parameters fil::algo_t algo; int seed; float tolerance; // treelite parameters, only used for treelite tests tl::Operator op; }; std::ostream& operator<<(std::ostream& os, const FilTestParams& ps) { os << "rows = " << ps.rows << ", cols = " << ps.cols << ", nan_prob = " << ps.nan_prob << ", depth = " << ps.depth << ", num_trees = " << ps.num_trees << ", leaf_prob = " << ps.leaf_prob << ", output = " << ps.output << ", threshold = " << ps.threshold << ", algo = " << ps.algo << ", seed = " << ps.seed << ", tolerance = " << ps.tolerance << ", op = " << tl::OpName(ps.op); return os; } __global__ void nan_kernel(float* data, const bool* mask, int len, float nan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= len) return; if (!mask[tid]) data[tid] = nan; } float sigmoid(float x) { return 1.0f / (1.0f + expf(-x)); } class BaseFilTest : public testing::TestWithParam<FilTestParams> { protected: void SetUp() override { // setup ps = testing::TestWithParam<FilTestParams>::GetParam(); CUDA_CHECK(cudaStreamCreate(&stream)); handle.setStream(stream); generate_forest(); generate_data(); predict_on_cpu(); predict_on_gpu(); } void TearDown() override { CUDA_CHECK(cudaFree(preds_d)); CUDA_CHECK(cudaFree(want_preds_d)); CUDA_CHECK(cudaFree(data_d)); } void generate_forest() { size_t num_nodes = forest_num_nodes(); // helper data float* weights_d = nullptr; float* thresholds_d = nullptr; int* fids_d = nullptr; bool* def_lefts_d = nullptr; bool* is_leafs_d = nullptr; bool* def_lefts_h = nullptr; bool* is_leafs_h = nullptr; // allocate GPU data allocate(weights_d, num_nodes); allocate(thresholds_d, num_nodes); allocate(fids_d, num_nodes); allocate(def_lefts_d, num_nodes); allocate(is_leafs_d, num_nodes); // generate on-GPU random data Random::Rng r(ps.seed); r.uniform(weights_d, num_nodes, -1.0f, 1.0f, stream); r.uniform(thresholds_d, num_nodes, -1.0f, 1.0f, stream); r.uniformInt(fids_d, num_nodes, 0, ps.cols, stream); r.bernoulli(def_lefts_d, num_nodes, 0.5f, stream); r.bernoulli(is_leafs_d, num_nodes, 1.0f - ps.leaf_prob, stream); // copy data to host std::vector<float> weights_h(num_nodes), thresholds_h(num_nodes); std::vector<int> fids_h(num_nodes); def_lefts_h = new bool[num_nodes]; is_leafs_h = new bool[num_nodes]; 
updateHost(weights_h.data(), weights_d, num_nodes, stream); updateHost(thresholds_h.data(), thresholds_d, num_nodes, stream); updateHost(fids_h.data(), fids_d, num_nodes, stream); updateHost(def_lefts_h, def_lefts_d, num_nodes, stream); updateHost(is_leafs_h, is_leafs_d, num_nodes, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); // mark leaves for (size_t i = 0; i < ps.num_trees; ++i) { int num_tree_nodes = tree_num_nodes(); size_t leaf_start = num_tree_nodes * i + num_tree_nodes / 2; size_t leaf_end = num_tree_nodes * (i + 1); for (size_t j = leaf_start; j < leaf_end; ++j) { is_leafs_h[j] = true; } } // initialize nodes nodes.resize(num_nodes); for (size_t i = 0; i < num_nodes; ++i) { fil::dense_node_init(&nodes[i], weights_h[i], thresholds_h[i], fids_h[i], def_lefts_h[i], is_leafs_h[i]); } // clean up delete[] def_lefts_h; delete[] is_leafs_h; CUDA_CHECK(cudaFree(is_leafs_d)); CUDA_CHECK(cudaFree(def_lefts_d)); CUDA_CHECK(cudaFree(fids_d)); CUDA_CHECK(cudaFree(thresholds_d)); CUDA_CHECK(cudaFree(weights_d)); } void generate_data() { // allocate arrays size_t num_data = ps.rows * ps.cols; allocate(data_d, num_data); bool* mask_d = nullptr; allocate(mask_d, num_data); // generate random data Random::Rng r(ps.seed); r.uniform(data_d, num_data, -1.0f, 1.0f, stream); r.bernoulli(mask_d, num_data, ps.nan_prob, stream); int tpb = 256; nan_kernel<<<ceildiv(int(num_data), tpb), tpb, 0, stream>>>( data_d, mask_d, num_data, std::numeric_limits<float>::quiet_NaN()); CUDA_CHECK(cudaPeekAtLastError()); // copy to host data_h.resize(num_data); updateHost(data_h.data(), data_d, num_data, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); // clean up CUDA_CHECK(cudaFree(mask_d)); } void predict_on_cpu() { // predict on host std::vector<float> want_preds_h(ps.rows); int num_nodes = tree_num_nodes(); for (int i = 0; i < ps.rows; ++i) { float pred = 0.0f; for (int j = 0; j < ps.num_trees; ++j) { pred += infer_one_tree(&nodes[j * num_nodes], &data_h[i * ps.cols]); } if ((ps.output & fil::output_t::AVG) != 0) pred = pred / ps.num_trees; if ((ps.output & fil::output_t::SIGMOID) != 0) pred = sigmoid(pred); if ((ps.output & fil::output_t::THRESHOLD) != 0) { pred = pred > ps.threshold ? 1.0f : 0.0f; } want_preds_h[i] = pred; } // copy to GPU allocate(want_preds_d, ps.rows); updateDevice(want_preds_d, want_preds_h.data(), ps.rows, stream); CUDA_CHECK(cudaStreamSynchronize(stream)); } virtual void init_forest(fil::forest_t* pforest) = 0; void predict_on_gpu() { fil::forest_t forest = nullptr; init_forest(&forest); // predict allocate(preds_d, ps.rows); fil::predict(handle, forest, preds_d, data_d, ps.rows); CUDA_CHECK(cudaStreamSynchronize(stream)); // cleanup fil::free(handle, forest); } void compare() { ASSERT_TRUE(devArrMatch(want_preds_d, preds_d, ps.rows, CompareApprox<float>(ps.tolerance), stream)); } float infer_one_tree(fil::dense_node_t* root, float* data) { int curr = 0; float output = 0.0f, threshold = 0.0f; int fid = 0; bool def_left = false, is_leaf = false; for (;;) { fil::dense_node_decode(&root[curr], &output, &threshold, &fid, &def_left, &is_leaf); if (is_leaf) break; float val = data[fid]; bool cond = isnan(val) ? !def_left : val >= threshold; curr = (curr << 1) + 1 + (cond ? 
1 : 0); } return output; } int tree_num_nodes() { return (1 << (ps.depth + 1)) - 1; } int forest_num_nodes() { return tree_num_nodes() * ps.num_trees; } // predictions float* preds_d = nullptr; float* want_preds_d = nullptr; // input data float* data_d = nullptr; std::vector<float> data_h; // forest data std::vector<fil::dense_node_t> nodes; // parameters cudaStream_t stream; cumlHandle handle; FilTestParams ps; }; class PredictFilTest : public BaseFilTest { protected: void init_forest(fil::forest_t* pforest) override { // init FIL model fil::forest_params_t fil_ps; fil_ps.nodes = nodes.data(); fil_ps.depth = ps.depth; fil_ps.ntrees = ps.num_trees; fil_ps.cols = ps.cols; fil_ps.algo = ps.algo; fil_ps.output = ps.output; fil_ps.threshold = ps.threshold; fil::init_dense(handle, pforest, &fil_ps); } }; class TreeliteFilTest : public BaseFilTest { protected: /** adds nodes[node] of tree starting at index root to builder at index at *pkey, increments *pkey, and returns the treelite key of the node */ int node_to_treelite(tlf::TreeBuilder* builder, int* pkey, int root, int node) { int key = (*pkey)++; TL_CPP_CHECK(builder->CreateNode(key)); int feature; float threshold, output; bool is_leaf, default_left; fil::dense_node_decode(&nodes[node], &output, &threshold, &feature, &default_left, &is_leaf); if (is_leaf) { TL_CPP_CHECK(builder->SetLeafNode(key, output)); } else { int left = root + 2 * (node - root) + 1; int right = root + 2 * (node - root) + 2; switch (ps.op) { case tl::Operator::kLT: break; case tl::Operator::kLE: // adjust the threshold threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); break; case tl::Operator::kGT: // adjust the threshold; left and right still need to be swapped threshold = std::nextafterf(threshold, -std::numeric_limits<float>::infinity()); case tl::Operator::kGE: // swap left and right std::swap(left, right); default_left = !default_left; break; default: ASSERT(false, "comparison operator must be <, >, <= or >="); } int left_key = node_to_treelite(builder, pkey, root, left); int right_key = node_to_treelite(builder, pkey, root, right); TL_CPP_CHECK(builder->SetNumericalTestNode( key, feature, ps.op, threshold, default_left, left_key, right_key)); } return key; } void init_forest(fil::forest_t* pforest) override { bool random_forest_flag = (ps.output & fil::output_t::AVG) != 0; std::unique_ptr<tlf::ModelBuilder> model_builder( new tlf::ModelBuilder(ps.cols, 1, random_forest_flag)); // model metadata if ((ps.output & fil::output_t::SIGMOID) != 0) { model_builder->SetModelParam("pred_transform", "sigmoid"); } // build the trees for (int i_tree = 0; i_tree < ps.num_trees; ++i_tree) { tlf::TreeBuilder* tree_builder = new tlf::TreeBuilder(); int key_counter = 0; int root = i_tree * tree_num_nodes(); int root_key = node_to_treelite(tree_builder, &key_counter, root, root); TL_CPP_CHECK(tree_builder->SetRootNode(root_key)); // InsertTree() consumes tree_builder TL_CPP_CHECK(model_builder->InsertTree(tree_builder)); } // commit the model std::unique_ptr<tl::Model> model(new tl::Model); TL_CPP_CHECK(model_builder->CommitModel(model.get())); // init FIL forest with the model fil::treelite_params_t params; params.algo = ps.algo; params.threshold = ps.threshold; params.output_class = (ps.output & fil::output_t::THRESHOLD) != 0; fil::from_treelite(handle, pforest, (ModelHandle)model.get(), &params); CUDA_CHECK(cudaStreamSynchronize(stream)); } }; // rows, cols, nan_prob, depth, num_trees, leaf_prob, output, threshold, algo, // seed, tolerance 
std::vector<FilTestParams> predict_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::NAIVE, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::TREE_REORG, 42, 2e-3f}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f}, }; TEST_P(PredictFilTest, Predict) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, PredictFilTest, testing::ValuesIn(predict_inputs)); std::vector<FilTestParams> import_inputs = { {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::NAIVE, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, 
fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::RAW, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::SIGMOID, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::SIGMOID | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t::AVG, 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kLE}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGT}, {20000, 50, 0.05, 8, 50, 0.05, fil::output_t(fil::output_t::AVG | fil::output_t::THRESHOLD), 0, fil::algo_t::BATCH_TREE_REORG, 42, 2e-3f, tl::Operator::kGE}, }; TEST_P(TreeliteFilTest, Import) { compare(); } INSTANTIATE_TEST_CASE_P(FilTests, TreeliteFilTest, testing::ValuesIn(import_inputs)); } // namespace ML
b543856e083ebd00f8d355f4c0dfcb69f60df850.hip
// !!! This is a file automatically generated by hipify!!!
#include "FaceDetectExtras.h"
#include "GPUHaarCascade.h"
#include "GPU_Face_Detect.cuh"
#include "CPU_Face_Detect.h"
#include "OpenCV_Face_Detect.h"

using namespace std;
using namespace cv;

static void CheckError()
{
	hipError_t error = hipGetLastError();
	if(error != hipSuccess)
	{
		printf("CUDA error1: %s\n", hipGetErrorString(error));
		system("pause");
	}
}

// Walk the OpenCV cascade to determine how many classifiers it contains
int numberOfClassifiers(CvHaarClassifierCascade *cvCascade)
{
	int totalClassifiers = 0;
	for(int i = 0; i < cvCascade->count; i++)
	{
		CvHaarStageClassifier stage = cvCascade->stage_classifier[i];
		totalClassifiers += stage.count;
	}
	return totalClassifiers;
}

void displayResults(IplImage * image, std::vector<CvRect> faces, char * windowTitle)
{
	// Draw rectangles to show the results
	for(int i = 0; i < faces.size(); i++)
	{
		CvRect face_rect = faces[i];
		cvRectangle( image, cvPoint(face_rect.x, face_rect.y),
			cvPoint((face_rect.x + face_rect.width), (face_rect.y + face_rect.height)),
			CV_RGB(255, 255, 255), 3);
	}

	cvNamedWindow( windowTitle, 0 );
	cvShowImage( windowTitle, image );
	//cvWaitKey(0);
}

IplImage * createCopy(IplImage *img)
{
	IplImage *cpyImg = cvCreateImage(cvSize(img->width, img->height), img->depth, img->nChannels);
	cvCopy(img, cpyImg);
	return cpyImg;
}

int main( int argc, const char** argv )
{
	// Load the test image and the OpenCV HaarCascade XML file used for detection
	int optlen = strlen("--cascade=");
	const char *imageFileName = "images\\lena_256.jpg";
	const char *detectorFileName;

	// Check whether the command-line input is well formed
	if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 )
	{
		detectorFileName = argv[1] + optlen;
		imageFileName = argc > 2 ? argv[2] : imageFileName;
	}
	else
	{
		printf("Incorrect input for command line. Using default values instead.\n");
		printf("Correct Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n\n" );
		detectorFileName = "data\\haarcascade_frontalface_default.xml";
	}

	// Load the image
	IplImage* image;
	if((image = cvLoadImage(imageFileName, CV_LOAD_IMAGE_GRAYSCALE)) == 0)
	{
		cout << "Error occurred loading image file. Check file name?" << endl;
		system("pause");
		return 0;
	}

	int width = image->width;
	int height = image->height;
	CvSize imgSize = cvSize(width, height);

	printf("Input image: %s\n", imageFileName);
	printf("Image size: [%d, %d]\n\n", width, height);

	// Load the OpenCV HaarCascade and create the system GPUHaarCascade
	CvHaarClassifierCascade *cvCascade = loadCVHaarCascade(detectorFileName);
	GPUHaarCascade gpuHaarCascade;

	// Convert the OpenCV Haar Cascade data structure into the GPU Haar Cascade structure
	gpuHaarCascade.load(cvCascade, numberOfClassifiers(cvCascade), imgSize);

	printf("Input Detector: %s\n", detectorFileName);
	printf("Num of Stages: %d\n", gpuHaarCascade.numOfStages);
	printf("Num of Classifiers: %d\n\n", gpuHaarCascade.totalNumOfClassifiers);

	// Compute the integral images
	CvMat* sum = cvCreateMat(height + 1, width + 1, CV_32SC1);
	CvMat* sqsum = cvCreateMat(height + 1, width + 1, CV_64FC1);
	cvIntegral(image, sum, sqsum);

	// Compute the scale values; the detection window is resized after each detection pass
	double factor = 1.0f;
	float scaleFactor = 1.2f;
	std::vector<double> scale;
	while(factor * gpuHaarCascade.orig_window_size.width < width - 10 &&
		factor * gpuHaarCascade.orig_window_size.height < height - 10)
	{
		scale.push_back(factor);
		factor *= scaleFactor;
	}

	// Used to group the detected rectangles (i.e. face regions)
	int minNeighbors = 3;

	// Run GPU face detection
	initGPU(gpuHaarCascade, image, sum, sqsum);

	IplImage *gpuImage_v1 = createCopy(image);
	std::vector<CvRect> gpuFaces_v1 = runGPUHaarDetection(scale, minNeighbors, V1);

	IplImage *gpuImage_v3 = createCopy(image);
	std::vector<CvRect> gpuFaces_v3;// = runGPUHaarDetection(scale, minNeighbors, V3);

	IplImage *gpuImage_v4 = createCopy(image);
	std::vector<CvRect> gpuFaces_v4;// = runGPUHaarDetection(scale, minNeighbors, V4);

	shutDownGPU();

	// Run CPU face detection
	Mat sum_Mat = cvarrToMat(sum);
	Mat sqsum_Mat = cvarrToMat(sqsum);

	IplImage *cpuImage = createCopy(image);
	std::vector<CvRect> cpuFaces = runCPUHaarDetection(gpuHaarCascade, imgSize, sum_Mat, sqsum_Mat, scale, minNeighbors);

	IplImage *cpuImage_Multithread = createCopy(image);
	runCPUHaarDetection_Multithread(gpuHaarCascade, imgSize, sum_Mat, sqsum_Mat, scale, minNeighbors);

	// Run OpenCV face detection
	IplImage *opencvImage = createCopy(image);
	std::vector<CvRect> opencvFaces = runOpenCVHaarDetection(image, cvCascade, scaleFactor);

	// Display the results
	displayResults(gpuImage_v1, gpuFaces_v1, "GPU Results v1");
	displayResults(gpuImage_v3, gpuFaces_v3, "GPU Results v3");
	displayResults(gpuImage_v4, gpuFaces_v4, "GPU Results v4");

	displayResults(cpuImage, cpuFaces, "CPU Results");
	displayResults(cpuImage_Multithread, gpuFaces_v3, "CPU_Multithread Results");

	displayResults(opencvImage, opencvFaces, "OpenCV Results");

	cvWaitKey(0);
	system("pause");

	// Free memory
	cvReleaseHaarClassifierCascade( &cvCascade );
	cvReleaseImage(&image);
	cvReleaseMat(&sum);
	cvReleaseMat(&sqsum);

	cvReleaseImage(&image);
	cvReleaseImage(&gpuImage_v1);
	cvReleaseImage(&gpuImage_v3);
	cvReleaseImage(&gpuImage_v4);
	cvReleaseImage(&cpuImage);
	cvReleaseImage(&cpuImage_Multithread);
	cvReleaseImage(&opencvImage);

	gpuHaarCascade.shutdown();

	return 0;
}
b543856e083ebd00f8d355f4c0dfcb69f60df850.cu
#include "FaceDetectExtras.h"
#include "GPUHaarCascade.h"
#include "GPU_Face_Detect.cuh"
#include "CPU_Face_Detect.h"
#include "OpenCV_Face_Detect.h"

using namespace std;
using namespace cv;

static void CheckError()
{
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("CUDA error1: %s\n", cudaGetErrorString(error));
		system("pause");
	}
}

// Walk the OpenCV cascade to determine how many classifiers it contains
int numberOfClassifiers(CvHaarClassifierCascade *cvCascade)
{
	int totalClassifiers = 0;
	for(int i = 0; i < cvCascade->count; i++)
	{
		CvHaarStageClassifier stage = cvCascade->stage_classifier[i];
		totalClassifiers += stage.count;
	}
	return totalClassifiers;
}

void displayResults(IplImage * image, std::vector<CvRect> faces, char * windowTitle)
{
	// Draw rectangles to show the results
	for(int i = 0; i < faces.size(); i++)
	{
		CvRect face_rect = faces[i];
		cvRectangle( image, cvPoint(face_rect.x, face_rect.y),
			cvPoint((face_rect.x + face_rect.width), (face_rect.y + face_rect.height)),
			CV_RGB(255, 255, 255), 3);
	}

	cvNamedWindow( windowTitle, 0 );
	cvShowImage( windowTitle, image );
	//cvWaitKey(0);
}

IplImage * createCopy(IplImage *img)
{
	IplImage *cpyImg = cvCreateImage(cvSize(img->width, img->height), img->depth, img->nChannels);
	cvCopy(img, cpyImg);
	return cpyImg;
}

int main( int argc, const char** argv )
{
	// Load the test image and the OpenCV HaarCascade XML file used for detection
	int optlen = strlen("--cascade=");
	const char *imageFileName = "images\\lena_256.jpg";
	const char *detectorFileName;

	// Check whether the command-line input is well formed
	if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 )
	{
		detectorFileName = argv[1] + optlen;
		imageFileName = argc > 2 ? argv[2] : imageFileName;
	}
	else
	{
		printf("Incorrect input for command line. Using default values instead.\n");
		printf("Correct Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n\n" );
		detectorFileName = "data\\haarcascade_frontalface_default.xml";
	}

	// Load the image
	IplImage* image;
	if((image = cvLoadImage(imageFileName, CV_LOAD_IMAGE_GRAYSCALE)) == 0)
	{
		cout << "Error occurred loading image file. Check file name?" << endl;
		system("pause");
		return 0;
	}

	int width = image->width;
	int height = image->height;
	CvSize imgSize = cvSize(width, height);

	printf("Input image: %s\n", imageFileName);
	printf("Image size: [%d, %d]\n\n", width, height);

	// Load the OpenCV HaarCascade and create the system GPUHaarCascade
	CvHaarClassifierCascade *cvCascade = loadCVHaarCascade(detectorFileName);
	GPUHaarCascade gpuHaarCascade;

	// Convert the OpenCV Haar Cascade data structure into the GPU Haar Cascade structure
	gpuHaarCascade.load(cvCascade, numberOfClassifiers(cvCascade), imgSize);

	printf("Input Detector: %s\n", detectorFileName);
	printf("Num of Stages: %d\n", gpuHaarCascade.numOfStages);
	printf("Num of Classifiers: %d\n\n", gpuHaarCascade.totalNumOfClassifiers);

	// Compute the integral images
	CvMat* sum = cvCreateMat(height + 1, width + 1, CV_32SC1);
	CvMat* sqsum = cvCreateMat(height + 1, width + 1, CV_64FC1);
	cvIntegral(image, sum, sqsum);

	// Compute the scale values; the detection window is resized after each detection pass
	double factor = 1.0f;
	float scaleFactor = 1.2f;
	std::vector<double> scale;
	while(factor * gpuHaarCascade.orig_window_size.width < width - 10 &&
		factor * gpuHaarCascade.orig_window_size.height < height - 10)
	{
		scale.push_back(factor);
		factor *= scaleFactor;
	}

	// Used to group the detected rectangles (i.e. face regions)
	int minNeighbors = 3;

	// Run GPU face detection
	initGPU(gpuHaarCascade, image, sum, sqsum);

	IplImage *gpuImage_v1 = createCopy(image);
	std::vector<CvRect> gpuFaces_v1 = runGPUHaarDetection(scale, minNeighbors, V1);

	IplImage *gpuImage_v3 = createCopy(image);
	std::vector<CvRect> gpuFaces_v3;// = runGPUHaarDetection(scale, minNeighbors, V3);

	IplImage *gpuImage_v4 = createCopy(image);
	std::vector<CvRect> gpuFaces_v4;// = runGPUHaarDetection(scale, minNeighbors, V4);

	shutDownGPU();

	// Run CPU face detection
	Mat sum_Mat = cvarrToMat(sum);
	Mat sqsum_Mat = cvarrToMat(sqsum);

	IplImage *cpuImage = createCopy(image);
	std::vector<CvRect> cpuFaces = runCPUHaarDetection(gpuHaarCascade, imgSize, sum_Mat, sqsum_Mat, scale, minNeighbors);

	IplImage *cpuImage_Multithread = createCopy(image);
	runCPUHaarDetection_Multithread(gpuHaarCascade, imgSize, sum_Mat, sqsum_Mat, scale, minNeighbors);

	// Run OpenCV face detection
	IplImage *opencvImage = createCopy(image);
	std::vector<CvRect> opencvFaces = runOpenCVHaarDetection(image, cvCascade, scaleFactor);

	// Display the results
	displayResults(gpuImage_v1, gpuFaces_v1, "GPU Results v1");
	displayResults(gpuImage_v3, gpuFaces_v3, "GPU Results v3");
	displayResults(gpuImage_v4, gpuFaces_v4, "GPU Results v4");

	displayResults(cpuImage, cpuFaces, "CPU Results");
	displayResults(cpuImage_Multithread, gpuFaces_v3, "CPU_Multithread Results");

	displayResults(opencvImage, opencvFaces, "OpenCV Results");

	cvWaitKey(0);
	system("pause");

	// Free memory
	cvReleaseHaarClassifierCascade( &cvCascade );
	cvReleaseImage(&image);
	cvReleaseMat(&sum);
	cvReleaseMat(&sqsum);

	cvReleaseImage(&image);
	cvReleaseImage(&gpuImage_v1);
	cvReleaseImage(&gpuImage_v3);
	cvReleaseImage(&gpuImage_v4);
	cvReleaseImage(&cpuImage);
	cvReleaseImage(&cpuImage_Multithread);
	cvReleaseImage(&opencvImage);

	gpuHaarCascade.shutdown();

	return 0;
}
7737637dd060d6875e869afa6ffda6a8a1c38207.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" //double* x, * devx, * val, * gra, * r, * graMax; //double* hes_value; ////int size; //int* pos_x, * pos_y; //int* csr; double* x; //thrust::pair<int, int> *device_pos; //typedef double (*fp)(double); //typedef void (*val_fp)(double*, double*, int); //typedef void (*valsum_fp)(double*, double*,int); //typedef void (*gra_fp)(double*, double*, int); //typedef void (*gramin_fp)(double*, double*,int); //typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int); //typedef void (*print_fp)(double*, int); int numSMs; __global__ void sum_val(double* val, double* r) { int index = threadIdx.x; for (int i = 1; i < blockDim.x; i <<= 1) { if (index % (i << 1) == i) { val[index - i] += val[index]; } __syncthreads(); } if (index == 0) { r[0] = val[0]; } }
7737637dd060d6875e869afa6ffda6a8a1c38207.cu
#include "includes.h" //double* x, * devx, * val, * gra, * r, * graMax; //double* hes_value; ////int size; //int* pos_x, * pos_y; //int* csr; double* x; //thrust::pair<int, int> *device_pos; //typedef double (*fp)(double); //typedef void (*val_fp)(double*, double*, int); //typedef void (*valsum_fp)(double*, double*,int); //typedef void (*gra_fp)(double*, double*, int); //typedef void (*gramin_fp)(double*, double*,int); //typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int); //typedef void (*print_fp)(double*, int); int numSMs; __global__ void sum_val(double* val, double* r) { int index = threadIdx.x; for (int i = 1; i < blockDim.x; i <<= 1) { if (index % (i << 1) == i) { val[index - i] += val[index]; } __syncthreads(); } if (index == 0) { r[0] = val[0]; } }
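/*
Illustration only (not part of the original file): a minimal host-side sketch of how
sum_val could be launched for a single block. The names h_val, d_val, d_r and N are
placeholders, and N is assumed here to be a power of two no larger than the maximum
block size, since the in-place tree reduction above pairs elements that are 2^k apart.
*/
void sumValExample(const double *h_val, int N, double *h_result)
{
    double *d_val = nullptr;
    double *d_r = nullptr;
    cudaMalloc(&d_val, N * sizeof(double));
    cudaMalloc(&d_r, sizeof(double));
    cudaMemcpy(d_val, h_val, N * sizeof(double), cudaMemcpyHostToDevice);

    sum_val<<<1, N>>>(d_val, d_r);   // one block; blockDim.x == N

    cudaMemcpy(h_result, d_r, sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_val);
    cudaFree(d_r);
}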
9d2e2d1e42a0b5251daafdc805394cf40ee9ad9b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> #include <cugraph/detail/utility_wrappers.hpp> #include <cugraph/graph_functions.hpp> #include <cugraph/utilities/device_functors.cuh> #include <cugraph/utilities/high_res_timer.hpp> #include <raft/core/handle.hpp> #include <rmm/device_uvector.hpp> #include <gtest/gtest.h> #include <thrust/binary_search.h> #include <thrust/distance.h> #include <thrust/fill.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/sort.h> #include <thrust/unique.h> struct RenumberSampledEdgelist_Usecase { size_t num_vertices{}; size_t num_sampled_edges{}; size_t num_hops{1}; // enabled if larger than 1 size_t num_labels{1}; // enabled if larger than 1 bool check_correctness{true}; }; class Tests_RenumberSampledEdgelist : public ::testing::TestWithParam<RenumberSampledEdgelist_Usecase> { public: Tests_RenumberSampledEdgelist() {} static void SetUpTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} template <typename vertex_t> void run_current_test(RenumberSampledEdgelist_Usecase const& usecase) { using label_t = int32_t; raft::handle_t handle{}; HighResTimer hr_timer{}; raft::random::RngState rng_state(0); rmm::device_uvector<vertex_t> org_edgelist_srcs(usecase.num_sampled_edges, handle.get_stream()); rmm::device_uvector<vertex_t> org_edgelist_dsts(usecase.num_sampled_edges, handle.get_stream()); cugraph::detail::uniform_random_fill(handle.get_stream(), org_edgelist_srcs.data(), org_edgelist_srcs.size(), vertex_t{0}, static_cast<vertex_t>(usecase.num_vertices), rng_state); cugraph::detail::uniform_random_fill(handle.get_stream(), org_edgelist_dsts.data(), org_edgelist_dsts.size(), vertex_t{0}, static_cast<vertex_t>(usecase.num_vertices), rng_state); std::optional<rmm::device_uvector<int32_t>> edgelist_hops{std::nullopt}; if (usecase.num_hops > 1) { edgelist_hops = rmm::device_uvector<int32_t>(usecase.num_sampled_edges, handle.get_stream()); cugraph::detail::uniform_random_fill(handle.get_stream(), (*edgelist_hops).data(), (*edgelist_hops).size(), int32_t{0}, static_cast<int32_t>(usecase.num_hops), rng_state); } std::optional<std::tuple<rmm::device_uvector<label_t>, rmm::device_uvector<size_t>>> label_offsets{std::nullopt}; if (usecase.num_labels > 1) { rmm::device_uvector<label_t> labels(usecase.num_labels, handle.get_stream()); thrust::sequence(handle.get_thrust_policy(), labels.begin(), labels.end(), label_t{0}); rmm::device_uvector<label_t> edgelist_labels(usecase.num_sampled_edges, handle.get_stream()); cugraph::detail::uniform_random_fill(handle.get_stream(), edgelist_labels.data(), edgelist_labels.size(), label_t{0}, static_cast<label_t>(usecase.num_labels), rng_state); rmm::device_uvector<size_t> offsets(usecase.num_labels + 1, handle.get_stream()); thrust::fill(handle.get_thrust_policy(), offsets.begin(), 
offsets.end(), size_t{0}); thrust::for_each( handle.get_thrust_policy(), edgelist_labels.begin(), edgelist_labels.end(), [offsets = raft::device_span<size_t>(offsets.data(), offsets.size())] __device__(label_t label) { cuda::atomic_ref<size_t, cuda::thread_scope_device> atomic_counter(offsets[label]); atomic_counter.fetch_add(size_t{1}, cuda::std::memory_order_relaxed); }); thrust::exclusive_scan( handle.get_thrust_policy(), offsets.begin(), offsets.end(), offsets.begin()); label_offsets = std::make_tuple(std::move(labels), std::move(offsets)); } rmm::device_uvector<vertex_t> renumbered_edgelist_srcs(org_edgelist_srcs.size(), handle.get_stream()); rmm::device_uvector<vertex_t> renumbered_edgelist_dsts(org_edgelist_dsts.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), org_edgelist_srcs.begin(), org_edgelist_srcs.end(), renumbered_edgelist_srcs.begin()); thrust::copy(handle.get_thrust_policy(), org_edgelist_dsts.begin(), org_edgelist_dsts.end(), renumbered_edgelist_dsts.begin()); rmm::device_uvector<vertex_t> renumber_map(0, handle.get_stream()); std::optional<rmm::device_uvector<size_t>> renumber_map_label_offsets{std::nullopt}; if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement hr_timer.start("Renumber sampled edgelist"); } std::tie(renumbered_edgelist_srcs, renumbered_edgelist_dsts, renumber_map, renumber_map_label_offsets) = cugraph::renumber_sampled_edgelist( handle, std::move(renumbered_edgelist_srcs), std::move(renumbered_edgelist_dsts), edgelist_hops ? std::make_optional<raft::device_span<int32_t const>>( (*edgelist_hops).data(), (*edgelist_hops).size()) : std::nullopt, label_offsets ? std::make_optional< std::tuple<raft::device_span<label_t const>, raft::device_span<size_t const>>>( std::make_tuple(raft::device_span<label_t const>(std::get<0>(*label_offsets).data(), std::get<0>(*label_offsets).size()), raft::device_span<size_t const>(std::get<1>(*label_offsets).data(), std::get<1>(*label_offsets).size()))) : std::nullopt); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement hr_timer.stop(); hr_timer.display_and_clear(std::cout); } if (usecase.check_correctness) { for (size_t i = 0; i < usecase.num_labels; ++i) { size_t edgelist_start_offset = label_offsets ? std::get<1>(*label_offsets).element(i, handle.get_stream()) : size_t{0}; size_t edgelist_end_offset = label_offsets ? std::get<1>(*label_offsets).element(i + 1, handle.get_stream()) : usecase.num_sampled_edges; if (edgelist_start_offset == edgelist_end_offset) continue; auto this_label_org_edgelist_srcs = raft::device_span<vertex_t const>(org_edgelist_srcs.data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset); auto this_label_org_edgelist_dsts = raft::device_span<vertex_t const>(org_edgelist_dsts.data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset); auto this_label_edgelist_hops = edgelist_hops ? 
std::make_optional<raft::device_span<int32_t const>>( (*edgelist_hops).data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset) : std::nullopt; auto this_label_renumbered_edgelist_srcs = raft::device_span<vertex_t const>(renumbered_edgelist_srcs.data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset); auto this_label_renumbered_edgelist_dsts = raft::device_span<vertex_t const>(renumbered_edgelist_dsts.data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset); size_t renumber_map_start_offset = renumber_map_label_offsets ? (*renumber_map_label_offsets).element(i, handle.get_stream()) : size_t{0}; size_t renumber_map_end_offset = renumber_map_label_offsets ? (*renumber_map_label_offsets).element(i + 1, handle.get_stream()) : renumber_map.size(); auto this_label_renumber_map = raft::device_span<vertex_t const>(renumber_map.data() + renumber_map_start_offset, renumber_map_end_offset - renumber_map_start_offset); // check un-renumbering recovers the original edge list auto pair_first = thrust::make_zip_iterator(this_label_org_edgelist_srcs.begin(), this_label_renumbered_edgelist_srcs.begin()); auto num_renumber_errors = thrust::count_if(handle.get_thrust_policy(), pair_first, pair_first + this_label_org_edgelist_srcs.size(), [this_label_renumber_map] __device__(auto pair) { auto org = thrust::get<0>(pair); auto renumbered = thrust::get<1>(pair); return this_label_renumber_map[renumbered] != org; }); ASSERT_TRUE(num_renumber_errors == 0) << "Renumber error in edge list sources."; pair_first = thrust::make_zip_iterator(this_label_org_edgelist_dsts.begin(), this_label_renumbered_edgelist_dsts.begin()); num_renumber_errors = thrust::count_if(handle.get_thrust_policy(), pair_first, pair_first + this_label_org_edgelist_dsts.size(), [this_label_renumber_map] __device__(auto pair) { auto org = thrust::get<0>(pair); auto renumbered = thrust::get<1>(pair); return this_label_renumber_map[renumbered] != org; }); ASSERT_TRUE(num_renumber_errors == 0) << "Renumber error in edge list destinations."; // Check the invariants in renumber_map // Say we found the minimum (primary key:hop, secondary key:flag) pairs for every unique // vertices, where flag is 0 for sources and 1 for destinations. Then, vertices with smaller // (hop, flag) pairs should be renumbered to smaller numbers than vertices with larger (hop, // flag) pairs. rmm::device_uvector<vertex_t> unique_srcs(this_label_org_edgelist_srcs.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), this_label_org_edgelist_srcs.begin(), this_label_org_edgelist_srcs.end(), unique_srcs.begin()); std::optional<rmm::device_uvector<int32_t>> unique_src_hops = this_label_edgelist_hops ? 
std::make_optional<rmm::device_uvector<int32_t>>( (*this_label_edgelist_hops).size(), handle.get_stream()) : std::nullopt; if (this_label_edgelist_hops) { thrust::copy(handle.get_thrust_policy(), (*this_label_edgelist_hops).begin(), (*this_label_edgelist_hops).end(), (*unique_src_hops).begin()); auto pair_first = thrust::make_zip_iterator(unique_srcs.begin(), (*unique_src_hops).begin()); thrust::sort(handle.get_thrust_policy(), pair_first, pair_first + unique_srcs.size()); unique_srcs.resize( thrust::distance(unique_srcs.begin(), thrust::get<0>(thrust::unique_by_key(handle.get_thrust_policy(), unique_srcs.begin(), unique_srcs.end(), (*unique_src_hops).begin()))), handle.get_stream()); (*unique_src_hops).resize(unique_srcs.size(), handle.get_stream()); } else { thrust::sort(handle.get_thrust_policy(), unique_srcs.begin(), unique_srcs.end()); unique_srcs.resize( thrust::distance( unique_srcs.begin(), thrust::unique(handle.get_thrust_policy(), unique_srcs.begin(), unique_srcs.end())), handle.get_stream()); } rmm::device_uvector<vertex_t> unique_dsts(this_label_org_edgelist_dsts.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), this_label_org_edgelist_dsts.begin(), this_label_org_edgelist_dsts.end(), unique_dsts.begin()); std::optional<rmm::device_uvector<int32_t>> unique_dst_hops = this_label_edgelist_hops ? std::make_optional<rmm::device_uvector<int32_t>>( (*this_label_edgelist_hops).size(), handle.get_stream()) : std::nullopt; if (this_label_edgelist_hops) { thrust::copy(handle.get_thrust_policy(), (*this_label_edgelist_hops).begin(), (*this_label_edgelist_hops).end(), (*unique_dst_hops).begin()); auto pair_first = thrust::make_zip_iterator(unique_dsts.begin(), (*unique_dst_hops).begin()); thrust::sort(handle.get_thrust_policy(), pair_first, pair_first + unique_dsts.size()); unique_dsts.resize( thrust::distance(unique_dsts.begin(), thrust::get<0>(thrust::unique_by_key(handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end(), (*unique_dst_hops).begin()))), handle.get_stream()); (*unique_dst_hops).resize(unique_dsts.size(), handle.get_stream()); } else { thrust::sort(handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end()); unique_dsts.resize( thrust::distance( unique_dsts.begin(), thrust::unique(handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end())), handle.get_stream()); } rmm::device_uvector<vertex_t> sorted_org_vertices(this_label_renumber_map.size(), handle.get_stream()); rmm::device_uvector<vertex_t> matching_renumbered_vertices(sorted_org_vertices.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), this_label_renumber_map.begin(), this_label_renumber_map.end(), sorted_org_vertices.begin()); thrust::sequence(handle.get_thrust_policy(), matching_renumbered_vertices.begin(), matching_renumbered_vertices.end(), vertex_t{0}); thrust::sort_by_key(handle.get_thrust_policy(), sorted_org_vertices.begin(), sorted_org_vertices.end(), matching_renumbered_vertices.begin()); if (this_label_edgelist_hops) { rmm::device_uvector<vertex_t> merged_vertices(unique_srcs.size() + unique_dsts.size(), handle.get_stream()); rmm::device_uvector<int32_t> merged_hops(merged_vertices.size(), handle.get_stream()); rmm::device_uvector<int8_t> merged_flags(merged_vertices.size(), handle.get_stream()); auto src_triplet_first = thrust::make_zip_iterator(unique_srcs.begin(), (*unique_src_hops).begin(), thrust::make_constant_iterator(int8_t{0})); auto dst_triplet_first = thrust::make_zip_iterator(unique_dsts.begin(), (*unique_dst_hops).begin(), 
thrust::make_constant_iterator(int8_t{1})); thrust::merge(handle.get_thrust_policy(), src_triplet_first, src_triplet_first + unique_srcs.size(), dst_triplet_first, dst_triplet_first + unique_dsts.size(), thrust::make_zip_iterator( merged_vertices.begin(), merged_hops.begin(), merged_flags.begin())); merged_vertices.resize( thrust::distance( merged_vertices.begin(), thrust::get<0>(thrust::unique_by_key( handle.get_thrust_policy(), merged_vertices.begin(), merged_vertices.end(), thrust::make_zip_iterator(merged_hops.begin(), merged_flags.begin())))), handle.get_stream()); merged_hops.resize(merged_vertices.size(), handle.get_stream()); merged_flags.resize(merged_vertices.size(), handle.get_stream()); auto sort_key_first = thrust::make_zip_iterator(merged_hops.begin(), merged_flags.begin()); thrust::sort_by_key(handle.get_thrust_policy(), sort_key_first, sort_key_first + merged_hops.size(), merged_vertices.begin()); auto num_unique_keys = thrust::count_if( handle.get_thrust_policy(), thrust::make_counting_iterator(size_t{0}), thrust::make_counting_iterator(merged_hops.size()), cugraph::detail::is_first_in_run_t<decltype(sort_key_first)>{sort_key_first}); rmm::device_uvector<vertex_t> min_vertices(num_unique_keys, handle.get_stream()); rmm::device_uvector<vertex_t> max_vertices(num_unique_keys, handle.get_stream()); auto renumbered_merged_vertex_first = thrust::make_transform_iterator( merged_vertices.begin(), [sorted_org_vertices = raft::device_span<vertex_t const>(sorted_org_vertices.data(), sorted_org_vertices.size()), matching_renumbered_vertices = raft::device_span<vertex_t const>( matching_renumbered_vertices.data(), matching_renumbered_vertices.size())] __device__(vertex_t src) { auto it = thrust::lower_bound( thrust::seq, sorted_org_vertices.begin(), sorted_org_vertices.end(), src); return matching_renumbered_vertices[thrust::distance(sorted_org_vertices.begin(), it)]; }); thrust::reduce_by_key(handle.get_thrust_policy(), sort_key_first, sort_key_first + merged_hops.size(), renumbered_merged_vertex_first, thrust::make_discard_iterator(), min_vertices.begin(), thrust::equal_to<thrust::tuple<int32_t, int8_t>>{}, thrust::minimum<vertex_t>{}); thrust::reduce_by_key(handle.get_thrust_policy(), sort_key_first, sort_key_first + merged_hops.size(), renumbered_merged_vertex_first, thrust::make_discard_iterator(), max_vertices.begin(), thrust::equal_to<thrust::tuple<int32_t, int8_t>>{}, thrust::maximum<vertex_t>{}); auto num_violations = thrust::count_if(handle.get_thrust_policy(), thrust::make_counting_iterator(size_t{1}), thrust::make_counting_iterator(min_vertices.size()), [min_vertices = raft::device_span<vertex_t const>(min_vertices.data(), min_vertices.size()), max_vertices = raft::device_span<vertex_t const>( max_vertices.data(), max_vertices.size())] __device__(size_t i) { return min_vertices[i] <= max_vertices[i - 1]; }); ASSERT_TRUE(num_violations == 0) << "Invariant violated, a vertex with a smaller (hop,flag) pair is renumbered to a " "larger value than a vertex with a larger (hop, flag) pair."; } else { unique_dsts.resize( thrust::distance( unique_dsts.begin(), thrust::remove_if(handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end(), [sorted_unique_srcs = raft::device_span<vertex_t const>( unique_srcs.data(), unique_srcs.size())] __device__(auto dst) { return thrust::binary_search(thrust::seq, sorted_unique_srcs.begin(), sorted_unique_srcs.end(), dst); })), handle.get_stream()); auto max_src_renumbered_vertex = thrust::transform_reduce( handle.get_thrust_policy(), 
unique_srcs.begin(), unique_srcs.end(), [sorted_org_vertices = raft::device_span<vertex_t const>(sorted_org_vertices.data(), sorted_org_vertices.size()), matching_renumbered_vertices = raft::device_span<vertex_t const>( matching_renumbered_vertices.data(), matching_renumbered_vertices.size())] __device__(vertex_t src) { auto it = thrust::lower_bound( thrust::seq, sorted_org_vertices.begin(), sorted_org_vertices.end(), src); return matching_renumbered_vertices[thrust::distance(sorted_org_vertices.begin(), it)]; }, std::numeric_limits<vertex_t>::lowest(), thrust::maximum<vertex_t>{}); auto min_dst_renumbered_vertex = thrust::transform_reduce( handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end(), [sorted_org_vertices = raft::device_span<vertex_t const>(sorted_org_vertices.data(), sorted_org_vertices.size()), matching_renumbered_vertices = raft::device_span<vertex_t const>( matching_renumbered_vertices.data(), matching_renumbered_vertices.size())] __device__(vertex_t dst) { auto it = thrust::lower_bound( thrust::seq, sorted_org_vertices.begin(), sorted_org_vertices.end(), dst); return matching_renumbered_vertices[thrust::distance(sorted_org_vertices.begin(), it)]; }, std::numeric_limits<vertex_t>::max(), thrust::minimum<vertex_t>{}); ASSERT_TRUE(max_src_renumbered_vertex < min_dst_renumbered_vertex) << "Invariants violated, a source vertex is renumbered to a non-smaller value than a " "vertex that appear only in the edge list destinations."; } } } } }; TEST_P(Tests_RenumberSampledEdgelist, CheckInt32) { auto param = GetParam(); run_current_test<int32_t>(param); } TEST_P(Tests_RenumberSampledEdgelist, CheckInt64) { auto param = GetParam(); run_current_test<int64_t>(param); } INSTANTIATE_TEST_SUITE_P( small_test, Tests_RenumberSampledEdgelist, ::testing::Values(RenumberSampledEdgelist_Usecase{1024, 4096, 1, 1, true}, RenumberSampledEdgelist_Usecase{1024, 4096, 3, 1, true}, RenumberSampledEdgelist_Usecase{1024, 32768, 1, 256, true}, RenumberSampledEdgelist_Usecase{1024, 32768, 3, 256, true})); INSTANTIATE_TEST_SUITE_P( benchmark_test, Tests_RenumberSampledEdgelist, ::testing::Values(RenumberSampledEdgelist_Usecase{1 << 20, 1 << 20, 1, 1, false}, RenumberSampledEdgelist_Usecase{1 << 20, 1 << 20, 5, 1, false}, RenumberSampledEdgelist_Usecase{1 << 20, 1 << 24, 1, 1 << 20, false}, RenumberSampledEdgelist_Usecase{1 << 20, 1 << 24, 5, 1 << 20, false})); CUGRAPH_TEST_PROGRAM_MAIN()
9d2e2d1e42a0b5251daafdc805394cf40ee9ad9b.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utilities/base_fixture.hpp> #include <cugraph/detail/utility_wrappers.hpp> #include <cugraph/graph_functions.hpp> #include <cugraph/utilities/device_functors.cuh> #include <cugraph/utilities/high_res_timer.hpp> #include <raft/core/handle.hpp> #include <rmm/device_uvector.hpp> #include <gtest/gtest.h> #include <thrust/binary_search.h> #include <thrust/distance.h> #include <thrust/fill.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/reduce.h> #include <thrust/remove.h> #include <thrust/sort.h> #include <thrust/unique.h> struct RenumberSampledEdgelist_Usecase { size_t num_vertices{}; size_t num_sampled_edges{}; size_t num_hops{1}; // enabled if larger than 1 size_t num_labels{1}; // enabled if larger than 1 bool check_correctness{true}; }; class Tests_RenumberSampledEdgelist : public ::testing::TestWithParam<RenumberSampledEdgelist_Usecase> { public: Tests_RenumberSampledEdgelist() {} static void SetUpTestCase() {} static void TearDownTestCase() {} virtual void SetUp() {} virtual void TearDown() {} template <typename vertex_t> void run_current_test(RenumberSampledEdgelist_Usecase const& usecase) { using label_t = int32_t; raft::handle_t handle{}; HighResTimer hr_timer{}; raft::random::RngState rng_state(0); rmm::device_uvector<vertex_t> org_edgelist_srcs(usecase.num_sampled_edges, handle.get_stream()); rmm::device_uvector<vertex_t> org_edgelist_dsts(usecase.num_sampled_edges, handle.get_stream()); cugraph::detail::uniform_random_fill(handle.get_stream(), org_edgelist_srcs.data(), org_edgelist_srcs.size(), vertex_t{0}, static_cast<vertex_t>(usecase.num_vertices), rng_state); cugraph::detail::uniform_random_fill(handle.get_stream(), org_edgelist_dsts.data(), org_edgelist_dsts.size(), vertex_t{0}, static_cast<vertex_t>(usecase.num_vertices), rng_state); std::optional<rmm::device_uvector<int32_t>> edgelist_hops{std::nullopt}; if (usecase.num_hops > 1) { edgelist_hops = rmm::device_uvector<int32_t>(usecase.num_sampled_edges, handle.get_stream()); cugraph::detail::uniform_random_fill(handle.get_stream(), (*edgelist_hops).data(), (*edgelist_hops).size(), int32_t{0}, static_cast<int32_t>(usecase.num_hops), rng_state); } std::optional<std::tuple<rmm::device_uvector<label_t>, rmm::device_uvector<size_t>>> label_offsets{std::nullopt}; if (usecase.num_labels > 1) { rmm::device_uvector<label_t> labels(usecase.num_labels, handle.get_stream()); thrust::sequence(handle.get_thrust_policy(), labels.begin(), labels.end(), label_t{0}); rmm::device_uvector<label_t> edgelist_labels(usecase.num_sampled_edges, handle.get_stream()); cugraph::detail::uniform_random_fill(handle.get_stream(), edgelist_labels.data(), edgelist_labels.size(), label_t{0}, static_cast<label_t>(usecase.num_labels), rng_state); rmm::device_uvector<size_t> offsets(usecase.num_labels + 1, handle.get_stream()); thrust::fill(handle.get_thrust_policy(), offsets.begin(), offsets.end(), size_t{0}); thrust::for_each( 
handle.get_thrust_policy(), edgelist_labels.begin(), edgelist_labels.end(), [offsets = raft::device_span<size_t>(offsets.data(), offsets.size())] __device__(label_t label) { cuda::atomic_ref<size_t, cuda::thread_scope_device> atomic_counter(offsets[label]); atomic_counter.fetch_add(size_t{1}, cuda::std::memory_order_relaxed); }); thrust::exclusive_scan( handle.get_thrust_policy(), offsets.begin(), offsets.end(), offsets.begin()); label_offsets = std::make_tuple(std::move(labels), std::move(offsets)); } rmm::device_uvector<vertex_t> renumbered_edgelist_srcs(org_edgelist_srcs.size(), handle.get_stream()); rmm::device_uvector<vertex_t> renumbered_edgelist_dsts(org_edgelist_dsts.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), org_edgelist_srcs.begin(), org_edgelist_srcs.end(), renumbered_edgelist_srcs.begin()); thrust::copy(handle.get_thrust_policy(), org_edgelist_dsts.begin(), org_edgelist_dsts.end(), renumbered_edgelist_dsts.begin()); rmm::device_uvector<vertex_t> renumber_map(0, handle.get_stream()); std::optional<rmm::device_uvector<size_t>> renumber_map_label_offsets{std::nullopt}; if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement hr_timer.start("Renumber sampled edgelist"); } std::tie(renumbered_edgelist_srcs, renumbered_edgelist_dsts, renumber_map, renumber_map_label_offsets) = cugraph::renumber_sampled_edgelist( handle, std::move(renumbered_edgelist_srcs), std::move(renumbered_edgelist_dsts), edgelist_hops ? std::make_optional<raft::device_span<int32_t const>>( (*edgelist_hops).data(), (*edgelist_hops).size()) : std::nullopt, label_offsets ? std::make_optional< std::tuple<raft::device_span<label_t const>, raft::device_span<size_t const>>>( std::make_tuple(raft::device_span<label_t const>(std::get<0>(*label_offsets).data(), std::get<0>(*label_offsets).size()), raft::device_span<size_t const>(std::get<1>(*label_offsets).data(), std::get<1>(*label_offsets).size()))) : std::nullopt); if (cugraph::test::g_perf) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement hr_timer.stop(); hr_timer.display_and_clear(std::cout); } if (usecase.check_correctness) { for (size_t i = 0; i < usecase.num_labels; ++i) { size_t edgelist_start_offset = label_offsets ? std::get<1>(*label_offsets).element(i, handle.get_stream()) : size_t{0}; size_t edgelist_end_offset = label_offsets ? std::get<1>(*label_offsets).element(i + 1, handle.get_stream()) : usecase.num_sampled_edges; if (edgelist_start_offset == edgelist_end_offset) continue; auto this_label_org_edgelist_srcs = raft::device_span<vertex_t const>(org_edgelist_srcs.data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset); auto this_label_org_edgelist_dsts = raft::device_span<vertex_t const>(org_edgelist_dsts.data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset); auto this_label_edgelist_hops = edgelist_hops ? std::make_optional<raft::device_span<int32_t const>>( (*edgelist_hops).data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset) : std::nullopt; auto this_label_renumbered_edgelist_srcs = raft::device_span<vertex_t const>(renumbered_edgelist_srcs.data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset); auto this_label_renumbered_edgelist_dsts = raft::device_span<vertex_t const>(renumbered_edgelist_dsts.data() + edgelist_start_offset, edgelist_end_offset - edgelist_start_offset); size_t renumber_map_start_offset = renumber_map_label_offsets ? 
(*renumber_map_label_offsets).element(i, handle.get_stream()) : size_t{0}; size_t renumber_map_end_offset = renumber_map_label_offsets ? (*renumber_map_label_offsets).element(i + 1, handle.get_stream()) : renumber_map.size(); auto this_label_renumber_map = raft::device_span<vertex_t const>(renumber_map.data() + renumber_map_start_offset, renumber_map_end_offset - renumber_map_start_offset); // check un-renumbering recovers the original edge list auto pair_first = thrust::make_zip_iterator(this_label_org_edgelist_srcs.begin(), this_label_renumbered_edgelist_srcs.begin()); auto num_renumber_errors = thrust::count_if(handle.get_thrust_policy(), pair_first, pair_first + this_label_org_edgelist_srcs.size(), [this_label_renumber_map] __device__(auto pair) { auto org = thrust::get<0>(pair); auto renumbered = thrust::get<1>(pair); return this_label_renumber_map[renumbered] != org; }); ASSERT_TRUE(num_renumber_errors == 0) << "Renumber error in edge list sources."; pair_first = thrust::make_zip_iterator(this_label_org_edgelist_dsts.begin(), this_label_renumbered_edgelist_dsts.begin()); num_renumber_errors = thrust::count_if(handle.get_thrust_policy(), pair_first, pair_first + this_label_org_edgelist_dsts.size(), [this_label_renumber_map] __device__(auto pair) { auto org = thrust::get<0>(pair); auto renumbered = thrust::get<1>(pair); return this_label_renumber_map[renumbered] != org; }); ASSERT_TRUE(num_renumber_errors == 0) << "Renumber error in edge list destinations."; // Check the invariants in renumber_map // Say we found the minimum (primary key:hop, secondary key:flag) pairs for every unique // vertices, where flag is 0 for sources and 1 for destinations. Then, vertices with smaller // (hop, flag) pairs should be renumbered to smaller numbers than vertices with larger (hop, // flag) pairs. rmm::device_uvector<vertex_t> unique_srcs(this_label_org_edgelist_srcs.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), this_label_org_edgelist_srcs.begin(), this_label_org_edgelist_srcs.end(), unique_srcs.begin()); std::optional<rmm::device_uvector<int32_t>> unique_src_hops = this_label_edgelist_hops ? std::make_optional<rmm::device_uvector<int32_t>>( (*this_label_edgelist_hops).size(), handle.get_stream()) : std::nullopt; if (this_label_edgelist_hops) { thrust::copy(handle.get_thrust_policy(), (*this_label_edgelist_hops).begin(), (*this_label_edgelist_hops).end(), (*unique_src_hops).begin()); auto pair_first = thrust::make_zip_iterator(unique_srcs.begin(), (*unique_src_hops).begin()); thrust::sort(handle.get_thrust_policy(), pair_first, pair_first + unique_srcs.size()); unique_srcs.resize( thrust::distance(unique_srcs.begin(), thrust::get<0>(thrust::unique_by_key(handle.get_thrust_policy(), unique_srcs.begin(), unique_srcs.end(), (*unique_src_hops).begin()))), handle.get_stream()); (*unique_src_hops).resize(unique_srcs.size(), handle.get_stream()); } else { thrust::sort(handle.get_thrust_policy(), unique_srcs.begin(), unique_srcs.end()); unique_srcs.resize( thrust::distance( unique_srcs.begin(), thrust::unique(handle.get_thrust_policy(), unique_srcs.begin(), unique_srcs.end())), handle.get_stream()); } rmm::device_uvector<vertex_t> unique_dsts(this_label_org_edgelist_dsts.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), this_label_org_edgelist_dsts.begin(), this_label_org_edgelist_dsts.end(), unique_dsts.begin()); std::optional<rmm::device_uvector<int32_t>> unique_dst_hops = this_label_edgelist_hops ? 
std::make_optional<rmm::device_uvector<int32_t>>( (*this_label_edgelist_hops).size(), handle.get_stream()) : std::nullopt; if (this_label_edgelist_hops) { thrust::copy(handle.get_thrust_policy(), (*this_label_edgelist_hops).begin(), (*this_label_edgelist_hops).end(), (*unique_dst_hops).begin()); auto pair_first = thrust::make_zip_iterator(unique_dsts.begin(), (*unique_dst_hops).begin()); thrust::sort(handle.get_thrust_policy(), pair_first, pair_first + unique_dsts.size()); unique_dsts.resize( thrust::distance(unique_dsts.begin(), thrust::get<0>(thrust::unique_by_key(handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end(), (*unique_dst_hops).begin()))), handle.get_stream()); (*unique_dst_hops).resize(unique_dsts.size(), handle.get_stream()); } else { thrust::sort(handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end()); unique_dsts.resize( thrust::distance( unique_dsts.begin(), thrust::unique(handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end())), handle.get_stream()); } rmm::device_uvector<vertex_t> sorted_org_vertices(this_label_renumber_map.size(), handle.get_stream()); rmm::device_uvector<vertex_t> matching_renumbered_vertices(sorted_org_vertices.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), this_label_renumber_map.begin(), this_label_renumber_map.end(), sorted_org_vertices.begin()); thrust::sequence(handle.get_thrust_policy(), matching_renumbered_vertices.begin(), matching_renumbered_vertices.end(), vertex_t{0}); thrust::sort_by_key(handle.get_thrust_policy(), sorted_org_vertices.begin(), sorted_org_vertices.end(), matching_renumbered_vertices.begin()); if (this_label_edgelist_hops) { rmm::device_uvector<vertex_t> merged_vertices(unique_srcs.size() + unique_dsts.size(), handle.get_stream()); rmm::device_uvector<int32_t> merged_hops(merged_vertices.size(), handle.get_stream()); rmm::device_uvector<int8_t> merged_flags(merged_vertices.size(), handle.get_stream()); auto src_triplet_first = thrust::make_zip_iterator(unique_srcs.begin(), (*unique_src_hops).begin(), thrust::make_constant_iterator(int8_t{0})); auto dst_triplet_first = thrust::make_zip_iterator(unique_dsts.begin(), (*unique_dst_hops).begin(), thrust::make_constant_iterator(int8_t{1})); thrust::merge(handle.get_thrust_policy(), src_triplet_first, src_triplet_first + unique_srcs.size(), dst_triplet_first, dst_triplet_first + unique_dsts.size(), thrust::make_zip_iterator( merged_vertices.begin(), merged_hops.begin(), merged_flags.begin())); merged_vertices.resize( thrust::distance( merged_vertices.begin(), thrust::get<0>(thrust::unique_by_key( handle.get_thrust_policy(), merged_vertices.begin(), merged_vertices.end(), thrust::make_zip_iterator(merged_hops.begin(), merged_flags.begin())))), handle.get_stream()); merged_hops.resize(merged_vertices.size(), handle.get_stream()); merged_flags.resize(merged_vertices.size(), handle.get_stream()); auto sort_key_first = thrust::make_zip_iterator(merged_hops.begin(), merged_flags.begin()); thrust::sort_by_key(handle.get_thrust_policy(), sort_key_first, sort_key_first + merged_hops.size(), merged_vertices.begin()); auto num_unique_keys = thrust::count_if( handle.get_thrust_policy(), thrust::make_counting_iterator(size_t{0}), thrust::make_counting_iterator(merged_hops.size()), cugraph::detail::is_first_in_run_t<decltype(sort_key_first)>{sort_key_first}); rmm::device_uvector<vertex_t> min_vertices(num_unique_keys, handle.get_stream()); rmm::device_uvector<vertex_t> max_vertices(num_unique_keys, handle.get_stream()); auto 
renumbered_merged_vertex_first = thrust::make_transform_iterator( merged_vertices.begin(), [sorted_org_vertices = raft::device_span<vertex_t const>(sorted_org_vertices.data(), sorted_org_vertices.size()), matching_renumbered_vertices = raft::device_span<vertex_t const>( matching_renumbered_vertices.data(), matching_renumbered_vertices.size())] __device__(vertex_t src) { auto it = thrust::lower_bound( thrust::seq, sorted_org_vertices.begin(), sorted_org_vertices.end(), src); return matching_renumbered_vertices[thrust::distance(sorted_org_vertices.begin(), it)]; }); thrust::reduce_by_key(handle.get_thrust_policy(), sort_key_first, sort_key_first + merged_hops.size(), renumbered_merged_vertex_first, thrust::make_discard_iterator(), min_vertices.begin(), thrust::equal_to<thrust::tuple<int32_t, int8_t>>{}, thrust::minimum<vertex_t>{}); thrust::reduce_by_key(handle.get_thrust_policy(), sort_key_first, sort_key_first + merged_hops.size(), renumbered_merged_vertex_first, thrust::make_discard_iterator(), max_vertices.begin(), thrust::equal_to<thrust::tuple<int32_t, int8_t>>{}, thrust::maximum<vertex_t>{}); auto num_violations = thrust::count_if(handle.get_thrust_policy(), thrust::make_counting_iterator(size_t{1}), thrust::make_counting_iterator(min_vertices.size()), [min_vertices = raft::device_span<vertex_t const>(min_vertices.data(), min_vertices.size()), max_vertices = raft::device_span<vertex_t const>( max_vertices.data(), max_vertices.size())] __device__(size_t i) { return min_vertices[i] <= max_vertices[i - 1]; }); ASSERT_TRUE(num_violations == 0) << "Invariant violated, a vertex with a smaller (hop,flag) pair is renumbered to a " "larger value than a vertex with a larger (hop, flag) pair."; } else { unique_dsts.resize( thrust::distance( unique_dsts.begin(), thrust::remove_if(handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end(), [sorted_unique_srcs = raft::device_span<vertex_t const>( unique_srcs.data(), unique_srcs.size())] __device__(auto dst) { return thrust::binary_search(thrust::seq, sorted_unique_srcs.begin(), sorted_unique_srcs.end(), dst); })), handle.get_stream()); auto max_src_renumbered_vertex = thrust::transform_reduce( handle.get_thrust_policy(), unique_srcs.begin(), unique_srcs.end(), [sorted_org_vertices = raft::device_span<vertex_t const>(sorted_org_vertices.data(), sorted_org_vertices.size()), matching_renumbered_vertices = raft::device_span<vertex_t const>( matching_renumbered_vertices.data(), matching_renumbered_vertices.size())] __device__(vertex_t src) { auto it = thrust::lower_bound( thrust::seq, sorted_org_vertices.begin(), sorted_org_vertices.end(), src); return matching_renumbered_vertices[thrust::distance(sorted_org_vertices.begin(), it)]; }, std::numeric_limits<vertex_t>::lowest(), thrust::maximum<vertex_t>{}); auto min_dst_renumbered_vertex = thrust::transform_reduce( handle.get_thrust_policy(), unique_dsts.begin(), unique_dsts.end(), [sorted_org_vertices = raft::device_span<vertex_t const>(sorted_org_vertices.data(), sorted_org_vertices.size()), matching_renumbered_vertices = raft::device_span<vertex_t const>( matching_renumbered_vertices.data(), matching_renumbered_vertices.size())] __device__(vertex_t dst) { auto it = thrust::lower_bound( thrust::seq, sorted_org_vertices.begin(), sorted_org_vertices.end(), dst); return matching_renumbered_vertices[thrust::distance(sorted_org_vertices.begin(), it)]; }, std::numeric_limits<vertex_t>::max(), thrust::minimum<vertex_t>{}); ASSERT_TRUE(max_src_renumbered_vertex < min_dst_renumbered_vertex) << "Invariants 
violated, a source vertex is renumbered to a non-smaller value than a " "vertex that appear only in the edge list destinations."; } } } } }; TEST_P(Tests_RenumberSampledEdgelist, CheckInt32) { auto param = GetParam(); run_current_test<int32_t>(param); } TEST_P(Tests_RenumberSampledEdgelist, CheckInt64) { auto param = GetParam(); run_current_test<int64_t>(param); } INSTANTIATE_TEST_SUITE_P( small_test, Tests_RenumberSampledEdgelist, ::testing::Values(RenumberSampledEdgelist_Usecase{1024, 4096, 1, 1, true}, RenumberSampledEdgelist_Usecase{1024, 4096, 3, 1, true}, RenumberSampledEdgelist_Usecase{1024, 32768, 1, 256, true}, RenumberSampledEdgelist_Usecase{1024, 32768, 3, 256, true})); INSTANTIATE_TEST_SUITE_P( benchmark_test, Tests_RenumberSampledEdgelist, ::testing::Values(RenumberSampledEdgelist_Usecase{1 << 20, 1 << 20, 1, 1, false}, RenumberSampledEdgelist_Usecase{1 << 20, 1 << 20, 5, 1, false}, RenumberSampledEdgelist_Usecase{1 << 20, 1 << 24, 1, 1 << 20, false}, RenumberSampledEdgelist_Usecase{1 << 20, 1 << 24, 5, 1 << 20, false})); CUGRAPH_TEST_PROGRAM_MAIN()
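The renumber test pair above builds its per-label edge offsets by zero-filling a counter array, incrementing one counter per edge with a device-scope atomic, and then exclusive-scanning the counts into start offsets. The following stand-alone sketch is not part of the original pair; it is an illustration of that same count-then-scan pattern under the assumption of CUDA 11+ with libcu++ and Thrust, compiled with nvcc --extended-lambda:

#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/scan.h>
#include <cuda/atomic>
#include <cstdio>
#include <vector>

int main() {
  // Six sampled edges with labels in [0, 3); the goal is per-label start offsets.
  std::vector<int> h_labels{0, 2, 1, 2, 2, 0};
  thrust::device_vector<int> edge_labels(h_labels.begin(), h_labels.end());
  int num_labels = 3;
  thrust::device_vector<size_t> offsets(num_labels + 1, size_t{0});

  size_t* offsets_ptr = thrust::raw_pointer_cast(offsets.data());
  // Histogram the labels with device-scope atomics, as in the test above.
  thrust::for_each(edge_labels.begin(), edge_labels.end(),
                   [offsets_ptr] __device__(int label) {
                     cuda::atomic_ref<size_t, cuda::thread_scope_device> counter(offsets_ptr[label]);
                     counter.fetch_add(size_t{1}, cuda::std::memory_order_relaxed);
                   });
  // Turn per-label counts into start offsets.
  thrust::exclusive_scan(offsets.begin(), offsets.end(), offsets.begin());

  // offsets is now {0, 2, 3, 6}: edges with label i occupy [offsets[i], offsets[i+1]).
  for (size_t v : offsets) std::printf("%zu ", v);
  std::printf("\n");
  return 0;
}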
3c84cc31d36192f30da739c08750ce7df3d8c5bb.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ballquery_batch_p_cuda_.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            int meanActive = 1;
            float radius = 1;
            const float *xyz = NULL;
            hipMalloc(&xyz, XSIZE*YSIZE);
            const int *batch_idxs = NULL;
            hipMalloc(&batch_idxs, XSIZE*YSIZE);
            const int *batch_offsets = NULL;
            hipMalloc(&batch_offsets, XSIZE*YSIZE);
            int *idx = NULL;
            hipMalloc(&idx, XSIZE*YSIZE);
            int *start_len = NULL;
            hipMalloc(&start_len, XSIZE*YSIZE);
            int *cumsum = NULL;
            hipMalloc(&cumsum, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( ballquery_batch_p_cuda_), dim3(gridBlock),dim3(threadBlock), 0, 0, n,meanActive,radius,xyz,batch_idxs,batch_offsets,idx,start_len,cumsum);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( ballquery_batch_p_cuda_), dim3(gridBlock),dim3(threadBlock), 0, 0, n,meanActive,radius,xyz,batch_idxs,batch_offsets,idx,start_len,cumsum);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( ballquery_batch_p_cuda_), dim3(gridBlock),dim3(threadBlock), 0, 0, n,meanActive,radius,xyz,batch_idxs,batch_offsets,idx,start_len,cumsum);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
3c84cc31d36192f30da739c08750ce7df3d8c5bb.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ballquery_batch_p_cuda_.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            int meanActive = 1;
            float radius = 1;
            const float *xyz = NULL;
            cudaMalloc(&xyz, XSIZE*YSIZE);
            const int *batch_idxs = NULL;
            cudaMalloc(&batch_idxs, XSIZE*YSIZE);
            const int *batch_offsets = NULL;
            cudaMalloc(&batch_offsets, XSIZE*YSIZE);
            int *idx = NULL;
            cudaMalloc(&idx, XSIZE*YSIZE);
            int *start_len = NULL;
            cudaMalloc(&start_len, XSIZE*YSIZE);
            int *cumsum = NULL;
            cudaMalloc(&cumsum, XSIZE*YSIZE);
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            ballquery_batch_p_cuda_<<<gridBlock,threadBlock>>>(n,meanActive,radius,xyz,batch_idxs,batch_offsets,idx,start_len,cumsum);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                ballquery_batch_p_cuda_<<<gridBlock,threadBlock>>>(n,meanActive,radius,xyz,batch_idxs,batch_offsets,idx,start_len,cumsum);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                ballquery_batch_p_cuda_<<<gridBlock,threadBlock>>>(n,meanActive,radius,xyz,batch_idxs,batch_offsets,idx,start_len,cumsum);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
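Apart from header and API renames (curand_kernel.h to hiprand/hiprand_kernel.h, cudaMalloc to hipMalloc, and so on), the substantive rewrite hipify applied in the ballquery pair above is the kernel launch itself: the triple-chevron launch in the .cu file becomes a hipLaunchKernelGGL call in the .hip file. The sketch below is a hedged, stand-alone illustration of that mapping; the scale kernel is made up for the example, and it assumes the file is built with hipcc (or nvcc plus the HIP headers on the CUDA platform), where both spellings are accepted:

#include <hip/hip_runtime.h>

// Toy kernel used only for illustration.
__global__ void scale(float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1 << 20;
  float* d_x = nullptr;
  hipMalloc(&d_x, n * sizeof(float));
  dim3 grid((n + 255) / 256), block(256);

  // CUDA-style triple-chevron launch (the form used in the .cu file; also accepted by hipcc):
  scale<<<grid, block>>>(d_x, 2.0f, n);

  // The form hipify emits in the .hip file:
  // hipLaunchKernelGGL(kernel, gridDim, blockDim, dynamicSharedMem, stream, args...)
  hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, 2.0f, n);

  hipDeviceSynchronize();
  hipFree(d_x);
  return 0;
}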
c3b34f9e58e6411d3ce6bc0062972d4014b186b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include <paddle/fluid/memory/allocation/allocator.h> #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/distribute_fpn_proposals_op.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 64; static constexpr int kNumMaxinumNumBlocks = 4096; int const BBoxSize = 4; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <class T> __global__ void GPUDistFpnProposalsHelper( const int nthreads, const T* rois, const int lod_size, const int refer_level, const int refer_scale, const int max_level, const int min_level, int* roi_batch_id_data, int* sub_lod_list, int* target_lvls, bool pixel_offset = true) { CUDA_KERNEL_LOOP(i, nthreads) { const T* offset_roi = rois + i * BBoxSize; int roi_batch_ind = roi_batch_id_data[i]; // get the target level of current rois T roi_area = RoIArea(offset_roi, pixel_offset); T roi_scale = sqrt(roi_area); int tgt_lvl = floor( log2(roi_scale / static_cast<T>(refer_scale) + (T)1e-8) + refer_level); tgt_lvl = min(max_level, max(tgt_lvl, min_level)); target_lvls[i] = tgt_lvl; // compute number of rois in the same batch and same target level platform::CudaAtomicAdd( sub_lod_list + (tgt_lvl - min_level) * lod_size + roi_batch_ind, 1); } } template <typename DeviceContext, typename T> class GPUDistributeFpnProposalsOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* fpn_rois = ctx.Input<paddle::framework::LoDTensor>("FpnRois"); auto multi_fpn_rois = ctx.MultiOutput<LoDTensor>("MultiFpnRois"); auto* restore_index = ctx.Output<Tensor>("RestoreIndex"); const int min_level = ctx.Attr<int>("min_level"); const int max_level = ctx.Attr<int>("max_level"); const int refer_level = ctx.Attr<int>("refer_level"); const int refer_scale = ctx.Attr<int>("refer_scale"); const bool pixel_offset = ctx.Attr<bool>("pixel_offset"); int num_level = max_level - min_level + 1; // check that the fpn_rois is not empty if (!ctx.HasInput("RoisNum")) { PADDLE_ENFORCE_EQ( fpn_rois->lod().size(), 1UL, platform::errors::InvalidArgument("DistributeFpnProposalsOp needs LoD" "with one level")); } std::vector<size_t> fpn_rois_lod; if (ctx.HasInput("RoisNum")) { auto* rois_num = ctx.Input<Tensor>("RoisNum"); fpn_rois_lod = GetLodFromRoisNum(rois_num); } else { fpn_rois_lod = 
fpn_rois->lod().back(); } int lod_size = fpn_rois_lod.size() - 1; int roi_num = fpn_rois_lod[lod_size]; auto& dev_ctx = ctx.template device_context<DeviceContext>(); // get batch id by lod in CPU Tensor roi_batch_id_list; roi_batch_id_list.Resize({roi_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); for (int n = 0; n < lod_size; ++n) { for (size_t i = fpn_rois_lod[n]; i < fpn_rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } // copy batch id list to GPU Tensor roi_batch_id_list_gpu; framework::TensorCopySync(roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu); Tensor sub_lod_list; sub_lod_list.Resize({num_level, lod_size}); int* sub_lod_list_data = sub_lod_list.mutable_data<int>(dev_ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, int> set_zero; set_zero(dev_ctx, &sub_lod_list, static_cast<int>(0)); Tensor target_lvls; target_lvls.Resize({roi_num}); int* target_lvls_data = target_lvls.mutable_data<int>(dev_ctx.GetPlace()); int dist_blocks = NumBlocks(roi_num); int threads = kNumCUDAThreads; // get target levels and sub_lod list hipLaunchKernelGGL(( GPUDistFpnProposalsHelper<T>), dim3(dist_blocks), dim3(threads), 0, dev_ctx.stream(), roi_num, fpn_rois->data<T>(), lod_size, refer_level, refer_scale, max_level, min_level, roi_batch_id_list_gpu.data<int>(), sub_lod_list_data, target_lvls_data, pixel_offset); auto place = dev_ctx.GetPlace(); Tensor index_in_t; int* idx_in = index_in_t.mutable_data<int>({roi_num}, dev_ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, roi_num); for_range(RangeInitFunctor{0, 1, idx_in}); Tensor keys_out_t; int* keys_out = keys_out_t.mutable_data<int>({roi_num}, dev_ctx.GetPlace()); Tensor index_out_t; int* idx_out = index_out_t.mutable_data<int>({roi_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs<int, int>( nullptr, temp_storage_bytes, target_lvls_data, keys_out, idx_in, idx_out, roi_num, 0, sizeof(int) * 8, dev_ctx.stream()); // Allocate temporary storage auto d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort target level to get corresponding index hipcub::DeviceRadixSort::SortPairs<int, int>( d_temp_storage->ptr(), temp_storage_bytes, target_lvls_data, keys_out, idx_in, idx_out, roi_num, 0, sizeof(int) * 8, dev_ctx.stream()); int* restore_idx_data = restore_index->mutable_data<int>({roi_num, 1}, dev_ctx.GetPlace()); // sort current index to get restore index hipcub::DeviceRadixSort::SortPairs<int, int>( d_temp_storage->ptr(), temp_storage_bytes, idx_out, keys_out, idx_in, restore_idx_data, roi_num, 0, sizeof(int) * 8, dev_ctx.stream()); int start = 0; auto multi_rois_num = ctx.MultiOutput<Tensor>("MultiLevelRoIsNum"); std::vector<int> sub_lod_list_cpu(lod_size * num_level); memory::Copy(platform::CPUPlace(), sub_lod_list_cpu.data(), place, sub_lod_list_data, sizeof(int) * lod_size * num_level, dev_ctx.stream()); dev_ctx.Wait(); for (int i = 0; i < num_level; ++i) { Tensor sub_lod = sub_lod_list.Slice(i, i + 1); // transfer length-based lod to offset-based lod std::vector<size_t> offset(1, 0); for (int j = 0; j < lod_size; ++j) { offset.emplace_back(offset.back() + sub_lod_list_cpu[i * lod_size + j]); } int sub_rois_num = offset.back(); int end = start + sub_rois_num; if (end > start) { Tensor sub_idx = index_out_t.Slice(start, end); start = end; multi_fpn_rois[i]->mutable_data<T>({sub_rois_num, kBoxDim}, 
dev_ctx.GetPlace()); GPUGather<T>(dev_ctx, *fpn_rois, sub_idx, multi_fpn_rois[i]); } else { multi_fpn_rois[i]->mutable_data<T>({sub_rois_num, kBoxDim}, dev_ctx.GetPlace()); } if (multi_rois_num.size() > 0) { Tensor* rois_num_t = multi_rois_num[i]; paddle::framework::TensorCopySync(sub_lod, dev_ctx.GetPlace(), rois_num_t); rois_num_t->Resize({lod_size}); } framework::LoD lod; lod.emplace_back(offset); multi_fpn_rois[i]->set_lod(lod); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( distribute_fpn_proposals, ops::GPUDistributeFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUDistributeFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, double>);
c3b34f9e58e6411d3ce6bc0062972d4014b186b4.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include <paddle/fluid/memory/allocation/allocator.h> #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/distribute_fpn_proposals_op.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 64; static constexpr int kNumMaxinumNumBlocks = 4096; int const BBoxSize = 4; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <class T> __global__ void GPUDistFpnProposalsHelper( const int nthreads, const T* rois, const int lod_size, const int refer_level, const int refer_scale, const int max_level, const int min_level, int* roi_batch_id_data, int* sub_lod_list, int* target_lvls, bool pixel_offset = true) { CUDA_KERNEL_LOOP(i, nthreads) { const T* offset_roi = rois + i * BBoxSize; int roi_batch_ind = roi_batch_id_data[i]; // get the target level of current rois T roi_area = RoIArea(offset_roi, pixel_offset); T roi_scale = sqrt(roi_area); int tgt_lvl = floor( log2(roi_scale / static_cast<T>(refer_scale) + (T)1e-8) + refer_level); tgt_lvl = min(max_level, max(tgt_lvl, min_level)); target_lvls[i] = tgt_lvl; // compute number of rois in the same batch and same target level platform::CudaAtomicAdd( sub_lod_list + (tgt_lvl - min_level) * lod_size + roi_batch_ind, 1); } } template <typename DeviceContext, typename T> class GPUDistributeFpnProposalsOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* fpn_rois = ctx.Input<paddle::framework::LoDTensor>("FpnRois"); auto multi_fpn_rois = ctx.MultiOutput<LoDTensor>("MultiFpnRois"); auto* restore_index = ctx.Output<Tensor>("RestoreIndex"); const int min_level = ctx.Attr<int>("min_level"); const int max_level = ctx.Attr<int>("max_level"); const int refer_level = ctx.Attr<int>("refer_level"); const int refer_scale = ctx.Attr<int>("refer_scale"); const bool pixel_offset = ctx.Attr<bool>("pixel_offset"); int num_level = max_level - min_level + 1; // check that the fpn_rois is not empty if (!ctx.HasInput("RoisNum")) { PADDLE_ENFORCE_EQ( fpn_rois->lod().size(), 1UL, platform::errors::InvalidArgument("DistributeFpnProposalsOp needs LoD" "with one level")); } std::vector<size_t> fpn_rois_lod; if (ctx.HasInput("RoisNum")) { auto* rois_num = ctx.Input<Tensor>("RoisNum"); fpn_rois_lod = GetLodFromRoisNum(rois_num); } else { fpn_rois_lod = fpn_rois->lod().back(); } int lod_size = fpn_rois_lod.size() - 1; int roi_num = 
fpn_rois_lod[lod_size]; auto& dev_ctx = ctx.template device_context<DeviceContext>(); // get batch id by lod in CPU Tensor roi_batch_id_list; roi_batch_id_list.Resize({roi_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); for (int n = 0; n < lod_size; ++n) { for (size_t i = fpn_rois_lod[n]; i < fpn_rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } // copy batch id list to GPU Tensor roi_batch_id_list_gpu; framework::TensorCopySync(roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu); Tensor sub_lod_list; sub_lod_list.Resize({num_level, lod_size}); int* sub_lod_list_data = sub_lod_list.mutable_data<int>(dev_ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, int> set_zero; set_zero(dev_ctx, &sub_lod_list, static_cast<int>(0)); Tensor target_lvls; target_lvls.Resize({roi_num}); int* target_lvls_data = target_lvls.mutable_data<int>(dev_ctx.GetPlace()); int dist_blocks = NumBlocks(roi_num); int threads = kNumCUDAThreads; // get target levels and sub_lod list GPUDistFpnProposalsHelper<T><<<dist_blocks, threads, 0, dev_ctx.stream()>>>( roi_num, fpn_rois->data<T>(), lod_size, refer_level, refer_scale, max_level, min_level, roi_batch_id_list_gpu.data<int>(), sub_lod_list_data, target_lvls_data, pixel_offset); auto place = dev_ctx.GetPlace(); Tensor index_in_t; int* idx_in = index_in_t.mutable_data<int>({roi_num}, dev_ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, roi_num); for_range(RangeInitFunctor{0, 1, idx_in}); Tensor keys_out_t; int* keys_out = keys_out_t.mutable_data<int>({roi_num}, dev_ctx.GetPlace()); Tensor index_out_t; int* idx_out = index_out_t.mutable_data<int>({roi_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs<int, int>( nullptr, temp_storage_bytes, target_lvls_data, keys_out, idx_in, idx_out, roi_num, 0, sizeof(int) * 8, dev_ctx.stream()); // Allocate temporary storage auto d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort target level to get corresponding index cub::DeviceRadixSort::SortPairs<int, int>( d_temp_storage->ptr(), temp_storage_bytes, target_lvls_data, keys_out, idx_in, idx_out, roi_num, 0, sizeof(int) * 8, dev_ctx.stream()); int* restore_idx_data = restore_index->mutable_data<int>({roi_num, 1}, dev_ctx.GetPlace()); // sort current index to get restore index cub::DeviceRadixSort::SortPairs<int, int>( d_temp_storage->ptr(), temp_storage_bytes, idx_out, keys_out, idx_in, restore_idx_data, roi_num, 0, sizeof(int) * 8, dev_ctx.stream()); int start = 0; auto multi_rois_num = ctx.MultiOutput<Tensor>("MultiLevelRoIsNum"); std::vector<int> sub_lod_list_cpu(lod_size * num_level); memory::Copy(platform::CPUPlace(), sub_lod_list_cpu.data(), place, sub_lod_list_data, sizeof(int) * lod_size * num_level, dev_ctx.stream()); dev_ctx.Wait(); for (int i = 0; i < num_level; ++i) { Tensor sub_lod = sub_lod_list.Slice(i, i + 1); // transfer length-based lod to offset-based lod std::vector<size_t> offset(1, 0); for (int j = 0; j < lod_size; ++j) { offset.emplace_back(offset.back() + sub_lod_list_cpu[i * lod_size + j]); } int sub_rois_num = offset.back(); int end = start + sub_rois_num; if (end > start) { Tensor sub_idx = index_out_t.Slice(start, end); start = end; multi_fpn_rois[i]->mutable_data<T>({sub_rois_num, kBoxDim}, dev_ctx.GetPlace()); GPUGather<T>(dev_ctx, *fpn_rois, sub_idx, multi_fpn_rois[i]); } else { 
multi_fpn_rois[i]->mutable_data<T>({sub_rois_num, kBoxDim}, dev_ctx.GetPlace()); } if (multi_rois_num.size() > 0) { Tensor* rois_num_t = multi_rois_num[i]; paddle::framework::TensorCopySync(sub_lod, dev_ctx.GetPlace(), rois_num_t); rois_num_t->Resize({lod_size}); } framework::LoD lod; lod.emplace_back(offset); multi_fpn_rois[i]->set_lod(lod); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( distribute_fpn_proposals, ops::GPUDistributeFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUDistributeFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, double>);
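Both versions of the distribute_fpn_proposals kernel above drive cub/hipcub's DeviceRadixSort::SortPairs with the usual two-phase protocol: a first call with a null workspace pointer that only reports the required temporary-storage size, then a second identical call with real storage that performs the sort. The sketch below shows that protocol in isolation, stripped of the Paddle/RMM allocator plumbing; the function and variable names are illustrative, not taken from the original:

#include <cub/cub.cuh>
#include <cuda_runtime.h>

// d_keys_in could hold the per-RoI target level, d_vals_in the RoI index 0..n-1.
void sort_pairs_two_phase(const int* d_keys_in, int* d_keys_out,
                          const int* d_vals_in, int* d_vals_out, int n,
                          cudaStream_t stream) {
  size_t temp_bytes = 0;
  // Phase 1: with a null workspace pointer CUB only reports the bytes it needs.
  cub::DeviceRadixSort::SortPairs(nullptr, temp_bytes,
                                  d_keys_in, d_keys_out, d_vals_in, d_vals_out,
                                  n, 0, sizeof(int) * 8, stream);
  void* d_temp = nullptr;
  cudaMalloc(&d_temp, temp_bytes);
  // Phase 2: the same call with real storage performs the key/value sort.
  cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes,
                                  d_keys_in, d_keys_out, d_vals_in, d_vals_out,
                                  n, 0, sizeof(int) * 8, stream);
  cudaFree(d_temp);
}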
f2d6dd81809290e752b0b86168bb410d15e3c248.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"header.h" void __global__ new_angle_based(const struct vertex_buffer *d_vb, struct vertex_buffer *result,int *fan,int vertices,int max,float *jacobian,function_info*funct,float*d1) { __shared__ int fan_list[512][20]; //int tx=threadIdx.x; int ty=threadIdx.y; int count; //int column=blockIdx.x * blockDim.x+tx; int row=blockIdx.y *blockDim.y+ty; struct vertex_buffer v0; struct vertex_buffer v1; struct vertex_buffer v2; struct vertex_buffer vr; struct vertex_buffer vr1; struct vertex_buffer vr2; struct vertex_buffer vr3; struct vertex_buffer new_pos; float temp0,temp1,temp2; float alpha1; float alpha2; float beta; int index; int index1; objective_function o; float temp[2][2]; float precision=0.000001; float sn; float sn_1; float sn_temp; float lamda; float slope; o.coeff_sqr_x=o.coeff_sqr_y=o.coeff_xy=o.rest.coeff_x=o.rest.coeff_y=o.rest.const_coeff=0; if(row<vertices){ //if(row==1)printf("**greetings from row 60\n"); index1=row*max; if(fan[index1+1]){ count=fan[index1]; for(int i=2;i<count+2;i++){ fan_list[ty][i]=fan[index1+i]; //if(row==60) //printf("%d\n",fan_list[ty][i]); } //perform smoothing //count=fan_list[ty][0]; vr.x=d_vb[fan_list[ty][2]].x; vr.y=d_vb[fan_list[ty][2]].y; vr1.x=d_vb[row].x; vr1.y=d_vb[row].y; vr2.x=d_vb[fan_list[ty][count+1]].x; vr2.y=d_vb[fan_list[ty][count+1]].y; for(int j=2;j<count+2;j++){ //vr.x=d_vb[fan_list[ty][j]].x; //vr.y=d_vb[fan_list[ty][j]].y; v1.x=vr1.x-vr.x; v1.y=vr1.y-vr.y; //if(row==68) //printf("%f %f\n",vr.x,vr.y); if(j-1==count)index=2; else index=j+1; vr3.x=d_vb[fan_list[ty][index]].x; vr3.y=d_vb[fan_list[ty][index]].y; //v0.x=d_vb[fan_list[ty][index]].x-vr.x; //v0.y=d_vb[fan_list[ty][index]].y-vr.y; v0.x=vr3.x-vr.x; v0.y=vr3.y-vr.y; //if(j==2)index=count+1; //else index=j-1; //v2.x=d_vb[fan_list[ty][index]].x-vr.x; //v2.y=d_vb[fan_list[ty][index]].y-vr.y; v2.x=vr2.x-vr.x; v2.y=vr2.y-vr.y; temp0=sqrtf(powf(v0.x,2)+powf(v0.y,2)); temp1=sqrtf(powf(v1.x,2)+powf(v1.y,2)); temp2=sqrtf(powf(v2.x,2)+powf(v2.y,2)); alpha1=acos((v1.x*v2.x+v1.y*v2.y)/(temp1*temp2)); alpha2=acos((v1.x*v0.x+v1.y*v0.y)/(temp1*temp0)); beta=(alpha2+alpha1)/2; beta=alpha2-beta; new_pos.x=vr.x+(vr1.x-vr.x)*cos(beta)-(vr1.y-vr.y)*sin(beta); new_pos.y=vr.y+(vr1.x-vr.x)*sin(beta)+(vr1.y-vr.y)*cos(beta); temp0=new_pos.y-vr.y; temp1=vr.x-new_pos.x; temp2=vr.y*new_pos.x-vr.x*new_pos.y; slope=sqrtf(powf(temp0,2)+powf(temp1,2)); temp0=funct[index1+j-2].coeff_x=temp0/slope; temp1=funct[index1+j-2].coeff_y=temp1/slope; temp2=funct[index1+j-2].const_coeff=temp2/slope; // if(row==104)printf("%f %f %f\n",temp0,temp1,temp2); o.coeff_sqr_x+=powf(temp0,2); o.coeff_sqr_y+=powf(temp1,2); o.coeff_xy+=2*temp0*temp1; o.rest.coeff_x+=2*temp0*temp2; o.rest.coeff_y+=2*temp1*temp2; o.rest.const_coeff+=powf(temp2,2); vr2=vr; vr=vr3; }//Inner loop ends here index=2*index1; //if(row==104)printf("%f %f %f %f %f %f\n",o.coeff_sqr_x,o.coeff_sqr_y,o.coeff_xy,o.rest.coeff_x,o.rest.coeff_y,o.rest.const_coeff); temp[0][0]=temp[0][1]=temp[1][0]=temp[1][1]=0; for(int k=0;k<count;k++){ temp0=jacobian[index+2*k]=funct[index1+k].coeff_x; temp1=jacobian[index+2*k+1]=funct[index1+k].coeff_y; temp[1][1]+=powf(temp0,2); temp[1][0]+=(temp0*temp1); temp[0][0]+=powf(temp1,2); //if(row==104)printf("%f %f\n",jacobian[row*max*2+2*k],jacobian[row*max*2+2*k+1]); } // if(row==104) // for(int k=0;k<count;k++){ // printf("%f %f\n",jacobian[row*max*2+2*k],jacobian[row*max*2+2*k+1]); //} /* temp[0][0]=temp[0][1]=temp[1][0]=temp[1][1]=0; 
for(int k=0;k<count;k++){ temp[1][1]+=powf(jacobian[row*max*2+2*k],2); temp[1][0]+=jacobian[row*max*2+2*k]*jacobian[row*max*2+2*k+1]; temp[0][0]+=powf(jacobian[row*max*2+2*k+1],2); // if(row==104) //printf("%f %f \n",funct[row*max+k].coeff_x,funct[row*max+k].coeff_y); } */ temp[0][1]=temp[1][0]; // if(row==104)printf("%f %f %f %f\n",temp[0][0],temp[0][1],temp[1][0],temp[1][1]); slope=(temp[0][0]*temp[1][1])-(temp[0][1]*temp[1][0]); temp[0][0]/=slope; temp[1][0]/=-slope; temp[0][1]/=-slope; temp[1][1]/=slope; // if(row==104)printf("%f %f %f %f\n",temp[0][0],temp[0][1],temp[1][0],temp[1][1]); for(int ii=0;ii<count;ii++){ d1[index+ii]=temp[0][0]*jacobian[index+2*ii]; d1[index+ii]+=(temp[0][1]*jacobian[index+2*ii+1]); d1[index+ii+count]=temp[1][0]*jacobian[index+2*ii]; d1[index+ii+count]+=(temp[1][1]*jacobian[index+2*ii+1]); // if(row==104){printf("%f %f %f %f %f %f\n",temp[0][0],temp[0][1],temp[1][0],temp[1][1],jacobian[row*max*2+2*ii],jacobian[row*max*2+2*ii+1]); // printf("%f %f\n",d1[row*max*2+ii],d1[row*max*2+ii+count]);} } //Initial guess of internal vertex v1.x=vr1.x; v1.y=vr1.y; //Here begins the calculation of main algorithmic loop for(int kk=0;kk<10;kk++){ //cout<<v1.x<<" "<<v1.y<<endl; //calculation of function vector reusing the jacobain matrix this time for function vector for(int ii=0;ii<count;ii++){ jacobian[index+ii]=v1.x*funct[index1+ii].coeff_x+v1.y*funct[index1+ii].coeff_y+funct[index1+ii].const_coeff; // if(row==104)printf("****%f\n",jacobian[row*max*2+ii]); //printf("****%f %f \n",funct[row*max+ii].coeff_x,funct[row*max+ii].coeff_y); } //First step of Algorithm sn=powf(v1.x,2)*o.coeff_sqr_x+powf(v1.y,2)*o.coeff_sqr_y+v1.x*v1.y*o.coeff_xy; sn+=v1.x*o.rest.coeff_x+v1.y*o.rest.coeff_y+o.rest.const_coeff; v0.x=v0.y=0;//Reinitializing the current vertex to calculate the new position for(int ii=0;ii<count;ii++){ v0.x+=d1[index+ii]*jacobian[index+ii]; v0.y+=d1[index+ii+count]*jacobian[index+ii]; } //new coordinates i.e. 
xn+1 //if(row==104)printf("*****%f %f\n",v0.x,v0.y); v2.x=v1.x+v0.x; v2.y=v1.y+v0.y; sn_1=powf(v2.x,2)*o.coeff_sqr_x+powf(v2.y,2)*o.coeff_sqr_y+v2.x*v2.y*o.coeff_xy; sn_1+=v2.x*o.rest.coeff_x+v2.y*o.rest.coeff_y+o.rest.const_coeff; if(fabs((sn_1-sn)/sn) < precision){ //puts("reached"); break;} lamda=1;//step 3 of algorithm if(sn_1<sn){//puts("Here"); v1.x=v2.x;v1.y=v2.y;continue;}//step 4 of algorithm //calculation of lamda using derivative;step 5 of algorithm lamda=(2*o.coeff_sqr_x*v0.x+o.coeff_xy*v0.y)*v1.x; lamda+=(2*o.coeff_sqr_y*v0.y+o.coeff_xy*v0.x)*v1.y; lamda+=o.rest.coeff_x*v0.x+o.rest.coeff_y*v0.y; lamda/=(o.coeff_sqr_x*powf(v0.x,2)+o.coeff_sqr_y*powf(v0.y,2)+o.coeff_xy*v0.x*v0.y); lamda/=(-2); v2.x=v1.x+lamda*v0.x; v2.y=v1.y+lamda*v0.y; sn_temp=powf(v2.x,2)*o.coeff_sqr_x+powf(v2.y,2)*o.coeff_sqr_y+v2.x*v2.y*o.coeff_xy; sn_temp+=v2.x*o.rest.coeff_x+v2.y*o.rest.coeff_y+o.rest.const_coeff; //Step 6 of an algorithm if(sn>sn_1){ if(sn_1<sn_temp)lamda=1; } else if(sn<sn_temp)lamda=0; //cout<<"lamda="<<lamda<<endl; //Step 7 of algorithm //if(row==104)printf("lamda=%f\n",lamda); int k=kk; while(k<10){ if(sn_1<sn){break;} //puts("***************"); lamda/=2; v1.x=v1.x+lamda*v0.x; v1.y=v1.y+lamda*v0.y; sn=powf(v1.x,2)*o.coeff_sqr_x+powf(v1.y,2)*o.coeff_sqr_y+v1.x*v1.y*o.coeff_xy; sn+=v1.x*o.rest.coeff_x+v1.y*o.rest.coeff_y+o.rest.const_coeff; for(int ii=0;ii<count;ii++) jacobian[index+ii]=v1.x*funct[index1+ii].coeff_x+v1.y*funct[index1+ii].coeff_y+funct[index1+ii].const_coeff; v0.x=v0.y=0;//Reinitializing the current vertex to calculate the new position for(int ii=0;ii<count;ii++){ v0.x+=d1[index+ii]*jacobian[index+ii]; v0.y+=d1[index+ii+count]*jacobian[index+ii]; } v2.x=v1.x+v0.x; v2.y=v1.y+v0.y; sn_1=powf(v2.x,2)*o.coeff_sqr_x+powf(v2.y,2)*o.coeff_sqr_x+v2.x*v2.y*o.coeff_xy; sn_1+=v2.x*o.rest.coeff_x+v2.y*o.rest.coeff_y+o.rest.const_coeff; k++; } if(k==10)break; }//Algorithm loop ends here*/ //if(row==104)printf("%f %f\n",v1.x,v1.y); result[row].x=v1.x; result[row].y=v1.y; } else{ result[row].x=d_vb[row].x; result[row].y=d_vb[row].y; } } }
f2d6dd81809290e752b0b86168bb410d15e3c248.cu
#include"header.h" void __global__ new_angle_based(const struct vertex_buffer *d_vb, struct vertex_buffer *result,int *fan,int vertices,int max,float *jacobian,function_info*funct,float*d1) { __shared__ int fan_list[512][20]; //int tx=threadIdx.x; int ty=threadIdx.y; int count; //int column=blockIdx.x * blockDim.x+tx; int row=blockIdx.y *blockDim.y+ty; struct vertex_buffer v0; struct vertex_buffer v1; struct vertex_buffer v2; struct vertex_buffer vr; struct vertex_buffer vr1; struct vertex_buffer vr2; struct vertex_buffer vr3; struct vertex_buffer new_pos; float temp0,temp1,temp2; float alpha1; float alpha2; float beta; int index; int index1; objective_function o; float temp[2][2]; float precision=0.000001; float sn; float sn_1; float sn_temp; float lamda; float slope; o.coeff_sqr_x=o.coeff_sqr_y=o.coeff_xy=o.rest.coeff_x=o.rest.coeff_y=o.rest.const_coeff=0; if(row<vertices){ //if(row==1)printf("**greetings from row 60\n"); index1=row*max; if(fan[index1+1]){ count=fan[index1]; for(int i=2;i<count+2;i++){ fan_list[ty][i]=fan[index1+i]; //if(row==60) //printf("%d\n",fan_list[ty][i]); } //perform smoothing //count=fan_list[ty][0]; vr.x=d_vb[fan_list[ty][2]].x; vr.y=d_vb[fan_list[ty][2]].y; vr1.x=d_vb[row].x; vr1.y=d_vb[row].y; vr2.x=d_vb[fan_list[ty][count+1]].x; vr2.y=d_vb[fan_list[ty][count+1]].y; for(int j=2;j<count+2;j++){ //vr.x=d_vb[fan_list[ty][j]].x; //vr.y=d_vb[fan_list[ty][j]].y; v1.x=vr1.x-vr.x; v1.y=vr1.y-vr.y; //if(row==68) //printf("%f %f\n",vr.x,vr.y); if(j-1==count)index=2; else index=j+1; vr3.x=d_vb[fan_list[ty][index]].x; vr3.y=d_vb[fan_list[ty][index]].y; //v0.x=d_vb[fan_list[ty][index]].x-vr.x; //v0.y=d_vb[fan_list[ty][index]].y-vr.y; v0.x=vr3.x-vr.x; v0.y=vr3.y-vr.y; //if(j==2)index=count+1; //else index=j-1; //v2.x=d_vb[fan_list[ty][index]].x-vr.x; //v2.y=d_vb[fan_list[ty][index]].y-vr.y; v2.x=vr2.x-vr.x; v2.y=vr2.y-vr.y; temp0=sqrtf(powf(v0.x,2)+powf(v0.y,2)); temp1=sqrtf(powf(v1.x,2)+powf(v1.y,2)); temp2=sqrtf(powf(v2.x,2)+powf(v2.y,2)); alpha1=acos((v1.x*v2.x+v1.y*v2.y)/(temp1*temp2)); alpha2=acos((v1.x*v0.x+v1.y*v0.y)/(temp1*temp0)); beta=(alpha2+alpha1)/2; beta=alpha2-beta; new_pos.x=vr.x+(vr1.x-vr.x)*cos(beta)-(vr1.y-vr.y)*sin(beta); new_pos.y=vr.y+(vr1.x-vr.x)*sin(beta)+(vr1.y-vr.y)*cos(beta); temp0=new_pos.y-vr.y; temp1=vr.x-new_pos.x; temp2=vr.y*new_pos.x-vr.x*new_pos.y; slope=sqrtf(powf(temp0,2)+powf(temp1,2)); temp0=funct[index1+j-2].coeff_x=temp0/slope; temp1=funct[index1+j-2].coeff_y=temp1/slope; temp2=funct[index1+j-2].const_coeff=temp2/slope; // if(row==104)printf("%f %f %f\n",temp0,temp1,temp2); o.coeff_sqr_x+=powf(temp0,2); o.coeff_sqr_y+=powf(temp1,2); o.coeff_xy+=2*temp0*temp1; o.rest.coeff_x+=2*temp0*temp2; o.rest.coeff_y+=2*temp1*temp2; o.rest.const_coeff+=powf(temp2,2); vr2=vr; vr=vr3; }//Inner loop ends here index=2*index1; //if(row==104)printf("%f %f %f %f %f %f\n",o.coeff_sqr_x,o.coeff_sqr_y,o.coeff_xy,o.rest.coeff_x,o.rest.coeff_y,o.rest.const_coeff); temp[0][0]=temp[0][1]=temp[1][0]=temp[1][1]=0; for(int k=0;k<count;k++){ temp0=jacobian[index+2*k]=funct[index1+k].coeff_x; temp1=jacobian[index+2*k+1]=funct[index1+k].coeff_y; temp[1][1]+=powf(temp0,2); temp[1][0]+=(temp0*temp1); temp[0][0]+=powf(temp1,2); //if(row==104)printf("%f %f\n",jacobian[row*max*2+2*k],jacobian[row*max*2+2*k+1]); } // if(row==104) // for(int k=0;k<count;k++){ // printf("%f %f\n",jacobian[row*max*2+2*k],jacobian[row*max*2+2*k+1]); //} /* temp[0][0]=temp[0][1]=temp[1][0]=temp[1][1]=0; for(int k=0;k<count;k++){ temp[1][1]+=powf(jacobian[row*max*2+2*k],2); 
temp[1][0]+=jacobian[row*max*2+2*k]*jacobian[row*max*2+2*k+1]; temp[0][0]+=powf(jacobian[row*max*2+2*k+1],2); // if(row==104) //printf("%f %f \n",funct[row*max+k].coeff_x,funct[row*max+k].coeff_y); } */ temp[0][1]=temp[1][0]; // if(row==104)printf("%f %f %f %f\n",temp[0][0],temp[0][1],temp[1][0],temp[1][1]); slope=(temp[0][0]*temp[1][1])-(temp[0][1]*temp[1][0]); temp[0][0]/=slope; temp[1][0]/=-slope; temp[0][1]/=-slope; temp[1][1]/=slope; // if(row==104)printf("%f %f %f %f\n",temp[0][0],temp[0][1],temp[1][0],temp[1][1]); for(int ii=0;ii<count;ii++){ d1[index+ii]=temp[0][0]*jacobian[index+2*ii]; d1[index+ii]+=(temp[0][1]*jacobian[index+2*ii+1]); d1[index+ii+count]=temp[1][0]*jacobian[index+2*ii]; d1[index+ii+count]+=(temp[1][1]*jacobian[index+2*ii+1]); // if(row==104){printf("%f %f %f %f %f %f\n",temp[0][0],temp[0][1],temp[1][0],temp[1][1],jacobian[row*max*2+2*ii],jacobian[row*max*2+2*ii+1]); // printf("%f %f\n",d1[row*max*2+ii],d1[row*max*2+ii+count]);} } //Initial guess of internal vertex v1.x=vr1.x; v1.y=vr1.y; //Here begins the calculation of main algorithmic loop for(int kk=0;kk<10;kk++){ //cout<<v1.x<<" "<<v1.y<<endl; //calculation of function vector reusing the jacobain matrix this time for function vector for(int ii=0;ii<count;ii++){ jacobian[index+ii]=v1.x*funct[index1+ii].coeff_x+v1.y*funct[index1+ii].coeff_y+funct[index1+ii].const_coeff; // if(row==104)printf("****%f\n",jacobian[row*max*2+ii]); //printf("****%f %f \n",funct[row*max+ii].coeff_x,funct[row*max+ii].coeff_y); } //First step of Algorithm sn=powf(v1.x,2)*o.coeff_sqr_x+powf(v1.y,2)*o.coeff_sqr_y+v1.x*v1.y*o.coeff_xy; sn+=v1.x*o.rest.coeff_x+v1.y*o.rest.coeff_y+o.rest.const_coeff; v0.x=v0.y=0;//Reinitializing the current vertex to calculate the new position for(int ii=0;ii<count;ii++){ v0.x+=d1[index+ii]*jacobian[index+ii]; v0.y+=d1[index+ii+count]*jacobian[index+ii]; } //new coordinates i.e. 
xn+1 //if(row==104)printf("*****%f %f\n",v0.x,v0.y); v2.x=v1.x+v0.x; v2.y=v1.y+v0.y; sn_1=powf(v2.x,2)*o.coeff_sqr_x+powf(v2.y,2)*o.coeff_sqr_y+v2.x*v2.y*o.coeff_xy; sn_1+=v2.x*o.rest.coeff_x+v2.y*o.rest.coeff_y+o.rest.const_coeff; if(fabs((sn_1-sn)/sn) < precision){ //puts("reached"); break;} lamda=1;//step 3 of algorithm if(sn_1<sn){//puts("Here"); v1.x=v2.x;v1.y=v2.y;continue;}//step 4 of algorithm //calculation of lamda using derivative;step 5 of algorithm lamda=(2*o.coeff_sqr_x*v0.x+o.coeff_xy*v0.y)*v1.x; lamda+=(2*o.coeff_sqr_y*v0.y+o.coeff_xy*v0.x)*v1.y; lamda+=o.rest.coeff_x*v0.x+o.rest.coeff_y*v0.y; lamda/=(o.coeff_sqr_x*powf(v0.x,2)+o.coeff_sqr_y*powf(v0.y,2)+o.coeff_xy*v0.x*v0.y); lamda/=(-2); v2.x=v1.x+lamda*v0.x; v2.y=v1.y+lamda*v0.y; sn_temp=powf(v2.x,2)*o.coeff_sqr_x+powf(v2.y,2)*o.coeff_sqr_y+v2.x*v2.y*o.coeff_xy; sn_temp+=v2.x*o.rest.coeff_x+v2.y*o.rest.coeff_y+o.rest.const_coeff; //Step 6 of an algorithm if(sn>sn_1){ if(sn_1<sn_temp)lamda=1; } else if(sn<sn_temp)lamda=0; //cout<<"lamda="<<lamda<<endl; //Step 7 of algorithm //if(row==104)printf("lamda=%f\n",lamda); int k=kk; while(k<10){ if(sn_1<sn){break;} //puts("***************"); lamda/=2; v1.x=v1.x+lamda*v0.x; v1.y=v1.y+lamda*v0.y; sn=powf(v1.x,2)*o.coeff_sqr_x+powf(v1.y,2)*o.coeff_sqr_y+v1.x*v1.y*o.coeff_xy; sn+=v1.x*o.rest.coeff_x+v1.y*o.rest.coeff_y+o.rest.const_coeff; for(int ii=0;ii<count;ii++) jacobian[index+ii]=v1.x*funct[index1+ii].coeff_x+v1.y*funct[index1+ii].coeff_y+funct[index1+ii].const_coeff; v0.x=v0.y=0;//Reinitializing the current vertex to calculate the new position for(int ii=0;ii<count;ii++){ v0.x+=d1[index+ii]*jacobian[index+ii]; v0.y+=d1[index+ii+count]*jacobian[index+ii]; } v2.x=v1.x+v0.x; v2.y=v1.y+v0.y; sn_1=powf(v2.x,2)*o.coeff_sqr_x+powf(v2.y,2)*o.coeff_sqr_x+v2.x*v2.y*o.coeff_xy; sn_1+=v2.x*o.rest.coeff_x+v2.y*o.rest.coeff_y+o.rest.const_coeff; k++; } if(k==10)break; }//Algorithm loop ends here*/ //if(row==104)printf("%f %f\n",v1.x,v1.y); result[row].x=v1.x; result[row].y=v1.y; } else{ result[row].x=d_vb[row].x; result[row].y=d_vb[row].y; } } }
5bcdccff154e3fbb779227ccfed6f08ca5d0e043.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__device__ void mul(double a, double b, double *res) {
    *res = a * b;
    // NaN
    *res = (*res)-(*res) / (*res)-(*res);
}

__global__ void compute(double *x, double *y, int size) {
    double d;
    for (int i=0; i < size; ++i) {
        double tmp;
        mul(x[i], y[i], &tmp);
        d += tmp;
    }

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0) {
        printf("dot: %f\n", d);
    }
}
5bcdccff154e3fbb779227ccfed6f08ca5d0e043.cu
#include <stdio.h>

__device__ void mul(double a, double b, double *res) {
    *res = a * b;
    // NaN
    *res = (*res)-(*res) / (*res)-(*res);
}

__global__ void compute(double *x, double *y, int size) {
    double d;
    for (int i=0; i < size; ++i) {
        double tmp;
        mul(x[i], y[i], &tmp);
        d += tmp;
    }

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0) {
        printf("dot: %f\n", d);
    }
}
d2026895142e877c94175181ff243cce30937eba.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"

#include "ShapeConvexPolyhedron.h"

namespace hpmc
{

namespace detail
{

//! HPMC kernels for ShapeConvexPolyhedron
template hipError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron >(const hpmc_free_volume_args_t &args,
                                                                 const typename ShapeConvexPolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeConvexPolyhedron >(const hpmc_args_t& args,
                                                            const typename ShapeConvexPolyhedron ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron >(const hpmc_implicit_args_t& args,
                                                                       const typename ShapeConvexPolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron >(const hpmc_implicit_args_t& args,
                                                                            const typename ShapeConvexPolyhedron ::param_type *d_params);

}; // end namespace detail

} // end namespace hpmc
d2026895142e877c94175181ff243cce30937eba.cu
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"

#include "ShapeConvexPolyhedron.h"

namespace hpmc
{

namespace detail
{

//! HPMC kernels for ShapeConvexPolyhedron
template cudaError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron >(const hpmc_free_volume_args_t &args,
                                                                  const typename ShapeConvexPolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeConvexPolyhedron >(const hpmc_args_t& args,
                                                             const typename ShapeConvexPolyhedron ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron >(const hpmc_implicit_args_t& args,
                                                                       const typename ShapeConvexPolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron >(const hpmc_implicit_args_t& args,
                                                                             const typename ShapeConvexPolyhedron ::param_type *d_params);

}; // end namespace detail

} // end namespace hpmc
fd688115b391121be5cc1d9b87c2544c19083c32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_xvel_plus_2_front; int xdim0_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int ydim0_update_halo_kernel2_xvel_plus_2_front; int ydim0_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int xdim1_update_halo_kernel2_xvel_plus_2_front; int xdim1_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int ydim1_update_halo_kernel2_xvel_plus_2_front; int ydim1_update_halo_kernel2_xvel_plus_2_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_xvel_plus_2_front * (y) + \ xdim0_update_halo_kernel2_xvel_plus_2_front * \ ydim0_update_halo_kernel2_xvel_plus_2_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_xvel_plus_2_front * (y) + \ xdim1_update_halo_kernel2_xvel_plus_2_front * \ ydim1_update_halo_kernel2_xvel_plus_2_front * (z)) // user function __device__ inline void update_halo_kernel2_xvel_plus_2_front(double *xvel0, double *xvel1, const int *fields) { if (fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -2)]; if (fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_xvel_plus_2_front( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front + idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front * ydim0_update_halo_kernel2_xvel_plus_2_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front + idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front * ydim1_update_halo_kernel2_xvel_plus_2_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_xvel_plus_2_front(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_xvel_plus_2_front( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 79)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(79, "update_halo_kernel2_xvel_plus_2_front"); OPS_kernels[79].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - 
start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_front_h || ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_front_h || xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_front_h || ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_front_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_front, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_xvel_plus_2_front_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_xvel_plus_2_front_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_xvel_plus_2_front_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_xvel_plus_2_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[79].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_plus_2_front), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[79].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, 
&t2); OPS_kernels[79].mpi_time += t2 - t1; OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
fd688115b391121be5cc1d9b87c2544c19083c32.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_xvel_plus_2_front; int xdim0_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int ydim0_update_halo_kernel2_xvel_plus_2_front; int ydim0_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int xdim1_update_halo_kernel2_xvel_plus_2_front; int xdim1_update_halo_kernel2_xvel_plus_2_front_h = -1; __constant__ int ydim1_update_halo_kernel2_xvel_plus_2_front; int ydim1_update_halo_kernel2_xvel_plus_2_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_xvel_plus_2_front * (y) + \ xdim0_update_halo_kernel2_xvel_plus_2_front * \ ydim0_update_halo_kernel2_xvel_plus_2_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_xvel_plus_2_front * (y) + \ xdim1_update_halo_kernel2_xvel_plus_2_front * \ ydim1_update_halo_kernel2_xvel_plus_2_front * (z)) // user function __device__ inline void update_halo_kernel2_xvel_plus_2_front(double *xvel0, double *xvel1, const int *fields) { if (fields[FIELD_XVEL0] == 1) xvel0[OPS_ACC0(0, 0, 0)] = xvel0[OPS_ACC0(0, 0, -2)]; if (fields[FIELD_XVEL1] == 1) xvel1[OPS_ACC1(0, 0, 0)] = xvel1[OPS_ACC1(0, 0, -2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_xvel_plus_2_front( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front + idx_z * 1 * 1 * xdim0_update_halo_kernel2_xvel_plus_2_front * ydim0_update_halo_kernel2_xvel_plus_2_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front + idx_z * 1 * 1 * xdim1_update_halo_kernel2_xvel_plus_2_front * ydim1_update_halo_kernel2_xvel_plus_2_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_xvel_plus_2_front(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_xvel_plus_2_front( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 79)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(79, "update_halo_kernel2_xvel_plus_2_front"); OPS_kernels[79].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 
= args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_xvel_plus_2_front_h || ydim0 != ydim0_update_halo_kernel2_xvel_plus_2_front_h || xdim1 != xdim1_update_halo_kernel2_xvel_plus_2_front_h || ydim1 != ydim1_update_halo_kernel2_xvel_plus_2_front_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_xvel_plus_2_front, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_xvel_plus_2_front_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_xvel_plus_2_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_xvel_plus_2_front_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_xvel_plus_2_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_xvel_plus_2_front_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_xvel_plus_2_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_xvel_plus_2_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[79].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_xvel_plus_2_front<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[79].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[79].mpi_time += t2 - t1; OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg0); 
OPS_kernels[79].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
82600d59ddbe37781b3d95bb78fc076b541d507a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // @author [email protected] // #include <op_boilerplate.h> #include <loops/reduce.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> template <typename T, typename OpClass> __device__ void reduceSimpleGeneric( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce::ReduceFunction<T>), sizeof(shape::TAD), shape::rank(xShapeInfo)); } __syncthreads(); functions::reduce::ReduceFunction<T>::template transformCudaXD<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); } template <typename T, typename OpClass> __device__ void reduceSimpleGeneric1D( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::reduce::ReduceFunction<T>::template transformCuda1D<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, nullptr, tadOnlyShapeInfo, tadOffsets); } template <typename T, typename OpClass> __device__ void reduceSimpleGeneric3D( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::reduce::ReduceFunction<T>::template transformCuda3D<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, nullptr, tadOnlyShapeInfo, tadOffsets); } template <typename T, typename OpClass> __device__ void reduceScalarGeneric( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce::ReduceFunction<T>), sizeof(shape::TAD), 0); } __syncthreads(); functions::reduce::ReduceFunction<T>::template execScalarCuda<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, reductionBuffer, manager, tadOnlyShapeInfo); }; // reduceScalar DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, float, INPUT(float *x, Nd4jLong *xShapeInfo, float *extraParams, float *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, double, INPUT(double *x, Nd4jLong *xShapeInfo, double *extraParams, double *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) 
DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, float16, INPUT(float16 *x, Nd4jLong *xShapeInfo, float16 *extraParams, float16 *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) // reduce1D DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, float, INPUT(float *x, Nd4jLong *xShape, float *extraParams, float *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, double, INPUT(double *x, Nd4jLong *xShape, double *extraParams, double *z, Nd4jLong *zShape, int *dimension, int dimensionLength, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, float16, INPUT(float16 *x, Nd4jLong *xShape, float16 *extraParams, float16 *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) // reduce3D DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, float, INPUT(float *x, Nd4jLong *xShape, float *extraParams, float *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, double, INPUT(double *x, Nd4jLong *xShape, double *extraParams, double *z, Nd4jLong *zShape, int *dimension, int dimensionLength, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, float16, INPUT(float16 *x, Nd4jLong *xShape, float16 *extraParams, float16 *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) // reduceXD DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, float, INPUT(float *x, Nd4jLong *xShape, float *extraParams, float *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, double, INPUT(double *x, Nd4jLong *xShape, double *extraParams, double *z, Nd4jLong *zShape, int *dimension, int dimensionLength, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, 
xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, float16, INPUT(float16 *x, Nd4jLong *xShape, float16 *extraParams, float16 *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) namespace functions { namespace reduce { template <> _CUDA_H void ReduceFunction<float>::execReduceScalar(dim3 launchDims, hipStream_t *stream, int opNum, float *x, Nd4jLong *xShapeInfo, float *extraParams, float *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, nullptr, 1, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed"); } template <> _CUDA_H void ReduceFunction<float16>::execReduceScalar(dim3 launchDims, hipStream_t *stream, int opNum, float16 *x, Nd4jLong *xShapeInfo, float16 *extraParams, float16 *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, nullptr, 1, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarHalf(...) failed"); } template <> _CUDA_H void ReduceFunction<double>::execReduceScalar(dim3 launchDims, hipStream_t *stream, int opNum, double *x, Nd4jLong *xShapeInfo, double *extraParams, double *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, nullptr, 1, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarDouble(...) 
failed"); } template <> _CUDA_H void ReduceFunction<float>::execReduceXD(dim3 launchDims, hipStream_t *stream, int opNum, int rank, float *x, Nd4jLong *xShape, float *extraParams, float *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if (rank == 1) { DISPATCH_SIMPLE(reduceSimpleGeneric1D, float, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else if (rank <= 3) { DISPATCH_SIMPLE(reduceSimpleGeneric3D, float, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else { DISPATCH_SIMPLE(reduceSimpleGenericXD, float, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void ReduceFunction<float16>::execReduceXD(dim3 launchDims, hipStream_t *stream, int opNum, int rank, float16 *x, Nd4jLong *xShape, float16 *extraParams, float16 *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if (rank == 1) { DISPATCH_SIMPLE(reduceSimpleGeneric1D, float16, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else if (rank <= 3) { DISPATCH_SIMPLE(reduceSimpleGeneric3D, float16, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else { DISPATCH_SIMPLE(reduceSimpleGenericXD, float16, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void ReduceFunction<double>::execReduceXD(dim3 launchDims, hipStream_t *stream, int opNum, int rank, double *x, Nd4jLong *xShape, double *extraParams, double *z, Nd4jLong *zShape, int *dimension, int dimensionLength, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if (rank == 1) { DISPATCH_SIMPLE(reduceSimpleGeneric1D, double, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else if (rank <= 3) { DISPATCH_SIMPLE(reduceSimpleGeneric3D, double, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else { DISPATCH_SIMPLE(reduceSimpleGenericXD, double, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } DEBUG_KERNEL(stream, opNum); } template <typename T> __device__ void initializeShared(T *extraParams, T **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(T); T *sPartialsDeref = (T *) *sPartials; for (int i = 0; i < sPartialsLength; i++) { sPartialsDeref[i] = extraParams[0]; } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::transformCuda1D(T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, 
dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ T *sPartials;// = (T *)manager->getSharedReductionBuffer(); __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = (T *) shmem; tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; T *rX = dx + tadOffsetForBlock; sPartials[threadIdx.x] = OpType::startingValue(rX); if (tadEWS >= 1) { for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(rX[i * tadEWS], extraParams), extraParams); } } else { __shared__ int tadRank; __shared__ Nd4jLong *tadShape; __shared__ Nd4jLong *tadStride; Nd4jLong xCoord[MAX_RANK]; if (threadIdx.x == 0) { tadRank = shape::rank(tadOnlyShapeInfo); tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); } __syncthreads(); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank, tadShape, i, xCoord); auto xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams); } } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::execScalarCuda( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { int elementWiseStride = shape::elementWiseStride(xShapeInfo); auto n = shape::length(xShapeInfo); auto tid = blockDim.x * blockIdx.x + threadIdx.x; //shared memory space for storing intermediate results T *sPartials = (T *)manager->getSharedReductionBuffer(); sPartials[threadIdx.x] = OpType::startingValue(dx); if (elementWiseStride >= 1) { for (int i = tid; i < n; i += (blockDim.x * gridDim.x)) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[i * elementWiseStride], extraParams), extraParams); } } else { __shared__ int rank; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *xStride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xShape = shape::shapeOf(xShapeInfo); xStride = shape::stride(xShapeInfo); } __syncthreads(); Nd4jLong ind2sub[MAX_RANK]; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { shape::ind2subC(rank, xShape, i, ind2sub); auto offset = shape::getOffset(0, xShape, xStride, ind2sub, rank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[offset], extraParams), extraParams); } } __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, n), extraParams); __syncthreads(); if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionBuffer; __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) { reductionBuffer[blockIdx.x] = 
sPartials[0];//this->postProcess(sPartials[0],n,extraParams); } __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(dx); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { result[0] = OpType::postProcess(sPartials[0], n, extraParams); } } } else { if (threadIdx.x == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; result[0] = OpType::postProcess(sPartials[0], n, extraParams); } } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::transformCuda3D( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ T *sPartials; // = (T *)manager->getSharedReductionBuffer(); __shared__ int tadLength; __shared__ int tadRank; __shared__ int numTads; __shared__ Nd4jLong *tadShape; __shared__ Nd4jLong *tadStride; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = (T *) shmem; tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadRank = shape::rank(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); } __syncthreads(); Nd4jLong xCoord[3]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(dx + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank, tadShape, i, xCoord); Nd4jLong xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. 
do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::transformCudaXD( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ T *sPartials; // __shared__ shape::TAD *tad; __shared__ int tadLength; __shared__ int tadRank; __shared__ int numTads; __shared__ Nd4jLong *tadShape; __shared__ Nd4jLong *tadStride; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = (T *) shmem; tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadRank = shape::rank(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); } __syncthreads(); Nd4jLong xCoord[MAX_RANK]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(dx + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank, tadShape, i, xCoord); Nd4jLong xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::aggregatePartials(T *sPartials, Nd4jLong tid, Nd4jLong numItems, T *extraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
Nd4jLong floorPow2 = numItems; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams); } __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numItems) { sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams); } __syncthreads(); } } BUILD_CALL_1(template __device__ void ReduceFunction<float>::execScalarCuda, float, (float*, Nd4jLong*, float*, float*, Nd4jLong*, float*, UnifiedSharedMemory *, Nd4jLong*), REDUCE_OPS) BUILD_CALL_1(template __device__ void ReduceFunction<float16>::execScalarCuda, float16, (float16*, Nd4jLong*, float16*, float16*, Nd4jLong*, float16*, UnifiedSharedMemory *, Nd4jLong*), REDUCE_OPS) BUILD_CALL_1(template __device__ void ReduceFunction<double>::execScalarCuda, double, (double*, Nd4jLong*, double*, double*, Nd4jLong*, double*, UnifiedSharedMemory *, Nd4jLong*), REDUCE_OPS) BUILD_CALL_1(template __device__ void ReduceFunction<float>::aggregatePartials, float, (float*, Nd4jLong, Nd4jLong, float*), REDUCE_OPS) BUILD_CALL_1(template __device__ void ReduceFunction<float16>::aggregatePartials, float16, (float16*, Nd4jLong, Nd4jLong, float16*), REDUCE_OPS) BUILD_CALL_1(template __device__ void ReduceFunction<double>::aggregatePartials, double, (double*, Nd4jLong, Nd4jLong, double*), REDUCE_OPS) } }
82600d59ddbe37781b3d95bb78fc076b541d507a.cu
// // @author [email protected] // #include <op_boilerplate.h> #include <loops/reduce.h> #include <loops/legacy_ops.h> #include <helpers/DebugHelper.h> template <typename T, typename OpClass> __device__ void reduceSimpleGeneric( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce::ReduceFunction<T>), sizeof(shape::TAD), shape::rank(xShapeInfo)); } __syncthreads(); functions::reduce::ReduceFunction<T>::template transformCudaXD<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); } template <typename T, typename OpClass> __device__ void reduceSimpleGeneric1D( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::reduce::ReduceFunction<T>::template transformCuda1D<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, nullptr, tadOnlyShapeInfo, tadOffsets); } template <typename T, typename OpClass> __device__ void reduceSimpleGeneric3D( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::reduce::ReduceFunction<T>::template transformCuda3D<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, nullptr, tadOnlyShapeInfo, tadOffsets); } template <typename T, typename OpClass> __device__ void reduceScalarGeneric( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::reduce::ReduceFunction<T>), sizeof(shape::TAD), 0); } __syncthreads(); functions::reduce::ReduceFunction<T>::template execScalarCuda<OpClass>( dx, xShapeInfo, extraParams, result, resultShapeInfo, reductionBuffer, manager, tadOnlyShapeInfo); }; // reduceScalar DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, float, INPUT(float *x, Nd4jLong *xShapeInfo, float *extraParams, float *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, double, INPUT(double *x, Nd4jLong *xShapeInfo, double *extraParams, double *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceScalarSimple_, reduceScalarGeneric, float16, INPUT(float16 *x, Nd4jLong *xShapeInfo, 
float16 *extraParams, float16 *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo), PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) // reduce1D DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, float, INPUT(float *x, Nd4jLong *xShape, float *extraParams, float *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, double, INPUT(double *x, Nd4jLong *xShape, double *extraParams, double *z, Nd4jLong *zShape, int *dimension, int dimensionLength, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric1D_, reduceSimpleGeneric1D, float16, INPUT(float16 *x, Nd4jLong *xShape, float16 *extraParams, float16 *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) // reduce3D DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, float, INPUT(float *x, Nd4jLong *xShape, float *extraParams, float *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, double, INPUT(double *x, Nd4jLong *xShape, double *extraParams, double *z, Nd4jLong *zShape, int *dimension, int dimensionLength, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGeneric3D_, reduceSimpleGeneric3D, float16, INPUT(float16 *x, Nd4jLong *xShape, float16 *extraParams, float16 *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) // reduceXD DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, float, INPUT(float *x, Nd4jLong *xShape, float *extraParams, float *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, double, INPUT(double *x, Nd4jLong *xShape, double *extraParams, double *z, Nd4jLong *zShape, int *dimension, int dimensionLength, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), 
OPS_A(REDUCE_OPS)) DISPATCH_KERNEL_SIMPLE(reduceSimpleGenericXD_, reduceSimpleGeneric, float16, INPUT(float16 *x, Nd4jLong *xShape, float16 *extraParams, float16 *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets), PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) namespace functions { namespace reduce { template <> _CUDA_H void ReduceFunction<float>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, int opNum, float *x, Nd4jLong *xShapeInfo, float *extraParams, float *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, float *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, nullptr, 1, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed"); } template <> _CUDA_H void ReduceFunction<float16>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, int opNum, float16 *x, Nd4jLong *xShapeInfo, float16 *extraParams, float16 *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, float16 *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, nullptr, 1, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarHalf(...) failed"); } template <> _CUDA_H void ReduceFunction<double>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, int opNum, double *x, Nd4jLong *xShapeInfo, double *extraParams, double *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, double *reductionBuffer, Nd4jLong *tadOnlyShapeInfo) { DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, nullptr, 1, reductionBuffer, tadOnlyShapeInfo), OPS_A(REDUCE_OPS)) nd4j::DebugHelper::checkErrorCode(stream, "execReduceScalarDouble(...) 
failed"); } template <> _CUDA_H void ReduceFunction<float>::execReduceXD(dim3 launchDims, cudaStream_t *stream, int opNum, int rank, float *x, Nd4jLong *xShape, float *extraParams, float *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if (rank == 1) { DISPATCH_SIMPLE(reduceSimpleGeneric1D, float, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else if (rank <= 3) { DISPATCH_SIMPLE(reduceSimpleGeneric3D, float, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else { DISPATCH_SIMPLE(reduceSimpleGenericXD, float, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void ReduceFunction<float16>::execReduceXD(dim3 launchDims, cudaStream_t *stream, int opNum, int rank, float16 *x, Nd4jLong *xShape, float16 *extraParams, float16 *z, Nd4jLong *zShape, int *dimension, int dimensionLength, float16 *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if (rank == 1) { DISPATCH_SIMPLE(reduceSimpleGeneric1D, float16, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else if (rank <= 3) { DISPATCH_SIMPLE(reduceSimpleGeneric3D, float16, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else { DISPATCH_SIMPLE(reduceSimpleGenericXD, float16, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } DEBUG_KERNEL(stream, opNum); } template <> _CUDA_H void ReduceFunction<double>::execReduceXD(dim3 launchDims, cudaStream_t *stream, int opNum, int rank, double *x, Nd4jLong *xShape, double *extraParams, double *z, Nd4jLong *zShape, int *dimension, int dimensionLength, double *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) { if (rank == 1) { DISPATCH_SIMPLE(reduceSimpleGeneric1D, double, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else if (rank <= 3) { DISPATCH_SIMPLE(reduceSimpleGeneric3D, double, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } else { DISPATCH_SIMPLE(reduceSimpleGenericXD, double, PARAMS(x, xShape, extraParams, z, zShape, dimension, dimensionLength, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(REDUCE_OPS)) } DEBUG_KERNEL(stream, opNum); } template <typename T> __device__ void initializeShared(T *extraParams, T **sPartials, int sMemSize) { int sPartialsLength = sMemSize / sizeof(T); T *sPartialsDeref = (T *) *sPartials; for (int i = 0; i < sPartialsLength; i++) { sPartialsDeref[i] = extraParams[0]; } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::transformCuda1D(T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, 
dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ T *sPartials;// = (T *)manager->getSharedReductionBuffer(); __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = (T *) shmem; tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; } __syncthreads(); for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; T *rX = dx + tadOffsetForBlock; sPartials[threadIdx.x] = OpType::startingValue(rX); if (tadEWS >= 1) { for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(rX[i * tadEWS], extraParams), extraParams); } } else { __shared__ int tadRank; __shared__ Nd4jLong *tadShape; __shared__ Nd4jLong *tadStride; Nd4jLong xCoord[MAX_RANK]; if (threadIdx.x == 0) { tadRank = shape::rank(tadOnlyShapeInfo); tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); } __syncthreads(); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank, tadShape, i, xCoord); auto xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams); } } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::execScalarCuda( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { int elementWiseStride = shape::elementWiseStride(xShapeInfo); auto n = shape::length(xShapeInfo); auto tid = blockDim.x * blockIdx.x + threadIdx.x; //shared memory space for storing intermediate results T *sPartials = (T *)manager->getSharedReductionBuffer(); sPartials[threadIdx.x] = OpType::startingValue(dx); if (elementWiseStride >= 1) { for (int i = tid; i < n; i += (blockDim.x * gridDim.x)) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[i * elementWiseStride], extraParams), extraParams); } } else { __shared__ int rank; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *xStride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xShape = shape::shapeOf(xShapeInfo); xStride = shape::stride(xShapeInfo); } __syncthreads(); Nd4jLong ind2sub[MAX_RANK]; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { shape::ind2subC(rank, xShape, i, ind2sub); auto offset = shape::getOffset(0, xShape, xStride, ind2sub, rank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[offset], extraParams), extraParams); } } __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, n), extraParams); __syncthreads(); if (gridDim.x > 1) { unsigned int *tc = (unsigned int *)reductionBuffer; __shared__ bool amLast; tid = threadIdx.x; if (threadIdx.x == 0) { reductionBuffer[blockIdx.x] = 
sPartials[0];//this->postProcess(sPartials[0],n,extraParams); } __threadfence(); __syncthreads(); if (threadIdx.x == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; sPartials[threadIdx.x] = OpType::startingValue(dx); for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(gridDim.x, blockDim.x), extraParams); __syncthreads(); if (threadIdx.x == 0) { result[0] = OpType::postProcess(sPartials[0], n, extraParams); } } } else { if (threadIdx.x == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; result[0] = OpType::postProcess(sPartials[0], n, extraParams); } } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::transformCuda3D( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ T *sPartials; // = (T *)manager->getSharedReductionBuffer(); __shared__ int tadLength; __shared__ int tadRank; __shared__ int numTads; __shared__ Nd4jLong *tadShape; __shared__ Nd4jLong *tadStride; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = (T *) shmem; tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadRank = shape::rank(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); } __syncthreads(); Nd4jLong xCoord[3]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(dx + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank, tadShape, i, xCoord); Nd4jLong xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. 
do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::transformCudaXD( T *dx, Nd4jLong *xShapeInfo, T *extraParams, T *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, T *reductionBuffer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { if (OpType::requiresSpecialAccumulation) { OpType::execSpecialCuda(dx, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionBuffer, manager, tadOnlyShapeInfo, tadOffsets); return; } //shared memory space for storing intermediate results __shared__ T *sPartials; // __shared__ shape::TAD *tad; __shared__ int tadLength; __shared__ int tadRank; __shared__ int numTads; __shared__ Nd4jLong *tadShape; __shared__ Nd4jLong *tadStride; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = (T *) shmem; tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); tadRank = shape::rank(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; tadShape = shape::shapeOf(tadOnlyShapeInfo); tadStride = shape::stride(tadOnlyShapeInfo); } __syncthreads(); Nd4jLong xCoord[MAX_RANK]; for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Nd4jLong tadOffsetForBlock = tadOffsets[r]; sPartials[threadIdx.x] = OpType::startingValue(dx + tadOffsetForBlock); for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { shape::ind2subC(tadRank, tadShape, i, xCoord); Nd4jLong xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], OpType::op(dx[xOffset], extraParams), extraParams); } __syncthreads(); // aggregate. do NOT reduce for elements > tadLength aggregatePartials<OpType>(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) result[r] = OpType::postProcess(sPartials[threadIdx.x], tadLength, extraParams); } } template <typename T> template <typename OpType> __device__ void ReduceFunction<T>::aggregatePartials(T *sPartials, Nd4jLong tid, Nd4jLong numItems, T *extraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
        Nd4jLong floorPow2 = numItems;

        if (floorPow2 & (floorPow2 - 1)) {
            while (floorPow2 & (floorPow2 - 1)) {
                floorPow2 &= floorPow2 - 1;
            }

            if (tid >= floorPow2) {
                sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
            }
            __syncthreads();
        }

        for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
            if (tid < activeThreads && tid + activeThreads < numItems) {
                sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
            }
            __syncthreads();
        }
    }

    BUILD_CALL_1(template __device__ void ReduceFunction<float>::execScalarCuda, float, (float*, Nd4jLong*, float*, float*, Nd4jLong*, float*, UnifiedSharedMemory *, Nd4jLong*), REDUCE_OPS)
    BUILD_CALL_1(template __device__ void ReduceFunction<float16>::execScalarCuda, float16, (float16*, Nd4jLong*, float16*, float16*, Nd4jLong*, float16*, UnifiedSharedMemory *, Nd4jLong*), REDUCE_OPS)
    BUILD_CALL_1(template __device__ void ReduceFunction<double>::execScalarCuda, double, (double*, Nd4jLong*, double*, double*, Nd4jLong*, double*, UnifiedSharedMemory *, Nd4jLong*), REDUCE_OPS)

    BUILD_CALL_1(template __device__ void ReduceFunction<float>::aggregatePartials, float, (float*, Nd4jLong, Nd4jLong, float*), REDUCE_OPS)
    BUILD_CALL_1(template __device__ void ReduceFunction<float16>::aggregatePartials, float16, (float16*, Nd4jLong, Nd4jLong, float16*), REDUCE_OPS)
    BUILD_CALL_1(template __device__ void ReduceFunction<double>::aggregatePartials, double, (double*, Nd4jLong, Nd4jLong, double*), REDUCE_OPS)
    }
}
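The aggregatePartials routine above first folds the threads beyond the largest power of two not exceeding numItems back into the lower slots, then runs a standard shared-memory tree reduction. A minimal standalone sketch of the same pattern, assuming a plain float sum in place of OpType::update and a hypothetical kernel name (not part of the library code above; launch with blockDim.x * sizeof(float) bytes of dynamic shared memory):

__global__ void blockSumSketch(const float *in, float *out, int n)
{
    extern __shared__ float sPartials[];              // one slot per thread
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tid;
    sPartials[tid] = (idx < n) ? in[idx] : 0.0f;
    __syncthreads();

    // fold the "remainder" threads down onto the largest power of two
    int floorPow2 = blockDim.x;
    while (floorPow2 & (floorPow2 - 1))
        floorPow2 &= floorPow2 - 1;
    if (tid >= floorPow2)
        sPartials[tid - floorPow2] += sPartials[tid];
    __syncthreads();

    // standard tree reduction over the power-of-two prefix
    for (int active = floorPow2 >> 1; active; active >>= 1) {
        if (tid < active)
            sPartials[tid] += sPartials[tid + active];
        __syncthreads();
    }

    if (tid == 0)
        out[blockIdx.x] = sPartials[0];               // one partial sum per block
}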
14123f4138193c085464b971995381d508be53be.hip
// !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>

// Recursive Implementation of Interleaved Pair Approach
int recursiveReduce(int *data, int const size)
{
    // terminate check
    if (size == 1) return data[0];

    // renew the stride
    int const stride = size / 2;

    // in-place reduction
    for (int i = 0; i < stride; i++)
    {
        data[i] += data[i + stride];
    }

    // call recursively
    return recursiveReduce(data, stride);
}

__global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;

    // boundary check
    if (idx >= n) return;

    // in-place reduction in global memory
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        if ((tid % (2 * stride)) == 0)
        {
            idata[tid] += idata[tid + stride];
        }

        // synchronize within threadblock
        __syncthreads();
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}

int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("%s starting reduction at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));

    bool bResult = false;

    // initialization
    int size = 1 << 24; // total number of elements to reduce
    printf(" with array size %d ", size);

    // execution configuration
    int blocksize = 1024; // initial block size

    if(argc > 1)
    {
        blocksize = atoi(argv[1]); // block size from command line argument
    }

    dim3 block (blocksize, 1);
    dim3 grid ((size + block.x - 1) / block.x, 1);
    printf("grid %d block %d\n", grid.x, block.x);

    // allocate host memory
    size_t bytes = size * sizeof(int);
    int *h_idata = (int *) malloc(bytes);
    int *h_odata = (int *) malloc(grid.x * sizeof(int));
    int *tmp = (int *) malloc(bytes);

    // initialize the array
    for (int i = 0; i < size; i++)
    {
        // mask off high 2 bytes to force max number to 255
        h_idata[i] = (int)( rand() & 0xFF );
    }

    memcpy (tmp, h_idata, bytes);

    double iStart, iElaps;
    int gpu_sum = 0;

    // allocate device memory
    int *d_idata = NULL;
    int *d_odata = NULL;
    CHECK(hipMalloc((void **) &d_idata, bytes));
    CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int)));

    // cpu reduction
    iStart = seconds();
    int cpu_sum = recursiveReduce (tmp, size);
    iElaps = seconds() - iStart;
    printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);

    // kernel 1: reduceNeighbored
    CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
    CHECK(hipDeviceSynchronize());
    iStart = seconds();
    hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
    CHECK(hipDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost));
    gpu_sum = 0;

    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];

    printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x, block.x);

    // free host memory
    free(h_idata);
    free(h_odata);

    // free device memory
    CHECK(hipFree(d_idata));
    CHECK(hipFree(d_odata));

    // reset device
    CHECK(hipDeviceReset());

    // check the results
    bResult = (gpu_sum == cpu_sum);

    if(!bResult) printf("Test failed!\n");

    return EXIT_SUCCESS;
}
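The host-side recursiveReduce above implements the interleaved-pair approach on the CPU, while the kernel uses the neighbored-pair scheme. For comparison, a device-side interleaved-pair counterpart might look like the following sketch (hypothetical kernel reduceInterleaved, not part of this file; the body is pure device code, so it is identical under HIP and CUDA and can be launched exactly like reduceNeighbored):

__global__ void reduceInterleaved(int *g_idata, int *g_odata, unsigned int n)
{
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // local pointer of this block, as in reduceNeighbored
    int *idata = g_idata + blockIdx.x * blockDim.x;

    if (idx >= n) return;

    // stride starts at half the block and is halved each step,
    // keeping the active threads contiguous (less warp divergence)
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }

    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}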
14123f4138193c085464b971995381d508be53be.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> // Recursive Implementation of Interleaved Pair Approach int recursiveReduce(int *data, int const size) { // terminate check if (size == 1) return data[0]; // renew the stride int const stride = size / 2; // in-place reduction for (int i = 0; i < stride; i++) { data[i] += data[i + stride]; } // call recursively return recursiveReduce(data, stride); } __global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int main(int argc, char **argv) { // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = 1024; // initial block size if(argc > 1) { blocksize = atoi(argv[1]); // block size from command line argument } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) { // mask off high 2 bytes to force max number to 255 h_idata[i] = (int)( rand() & 0xFF ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(cudaMalloc((void **) &d_idata, bytes)); CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction iStart = seconds(); int cpu_sum = recursiveReduce (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); // kernel 1: reduceNeighbored CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(cudaFree(d_idata)); CHECK(cudaFree(d_odata)); // reset device CHECK(cudaDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
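CHECK and seconds() come from the book-style helper header ../common/common.h, which is not part of this dump. One plausible definition of the CHECK macro used throughout main (an assumption for illustration, not the header's verbatim contents); seconds() is likewise assumed to be a small wall-clock helper, e.g. built on gettimeofday:

#define CHECK(call)                                                        \
{                                                                          \
    const cudaError_t error = (call);                                      \
    if (error != cudaSuccess)                                              \
    {                                                                      \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);             \
        fprintf(stderr, "code: %d, reason: %s\n", error,                   \
                cudaGetErrorString(error));                                \
        exit(1);                                                           \
    }                                                                      \
}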
6f963c03343f72571e2d41c96201fbeec676de54.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************** * main.cu * This is the start point of the project! *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <cutil_inline.h> #include <iostream> #include <vector> #include <indexing/lcss.h> #include <kernel/kernel_func.cu> #include <kernel/kernel_idxlcss.cu> /************************************************************************/ /* Init CUDA */ /************************************************************************/ #if __DEVICE_EMULATION__ bool InitCUDA(void){return true;} #else bool InitCUDA(void) { int count = 0; int i = 0; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } hipSetDevice(i); printf("CUDA initialized.\n"); return true; } #endif int main(int argc, char* argv[]) { float h_A[32*128],h_B[128]; float *d_A,*d_B; unsigned int size = 32*128*sizeof(float); if(!InitCUDA()) { return 0; } char *device_result = 0; char host_result[12] ={0}; cutilSafeCall( hipMalloc((void**) &device_result, sizeof(char) * 11)); unsigned int timer = 0; cutilCheckError( cutCreateTimer( &timer)); cutilCheckError( cutStartTimer( timer)); ////////////////////////////////////////////////////////////////////////// hipMalloc((void**)&d_A,size); hipMalloc((void**)&d_B,32*sizeof(float)); for(int i=0;i<32*128;i++) { h_A[i] = 1; } hipMemcpy(d_A,h_A,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( SumMatrix), dim3(32),dim3(128), 0, 0, d_A,d_B); hipMemcpy(h_B,d_B,32*sizeof(float),hipMemcpyDeviceToHost); for(int i=0;i<32;i++) { std::cout<<h_B[i]<<" "; } std::cout<<std::endl; std::vector<int> v1,v2; // int n,m,t; // while(std::cin>>n>>m&&n&&m) // { // v1.clear(); // v2.clear(); // for(int i=0;i<n;i++) // { // std::cin>>t; // v1.push_back(t); // } // for(int i=0;i<m;i++) // { // std::cin>>t; // v2.push_back(t); // } // std::cout<<LCSS(v1,v2)<<std::endl; // } int text[8] = {1,2,3,4,1,3,1,4},num; int res[8]; int * d_T,*d_res; size = 8*sizeof(int); hipMalloc((void**)&d_res,size); hipMalloc((void**)&d_T,size); hipMemcpy(d_T,text,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( Inverted_List_Create), dim3(1),dim3(8), 0, 0, d_T,1,d_res,num); hipMemcpy(res,d_res,size,hipMemcpyDeviceToHost); for(int i=0;i<num;i++) { std::cout<<res[i]<<" "; } std::cout<<std::endl; ///////////////////////////////////////////////////////////////////////////////// cutilCheckMsg("Kernel execution failed\n"); hipDeviceSynchronize(); cutilCheckError( cutStopTimer( timer)); printf("Processing time: %f (ms)\n", cutGetTimerValue( timer)); cutilCheckError( cutDeleteTimer( timer)); cutilSafeCall( hipMemcpy(host_result, device_result, sizeof(char) * 11, hipMemcpyDeviceToHost)); printf("%s\n", host_result); cutilSafeCall( hipFree(device_result)); return 0; }
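SumMatrix and Inverted_List_Create are defined in the included kernel/*.cu files, which are not part of this dump, so their signatures are taken on faith here. A short sketch of how a HIP launch such as the SumMatrix call above could be checked for errors without the cutil helpers (an illustration, not what the file itself does):

hipLaunchKernelGGL(( SumMatrix), dim3(32), dim3(128), 0, 0, d_A, d_B);

hipError_t err = hipGetLastError();        // catches bad launch configurations
if (err != hipSuccess)
    fprintf(stderr, "SumMatrix launch failed: %s\n", hipGetErrorString(err));

err = hipDeviceSynchronize();              // surfaces errors raised during execution
if (err != hipSuccess)
    fprintf(stderr, "SumMatrix execution failed: %s\n", hipGetErrorString(err));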
6f963c03343f72571e2d41c96201fbeec676de54.cu
/******************************************************************** * main.cu * This is the start point of the project! *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <cutil_inline.h> #include <iostream> #include <vector> #include <indexing/lcss.h> #include <kernel/kernel_func.cu> #include <kernel/kernel_idxlcss.cu> /************************************************************************/ /* Init CUDA */ /************************************************************************/ #if __DEVICE_EMULATION__ bool InitCUDA(void){return true;} #else bool InitCUDA(void) { int count = 0; int i = 0; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } cudaSetDevice(i); printf("CUDA initialized.\n"); return true; } #endif int main(int argc, char* argv[]) { float h_A[32*128],h_B[128]; float *d_A,*d_B; unsigned int size = 32*128*sizeof(float); if(!InitCUDA()) { return 0; } char *device_result = 0; char host_result[12] ={0}; cutilSafeCall( cudaMalloc((void**) &device_result, sizeof(char) * 11)); unsigned int timer = 0; cutilCheckError( cutCreateTimer( &timer)); cutilCheckError( cutStartTimer( timer)); //////////////////////////////////º¯Êýµ÷ÓÃ//////////////////////////////////////// cudaMalloc((void**)&d_A,size); cudaMalloc((void**)&d_B,32*sizeof(float)); for(int i=0;i<32*128;i++) { h_A[i] = 1; } cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice); SumMatrix<<<32,128>>>(d_A,d_B); cudaMemcpy(h_B,d_B,32*sizeof(float),cudaMemcpyDeviceToHost); for(int i=0;i<32;i++) { std::cout<<h_B[i]<<" "; } std::cout<<std::endl; std::vector<int> v1,v2; // int n,m,t; // while(std::cin>>n>>m&&n&&m) // { // v1.clear(); // v2.clear(); // for(int i=0;i<n;i++) // { // std::cin>>t; // v1.push_back(t); // } // for(int i=0;i<m;i++) // { // std::cin>>t; // v2.push_back(t); // } // std::cout<<LCSS(v1,v2)<<std::endl; // } int text[8] = {1,2,3,4,1,3,1,4},num; int res[8]; int * d_T,*d_res; size = 8*sizeof(int); cudaMalloc((void**)&d_res,size); cudaMalloc((void**)&d_T,size); cudaMemcpy(d_T,text,size,cudaMemcpyHostToDevice); Inverted_List_Create<<<1,8>>>(d_T,1,d_res,num); cudaMemcpy(res,d_res,size,cudaMemcpyDeviceToHost); for(int i=0;i<num;i++) { std::cout<<res[i]<<" "; } std::cout<<std::endl; ///////////////////////////////////////////////////////////////////////////////// cutilCheckMsg("Kernel execution failed\n"); cudaThreadSynchronize(); cutilCheckError( cutStopTimer( timer)); printf("Processing time: %f (ms)\n", cutGetTimerValue( timer)); cutilCheckError( cutDeleteTimer( timer)); cutilSafeCall( cudaMemcpy(host_result, device_result, sizeof(char) * 11, cudaMemcpyDeviceToHost)); printf("%s\n", host_result); cutilSafeCall( cudaFree(device_result)); return 0; }
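cutCreateTimer/cutStartTimer/cutGetTimerValue belong to the long-deprecated cutil library bundled with old CUDA SDK samples. A sketch of the same measurement done with CUDA events instead (an alternative shown for illustration only; the file itself relies on cutil):

cudaEvent_t start, stop;
float elapsed_ms = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start, 0);
// ... kernel launches and memcpys being timed ...
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);

cudaEventElapsedTime(&elapsed_ms, start, stop);
printf("Processing time: %f (ms)\n", elapsed_ms);

cudaEventDestroy(start);
cudaEventDestroy(stop);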
d1bf5d2271f9189b89539f33c6948fa1c49535fa.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime.h>

using namespace std;

void MatrixMul_host(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c)
{
    for (int i = 0; i < a_rows; i++)
    {
        for (int j = 0; j < b_cols; j++)
        {
            float t = 0;
            for (int k = 0; k < b_rows; k++)
            {
                t += a[i*a_cols+k]*b[k*b_cols+j];
            }
            c[i*b_cols+j] = t;
        }
    }
}

void MatrixRandBin(float *mat, int rows, int cols)
{
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            if ((float)rand()/RAND_MAX > 0.5)
            {
                mat[i*cols+j] = 1.0f;
            }
            else
            {
                mat[i*cols+j] = -1.0f;
            }
        }
    }
}

float MatrixCompare(float *a, float *b, int rows, int cols)
{
    float err = 0;
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++)
        {
            err += abs(a[i*cols+j] - b[i*cols+j]);
        }
    }
    return err;
}

__global__ void MatrixMul_device(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c)
{
    int tix = threadIdx.x;
    int tiy = threadIdx.y;
    int bix = blockIdx.x;
    int biy = blockIdx.y;
    int bdx = blockDim.x;
    int bdy = blockDim.y;
    int gdx = gridDim.x;
    int gdy = gridDim.y;

    for (int i = tix; i < b_cols; i += bdx)
    {
        float sum = 0;
        for (int k = 0; k < a_cols; k++)
        {
            sum += a[bix*a_rows+k]*b[k*b_cols+i];
        }
        c[bix*a_cols+i] = sum;
    }
}

int main()
{
    int Matrixsize = 1000;
    float *a_host;
    float *a_device;
    float *b_host;
    float *b_device;
    float *result_host;
    float *result_device;
    float *result_cpu;

    a_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    b_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    result_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);
    result_cpu = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize);

    srand(0);
    MatrixRandBin(a_host, Matrixsize, Matrixsize);
    MatrixRandBin(b_host, Matrixsize, Matrixsize);

    hipMalloc((void**)&a_device, sizeof(float) * Matrixsize * Matrixsize);
    hipMalloc((void**)&b_device, sizeof(float) * Matrixsize * Matrixsize);
    hipMalloc((void**)&result_device, sizeof(float) * Matrixsize * Matrixsize);
    hipMemcpy(a_device, a_host, sizeof(float) * Matrixsize * Matrixsize, hipMemcpyHostToDevice);
    hipMemcpy(b_device, b_host, sizeof(float) * Matrixsize * Matrixsize, hipMemcpyHostToDevice);

    hipEvent_t start_device, stop_device;
    float time_device;
    hipEventCreate(&start_device);
    hipEventCreate(&stop_device);
    hipEventRecord( start_device, 0 );

    dim3 gridsize(1000, 1, 1);
    dim3 blocksize(256, 1, 1);
    hipLaunchKernelGGL(( MatrixMul_device), dim3(gridsize), dim3(blocksize), 0, 0, a_device, Matrixsize, Matrixsize, b_device, Matrixsize, Matrixsize, result_device);

    hipEventRecord( stop_device, 0 );
    hipEventSynchronize( stop_device );
    hipEventElapsedTime( &time_device, start_device, stop_device );
    hipEventDestroy( start_device );
    hipEventDestroy( stop_device );
    cout << "gputime=" << time_device << "ms" << endl;

    hipMemcpy(result_host, result_device, sizeof(float) * Matrixsize * Matrixsize, hipMemcpyDeviceToHost);
    hipFree(a_device);
    hipFree(b_device);
    hipFree(result_device);

    clock_t start_host = clock();
    MatrixMul_host(a_host, Matrixsize, Matrixsize, b_host, Matrixsize, Matrixsize, result_cpu);
    cout << "cputime=" << (double)(clock() - start_host)/1000 << "ms" << endl;

    float err = MatrixCompare(result_cpu, result_host, Matrixsize, Matrixsize);
    cout << "err in gpu and cpu = " << err << endl;
}
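MatrixMul_device indexes a with a_rows and writes c with a_cols, which happens to be correct here only because every dimension equals Matrixsize (1000). A sketch of the same one-block-per-row kernel with the indexing written for general row-major shapes (a hypothetical variant for illustration, not the code above):

__global__ void MatrixMul_device_general(const float *a, int a_rows, int a_cols,
                                         const float *b, int b_rows, int b_cols,
                                         float *c)
{
    int row = blockIdx.x;                         // one block per output row
    if (row >= a_rows) return;

    for (int col = threadIdx.x; col < b_cols; col += blockDim.x)
    {
        float sum = 0.0f;
        for (int k = 0; k < a_cols; k++)          // a_cols == b_rows
        {
            sum += a[row * a_cols + k] * b[k * b_cols + col];
        }
        c[row * b_cols + col] = sum;
    }
}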
d1bf5d2271f9189b89539f33c6948fa1c49535fa.cu
#include <stdio.h> #include <iostream> #include <iomanip> #include <cuda_runtime.h> using namespace std; void MatrixMul_host(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c) { for (int i = 0; i < a_rows; i++) { for (int j = 0; j < b_cols; j++) { float t = 0; for (int k = 0; k < b_rows; k++) { t += a[i*a_cols+k]*b[k*b_cols+j]; } c[i*b_cols+j] = t; } } } void MatrixRandBin(float *mat, int rows, int cols) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if ((float)rand()/RAND_MAX > 0.5) { mat[i*cols+j] = 1.0f; }else { mat[i*cols+j] = -1.0f; } } } } float MatrixCompare(float *a,float *b,int rows,int cols){ float err=0; for (int i=0;i<rows;i++){ for (int j=0;j<cols;j++){ err+=abs(a[i*cols+j]-b[i*cols+j]); } } return err; } __global__ void MatrixMul_device(float *a, int a_rows, int a_cols, float *b, int b_rows, int b_cols, float *c) { int tix = threadIdx.x; int tiy = threadIdx.y; int bix = blockIdx.x; int biy = blockIdx.y; int bdx = blockDim.x; int bdy = blockDim.y; int gdx = gridDim.x; int gdy = gridDim.y; for (int i = tix; i < b_cols; i += bdx) { float sum = 0; for (int k = 0; k < a_cols; k++) { sum += a[bix*a_rows+k]*b[k*b_cols+i]; } c[bix*a_cols+i] = sum; } } int main() { int Matrixsize=1000; float *a_host; float *a_device; float *b_host; float *b_device; float *result_host; float *result_device; float *result_cpu; a_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize); b_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize); result_host = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize); result_cpu = (float*) malloc(sizeof(float) * Matrixsize * Matrixsize); srand(0); MatrixRandBin(a_host,Matrixsize,Matrixsize); MatrixRandBin(b_host,Matrixsize,Matrixsize); cudaMalloc((void**)&a_device,sizeof(float) *Matrixsize * Matrixsize); cudaMalloc((void**)&b_device,sizeof(float) *Matrixsize * Matrixsize); cudaMalloc((void**)&result_device,sizeof(float) *Matrixsize * Matrixsize); cudaMemcpy(a_device,a_host,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyHostToDevice); cudaMemcpy(b_device,b_host,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyHostToDevice); cudaEvent_t start_device, stop_device; float time_device; cudaEventCreate(&start_device); cudaEventCreate(&stop_device); cudaEventRecord( start_device, 0 ); dim3 gridsize(1000,1,1); dim3 blocksize(256,1,1); MatrixMul_device<<<gridsize,blocksize>>>(a_device,Matrixsize,Matrixsize,b_device,Matrixsize,Matrixsize,result_device); cudaEventRecord( stop_device, 0 ); cudaEventSynchronize( stop_device ); cudaEventElapsedTime( &time_device, start_device, stop_device ); cudaEventDestroy( start_device ); cudaEventDestroy( stop_device ); cout<<"gputime="<<time_device<<"ms"<<endl; cudaMemcpy(result_host, result_device,sizeof(float) *Matrixsize * Matrixsize,cudaMemcpyDeviceToHost); cudaFree(a_device); cudaFree(b_device); cudaFree(result_device); clock_t start_host = clock(); MatrixMul_host(a_host,Matrixsize,Matrixsize,b_host,Matrixsize,Matrixsize,result_cpu); cout<<"cputime="<<(double)(clock() - start_host)/1000<<"ms"<<endl; float err=MatrixCompare(result_cpu,result_host,Matrixsize,Matrixsize); cout<<"err in gpu and cpu = "<<err<<endl; }
02e88ccb9ea9686360a8d49ccb9c19221c805f5e.hip
// !!! This is a file automatically generated by hipify!!! // CUDA runtime + CUBLAS #define HALF_ENABLE_CPP11_CMATH #include "hip/hip_runtime.h" #include "rocblas.h" #include<device_launch_parameters.h> #include<thrust/host_vector.h> #include<thrust/device_vector.h> #include "SimTester.h" #include <iostream> #ifdef __INTELLISENSE__ #define __HIPCC__ #include <hip/device_functions.h> #undef __constant__ #undef __global__ #undef __shared__ #undef __device__ #define __global__ #define __shared__ #define __device__ #define __constant__ #endif // __INTELLISENSE__ //#include <hip/hip_fp16.h> //#include "float.hpp" //cuda #define BUCKET_NUM 100 #define STACK_SPACE 96 #define SAVED_TREES 128 #define HEAP_SIZE 64 #define LEAF_OFFSET 32 #define RIGHT_MASK 0 #define LEFT_MASK 1 #define BOTH_MASK 2 #define NEITHER_MASK 3 #define HALF_SAMPLE 8/*distance based->curvature based*/ #define CROP_LAST6 0xffc0 #define GET_LAST6 0x3f #undef max #undef min //#define r2 dslf*dslf extern size_t pitch; __constant__ float d_hash[49]; __constant__ float epsilon[1]; void constant_cpy(float *linearhash, float _epsilon) { hipMemcpyToSymbol(d_hash, linearhash, 48 * sizeof(float)); hipMemcpyToSymbol(epsilon, &_epsilon, sizeof(float)); } //template <bool add = false> //change heap to unsigned int; __global__ void cuDeterminingAvaillines(int *avail, float *val, int n) { int i = threadIdx.x + blockIdx.x *blockDim.x; if (i < n) { val += i*HEAP_SIZE; if (*val > 0) { int j; for (j = 0; j < 64; j++) if (val[j] <= 0) break; avail[i] = j; } else avail[i] = 0; } } __global__ void CudaSimilarWithLines( int n, //n of pts int *lineinfo, float *h_all, //line information int *buckets, float *segments, //LSH int *heap, float *val, float *variation, //outputs int *searched, //massive tmp int *lkd, int*id, //Kdtree bool *pt_availablility ) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; if (pidx >= n) return; if (!pt_availablility[pidx]) return; int *lineinfo_ptr = lineinfo + (pidx << 1); if ((pidx - *lineinfo_ptr) < HALF_SAMPLE || *(lineinfo_ptr + 1) - pidx + *lineinfo_ptr - 1 < HALF_SAMPLE) return; float d_tmp; float *si, *siSampled; float *sjSampled; short tmp[8]; val += pidx*HEAP_SIZE - 1; heap += pidx*HEAP_SIZE - 1; variation += pidx * HEAP_SIZE - 1; searched += pidx*SAVED_TREES; si = h_all + 3 * *lineinfo_ptr; siSampled = si + 3 * ((pidx - *lineinfo_ptr) - HALF_SAMPLE); int l[32]; unsigned int stack[16]; int *leaf = l - 1; float pt[3];//= { siSampled[3 * HALF_SAMPLE] ,siSampled[3 * HALF_SAMPLE + 1] ,siSampled[3 * HALF_SAMPLE + 2] }; pt[0] = siSampled[15]; pt[1] = siSampled[16];//siSampled[3 * HALF_SAMPLE];// siSampled[3 * HALF_SAMPLE + 1]; pt[2] = siSampled[17];//siSampled[3 * HALF_SAMPLE];// siSampled[3 * HALF_SAMPLE + 2]; int i_tmp = 0; #pragma unroll for (int i = 0; i < 8; i++, i_tmp += 6) { d_tmp = pt[0] * d_hash[i_tmp]; d_tmp += pt[1] * d_hash[i_tmp + 1]; d_tmp += pt[2] * d_hash[i_tmp + 2]; d_tmp += d_hash[i_tmp + 3]; d_tmp -= d_hash[i_tmp + 4]; d_tmp /= d_hash[i_tmp + 5]; tmp[i] = d_tmp; if (tmp[i] < 0) tmp[i] = 0; else if (tmp[i] >= BUCKET_NUM) tmp[i] = BUCKET_NUM - 1; } int size = 0; bool dir = true; int dither = 0; float x, y, z, min, a, v_tmp; int ptr = 1, lfptr = 1; int lob = 0, rob = 0, toi; int index, end, idx, i_tmp2; while (size < STACK_SPACE && dither < BUCKET_NUM) //infinite loop when size > remaining segmemts; { #pragma region LSH toi = 1; #pragma unroll for (int i = 0; i < 8; i++) { //int i = 0; toi <<= 1; index = tmp[i] + dir ? 
dither : -dither; if (index < 0) { if ((lob&toi) && !dir) goto finalize; index = 0; lob |= toi; } else if (index >= BUCKET_NUM) { if ((rob&toi) && dir) goto finalize; index = BUCKET_NUM - 1; rob |= toi; }//dithering index += 100 * i; end = buckets[index + 1]; index = buckets[index]; if (index < 0) goto finalize;// blank bucket - attention needed on linearization found: while (index < end) { if (buckets[index] < 0) { index += buckets[index + 1] + 2; goto found; } for (int j = 0; j < size; j++) if (buckets[index] == searched[j]) { index += buckets[index + 1] + 2; goto found; } searched[size++] = buckets[index]; if (buckets[index + 1] > 1) { min = INT_MAX; i_tmp = index + buckets[index + 1] + 2; for (int j = index + 2; j < i_tmp; j++) { i_tmp2 = buckets[j] * 7; x = pt[0] - segments[i_tmp2]; y = pt[1] - segments[i_tmp2 + 1]; z = pt[2] - segments[i_tmp2 + 2]; d_tmp = 0; d_tmp += x*segments[i_tmp2 + 3]; d_tmp = 0; d_tmp += y*segments[i_tmp2 + 4]; d_tmp = 0; d_tmp += z*segments[i_tmp2 + 5]; d_tmp /= segments[i_tmp2 + 6];//padding if (d_tmp <= 0) d_tmp = x*x + y*y + z*z; else if (d_tmp >= 1) d_tmp = pow(x + segments[i_tmp2], 2) + pow(y + segments[i_tmp2 + 1], 2) + pow(z + segments[i_tmp2 + 2], 2);//d_tmp = segments[j * 8 + 7]; else d_tmp = pow(x + d_tmp*segments[i_tmp2], 2) + pow(y + d_tmp*segments[i_tmp2 + 1], 2) + pow(z + d_tmp*segments[i_tmp2 + 2], 2); if (d_tmp < min) { min = d_tmp; idx = buckets[j]; } } } else//blank bucket? idx = buckets[index + 2]; #pragma endregion #pragma region KD-Tree int *linearKD = lkd + id[idx]; int next = 0; int dim = 1, stackidx = 0; float currdata; float dss = INT_MAX; float ds; int ptidx = -1; while (next != -1) {//using mask to reduce memory usage currdata = pt[dim]; ds = pow(pt[0] - linearKD[next + 1], 2) + pow(pt[1] - linearKD[next + 2], 2) + pow(pt[2] - linearKD[next + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[next]; } if (linearKD[next + dim] < currdata) { if (linearKD[next + 4] != -1 && linearKD[next + 11] != -1) stack[stackidx++] = (linearKD[next + 11] << 2) + (linearKD[linearKD[next + 11] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[next + 11] + 11] < 0 ? LEFT_MASK : BOTH_MASK); next = linearKD[next + 4]; } else { if (linearKD[next + 4] != -1) { stack[stackidx++] = (linearKD[next + 4] << 2) + (linearKD[linearKD[next + 4] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[next + 4] + 11] < 0 ? LEFT_MASK : BOTH_MASK); next = linearKD[next + 11]; } else break; } dim = (dim++ - 3) ? dim : dim - 3; } /*faster implementation: 1: no pruning; 2: calc the minimum dist while getting into the point; */ //backtrace; int r; int rt; ctn: while (stackidx > 0) { rt = stack[--stackidx]; r = rt&NEITHER_MASK; rt >>= 2; ds = pow(pt[0] - linearKD[rt + 1], 2) + pow(pt[1] - linearKD[rt + 2], 2) + pow(pt[2] - linearKD[rt + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[rt]; } ds = 0; switch (r) { case 0: rt = rt + 11; break; case 1: rt = rt + 4; break; case 3:continue; default: rt = rt + 4; break; } if (pt[0] < linearKD[rt + 1]) ds += pow(pt[0] - linearKD[rt + 1], 2); else if (pt[0] > linearKD[rt + 4]) ds += pow(pt[0] - linearKD[rt + 4], 2); if (pt[1] < linearKD[rt + 2]) ds += pow(pt[1] - linearKD[rt + 2], 2); else if (pt[1] > linearKD[rt + 5]) ds += pow(pt[1] - linearKD[rt + 5], 2); if (pt[2] < linearKD[rt + 3]) ds += pow(pt[2] - linearKD[rt + 3], 2); else if (pt[2] > linearKD[rt + 6]) ds += pow(pt[2] - linearKD[rt + 6], 2); if (ds < dss&&linearKD[rt]>0) { stack[stackidx] = linearKD[rt] << 2; stack[stackidx++] += linearKD[linearKD[rt] + 4] < 0 ? 
NEITHER_MASK : linearKD[linearKD[rt] + 11] < 0 ? LEFT_MASK : BOTH_MASK;//BOTH_MASK; } if (r == 2) { rt = rt + 7; ds = 0; if (pt[0] < linearKD[rt + 1]) ds += pow(pt[0] - linearKD[rt + 1], 2); else if (pt[0] > linearKD[rt + 4]) ds += pow(pt[0] - linearKD[rt + 4], 2); if (pt[1] < linearKD[rt + 2]) ds += pow(pt[1] - linearKD[rt + 2], 2); else if (pt[1] > linearKD[rt + 5]) ds += pow(pt[1] - linearKD[rt + 5], 2); if (pt[2] < linearKD[rt + 3]) ds += pow(pt[2] - linearKD[rt + 3], 2); else if (pt[2] > linearKD[rt + 6]) ds += pow(pt[2] - linearKD[rt + 6], 2); if (ds < dss) { stack[stackidx] = linearKD[rt] << 2; stack[stackidx++] += linearKD[linearKD[rt] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] < 0 ? LEFT_MASK : BOTH_MASK; } } } dss = sqrt(dss); #pragma endregion #pragma region AdditionalCalc //int t = buckets[index]; if (ptidx < HALF_SAMPLE || lineinfo[buckets[ptidx + 2] + 1] - ptidx - 1 < HALF_SAMPLE) { index += buckets[index + 1] + 2; continue; } else sjSampled = h_all + (3 * lineinfo[buckets[ptidx + 2]]) + 3 * (ptidx - HALF_SAMPLE); a = 0; //float *sis = pt - 3 * HALF_SAMPLE; //#pragma unroll for (int j = 0; j < 2 * HALF_SAMPLE + 1; j++) a += sqrt(pow(siSampled[3 * j] - sjSampled[3 * j] - (pt[0] - sjSampled[15]), 2) + pow(siSampled[3 * j + 1] - sjSampled[3 * j + 1] - (pt[1] - sjSampled[16]), 2) + pow(siSampled[3 * j + 2] - sjSampled[3 * j + 2] - (pt[2] - sjSampled[17]), 2)); #pragma endregion #pragma region Heap //heap op //*(int*)&val[1]&CROP_LAST6; int j = 0, t; if (ptr > HEAP_SIZE)//offset { if (val[leaf[1]] <= dss) { index += buckets[index + 1] + 2; continue; } j = leaf[1]; t = j >> 1; d_tmp = val[t]; i_tmp = heap[t]; v_tmp = variation[t]; while (j > 1 && d_tmp > dss) { val[j] = d_tmp; heap[j] = i_tmp; variation[j] = v_tmp; j = t; t >>= 1; d_tmp = val[t]; i_tmp = heap[t]; v_tmp = variation[t]; } val[j] = dss; heap[j] = (buckets[index]);// << 18) + idx; variation[j] = 100 * a; //leaf-heap operation i_tmp2 = leaf[1]; j = 2; i_tmp = val[leaf[2]] > val[leaf[3]] ? leaf[2] : leaf[++j]; while (val[i_tmp] > dss) { leaf[j >> 1] = i_tmp; if ((j <<= 1) >= LEAF_OFFSET) break; i_tmp = val[leaf[j]] > val[leaf[j + 1]] ? 
leaf[j] : leaf[++j]; } leaf[j >> 1] = i_tmp2; //end leaf-heap op } else { j = ptr++; t = j >> 1; d_tmp = val[t]; i_tmp = heap[t]; v_tmp = variation[t]; while (j > 1 && d_tmp > dss) { heap[j] = i_tmp; val[j] = d_tmp; variation[j] = v_tmp; j = t; t >>= 1; d_tmp = val[t]; i_tmp = heap[t]; v_tmp = variation[t]; } val[j] = dss; heap[j] = (buckets[index]);// << 18) + idx;//seg variation[j] = 100 * a; //leaf_op if (ptr > LEAF_OFFSET) { j = lfptr++; dss = val[ptr - 1]; i_tmp = leaf[j >> 1]; while (j > 1 && val[i_tmp] < dss) { leaf[j] = i_tmp; j >>= 1; i_tmp = leaf[j >> 1]; } leaf[j] = ptr - 1; } //end leaf_op } #pragma endregion //return heap-as-{size}-nearst-pts; index += buckets[index + 1] + 2; if (size > STACK_SPACE) return; } finalize: if (!dir) dither++; dir = !dir; } } if (ptr <= HEAP_SIZE) val[ptr] = -1; } union reused_t { float fp; uint32_t uint; }; #define THREADS_PER_BLOCK 128 #define LSH_SEARCH_BUFFER 128 /*__test__:2 */ #define MODULO_8_CROPPER 0x7 #define LSB_EXTRACTOR 0x1 /*extract lsb from binary number by a logic and*/ #define UCHAR_MAX 0xff //Macro functions #define pt(i) shared[blockDim.x *(i) + threadIdx.x] #define stack(i) i_shared[blockDim.x *((i) + 3) + threadIdx.x] #define check(i) ((searched[n * ((i)>>3) + ptidx])>>((i)&MODULO_8_CROPPER))&LSB_EXTRACTOR #define mark(i) (searched[n * ((i)>>3) + ptidx]) &= (unsigned char)(UCHAR_MAX - (1<<((i)&MODULO_8_CROPPER))) #define availibility_check(i) (ptavail[((i)>>3)]>>((i)&MODULO_8_CROPPER))&LSB_EXTRACTOR/*12 I32Add-equivalents for bit calculation*/ #define ispt(i) (i)<0 #define linearKD(i) __uint_as_float(linearKD[(i)]) __global__ void LSH_determining_bucket( //unsigned char* searched, int *streamlineoffsets, uchar4 *bucketsforpt, float *ptinfo, int *output, int n ) { int ptidx = blockIdx.x*blockDim.x + threadIdx.x; unsigned char tmp[8]; //max of 256 buckets; uchar4 *parts = reinterpret_cast<uchar4 *> (tmp); float pt[3]; if (ptidx < n) { #pragma unroll for (int i = 0; i < 3; i++) pt[i] = ptinfo[n * i + ptidx]; pt[0] = fabs(pt[0]); float d_tmp;//float int i_tmp = 0; #pragma unroll for (int i = 0; i < 8; i++, i_tmp += 6) { d_tmp = pt[0] * d_hash[i_tmp]; d_tmp += pt[1] * d_hash[i_tmp + 1]; d_tmp += pt[2] * d_hash[i_tmp + 2]; d_tmp += d_hash[i_tmp + 3]; d_tmp -= d_hash[i_tmp + 4]; d_tmp /= d_hash[i_tmp + 5]; d_tmp = d_tmp < 100 ? d_tmp : 100; tmp[i] =(unsigned char) d_tmp > 0 ? 
__float2uint_rn(d_tmp) : 0; //printf("%d %d %f \n", __float2uint_rn(d_tmp), (unsigned int)(d_tmp), d_tmp); } bucketsforpt[ptidx] = parts[0]; bucketsforpt[n + ptidx] = parts[1]; #pragma unroll for (int i = 0; i < LSH_SEARCH_BUFFER; i++) output[n*i + ptidx] = INT32_MIN; } }//preproc; __global__ void LSHSearch( //`output` need to be zeroed before first use; unsigned char *searched, unsigned char *ptavail, int *buckets, float *segments, float *ptinfo, uchar4 *bucketsforpt, int *output, int n, int slotsize ) { int ptidx = blockIdx.x*blockDim.x + threadIdx.x; if (ptidx < n) { __shared__ float shared[THREADS_PER_BLOCK * 3]; unsigned char tmp[8]; //max of 256 buckets; #pragma unroll for (int i = 0; i < 3; i++) pt(i) = ptinfo[ptidx + i * n]; pt(0) = fabs(pt(0)); if (ptidx < n && availibility_check(ptidx)) { int size = 0; float x, y, z, min, a, v_tmp; int ptr = 1, lfptr = 1; //int lob = 0, rob = 0, toi; int index, end, idx, i_tmp2, i_tmp; float d_tmp; reinterpret_cast<uchar4 *> (tmp)[0] = bucketsforpt[ptidx]; reinterpret_cast<uchar4 *> (tmp)[1] = bucketsforpt[ptidx + n]; int dither = 0; bool dir = true; int size1 = -1; int size2 = -2; while (size < LSH_SEARCH_BUFFER && dither <= BUCKET_NUM) { //#pragma unroll for (int i = 0; i < 8; i++) //infinite loop when size > remaining segmemts; { index = tmp[i] + (dir ? dither : -dither); if (index < 0) { continue; } else if (index >= BUCKET_NUM) { continue; }//dithering index += 100 * i; end = buckets[index + 1]; index = buckets[index]; if (index < 0) continue;// blank bucket - attention needed on linearization while (index < end) { if (buckets[index] < 0 || !(check(buckets[index]))) { index += buckets[index + 1] + 2; continue; } mark(buckets[index]); idx = -1; if (buckets[index + 1] > 1) { min = INT_MAX; i_tmp = index + buckets[index + 1] + 2; for (int j = index + 2; j < i_tmp; j++) { i_tmp2 = buckets[j] * 7; x = pt(0) - segments[i_tmp2]; y = pt(1) - segments[i_tmp2 + 1]; z = pt(2) - segments[i_tmp2 + 2]; d_tmp = 0; d_tmp += x*segments[i_tmp2 + 3]; d_tmp = 0; d_tmp += y*segments[i_tmp2 + 4]; d_tmp = 0; d_tmp += z*segments[i_tmp2 + 5]; d_tmp /= segments[i_tmp2 + 6];//padding if (d_tmp <= 0) d_tmp = x*x + y*y + z*z; else if (d_tmp >= 1) d_tmp = pow(x + segments[i_tmp2], 2) + pow(y + segments[i_tmp2 + 1], 2) + pow(z + segments[i_tmp2 + 2], 2);//d_tmp = segments[j * 8 + 7]; else d_tmp = pow(x + d_tmp*segments[i_tmp2], 2) + pow(y + d_tmp*segments[i_tmp2 + 1], 2) + pow(z + d_tmp*segments[i_tmp2 + 2], 2); if (d_tmp < min) { min = d_tmp; idx = buckets[j]; } } } else//blank bucket? 
idx = buckets[index + 2]; while ( ispt(output[size*n + ptidx]) && output[size*n + ptidx] > INT32_MIN && availibility_check(-output[size*n + ptidx]) ) { size++; } if (idx == -1) { index += buckets[index + 1] + 2; continue; } output[size++ * n + ptidx] = (idx << 13) + buckets[index];//H19 seg L13 line //index += index += buckets[index + 1] + 2; if (size >= LSH_SEARCH_BUFFER) return; } } finalize: if (dir) dither++; dir = !dir; } if (size < LSH_SEARCH_BUFFER) output[size *n + ptidx] = INT32_MIN; } } } __global__ void KDSearch( int *lkd, int*id, int *outputs, unsigned short *variation, float *lineinfo, unsigned char* ptavail, //ignore negative values in lineinfo.x(s) int n ) { int kernelIdx = threadIdx.x + blockIdx.x * blockDim.x; if (kernelIdx < n && availibility_check(kernelIdx)) { __shared__ float shared[16 * THREADS_PER_BLOCK]; int *i_shared = (int *)shared; for (int i = 0; i < 3; i++) pt(i) = lineinfo[kernelIdx + i * n]; pt(0) = fabs(pt(0)); //pt(3) = pt(1) * pt(1) + pt(0) * pt(0) + pt(2) * pt(2); bool finished = false; for (int i = 0; i < LSH_SEARCH_BUFFER; i++) { float dss = INT_MAX; int ptidx = -1; int opti = outputs[n * i + kernelIdx]; if (ispt(opti) || finished)//point or segment? { return; if (opti <= INT32_MIN) continue; else opti = -opti; if (availibility_check(opti)) { dss = sqrt(pow(pt(0) - fabs(lineinfo[opti]), 2) + pow(pt(1) - lineinfo[opti + n], 2) + pow(pt(2) - lineinfo[opti + 2 * n], 2)); } else { finished = true; continue; } } else { int *linearKD = lkd + id[(opti >> 13)];//kd-search for segment idx int next = 0, dim = 1, stackidx = 0; int rt = 3; float currdata, ds; while (next != -1) {//using mask to reduce memory usage currdata = pt((dim - 1)); ds = pow(pt(0) - linearKD(next + 1), 2) + pow(pt(1) - linearKD(next + 2), 2) + pow(pt(2) - linearKD(next + 3), 2); if (ds < dss) { dss = ds; ptidx = linearKD[next]; } if (linearKD(next + dim) < currdata) { if (linearKD[next + 4] != -1 && linearKD[next + 11] != -1) { stack(stackidx) = (linearKD[next + 11] << 2) + (linearKD[linearKD[next + 11] + 4] == -1 ? NEITHER_MASK : linearKD[linearKD[next + 11] + 11] == -1 ? LEFT_MASK : BOTH_MASK);//lv 2 opt stackidx++; } next = linearKD[next + 4]; } else { if (linearKD[next + 4] != -1) { stack(stackidx) = (linearKD[next + 4] << 2) + (linearKD[linearKD[next + 4] + 4] == -1 ? NEITHER_MASK : linearKD[linearKD[next + 4] + 11] == -1 ? LEFT_MASK : BOTH_MASK); stackidx++; next = linearKD[next + 11]; } else break; } dim = (dim++ - 3) ? dim : dim - 3; } /*better implementation: Half precision for boxing. 
(16 bytes aligning, 1~2 g.mem fetches) 0 1 2 3 4 5 5 6 6 7 7 8 9 9 10 10 11 11 id x y z (l lmx lmy lmz lMx lMy lMz (r rmx rmy rmz rMx rMy rMz)) */ int r;// backtrace ctn: while (stackidx > 0) { stackidx--; rt = stack(stackidx); r = rt&NEITHER_MASK; rt >>= 2; ds = pow(pt(0) - linearKD[rt + 1], 2) + pow(pt(1) - linearKD[rt + 2], 2) + pow(pt(2) - linearKD[rt + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[rt]; } ds = 0; switch (r) { case 0: rt = rt + 11; break; case 1: rt = rt + 4; break; case 3:continue; default: rt = rt + 4; break; } /*r = linearKD[rt] & 0x3;//rt&NEITHER_MASK; //rt >>= 2; ds = pow(pt(0) - linearKD(rt + 1), 2) + pow(pt(1) - linearKD(rt + 2), 2) + pow(pt(2) - linearKD(rt + 3), 2); if (ds < dss) { dss = ds; ptidx = linearKD[rt]; } ds = 0; switch (r) { case 0: continue;// rt = rt + 11; break; case 1: rt = rt + 4; break; case 3: rt = rt + 4; break;// continue; default: printf("error!"); continue; rt = rt + 4; break; }*/ if (pt(0) < linearKD(rt + 1)) ds += pow(pt(0) - linearKD(rt + 1), 2); else if (pt(0) > linearKD(rt + 4)) ds += pow(pt(0) - linearKD(rt + 4), 2); if (pt(1) < linearKD(rt + 2)) ds += pow(pt(1) - linearKD(rt + 2), 2); else if (pt(1) > linearKD(rt + 5)) ds += pow(pt(1) - linearKD(rt + 5), 2); if (pt(2) < linearKD(rt + 3)) ds += pow(pt(2) - linearKD(rt + 3), 2); else if (pt(2) > linearKD(rt + 6)) ds += pow(pt(2) - linearKD(rt + 6), 2); if (ds < dss&&linearKD[rt]>0) { stack(stackidx) = linearKD[rt] << 2; stack(stackidx) += linearKD[linearKD[rt] + 4] == -1 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] == -1 ? LEFT_MASK : BOTH_MASK;//BOTH_MASK; stackidx++; } if (r == 3) { rt = rt + 7; ds = 0; if (pt(0) < linearKD(rt + 1)) ds += pow(pt(0) - linearKD(rt + 1), 2); else if (pt(0) > linearKD(rt + 4)) ds += pow(pt(0) - linearKD(rt + 4), 2); if (pt(1) < linearKD(rt + 2)) ds += pow(pt(1) - linearKD(rt + 2), 2); else if (pt(1) > linearKD(rt + 5)) ds += pow(pt(1) - linearKD(rt + 5), 2); if (pt(2) < linearKD(rt + 3)) ds += pow(pt(2) - linearKD(rt + 3), 2); else if (pt(2) > linearKD(rt + 6)) ds += pow(pt(2) - linearKD(rt + 6), 2); if (ds < dss) { stack(stackidx) = linearKD[rt] << 2; stack(stackidx) += linearKD[linearKD[rt] + 4] == -1 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] == -1 ? LEFT_MASK : BOTH_MASK; stackidx++; } } } outputs[n * i + kernelIdx] = -(ptidx);//>>2 variation[kernelIdx * LSH_SEARCH_BUFFER + i] = opti & 0x1fff;//get L13, line } #pragma endregion } } } //1.8.23->1.5.10/6.10 __device__ float& hf2float(const short& hf) { int sf = ((0x8000 & hf) << 16) + ((0x7c00 & hf) << 13) + ((0x03ff & hf) << 13); return *(float*)&sf; } __device__ float& uhf2float(const short& uhf) { int sf = ((0xfc00 & uhf) << 13) + ((0x03ff & uhf) << 13); return *(float*)&sf; } union _4bit{ float f; int i; }; __global__ void VectorizedHashing( int *linearsegs, short* offsets, int *outputs, unsigned short *variation, float *linfo, /* linfo optimized for random access */ unsigned char* ptavail, //ignore negative values in lineinfo.x(s) int n ) { int kernelIdx = threadIdx.x + blockIdx.x * blockDim.x; if (kernelIdx < n && availibility_check(kernelIdx)) { __shared__ float shared[16 * THREADS_PER_BLOCK]; int *i_shared = (int *)shared; for (int i = 0; i < 3; i++) pt(i) = linfo[kernelIdx * 3 + i]; pt(0) = fabs(pt(0)); //pt(3) = pt(1) * pt(1) + pt(0) * pt(0) + pt(2) * pt(2); bool finished = false; for (int i = 0; i < LSH_SEARCH_BUFFER; i++) { float dss = INT_MAX; int ptidx = -1; int opti = outputs[n * i + kernelIdx]; if (ispt(opti) || finished)//point or segment? 
{ return; if (opti <= INT32_MIN) continue; else opti = -opti; if (availibility_check(opti)) { dss = sqrt(pow(pt(0) - fabs(linfo[opti *3]), 2) + pow(pt(1) - linfo[opti * 3 + 1], 2) + pow(pt(2) - linfo[opti *3+ 2], 2)); } else { finished = true; continue; } } else { int *vecs = linearsegs + ((opti >> 13)*10); //(opti >> 13 <<3) float *fvecs = (float *)vecs; int base = vecs[6]; float length = 0; int bucket; /* Record format: * 0 1 2 3 4 5 6 7 * base vec - - fp step len offset * Pipeline: * bucket = ((pt - [base])*vec - fp)/step */ #pragma unroll for (int i = 0; i < 3; i++) length += (pt(i) - fvecs[i]) * fvecs[i + 3]; if (length < fvecs[7]) ptidx = base; else { short* this_offsets = offsets + vecs[9]; unsigned short n_bucket = *(unsigned short*)this_offsets; int bucket = (length - fvecs[7]) / fvecs[8]; if (bucket >= n_bucket) ptidx = base + this_offsets[this_offsets[n_bucket] + n_bucket]; else { const int bias = ((float)(this_offsets[bucket + 1] - (bucket == 0 ? 0 : this_offsets[bucket]))) *(length - bucket * fvecs[8]) / fvecs[8]; ptidx = base + this_offsets[(bucket == 0?0:this_offsets[bucket]) + n_bucket + 1 + bias]; } } outputs[n * i + kernelIdx] = -(ptidx);//>>2 variation[kernelIdx * LSH_SEARCH_BUFFER + i] = opti & 0x1fff;//get L13, line } #pragma endregion } } } __global__ void CoupledHeapsFiltration( float *lineinfo, unsigned char *ptavail, int *heap_, float *val_, float *variation_, int *outputs, int n ) {//max lsh_search_buffer = 255 int kernelIdx = threadIdx.x + blockDim.x * blockIdx.x; if (kernelIdx < n && availibility_check(kernelIdx)) { __shared__ unsigned char shared[32 * THREADS_PER_BLOCK];// 32 bytes per thread 2K bytes/block int *heap = heap_ + kernelIdx * HEAP_SIZE - 1; float *val = val_ + kernelIdx * HEAP_SIZE - 1; float *variation = variation_ + kernelIdx * HEAP_SIZE - 1; unsigned short *lines = reinterpret_cast<unsigned short*>(variation_); unsigned char *leaf = shared + threadIdx.x * 32 - 1; int ptr = 1, j, t, i_tmp, i_tmp2, lfptr = 1; float pt[3]; float d_tmp; #pragma unroll for (int i = 0; i < 3; i++) { pt[i] = lineinfo[kernelIdx + i * n]; } pt[0] = fabs(pt[0]); //#pragma unroll for (int i = 0; i < LSH_SEARCH_BUFFER; i++) { int currpt = outputs[i * n + kernelIdx]; if (currpt < 0) { if (currpt <= INT32_MIN) continue; float dss = 0; currpt = -currpt; #pragma unroll for (int k = 0; k < 3; k++) dss += pow(pt[k] - fabs(lineinfo[currpt + k * n]), 2); if (kernelIdx == currpt) continue; dss = sqrt(dss); if (ptr > HEAP_SIZE)//offset { if (val[leaf[1]] <= dss) continue; j = leaf[1]; t = j >> 1; while (j > 1) { d_tmp = val[t]; i_tmp = heap[t]; if (d_tmp <= dss) break; val[j] = d_tmp; heap[j] = i_tmp; j = t; t >>= 1; } val[j] = dss; heap[j] = (((unsigned)lines[kernelIdx * LSH_SEARCH_BUFFER + i]) << 8) + i;// << 18) + idx; //leaf-heap operation i_tmp2 = leaf[1]; j = 2; i_tmp = val[leaf[2]] > val[leaf[3]] ? leaf[2] : leaf[++j]; while (val[i_tmp] > dss) { leaf[j >> 1] = i_tmp; if ((j <<= 1) >= LEAF_OFFSET) break; i_tmp = val[leaf[j]] > val[leaf[j + 1]] ? 
leaf[j] : leaf[++j]; } leaf[j >> 1] = i_tmp2; //end leaf-heap op } else { j = ptr++; t = j >> 1; while (j > 1) { d_tmp = val[t]; i_tmp = heap[t]; if (d_tmp <= dss) break; heap[j] = i_tmp; val[j] = d_tmp; j = t; t >>= 1; } val[j] = dss; heap[j] = (((unsigned)(lines[kernelIdx * LSH_SEARCH_BUFFER + i])) << 8) + i;// << 18) + idx;//seg //leaf_op if (ptr > LEAF_OFFSET + 1) { j = lfptr++; dss = val[ptr - 1]; //i_tmp = j>1?leaf[j >> 1]:0; while (j > 1) { i_tmp = leaf[j >> 1]; if (val[i_tmp] >= dss) break; leaf[j] = i_tmp; j >>= 1; } //if (j <= 32) leaf[j] = ptr - 1; } //end leaf_op } } } if (ptr <= HEAP_SIZE&&ptr >= 1) val[ptr] = heap[ptr] = -1; ptr--; #pragma region variation //bool signi = pt[0] > 0, signj; int starti = 0, endi = 0; while (starti < HALF_SAMPLE + 1 && (kernelIdx - starti > 0) && lineinfo[kernelIdx - starti] > 0) starti++; while (endi < HALF_SAMPLE + 1 && kernelIdx + endi<n&& lineinfo[kernelIdx + endi] > 0) endi++; //#pragma unroll for (int i = 1; i <= ptr; i++) { int currptj = -outputs[n*(heap[i] & 0xff) + kernelIdx]; heap[i] >>= 8;//corrected float current_variation = 0; if (currptj < 0) break; int startj = 1, endj = 1; while (startj < starti &&currptj - startj >0 && lineinfo[currptj - startj] > 0) { float di = 0; di += pow(fabs(lineinfo[kernelIdx - startj]) - fabs(lineinfo[currptj - startj]) - (pt[0] - fabs(lineinfo[currptj])), 2); #pragma unroll for (int i = 1; i < 3; i++) di += pow(lineinfo[kernelIdx - startj + i *n] - lineinfo[currptj - startj + i * n] - (pt[i] - lineinfo[currptj + i * n]), 2); current_variation += sqrt(di);//pow(di, 2); startj++; } //endj = 1; while (endj < endi && currptj + endj<n&& lineinfo[currptj + endj]>0) { float di = 0; di += pow(fabs(lineinfo[kernelIdx + endj]) - fabs(lineinfo[currptj + endj]) - (pt[0] - fabs(lineinfo[currptj])), 2); #pragma unroll for (int i = 1; i < 3; i++) di += pow(lineinfo[kernelIdx + endj + i *n] - lineinfo[currptj + endj + i * n] - (pt[i] - lineinfo[currptj + i * n]), 2); //di = sqrt(di); //current_variation += sqrt(di);// pow(di - dist, 2); endj++; } /*if (endj + startj) current_variation /= (float)(endj + startj); else current_variation = 1; */ //current_variation += .1 * (2 * HALF_SAMPLE - (endj + startj) + 2) ;//__test__:1 if(!(endj+startj)) current_variation /= (float)(endj + startj); variation[i] = current_variation;// -val[i];// *(val[i] + 100); } #pragma endregion } } __global__ void UpdateLsh(int *buckets, int target) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < 800) { int end = buckets[i + 1] - buckets[i]; buckets += buckets[i]; for (int j = 0; j < end;) { if (buckets[j] == target) buckets[j] = -buckets[j]; j += buckets[j + 1] + 2; } } } __global__ void RollbackLsh(int *buckets) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < 800) { int end = buckets[i + 1]; buckets += buckets[i]; for (int j = 0; j < end; j++) if (buckets[j]<0) buckets[j] = -buckets[j]; } } __global__ void Avg(float *similarity, int n, float *avg_back) { int i = threadIdx.x + blockDim.x * blockIdx.x;//(2,64),256 double sum = 0; __shared__ double reduced[32]; int wid = threadIdx.y >> 5; int lane = threadIdx.y - (wid << 5); for (; i < n; i += blockDim.x * gridDim.x) { __syncthreads(); sum = similarity[HEAP_SIZE * i + threadIdx.y]; for (int j = 16; j >= 1; j >>= 1) { sum += __shfl_down(sum, j); } if (lane == 0) { reduced[wid + threadIdx.x * 2] = sum; } __syncthreads(); if (threadIdx.y == 0) { atomicAdd(avg_back, (reduced[threadIdx.x * 2] + reduced[threadIdx.x * 2 + 1])); } } } __global__ void saliency_1(float *similarity, float 
*distance, float *output, int n, float c = 3.f) { int i = threadIdx.x + blockDim.x * blockIdx.x;//(2,64),256 float sum = 0; __shared__ float reduced[2]; int wid = threadIdx.y >> 5; int lane = threadIdx.y - (wid << 5); sum = 0; float sum_divisor = 0; for (; i < n; i += blockDim.x * gridDim.x) { sum = similarity[HEAP_SIZE*i+threadIdx.x]/((1.f+.5*distance[HEAP_SIZE*i + threadIdx.x])); for (int j = 16; j >= 1; j >>= 1) { sum += __shfl_down(sum, j); } if (lane == 0 && wid) { reduced[threadIdx.x * 2] = sum; } __syncthreads(); if (threadIdx.y == 0) { sum += reduced[threadIdx.x * 2]; if (isnan(sum) || isinf(sum)) sum = 0; output[i] = 1 - exp(-sum / 64.); } } sum = 0; } #define _SUM_INV_X_2 1.62918636078388701094 __global__ void AlphaCalc(float *similarity, float *distance, float *output, int n, float avg, float alpha, float _min) { int i = threadIdx.x + blockDim.x * blockIdx.x;//(2,64),256 float sum = 0; float range = 1 - _min; __shared__ float reduced[4]; __shared__ float reduced_divisor[4]; int wid = threadIdx.y >> 5; int lane = threadIdx.y - (wid << 5); sum = 0; float sum_divisor = 0; for (; i < n; i += blockDim.x * gridDim.x) { if (avg != 0) { if (distance[HEAP_SIZE*i + threadIdx.y] == 0) sum_divisor = 0; else sum_divisor = 1 / (distance[HEAP_SIZE*i + threadIdx.y] *distance[HEAP_SIZE*i + threadIdx.y]); sum = 1 - avg*alpha*(pow(2.718281828f, -pow(similarity[HEAP_SIZE * i + threadIdx.y], 2.f) / 2.f) - _min) / range; //sum = sum > 0 ? sum : 0; sum *= sum_divisor; for (int j = 16; j >= 1; j >>= 1) { sum += __shfl_down(sum, j); sum_divisor += __shfl_down(sum_divisor, j); } if (lane == 0) { reduced[wid + threadIdx.x * 2] = sum; reduced_divisor[wid + threadIdx.x * 2] = sum_divisor; } __syncthreads(); if (threadIdx.y == 0) { sum_divisor = reduced_divisor[threadIdx.x * 2] + reduced_divisor[threadIdx.x * 2 + 1]; output[i] = (reduced[threadIdx.x * 2] + reduced[threadIdx.x * 2 + 1]) / sum_divisor; } } } sum = 0; } __global__ void cuMax(float *similarity, int n, unsigned int *max) { int i = blockIdx.x*blockDim.x + threadIdx.x; //natural padding with width of 64 float _max = 0; __shared__ float shared[32]; int warp = threadIdx.x << 5; int lane = threadIdx.x - warp >> 5; for (; i < n * 64; i += blockDim.x*gridDim.x) { _max = similarity[i]; for (int offset = 16; offset >= 1; offset >>= 1) { float tmp = __shfl_down(_max, offset); _max = tmp > _max ? tmp : _max; } if (lane == 0) { shared[warp] = _max; } __syncthreads(); if (warp == 0) { _max = shared[lane]; for (int offset = 16; offset >= 1; offset >>= 1) { float tmp = __shfl_down(_max, offset); _max = tmp > _max ? 
tmp : _max; } if (threadIdx.x == 0) { atomicMax(max, __float_as_uint(_max)); } } } } void cavg(float *similarity, int n, float *avg_back, float *max = 0) { Avg << < 256, dim3(2, 64) >> >(similarity, n, avg_back); if (max) { cuMax << <32, 1024 >> > (similarity, n, (unsigned *)max); } } __global__ void simple_simlarity(float *output, float * variation, float * distances, int N) { int i = threadIdx.x + blockDim.x * blockIdx.x; for (; i < N; i += blockDim.x * gridDim.x) { output[i] = variation[i*HEAP_SIZE]; } } void cuda(float *similarity, float *distance, float *output, int n, float avg = -1, float alpha = -1, float min = -1) { //saliency_1 << <256, dim3(2, 64) >> >(similarity, distance, output, n, alpha); //AlphaCalc << <256, dim3(2, 64) >> >(similarity, distance, output, n, avg, alpha, min); simple_simlarity << <256, 256 >> > (output, similarity, distance, n); } __device__ int pos; //double buffering 2-way __global__ void deletion(float *val, float *variation, int *heap, unsigned char *avail, int *idx, int *p2seg, int n, int p, unsigned char* next_avail, int *next_idx) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= n) return; int pt = idx[i];//n of pt; if (p2seg[pt] == p) return; unsigned char availible = avail[i]; val += pt * HEAP_SIZE; variation += pt*HEAP_SIZE; heap += pt*HEAP_SIZE; for (int i = 0; i <= availible; i++) { if (p2seg[heap[i]] == p) { while (p2seg[heap[--availible]] == p&&availible > 0); int k; if (heap[i] > heap[availible]) { k = i; int j = i << 1; while (heap[j] < heap[availible] && j<availible) { heap[j] = heap[k];//2b verified val[j] = val[k]; variation[j] = variation[k]; k = j; j <<= 1; } //up } else { k = i; int j = i >> 1; while (heap[j] > heap[availible] && k>0) { heap[j] = heap[k];//2b verified val[j] = val[k]; variation[j] = variation[k]; k = j; j >>= 1; } //down } variation[k] = variation[availible]; val[k] = val[availible]; heap[k] = heap[availible]; } } if (availible < 0) return; int next = atomicAdd(&pos, 1); next_idx[next] = pt; next_avail[next] = availible + 1; } __global__ void local_sim(float *sel_pts, int *othersegs, float *out, int *lkd, float *vari_data, int prob_n, int n) { int thread = threadIdx.x + blockIdx.x * blockDim.x; if (thread < prob_n) { static __shared__ float shared[16 * THREADS_PER_BLOCK]; int *i_shared = (int *)shared; //float *stack = shared + threadIdx.x * 16; // float *pt = shared + threadIdx.x * 16; // float *stack = pt + 4; for (int i = 0; i < 3; i++) pt(i) = sel_pts[thread * 3 + i]; /*ushort3 upts; upts.x = (__float_as_uint(pt(0))>>12); */ pt(3) = pt(1) * pt(1) + pt(0) * pt(0) + pt(2) * pt(2); for (int i = 0; i < n; i++) { int *linearKD = lkd + othersegs[i]; //kd-search for segment idx int next = 0; int dim = 1, stackidx = 0; float currdata; float dss = INT_MAX; float ds; int ptidx = -1; while (next != -1) {//using mask to reduce memory usage currdata = pt(dim); ds = pow(pt(0) - linearKD[next + 1], 2) + pow(pt(1) - linearKD[next + 2], 2) + pow(pt(2) - linearKD[next + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[next]; } if (linearKD[next + dim] < currdata) { if (linearKD[next + 4] != -1 && linearKD[next + 11] != -1) stack(stackidx++) = (linearKD[next + 11] << 2) + (linearKD[linearKD[next + 11] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[next + 11] + 11] < 0 ? LEFT_MASK : BOTH_MASK); next = linearKD[next + 4]; } else { if (linearKD[next + 4] != -1) { stack(stackidx++) = (linearKD[next + 4] << 2) + (linearKD[linearKD[next + 4] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[next + 4] + 11] < 0 ? 
LEFT_MASK : BOTH_MASK); next = linearKD[next + 11]; } else break; } dim = (dim++ - 3) ? dim : dim - 3; } /*faster implementation: 1: no pruning; 2: calc the minimum dist while getting into the point; */ //backtrace; int r; int rt; ctn: while (stackidx > 0) { rt = stack(--stackidx); r = rt&NEITHER_MASK; rt >>= 2; ds = pow(pt(0) - linearKD[rt + 1], 2) + pow(pt(1) - linearKD[rt + 2], 2) + pow(pt(2) - linearKD[rt + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[rt]; } ds = 0; switch (r) { case 0: rt = rt + 11; break; case 1: rt = rt + 4; break; case 3:continue; default: rt = rt + 4; break; } if (pt(0) < linearKD[rt + 1]) ds += pow(pt(0) - linearKD[rt + 1], 2); else if (pt(0) > linearKD[rt + 4]) ds += pow(pt(0) - linearKD[rt + 4], 2); if (pt(1) < linearKD[rt + 2]) ds += pow(pt(1) - linearKD[rt + 2], 2); else if (pt(1) > linearKD[rt + 5]) ds += pow(pt(1) - linearKD[rt + 5], 2); if (pt(2) < linearKD[rt + 3]) ds += pow(pt(2) - linearKD[rt + 3], 2); else if (pt(2) > linearKD[rt + 6]) ds += pow(pt(2) - linearKD[rt + 6], 2); if (ds < dss&&linearKD[rt]>0) { stack(stackidx) = linearKD[rt] << 2; stack(stackidx++) += linearKD[linearKD[rt] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] < 0 ? LEFT_MASK : BOTH_MASK;//BOTH_MASK; } if (r == 2) { rt = rt + 7; ds = 0; if (pt(0) < linearKD[rt + 1]) ds += pow(pt(0) - linearKD[rt + 1], 2); else if (pt(0) > linearKD[rt + 4]) ds += pow(pt(0) - linearKD[rt + 4], 2); if (pt(1) < linearKD[rt + 2]) ds += pow(pt(1) - linearKD[rt + 2], 2); else if (pt(1) > linearKD[rt + 5]) ds += pow(pt(1) - linearKD[rt + 5], 2); if (pt(2) < linearKD[rt + 3]) ds += pow(pt(2) - linearKD[rt + 3], 2); else if (pt(2) > linearKD[rt + 6]) ds += pow(pt(2) - linearKD[rt + 6], 2); if (ds < dss) { stack(stackidx) = linearKD[rt] << 2; stack(stackidx++) += linearKD[linearKD[rt] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] < 0 ? 
LEFT_MASK : BOTH_MASK; } } } //dss = sqrt(dss); #pragma endregion #pragma region AdditionalCalc if (ptidx != -1) { dss += vari_data[ptidx * 4] - pt(0) * vari_data[ptidx * 4 + 1] - pt(1) * vari_data[ptidx * 4 + 2] - pt(2) * vari_data[ptidx * 4 + 3]; atomicAdd(/*(unsigned int *)*/out + i, /*__float_as_uint(*/dss/*)*/); } #pragma endregion //int t = buckets[index]; } } } bool SimTester::isSimilarWithSelf(std::deque<vec3> &si, int siIdx) { float lineLength = g_param.w; int nHalfSample = g_param.nHalfSample; vector<vec3> siSampled; int siPos; if (!sampleLine(si, siIdx, lineLength, nHalfSample, siSampled, siPos)) return false; vec3 p = siSampled[nHalfSample]; int lowID, highID; findIdxRange(si, p, g_param.dMin, lowID, highID); deque<vec3>& sj = si; vec3 q; int sjIdx = -1; float min_dist = FLT_MAX; for (int j = 0; j< sj.size(); j++) { if (j >= lowID && j <= highID) continue; float l = length(p - sj[j]); if (l < min_dist) { q = sj[j]; min_dist = l; sjIdx = j; } } if (min_dist >= g_param.dSelfsep || sjIdx == -1) return false; // sample line vector<vec3> sjSampled; int sjPos; if (!sampleLine(sj, sjIdx, lineLength, nHalfSample, sjSampled, sjPos)) return false; // enough points to compare float term1 = (siSampled[nHalfSample] - sjSampled[nHalfSample]).length();//min_dist; float term2 = 0.0f; for (int i = 0; i < siSampled.size(); ++i) { float a = length(siSampled[i] - sjSampled[i]); term2 += abs(a - term1); } float alpha = 5; term2 = alpha * term2 / siSampled.size(); if ((term1 + term2) < g_param.dSelfsep) return true; return false; } bool SimTester::self_line_similarty(std::vector<vec3> &si_tmp, int id) { vec3 p = si_tmp[id]; vec3 q = si_tmp[0]; int compare_id = -1; float min_dist = 100000;; for (int j = 0; j<si_tmp.size() - g_param.w / 2.0f; j++) if (min_dist > length(p - si_tmp[j]) && length(p - si_tmp[j]) > g_param.dMin) { min_dist = length(p - si_tmp[j]); q = si_tmp[j]; compare_id = j; } if (compare_id == -1) return false; if (compare_id < g_param.w / 2 || compare_id > si_tmp.size() - g_param.w / 2) return false; std::vector<vec3> si; std::vector<vec3> sj; for (int i = id - g_param.w / 2.0f; i<id + g_param.w / 2.0f; i++) si.push_back(si_tmp[i]); for (int i = compare_id - g_param.w / 2.0f; i<compare_id + g_param.w / 2.0f; i++) sj.push_back(si_tmp[i]); float term1 = length(p - q); float term2 = 0.0f; float a; for (int k = 0; k<si.size(); k++) { a = length(si[k] - sj[k]); term2 += abs(a - term1); } term2 = g_param.alpha * term2 / si.size(); if ((term1 + term2) > g_param.dSelfsep) return true; return false; } bool SimTester::sampleLine(const std::deque<vec3>& line, int idx, float lineLength, int nHalfSample, vector<vec3>& result, int& idxPos) { if (idx<0 || idx >= line.size()) return false; float segmentLength = lineLength / (nHalfSample * 2); vector<vec3> buffer[2]; float totLength[2] = { 0, 0 }; int idxDir[2] = { 1, -1 }; int idxBound[2] = { line.size() - 1, 0 }; for (int ithDir = 0; ithDir<2; ++ithDir) { buffer[ithDir].reserve(nHalfSample * 2 + 1); if (idx != idxBound[ithDir]) { int thisIdx = idx, nextIdx = idx + idxDir[ithDir]; vec3 curPnt = line[thisIdx]; vec3 curDir = line[nextIdx] - curPnt; float allocateLength = curDir.length(); curDir /= allocateLength; while (buffer[ithDir].size() < nHalfSample * 2 + 1) { if (totLength[ithDir] > allocateLength) { nextIdx += idxDir[ithDir]; thisIdx += idxDir[ithDir]; if (nextIdx >= line.size() || nextIdx < 0) break; vec3 delta = line[nextIdx] - line[thisIdx]; float deltaLength = delta.length(); float remainLength = totLength[ithDir] - allocateLength; 
allocateLength += deltaLength; curDir = delta / deltaLength; curPnt = line[thisIdx] + curDir * remainLength; } else { buffer[ithDir].push_back(curPnt); curPnt += curDir * segmentLength; totLength[ithDir] += segmentLength; } } totLength[ithDir] -= segmentLength; } else buffer[ithDir].push_back(line[idx]); } // line is too short if (buffer[0].size() + buffer[1].size() < nHalfSample * 2 + 2) return false; int nSample; int validData[2] = { nHalfSample, nHalfSample }; for (int i = 0; i < 2; ++i) { nSample = buffer[i].size() - 1; if (nSample < nHalfSample) { validData[i] = nSample; validData[1 - i] += nHalfSample - nSample; } } result.clear(); result.reserve(nHalfSample * 2 + 1); for (int i = validData[1]; i > 0; i--) result.push_back(buffer[1][i]); idxPos = result.size(); for (int i = 0; i <= validData[0]; i++) result.push_back(buffer[0][i]); return true; } bool SimTester::MysampleLine(const std::deque<vec3>& line, int idx, int nHalfSample, vector<vec3>& result) { if (idx < nHalfSample || line.size() - idx - 1 < nHalfSample) return false; result.resize(nHalfSample * 2 + 1); for (int i = 0; i < 2 * nHalfSample + 1; ++i) result[i] = line[i + idx - nHalfSample]; return true; } bool SimTester::findIdxRange(const std::deque<vec3>&line, const vec3& centerPnt, float radius, int& lowID, int& highID) { lowID = 0; highID = line.size(); int i; int centerID[2] = { 0, line.size() - 1 }; float initDist[2] = { 0, 0 }; float minDist = FLT_MAX; for (i = 0; i < line.size() - 1; ++i) { vec3 d1 = line[i + 1] - line[i]; vec3 d2 = (centerPnt - line[i]); float t = d2.dot(d1) / d1.dot(d1); t = min(1.0, max(0.0, t)); vec3 td1 = t * d1; float dist = (d2 - td1).length(); if (dist < minDist) { minDist = dist; centerID[0] = i; centerID[1] = i + 1; initDist[0] = td1.length(); initDist[1] = d1.length() - initDist[0]; } } for (i = centerID[0] - 1; i > 0; --i) { initDist[0] += (line[i] - line[i + 1]).length(); if (initDist[0] >= radius) { lowID = i; break; } } for (i = centerID[1] + 1; i < line.size(); ++i) { initDist[1] += (line[i] - line[i - 1]).length(); if (initDist[1] >= radius) { highID = i; break; } } return true; }
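The heap bookkeeping in CudaSimilarWithLines and CoupledHeapsFiltration above appears to keep the HEAP_SIZE closest candidates per point in a 1-based min-heap on distance (val/heap), coupled with a secondary max-heap over its leaves (leaf) so the current worst entry can be located and replaced cheaply once the heap is full. Because the flattened layout makes that hard to follow, the sketch below shows the same bounded k-nearest selection in its simplest form, using a single max-heap whose root is the current worst. This is a reading aid only, not the file's data structure: knn_push, knn_heapify_down and knn_swap are hypothetical helper names, and k would correspond to HEAP_SIZE (64) in the kernels above.

// Minimal sketch: keep the k smallest distances seen so far in a max-heap (root = worst).
__host__ __device__ inline void knn_swap(float* a, float* b, int* x, int* y) {
    float td = *a; *a = *b; *b = td;
    int   ti = *x; *x = *y; *y = ti;
}

// Restore the max-heap property after the root has been overwritten.
__host__ __device__ void knn_heapify_down(float* dist, int* id, int size) {
    int i = 0;
    for (;;) {
        int l = 2 * i + 1, r = l + 1, largest = i;
        if (l < size && dist[l] > dist[largest]) largest = l;
        if (r < size && dist[r] > dist[largest]) largest = r;
        if (largest == i) break;
        knn_swap(&dist[i], &dist[largest], &id[i], &id[largest]);
        i = largest;
    }
}

// Offer one candidate; dist/id end up holding the k smallest distances offered so far.
__host__ __device__ void knn_push(float* dist, int* id, int* size, int k, float d, int candidate) {
    if (*size < k) {                          // not full yet: append and sift the entry up
        int i = (*size)++;
        dist[i] = d; id[i] = candidate;
        while (i > 0) {
            int p = (i - 1) / 2;
            if (dist[p] >= dist[i]) break;
            knn_swap(&dist[p], &dist[i], &id[p], &id[i]);
            i = p;
        }
    } else if (d < dist[0]) {                 // full: replace the current worst (the root)
        dist[0] = d; id[0] = candidate;
        knn_heapify_down(dist, id, *size);
    }
}

Offering every candidate once leaves dist/id holding the k nearest hits, which is the invariant the kernels above rely on when they later walk heap[1..ptr]; the file's coupled min-heap/leaf scheme trades this simplicity for a root that is the closest rather than the farthest hit.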
02e88ccb9ea9686360a8d49ccb9c19221c805f5e.cu
// CUDA runtime �� + CUBLAS �� #define HALF_ENABLE_CPP11_CMATH #include "cuda_runtime.h" #include "cublas_v2.h" #include<device_launch_parameters.h> #include<thrust/host_vector.h> #include<thrust/device_vector.h> #include "SimTester.h" #include <iostream> #ifdef __INTELLISENSE__ #define __CUDACC__ #include <device_functions.h> #undef __constant__ #undef __global__ #undef __shared__ #undef __device__ #define __global__ #define __shared__ #define __device__ #define __constant__ #endif // __INTELLISENSE__ //#include <cuda_fp16.h> //#include "float.hpp" //cuda ���� #define BUCKET_NUM 100 #define STACK_SPACE 96 #define SAVED_TREES 128 #define HEAP_SIZE 64 #define LEAF_OFFSET 32 #define RIGHT_MASK 0 #define LEFT_MASK 1 #define BOTH_MASK 2 #define NEITHER_MASK 3 #define HALF_SAMPLE 8/*distance based->curvature based*/ #define CROP_LAST6 0xffc0 #define GET_LAST6 0x3f #undef max #undef min //#define r2 dslf*dslf extern size_t pitch; __constant__ float d_hash[49]; __constant__ float epsilon[1]; void constant_cpy(float *linearhash, float _epsilon) { cudaMemcpyToSymbol(d_hash, linearhash, 48 * sizeof(float)); cudaMemcpyToSymbol(epsilon, &_epsilon, sizeof(float)); } //template <bool add = false> //change heap to unsigned int; __global__ void cuDeterminingAvaillines(int *avail, float *val, int n) { int i = threadIdx.x + blockIdx.x *blockDim.x; if (i < n) { val += i*HEAP_SIZE; if (*val > 0) { int j; for (j = 0; j < 64; j++) if (val[j] <= 0) break; avail[i] = j; } else avail[i] = 0; } } __global__ void CudaSimilarWithLines( int n, //n of pts int *lineinfo, float *h_all, //line information int *buckets, float *segments, //LSH int *heap, float *val, float *variation, //outputs int *searched, //massive tmp int *lkd, int*id, //Kdtree bool *pt_availablility ) { int pidx = blockIdx.x*blockDim.x + threadIdx.x; if (pidx >= n) return; if (!pt_availablility[pidx]) return; int *lineinfo_ptr = lineinfo + (pidx << 1); if ((pidx - *lineinfo_ptr) < HALF_SAMPLE || *(lineinfo_ptr + 1) - pidx + *lineinfo_ptr - 1 < HALF_SAMPLE) return; float d_tmp; float *si, *siSampled; float *sjSampled; short tmp[8]; val += pidx*HEAP_SIZE - 1; heap += pidx*HEAP_SIZE - 1; variation += pidx * HEAP_SIZE - 1; searched += pidx*SAVED_TREES; si = h_all + 3 * *lineinfo_ptr; siSampled = si + 3 * ((pidx - *lineinfo_ptr) - HALF_SAMPLE); int l[32]; unsigned int stack[16]; int *leaf = l - 1; float pt[3];//= { siSampled[3 * HALF_SAMPLE] ,siSampled[3 * HALF_SAMPLE + 1] ,siSampled[3 * HALF_SAMPLE + 2] }; pt[0] = siSampled[15]; pt[1] = siSampled[16];//siSampled[3 * HALF_SAMPLE];// siSampled[3 * HALF_SAMPLE + 1]; pt[2] = siSampled[17];//siSampled[3 * HALF_SAMPLE];// siSampled[3 * HALF_SAMPLE + 2]; int i_tmp = 0; #pragma unroll for (int i = 0; i < 8; i++, i_tmp += 6) { d_tmp = pt[0] * d_hash[i_tmp]; d_tmp += pt[1] * d_hash[i_tmp + 1]; d_tmp += pt[2] * d_hash[i_tmp + 2]; d_tmp += d_hash[i_tmp + 3]; d_tmp -= d_hash[i_tmp + 4]; d_tmp /= d_hash[i_tmp + 5]; tmp[i] = d_tmp; if (tmp[i] < 0) tmp[i] = 0; else if (tmp[i] >= BUCKET_NUM) tmp[i] = BUCKET_NUM - 1; } int size = 0; bool dir = true; int dither = 0; float x, y, z, min, a, v_tmp; int ptr = 1, lfptr = 1; int lob = 0, rob = 0, toi; int index, end, idx, i_tmp2; while (size < STACK_SPACE && dither < BUCKET_NUM) //infinite loop when size > remaining segmemts; { #pragma region LSH toi = 1; #pragma unroll for (int i = 0; i < 8; i++) { //int i = 0; toi <<= 1; index = tmp[i] + dir ? 
dither : -dither; if (index < 0) { if ((lob&toi) && !dir) goto finalize; index = 0; lob |= toi; } else if (index >= BUCKET_NUM) { if ((rob&toi) && dir) goto finalize; index = BUCKET_NUM - 1; rob |= toi; }//dithering index += 100 * i; end = buckets[index + 1]; index = buckets[index]; if (index < 0) goto finalize;// blank bucket - attention needed on linearization found: while (index < end) { if (buckets[index] < 0) { index += buckets[index + 1] + 2; goto found; } for (int j = 0; j < size; j++) if (buckets[index] == searched[j]) { index += buckets[index + 1] + 2; goto found; } searched[size++] = buckets[index]; if (buckets[index + 1] > 1) { min = INT_MAX; i_tmp = index + buckets[index + 1] + 2; for (int j = index + 2; j < i_tmp; j++) { i_tmp2 = buckets[j] * 7; x = pt[0] - segments[i_tmp2]; y = pt[1] - segments[i_tmp2 + 1]; z = pt[2] - segments[i_tmp2 + 2]; d_tmp = 0; d_tmp += x*segments[i_tmp2 + 3]; d_tmp = 0; d_tmp += y*segments[i_tmp2 + 4]; d_tmp = 0; d_tmp += z*segments[i_tmp2 + 5]; d_tmp /= segments[i_tmp2 + 6];//padding if (d_tmp <= 0) d_tmp = x*x + y*y + z*z; else if (d_tmp >= 1) d_tmp = pow(x + segments[i_tmp2], 2) + pow(y + segments[i_tmp2 + 1], 2) + pow(z + segments[i_tmp2 + 2], 2);//d_tmp = segments[j * 8 + 7]; else d_tmp = pow(x + d_tmp*segments[i_tmp2], 2) + pow(y + d_tmp*segments[i_tmp2 + 1], 2) + pow(z + d_tmp*segments[i_tmp2 + 2], 2); if (d_tmp < min) { min = d_tmp; idx = buckets[j]; } } } else//blank bucket? idx = buckets[index + 2]; #pragma endregion #pragma region KD-Tree int *linearKD = lkd + id[idx]; int next = 0; int dim = 1, stackidx = 0; float currdata; float dss = INT_MAX; float ds; int ptidx = -1; while (next != -1) {//using mask to reduce memory usage currdata = pt[dim]; ds = pow(pt[0] - linearKD[next + 1], 2) + pow(pt[1] - linearKD[next + 2], 2) + pow(pt[2] - linearKD[next + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[next]; } if (linearKD[next + dim] < currdata) { if (linearKD[next + 4] != -1 && linearKD[next + 11] != -1) stack[stackidx++] = (linearKD[next + 11] << 2) + (linearKD[linearKD[next + 11] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[next + 11] + 11] < 0 ? LEFT_MASK : BOTH_MASK); next = linearKD[next + 4]; } else { if (linearKD[next + 4] != -1) { stack[stackidx++] = (linearKD[next + 4] << 2) + (linearKD[linearKD[next + 4] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[next + 4] + 11] < 0 ? LEFT_MASK : BOTH_MASK); next = linearKD[next + 11]; } else break; } dim = (dim++ - 3) ? dim : dim - 3; } /*faster implementation: 1: no pruning; 2: calc the minimum dist while getting into the point; */ //backtrace; int r; int rt; ctn: while (stackidx > 0) { rt = stack[--stackidx]; r = rt&NEITHER_MASK; rt >>= 2; ds = pow(pt[0] - linearKD[rt + 1], 2) + pow(pt[1] - linearKD[rt + 2], 2) + pow(pt[2] - linearKD[rt + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[rt]; } ds = 0; switch (r) { case 0: rt = rt + 11; break; case 1: rt = rt + 4; break; case 3:continue; default: rt = rt + 4; break; } if (pt[0] < linearKD[rt + 1]) ds += pow(pt[0] - linearKD[rt + 1], 2); else if (pt[0] > linearKD[rt + 4]) ds += pow(pt[0] - linearKD[rt + 4], 2); if (pt[1] < linearKD[rt + 2]) ds += pow(pt[1] - linearKD[rt + 2], 2); else if (pt[1] > linearKD[rt + 5]) ds += pow(pt[1] - linearKD[rt + 5], 2); if (pt[2] < linearKD[rt + 3]) ds += pow(pt[2] - linearKD[rt + 3], 2); else if (pt[2] > linearKD[rt + 6]) ds += pow(pt[2] - linearKD[rt + 6], 2); if (ds < dss&&linearKD[rt]>0) { stack[stackidx] = linearKD[rt] << 2; stack[stackidx++] += linearKD[linearKD[rt] + 4] < 0 ? 
NEITHER_MASK : linearKD[linearKD[rt] + 11] < 0 ? LEFT_MASK : BOTH_MASK;//BOTH_MASK; } if (r == 2) { rt = rt + 7; ds = 0; if (pt[0] < linearKD[rt + 1]) ds += pow(pt[0] - linearKD[rt + 1], 2); else if (pt[0] > linearKD[rt + 4]) ds += pow(pt[0] - linearKD[rt + 4], 2); if (pt[1] < linearKD[rt + 2]) ds += pow(pt[1] - linearKD[rt + 2], 2); else if (pt[1] > linearKD[rt + 5]) ds += pow(pt[1] - linearKD[rt + 5], 2); if (pt[2] < linearKD[rt + 3]) ds += pow(pt[2] - linearKD[rt + 3], 2); else if (pt[2] > linearKD[rt + 6]) ds += pow(pt[2] - linearKD[rt + 6], 2); if (ds < dss) { stack[stackidx] = linearKD[rt] << 2; stack[stackidx++] += linearKD[linearKD[rt] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] < 0 ? LEFT_MASK : BOTH_MASK; } } } dss = sqrt(dss); #pragma endregion #pragma region AdditionalCalc //int t = buckets[index]; if (ptidx < HALF_SAMPLE || lineinfo[buckets[ptidx + 2] + 1] - ptidx - 1 < HALF_SAMPLE) { index += buckets[index + 1] + 2; continue; } else sjSampled = h_all + (3 * lineinfo[buckets[ptidx + 2]]) + 3 * (ptidx - HALF_SAMPLE); a = 0; //float *sis = pt - 3 * HALF_SAMPLE; //#pragma unroll for (int j = 0; j < 2 * HALF_SAMPLE + 1; j++) a += sqrt(pow(siSampled[3 * j] - sjSampled[3 * j] - (pt[0] - sjSampled[15]), 2) + pow(siSampled[3 * j + 1] - sjSampled[3 * j + 1] - (pt[1] - sjSampled[16]), 2) + pow(siSampled[3 * j + 2] - sjSampled[3 * j + 2] - (pt[2] - sjSampled[17]), 2)); #pragma endregion #pragma region Heap //heap op //*(int*)&val[1]&CROP_LAST6; int j = 0, t; if (ptr > HEAP_SIZE)//offset { if (val[leaf[1]] <= dss) { index += buckets[index + 1] + 2; continue; } j = leaf[1]; t = j >> 1; d_tmp = val[t]; i_tmp = heap[t]; v_tmp = variation[t]; while (j > 1 && d_tmp > dss) { val[j] = d_tmp; heap[j] = i_tmp; variation[j] = v_tmp; j = t; t >>= 1; d_tmp = val[t]; i_tmp = heap[t]; v_tmp = variation[t]; } val[j] = dss; heap[j] = (buckets[index]);// << 18) + idx; variation[j] = 100 * a; //leaf-heap operation i_tmp2 = leaf[1]; j = 2; i_tmp = val[leaf[2]] > val[leaf[3]] ? leaf[2] : leaf[++j]; while (val[i_tmp] > dss) { leaf[j >> 1] = i_tmp; if ((j <<= 1) >= LEAF_OFFSET) break; i_tmp = val[leaf[j]] > val[leaf[j + 1]] ? 
leaf[j] : leaf[++j]; } leaf[j >> 1] = i_tmp2; //end leaf-heap op } else { j = ptr++; t = j >> 1; d_tmp = val[t]; i_tmp = heap[t]; v_tmp = variation[t]; while (j > 1 && d_tmp > dss) { heap[j] = i_tmp; val[j] = d_tmp; variation[j] = v_tmp; j = t; t >>= 1; d_tmp = val[t]; i_tmp = heap[t]; v_tmp = variation[t]; } val[j] = dss; heap[j] = (buckets[index]);// << 18) + idx;//seg variation[j] = 100 * a; //leaf_op if (ptr > LEAF_OFFSET) { j = lfptr++; dss = val[ptr - 1]; i_tmp = leaf[j >> 1]; while (j > 1 && val[i_tmp] < dss) { leaf[j] = i_tmp; j >>= 1; i_tmp = leaf[j >> 1]; } leaf[j] = ptr - 1; } //end leaf_op } #pragma endregion //return heap-as-{size}-nearst-pts; index += buckets[index + 1] + 2; if (size > STACK_SPACE) return; } finalize: if (!dir) dither++; dir = !dir; } } if (ptr <= HEAP_SIZE) val[ptr] = -1; } union reused_t { float fp; uint32_t uint; }; #define THREADS_PER_BLOCK 128 #define LSH_SEARCH_BUFFER 128 /*__test__:2 */ #define MODULO_8_CROPPER 0x7 #define LSB_EXTRACTOR 0x1 /*extract lsb from binary number by a logic and*/ #define UCHAR_MAX 0xff //Macro functions #define pt(i) shared[blockDim.x *(i) + threadIdx.x] #define stack(i) i_shared[blockDim.x *((i) + 3) + threadIdx.x] #define check(i) ((searched[n * ((i)>>3) + ptidx])>>((i)&MODULO_8_CROPPER))&LSB_EXTRACTOR #define mark(i) (searched[n * ((i)>>3) + ptidx]) &= (unsigned char)(UCHAR_MAX - (1<<((i)&MODULO_8_CROPPER))) #define availibility_check(i) (ptavail[((i)>>3)]>>((i)&MODULO_8_CROPPER))&LSB_EXTRACTOR/*12 I32Add-equivalents for bit calculation*/ #define ispt(i) (i)<0 #define linearKD(i) __uint_as_float(linearKD[(i)]) __global__ void LSH_determining_bucket( //unsigned char* searched, int *streamlineoffsets, uchar4 *bucketsforpt, float *ptinfo, int *output, int n ) { int ptidx = blockIdx.x*blockDim.x + threadIdx.x; unsigned char tmp[8]; //max of 256 buckets; uchar4 *parts = reinterpret_cast<uchar4 *> (tmp); float pt[3]; if (ptidx < n) { #pragma unroll for (int i = 0; i < 3; i++) pt[i] = ptinfo[n * i + ptidx]; pt[0] = fabs(pt[0]); float d_tmp;//float int i_tmp = 0; #pragma unroll for (int i = 0; i < 8; i++, i_tmp += 6) { d_tmp = pt[0] * d_hash[i_tmp]; d_tmp += pt[1] * d_hash[i_tmp + 1]; d_tmp += pt[2] * d_hash[i_tmp + 2]; d_tmp += d_hash[i_tmp + 3]; d_tmp -= d_hash[i_tmp + 4]; d_tmp /= d_hash[i_tmp + 5]; d_tmp = d_tmp < 100 ? d_tmp : 100; tmp[i] =(unsigned char) d_tmp > 0 ? 
__float2uint_rn(d_tmp) : 0; //printf("%d %d %f \n", __float2uint_rn(d_tmp), (unsigned int)(d_tmp), d_tmp); } bucketsforpt[ptidx] = parts[0]; bucketsforpt[n + ptidx] = parts[1]; #pragma unroll for (int i = 0; i < LSH_SEARCH_BUFFER; i++) output[n*i + ptidx] = INT32_MIN; } }//preproc; __global__ void LSHSearch( //`output` need to be zeroed before first use; unsigned char *searched, unsigned char *ptavail, int *buckets, float *segments, float *ptinfo, uchar4 *bucketsforpt, int *output, int n, int slotsize ) { int ptidx = blockIdx.x*blockDim.x + threadIdx.x; if (ptidx < n) { __shared__ float shared[THREADS_PER_BLOCK * 3]; unsigned char tmp[8]; //max of 256 buckets; #pragma unroll for (int i = 0; i < 3; i++) pt(i) = ptinfo[ptidx + i * n]; pt(0) = fabs(pt(0)); if (ptidx < n && availibility_check(ptidx)) { int size = 0; float x, y, z, min, a, v_tmp; int ptr = 1, lfptr = 1; //int lob = 0, rob = 0, toi; int index, end, idx, i_tmp2, i_tmp; float d_tmp; reinterpret_cast<uchar4 *> (tmp)[0] = bucketsforpt[ptidx]; reinterpret_cast<uchar4 *> (tmp)[1] = bucketsforpt[ptidx + n]; int dither = 0; bool dir = true; int size1 = -1; int size2 = -2; while (size < LSH_SEARCH_BUFFER && dither <= BUCKET_NUM) { //#pragma unroll for (int i = 0; i < 8; i++) //infinite loop when size > remaining segmemts; { index = tmp[i] + (dir ? dither : -dither); if (index < 0) { continue; } else if (index >= BUCKET_NUM) { continue; }//dithering index += 100 * i; end = buckets[index + 1]; index = buckets[index]; if (index < 0) continue;// blank bucket - attention needed on linearization while (index < end) { if (buckets[index] < 0 || !(check(buckets[index]))) { index += buckets[index + 1] + 2; continue; } mark(buckets[index]); idx = -1; if (buckets[index + 1] > 1) { min = INT_MAX; i_tmp = index + buckets[index + 1] + 2; for (int j = index + 2; j < i_tmp; j++) { i_tmp2 = buckets[j] * 7; x = pt(0) - segments[i_tmp2]; y = pt(1) - segments[i_tmp2 + 1]; z = pt(2) - segments[i_tmp2 + 2]; d_tmp = 0; d_tmp += x*segments[i_tmp2 + 3]; d_tmp = 0; d_tmp += y*segments[i_tmp2 + 4]; d_tmp = 0; d_tmp += z*segments[i_tmp2 + 5]; d_tmp /= segments[i_tmp2 + 6];//padding if (d_tmp <= 0) d_tmp = x*x + y*y + z*z; else if (d_tmp >= 1) d_tmp = pow(x + segments[i_tmp2], 2) + pow(y + segments[i_tmp2 + 1], 2) + pow(z + segments[i_tmp2 + 2], 2);//d_tmp = segments[j * 8 + 7]; else d_tmp = pow(x + d_tmp*segments[i_tmp2], 2) + pow(y + d_tmp*segments[i_tmp2 + 1], 2) + pow(z + d_tmp*segments[i_tmp2 + 2], 2); if (d_tmp < min) { min = d_tmp; idx = buckets[j]; } } } else//blank bucket? 
idx = buckets[index + 2]; while ( ispt(output[size*n + ptidx]) && output[size*n + ptidx] > INT32_MIN && availibility_check(-output[size*n + ptidx]) ) { size++; } if (idx == -1) { index += buckets[index + 1] + 2; continue; } output[size++ * n + ptidx] = (idx << 13) + buckets[index];//H19 seg L13 line //index += index += buckets[index + 1] + 2; if (size >= LSH_SEARCH_BUFFER) return; } } finalize: if (dir) dither++; dir = !dir; } if (size < LSH_SEARCH_BUFFER) output[size *n + ptidx] = INT32_MIN; } } } __global__ void KDSearch( int *lkd, int*id, int *outputs, unsigned short *variation, float *lineinfo, unsigned char* ptavail, //ignore negative values in lineinfo.x(s) int n ) { int kernelIdx = threadIdx.x + blockIdx.x * blockDim.x; if (kernelIdx < n && availibility_check(kernelIdx)) { __shared__ float shared[16 * THREADS_PER_BLOCK]; int *i_shared = (int *)shared; for (int i = 0; i < 3; i++) pt(i) = lineinfo[kernelIdx + i * n]; pt(0) = fabs(pt(0)); //pt(3) = pt(1) * pt(1) + pt(0) * pt(0) + pt(2) * pt(2); bool finished = false; for (int i = 0; i < LSH_SEARCH_BUFFER; i++) { float dss = INT_MAX; int ptidx = -1; int opti = outputs[n * i + kernelIdx]; if (ispt(opti) || finished)//point or segment? { return; if (opti <= INT32_MIN) continue; else opti = -opti; if (availibility_check(opti)) { dss = sqrt(pow(pt(0) - fabs(lineinfo[opti]), 2) + pow(pt(1) - lineinfo[opti + n], 2) + pow(pt(2) - lineinfo[opti + 2 * n], 2)); } else { finished = true; continue; } } else { int *linearKD = lkd + id[(opti >> 13)];//kd-search for segment idx int next = 0, dim = 1, stackidx = 0; int rt = 3; float currdata, ds; while (next != -1) {//using mask to reduce memory usage currdata = pt((dim - 1)); ds = pow(pt(0) - linearKD(next + 1), 2) + pow(pt(1) - linearKD(next + 2), 2) + pow(pt(2) - linearKD(next + 3), 2); if (ds < dss) { dss = ds; ptidx = linearKD[next]; } if (linearKD(next + dim) < currdata) { if (linearKD[next + 4] != -1 && linearKD[next + 11] != -1) { stack(stackidx) = (linearKD[next + 11] << 2) + (linearKD[linearKD[next + 11] + 4] == -1 ? NEITHER_MASK : linearKD[linearKD[next + 11] + 11] == -1 ? LEFT_MASK : BOTH_MASK);//lv 2 opt stackidx++; } next = linearKD[next + 4]; } else { if (linearKD[next + 4] != -1) { stack(stackidx) = (linearKD[next + 4] << 2) + (linearKD[linearKD[next + 4] + 4] == -1 ? NEITHER_MASK : linearKD[linearKD[next + 4] + 11] == -1 ? LEFT_MASK : BOTH_MASK); stackidx++; next = linearKD[next + 11]; } else break; } dim = (dim++ - 3) ? dim : dim - 3; } /*better implementation: Half precision for boxing. 
(16 bytes aligning, 1~2 g.mem fetches) 0 1 2 3 4 5 5 6 6 7 7 8 9 9 10 10 11 11 id x y z (l lmx lmy lmz lMx lMy lMz (r rmx rmy rmz rMx rMy rMz)) */ int r;// backtrace ctn: while (stackidx > 0) { stackidx--; rt = stack(stackidx); r = rt&NEITHER_MASK; rt >>= 2; ds = pow(pt(0) - linearKD[rt + 1], 2) + pow(pt(1) - linearKD[rt + 2], 2) + pow(pt(2) - linearKD[rt + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[rt]; } ds = 0; switch (r) { case 0: rt = rt + 11; break; case 1: rt = rt + 4; break; case 3:continue; default: rt = rt + 4; break; } /*r = linearKD[rt] & 0x3;//rt&NEITHER_MASK; //rt >>= 2; ds = pow(pt(0) - linearKD(rt + 1), 2) + pow(pt(1) - linearKD(rt + 2), 2) + pow(pt(2) - linearKD(rt + 3), 2); if (ds < dss) { dss = ds; ptidx = linearKD[rt]; } ds = 0; switch (r) { case 0: continue;// rt = rt + 11; break; case 1: rt = rt + 4; break; case 3: rt = rt + 4; break;// continue; default: printf("error!"); continue; rt = rt + 4; break; }*/ if (pt(0) < linearKD(rt + 1)) ds += pow(pt(0) - linearKD(rt + 1), 2); else if (pt(0) > linearKD(rt + 4)) ds += pow(pt(0) - linearKD(rt + 4), 2); if (pt(1) < linearKD(rt + 2)) ds += pow(pt(1) - linearKD(rt + 2), 2); else if (pt(1) > linearKD(rt + 5)) ds += pow(pt(1) - linearKD(rt + 5), 2); if (pt(2) < linearKD(rt + 3)) ds += pow(pt(2) - linearKD(rt + 3), 2); else if (pt(2) > linearKD(rt + 6)) ds += pow(pt(2) - linearKD(rt + 6), 2); if (ds < dss&&linearKD[rt]>0) { stack(stackidx) = linearKD[rt] << 2; stack(stackidx) += linearKD[linearKD[rt] + 4] == -1 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] == -1 ? LEFT_MASK : BOTH_MASK;//BOTH_MASK; stackidx++; } if (r == 3) { rt = rt + 7; ds = 0; if (pt(0) < linearKD(rt + 1)) ds += pow(pt(0) - linearKD(rt + 1), 2); else if (pt(0) > linearKD(rt + 4)) ds += pow(pt(0) - linearKD(rt + 4), 2); if (pt(1) < linearKD(rt + 2)) ds += pow(pt(1) - linearKD(rt + 2), 2); else if (pt(1) > linearKD(rt + 5)) ds += pow(pt(1) - linearKD(rt + 5), 2); if (pt(2) < linearKD(rt + 3)) ds += pow(pt(2) - linearKD(rt + 3), 2); else if (pt(2) > linearKD(rt + 6)) ds += pow(pt(2) - linearKD(rt + 6), 2); if (ds < dss) { stack(stackidx) = linearKD[rt] << 2; stack(stackidx) += linearKD[linearKD[rt] + 4] == -1 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] == -1 ? LEFT_MASK : BOTH_MASK; stackidx++; } } } outputs[n * i + kernelIdx] = -(ptidx);//>>2 variation[kernelIdx * LSH_SEARCH_BUFFER + i] = opti & 0x1fff;//get L13, line } #pragma endregion } } } //1.8.23->1.5.10/6.10 __device__ float& hf2float(const short& hf) { int sf = ((0x8000 & hf) << 16) + ((0x7c00 & hf) << 13) + ((0x03ff & hf) << 13); return *(float*)&sf; } __device__ float& uhf2float(const short& uhf) { int sf = ((0xfc00 & uhf) << 13) + ((0x03ff & uhf) << 13); return *(float*)&sf; } union _4bit{ float f; int i; }; __global__ void VectorizedHashing( int *linearsegs, short* offsets, int *outputs, unsigned short *variation, float *linfo, /* linfo optimized for random access */ unsigned char* ptavail, //ignore negative values in lineinfo.x(s) int n ) { int kernelIdx = threadIdx.x + blockIdx.x * blockDim.x; if (kernelIdx < n && availibility_check(kernelIdx)) { __shared__ float shared[16 * THREADS_PER_BLOCK]; int *i_shared = (int *)shared; for (int i = 0; i < 3; i++) pt(i) = linfo[kernelIdx * 3 + i]; pt(0) = fabs(pt(0)); //pt(3) = pt(1) * pt(1) + pt(0) * pt(0) + pt(2) * pt(2); bool finished = false; for (int i = 0; i < LSH_SEARCH_BUFFER; i++) { float dss = INT_MAX; int ptidx = -1; int opti = outputs[n * i + kernelIdx]; if (ispt(opti) || finished)//point or segment? 
{ return; if (opti <= INT32_MIN) continue; else opti = -opti; if (availibility_check(opti)) { dss = sqrt(pow(pt(0) - fabs(linfo[opti *3]), 2) + pow(pt(1) - linfo[opti * 3 + 1], 2) + pow(pt(2) - linfo[opti *3+ 2], 2)); } else { finished = true; continue; } } else { int *vecs = linearsegs + ((opti >> 13)*10); //(opti >> 13 <<3) float *fvecs = (float *)vecs; int base = vecs[6]; float length = 0; int bucket; /* Record format: * 0 1 2 3 4 5 6 7 * base vec - - fp step len offset * Pipeline: * bucket = ((pt - [base])*vec - fp)/step */ #pragma unroll for (int i = 0; i < 3; i++) length += (pt(i) - fvecs[i]) * fvecs[i + 3]; if (length < fvecs[7]) ptidx = base; else { short* this_offsets = offsets + vecs[9]; unsigned short n_bucket = *(unsigned short*)this_offsets; int bucket = (length - fvecs[7]) / fvecs[8]; if (bucket >= n_bucket) ptidx = base + this_offsets[this_offsets[n_bucket] + n_bucket]; else { const int bias = ((float)(this_offsets[bucket + 1] - (bucket == 0 ? 0 : this_offsets[bucket]))) *(length - bucket * fvecs[8]) / fvecs[8]; ptidx = base + this_offsets[(bucket == 0?0:this_offsets[bucket]) + n_bucket + 1 + bias]; } } outputs[n * i + kernelIdx] = -(ptidx);//>>2 variation[kernelIdx * LSH_SEARCH_BUFFER + i] = opti & 0x1fff;//get L13, line } #pragma endregion } } } __global__ void CoupledHeapsFiltration( float *lineinfo, unsigned char *ptavail, int *heap_, float *val_, float *variation_, int *outputs, int n ) {//max lsh_search_buffer = 255 int kernelIdx = threadIdx.x + blockDim.x * blockIdx.x; if (kernelIdx < n && availibility_check(kernelIdx)) { __shared__ unsigned char shared[32 * THREADS_PER_BLOCK];// 32 bytes per thread 2K bytes/block int *heap = heap_ + kernelIdx * HEAP_SIZE - 1; float *val = val_ + kernelIdx * HEAP_SIZE - 1; float *variation = variation_ + kernelIdx * HEAP_SIZE - 1; unsigned short *lines = reinterpret_cast<unsigned short*>(variation_); unsigned char *leaf = shared + threadIdx.x * 32 - 1; int ptr = 1, j, t, i_tmp, i_tmp2, lfptr = 1; float pt[3]; float d_tmp; #pragma unroll for (int i = 0; i < 3; i++) { pt[i] = lineinfo[kernelIdx + i * n]; } pt[0] = fabs(pt[0]); //#pragma unroll for (int i = 0; i < LSH_SEARCH_BUFFER; i++) { int currpt = outputs[i * n + kernelIdx]; if (currpt < 0) { if (currpt <= INT32_MIN) continue; float dss = 0; currpt = -currpt; #pragma unroll for (int k = 0; k < 3; k++) dss += pow(pt[k] - fabs(lineinfo[currpt + k * n]), 2); if (kernelIdx == currpt) continue; dss = sqrt(dss); if (ptr > HEAP_SIZE)//offset { if (val[leaf[1]] <= dss) continue; j = leaf[1]; t = j >> 1; while (j > 1) { d_tmp = val[t]; i_tmp = heap[t]; if (d_tmp <= dss) break; val[j] = d_tmp; heap[j] = i_tmp; j = t; t >>= 1; } val[j] = dss; heap[j] = (((unsigned)lines[kernelIdx * LSH_SEARCH_BUFFER + i]) << 8) + i;// << 18) + idx; //leaf-heap operation i_tmp2 = leaf[1]; j = 2; i_tmp = val[leaf[2]] > val[leaf[3]] ? leaf[2] : leaf[++j]; while (val[i_tmp] > dss) { leaf[j >> 1] = i_tmp; if ((j <<= 1) >= LEAF_OFFSET) break; i_tmp = val[leaf[j]] > val[leaf[j + 1]] ? 
leaf[j] : leaf[++j]; } leaf[j >> 1] = i_tmp2; //end leaf-heap op } else { j = ptr++; t = j >> 1; while (j > 1) { d_tmp = val[t]; i_tmp = heap[t]; if (d_tmp <= dss) break; heap[j] = i_tmp; val[j] = d_tmp; j = t; t >>= 1; } val[j] = dss; heap[j] = (((unsigned)(lines[kernelIdx * LSH_SEARCH_BUFFER + i])) << 8) + i;// << 18) + idx;//seg //leaf_op if (ptr > LEAF_OFFSET + 1) { j = lfptr++; dss = val[ptr - 1]; //i_tmp = j>1?leaf[j >> 1]:0; while (j > 1) { i_tmp = leaf[j >> 1]; if (val[i_tmp] >= dss) break; leaf[j] = i_tmp; j >>= 1; } //if (j <= 32) leaf[j] = ptr - 1; } //end leaf_op } } } if (ptr <= HEAP_SIZE&&ptr >= 1) val[ptr] = heap[ptr] = -1; ptr--; #pragma region variation //bool signi = pt[0] > 0, signj; int starti = 0, endi = 0; while (starti < HALF_SAMPLE + 1 && (kernelIdx - starti > 0) && lineinfo[kernelIdx - starti] > 0) starti++; while (endi < HALF_SAMPLE + 1 && kernelIdx + endi<n&& lineinfo[kernelIdx + endi] > 0) endi++; //#pragma unroll for (int i = 1; i <= ptr; i++) { int currptj = -outputs[n*(heap[i] & 0xff) + kernelIdx]; heap[i] >>= 8;//corrected float current_variation = 0; if (currptj < 0) break; int startj = 1, endj = 1; while (startj < starti &&currptj - startj >0 && lineinfo[currptj - startj] > 0) { float di = 0; di += pow(fabs(lineinfo[kernelIdx - startj]) - fabs(lineinfo[currptj - startj]) - (pt[0] - fabs(lineinfo[currptj])), 2); #pragma unroll for (int i = 1; i < 3; i++) di += pow(lineinfo[kernelIdx - startj + i *n] - lineinfo[currptj - startj + i * n] - (pt[i] - lineinfo[currptj + i * n]), 2); current_variation += sqrt(di);//pow(di, 2); startj++; } //endj = 1; while (endj < endi && currptj + endj<n&& lineinfo[currptj + endj]>0) { float di = 0; di += pow(fabs(lineinfo[kernelIdx + endj]) - fabs(lineinfo[currptj + endj]) - (pt[0] - fabs(lineinfo[currptj])), 2); #pragma unroll for (int i = 1; i < 3; i++) di += pow(lineinfo[kernelIdx + endj + i *n] - lineinfo[currptj + endj + i * n] - (pt[i] - lineinfo[currptj + i * n]), 2); //di = sqrt(di); //current_variation += sqrt(di);// pow(di - dist, 2); endj++; } /*if (endj + startj) current_variation /= (float)(endj + startj); else current_variation = 1; */ //current_variation += .1 * (2 * HALF_SAMPLE - (endj + startj) + 2) ;//__test__:1 短线补偿策略 if(!(endj+startj)) current_variation /= (float)(endj + startj); variation[i] = current_variation;// -val[i];// *(val[i] + 100); } #pragma endregion } } __global__ void UpdateLsh(int *buckets, int target) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < 800) { int end = buckets[i + 1] - buckets[i]; buckets += buckets[i]; for (int j = 0; j < end;) { if (buckets[j] == target) buckets[j] = -buckets[j]; j += buckets[j + 1] + 2; } } } __global__ void RollbackLsh(int *buckets) { int i = threadIdx.x + blockIdx.x*blockDim.x; if (i < 800) { int end = buckets[i + 1]; buckets += buckets[i]; for (int j = 0; j < end; j++) if (buckets[j]<0) buckets[j] = -buckets[j]; } } __global__ void Avg(float *similarity, int n, float *avg_back) { int i = threadIdx.x + blockDim.x * blockIdx.x;//(2,64),256 double sum = 0; __shared__ double reduced[32]; int wid = threadIdx.y >> 5; int lane = threadIdx.y - (wid << 5); for (; i < n; i += blockDim.x * gridDim.x) { __syncthreads(); sum = similarity[HEAP_SIZE * i + threadIdx.y]; for (int j = 16; j >= 1; j >>= 1) { sum += __shfl_down(sum, j); } if (lane == 0) { reduced[wid + threadIdx.x * 2] = sum; } __syncthreads(); if (threadIdx.y == 0) { atomicAdd(avg_back, (reduced[threadIdx.x * 2] + reduced[threadIdx.x * 2 + 1])); } } } __global__ void saliency_1(float *similarity, 
float *distance, float *output, int n, float c = 3.f) { int i = threadIdx.x + blockDim.x * blockIdx.x;//(2,64),256 float sum = 0; __shared__ float reduced[2]; int wid = threadIdx.y >> 5; int lane = threadIdx.y - (wid << 5); sum = 0; float sum_divisor = 0; for (; i < n; i += blockDim.x * gridDim.x) { sum = similarity[HEAP_SIZE*i+threadIdx.x]/((1.f+.5*distance[HEAP_SIZE*i + threadIdx.x])); for (int j = 16; j >= 1; j >>= 1) { sum += __shfl_down(sum, j); } if (lane == 0 && wid) { reduced[threadIdx.x * 2] = sum; } __syncthreads(); if (threadIdx.y == 0) { sum += reduced[threadIdx.x * 2]; if (isnan(sum) || isinf(sum)) sum = 0; output[i] = 1 - exp(-sum / 64.); } } sum = 0; } #define _SUM_INV_X_2 1.62918636078388701094 __global__ void AlphaCalc(float *similarity, float *distance, float *output, int n, float avg, float alpha, float _min) { int i = threadIdx.x + blockDim.x * blockIdx.x;//(2,64),256 float sum = 0; float range = 1 - _min; __shared__ float reduced[4]; __shared__ float reduced_divisor[4]; int wid = threadIdx.y >> 5; int lane = threadIdx.y - (wid << 5); sum = 0; float sum_divisor = 0; for (; i < n; i += blockDim.x * gridDim.x) { if (avg != 0) { if (distance[HEAP_SIZE*i + threadIdx.y] == 0) sum_divisor = 0; else sum_divisor = 1 / (distance[HEAP_SIZE*i + threadIdx.y] *distance[HEAP_SIZE*i + threadIdx.y]); sum = 1 - avg*alpha*(pow(2.718281828f, -pow(similarity[HEAP_SIZE * i + threadIdx.y], 2.f) / 2.f) - _min) / range; //sum = sum > 0 ? sum : 0; sum *= sum_divisor; for (int j = 16; j >= 1; j >>= 1) { sum += __shfl_down(sum, j); sum_divisor += __shfl_down(sum_divisor, j); } if (lane == 0) { reduced[wid + threadIdx.x * 2] = sum; reduced_divisor[wid + threadIdx.x * 2] = sum_divisor; } __syncthreads(); if (threadIdx.y == 0) { sum_divisor = reduced_divisor[threadIdx.x * 2] + reduced_divisor[threadIdx.x * 2 + 1]; output[i] = (reduced[threadIdx.x * 2] + reduced[threadIdx.x * 2 + 1]) / sum_divisor; } } } sum = 0; } __global__ void cuMax(float *similarity, int n, unsigned int *max) { int i = blockIdx.x*blockDim.x + threadIdx.x; //natural padding with width of 64 float _max = 0; __shared__ float shared[32]; int warp = threadIdx.x << 5; int lane = threadIdx.x - warp >> 5; for (; i < n * 64; i += blockDim.x*gridDim.x) { _max = similarity[i]; for (int offset = 16; offset >= 1; offset >>= 1) { float tmp = __shfl_down(_max, offset); _max = tmp > _max ? tmp : _max; } if (lane == 0) { shared[warp] = _max; } __syncthreads(); if (warp == 0) { _max = shared[lane]; for (int offset = 16; offset >= 1; offset >>= 1) { float tmp = __shfl_down(_max, offset); _max = tmp > _max ? 
tmp : _max; } if (threadIdx.x == 0) { atomicMax(max, __float_as_uint(_max)); } } } } void cavg(float *similarity, int n, float *avg_back, float *max = 0) { Avg << < 256, dim3(2, 64) >> >(similarity, n, avg_back); if (max) { cuMax << <32, 1024 >> > (similarity, n, (unsigned *)max); } } __global__ void simple_simlarity(float *output, float * variation, float * distances, int N) { int i = threadIdx.x + blockDim.x * blockIdx.x; for (; i < N; i += blockDim.x * gridDim.x) { output[i] = variation[i*HEAP_SIZE]; } } void cuda(float *similarity, float *distance, float *output, int n, float avg = -1, float alpha = -1, float min = -1) { //saliency_1 << <256, dim3(2, 64) >> >(similarity, distance, output, n, alpha); //AlphaCalc << <256, dim3(2, 64) >> >(similarity, distance, output, n, avg, alpha, min); simple_simlarity << <256, 256 >> > (output, similarity, distance, n); } __device__ int pos; //double buffering 2-way __global__ void deletion(float *val, float *variation, int *heap, unsigned char *avail, int *idx, int *p2seg, int n, int p, unsigned char* next_avail, int *next_idx) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >= n) return; int pt = idx[i];//n of pt; if (p2seg[pt] == p) return; unsigned char availible = avail[i]; val += pt * HEAP_SIZE; variation += pt*HEAP_SIZE; heap += pt*HEAP_SIZE; for (int i = 0; i <= availible; i++) { if (p2seg[heap[i]] == p) { while (p2seg[heap[--availible]] == p&&availible > 0); int k; if (heap[i] > heap[availible]) { k = i; int j = i << 1; while (heap[j] < heap[availible] && j<availible) { heap[j] = heap[k];//2b verified val[j] = val[k]; variation[j] = variation[k]; k = j; j <<= 1; } //up } else { k = i; int j = i >> 1; while (heap[j] > heap[availible] && k>0) { heap[j] = heap[k];//2b verified val[j] = val[k]; variation[j] = variation[k]; k = j; j >>= 1; } //down } variation[k] = variation[availible]; val[k] = val[availible]; heap[k] = heap[availible]; } } if (availible < 0) return; int next = atomicAdd(&pos, 1); next_idx[next] = pt; next_avail[next] = availible + 1; } __global__ void local_sim(float *sel_pts, int *othersegs, float *out, int *lkd, float *vari_data, int prob_n, int n) { int thread = threadIdx.x + blockIdx.x * blockDim.x; if (thread < prob_n) { static __shared__ float shared[16 * THREADS_PER_BLOCK]; int *i_shared = (int *)shared; //float *stack = shared + threadIdx.x * 16; // float *pt = shared + threadIdx.x * 16; // float *stack = pt + 4; for (int i = 0; i < 3; i++) pt(i) = sel_pts[thread * 3 + i]; /*ushort3 upts; upts.x = (__float_as_uint(pt(0))>>12); */ pt(3) = pt(1) * pt(1) + pt(0) * pt(0) + pt(2) * pt(2); for (int i = 0; i < n; i++) { int *linearKD = lkd + othersegs[i]; //kd-search for segment idx int next = 0; int dim = 1, stackidx = 0; float currdata; float dss = INT_MAX; float ds; int ptidx = -1; while (next != -1) {//using mask to reduce memory usage currdata = pt(dim); ds = pow(pt(0) - linearKD[next + 1], 2) + pow(pt(1) - linearKD[next + 2], 2) + pow(pt(2) - linearKD[next + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[next]; } if (linearKD[next + dim] < currdata) { if (linearKD[next + 4] != -1 && linearKD[next + 11] != -1) stack(stackidx++) = (linearKD[next + 11] << 2) + (linearKD[linearKD[next + 11] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[next + 11] + 11] < 0 ? LEFT_MASK : BOTH_MASK); next = linearKD[next + 4]; } else { if (linearKD[next + 4] != -1) { stack(stackidx++) = (linearKD[next + 4] << 2) + (linearKD[linearKD[next + 4] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[next + 4] + 11] < 0 ? 
LEFT_MASK : BOTH_MASK); next = linearKD[next + 11]; } else break; } dim = (dim++ - 3) ? dim : dim - 3; } /*faster implementation: 1: no pruning; 2: calc the minimum dist while getting into the point; */ //backtrace; int r; int rt; ctn: while (stackidx > 0) { rt = stack(--stackidx); r = rt&NEITHER_MASK; rt >>= 2; ds = pow(pt(0) - linearKD[rt + 1], 2) + pow(pt(1) - linearKD[rt + 2], 2) + pow(pt(2) - linearKD[rt + 3], 2); if (ds < dss) { dss = ds; ptidx = linearKD[rt]; } ds = 0; switch (r) { case 0: rt = rt + 11; break; case 1: rt = rt + 4; break; case 3:continue; default: rt = rt + 4; break; } if (pt(0) < linearKD[rt + 1]) ds += pow(pt(0) - linearKD[rt + 1], 2); else if (pt(0) > linearKD[rt + 4]) ds += pow(pt(0) - linearKD[rt + 4], 2); if (pt(1) < linearKD[rt + 2]) ds += pow(pt(1) - linearKD[rt + 2], 2); else if (pt(1) > linearKD[rt + 5]) ds += pow(pt(1) - linearKD[rt + 5], 2); if (pt(2) < linearKD[rt + 3]) ds += pow(pt(2) - linearKD[rt + 3], 2); else if (pt(2) > linearKD[rt + 6]) ds += pow(pt(2) - linearKD[rt + 6], 2); if (ds < dss&&linearKD[rt]>0) { stack(stackidx) = linearKD[rt] << 2; stack(stackidx++) += linearKD[linearKD[rt] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] < 0 ? LEFT_MASK : BOTH_MASK;//BOTH_MASK; } if (r == 2) { rt = rt + 7; ds = 0; if (pt(0) < linearKD[rt + 1]) ds += pow(pt(0) - linearKD[rt + 1], 2); else if (pt(0) > linearKD[rt + 4]) ds += pow(pt(0) - linearKD[rt + 4], 2); if (pt(1) < linearKD[rt + 2]) ds += pow(pt(1) - linearKD[rt + 2], 2); else if (pt(1) > linearKD[rt + 5]) ds += pow(pt(1) - linearKD[rt + 5], 2); if (pt(2) < linearKD[rt + 3]) ds += pow(pt(2) - linearKD[rt + 3], 2); else if (pt(2) > linearKD[rt + 6]) ds += pow(pt(2) - linearKD[rt + 6], 2); if (ds < dss) { stack(stackidx) = linearKD[rt] << 2; stack(stackidx++) += linearKD[linearKD[rt] + 4] < 0 ? NEITHER_MASK : linearKD[linearKD[rt] + 11] < 0 ? 
LEFT_MASK : BOTH_MASK; } } } //dss = sqrt(dss); #pragma endregion #pragma region AdditionalCalc if (ptidx != -1) { dss += vari_data[ptidx * 4] - pt(0) * vari_data[ptidx * 4 + 1] - pt(1) * vari_data[ptidx * 4 + 2] - pt(2) * vari_data[ptidx * 4 + 3]; atomicAdd(/*(unsigned int *)*/out + i, /*__float_as_uint(*/dss/*)*/); } #pragma endregion //int t = buckets[index]; } } } bool SimTester::isSimilarWithSelf(std::deque<vec3> &si, int siIdx) { float lineLength = g_param.w; int nHalfSample = g_param.nHalfSample; vector<vec3> siSampled; int siPos; if (!sampleLine(si, siIdx, lineLength, nHalfSample, siSampled, siPos)) return false; vec3 p = siSampled[nHalfSample]; int lowID, highID; findIdxRange(si, p, g_param.dMin, lowID, highID); deque<vec3>& sj = si; vec3 q; int sjIdx = -1; float min_dist = FLT_MAX; for (int j = 0; j< sj.size(); j++) { if (j >= lowID && j <= highID) continue; float l = length(p - sj[j]); if (l < min_dist) { q = sj[j]; min_dist = l; sjIdx = j; } } if (min_dist >= g_param.dSelfsep || sjIdx == -1) return false; // sample line vector<vec3> sjSampled; int sjPos; if (!sampleLine(sj, sjIdx, lineLength, nHalfSample, sjSampled, sjPos)) return false; // enough points to compare float term1 = (siSampled[nHalfSample] - sjSampled[nHalfSample]).length();//min_dist; float term2 = 0.0f; for (int i = 0; i < siSampled.size(); ++i) { float a = length(siSampled[i] - sjSampled[i]); term2 += abs(a - term1); } float alpha = 5; term2 = alpha * term2 / siSampled.size(); if ((term1 + term2) < g_param.dSelfsep) return true; return false; } bool SimTester::self_line_similarty(std::vector<vec3> &si_tmp, int id) { vec3 p = si_tmp[id]; vec3 q = si_tmp[0]; int compare_id = -1; float min_dist = 100000;; for (int j = 0; j<si_tmp.size() - g_param.w / 2.0f; j++) if (min_dist > length(p - si_tmp[j]) && length(p - si_tmp[j]) > g_param.dMin) { min_dist = length(p - si_tmp[j]); q = si_tmp[j]; compare_id = j; } if (compare_id == -1) return false; if (compare_id < g_param.w / 2 || compare_id > si_tmp.size() - g_param.w / 2) return false; std::vector<vec3> si; std::vector<vec3> sj; for (int i = id - g_param.w / 2.0f; i<id + g_param.w / 2.0f; i++) si.push_back(si_tmp[i]); for (int i = compare_id - g_param.w / 2.0f; i<compare_id + g_param.w / 2.0f; i++) sj.push_back(si_tmp[i]); float term1 = length(p - q); float term2 = 0.0f; float a; for (int k = 0; k<si.size(); k++) { a = length(si[k] - sj[k]); term2 += abs(a - term1); } term2 = g_param.alpha * term2 / si.size(); if ((term1 + term2) > g_param.dSelfsep) return true; return false; } bool SimTester::sampleLine(const std::deque<vec3>& line, int idx, float lineLength, int nHalfSample, vector<vec3>& result, int& idxPos) { if (idx<0 || idx >= line.size()) return false; float segmentLength = lineLength / (nHalfSample * 2); vector<vec3> buffer[2]; float totLength[2] = { 0, 0 }; int idxDir[2] = { 1, -1 }; int idxBound[2] = { line.size() - 1, 0 }; for (int ithDir = 0; ithDir<2; ++ithDir) { buffer[ithDir].reserve(nHalfSample * 2 + 1); if (idx != idxBound[ithDir]) { int thisIdx = idx, nextIdx = idx + idxDir[ithDir]; vec3 curPnt = line[thisIdx]; vec3 curDir = line[nextIdx] - curPnt; float allocateLength = curDir.length(); curDir /= allocateLength; while (buffer[ithDir].size() < nHalfSample * 2 + 1) { if (totLength[ithDir] > allocateLength) { nextIdx += idxDir[ithDir]; thisIdx += idxDir[ithDir]; if (nextIdx >= line.size() || nextIdx < 0) break; vec3 delta = line[nextIdx] - line[thisIdx]; float deltaLength = delta.length(); float remainLength = totLength[ithDir] - allocateLength; 
allocateLength += deltaLength; curDir = delta / deltaLength; curPnt = line[thisIdx] + curDir * remainLength; } else { buffer[ithDir].push_back(curPnt); curPnt += curDir * segmentLength; totLength[ithDir] += segmentLength; } } totLength[ithDir] -= segmentLength; } else buffer[ithDir].push_back(line[idx]); } // line is too short if (buffer[0].size() + buffer[1].size() < nHalfSample * 2 + 2) return false; int nSample; int validData[2] = { nHalfSample, nHalfSample }; for (int i = 0; i < 2; ++i) { nSample = buffer[i].size() - 1; if (nSample < nHalfSample) { validData[i] = nSample; validData[1 - i] += nHalfSample - nSample; } } result.clear(); result.reserve(nHalfSample * 2 + 1); for (int i = validData[1]; i > 0; i--) result.push_back(buffer[1][i]); idxPos = result.size(); for (int i = 0; i <= validData[0]; i++) result.push_back(buffer[0][i]); return true; } bool SimTester::MysampleLine(const std::deque<vec3>& line, int idx, int nHalfSample, vector<vec3>& result) { if (idx < nHalfSample || line.size() - idx - 1 < nHalfSample) return false; result.resize(nHalfSample * 2 + 1); for (int i = 0; i < 2 * nHalfSample + 1; ++i) result[i] = line[i + idx - nHalfSample]; return true; } bool SimTester::findIdxRange(const std::deque<vec3>&line, const vec3& centerPnt, float radius, int& lowID, int& highID) { lowID = 0; highID = line.size(); int i; int centerID[2] = { 0, line.size() - 1 }; float initDist[2] = { 0, 0 }; float minDist = FLT_MAX; for (i = 0; i < line.size() - 1; ++i) { vec3 d1 = line[i + 1] - line[i]; vec3 d2 = (centerPnt - line[i]); float t = d2.dot(d1) / d1.dot(d1); t = min(1.0, max(0.0, t)); vec3 td1 = t * d1; float dist = (d2 - td1).length(); if (dist < minDist) { minDist = dist; centerID[0] = i; centerID[1] = i + 1; initDist[0] = td1.length(); initDist[1] = d1.length() - initDist[0]; } } for (i = centerID[0] - 1; i > 0; --i) { initDist[0] += (line[i] - line[i + 1]).length(); if (initDist[0] >= radius) { lowID = i; break; } } for (i = centerID[1] + 1; i < line.size(); ++i) { initDist[1] += (line[i] - line[i - 1]).length(); if (initDist[1] >= radius) { highID = i; break; } } return true; }
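Avg, saliency_1, AlphaCalc and cuMax above all reduce their 64-wide rows with the unmasked __shfl_down intrinsic, which is the pre-CUDA-9 form (CUDA 9+ deprecates it in favor of __shfl_down_sync with an explicit lane mask); note also that cuMax's warp/lane arithmetic (warp = threadIdx.x << 5, lane = threadIdx.x - warp >> 5) appears inverted relative to the usual pattern. As a reading aid, here is a minimal standalone block-sum kernel written with the _sync variant; warp_reduce_sum and block_sum are illustrative names, not kernels from this file, and the exact launch shape is left to the caller.

#include <cuda_runtime.h>

// Warp-level sum: after the loop, lane 0 of each warp holds the sum of its 32 lane values.
__inline__ __device__ float warp_reduce_sum(float v) {
    for (int offset = 16; offset >= 1; offset >>= 1)
        v += __shfl_down_sync(0xffffffffu, v, offset);
    return v;
}

// Block-level sum assembled from per-warp partials, accumulated globally with atomicAdd.
__global__ void block_sum(const float* in, float* out, int n) {
    __shared__ float partial[32];                  // one slot per warp (blockDim.x <= 1024)
    int i    = blockIdx.x * blockDim.x + threadIdx.x;
    int lane = threadIdx.x & 31;                   // lane within the warp
    int wid  = threadIdx.x >> 5;                   // warp index within the block
    float v  = (i < n) ? in[i] : 0.0f;
    v = warp_reduce_sum(v);                        // intra-warp reduction
    if (lane == 0) partial[wid] = v;               // stage one value per warp
    __syncthreads();
    if (wid == 0) {                                // first warp reduces the staged partials
        int nwarps = (blockDim.x + 31) >> 5;
        v = (lane < nwarps) ? partial[lane] : 0.0f;
        v = warp_reduce_sum(v);
        if (lane == 0) atomicAdd(out, v);
    }
}

A max reduction follows the same shape with fmaxf in place of the addition, which is essentially what cuMax does before its final atomicMax on the float bit pattern.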
386196a1c779fce16ecf5da8a1da130fb1cc5425.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "gather_elements_impl.h" namespace onnxruntime { namespace cuda { namespace { constexpr int threads_per_block = GridDim::maxThreadsPerBlock; constexpr int thread_worksize = 16; } // namespace __host__ __device__ inline int64_t GetIndexValue(const void* index_data, size_t index_element_size, size_t offset) { switch (index_element_size) { case sizeof(int32_t): return *(reinterpret_cast<const int32_t*>(index_data) + offset); break; case sizeof(int64_t): return *(reinterpret_cast<const int64_t*>(index_data) + offset); break; default: break; } // What is a sensible thing to do here? assert(false); return std::numeric_limits<int64_t>::max(); } template <typename T> __global__ void _GatherElementsKernel( const int64_t rank, const T* input_data, const int64_t input_dim_along_axis, const TArray<int64_t> input_strides, const void* indices_data, const int64_t indices_size, const size_t index_element_size, const TArray<fast_divmod> indices_strides, const int64_t axis, T* output_data) { CUDA_LONG indices_index = threads_per_block * thread_worksize * blockIdx.x + threadIdx.x; #pragma unroll for (int work = 0; work < thread_worksize; ++work) { if (indices_index < indices_size) { int dim = 0; int remain = indices_index; int64_t data_idx = 0; int i = 0; for (; i < axis && remain > 0; ++i) { indices_strides[i].divmod(remain, dim, remain); data_idx += input_strides[i] * dim; } i = axis; indices_strides[i].divmod(remain, dim, remain); dim = GetIndexValue(indices_data, index_element_size, indices_index); if (dim < -input_dim_along_axis || dim >= input_dim_along_axis) { return; // Invalid index } if (dim < 0) { dim += input_dim_along_axis; } data_idx += input_strides[i] * dim; ++i; // past axis for (; i < rank && remain > 0; ++i) { indices_strides[i].divmod(remain, dim, remain); data_idx += input_strides[i] * dim; } output_data[indices_index] = input_data[data_idx]; indices_index += threads_per_block; } } } void GatherElementsImpl( hipStream_t stream, const int64_t rank, const void* input_data, const int64_t input_dim_along_axis, const TArray<int64_t>& input_strides, const void* indices_data, const int64_t indices_size, const TArray<fast_divmod>& indices_strides, const int64_t axis, void* output_data, size_t element_size, size_t index_element_size) { if (indices_size > 0) { dim3 block(threads_per_block); dim3 blocksPerGrid((static_cast<int>(indices_size + block.x * thread_worksize - 1) / (block.x * thread_worksize))); switch (element_size) { case sizeof(int8_t): { using CudaType = typename ToCudaType<int8_t>::MappedType; hipLaunchKernelGGL(( _GatherElementsKernel), dim3(blocksPerGrid), dim3(block), 0, stream, rank, reinterpret_cast<const CudaType*>(input_data), input_dim_along_axis, input_strides, indices_data, indices_size, index_element_size, indices_strides, axis, reinterpret_cast<CudaType*>(output_data)); } break; case sizeof(int16_t): { using CudaType = typename ToCudaType<int16_t>::MappedType; hipLaunchKernelGGL(( _GatherElementsKernel), dim3(blocksPerGrid), dim3(block), 0, stream, rank, reinterpret_cast<const CudaType*>(input_data), input_dim_along_axis, input_strides, indices_data, indices_size, index_element_size, indices_strides, axis, reinterpret_cast<CudaType*>(output_data)); } break; case sizeof(int32_t): { using CudaType = typename 
ToCudaType<int32_t>::MappedType; hipLaunchKernelGGL(( _GatherElementsKernel), dim3(blocksPerGrid), dim3(block), 0, stream, rank, reinterpret_cast<const CudaType*>(input_data), input_dim_along_axis, input_strides, indices_data, indices_size, index_element_size, indices_strides, axis, reinterpret_cast<CudaType*>(output_data)); } break; case sizeof(int64_t): { using CudaType = typename ToCudaType<int64_t>::MappedType; hipLaunchKernelGGL(( _GatherElementsKernel), dim3(blocksPerGrid), dim3(block), 0, stream, rank, reinterpret_cast<const CudaType*>(input_data), input_dim_along_axis, input_strides, indices_data, indices_size, index_element_size, indices_strides, axis, reinterpret_cast<CudaType*>(output_data)); } break; // should not reach here as we validate if the all relevant types are supported in the Compute method default: ORT_THROW("Unsupported element size by the GatherElements CUDA kernel"); } } } // namespace cuda } // namespace cuda } // namespace onnxruntime
386196a1c779fce16ecf5da8a1da130fb1cc5425.cu
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/providers/cuda/cu_inc/common.cuh"
#include "gather_elements_impl.h"

namespace onnxruntime {
namespace cuda {

namespace {
constexpr int threads_per_block = GridDim::maxThreadsPerBlock;
constexpr int thread_worksize = 16;
}  // namespace

__host__ __device__ inline int64_t GetIndexValue(const void* index_data, size_t index_element_size, size_t offset) {
  switch (index_element_size) {
    case sizeof(int32_t):
      return *(reinterpret_cast<const int32_t*>(index_data) + offset);
      break;
    case sizeof(int64_t):
      return *(reinterpret_cast<const int64_t*>(index_data) + offset);
      break;
    default:
      break;
  }
  // What is a sensible thing to do here?
  assert(false);
  return std::numeric_limits<int64_t>::max();
}

template <typename T>
__global__ void _GatherElementsKernel(
    const int64_t rank,
    const T* input_data,
    const int64_t input_dim_along_axis,
    const TArray<int64_t> input_strides,
    const void* indices_data,
    const int64_t indices_size,
    const size_t index_element_size,
    const TArray<fast_divmod> indices_strides,
    const int64_t axis,
    T* output_data) {
  CUDA_LONG indices_index = threads_per_block * thread_worksize * blockIdx.x + threadIdx.x;

#pragma unroll
  for (int work = 0; work < thread_worksize; ++work) {
    if (indices_index < indices_size) {
      int dim = 0;
      int remain = indices_index;
      int64_t data_idx = 0;

      int i = 0;
      for (; i < axis && remain > 0; ++i) {
        indices_strides[i].divmod(remain, dim, remain);
        data_idx += input_strides[i] * dim;
      }

      i = axis;
      indices_strides[i].divmod(remain, dim, remain);
      dim = GetIndexValue(indices_data, index_element_size, indices_index);
      if (dim < -input_dim_along_axis || dim >= input_dim_along_axis) {
        return;  // Invalid index
      }
      if (dim < 0) {
        dim += input_dim_along_axis;
      }
      data_idx += input_strides[i] * dim;
      ++i;  // past axis

      for (; i < rank && remain > 0; ++i) {
        indices_strides[i].divmod(remain, dim, remain);
        data_idx += input_strides[i] * dim;
      }

      output_data[indices_index] = input_data[data_idx];
      indices_index += threads_per_block;
    }
  }
}

void GatherElementsImpl(
    cudaStream_t stream,
    const int64_t rank,
    const void* input_data,
    const int64_t input_dim_along_axis,
    const TArray<int64_t>& input_strides,
    const void* indices_data,
    const int64_t indices_size,
    const TArray<fast_divmod>& indices_strides,
    const int64_t axis,
    void* output_data,
    size_t element_size,
    size_t index_element_size) {
  if (indices_size > 0) {
    dim3 block(threads_per_block);
    dim3 blocksPerGrid((static_cast<int>(indices_size + block.x * thread_worksize - 1) / (block.x * thread_worksize)));

    switch (element_size) {
      case sizeof(int8_t): {
        using CudaType = typename ToCudaType<int8_t>::MappedType;
        _GatherElementsKernel<<<blocksPerGrid, block, 0, stream>>>(
            rank, reinterpret_cast<const CudaType*>(input_data), input_dim_along_axis, input_strides,
            indices_data, indices_size, index_element_size, indices_strides, axis,
            reinterpret_cast<CudaType*>(output_data));
      } break;

      case sizeof(int16_t): {
        using CudaType = typename ToCudaType<int16_t>::MappedType;
        _GatherElementsKernel<<<blocksPerGrid, block, 0, stream>>>(
            rank, reinterpret_cast<const CudaType*>(input_data), input_dim_along_axis, input_strides,
            indices_data, indices_size, index_element_size, indices_strides, axis,
            reinterpret_cast<CudaType*>(output_data));
      } break;

      case sizeof(int32_t): {
        using CudaType = typename ToCudaType<int32_t>::MappedType;
        _GatherElementsKernel<<<blocksPerGrid, block, 0, stream>>>(
            rank, reinterpret_cast<const CudaType*>(input_data), input_dim_along_axis, input_strides,
            indices_data, indices_size, index_element_size, indices_strides, axis,
            reinterpret_cast<CudaType*>(output_data));
      } break;

      case sizeof(int64_t): {
        using CudaType = typename ToCudaType<int64_t>::MappedType;
        _GatherElementsKernel<<<blocksPerGrid, block, 0, stream>>>(
            rank, reinterpret_cast<const CudaType*>(input_data), input_dim_along_axis, input_strides,
            indices_data, indices_size, index_element_size, indices_strides, axis,
            reinterpret_cast<CudaType*>(output_data));
      } break;

      // should not reach here as we validate if the all relevant types are supported in the Compute method
      default:
        ORT_THROW("Unsupported element size by the GatherElements CUDA kernel");
    }
  }
}

}  // namespace cuda
}  // namespace onnxruntime
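// ---------------------------------------------------------------------------
// A minimal host-side sketch of the index math used by _GatherElementsKernel
// above: decode the flat output index into coordinates, substitute the
// gathered index along `axis`, and rebuild a flat offset with the input's
// row-major strides. The helper name and plain int64_t strides are
// illustrative assumptions for reference only, not onnxruntime API.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <vector>

inline void gather_elements_reference(const std::vector<int64_t>& input_dims,
                                      const std::vector<float>& input,
                                      const std::vector<int64_t>& indices_dims,
                                      const std::vector<int64_t>& indices,
                                      int64_t axis,
                                      std::vector<float>& output) {
  const int64_t rank = static_cast<int64_t>(input_dims.size());

  // Row-major strides of the input tensor.
  std::vector<int64_t> input_strides(rank, 1);
  for (int64_t i = rank - 2; i >= 0; --i)
    input_strides[i] = input_strides[i + 1] * input_dims[i + 1];

  // The output has the same shape (and element count) as the indices tensor.
  int64_t indices_size = 1;
  for (int64_t d : indices_dims) indices_size *= d;
  output.resize(indices_size);

  for (int64_t flat = 0; flat < indices_size; ++flat) {
    int64_t remain = flat;
    int64_t data_idx = 0;
    for (int64_t i = 0; i < rank; ++i) {
      // Stride of dimension i in the indices tensor (what fast_divmod encodes).
      int64_t stride_i = 1;
      for (int64_t j = i + 1; j < rank; ++j) stride_i *= indices_dims[j];
      int64_t dim = remain / stride_i;
      remain = remain % stride_i;
      if (i == axis) {
        dim = indices[flat];                   // gathered coordinate along axis
        if (dim < 0) dim += input_dims[axis];  // negative-index wraparound
      }
      data_idx += input_strides[i] * dim;
    }
    output[flat] = input[data_idx];
  }
}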
78434792141aa9d236fbe893f524cd3c8d488e47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 NVIDIA Corporation * * The U.S. Department of Energy funded the development of this software * under subcontract B609478 with Lawrence Livermore National Security, LLC * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "snap.h" #include "snap_cuda_help.h" template<int GROUPS> __global__ void gpu_expand_cross_section(const Point<3> origin, const AccessorArray<GROUPS, AccessorRO<double,1>,1> fa_sig, const AccessorRO<int,3> fa_mat, AccessorArray<GROUPS, AccessorWO<double,3>,3> fa_xs) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z * blockDim.z + threadIdx.z; const Point<3> p = origin + Point<3>(x,y,z); const int mat = fa_mat[p]; #pragma unroll for (int g = 0; g < GROUPS; g++) { const double *sig_ptr = fa_sig[g].ptr(mat); double val; asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(val) : "l"(sig_ptr) : "memory"); double *xs_ptr = fa_xs[g].ptr(p); asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(xs_ptr), "d"(val) : "memory"); } } __host__ void run_expand_cross_section(const std::vector<AccessorRO<double,1> > &fa_sig, const AccessorRO<int,3> &fa_mat, const std::vector<AccessorWO<double,3> > &fa_xs, const Rect<3> &subgrid_bounds) { // Figure out the dimensions to launch const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; dim3 block(gcd(x_range,32), gcd(y_range,4), gcd(z_range,4)); dim3 grid(x_range/block.x, y_range/block.y, z_range/block.z); // Switch on the number of groups assert(fa_sig.size() == fa_xs.size()); // TODO: replace this template foolishness with Terra switch (fa_sig.size()) { case 1: { hipLaunchKernelGGL(( gpu_expand_cross_section<1>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<1, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<1, AccessorWO<double,3>,3>(fa_xs)); break; } case 2: { hipLaunchKernelGGL(( gpu_expand_cross_section<2>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<2, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<2, AccessorWO<double,3>,3>(fa_xs)); break; } case 3: { hipLaunchKernelGGL(( gpu_expand_cross_section<3>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<3, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<3, AccessorWO<double,3>,3>(fa_xs)); break; } case 4: { hipLaunchKernelGGL(( gpu_expand_cross_section<4>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<4, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<4, AccessorWO<double,3>,3>(fa_xs)); break; } case 5: { hipLaunchKernelGGL(( gpu_expand_cross_section<5>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<5, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<5, AccessorWO<double,3>,3>(fa_xs)); break; } case 6: { hipLaunchKernelGGL(( 
gpu_expand_cross_section<6>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<6, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<6, AccessorWO<double,3>,3>(fa_xs)); break; } case 7: { hipLaunchKernelGGL(( gpu_expand_cross_section<7>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<7, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<7, AccessorWO<double,3>,3>(fa_xs)); break; } case 8: { hipLaunchKernelGGL(( gpu_expand_cross_section<8>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<8, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<8, AccessorWO<double,3>,3>(fa_xs)); break; } case 9: { hipLaunchKernelGGL(( gpu_expand_cross_section<9>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<9, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<9, AccessorWO<double,3>,3>(fa_xs)); break; } case 10: { hipLaunchKernelGGL(( gpu_expand_cross_section<10>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<10, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<10, AccessorWO<double,3>,3>(fa_xs)); break; } case 11: { hipLaunchKernelGGL(( gpu_expand_cross_section<11>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<11, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<11, AccessorWO<double,3>,3>(fa_xs)); break; } case 12: { hipLaunchKernelGGL(( gpu_expand_cross_section<12>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<12, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<12, AccessorWO<double,3>,3>(fa_xs)); break; } case 13: { hipLaunchKernelGGL(( gpu_expand_cross_section<13>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<13, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<13, AccessorWO<double,3>,3>(fa_xs)); break; } case 14: { hipLaunchKernelGGL(( gpu_expand_cross_section<14>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<14, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<14, AccessorWO<double,3>,3>(fa_xs)); break; } case 15: { hipLaunchKernelGGL(( gpu_expand_cross_section<15>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<15, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<15, AccessorWO<double,3>,3>(fa_xs)); break; } case 16: { hipLaunchKernelGGL(( gpu_expand_cross_section<16>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<16, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<16, AccessorWO<double,3>,3>(fa_xs)); break; } case 24: { hipLaunchKernelGGL(( gpu_expand_cross_section<24>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<24, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<24, AccessorWO<double,3>,3>(fa_xs)); break; } case 32: { hipLaunchKernelGGL(( gpu_expand_cross_section<32>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<32, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<32, AccessorWO<double,3>,3>(fa_xs)); break; } case 40: { hipLaunchKernelGGL(( gpu_expand_cross_section<40>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<40, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<40, AccessorWO<double,3>,3>(fa_xs)); break; } case 48: { hipLaunchKernelGGL(( gpu_expand_cross_section<48>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<48, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<48, AccessorWO<double,3>,3>(fa_xs)); break; } case 56: { hipLaunchKernelGGL(( gpu_expand_cross_section<56>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, 
AccessorArray<56, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<56, AccessorWO<double,3>,3>(fa_xs)); break; } case 64: { hipLaunchKernelGGL(( gpu_expand_cross_section<64>), dim3(grid), dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<64, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<64, AccessorWO<double,3>,3>(fa_xs)); break; } default: assert(false); // add more cases } } template<int GROUPS> __global__ void gpu_expand_scattering_cross_section(const Point<3> origin, const AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2> fa_slgg, const AccessorRO<int,3> fa_mat, AccessorArray<GROUPS, AccessorWO<MomentQuad,3>,3> fa_xs, const int group_start) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z * blockDim.z + threadIdx.z; const Point<3> p = origin + Point<3>(x,y,z); const int mat = fa_mat[p]; #pragma unroll for (int g = 0; g < GROUPS; g++) fa_xs[g][p] = fa_slgg[g][Point<2>(mat,group_start+g)]; } __host__ void run_expand_scattering_cross_section( const std::vector<AccessorRO<MomentQuad,2> > &fa_slgg, const AccessorRO<int,3> &fa_mat, const std::vector<AccessorWO<MomentQuad,3> > &fa_xs, const Rect<3> &subgrid_bounds, const int group_start) { // Figure out the dimensions to launch const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; dim3 block(gcd(x_range,32),gcd(y_range,4),gcd(z_range,4)); dim3 grid(x_range/block.x, y_range/block.y, z_range/block.z); // Switch on the number of groups assert(fa_slgg.size() == fa_xs.size()); // TODO: replace this template foolishness with Terra switch (fa_slgg.size()) { case 1: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<1>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<1,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<1,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 2: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<2>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<2,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<2,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 3: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<3>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<3,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<3,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 4: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<4>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<4,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<4,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 5: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<5>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<5,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<5,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 6: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<6>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<6,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<6,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 7: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<7>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<7,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, 
AccessorArray<7,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 8: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<8>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<8,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<8,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 9: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<9>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<9,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<9,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 10: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<10>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<10,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<10,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 11: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<11>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<11,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<11,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 12: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<12>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<12,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<12,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 13: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<13>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<13,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<13,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 14: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<14>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<14,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<14,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 15: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<15>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<15,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<15,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 16: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<16>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<16,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<16,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 24: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<24>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<24,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<24,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 32: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<32>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<32,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<32,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 40: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<40>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<40,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<40,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 48: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<48>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<48,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<48,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 56: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<56>), 
dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<56,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<56,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 64: { hipLaunchKernelGGL(( gpu_expand_scattering_cross_section<64>), dim3(grid),dim3(block), 0, 0, subgrid_bounds.lo, AccessorArray<64,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<64,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } default: assert(false); // add more cases } }
78434792141aa9d236fbe893f524cd3c8d488e47.cu
/* Copyright 2017 NVIDIA Corporation * * The U.S. Department of Energy funded the development of this software * under subcontract B609478 with Lawrence Livermore National Security, LLC * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "snap.h" #include "snap_cuda_help.h" template<int GROUPS> __global__ void gpu_expand_cross_section(const Point<3> origin, const AccessorArray<GROUPS, AccessorRO<double,1>,1> fa_sig, const AccessorRO<int,3> fa_mat, AccessorArray<GROUPS, AccessorWO<double,3>,3> fa_xs) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z * blockDim.z + threadIdx.z; const Point<3> p = origin + Point<3>(x,y,z); const int mat = fa_mat[p]; #pragma unroll for (int g = 0; g < GROUPS; g++) { const double *sig_ptr = fa_sig[g].ptr(mat); double val; asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(val) : "l"(sig_ptr) : "memory"); double *xs_ptr = fa_xs[g].ptr(p); asm volatile("st.global.cs.f64 [%0], %1;" : : "l"(xs_ptr), "d"(val) : "memory"); } } __host__ void run_expand_cross_section(const std::vector<AccessorRO<double,1> > &fa_sig, const AccessorRO<int,3> &fa_mat, const std::vector<AccessorWO<double,3> > &fa_xs, const Rect<3> &subgrid_bounds) { // Figure out the dimensions to launch const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; dim3 block(gcd(x_range,32), gcd(y_range,4), gcd(z_range,4)); dim3 grid(x_range/block.x, y_range/block.y, z_range/block.z); // Switch on the number of groups assert(fa_sig.size() == fa_xs.size()); // TODO: replace this template foolishness with Terra switch (fa_sig.size()) { case 1: { gpu_expand_cross_section<1><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<1, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<1, AccessorWO<double,3>,3>(fa_xs)); break; } case 2: { gpu_expand_cross_section<2><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<2, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<2, AccessorWO<double,3>,3>(fa_xs)); break; } case 3: { gpu_expand_cross_section<3><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<3, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<3, AccessorWO<double,3>,3>(fa_xs)); break; } case 4: { gpu_expand_cross_section<4><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<4, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<4, AccessorWO<double,3>,3>(fa_xs)); break; } case 5: { gpu_expand_cross_section<5><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<5, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<5, AccessorWO<double,3>,3>(fa_xs)); break; } case 6: { gpu_expand_cross_section<6><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<6, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<6, AccessorWO<double,3>,3>(fa_xs)); break; } case 7: { gpu_expand_cross_section<7><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<7, AccessorRO<double,1>,1>(fa_sig), fa_mat, 
AccessorArray<7, AccessorWO<double,3>,3>(fa_xs)); break; } case 8: { gpu_expand_cross_section<8><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<8, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<8, AccessorWO<double,3>,3>(fa_xs)); break; } case 9: { gpu_expand_cross_section<9><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<9, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<9, AccessorWO<double,3>,3>(fa_xs)); break; } case 10: { gpu_expand_cross_section<10><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<10, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<10, AccessorWO<double,3>,3>(fa_xs)); break; } case 11: { gpu_expand_cross_section<11><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<11, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<11, AccessorWO<double,3>,3>(fa_xs)); break; } case 12: { gpu_expand_cross_section<12><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<12, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<12, AccessorWO<double,3>,3>(fa_xs)); break; } case 13: { gpu_expand_cross_section<13><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<13, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<13, AccessorWO<double,3>,3>(fa_xs)); break; } case 14: { gpu_expand_cross_section<14><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<14, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<14, AccessorWO<double,3>,3>(fa_xs)); break; } case 15: { gpu_expand_cross_section<15><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<15, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<15, AccessorWO<double,3>,3>(fa_xs)); break; } case 16: { gpu_expand_cross_section<16><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<16, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<16, AccessorWO<double,3>,3>(fa_xs)); break; } case 24: { gpu_expand_cross_section<24><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<24, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<24, AccessorWO<double,3>,3>(fa_xs)); break; } case 32: { gpu_expand_cross_section<32><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<32, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<32, AccessorWO<double,3>,3>(fa_xs)); break; } case 40: { gpu_expand_cross_section<40><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<40, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<40, AccessorWO<double,3>,3>(fa_xs)); break; } case 48: { gpu_expand_cross_section<48><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<48, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<48, AccessorWO<double,3>,3>(fa_xs)); break; } case 56: { gpu_expand_cross_section<56><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<56, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<56, AccessorWO<double,3>,3>(fa_xs)); break; } case 64: { gpu_expand_cross_section<64><<<grid, block>>>(subgrid_bounds.lo, AccessorArray<64, AccessorRO<double,1>,1>(fa_sig), fa_mat, AccessorArray<64, AccessorWO<double,3>,3>(fa_xs)); break; } default: assert(false); // add more cases } } template<int GROUPS> __global__ void gpu_expand_scattering_cross_section(const Point<3> origin, const AccessorArray<GROUPS, AccessorRO<MomentQuad,2>,2> fa_slgg, const AccessorRO<int,3> fa_mat, AccessorArray<GROUPS, AccessorWO<MomentQuad,3>,3> fa_xs, const int group_start) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z * blockDim.z + threadIdx.z; const Point<3> p = origin + Point<3>(x,y,z); const int mat = fa_mat[p]; #pragma unroll for (int g 
= 0; g < GROUPS; g++) fa_xs[g][p] = fa_slgg[g][Point<2>(mat,group_start+g)]; } __host__ void run_expand_scattering_cross_section( const std::vector<AccessorRO<MomentQuad,2> > &fa_slgg, const AccessorRO<int,3> &fa_mat, const std::vector<AccessorWO<MomentQuad,3> > &fa_xs, const Rect<3> &subgrid_bounds, const int group_start) { // Figure out the dimensions to launch const int x_range = (subgrid_bounds.hi[0] - subgrid_bounds.lo[0]) + 1; const int y_range = (subgrid_bounds.hi[1] - subgrid_bounds.lo[1]) + 1; const int z_range = (subgrid_bounds.hi[2] - subgrid_bounds.lo[2]) + 1; dim3 block(gcd(x_range,32),gcd(y_range,4),gcd(z_range,4)); dim3 grid(x_range/block.x, y_range/block.y, z_range/block.z); // Switch on the number of groups assert(fa_slgg.size() == fa_xs.size()); // TODO: replace this template foolishness with Terra switch (fa_slgg.size()) { case 1: { gpu_expand_scattering_cross_section<1><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<1,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<1,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 2: { gpu_expand_scattering_cross_section<2><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<2,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<2,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 3: { gpu_expand_scattering_cross_section<3><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<3,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<3,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 4: { gpu_expand_scattering_cross_section<4><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<4,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<4,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 5: { gpu_expand_scattering_cross_section<5><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<5,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<5,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 6: { gpu_expand_scattering_cross_section<6><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<6,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<6,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 7: { gpu_expand_scattering_cross_section<7><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<7,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<7,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 8: { gpu_expand_scattering_cross_section<8><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<8,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<8,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 9: { gpu_expand_scattering_cross_section<9><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<9,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<9,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 10: { gpu_expand_scattering_cross_section<10><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<10,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<10,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 11: { gpu_expand_scattering_cross_section<11><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<11,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<11,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 12: { gpu_expand_scattering_cross_section<12><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<12,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<12,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 13: { 
gpu_expand_scattering_cross_section<13><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<13,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<13,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 14: { gpu_expand_scattering_cross_section<14><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<14,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<14,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 15: { gpu_expand_scattering_cross_section<15><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<15,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<15,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 16: { gpu_expand_scattering_cross_section<16><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<16,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<16,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 24: { gpu_expand_scattering_cross_section<24><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<24,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<24,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 32: { gpu_expand_scattering_cross_section<32><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<32,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<32,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 40: { gpu_expand_scattering_cross_section<40><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<40,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<40,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 48: { gpu_expand_scattering_cross_section<48><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<48,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<48,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 56: { gpu_expand_scattering_cross_section<56><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<56,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<56,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } case 64: { gpu_expand_scattering_cross_section<64><<<grid,block>>>( subgrid_bounds.lo, AccessorArray<64,AccessorRO<MomentQuad,2>,2>(fa_slgg), fa_mat, AccessorArray<64,AccessorWO<MomentQuad,3>,3>(fa_xs), group_start); break; } default: assert(false); // add more cases } }
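// ---------------------------------------------------------------------------
// Both versions above size their launches as block(gcd(x,32), gcd(y,4),
// gcd(z,4)) and grid(range/block), so the block always divides the subgrid
// extents and the kernels need no out-of-bounds guard. The sketch below
// restates that scheme with a local Euclidean gcd; the real gcd is assumed to
// come from snap_cuda_help.h, and these helper names are illustrative only.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

static int gcd_sketch(int a, int b) {  // Euclid's algorithm
  while (b != 0) { int t = a % b; a = b; b = t; }
  return a;
}

static void pick_launch_dims_sketch(int x_range, int y_range, int z_range) {
  dim3 block(gcd_sketch(x_range, 32), gcd_sketch(y_range, 4), gcd_sketch(z_range, 4));
  dim3 grid(x_range / block.x, y_range / block.y, z_range / block.z);
  // Because block.* divides range.*, grid.* * block.* == range.* exactly.
  printf("block=(%u,%u,%u) grid=(%u,%u,%u)\n",
         block.x, block.y, block.z, grid.x, grid.y, grid.z);
}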
91c7ab7d31b35e786d348c7cadb388b90c982994.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../cuda_utils.h" #include "sampling_cuda_kernel.h" __device__ void __update(float *dists, int *dists_i, int idx1, int idx2) { const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? i2 : i1; } // input xyz: (n, 3), tmp: (b, n_max) // ouput idx (m) template <unsigned int block_size> __global__ void furthestsampling_cuda_kernel(const float *xyz, const int *offset, const int *new_offset, float *tmp, int *idx) { __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int bid = blockIdx.x; int start_n, end_n, start_m, end_m, old; if (bid == 0) { start_n = 0; end_n = offset[0]; start_m = 0; end_m = new_offset[0]; old = 0; } else { start_n = offset[bid - 1]; end_n = offset[bid]; start_m = new_offset[bid - 1]; end_m = new_offset[bid]; old = offset[bid - 1]; } const int stride = block_size; int tid = threadIdx.x; if (tid == 0) idx[start_m] = start_n; __syncthreads(); for (int j = start_m + 1; j < end_m; j++) { int besti = start_n; float best = -1; float x1 = xyz[old * 3 + 0]; float y1 = xyz[old * 3 + 1]; float z1 = xyz[old * 3 + 2]; for (int k = start_n + tid; k < end_n; k += stride) { float x2 = xyz[k * 3 + 0]; float y2 = xyz[k * 3 + 1]; float z2 = xyz[k * 3 + 2]; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, tmp[k]); tmp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); } if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idx[j] = old; } } void furthestsampling_cuda_launcher(int b, int n, const float *xyz, const int *offset, const int *new_offset, float *tmp, int *idx) { unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<1024>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; case 512: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<512>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; case 256: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<256>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; case 128: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<128>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; case 64: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<64>), dim3(b), dim3(n_threads), 
0, 0, xyz, offset, new_offset, tmp, idx); break; case 32: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<32>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; case 16: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<16>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; case 8: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<8>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; case 4: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<4>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; case 2: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<2>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; case 1: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<1>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); break; default: hipLaunchKernelGGL(( furthestsampling_cuda_kernel<512>), dim3(b), dim3(n_threads), 0, 0, xyz, offset, new_offset, tmp, idx); } }
91c7ab7d31b35e786d348c7cadb388b90c982994.cu
#include "../cuda_utils.h" #include "sampling_cuda_kernel.h" __device__ void __update(float *dists, int *dists_i, int idx1, int idx2) { const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? i2 : i1; } // input xyz: (n, 3), tmp: (b, n_max) // ouput idx (m) template <unsigned int block_size> __global__ void furthestsampling_cuda_kernel(const float *xyz, const int *offset, const int *new_offset, float *tmp, int *idx) { __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int bid = blockIdx.x; int start_n, end_n, start_m, end_m, old; if (bid == 0) { start_n = 0; end_n = offset[0]; start_m = 0; end_m = new_offset[0]; old = 0; } else { start_n = offset[bid - 1]; end_n = offset[bid]; start_m = new_offset[bid - 1]; end_m = new_offset[bid]; old = offset[bid - 1]; } const int stride = block_size; int tid = threadIdx.x; if (tid == 0) idx[start_m] = start_n; __syncthreads(); for (int j = start_m + 1; j < end_m; j++) { int besti = start_n; float best = -1; float x1 = xyz[old * 3 + 0]; float y1 = xyz[old * 3 + 1]; float z1 = xyz[old * 3 + 2]; for (int k = start_n + tid; k < end_n; k += stride) { float x2 = xyz[k * 3 + 0]; float y2 = xyz[k * 3 + 1]; float z2 = xyz[k * 3 + 2]; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, tmp[k]); tmp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); } if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idx[j] = old; } } void furthestsampling_cuda_launcher(int b, int n, const float *xyz, const int *offset, const int *new_offset, float *tmp, int *idx) { unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: furthestsampling_cuda_kernel<1024><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; case 512: furthestsampling_cuda_kernel<512><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; case 256: furthestsampling_cuda_kernel<256><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; case 128: furthestsampling_cuda_kernel<128><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; case 64: furthestsampling_cuda_kernel<64><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; case 32: furthestsampling_cuda_kernel<32><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; case 16: furthestsampling_cuda_kernel<16><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); 
break; case 8: furthestsampling_cuda_kernel<8><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; case 4: furthestsampling_cuda_kernel<4><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; case 2: furthestsampling_cuda_kernel<2><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; case 1: furthestsampling_cuda_kernel<1><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); break; default: furthestsampling_cuda_kernel<512><<<b, n_threads, 0>>>(xyz, offset, new_offset, tmp, idx); } }
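// ---------------------------------------------------------------------------
// The kernel above is farthest-point sampling per batch: tmp[] holds each
// point's squared distance to the already-selected set, and every iteration
// the block-wide max reduction over dists/dists_i picks the point farthest
// from that set. The single-batch host loop below is a minimal sketch of the
// same algorithm for reference; the function name is illustrative and it
// assumes m >= 1 samples are requested from n >= 1 points.
// ---------------------------------------------------------------------------
#include <cfloat>
#include <vector>

inline std::vector<int> farthest_point_sampling_ref(const float* xyz, int n, int m) {
  std::vector<float> dist(n, FLT_MAX);  // distance to the selected set so far
  std::vector<int> idx(m);
  int old = 0;                          // start from point 0, like the kernel
  idx[0] = old;
  for (int j = 1; j < m; ++j) {
    int besti = 0;
    float best = -1.f;
    const float x1 = xyz[old * 3 + 0], y1 = xyz[old * 3 + 1], z1 = xyz[old * 3 + 2];
    for (int k = 0; k < n; ++k) {
      const float dx = xyz[k * 3 + 0] - x1, dy = xyz[k * 3 + 1] - y1, dz = xyz[k * 3 + 2] - z1;
      const float d = dx * dx + dy * dy + dz * dz;
      if (d < dist[k]) dist[k] = d;                      // min(d, tmp[k]) in the kernel
      if (dist[k] > best) { best = dist[k]; besti = k; } // block-wide max reduction
    }
    old = besti;
    idx[j] = old;
  }
  return idx;
}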
89b59a5e96516d719e931c448817d94125de1423.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// "Hello World!" (from tutorial by Ingemar Ragnemalm)

#include <stdio.h>

const int N = 16;
const int blocksize = 16;

__global__ void hello(char* a, int *b)
{
    a[threadIdx.x] += b[threadIdx.x];
}

int main()
{
    char a[N] = "Hello \0\0\0\0\0";
    int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

    char *ad;
    int *bd;
    const int csize = N*sizeof(char);
    const int isize = N*sizeof(int);

    printf("%s", a);

    hipMalloc( (void**)&ad, csize);
    hipMalloc( (void**)&bd, isize);
    hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
    hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
    hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
    hipFree( ad );
    hipFree( bd );

    printf("%s\n", a);
    return EXIT_SUCCESS;
}
89b59a5e96516d719e931c448817d94125de1423.cu
// "Hello World!" (from tutorial by Ingemar Ragnemalm) #include <stdio.h> const int N = 16; const int blocksize = 16; __global__ void hello(char* a, int *b) { a[threadIdx.x] += b[threadIdx.x]; } int main() { char a[N] = "Hello \0\0\0\0\0"; int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; char *ad; int *bd; const int csize = N*sizeof(char); const int isize = N*sizeof(int); printf("%s", a); cudaMalloc( (void**)&ad, csize); cudaMalloc( (void**)&bd, isize); cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice ); cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice ); dim3 dimBlock( blocksize, 1 ); dim3 dimGrid( 1, 1 ); hello <<<dimGrid, dimBlock>>>(ad, bd); cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost ); cudaFree( ad ); cudaFree( bd ); printf("%s\n", a); return EXIT_SUCCESS; }
f4386713091b2c3ea79866ff0131da9a2de7d4a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma diag_suppress 3126 #define RT_FUNCTION static __forceinline__ __device__ #include "kernel.h" #include <hussar/hussar.h> #include <optix_device.h> #include <utility> using namespace hussar; #include "utils.h" #include "polyfills.h" #include "vecmath.h" #define log(...) //#define log(...) printf(__VA_ARGS__) extern "C" { __constant__ GPUParams params; } struct RT { bool visible(Intersection &isect) const { unsigned int visible = 1u; optixTrace( params.handle, vec3_to_float3(isect.ray.o), vec3_to_float3(isect.ray.d), Epsilon, isect.tMax, 0.0f, // rayTime OptixVisibilityMask(1), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex visible ); return visible; } void intersect(Intersection &isect) const { unsigned int u0, u1; packPointer(&isect, u0, u1); optixTrace( params.handle, vec3_to_float3(isect.ray.o), vec3_to_float3(isect.ray.d), Epsilon, isect.tMax, 0.0f, // rayTime OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } }; extern "C" __global__ void __raygen__rg() { const uint3 optixIndex = optixGetLaunchIndex(); const long sampleIndex = optixIndex.y * params.width + optixIndex.x + params.offset; const Scene &scene = *params.d_scene; PathTracer &integrator = *params.d_integrator; integrator.sample(scene, RT {}, sampleIndex); } extern "C" __global__ void __closesthit__radiance() { log(" closesthit_radiance\n"); HitGroupData *rt_data = (HitGroupData *)optixGetSbtDataPointer(); const TriangleMesh::IndexTriplet &indices = rt_data->indices[optixGetPrimitiveIndex()]; const float3 v0 = vec3_to_float3(rt_data->vertices[indices.v0]); const float3 v1 = vec3_to_float3(rt_data->vertices[indices.v1]); const float3 v2 = vec3_to_float3(rt_data->vertices[indices.v2]); float3 normal = normalize(cross(v1 - v0, v2 - v0)); // transform normal to world coordinates // float4 worldToObject[3]; // optix_impl::optixGetWorldToObjectTransformMatrix(worldToObject[0], worldToObject[1], worldToObject[2]); // normal = normalize(optix_impl::optixTransformNormal(worldToObject[0], worldToObject[1], worldToObject[2], normal)); // write intersection data Intersection &isect = getIsect(); isect.t = optixGetRayTmax(); isect.p = float3_to_vec3(optixGetWorldRayOrigin() + optixGetRayTmax() * optixGetWorldRayDirection()); isect.n = float3_to_vec3(faceforward(normal, -optixGetWorldRayDirection(), normal)); } extern "C" __global__ void __miss__radiance() { log(" miss_radiance\n"); // nothing to do } extern "C" __global__ void __closesthit__occlusion() { optixSetPayload_0(0u); // set visible payload to 0 }
f4386713091b2c3ea79866ff0131da9a2de7d4a3.cu
#pragma diag_suppress 3126 #define RT_FUNCTION static __forceinline__ __device__ #include "kernel.h" #include <hussar/hussar.h> #include <optix_device.h> #include <utility> using namespace hussar; #include "utils.h" #include "polyfills.h" #include "vecmath.h" #define log(...) //#define log(...) printf(__VA_ARGS__) extern "C" { __constant__ GPUParams params; } struct RT { bool visible(Intersection &isect) const { unsigned int visible = 1u; optixTrace( params.handle, vec3_to_float3(isect.ray.o), vec3_to_float3(isect.ray.d), Epsilon, isect.tMax, 0.0f, // rayTime OptixVisibilityMask(1), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex visible ); return visible; } void intersect(Intersection &isect) const { unsigned int u0, u1; packPointer(&isect, u0, u1); optixTrace( params.handle, vec3_to_float3(isect.ray.o), vec3_to_float3(isect.ray.d), Epsilon, isect.tMax, 0.0f, // rayTime OptixVisibilityMask(1), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } }; extern "C" __global__ void __raygen__rg() { const uint3 optixIndex = optixGetLaunchIndex(); const long sampleIndex = optixIndex.y * params.width + optixIndex.x + params.offset; const Scene &scene = *params.d_scene; PathTracer &integrator = *params.d_integrator; integrator.sample(scene, RT {}, sampleIndex); } extern "C" __global__ void __closesthit__radiance() { log(" closesthit_radiance\n"); HitGroupData *rt_data = (HitGroupData *)optixGetSbtDataPointer(); const TriangleMesh::IndexTriplet &indices = rt_data->indices[optixGetPrimitiveIndex()]; const float3 v0 = vec3_to_float3(rt_data->vertices[indices.v0]); const float3 v1 = vec3_to_float3(rt_data->vertices[indices.v1]); const float3 v2 = vec3_to_float3(rt_data->vertices[indices.v2]); float3 normal = normalize(cross(v1 - v0, v2 - v0)); // transform normal to world coordinates // float4 worldToObject[3]; // optix_impl::optixGetWorldToObjectTransformMatrix(worldToObject[0], worldToObject[1], worldToObject[2]); // normal = normalize(optix_impl::optixTransformNormal(worldToObject[0], worldToObject[1], worldToObject[2], normal)); // write intersection data Intersection &isect = getIsect(); isect.t = optixGetRayTmax(); isect.p = float3_to_vec3(optixGetWorldRayOrigin() + optixGetRayTmax() * optixGetWorldRayDirection()); isect.n = float3_to_vec3(faceforward(normal, -optixGetWorldRayDirection(), normal)); } extern "C" __global__ void __miss__radiance() { log(" miss_radiance\n"); // nothing to do } extern "C" __global__ void __closesthit__occlusion() { optixSetPayload_0(0u); // set visible payload to 0 }
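// ---------------------------------------------------------------------------
// packPointer/getIsect come from utils.h (not shown). They presumably follow
// the usual OptiX idiom of splitting a 64-bit pointer across the two 32-bit
// payload registers so that closest-hit programs can write back into the
// Intersection that __raygen__rg passed to optixTrace. A sketch of that idiom,
// under the assumption that utils.h does something equivalent:
// ---------------------------------------------------------------------------
static __forceinline__ __device__ void pack_ptr_sketch(void* ptr,
                                                       unsigned int& u0,
                                                       unsigned int& u1) {
  const unsigned long long p = reinterpret_cast<unsigned long long>(ptr);
  u0 = static_cast<unsigned int>(p >> 32);          // high 32 bits
  u1 = static_cast<unsigned int>(p & 0xffffffffu);  // low 32 bits
}

static __forceinline__ __device__ void* unpack_ptr_sketch(unsigned int u0,
                                                          unsigned int u1) {
  const unsigned long long p = (static_cast<unsigned long long>(u0) << 32) | u1;
  return reinterpret_cast<void*>(p);
}

// Inside __closesthit__radiance the registers written by optixTrace would be
// read back with optixGetPayload_0()/optixGetPayload_1() and reassembled into
// the Intersection* before filling isect.t, isect.p and isect.n.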
e2f2148bea373c9822892588bc63c631f42d4957.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define __HIPCC__ #define __cplusplus #include "bin_scan.cu" #define BLOCK_SIZE 512 # define CUDA_SAFE_KERNEL(call) { \ call; \ hipDeviceSynchronize(); \ hipError_t err = hipGetLastError(); \ if ( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } class particle { public: uint pindex; uint binindex; }; class Particlebin { public: Particlebin* parentbin; Particlebin* subbins; uint nptcls; uint ifirstp; }; __device__ bool compare_binids(uint binindex,uint bin_level) { return ((binindex & ((0x0001) << (bin_level))) > 0); } __global__ void populate_splitting_list(particle* particles_in,Particlebin* bins, cudaMatrixui splitting_list, uint nptcls_max,uint bin_level) { uint idx = threadIdx.x; uint gidx = blockDim.x*blockIdx.x+idx; uint binid = blockIdx.y; uint block_start = blockDim.x*blockIdx.x; uint pid; uint splitting_condition; __shared__ Particlebin parentbin; particle my_particle; if(idx == 0) parentbin = bins[binid]; __syncthreads(); if(gidx < parentbin.nptcls) { pid = parentbin.ifirstp + gidx; /* if(pid < 256) { my_particle = particles_in[pid]; } else { printf("requesting read of pid %i by thread %i in bin %i with %i ptcls\n",pid,gidx,binid,parentbin.nptcls); } */ my_particle = particles_in[pid]; splitting_condition = compare_binids(my_particle.binindex,bin_level); splitting_list(gidx,binid) = splitting_condition; } else if(gidx < nptcls_max) { splitting_list(gidx,binid) = 0; } } __global__ void __launch_bounds__(BLOCK_SIZE,3) find_new_ids(particle* particles_in,particle* particles_out,Particlebin* bins, cudaMatrixui sums,uint nptcls_max, uint bin_level,int* nptcls_max_out) { uint idx = threadIdx.x; uint gidx = blockDim.x*blockIdx.x+idx; uint binid = blockIdx.y; uint block_start = blockDim.x*blockIdx.x; uint pid; particle my_particle; // No reason to run this block if it is more than the number of particles in the bin. 
// Might be able to avoid this with a better block mapping system if(block_start > bins[binid].nptcls) return; uint new_bin; __shared__ int nptcls_bin; int nptcls_max_out_temp; uint new_id; __shared__ Particlebin parentbin; __shared__ Particlebin subbins[2]; // We need to figure out how many particles are going to be in each sub bin // We can also use this section to setup the subbins for the next tree level if(idx == 0) { parentbin = bins[binid]; subbins[0].parentbin = &bins[binid]; subbins[1].parentbin = &bins[binid]; nptcls_bin = parentbin.nptcls; parentbin.subbins = (bins+gridDim.y)+2*binid; subbins[1].nptcls = max(0,sums(max(nptcls_bin-1,0),binid)); subbins[0].nptcls = max(0,(nptcls_bin - subbins[1].nptcls)); subbins[0].ifirstp = parentbin.ifirstp; subbins[1].ifirstp = subbins[0].ifirstp+subbins[0].nptcls; if(gidx == 0) { //printf("nptcls in bin %i = %i, with ifirstp = %i\n",2*binid,subbins[0].nptcls,subbins[0].ifirstp); //printf("nptcls in bin %i = %i, with ifirstp = %i\n",2*binid+1,subbins[1].nptcls,subbins[1].ifirstp); } if(gidx == 0) { parentbin.subbins[0] = subbins[0]; parentbin.subbins[1] = subbins[1]; nptcls_max_out_temp = max(subbins[0].nptcls,subbins[1].nptcls); atomicMax(nptcls_max_out,nptcls_max_out_temp); } } __syncthreads(); if(gidx < nptcls_bin) { pid = gidx+parentbin.ifirstp; my_particle = particles_in[pid]; new_bin = compare_binids(my_particle.binindex,bin_level); if(new_bin == 0) { new_id = gidx-sums(gidx,binid); } else { new_id = sums(gidx,binid)-1; } new_id += subbins[new_bin].ifirstp; //printf("particle %i, %i is being moved to index %i\n",pid,2*binid+new_bin,new_id); particles_out[new_id] = my_particle; } } void rebin_particles(Particlebin* bin_tree,particle* &particles,int* nptcls_max_out,int nptcls,int nbins_max) { uint bin_level = 0; uint nptcls_max = nptcls; int nbins_current = 1; dim3 cudaGridSize(1,1,1); dim3 cudaBlockSize(1,1,1); int nlevels = log(nbins_max)/log(2); bin_level = nlevels-1; Particlebin* current_bins = bin_tree; cudaMatrixui splitting_list; particle* particles_temp; CUDA_SAFE_CALL(hipMalloc((void**)&particles_temp,nptcls*sizeof(particle))); CUDA_SAFE_CALL(hipMemcpy(particles_temp,particles,nptcls*sizeof(particle),hipMemcpyDeviceToDevice)); int* nptcls_max_next; CUDA_SAFE_CALL(hipMalloc((void**)&nptcls_max_next,sizeof(int))); int* nptcls_max_next_h = (int*)malloc(sizeof(int)); particle* particles_in = particles; particle* particles_out = particles_temp; particle* particles_swap; // Main tree traversing loop for(int i=0;i<(nlevels);i++) { CUDA_SAFE_CALL(hipMemset(nptcls_max_next,0,sizeof(int))); printf("binlevel = %i, nptcls_max = %i \n", bin_level,nptcls_max); if(nptcls_max > nptcls) { printf("error number of particles is growing\n"); return; } // Setup the kernel launch parameters cudaBlockSize.x = BLOCK_SIZE; cudaGridSize.x = (nptcls_max+cudaBlockSize.x-1)/cudaBlockSize.x; cudaGridSize.y = nbins_current; // Allocate space for the splitting list splitting_list.cudaMatrix_allocate(nptcls_max,nbins_current,1); // Populate the splitting list hipLaunchKernelGGL(( CUDA_SAFE_KERNEL((populate_splitting_list), dim3(cudaGridSize),dim3(cudaBlockSize), 0, 0, particles_in,current_bins,splitting_list,nptcls_max,bin_level))); // Now we take the splitting list and find the cumulative sum for each bin bin_scan(splitting_list,nptcls_max,nbins_current); // Use the cumulative sum to calculate the new indices and move the particles hipLaunchKernelGGL(( CUDA_SAFE_KERNEL((find_new_ids), dim3(cudaGridSize),dim3(cudaBlockSize), 0, 0, 
particles_in,particles_out,current_bins,splitting_list,nptcls_max,bin_level,nptcls_max_next))); // Swap particles_in and particles_out particles_swap = particles_in; particles_in = particles_out; particles_out = particles_swap; // Advance counters bin_level--; current_bins = current_bins+nbins_current; nbins_current *= 2; CUDA_SAFE_CALL(hipMemcpy(nptcls_max_next_h,nptcls_max_next,sizeof(int),hipMemcpyDeviceToHost)); nptcls_max = *nptcls_max_next_h; splitting_list.cudaMatrixFree(); } particles = particles_in; *nptcls_max_out = nptcls_max; CUDA_SAFE_CALL(hipFree(nptcls_max_next)); CUDA_SAFE_CALL(hipFree(particles_out)); } __global__ void check_sort(particle* particles,Particlebin* bins,int nptcls_max) { uint idx = threadIdx.x; uint gidx = blockDim.x*blockIdx.x+idx; uint binid = blockIdx.y; uint pidx; __shared__ Particlebin sbin; particle my_particle; if(idx == 0) { sbin = bins[binid]; // printf("nptcls in bin %i = %i, with ifirstp = %i\n",binid,sbin.nptcls,sbin.ifirstp); } __syncthreads(); pidx = gidx+sbin.ifirstp; if(gidx < sbin.nptcls) { my_particle = particles[pidx]; if(my_particle.binindex != (binid)) { printf("Error, particle %i(%i), with binindex %i is in bin %i\n",pidx,gidx,my_particle.binindex,binid); } } } __host__ void rough_test(int nptcls) { int gridsize = 4096; int max_nptclsperbin; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; particle* particles_h = (particle*)malloc(nptcls*sizeof(particle)); particle* particles_d; CUDA_SAFE_CALL(hipMalloc((void**)&particles_d,nptcls*sizeof(particle))); Particlebin* bins; for(int i=0;i<nptcls;i++) { particles_h[i].pindex = i; particles_h[i].binindex = (rand()%gridsize); } // Figure how big the bin tree needs to be int bin_tree_size = 0; int nlevels = log(gridsize)/log(2); for(int i=0;i<(nlevels+1);i++) { bin_tree_size += (1<<i); } CUDA_SAFE_CALL(hipMalloc((void**)&bins,bin_tree_size*sizeof(Particlebin))); // Set up the first particle bin Particlebin parent; parent.ifirstp = 0; parent.nptcls = nptcls; CUDA_SAFE_CALL(hipMemcpy(bins,&parent,sizeof(Particlebin),hipMemcpyHostToDevice)); // Copy Particle data to the device CUDA_SAFE_CALL(hipMemcpy(particles_d,particles_h,nptcls*sizeof(particle),hipMemcpyHostToDevice)); hipEventRecord(start); // Rebin the particles rebin_particles(bins,particles_d,&max_nptclsperbin,nptcls,gridsize); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Binning Sort took %f milliseconds\n", milliseconds); // Check the sort dim3 cudaGridSize(1,gridsize,1); dim3 cudaBlockSize(BLOCK_SIZE,1,1); cudaGridSize.x = (max_nptclsperbin+cudaBlockSize.x-1)/cudaBlockSize.x; Particlebin* bottom_bins = bins+(bin_tree_size-gridsize); hipLaunchKernelGGL(( CUDA_SAFE_KERNEL((check_sort), dim3(cudaGridSize),dim3(cudaBlockSize), 0, 0, particles_d,bottom_bins,max_nptclsperbin))); } __host__ void semi_sorted_test(int nptcls) { int gridsize = 4096; int max_nptclsperbin; int temp_index; particle* particles_h = (particle*)malloc(nptcls*sizeof(particle)); particle* particles_d; CUDA_SAFE_CALL(hipMalloc((void**)&particles_d,nptcls*sizeof(particle))); Particlebin* bins; for(int i=0;i<nptcls;i++) { particles_h[i].pindex = i; particles_h[i].binindex = i/(nptcls/gridsize); if((rand()%1000) < 500) { temp_index = particles_h[i].binindex + rand()%5 - 2; if(temp_index < 0) { temp_index = gridsize-1; } else if(temp_index > gridsize-1) { temp_index = 0; } particles_h[i].binindex = temp_index; } } // Figure how big the bin tree needs to be 
int bin_tree_size = 0; int nlevels = log(gridsize)/log(2); for(int i=0;i<(nlevels+1);i++) { bin_tree_size += (1<<i); } CUDA_SAFE_CALL(hipMalloc((void**)&bins,bin_tree_size*sizeof(Particlebin))); // Set up the first particle bin Particlebin parent; parent.ifirstp = 0; parent.nptcls = nptcls; CUDA_SAFE_CALL(hipMemcpy(bins,&parent,sizeof(Particlebin),hipMemcpyHostToDevice)); // Copy Particle data to the device CUDA_SAFE_CALL(hipMemcpy(particles_d,particles_h,nptcls*sizeof(particle),hipMemcpyHostToDevice)); // Rebin the particles rebin_particles(bins,particles_d,&max_nptclsperbin,nptcls,gridsize); // Check the sort dim3 cudaGridSize(1,gridsize,1); dim3 cudaBlockSize(BLOCK_SIZE,1,1); cudaGridSize.x = (max_nptclsperbin+cudaBlockSize.x-1)/cudaBlockSize.x; Particlebin* bottom_bins = bins+(bin_tree_size-gridsize); hipLaunchKernelGGL(( CUDA_SAFE_KERNEL((check_sort), dim3(cudaGridSize),dim3(cudaBlockSize), 0, 0, particles_d,bottom_bins,max_nptclsperbin))); } __global__ void check_scan_results_kernel(cudaMatrixui g_results0,uint* g_results1,int n) { uint idx = threadIdx.x; uint gidx = blockDim.x*blockIdx.x+idx; uint result0; uint result1; if(gidx < n) { result0 = g_results0(gidx); result1 = g_results1[gidx]; if(result0!=result1) { printf("%i != %i for thread %i \n",result0,result1,gidx); } } } __host__ void scan_test(int nptcls) { cudaMatrixui sums(nptcls,1,1); uint* data_h = (uint*)malloc(nptcls*sizeof(uint)); uint* results_d1; CUDA_SAFE_CALL(hipMalloc((void**)&results_d1,2*sizeof(uint)*nptcls)); for(int i=0;i<nptcls;i++) { data_h[i] = 1; } sums.cudaMatrixcpy(data_h,hipMemcpyHostToDevice); bin_scan(sums,nptcls,1); for(int i=1;i<nptcls;i++) { data_h[i] += data_h[i-1]; } CUDA_SAFE_CALL(hipMemcpy(results_d1,data_h,nptcls*sizeof(uint),hipMemcpyHostToDevice)); int cudaGridSize = (nptcls+BLOCK_SIZE-1)/BLOCK_SIZE; int cudaBlockSize = BLOCK_SIZE; hipLaunchKernelGGL(( CUDA_SAFE_KERNEL((check_scan_results_kernel), dim3(cudaGridSize),dim3(cudaBlockSize), 0, 0, sums,results_d1,nptcls))); sums.cudaMatrixFree(); hipFree(results_d1); free(data_h); } int main(void) { hipSetDevice(1); int nptcls = pow(2,24); rough_test(nptcls); return 0; }
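// ---------------------------------------------------------------------------
// The rebinning above is a most-significant-bit-first radix split of the
// particle bin indices: at each tree level every bin is cut in two by testing
// bit `bin_level` (compare_binids), the per-bin cumulative sum from bin_scan
// gives each flagged particle its slot in the upper sub-bin, and gidx minus
// that sum gives the slot in the lower one. The host loop below sketches one
// such split for a single bin; the function name is illustrative only.
// ---------------------------------------------------------------------------
#include <vector>

inline void split_one_level_ref(const std::vector<unsigned int>& binindex,
                                unsigned int bin_level,
                                std::vector<unsigned int>& lower,
                                std::vector<unsigned int>& upper) {
  lower.clear();
  upper.clear();
  for (unsigned int b : binindex) {
    // Same predicate as compare_binids: is bit `bin_level` of binindex set?
    if (b & (1u << bin_level))
      upper.push_back(b);  // sub-bin 1
    else
      lower.push_back(b);  // sub-bin 0
  }
  // On the GPU the partition is done by index arithmetic instead of push_back:
  // new_id = sums(gidx) - 1 for sub-bin 1, gidx - sums(gidx) for sub-bin 0.
}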
e2f2148bea373c9822892588bc63c631f42d4957.cu
#define __CUDACC__ #define __cplusplus #include "bin_scan.cu" #define BLOCK_SIZE 512 # define CUDA_SAFE_KERNEL(call) { \ call; \ cudaDeviceSynchronize(); \ cudaError err = cudaGetLastError(); \ if ( cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, cudaGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } class particle { public: uint pindex; uint binindex; }; class Particlebin { public: Particlebin* parentbin; Particlebin* subbins; uint nptcls; uint ifirstp; }; __device__ bool compare_binids(uint binindex,uint bin_level) { return ((binindex & ((0x0001) << (bin_level))) > 0); } __global__ void populate_splitting_list(particle* particles_in,Particlebin* bins, cudaMatrixui splitting_list, uint nptcls_max,uint bin_level) { uint idx = threadIdx.x; uint gidx = blockDim.x*blockIdx.x+idx; uint binid = blockIdx.y; uint block_start = blockDim.x*blockIdx.x; uint pid; uint splitting_condition; __shared__ Particlebin parentbin; particle my_particle; if(idx == 0) parentbin = bins[binid]; __syncthreads(); if(gidx < parentbin.nptcls) { pid = parentbin.ifirstp + gidx; /* if(pid < 256) { my_particle = particles_in[pid]; } else { printf("requesting read of pid %i by thread %i in bin %i with %i ptcls\n",pid,gidx,binid,parentbin.nptcls); } */ my_particle = particles_in[pid]; splitting_condition = compare_binids(my_particle.binindex,bin_level); splitting_list(gidx,binid) = splitting_condition; } else if(gidx < nptcls_max) { splitting_list(gidx,binid) = 0; } } __global__ void __launch_bounds__(BLOCK_SIZE,3) find_new_ids(particle* particles_in,particle* particles_out,Particlebin* bins, cudaMatrixui sums,uint nptcls_max, uint bin_level,int* nptcls_max_out) { uint idx = threadIdx.x; uint gidx = blockDim.x*blockIdx.x+idx; uint binid = blockIdx.y; uint block_start = blockDim.x*blockIdx.x; uint pid; particle my_particle; // No reason to run this block if it is more than the number of particles in the bin. 
// Might be able to avoid this with a better block mapping system if(block_start > bins[binid].nptcls) return; uint new_bin; __shared__ int nptcls_bin; int nptcls_max_out_temp; uint new_id; __shared__ Particlebin parentbin; __shared__ Particlebin subbins[2]; // We need to figure out how many particles are going to be in each sub bin // We can also use this section to setup the subbins for the next tree level if(idx == 0) { parentbin = bins[binid]; subbins[0].parentbin = &bins[binid]; subbins[1].parentbin = &bins[binid]; nptcls_bin = parentbin.nptcls; parentbin.subbins = (bins+gridDim.y)+2*binid; subbins[1].nptcls = max(0,sums(max(nptcls_bin-1,0),binid)); subbins[0].nptcls = max(0,(nptcls_bin - subbins[1].nptcls)); subbins[0].ifirstp = parentbin.ifirstp; subbins[1].ifirstp = subbins[0].ifirstp+subbins[0].nptcls; if(gidx == 0) { //printf("nptcls in bin %i = %i, with ifirstp = %i\n",2*binid,subbins[0].nptcls,subbins[0].ifirstp); //printf("nptcls in bin %i = %i, with ifirstp = %i\n",2*binid+1,subbins[1].nptcls,subbins[1].ifirstp); } if(gidx == 0) { parentbin.subbins[0] = subbins[0]; parentbin.subbins[1] = subbins[1]; nptcls_max_out_temp = max(subbins[0].nptcls,subbins[1].nptcls); atomicMax(nptcls_max_out,nptcls_max_out_temp); } } __syncthreads(); if(gidx < nptcls_bin) { pid = gidx+parentbin.ifirstp; my_particle = particles_in[pid]; new_bin = compare_binids(my_particle.binindex,bin_level); if(new_bin == 0) { new_id = gidx-sums(gidx,binid); } else { new_id = sums(gidx,binid)-1; } new_id += subbins[new_bin].ifirstp; //printf("particle %i, %i is being moved to index %i\n",pid,2*binid+new_bin,new_id); particles_out[new_id] = my_particle; } } void rebin_particles(Particlebin* bin_tree,particle* &particles,int* nptcls_max_out,int nptcls,int nbins_max) { uint bin_level = 0; uint nptcls_max = nptcls; int nbins_current = 1; dim3 cudaGridSize(1,1,1); dim3 cudaBlockSize(1,1,1); int nlevels = log(nbins_max)/log(2); bin_level = nlevels-1; Particlebin* current_bins = bin_tree; cudaMatrixui splitting_list; particle* particles_temp; CUDA_SAFE_CALL(cudaMalloc((void**)&particles_temp,nptcls*sizeof(particle))); CUDA_SAFE_CALL(cudaMemcpy(particles_temp,particles,nptcls*sizeof(particle),cudaMemcpyDeviceToDevice)); int* nptcls_max_next; CUDA_SAFE_CALL(cudaMalloc((void**)&nptcls_max_next,sizeof(int))); int* nptcls_max_next_h = (int*)malloc(sizeof(int)); particle* particles_in = particles; particle* particles_out = particles_temp; particle* particles_swap; // Main tree traversing loop for(int i=0;i<(nlevels);i++) { CUDA_SAFE_CALL(cudaMemset(nptcls_max_next,0,sizeof(int))); printf("binlevel = %i, nptcls_max = %i \n", bin_level,nptcls_max); if(nptcls_max > nptcls) { printf("error number of particles is growing\n"); return; } // Setup the kernel launch parameters cudaBlockSize.x = BLOCK_SIZE; cudaGridSize.x = (nptcls_max+cudaBlockSize.x-1)/cudaBlockSize.x; cudaGridSize.y = nbins_current; // Allocate space for the splitting list splitting_list.cudaMatrix_allocate(nptcls_max,nbins_current,1); // Populate the splitting list CUDA_SAFE_KERNEL((populate_splitting_list<<<cudaGridSize,cudaBlockSize>>> (particles_in,current_bins,splitting_list,nptcls_max,bin_level))); // Now we take the splitting list and find the cumulative sum for each bin bin_scan(splitting_list,nptcls_max,nbins_current); // Use the cumulative sum to calculate the new indices and move the particles CUDA_SAFE_KERNEL((find_new_ids<<<cudaGridSize,cudaBlockSize>>> (particles_in,particles_out,current_bins,splitting_list,nptcls_max,bin_level,nptcls_max_next))); // 
Swap particles_in and particles_out particles_swap = particles_in; particles_in = particles_out; particles_out = particles_swap; // Advance counters bin_level--; current_bins = current_bins+nbins_current; nbins_current *= 2; CUDA_SAFE_CALL(cudaMemcpy(nptcls_max_next_h,nptcls_max_next,sizeof(int),cudaMemcpyDeviceToHost)); nptcls_max = *nptcls_max_next_h; splitting_list.cudaMatrixFree(); } particles = particles_in; *nptcls_max_out = nptcls_max; CUDA_SAFE_CALL(cudaFree(nptcls_max_next)); CUDA_SAFE_CALL(cudaFree(particles_out)); } __global__ void check_sort(particle* particles,Particlebin* bins,int nptcls_max) { uint idx = threadIdx.x; uint gidx = blockDim.x*blockIdx.x+idx; uint binid = blockIdx.y; uint pidx; __shared__ Particlebin sbin; particle my_particle; if(idx == 0) { sbin = bins[binid]; // printf("nptcls in bin %i = %i, with ifirstp = %i\n",binid,sbin.nptcls,sbin.ifirstp); } __syncthreads(); pidx = gidx+sbin.ifirstp; if(gidx < sbin.nptcls) { my_particle = particles[pidx]; if(my_particle.binindex != (binid)) { printf("Error, particle %i(%i), with binindex %i is in bin %i\n",pidx,gidx,my_particle.binindex,binid); } } } __host__ void rough_test(int nptcls) { int gridsize = 4096; int max_nptclsperbin; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; particle* particles_h = (particle*)malloc(nptcls*sizeof(particle)); particle* particles_d; CUDA_SAFE_CALL(cudaMalloc((void**)&particles_d,nptcls*sizeof(particle))); Particlebin* bins; for(int i=0;i<nptcls;i++) { particles_h[i].pindex = i; particles_h[i].binindex = (rand()%gridsize); } // Figure how big the bin tree needs to be int bin_tree_size = 0; int nlevels = log(gridsize)/log(2); for(int i=0;i<(nlevels+1);i++) { bin_tree_size += (1<<i); } CUDA_SAFE_CALL(cudaMalloc((void**)&bins,bin_tree_size*sizeof(Particlebin))); // Set up the first particle bin Particlebin parent; parent.ifirstp = 0; parent.nptcls = nptcls; CUDA_SAFE_CALL(cudaMemcpy(bins,&parent,sizeof(Particlebin),cudaMemcpyHostToDevice)); // Copy Particle data to the device CUDA_SAFE_CALL(cudaMemcpy(particles_d,particles_h,nptcls*sizeof(particle),cudaMemcpyHostToDevice)); cudaEventRecord(start); // Rebin the particles rebin_particles(bins,particles_d,&max_nptclsperbin,nptcls,gridsize); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Binning Sort took %f milliseconds\n", milliseconds); // Check the sort dim3 cudaGridSize(1,gridsize,1); dim3 cudaBlockSize(BLOCK_SIZE,1,1); cudaGridSize.x = (max_nptclsperbin+cudaBlockSize.x-1)/cudaBlockSize.x; Particlebin* bottom_bins = bins+(bin_tree_size-gridsize); CUDA_SAFE_KERNEL((check_sort<<<cudaGridSize,cudaBlockSize>>> (particles_d,bottom_bins,max_nptclsperbin))); } __host__ void semi_sorted_test(int nptcls) { int gridsize = 4096; int max_nptclsperbin; int temp_index; particle* particles_h = (particle*)malloc(nptcls*sizeof(particle)); particle* particles_d; CUDA_SAFE_CALL(cudaMalloc((void**)&particles_d,nptcls*sizeof(particle))); Particlebin* bins; for(int i=0;i<nptcls;i++) { particles_h[i].pindex = i; particles_h[i].binindex = i/(nptcls/gridsize); if((rand()%1000) < 500) { temp_index = particles_h[i].binindex + rand()%5 - 2; if(temp_index < 0) { temp_index = gridsize-1; } else if(temp_index > gridsize-1) { temp_index = 0; } particles_h[i].binindex = temp_index; } } // Figure how big the bin tree needs to be int bin_tree_size = 0; int nlevels = log(gridsize)/log(2); for(int i=0;i<(nlevels+1);i++) { bin_tree_size += (1<<i); 
} CUDA_SAFE_CALL(cudaMalloc((void**)&bins,bin_tree_size*sizeof(Particlebin))); // Set up the first particle bin Particlebin parent; parent.ifirstp = 0; parent.nptcls = nptcls; CUDA_SAFE_CALL(cudaMemcpy(bins,&parent,sizeof(Particlebin),cudaMemcpyHostToDevice)); // Copy Particle data to the device CUDA_SAFE_CALL(cudaMemcpy(particles_d,particles_h,nptcls*sizeof(particle),cudaMemcpyHostToDevice)); // Rebin the particles rebin_particles(bins,particles_d,&max_nptclsperbin,nptcls,gridsize); // Check the sort dim3 cudaGridSize(1,gridsize,1); dim3 cudaBlockSize(BLOCK_SIZE,1,1); cudaGridSize.x = (max_nptclsperbin+cudaBlockSize.x-1)/cudaBlockSize.x; Particlebin* bottom_bins = bins+(bin_tree_size-gridsize); CUDA_SAFE_KERNEL((check_sort<<<cudaGridSize,cudaBlockSize>>> (particles_d,bottom_bins,max_nptclsperbin))); } __global__ void check_scan_results_kernel(cudaMatrixui g_results0,uint* g_results1,int n) { uint idx = threadIdx.x; uint gidx = blockDim.x*blockIdx.x+idx; uint result0; uint result1; if(gidx < n) { result0 = g_results0(gidx); result1 = g_results1[gidx]; if(result0!=result1) { printf("%i != %i for thread %i \n",result0,result1,gidx); } } } __host__ void scan_test(int nptcls) { cudaMatrixui sums(nptcls,1,1); uint* data_h = (uint*)malloc(nptcls*sizeof(uint)); uint* results_d1; CUDA_SAFE_CALL(cudaMalloc((void**)&results_d1,2*sizeof(uint)*nptcls)); for(int i=0;i<nptcls;i++) { data_h[i] = 1; } sums.cudaMatrixcpy(data_h,cudaMemcpyHostToDevice); bin_scan(sums,nptcls,1); for(int i=1;i<nptcls;i++) { data_h[i] += data_h[i-1]; } CUDA_SAFE_CALL(cudaMemcpy(results_d1,data_h,nptcls*sizeof(uint),cudaMemcpyHostToDevice)); int cudaGridSize = (nptcls+BLOCK_SIZE-1)/BLOCK_SIZE; int cudaBlockSize = BLOCK_SIZE; CUDA_SAFE_KERNEL((check_scan_results_kernel<<<cudaGridSize,cudaBlockSize>>>( sums,results_d1,nptcls))); sums.cudaMatrixFree(); cudaFree(results_d1); free(data_h); } int main(void) { cudaSetDevice(1); int nptcls = pow(2,24); rough_test(nptcls); return 0; }
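scan_test() above validates bin_scan against a serial CPU prefix sum. As an aside (editor's addition, not from the original pair), the same reference sequence can also be produced on the device with Thrust's inclusive_scan, which is convenient for larger inputs; this sketch assumes Thrust is available alongside the CUDA toolkit.

// Illustrative sketch only: device-side reference prefix sum via Thrust.
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdio>

int main(void) {
    const int n = 1 << 20;
    thrust::device_vector<unsigned int> data(n, 1u);   // all ones, like scan_test()
    thrust::device_vector<unsigned int> ref(n);

    // Inclusive prefix sum: ref[i] = data[0] + ... + data[i]
    thrust::inclusive_scan(data.begin(), data.end(), ref.begin());

    unsigned int last = ref.back();                     // copies one element back to the host
    printf("ref[n-1] = %u (expected %d)\n", last, n);
    return 0;
}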
580f2792dee74875e4ae0462798971cf42da288b.hip
// !!! This is a file automatically generated by hipify!!! #include "net.h" //#include "layer.h" #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } static void cudaErrCheck_(hipError_t stat, const char *file, int line) { if (stat != hipSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } static void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) { if (stat != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } static void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) { if (stat != HIPRAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } Net::Net(int nLayers, int cell_dim) { // copy the layers //input_buf_dim_=input_buf_dim; cell_dim_=cell_dim; //layers_=NULL; //input_buf_ = NULL; ///< buffers for forward pass propagate_buf_ = NULL; ///< buffers for forward pass tmp_h_fw_n = NULL; tmp_i_fw_n = NULL; // back-propagation buffer tmp_h_bw_n = NULL; tmp_i_bw_n = NULL; h_data_n = NULL; c_data_n = NULL; h_data_bw_n = NULL; c_data_bw_n = NULL; /*for(int i=0; i<nLayers; i++) { Layer* L=NULL; layers_.push_back(L); }*/ // create empty buffers //propagate_buf_.resize(NumLayers()+1); } Net::~Net() { //cudaErrCheck(hipFree(input_buf_)); cudaErrCheck(hipFree(propagate_buf_)); //2 buffers cudaErrCheck(hipFree(h_data_n)); cudaErrCheck(hipFree(c_data_n)); cudaErrCheck(hipFree(c_data_bw_n)); cudaErrCheck(hipFree(h_data_bw_n)); cudaErrCheck(hipFree(tmp_h_fw_n)); cudaErrCheck(hipFree(tmp_i_fw_n)); cudaErrCheck(hipFree(tmp_h_bw_n)); cudaErrCheck(hipFree(tmp_i_bw_n)); Destroy(); } void Net::Resize(int seqLength){ //cudaErrCheck(hipFree(input_buf_)); cudaErrCheck(hipFree(propagate_buf_)); //2 buffers cudaErrCheck(hipFree(h_data_n)); cudaErrCheck(hipFree(c_data_n)); cudaErrCheck(hipFree(c_data_bw_n)); cudaErrCheck(hipFree(h_data_bw_n)); cudaErrCheck(hipFree(tmp_h_fw_n)); cudaErrCheck(hipFree(tmp_i_fw_n)); cudaErrCheck(hipFree(tmp_h_bw_n)); cudaErrCheck(hipFree(tmp_i_bw_n)); //cudaErrCheck(hipMalloc((void**)&input_buf_, seqLength * input_buf_dim_ * sizeof(float))); cudaErrCheck(hipMalloc((void**)&propagate_buf_,2* seqLength * 2 * cell_dim_ * sizeof(float))); //2 buffers cudaErrCheck(hipMalloc((void**)&h_data_n, (seqLength+1) * cell_dim_ * sizeof(float))); cudaErrCheck(hipMalloc((void**)&c_data_n, (seqLength+1) * cell_dim_ * sizeof(float))); cudaErrCheck(hipMalloc((void**)&c_data_bw_n, (seqLength+1) * cell_dim_ * sizeof(float))); cudaErrCheck(hipMalloc((void**)&h_data_bw_n, (seqLength+1) * cell_dim_ * sizeof(float))); cudaErrCheck(hipMalloc((void**)&tmp_h_fw_n, 4 * cell_dim_ * sizeof(float))); cudaErrCheck(hipMalloc((void**)&tmp_i_fw_n, (seqLength+1) * 8 * cell_dim_ * sizeof(float))); cudaErrCheck(hipMalloc((void**)&tmp_h_bw_n, 4 * cell_dim_ * sizeof(float))); cudaErrCheck(hipMalloc((void**)&tmp_i_bw_n, (seqLength+1) * 4 * cell_dim_ * sizeof(float))); hipMemset(h_data_n, 0, cell_dim_*sizeof(float)); hipMemset(c_data_n, 0, cell_dim_*sizeof(float)); hipMemset(h_data_bw_n + seqLength*cell_dim_, 0, cell_dim_*sizeof(float)); hipMemset(c_data_bw_n + seqLength*cell_dim_, 0, cell_dim_*sizeof(float)); } void Net::Feedforward(hipblasHandle_t handle, float* in, float* out, int seqLength) { // we need at least 2 input buffers // propagate by using exactly 2 auxiliary buffers int L = 0; float 
time=0.f; hipStream_t stream_fw, stream_bw; hipStreamCreate(&stream_fw); hipStreamCreate(&stream_bw); time+=layers_[L]->Propagate(handle,stream_fw, stream_bw, in, propagate_buf_ + (L%2)*seqLength * 2 * cell_dim_, seqLength, tmp_h_fw_n, tmp_i_fw_n, tmp_h_bw_n, tmp_i_bw_n, h_data_n, c_data_n, h_data_bw_n, c_data_bw_n); hipDeviceSynchronize(); for(L++; L<NumLayers(); L++) { time+=layers_[L]->Propagate(handle, stream_fw, stream_bw, propagate_buf_ + ((L-1)%2)*seqLength*2*cell_dim_ ,propagate_buf_ + (L%2)*seqLength*2*cell_dim_, seqLength, tmp_h_fw_n, tmp_i_fw_n, tmp_h_bw_n, tmp_i_bw_n, h_data_n, c_data_n, h_data_bw_n, c_data_bw_n); hipDeviceSynchronize(); } time+=Af_l_->Propagate(handle, propagate_buf_ + ((L-1)%2)*seqLength*2*cell_dim_, out, seqLength); //printf("timing precise = %f ms", time); //layers_[L]->Propagate(propagate_buf_[(L-1)%2], out); //not commented // release the buffers we don't need anymore } int Net::OutputDim() { return layers_.back()->OutputDim(); } int Net::InputDim() { return layers_.front()->InputDim(); } Layer* Net::GetLayer(int layer) { return layers_[layer]; } void Net::SetLayer(int c, Layer *layer) { delete layers_[c]; layers_[c] = layer; } void Net::AppendLayer(Layer* dynamically_allocated_layer) { // append, layers_.push_back(dynamically_allocated_layer); } void Net::AppendAffineTransformLayer(AffineTransform *dynamically_allocated_AffineTransform){ Af_l_=dynamically_allocated_AffineTransform; } void Net::Destroy() { for(int i=0; i<NumLayers(); i++) { delete layers_[i]; } delete Af_l_; layers_.resize(0); }
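One detail worth calling out in Net::Feedforward() above: the two halves of propagate_buf_ act as ping-pong buffers selected by the parity of the layer index, with layer 0 reading the network input instead of the buffer. The host-only sketch below is an editor's addition with hypothetical sizes; it only shows the offset arithmetic, not the propagation itself.

// Illustrative sketch only: the (L % 2) ping-pong indexing used in Feedforward().
#include <cstdio>
#include <cstddef>

int main(void) {
    const size_t seqLength = 4, cell_dim = 3;
    const size_t half = seqLength * 2 * cell_dim;   // one buffer = seqLength x (2 * cell_dim)

    for (int L = 0; L < 4; ++L) {
        size_t out_off = (L % 2) * half;                       // where layer L writes
        size_t in_off  = (L == 0) ? 0 : ((L - 1) % 2) * half;  // layer 0 reads the network input instead
        printf("layer %d: reads offset %zu, writes offset %zu\n", L, in_off, out_off);
    }
    return 0;
}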
580f2792dee74875e4ae0462798971cf42da288b.cu
#include "net.h" //#include "layer.h" #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } static void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } static void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) { if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } static void curandErrCheck_(curandStatus_t stat, const char *file, int line) { if (stat != CURAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } Net::Net(int nLayers, int cell_dim) { // copy the layers //input_buf_dim_=input_buf_dim; cell_dim_=cell_dim; //layers_=NULL; //input_buf_ = NULL; ///< buffers for forward pass propagate_buf_ = NULL; ///< buffers for forward pass tmp_h_fw_n = NULL; tmp_i_fw_n = NULL; // back-propagation buffer tmp_h_bw_n = NULL; tmp_i_bw_n = NULL; h_data_n = NULL; c_data_n = NULL; h_data_bw_n = NULL; c_data_bw_n = NULL; /*for(int i=0; i<nLayers; i++) { Layer* L=NULL; layers_.push_back(L); }*/ // create empty buffers //propagate_buf_.resize(NumLayers()+1); } Net::~Net() { //cudaErrCheck(cudaFree(input_buf_)); cudaErrCheck(cudaFree(propagate_buf_)); //2 buffers cudaErrCheck(cudaFree(h_data_n)); cudaErrCheck(cudaFree(c_data_n)); cudaErrCheck(cudaFree(c_data_bw_n)); cudaErrCheck(cudaFree(h_data_bw_n)); cudaErrCheck(cudaFree(tmp_h_fw_n)); cudaErrCheck(cudaFree(tmp_i_fw_n)); cudaErrCheck(cudaFree(tmp_h_bw_n)); cudaErrCheck(cudaFree(tmp_i_bw_n)); Destroy(); } void Net::Resize(int seqLength){ //cudaErrCheck(cudaFree(input_buf_)); cudaErrCheck(cudaFree(propagate_buf_)); //2 buffers cudaErrCheck(cudaFree(h_data_n)); cudaErrCheck(cudaFree(c_data_n)); cudaErrCheck(cudaFree(c_data_bw_n)); cudaErrCheck(cudaFree(h_data_bw_n)); cudaErrCheck(cudaFree(tmp_h_fw_n)); cudaErrCheck(cudaFree(tmp_i_fw_n)); cudaErrCheck(cudaFree(tmp_h_bw_n)); cudaErrCheck(cudaFree(tmp_i_bw_n)); //cudaErrCheck(cudaMalloc((void**)&input_buf_, seqLength * input_buf_dim_ * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&propagate_buf_,2* seqLength * 2 * cell_dim_ * sizeof(float))); //2 buffers cudaErrCheck(cudaMalloc((void**)&h_data_n, (seqLength+1) * cell_dim_ * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&c_data_n, (seqLength+1) * cell_dim_ * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&c_data_bw_n, (seqLength+1) * cell_dim_ * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&h_data_bw_n, (seqLength+1) * cell_dim_ * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&tmp_h_fw_n, 4 * cell_dim_ * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&tmp_i_fw_n, (seqLength+1) * 8 * cell_dim_ * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&tmp_h_bw_n, 4 * cell_dim_ * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&tmp_i_bw_n, (seqLength+1) * 4 * cell_dim_ * sizeof(float))); cudaMemset(h_data_n, 0, cell_dim_*sizeof(float)); cudaMemset(c_data_n, 0, cell_dim_*sizeof(float)); cudaMemset(h_data_bw_n + seqLength*cell_dim_, 0, cell_dim_*sizeof(float)); cudaMemset(c_data_bw_n + seqLength*cell_dim_, 0, cell_dim_*sizeof(float)); } void Net::Feedforward(cublasHandle_t handle, float* in, float* out, int seqLength) { // we need at least 2 input buffers // propagate by using exactly 2 auxiliary buffers int L = 0; float time=0.f; cudaStream_t stream_fw, 
stream_bw; cudaStreamCreate(&stream_fw); cudaStreamCreate(&stream_bw); time+=layers_[L]->Propagate(handle,stream_fw, stream_bw, in, propagate_buf_ + (L%2)*seqLength * 2 * cell_dim_, seqLength, tmp_h_fw_n, tmp_i_fw_n, tmp_h_bw_n, tmp_i_bw_n, h_data_n, c_data_n, h_data_bw_n, c_data_bw_n); cudaDeviceSynchronize(); for(L++; L<NumLayers(); L++) { time+=layers_[L]->Propagate(handle, stream_fw, stream_bw, propagate_buf_ + ((L-1)%2)*seqLength*2*cell_dim_ ,propagate_buf_ + (L%2)*seqLength*2*cell_dim_, seqLength, tmp_h_fw_n, tmp_i_fw_n, tmp_h_bw_n, tmp_i_bw_n, h_data_n, c_data_n, h_data_bw_n, c_data_bw_n); cudaDeviceSynchronize(); } time+=Af_l_->Propagate(handle, propagate_buf_ + ((L-1)%2)*seqLength*2*cell_dim_, out, seqLength); //printf("timing precise = %f ms", time); //layers_[L]->Propagate(propagate_buf_[(L-1)%2], out); //not commented // release the buffers we don't need anymore } int Net::OutputDim() { return layers_.back()->OutputDim(); } int Net::InputDim() { return layers_.front()->InputDim(); } Layer* Net::GetLayer(int layer) { return layers_[layer]; } void Net::SetLayer(int c, Layer *layer) { delete layers_[c]; layers_[c] = layer; } void Net::AppendLayer(Layer* dynamically_allocated_layer) { // append, layers_.push_back(dynamically_allocated_layer); } void Net::AppendAffineTransformLayer(AffineTransform *dynamically_allocated_AffineTransform){ Af_l_=dynamically_allocated_AffineTransform; } void Net::Destroy() { for(int i=0; i<NumLayers(); i++) { delete layers_[i]; } delete Af_l_; layers_.resize(0); }
98efe5fa6f50184499f14ec9d7c77c70d7309ab9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "add_thread.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; hipMalloc(&a, XSIZE*YSIZE); int *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( add_thread), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( add_thread), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( add_thread), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
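The benchmark above pads XSIZE and YSIZE up to the next multiple of the block dimensions with increment loops before sizing the grid. The same value is usually obtained with a ceiling division; the sketch below (editor's addition, not part of the original file) compares the two for one of the benchmark's matrix/block combinations.

// Illustrative sketch only: closed-form round-up versus the benchmark's while-loop.
#include <cstdio>

static int round_up(int n, int block) {
    return ((n + block - 1) / block) * block;     // smallest multiple of block >= n
}

int main(void) {
    const int XSIZE = 1232, BLOCKX = 24;
    int iXSIZE = XSIZE;
    while (iXSIZE % BLOCKX != 0) iXSIZE++;        // the benchmark's loop
    printf("loop: %d  closed form: %d  blocks: %d\n",
           iXSIZE, round_up(XSIZE, BLOCKX), round_up(XSIZE, BLOCKX) / BLOCKX);
    return 0;
}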
98efe5fa6f50184499f14ec9d7c77c70d7309ab9.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "add_thread.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); int *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); add_thread<<<gridBlock,threadBlock>>>(a,b,c); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { add_thread<<<gridBlock,threadBlock>>>(a,b,c); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { add_thread<<<gridBlock,threadBlock>>>(a,b,c); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b3506964baeeb7f3e9436e51e6c7e8c54a7d94a7.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include "hipcub/hipcub.hpp" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { // Generic implementation - CUDA will handle the right function to call for us void CUDAContext::CopyBytesAsync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // TODO: verify that the CUDA handles copy from device to device correctly // even without SetDevice() // TODO: verify whether source or dest device should be a priority in picking // the stream // NB: right now the cross-device copy logic is invoked only in the contexts // when surrounding code explicitly manages data dependencies and sets up // events, so it's fine. In order to make it a standalone function proper // synchronization between stream is required int gpu_id = 0; if (dst_device.type() == DeviceType::CUDA) { gpu_id = dst_device.index(); } else if (src_device.type() == DeviceType::CUDA) { gpu_id = src_device.index(); } else { LOG(FATAL) << "shouldn't be called with non-cuda device"; } CUDA_ENFORCE(hipMemcpyAsync( dst, src, nbytes, hipMemcpyDefault, CUDAContext::getCudaObjects().GetStream(gpu_id))); } void CUDAContext::CopyBytesSync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // This emulates Caffe2 original behavior where sync copy doesn't change the // device. It's probably better for clarity to switch to the target device // explicitly here, but in the worst case CUDA would sync for us. 
// TODO: change it to HIPGuardMasqueradingAsCUDA CUDAContext context(-1); // take current device CUDA_ENFORCE(hipMemcpyAsync( dst, src, nbytes, hipMemcpyDefault, context.cuda_stream())); // destructor of context synchronizes } // For the CPU context, we also allow a (probably expensive) function // to copy the data from a cuda context. Inside the function, we create // a temporary CUDAContext object to carry out the copy. From the caller's // side, these functions are synchronous with respect to the host, similar // to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call. template <> inline void CPUContext::CopyBytes<CUDAContext, CPUContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(src)); context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst); } template <> inline void CPUContext::CopyBytes<CPUContext, CUDAContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(dst)); context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst); } } // namespace caffe2 namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is guarded by the CUDAContext::mutex. static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. 
if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } C10_LOG_API_USAGE_ONCE("caffe2.init.cuda"); // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), C10_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", C10_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile."); for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) { HIPGuardMasqueradingAsCUDA g(i); // Enable peer access. const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = ::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for hipDeviceEnablePeerAccess that should always be // zero currently. // It is ok if peer access is already enabled... hipError_t err = hipDeviceEnablePeerAccess(j, 0); if ((err != hipErrorPeerAccessAlreadyEnabled) && (err != hipSuccess)) { CAFFE_THROW(hipGetErrorString(err)); } hipGetLastError(); // reset cuda error code } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new hipcub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } /** * An allocator that does the CPU memory allocation with pinned memory. * * This is needed because if we want to do any asynchronous cuda memcpy, * the underlying CPU memory also needs to be allocated into pinned memory * space. As a result, whenever Caffe2 is built with GPU and there is * GPU present during runtime, at global initialization time we will set * the CPU memory allocator to allocate pinned memory. * * NB: This behavior is probably too aggressive. We should consider asking users * to do on-demand memory pinning (like exposed in PyTorch APIs) instead. 
*/ struct CAFFE2_CUDA_API PinnedCPUAllocator final : public at::Allocator { PinnedCPUAllocator() { baseAllocator_ = GetDefaultCPUAllocator(); } ~PinnedCPUAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { if (nbytes == 0) { // replicate c10::alloc_cpu behavior - return nullptr return {nullptr, nullptr, &Delete, at::Device(CPU)}; } void* data; at::DataPtr data_ptr; std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { at::DeleterFnPtr expected_deleter = baseAllocator_->raw_deleter(); data_ptr = baseAllocator_->allocate(nbytes); data = data_ptr.get(); CAFFE_ENFORCE(data); CUDA_ENFORCE(hipHostRegister(data, nbytes, hipHostRegisterDefault)); CAFFE_ENFORCE( data_ptr.compare_exchange_deleter(expected_deleter, &Delete), "Failed to swap deleter (already swapped?)"); } else { CUDA_ENFORCE(hipHostMalloc(&data, nbytes)); profiledCPUMemoryReporter().New(data, nbytes); data_ptr = {data, data, &Delete, at::Device(CPU)}; } memset(data, 0, nbytes); return data_ptr; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* data) { if (!data) { return; } // Caffe2 uses a lazy way to figure out if one is actually going to use GPUs // or not. If a CUDAContext::New() call is made, inside the CUDAContext // function we will switch the cpu side allocator to a PinnedCPUAllocator. // But, if one calls CPUContext::New() before any cuda allocations, // PinnedCPUAllocator can still delete the corresponding memory. std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { CUDA_ENFORCE(hipHostUnregister(data)); GetDefaultCPUAllocator()->raw_deleter()(data); } else { hipError_t err = hipHostFree(data); profiledCPUMemoryReporter().Delete(data); if (err == hipErrorInvalidValue) { free(data); // Calling hipGetLastError will reset the cuda error. hipError_t _err = hipGetLastError(); } else { // For all other errors, still do a cuda check. CUDA_ENFORCE(err); } } } at::Allocator* baseAllocator_; }; static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "hipHostMalloc. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; // If CUDA is enabled, using CPU allocators other than PinnedCPUAllocator // will cause memory corruptions. Therefore, we need to set the priority // to highest to avoid being overwritten. SetCPUAllocator( &g_pinned_cpu_alloc, std::numeric_limits<uint8_t>::max() /* priority */); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. 
static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(DeviceIndex gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } CUDAContext::~CUDAContext() { try { if (curand_generator_) { CURAND_CHECK(hiprandDestroyGenerator(curand_generator_)); } // CUDAContext is used in 2 cases now: // - long-lived instance inside OperatorBase in which case what happens in // destructor doesn't really matter // - short-lived on-the-fly instances that are utilized as HIPGuardMasqueradingAsCUDA - in // this case there's only one stream id (passed to SwitchToDevice) and // it's preferrable to synchronize in the destructor FinishDeviceComputation(); } catch (const std::exception& e) { LOG(ERROR) << "Encountered following in " << __FUNCTION__ << ": " << e.what(); } } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = ::max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: if (nbytes != 0) { CUDA_ENFORCE(hipMalloc(&ptr, nbytes)); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::CUB: if (nbytes != 0) { CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); } g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::THC: { // The reason we have this stream guard here is to preserve // the historical behavior of the 'thc' allocator in Caffe2, // which is to put all allocations on the same (default) // stream. This behavior is morally wrong (since passing // allocations between streams allows for the possibility // of you handing out some memory that an old stream // is still working on), but it doesn't seem to cause issues // in Caffe2 today. Our hypothesis for why this is the case // is that Caffe2 doesn't really do very many allocations // on the fly; instead they allocate once and then reuse // the allocations for the whole program. In this case, // the hazard is avoided. // // We intend to remove this stream guard, but the benefit // to putting all allocations on the same stream is it // reduces per-stream fragmentation, and this helps // some models that are currently running with the thc // allocator fit in memory. We will need to find some // way of resolving this problem. hip::HIPStreamGuardMasqueradingAsCUDA g( Stream( Stream::DEFAULT, Device(kCUDA, CaffeCudaGetDevice()) )); ptr = hip::HIPCachingAllocator::raw_alloc(nbytes); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple hipFree. hipError_t error = hipFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. 
if (error != hipSuccess && error != hipErrorDeinitialized) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << hipGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { hip::HIPCachingAllocator::raw_delete(ptr); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); } // namespace caffe2 namespace at { REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CPU, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CPU, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); } // namespace at
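The CUB memory-pool path above delegates to hipcub::CachingDeviceAllocator (cub::CachingDeviceAllocator in the CUDA original that follows). For reference, a minimal standalone use of the CUDA-side allocator looks roughly like the sketch below; this is an editor's addition, the bin parameters are arbitrary, and the include assumes the CUB that ships with the CUDA toolkit.

// Illustrative sketch only: standalone use of cub::CachingDeviceAllocator.
#include <cub/util_allocator.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main(void) {
    // bin_growth = 8, min_bin = 3, max_bin = 10, max_cached_bytes = 256 MB
    cub::CachingDeviceAllocator allocator(8, 3, 10, size_t(256) * 1024 * 1024);

    void* d_ptr = nullptr;
    if (allocator.DeviceAllocate(&d_ptr, 1 << 20) != cudaSuccess) {   // 1 MB block
        fprintf(stderr, "DeviceAllocate failed\n");
        return 1;
    }
    // ... launch kernels that use d_ptr on the current device ...
    allocator.DeviceFree(d_ptr);        // returns the block to the pool, not to cudaFree
    return 0;
}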
b3506964baeeb7f3e9436e51e6c7e8c54a7d94a7.cu
#include <algorithm> #include <atomic> #include <cstdlib> #include <string> #include <unordered_map> #include <c10/cuda/CUDACachingAllocator.h> #include "cub/util_allocator.cuh" // Needed to be included first to check the CAFFE2_USE_CUDNN macros. #include "caffe2/core/macros.h" #include "caffe2/core/asan.h" #include "caffe2/core/blob_stats.h" #ifdef CAFFE2_USE_CUDNN #include "caffe2/core/common_cudnn.h" #endif // CAFFE2_USE_CUDNN #include "caffe2/core/context_gpu.h" #include "caffe2/core/init.h" #include "caffe2/core/logging.h" #include "caffe2/core/tensor.h" #include "caffe2/utils/string_utils.h" C10_DEFINE_string( caffe2_cuda_memory_pool, "", "Sets the memory pool used by caffe2. Possible values are " "none, cnmem, thc and cub."); // For description of CUB caching allocator configuration, see // https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html C10_DEFINE_int( caffe2_cub_bin_growth, 8, "If using cub as the memory allocator, sets the growth of bins " "used by the cub pool."); C10_DEFINE_int( caffe2_cub_min_bin, 3, "If using cub as the memory allocator, sets the min number of " "bins."); C10_DEFINE_int( caffe2_cub_max_bin, 10, "If using cub as the memory allocator, sets the max number of " "bins."); C10_DEFINE_int( caffe2_cub_max_managed_mb, 10 * 1024, "If using cub as the memory allocators, sets the maximum amount " "of memory managed in gigabytes"); C10_DEFINE_bool( caffe2_cub_print_allocation_events, false, "If true CachingDeviceAllocator will print allocation and deallocation " "events to stdout."); C10_DEFINE_bool( caffe2_gpu_memory_tracking, false, "If set, logs changes in GPU memory allocations"); C10_DEFINE_int( caffe2_gpu_memory_report_interval_mb, 128, "The threshold in MB on how frequently to report memory changes"); namespace at { REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext); } // namespace at namespace caffe2 { // Generic implementation - CUDA will handle the right function to call for us void CUDAContext::CopyBytesAsync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // TODO: verify that the CUDA handles copy from device to device correctly // even without SetDevice() // TODO: verify whether source or dest device should be a priority in picking // the stream // NB: right now the cross-device copy logic is invoked only in the contexts // when surrounding code explicitly manages data dependencies and sets up // events, so it's fine. In order to make it a standalone function proper // synchronization between stream is required int gpu_id = 0; if (dst_device.type() == DeviceType::CUDA) { gpu_id = dst_device.index(); } else if (src_device.type() == DeviceType::CUDA) { gpu_id = src_device.index(); } else { LOG(FATAL) << "shouldn't be called with non-cuda device"; } CUDA_ENFORCE(cudaMemcpyAsync( dst, src, nbytes, cudaMemcpyDefault, CUDAContext::getCudaObjects().GetStream(gpu_id))); } void CUDAContext::CopyBytesSync( size_t nbytes, const void* src, Device src_device, void* dst, Device dst_device) { // This emulates Caffe2 original behavior where sync copy doesn't change the // device. It's probably better for clarity to switch to the target device // explicitly here, but in the worst case CUDA would sync for us. 
// TODO: change it to CUDAGuard CUDAContext context(-1); // take current device CUDA_ENFORCE(cudaMemcpyAsync( dst, src, nbytes, cudaMemcpyDefault, context.cuda_stream())); // destructor of context synchronizes } // For the CPU context, we also allow a (probably expensive) function // to copy the data from a cuda context. Inside the function, we create // a temporary CUDAContext object to carry out the copy. From the caller's // side, these functions are synchronous with respect to the host, similar // to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call. template <> inline void CPUContext::CopyBytes<CUDAContext, CPUContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(src)); context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst); } template <> inline void CPUContext::CopyBytes<CPUContext, CUDAContext>( size_t nbytes, const void* src, void* dst) { CUDAContext context(GetGPUIDForPointer(dst)); context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst); } } // namespace caffe2 namespace caffe2 { ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() { static thread_local ThreadLocalCUDAObjects cuda_objects_; return cuda_objects_; } // TODO(jiayq): these variables shouldn't be currently accessed during static // initialization. We should consider moving them to a Mayer's singleton to // be totally safe against SIOF. // Static global variables for setting up the memory pool. CudaMemoryPoolType g_cuda_memory_pool_type; std::unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator; // an unordered map that holds the map from the cuda memory pointer to the // device id that it is allocated from. This is used in the cuda memory pool // cases, where we need the device id to carry out the deletion. // Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but // that is usually quite slow. We might want to benchmark the speed difference // though. // Note(jiayq): another alternate approach is to augment the Tensor class that // would allow one to record the device id. However, this does not address any // non-tensor allocation and deallocation. // Ideally, a memory pool should already have the device id information, as // long as we are using UVA (as of CUDA 5 and later) so the addresses are // unique. static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation; // Data structures for optional memory tracking. Access to these structures // is guarded by the CUDAContext::mutex. static std::unordered_map<void*, long> g_size_map; static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0); static long g_total_mem = 0; static long g_last_rep = 0; CudaMemoryPoolType GetCudaMemoryPoolType() { return g_cuda_memory_pool_type; } /////////////////////////////////////////////////////////////////////////////// // A wrapper to allow us to lazily initialize all cuda environments that Caffe // uses. This gets done the first time a caffe2::CUDAContext::New() gets called // which is probably the decisive indication that this caffe2 run is going to // use GPUs. We avoid cuda initialization with core/init.h functionalities so // that we have minimal resource impact in case we will need to run multiple // caffe2 instances on a GPU machine. /////////////////////////////////////////////////////////////////////////////// static void Caffe2InitializeCuda() { // If the current run does not have any cuda devices, do nothing. 
if (!HasCudaGPU()) { VLOG(1) << "No cuda gpu present. Skipping."; return; } C10_LOG_API_USAGE_ONCE("caffe2.init.cuda"); // Check if the number of GPUs matches the expected compile-time max number // of GPUs. CAFFE_ENFORCE_LE( NumCudaDevices(), C10_COMPILE_TIME_MAX_GPUS, "Number of CUDA devices on the machine is larger than the compiled " "max number of gpus expected (", C10_COMPILE_TIME_MAX_GPUS, "). Increase that and recompile."); for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) { CUDAGuard g(i); // Enable peer access. const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE; const int peer_end = std::min( NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE); VLOG(1) << "Enabling peer access within group #" << peer_group << ", from gpuid " << peer_start << " to " << peer_end - 1 << ", for gpuid " << i << "."; for (int j = peer_start; j < peer_end; ++j) { if (i == j) continue; int can_access; CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j)); if (can_access) { VLOG(1) << "Enabling peer access from " << i << " to " << j; // Note: just for future reference, the 0 here is not a gpu id, it is // a reserved flag for cudaDeviceEnablePeerAccess that should always be // zero currently. // It is ok if peer access is already enabled... cudaError_t err = cudaDeviceEnablePeerAccess(j, 0); if ((err != cudaErrorPeerAccessAlreadyEnabled) && (err != cudaSuccess)) { CAFFE_THROW(cudaGetErrorString(err)); } cudaGetLastError(); // reset cuda error code } } } #ifdef CAFFE2_USE_CUDNN // Check the versions of cuDNN that were compiled and linked with are compatible CheckCuDNNVersions(); #endif // CAFFE2_USE_CUDNN } static void SetUpCub() { VLOG(1) << "Setting up cub memory pool."; // Sets up the cub memory pool try { g_cub_allocator.reset(new cub::CachingDeviceAllocator( FLAGS_caffe2_cub_bin_growth, FLAGS_caffe2_cub_min_bin, FLAGS_caffe2_cub_max_bin, size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L, false, FLAGS_caffe2_cub_print_allocation_events)); } catch (...) { CAFFE_THROW("Some error happened at cub initialization."); } VLOG(1) << "Done setting up cub memory pool."; } static void Caffe2SetCUDAMemoryPool() { if (FLAGS_caffe2_cuda_memory_pool == "" || FLAGS_caffe2_cuda_memory_pool == "none") { g_cuda_memory_pool_type = CudaMemoryPoolType::NONE; } else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") { CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. " "This error message may go away in the future."); } else if (FLAGS_caffe2_cuda_memory_pool == "cub") { // Sets up cub. g_cuda_memory_pool_type = CudaMemoryPoolType::CUB; SetUpCub(); } else if (FLAGS_caffe2_cuda_memory_pool == "thc") { g_cuda_memory_pool_type = CudaMemoryPoolType::THC; } else { CAFFE_THROW( "Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool); } } /** * An allocator that does the CPU memory allocation with pinned memory. * * This is needed because if we want to do any asynchronous cuda memcpy, * the underlying CPU memory also needs to be allocated into pinned memory * space. As a result, whenever Caffe2 is built with GPU and there is * GPU present during runtime, at global initialization time we will set * the CPU memory allocator to allocate pinned memory. * * NB: This behavior is probably too aggressive. We should consider asking users * to do on-demand memory pinning (like exposed in PyTorch APIs) instead. 
*/ struct CAFFE2_CUDA_API PinnedCPUAllocator final : public at::Allocator { PinnedCPUAllocator() { baseAllocator_ = GetDefaultCPUAllocator(); } ~PinnedCPUAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { if (nbytes == 0) { // replicate c10::alloc_cpu behavior - return nullptr return {nullptr, nullptr, &Delete, at::Device(CPU)}; } void* data; at::DataPtr data_ptr; std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { at::DeleterFnPtr expected_deleter = baseAllocator_->raw_deleter(); data_ptr = baseAllocator_->allocate(nbytes); data = data_ptr.get(); CAFFE_ENFORCE(data); CUDA_ENFORCE(cudaHostRegister(data, nbytes, cudaHostRegisterDefault)); CAFFE_ENFORCE( data_ptr.compare_exchange_deleter(expected_deleter, &Delete), "Failed to swap deleter (already swapped?)"); } else { CUDA_ENFORCE(cudaMallocHost(&data, nbytes)); profiledCPUMemoryReporter().New(data, nbytes); data_ptr = {data, data, &Delete, at::Device(CPU)}; } memset(data, 0, nbytes); return data_ptr; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* data) { if (!data) { return; } // Caffe2 uses a lazy way to figure out if one is actually going to use GPUs // or not. If a CUDAContext::New() call is made, inside the CUDAContext // function we will switch the cpu side allocator to a PinnedCPUAllocator. // But, if one calls CPUContext::New() before any cuda allocations, // PinnedCPUAllocator can still delete the corresponding memory. std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (IsNUMAEnabled()) { CUDA_ENFORCE(cudaHostUnregister(data)); GetDefaultCPUAllocator()->raw_deleter()(data); } else { cudaError_t err = cudaFreeHost(data); profiledCPUMemoryReporter().Delete(data); if (err == cudaErrorInvalidValue) { free(data); // Calling cudaGetLastError will reset the cuda error. cudaError_t _err = cudaGetLastError(); } else { // For all other errors, still do a cuda check. CUDA_ENFORCE(err); } } } at::Allocator* baseAllocator_; }; static PinnedCPUAllocator g_pinned_cpu_alloc; // An initialization function that sets the CPU side to use pinned cpu // allocator. void Caffe2UsePinnedCPUAllocator() { #if CAFFE2_ASAN_ENABLED // Note(jiayq): for more details, see // https://github.com/google/sanitizers/issues/629 LOG(WARNING) << "There are known issues between address sanitizer and " "cudaMallocHost. As a result, caffe2 will not enable pinned " "memory allocation in asan mode. If you are expecting any " "behavior that depends on asan, be advised that it is not " "turned on."; #else if (!HasCudaGPU()) { VLOG(1) << "No GPU present. I won't use pinned allocator then."; return; } VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator."; // If CUDA is enabled, using CPU allocators other than PinnedCPUAllocator // will cause memory corruptions. Therefore, we need to set the priority // to highest to avoid being overwritten. SetCPUAllocator( &g_pinned_cpu_alloc, std::numeric_limits<uint8_t>::max() /* priority */); #endif } // Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to // detect the first hint that this Caffe2 run is going to use GPU: either // CUDAContext is initialized or CUDAContext::New is called. It then runs // all the related cuda initialization functions. namespace { struct Caffe2CudaInitializerHelper { Caffe2CudaInitializerHelper() { // We cannot use bool because nvcc changes bool to __nv_bool which does // not have a std::atomic instantiation. 
static std::atomic<char> first_call(1); if (first_call.fetch_and((char)0)) { Caffe2InitializeCuda(); Caffe2SetCUDAMemoryPool(); Caffe2UsePinnedCPUAllocator(); } } }; } // namespace /** * A utility function to rectify the gpu id. If the context specifies the * gpu id to be -1, it means that we will just use the current gpu id when * the function is being called. */ static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) { return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id; } CUDAContext::CUDAContext(DeviceIndex gpu_id) : gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; } CUDAContext::CUDAContext(const DeviceOption& option) : gpu_id_( option.has_device_id() ? RectifyGPUID(option.device_id()) : CaffeCudaGetDevice()), random_seed_( option.has_random_seed() ? option.random_seed() : RandomNumberSeed()) { static Caffe2CudaInitializerHelper g_cuda_initializer_; DCHECK_EQ(option.device_type(), PROTO_CUDA); } CUDAContext::~CUDAContext() { try { if (curand_generator_) { CURAND_CHECK(curandDestroyGenerator(curand_generator_)); } // CUDAContext is used in 2 cases now: // - long-lived instance inside OperatorBase in which case what happens in // destructor doesn't really matter // - short-lived on-the-fly instances that are utilized as CUDAGuard - in // this case there's only one stream id (passed to SwitchToDevice) and // it's preferrable to synchronize in the destructor FinishDeviceComputation(); } catch (const std::exception& e) { LOG(ERROR) << "Encountered following in " << __FUNCTION__ << ": " << e.what(); } } // shared mutex to lock out alloc / free during NCCL launches std::mutex& CUDAContext::mutex() { static std::mutex m; return m; } std::vector<long> CUDAContext::TotalMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_total_by_gpu_map; } std::vector<long> CUDAContext::MaxMemoryByGpu() { std::lock_guard<std::mutex> lock(CUDAContext::mutex()); CAFFE_ENFORCE( FLAGS_caffe2_gpu_memory_tracking, "Pass --caffe2_gpu_memory_tracking to enable memory stats"); return g_max_by_gpu_map; } namespace { void TrackMemoryAlloc(size_t nbytes) { int this_gpu = CaffeCudaGetDevice(); g_total_by_gpu_map[this_gpu] += nbytes; g_max_by_gpu_map[this_gpu] = std::max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]); g_total_mem += nbytes; if (g_total_mem - g_last_rep > FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) { for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) { long t = g_total_by_gpu_map[gpu]; long max_t = g_max_by_gpu_map[gpu]; if (max_t > 0) { if (max_t != t) { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB" << " (max: " << max_t / 1024 / 1024 << " MB)"; } else { VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"; } } } VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB"; g_last_rep = g_total_mem; } } } struct DefaultCUDAAllocator final : public at::Allocator { DefaultCUDAAllocator() {} ~DefaultCUDAAllocator() override {} at::DataPtr allocate(size_t nbytes) const override { // Lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); // A one-time caffe2 cuda initializer. 
static Caffe2CudaInitializerHelper g_cuda_initializer_; void* ptr = nullptr; if (FLAGS_caffe2_gpu_memory_tracking) { TrackMemoryAlloc(nbytes); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: if (nbytes != 0) { CUDA_ENFORCE(cudaMalloc(&ptr, nbytes)); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::CUB: if (nbytes != 0) { CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes)); } g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); VLOG(2) << "CUB allocating pointer " << ptr << " on device " << CaffeCudaGetDevice(); if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; case CudaMemoryPoolType::THC: { // The reason we have this stream guard here is to preserve // the historical behavior of the 'thc' allocator in Caffe2, // which is to put all allocations on the same (default) // stream. This behavior is morally wrong (since passing // allocations between streams allows for the possibility // of you handing out some memory that an old stream // is still working on), but it doesn't seem to cause issues // in Caffe2 today. Our hypothesis for why this is the case // is that Caffe2 doesn't really do very many allocations // on the fly; instead they allocate once and then reuse // the allocations for the whole program. In this case, // the hazard is avoided. // // We intend to remove this stream guard, but the benefit // to putting all allocations on the same stream is it // reduces per-stream fragmentation, and this helps // some models that are currently running with the thc // allocator fit in memory. We will need to find some // way of resolving this problem. cuda::CUDAStreamGuard g( Stream( Stream::DEFAULT, Device(kCUDA, CaffeCudaGetDevice()) )); ptr = cuda::CUDACachingAllocator::raw_alloc(nbytes); } if (FLAGS_caffe2_gpu_memory_tracking) { g_size_map[ptr] = nbytes; g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice(); } return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())}; } at::DeleterFnPtr raw_deleter() const override { return &Delete; } private: static void Delete(void* ptr) { // lock the mutex std::lock_guard<std::mutex> lock(CUDAContext::mutex()); if (FLAGS_caffe2_gpu_memory_tracking) { auto sz_it = g_size_map.find(ptr); DCHECK(sz_it != g_size_map.end()); auto aff_it = g_cuda_device_affiliation.find(ptr); DCHECK(aff_it != g_cuda_device_affiliation.end()); g_total_mem -= sz_it->second; g_total_by_gpu_map[aff_it->second] -= sz_it->second; g_size_map.erase(sz_it); } switch (g_cuda_memory_pool_type) { case CudaMemoryPoolType::NONE: { // If memory pool is not set up, use simple cudaFree. cudaError_t error = cudaFree(ptr); // For some reason, in Python runtime we sometimes delete a data pointer // after the cuda runtime exits - this is odd but is probably caused by // a static workspace that pycaffe2 uses, and the destruction got // entangled in some race condition. Anyway, since cuda runtime is // exiting anyway, we will not need to worry about memory leak, so we // basically ignore it. This is definitely not ideal but works for now. 
if (error != cudaSuccess && error != cudaErrorCudartUnloading) { LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " << cudaGetErrorString(error); } if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } case CudaMemoryPoolType::CUB: { auto it = g_cuda_device_affiliation.find(ptr); DCHECK(it != g_cuda_device_affiliation.end()); VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second; CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr)); g_cuda_device_affiliation.erase(it); break; } case CudaMemoryPoolType::THC: { cuda::CUDACachingAllocator::raw_delete(ptr); if (FLAGS_caffe2_gpu_memory_tracking) { g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr)); } break; } } } }; static DefaultCUDAAllocator g_cuda_alloc; REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc); } // namespace caffe2 namespace at { REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CUDA, DeviceType::CPU, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); REGISTER_COPY_BYTES_FUNCTION( DeviceType::CPU, DeviceType::CUDA, caffe2::CUDAContext::CopyBytesSync, caffe2::CUDAContext::CopyBytesAsync); } // namespace at
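The PinnedCPUAllocator in the Caffe2 file above exists because asynchronous cudaMemcpyAsync transfers only overlap with other work when the host buffer is page-locked. A minimal standalone sketch of that idea in plain CUDA, independent of Caffe2's allocator machinery (buffer names and sizes are illustrative, not taken from the file):

#include <cuda_runtime.h>

int main() {
  const size_t nbytes = 1 << 20;
  float *pinned_host = nullptr;
  float *device_buf = nullptr;

  // Page-locked host allocation: required for H2D/D2H copies to run truly asynchronously.
  if (cudaMallocHost((void**)&pinned_host, nbytes) != cudaSuccess) return 1;
  if (cudaMalloc((void**)&device_buf, nbytes) != cudaSuccess) return 1;

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // With pinned memory this copy can overlap with work queued on other streams;
  // with pageable memory the runtime falls back to a synchronizing staging copy.
  cudaMemcpyAsync(device_buf, pinned_host, nbytes, cudaMemcpyHostToDevice, stream);
  cudaStreamSynchronize(stream);

  cudaStreamDestroy(stream);
  cudaFree(device_buf);
  cudaFreeHost(pinned_host);  // pinned allocations must be released with cudaFreeHost
  return 0;
}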
2ee27b8358ac70bdf6284beca1bf095c82b2e3f8.hip
// !!! This is a file automatically generated by hipify!!! /* * fastHog.cpp * * Created on: May 14, 2009 * Author: viprad */ #include <stdio.h> #include <stdlib.h> #include <sys/mman.h> #include "HOGEngine.h" #include "HOGEngineDevice.h" #include "HOGImage.h" #include "Others/persondetectorwt.tcc" extern "C" { #include "../../gpusync.h" } HOGImage image; hipStream_t stream; char file_name[] = "../Samples/Copy/FastHOG/Files/Images/testImage.bmp"; void init(int sync_level) { switch (sync_level) { case 0: hipSetDeviceFlags(hipDeviceScheduleSpin); break; case 1: hipSetDeviceFlags(hipDeviceScheduleYield); break; case 2: hipSetDeviceFlags(hipDeviceScheduleBlockingSync); break; default: printf("Unknown sync level: %d\n", sync_level); break; } if (!HOGImageFile(file_name, &image)) { printf("Unable to load image file.\n"); exit(1); } if (hipSetDevice(0) != hipSuccess) { printf("Unable to set cuda device.\n"); exit(1); } if (hipFree(0) != hipSuccess) { printf("Error running hipFree(0).\n"); exit(1); } // Pin code if(!mlockall(MCL_CURRENT | MCL_FUTURE)) { fprintf(stderr, "Failed to lock code pages.\n"); exit(EXIT_FAILURE); } if (hipStreamCreate(&stream) != hipSuccess) { printf("Unable to create cuda stream.\n"); exit(1); } InitializeHOG(image.width, image.height, PERSON_LINEAR_BIAS, PERSON_WEIGHT_VEC, PERSON_WEIGHT_VEC_LENGTH); } void mallocCPU(int numElements) { HostAllocHOGEngineDeviceMemory(); } void mallocGPU(int numElements) { DeviceAllocHOGEngineDeviceMemory(); } void copyin(int numElements) { CopyInHOGEngineDevice(); } void exec(int numElements) { // There are still memcpys to the device in HOGScale and HOGPadding--they // may require more work to get rid of because they seem to rely on variables // determined during the execution phase. BeginProcess(&image, -1, -1, -1, -1, -1.0f, -1.0f); } void copyout() { EndProcess(); } void freeGPU() { DeviceFreeHOGEngineDeviceMemory(); } void freeCPU() { HostFreeHOGEngineDeviceMemory(); } void finish() { FinalizeHOG(); hipStreamSynchronize(stream); hipStreamDestroy(stream); DestroyHOGImage(&image); if (hipDeviceReset() != hipSuccess) { printf("Failed to reset the device.\n"); exit(1); } } /* int main(void) { init(0); mallocCPU(0); mallocGPU(0); copyin(0); exec(0); copyout(); freeGPU(); freeCPU(); finish(); return 0; } */
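The sync_level switch in init() above maps 0/1/2 onto the runtime's spin, yield, and blocking-sync scheduling policies, which trade host CPU usage against wake-up latency while the host waits on the GPU. A small sketch of the same mapping written against the CUDA runtime (the apply_sync_level helper is a made-up name, used here only for illustration):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper mirroring the sync_level convention used above:
// 0 = spin (lowest latency, burns a CPU core), 1 = yield, 2 = block on a sync primitive.
static cudaError_t apply_sync_level(int sync_level) {
  unsigned int flags;
  switch (sync_level) {
    case 0: flags = cudaDeviceScheduleSpin; break;
    case 1: flags = cudaDeviceScheduleYield; break;
    case 2: flags = cudaDeviceScheduleBlockingSync; break;
    default: return cudaErrorInvalidValue;
  }
  // Set the flags before the first call that creates the device context,
  // matching the ordering used in init() above.
  return cudaSetDeviceFlags(flags);
}

int main(int argc, char **argv) {
  int level = (argc > 1) ? std::atoi(argv[1]) : 2;
  if (apply_sync_level(level) != cudaSuccess) {
    fprintf(stderr, "Unknown or rejected sync level: %d\n", level);
    return 1;
  }
  cudaFree(0);  // first runtime call: the context is created under the chosen policy
  return 0;
}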
2ee27b8358ac70bdf6284beca1bf095c82b2e3f8.cu
/* * fastHog.cpp * * Created on: May 14, 2009 * Author: viprad */ #include <stdio.h> #include <stdlib.h> #include <sys/mman.h> #include "HOGEngine.h" #include "HOGEngineDevice.h" #include "HOGImage.h" #include "Others/persondetectorwt.tcc" extern "C" { #include "../../gpusync.h" } HOGImage image; cudaStream_t stream; char file_name[] = "../Samples/Copy/FastHOG/Files/Images/testImage.bmp"; void init(int sync_level) { switch (sync_level) { case 0: cudaSetDeviceFlags(cudaDeviceScheduleSpin); break; case 1: cudaSetDeviceFlags(cudaDeviceScheduleYield); break; case 2: cudaSetDeviceFlags(cudaDeviceBlockingSync); break; default: printf("Unknown sync level: %d\n", sync_level); break; } if (!HOGImageFile(file_name, &image)) { printf("Unable to load image file.\n"); exit(1); } if (cudaSetDevice(0) != cudaSuccess) { printf("Unable to set cuda device.\n"); exit(1); } if (cudaFree(0) != cudaSuccess) { printf("Error running cudaFree(0).\n"); exit(1); } // Pin code if(!mlockall(MCL_CURRENT | MCL_FUTURE)) { fprintf(stderr, "Failed to lock code pages.\n"); exit(EXIT_FAILURE); } if (cudaStreamCreate(&stream) != cudaSuccess) { printf("Unable to create cuda stream.\n"); exit(1); } InitializeHOG(image.width, image.height, PERSON_LINEAR_BIAS, PERSON_WEIGHT_VEC, PERSON_WEIGHT_VEC_LENGTH); } void mallocCPU(int numElements) { HostAllocHOGEngineDeviceMemory(); } void mallocGPU(int numElements) { DeviceAllocHOGEngineDeviceMemory(); } void copyin(int numElements) { CopyInHOGEngineDevice(); } void exec(int numElements) { // There are still memcpys to the device in HOGScale and HOGPadding--they // may require more work to get rid of because they seem to rely on variables // determined during the execution phase. BeginProcess(&image, -1, -1, -1, -1, -1.0f, -1.0f); } void copyout() { EndProcess(); } void freeGPU() { DeviceFreeHOGEngineDeviceMemory(); } void freeCPU() { HostFreeHOGEngineDeviceMemory(); } void finish() { FinalizeHOG(); cudaStreamSynchronize(stream); cudaStreamDestroy(stream); DestroyHOGImage(&image); if (cudaDeviceReset() != cudaSuccess) { printf("Failed to reset the device.\n"); exit(1); } } /* int main(void) { init(0); mallocCPU(0); mallocGPU(0); copyin(0); exec(0); copyout(); freeGPU(); freeCPU(); finish(); return 0; } */
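Both the HIP and CUDA versions of this interposer check every runtime call with an explicit != cudaSuccess comparison and exit on failure. A common way to keep that pattern compact is a small checking macro; the sketch below is a generic example (CHECK_CUDA is not a name from this file) that preserves the same abort-on-error behaviour while also printing the error string and call site:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Evaluate a runtime call once; on failure, report the call site and CUDA error string, then abort.
#define CHECK_CUDA(call)                                                     \
  do {                                                                       \
    cudaError_t err_ = (call);                                               \
    if (err_ != cudaSuccess) {                                               \
      fprintf(stderr, "%s:%d: %s failed: %s\n", __FILE__, __LINE__, #call,   \
              cudaGetErrorString(err_));                                     \
      exit(EXIT_FAILURE);                                                    \
    }                                                                        \
  } while (0)

int main() {
  cudaStream_t stream;
  CHECK_CUDA(cudaSetDevice(0));
  CHECK_CUDA(cudaStreamCreate(&stream));
  CHECK_CUDA(cudaStreamSynchronize(stream));
  CHECK_CUDA(cudaStreamDestroy(stream));
  return 0;
}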
3286b2e69fa9c898af47d3aff389bc15fe0b2336.hip
// !!! This is a file automatically generated by hipify!!! #include "Header.h" #include "hip/hip_runtime.h" __device__ char compareCharacters(char c1, char c2); __global__ void calculateSimilarityChar(char* seq1, char* seq2, char* signs, int length, int offset, int mutationIndex); __constant__ char conservativeGroups[9][5] = {"NDEQ", "NEQK", "STA", "MILV", "QHRK", "NHQK", "FYW", "HY", "MILF"}; __constant__ char semiConservativeGroups[11][7] = {"SAG", "ATV", "CSA", "SGND", "STPA", "STNK", "NEQHRK", "NDEQHK", "SNDEQK", "HFY", "FVLIM"}; __global__ void calculateSimilarityChar(char* seq1, char* seq2, char* signs, int length, int offset, int mutationIndex) { // Fill the signs array with a similarity sequence as this: ::*..*.**:: * . :: . int i = blockDim.x * blockIdx.x + threadIdx.x; char c1, c2; // compare the characters and write to signs in the corresponding positions accoarding to the thread ID if (i < length) { c1 = seq1[i + offset]; c2 = seq2[i]; if(i == mutationIndex) { *(signs + i) = ' '; return; } else if(i > mutationIndex) c2 = seq2[i - 1]; *(signs + i ) = compareCharacters(c1, c2); } } __device__ char compareCharacters(char c1, char c2) { // Given two Characters, compute the result Character with regard to the conservative and semi-conservative groups. char* s; int i, j, containsC1 = 0, containsC2 = 0; char c; if(c1 == c2) return '*'; for(i=0; i<9 ;i++) { s = conservativeGroups[i]; containsC1 = 0; containsC2 = 0; for (j=0; j<4; j++) { c = s[j]; if(c == '\0') break; if(c == c1) { containsC1 = 1; if(containsC2) return ':'; } if(c == c2) { containsC2 = 1; if(containsC1) return ':'; } } } for(i=0; i<11 ;i++) { s = semiConservativeGroups[i]; containsC1 = 0; containsC2 = 0; for (j=0; j<6; j++) { c = s[j]; if(c == '\0') break; if(c == c1) { containsC1 = 1; if(containsC2) return '.'; } if(c == c2) { containsC2 = 1; if(containsC1) return '.'; } } } return ' '; } void allocateCudaMemory(char** seq1, char** seq2, char** signs, int msLength, int sLength) { // Given String array pointers, allocate them into the CUDA memory. // Error code to check return values for CUDA calls hipError_t err1 = hipSuccess; hipError_t err2 = hipSuccess; hipError_t err3 = hipSuccess; size_t size1 = msLength * sizeof(char); size_t size2 = sLength * sizeof(char); // Allocate memory on GPU to copy the data from the host err1 = hipMalloc(seq1, size1); err2 = hipMalloc(seq2, size2); err3 = hipMalloc(signs, size2); if (err1 != hipSuccess) { fprintf(stderr, "1Failed to allocate device memory - %s\n", hipGetErrorString(err1)); exit(EXIT_FAILURE); } if (err2 != hipSuccess) { fprintf(stderr, "2Failed to allocate device memory - %s\n", hipGetErrorString(err2)); exit(EXIT_FAILURE); } if (err3 != hipSuccess) { fprintf(stderr, "3Failed to allocate device memory - %s\n", hipGetErrorString(err3)); exit(EXIT_FAILURE); } } void copyInformationToCuda(char* seq1, char* seq2, MainSequence* ms, Sequence* s) { // Given empty String pointers after alocation inside CUDA, copy the original information to them for further computation. 
// Error code to check return values for CUDA calls hipError_t err1 = hipSuccess; hipError_t err2 = hipSuccess; size_t size1 = ms->length * sizeof(char); size_t size2 = s->length * sizeof(char); // Copy data from host to the GPU memory err1 = hipMemcpy(seq1, ms->letters, size1, hipMemcpyHostToDevice); err2 = hipMemcpy(seq2, s->letters, size2, hipMemcpyHostToDevice); if (err1 != hipSuccess || err2 != hipSuccess) { fprintf(stderr, "4Failed to copy data from host to device - %s\n", hipGetErrorString(err1)); exit(EXIT_FAILURE); } if (err2 != hipSuccess) { fprintf(stderr, "5Failed to copy data from host to device - %s\n", hipGetErrorString(err2)); exit(EXIT_FAILURE); } } void freeCudaMemory(char* seq1, char* seq2, char* signs) { // Free allocated memory on GPU if (hipFree(signs) != hipSuccess || hipFree(seq1) != hipSuccess || hipFree(seq2) != hipSuccess) { fprintf(stderr, "8Failed to free device data"); exit(EXIT_FAILURE); } } int GPU_Create_Signs(Sequence* s, int n, char* originalSigns, int mutationIndex, char* seq1, char* seq2, char* signs) { // Given the Alocated and copied String Sequences, use the CUDA kernel to compute the target similarity String according to the given mutation and offset n. // Error code to check return values for CUDA calls hipError_t err = hipSuccess; size_t size = s->length * sizeof(char); // Launch the Kernel int threadsPerBlock = 256; int blocksPerGrid = (s->length + threadsPerBlock) / threadsPerBlock; // added + 1 for '-' // n is the current offset hipLaunchKernelGGL(( calculateSimilarityChar), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, seq1, seq2, signs, s->length + 1, n, mutationIndex); // including the '-' (this is the + 1) err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "6Failed to launch vectorAdd kernel - %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the result from GPU to the host memory. err = hipMemcpy(originalSigns, signs, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "7Failed to copy result array from device to host -%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } return 0; }
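compareCharacters reads its conservative and semi-conservative groups from __constant__ arrays, so the lookup tables live in constant memory and are served through the constant cache when the threads of a warp read the same entry. A stripped-down sketch of that table-in-constant-memory pattern with a made-up lookup table (names and data are illustrative only):

#include <cstdio>
#include <cuda_runtime.h>

// Small read-only table in constant memory; all threads read the same entries,
// so the constant cache serves the accesses without touching global memory.
__constant__ char d_vowels[6] = "aeiou";

__global__ void mark_vowels(const char *text, char *marks, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;              // bounds guard, as in calculateSimilarityChar
  char c = text[i];
  char m = ' ';
  for (int j = 0; j < 5; ++j) {
    if (c == d_vowels[j]) { m = '*'; break; }
  }
  marks[i] = m;
}

int main() {
  const char h_text[] = "sequence";
  const int n = sizeof(h_text) - 1;
  char h_marks[sizeof(h_text)] = {0};
  char *d_text, *d_marks;
  cudaMalloc((void**)&d_text, n);
  cudaMalloc((void**)&d_marks, n);
  cudaMemcpy(d_text, h_text, n, cudaMemcpyHostToDevice);
  mark_vowels<<<1, 32>>>(d_text, d_marks, n);
  cudaMemcpy(h_marks, d_marks, n, cudaMemcpyDeviceToHost);
  printf("%s\n%s\n", h_text, h_marks);
  cudaFree(d_text);
  cudaFree(d_marks);
  return 0;
}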
3286b2e69fa9c898af47d3aff389bc15fe0b2336.cu
#include "Header.h" #include "cuda_runtime.h" __device__ char compareCharacters(char c1, char c2); __global__ void calculateSimilarityChar(char* seq1, char* seq2, char* signs, int length, int offset, int mutationIndex); __constant__ char conservativeGroups[9][5] = {"NDEQ", "NEQK", "STA", "MILV", "QHRK", "NHQK", "FYW", "HY", "MILF"}; __constant__ char semiConservativeGroups[11][7] = {"SAG", "ATV", "CSA", "SGND", "STPA", "STNK", "NEQHRK", "NDEQHK", "SNDEQK", "HFY", "FVLIM"}; __global__ void calculateSimilarityChar(char* seq1, char* seq2, char* signs, int length, int offset, int mutationIndex) { // Fill the signs array with a similarity sequence as this: ::*..*.**:: * . :: . int i = blockDim.x * blockIdx.x + threadIdx.x; char c1, c2; // compare the characters and write to signs in the corresponding positions accoarding to the thread ID if (i < length) { c1 = seq1[i + offset]; c2 = seq2[i]; if(i == mutationIndex) { *(signs + i) = ' '; return; } else if(i > mutationIndex) c2 = seq2[i - 1]; *(signs + i ) = compareCharacters(c1, c2); } } __device__ char compareCharacters(char c1, char c2) { // Given two Characters, compute the result Character with regard to the conservative and semi-conservative groups. char* s; int i, j, containsC1 = 0, containsC2 = 0; char c; if(c1 == c2) return '*'; for(i=0; i<9 ;i++) { s = conservativeGroups[i]; containsC1 = 0; containsC2 = 0; for (j=0; j<4; j++) { c = s[j]; if(c == '\0') break; if(c == c1) { containsC1 = 1; if(containsC2) return ':'; } if(c == c2) { containsC2 = 1; if(containsC1) return ':'; } } } for(i=0; i<11 ;i++) { s = semiConservativeGroups[i]; containsC1 = 0; containsC2 = 0; for (j=0; j<6; j++) { c = s[j]; if(c == '\0') break; if(c == c1) { containsC1 = 1; if(containsC2) return '.'; } if(c == c2) { containsC2 = 1; if(containsC1) return '.'; } } } return ' '; } void allocateCudaMemory(char** seq1, char** seq2, char** signs, int msLength, int sLength) { // Given String array pointers, allocate them into the CUDA memory. // Error code to check return values for CUDA calls cudaError_t err1 = cudaSuccess; cudaError_t err2 = cudaSuccess; cudaError_t err3 = cudaSuccess; size_t size1 = msLength * sizeof(char); size_t size2 = sLength * sizeof(char); // Allocate memory on GPU to copy the data from the host err1 = cudaMalloc(seq1, size1); err2 = cudaMalloc(seq2, size2); err3 = cudaMalloc(signs, size2); if (err1 != cudaSuccess) { fprintf(stderr, "1Failed to allocate device memory - %s\n", cudaGetErrorString(err1)); exit(EXIT_FAILURE); } if (err2 != cudaSuccess) { fprintf(stderr, "2Failed to allocate device memory - %s\n", cudaGetErrorString(err2)); exit(EXIT_FAILURE); } if (err3 != cudaSuccess) { fprintf(stderr, "3Failed to allocate device memory - %s\n", cudaGetErrorString(err3)); exit(EXIT_FAILURE); } } void copyInformationToCuda(char* seq1, char* seq2, MainSequence* ms, Sequence* s) { // Given empty String pointers after alocation inside CUDA, copy the original information to them for further computation. 
// Error code to check return values for CUDA calls cudaError_t err1 = cudaSuccess; cudaError_t err2 = cudaSuccess; size_t size1 = ms->length * sizeof(char); size_t size2 = s->length * sizeof(char); // Copy data from host to the GPU memory err1 = cudaMemcpy(seq1, ms->letters, size1, cudaMemcpyHostToDevice); err2 = cudaMemcpy(seq2, s->letters, size2, cudaMemcpyHostToDevice); if (err1 != cudaSuccess || err2 != cudaSuccess) { fprintf(stderr, "4Failed to copy data from host to device - %s\n", cudaGetErrorString(err1)); exit(EXIT_FAILURE); } if (err2 != cudaSuccess) { fprintf(stderr, "5Failed to copy data from host to device - %s\n", cudaGetErrorString(err2)); exit(EXIT_FAILURE); } } void freeCudaMemory(char* seq1, char* seq2, char* signs) { // Free allocated memory on GPU if (cudaFree(signs) != cudaSuccess || cudaFree(seq1) != cudaSuccess || cudaFree(seq2) != cudaSuccess) { fprintf(stderr, "8Failed to free device data"); exit(EXIT_FAILURE); } } int GPU_Create_Signs(Sequence* s, int n, char* originalSigns, int mutationIndex, char* seq1, char* seq2, char* signs) { // Given the Alocated and copied String Sequences, use the CUDA kernel to compute the target similarity String according to the given mutation and offset n. // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; size_t size = s->length * sizeof(char); // Launch the Kernel int threadsPerBlock = 256; int blocksPerGrid = (s->length + threadsPerBlock) / threadsPerBlock; // added + 1 for '-' // n is the current offset calculateSimilarityChar<<<blocksPerGrid, threadsPerBlock>>>(seq1, seq2, signs, s->length + 1, n, mutationIndex); // including the '-' (this is the + 1) err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "6Failed to launch vectorAdd kernel - %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the result from GPU to the host memory. err = cudaMemcpy(originalSigns, signs, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "7Failed to copy result array from device to host -%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } return 0; }
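GPU_Create_Signs sizes its grid as (s->length + threadsPerBlock) / threadsPerBlock so that the appended '-' character still gets a thread; the general form of this calculation is a ceiling division over the exact element count, paired with a bounds guard in the kernel. A brief sketch of that standard launch-configuration pattern (the kernel and names here are illustrative, not from this file):

#include <cuda_runtime.h>

__global__ void fill_value(float *out, float value, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;  // guard: the last block is usually only partially full
}

int main() {
  const int n = 1000;               // exact element count, including any appended sentinel
  const int threadsPerBlock = 256;
  // Ceiling division: enough blocks to cover all n elements, never one block short.
  const int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;

  float *d_out;
  cudaMalloc((void**)&d_out, n * sizeof(float));
  fill_value<<<blocksPerGrid, threadsPerBlock>>>(d_out, 1.0f, n);
  cudaDeviceSynchronize();
  cudaFree(d_out);
  return 0;
}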
c357840847b5cb0670a0fa9cfe0d29bc0233b12b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define CATCH_CONFIG_MAIN #include <catch.hpp> #include <iostream> #include "../include/ecuda/ecuda.hpp" template<typename T> struct element_t { T x, y; __HOST__ __DEVICE__ element_t() : x(static_cast<T>(66)), y(static_cast<T>(99)) {} __HOST__ __DEVICE__ element_t( const T& x, const T& y ) : x(x), y(y) {} __HOST__ __DEVICE__ inline bool operator==( const element_t<T>& other ) const { return x == other.x and y == other.y; } __HOST__ __DEVICE__ inline bool operator!=( const element_t<T>& other ) const { return !operator==(other); } __HOST__ __DEVICE__ inline bool operator<( const element_t<T>& other ) const { return x == y ? y < other.y : x < other.x; } friend std::ostream& operator<<( std::ostream& out, const element_t<T>& el ) { out << "Element[" << el.x << "," << el.y << "]"; return out; } }; typedef element_t<double> data_type; //const std::size_t N = 100; // 100k bytes const std::size_t R = 100; const std::size_t C = 33; #ifdef __HIPCC__ template<typename T> __global__ void check_matrix_accessors_on_device( const typename ecuda::matrix<T>::kernel_argument matrix, typename ecuda::array<int,3>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); for( std::size_t i = 0; i < matrix.number_rows(); ++i ) { for( std::size_t j = 0; j < matrix.number_columns(); ++j ) { if( matrix.at(i,j) != T(i,j) ) results[0] = 0; if( matrix[i][j] != T(i,j) ) results[1] = 0; if( matrix(i,j) != T(i,j) ) results[2] = 0; } } } } template<typename T> __global__ void check_matrix_front_and_back_on_device( const typename ecuda::matrix<T>::kernel_argument matrix, typename ecuda::array<int,2>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 0 ); if( matrix.front() == T(0,0) ) results[0] = 1; if( matrix.back() == T(R-1,C-1) ) results[1] = 1; } } template<typename T> __global__ void check_matrix_begin_and_end_on_device( const typename ecuda::matrix<T>::kernel_argument matrix, typename ecuda::array<int,1>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); unsigned counter = 0; for( typename ecuda::matrix<T>::const_iterator iter = matrix.begin(); iter != matrix.end(); ++iter, ++counter ) { if( *iter != T(counter/matrix.number_columns(),counter%matrix.number_columns()) ) results.front() = 0; } } } template<typename T> __global__ void check_matrix_rbegin_and_rend_on_device( const typename ecuda::matrix<T>::kernel_argument matrix, typename ecuda::array<int,1>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); unsigned counter = matrix.size()-1; for( typename ecuda::matrix<T>::const_reverse_iterator iter = matrix.rbegin(); iter != matrix.rend(); ++iter, --counter ) { if( *iter != T(counter/matrix.number_columns(),counter%matrix.number_columns()) ) results.front() = 0; } } } template<typename T> __global__ void check_matrix_lexicographical_comparison_on_device( const typename ecuda::matrix<T>::kernel_argument matrix1, const typename ecuda::matrix<T>::kernel_argument matrix2, typename ecuda::array<int,1>::kernel_argument results ) { if( !threadIdx.x ) { const bool b = ecuda::lexicographical_compare( matrix1.begin(), matrix1.end(), matrix2.begin(), matrix2.end() ); ecuda::fill( results.begin(), results.end(), b ? 
1 : 0 ); } } template<typename T> __global__ void check_matrix_column_default_values_on_device( typename ecuda::matrix<T>::const_column_type column, typename ecuda::array<int,2>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); for( std::size_t i = 0; i < column.size(); ++i ) if( column[i] != T() ) results[0] = 0; for( typename ecuda::matrix<T>::const_column_type::const_iterator iter = column.begin(); iter != column.end(); ++iter ) if( *iter != T() ) results[1] = 0; } } template<typename T> __global__ void check_matrix_column_unique_values_on_device( typename ecuda::matrix<T>::const_column_type column, typename ecuda::array<int,2>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); for( std::size_t i = 0; i < column.size(); ++i ) if( column[i] != T(i,8) ) results[0] = 0; std::size_t counter = 0; for( typename ecuda::matrix<T>::const_column_type::const_iterator iter = column.begin(); iter != column.end(); ++iter, ++counter ) if( *iter != T(counter,8) ) results[1] = 0; } } #endif SCENARIO( "matrix functions correctly", "matrix" ) { GIVEN( "one default-intialized ecuda::matrix with a value of (66,99) and another ecuda::array with unique values from (0,0)-(99,33) inclusive" ) { ecuda::matrix<data_type> deviceMatrixWithDefaultValues(R,C); ecuda::matrix<data_type> deviceMatrixWithUniqueValues(R,C); data_type hostMatrixWithUniqueValues[R][C]; for( std::size_t i = 0; i < R; ++i ) for( std::size_t j = 0; j < C; ++j ) hostMatrixWithUniqueValues[i][j] = data_type(i,j); ecuda::copy( &hostMatrixWithUniqueValues[0][0], &hostMatrixWithUniqueValues[R][0], deviceMatrixWithUniqueValues.begin() ); WHEN( "the values of the former are inspected" ) { std::vector<data_type> v( R*C ); THEN( "they should all be default initialized" ) { REQUIRE( ecuda::equal( deviceMatrixWithDefaultValues.begin(), deviceMatrixWithDefaultValues.end(), v.begin() ) ); REQUIRE( ecuda::equal( v.begin(), v.end(), deviceMatrixWithDefaultValues.begin() ) ); } } AND_WHEN( "the values of the latter are inspected" ) { THEN( "they should all have expected values" ) { REQUIRE( ecuda::equal( deviceMatrixWithUniqueValues.begin(), deviceMatrixWithUniqueValues.end(), &hostMatrixWithUniqueValues[0][0] ) ); REQUIRE( ecuda::equal( &hostMatrixWithUniqueValues[0][0], &hostMatrixWithUniqueValues[R][0], deviceMatrixWithUniqueValues.begin() ) ); } } AND_WHEN( "another matrix is copy constructed" ) { ecuda::matrix<data_type> mat2( deviceMatrixWithDefaultValues ); THEN( "they should be equal" ) { REQUIRE( ecuda::equal( deviceMatrixWithDefaultValues.begin(), deviceMatrixWithDefaultValues.end(), mat2.begin() ) ); REQUIRE( ecuda::equal( mat2.begin(), mat2.end(), deviceMatrixWithDefaultValues.begin() ) ); } AND_THEN( "they should reside in different memory locations" ) { REQUIRE( deviceMatrixWithDefaultValues.data() != mat2.data() ); } } AND_WHEN( "another matrix is copy assigned") { ecuda::matrix<data_type> mat2( R, C ); mat2 = deviceMatrixWithDefaultValues; THEN( "they should be equal" ) { REQUIRE( ecuda::equal( deviceMatrixWithDefaultValues.begin(), deviceMatrixWithDefaultValues.end(), mat2.begin() ) ); REQUIRE( ecuda::equal( mat2.begin(), mat2.end(), deviceMatrixWithDefaultValues.begin() ) ); } AND_THEN( "they should reside in different memory locations" ) { REQUIRE( deviceMatrixWithDefaultValues.data() != mat2.data() ); } } #ifdef ECUDA_CPP11_AVAILABLE AND_WHEN( "another matrix is move constructed" ) { ecuda::matrix<data_type> mat2( 
std::move(deviceMatrixWithDefaultValues) ); THEN( "the original matrix should be invalid" ) { REQUIRE( !deviceMatrixWithDefaultValues.data() ); } AND_THEN( "the new matrix should have the original one's contents" ) { REQUIRE( ecuda::count( mat2.begin(), mat2.end(), data_type() ) == (R*C) ); } AND_THEN( "a move assignment operation should restore the original state" ) { deviceMatrixWithDefaultValues = std::move(mat2); REQUIRE( deviceMatrixWithDefaultValues.data() ); REQUIRE( !mat2.data() ); } } #endif #ifdef __HIPCC__ AND_WHEN( "the at, operator[], and operator() accessors are used in device code" ) { THEN( "the values should be as expected" ) { ecuda::array<int,3> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT(hipLaunchKernelGGL(( check_matrix_accessors_on_device<data_type>), dim3(1),dim3(1), 0, 0, deviceMatrixWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 3 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // at() REQUIRE( hostResultCodes[1] ); // operator[] REQUIRE( hostResultCodes[2] ); // operator() } } #else AND_WHEN( "the at, operator[], and operator() accessors are used in host code" ) { THEN( "the values should be as expected" ) { std::vector<int> resultCodes( 3, 1 ); for( std::size_t i = 0; i < R; ++i ) { for( std::size_t j = 0; j < C; ++j ) { if( deviceMatrixWithUniqueValues.at(i,j) != data_type(i,j) ) resultCodes[0] = 0; if( deviceMatrixWithUniqueValues[i][j] != data_type(i,j) ) resultCodes[1] = 0; if( deviceMatrixWithUniqueValues(i,j) != data_type(i,j) ) resultCodes[2] = 0; } } REQUIRE( resultCodes[0] ); REQUIRE( resultCodes[1] ); REQUIRE( resultCodes[2] ); bool exceptionThrown = false; try { deviceMatrixWithUniqueValues.at(R,C); } catch( std::out_of_range& ex ) { exceptionThrown = true; } REQUIRE(exceptionThrown); } } #endif // __HIPCC__ #ifdef __HIPCC__ AND_WHEN( "the front() and back() accessors are used in device code" ) { THEN( "the values should be as expected" ) { ecuda::array<int,2> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT(hipLaunchKernelGGL(( check_matrix_front_and_back_on_device<data_type>), dim3(1),dim3(1), 0, 0, deviceMatrixWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 2 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // front() REQUIRE( hostResultCodes[1] ); // back() } } #else AND_WHEN( "the front() and back() accessors are used in host code" ) { THEN( "the values should be as expected" ) { REQUIRE( deviceMatrixWithUniqueValues.front() == data_type(0,0) ); REQUIRE( deviceMatrixWithUniqueValues.back() == data_type(R-1,C-1) ); } } #endif #ifdef __HIPCC__ AND_WHEN( "the begin() and end() iterators are used to traverse the matrix in device code" ) { THEN( "the values should increase from 0,0 to R-1,C-1" ) { ecuda::array<int,1> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT(hipLaunchKernelGGL(( check_matrix_begin_and_end_on_device<data_type>), dim3(1),dim3(1), 0, 0, deviceMatrixWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 1 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values } } #else AND_WHEN( "the begin() and end() iterators are used to traverse the matrix in host code" ) { THEN( "the values should increase from 0,0 to R-1,C-1" ) { std::size_t counter = 0; bool correctValues = true; for( typename ecuda::matrix<data_type>::const_iterator 
iter = deviceMatrixWithUniqueValues.begin(); iter != deviceMatrixWithUniqueValues.end(); ++iter, ++counter ) { if( *iter != data_type( counter/C, counter % C ) ) correctValues = false; } REQUIRE(correctValues); } } #endif #ifdef __HIPCC__ AND_WHEN( "the rbegin() and rend() iterators are used to traverse the matrix in device code" ) { THEN( "the values should decrease from R-1,C-1 to 0,0" ) { ecuda::array<int,1> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT(hipLaunchKernelGGL(( check_matrix_rbegin_and_rend_on_device<data_type>), dim3(1),dim3(1), 0, 0, deviceMatrixWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 1 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values } } #else AND_WHEN( "the rbegin() and rend() iterators are used to traverse the matrix in host code" ) { THEN( "the values should decrease from R-1,C-1 to 0,0" ) { std::size_t counter = R*C-1; bool correctValues = true; for( typename ecuda::matrix<data_type>::const_reverse_iterator iter = deviceMatrixWithUniqueValues.rbegin(); iter != deviceMatrixWithUniqueValues.rend(); ++iter, --counter ) { if( *iter != data_type( counter/C, counter % C) ) correctValues = false; } REQUIRE(correctValues); } } #endif #ifdef __HIPCC__ AND_WHEN( "the matrices are lexicographically compared in device code" ) { THEN( "the former matrix should compare less than the latter matrix (since the default value is (66,99))" ) { ecuda::array<int,1> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT(hipLaunchKernelGGL(( check_matrix_lexicographical_comparison_on_device<data_type>), dim3(1),dim3(1), 0, 0, deviceMatrixWithUniqueValues, deviceMatrixWithDefaultValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 1 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values } } #else AND_WHEN( "the matrices are lexicographically compared in host code" ) { THEN( "the former matrix should compare less than the latter matrix (since the default value is (66,99))" ) { REQUIRE( deviceMatrixWithUniqueValues < deviceMatrixWithDefaultValues ); REQUIRE( !( deviceMatrixWithUniqueValues > deviceMatrixWithDefaultValues ) ); REQUIRE( !( deviceMatrixWithUniqueValues >= deviceMatrixWithDefaultValues ) ); } } #endif } } SCENARIO( "matrix rows function correctly", "matrix_rows" ) { GIVEN( "the 16th row from: one default-initialized ecuda::matrix with a value of (66,99) and another ecuda::array with unique values from (0,0)-(99,33) inclusive" ) { ecuda::matrix<data_type> deviceMatrixWithDefaultValues(R,C); ecuda::matrix<data_type> deviceMatrixWithUniqueValues(R,C); data_type hostMatrixWithUniqueValues[R][C]; for( std::size_t i = 0; i < R; ++i ) for( std::size_t j = 0; j < C; ++j ) hostMatrixWithUniqueValues[i][j] = data_type(i,j); ecuda::copy( &hostMatrixWithUniqueValues[0][0], &hostMatrixWithUniqueValues[R][0], deviceMatrixWithUniqueValues.begin() ); typename ecuda::matrix<data_type>::row_type deviceRowWithDefaultValues = deviceMatrixWithDefaultValues.get_row(16); typename ecuda::matrix<data_type>::row_type deviceRowWithUniqueValues = deviceMatrixWithUniqueValues[16]; WHEN( "the size of the row is inspected" ) { THEN( "it should be equal to the number of matrix columns" ) { REQUIRE( deviceRowWithDefaultValues.size() == C ); } } AND_WHEN( "the values of the former are inspected" ) { std::vector<data_type> v( C ); THEN( "they should all be default initialized" ) { REQUIRE( 
ecuda::equal( deviceRowWithDefaultValues.begin(), deviceRowWithDefaultValues.end(), v.begin() ) ); REQUIRE( ecuda::equal( v.begin(), v.end(), deviceRowWithDefaultValues.begin() ) ); } } AND_WHEN( "the values of the latter are inspected" ) { THEN( "they should all have expected values" ) { REQUIRE( ecuda::equal( deviceRowWithUniqueValues.begin(), deviceRowWithUniqueValues.end(), &hostMatrixWithUniqueValues[16][0] ) ); REQUIRE( ecuda::equal( &hostMatrixWithUniqueValues[16][0], &hostMatrixWithUniqueValues[16][C], deviceRowWithUniqueValues.begin() ) ); } } } } SCENARIO( "matrix columns function correctly", "matrix_columns" ) { GIVEN( "the 8th column from: one default-initialized ecuda::matrix with a value of (66,99) and another ecuda::array with unique values from (0,0)-(99,33) inclusive" ) { ecuda::matrix<data_type> deviceMatrixWithDefaultValues(R,C); ecuda::matrix<data_type> deviceMatrixWithUniqueValues(R,C); data_type hostMatrixWithUniqueValues[R][C]; for( std::size_t i = 0; i < R; ++i ) for( std::size_t j = 0; j < C; ++j ) hostMatrixWithUniqueValues[i][j] = data_type(i,j); ecuda::copy( &hostMatrixWithUniqueValues[0][0], &hostMatrixWithUniqueValues[R][0], deviceMatrixWithUniqueValues.begin() ); typename ecuda::matrix<data_type>::column_type deviceColumnWithDefaultValues = deviceMatrixWithDefaultValues.get_column(8); typename ecuda::matrix<data_type>::column_type deviceColumnWithUniqueValues = deviceMatrixWithUniqueValues.get_column(8); WHEN( "the size of the row is inspected" ) { THEN( "it should be equal to the number of matrix rows" ) { REQUIRE( deviceColumnWithDefaultValues.size() == R ); } } #ifdef __HIPCC__ AND_WHEN( "the values of the former are inspected in device code" ) { std::vector<data_type> v( C ); THEN( "they should all be default initialized" ) { ecuda::array<int,2> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT(hipLaunchKernelGGL(( check_matrix_column_default_values_on_device<data_type>), dim3(1),dim3(1), 0, 0, deviceColumnWithDefaultValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 2 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values REQUIRE( hostResultCodes[1] ); // correct values } } AND_WHEN( "the values of the latter are inspected in device code" ) { THEN( "they should all have expected values" ) { ecuda::array<int,2> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT(hipLaunchKernelGGL(( check_matrix_column_unique_values_on_device<data_type>), dim3(1),dim3(1), 0, 0, deviceColumnWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 2 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values REQUIRE( hostResultCodes[1] ); // correct values } } #endif } }
c357840847b5cb0670a0fa9cfe0d29bc0233b12b.cu
#define CATCH_CONFIG_MAIN #include <catch.hpp> #include <iostream> #include "../include/ecuda/ecuda.hpp" template<typename T> struct element_t { T x, y; __HOST__ __DEVICE__ element_t() : x(static_cast<T>(66)), y(static_cast<T>(99)) {} __HOST__ __DEVICE__ element_t( const T& x, const T& y ) : x(x), y(y) {} __HOST__ __DEVICE__ inline bool operator==( const element_t<T>& other ) const { return x == other.x and y == other.y; } __HOST__ __DEVICE__ inline bool operator!=( const element_t<T>& other ) const { return !operator==(other); } __HOST__ __DEVICE__ inline bool operator<( const element_t<T>& other ) const { return x == y ? y < other.y : x < other.x; } friend std::ostream& operator<<( std::ostream& out, const element_t<T>& el ) { out << "Element[" << el.x << "," << el.y << "]"; return out; } }; typedef element_t<double> data_type; //const std::size_t N = 100; // 100k bytes const std::size_t R = 100; const std::size_t C = 33; #ifdef __CUDACC__ template<typename T> __global__ void check_matrix_accessors_on_device( const typename ecuda::matrix<T>::kernel_argument matrix, typename ecuda::array<int,3>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); for( std::size_t i = 0; i < matrix.number_rows(); ++i ) { for( std::size_t j = 0; j < matrix.number_columns(); ++j ) { if( matrix.at(i,j) != T(i,j) ) results[0] = 0; if( matrix[i][j] != T(i,j) ) results[1] = 0; if( matrix(i,j) != T(i,j) ) results[2] = 0; } } } } template<typename T> __global__ void check_matrix_front_and_back_on_device( const typename ecuda::matrix<T>::kernel_argument matrix, typename ecuda::array<int,2>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 0 ); if( matrix.front() == T(0,0) ) results[0] = 1; if( matrix.back() == T(R-1,C-1) ) results[1] = 1; } } template<typename T> __global__ void check_matrix_begin_and_end_on_device( const typename ecuda::matrix<T>::kernel_argument matrix, typename ecuda::array<int,1>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); unsigned counter = 0; for( typename ecuda::matrix<T>::const_iterator iter = matrix.begin(); iter != matrix.end(); ++iter, ++counter ) { if( *iter != T(counter/matrix.number_columns(),counter%matrix.number_columns()) ) results.front() = 0; } } } template<typename T> __global__ void check_matrix_rbegin_and_rend_on_device( const typename ecuda::matrix<T>::kernel_argument matrix, typename ecuda::array<int,1>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); unsigned counter = matrix.size()-1; for( typename ecuda::matrix<T>::const_reverse_iterator iter = matrix.rbegin(); iter != matrix.rend(); ++iter, --counter ) { if( *iter != T(counter/matrix.number_columns(),counter%matrix.number_columns()) ) results.front() = 0; } } } template<typename T> __global__ void check_matrix_lexicographical_comparison_on_device( const typename ecuda::matrix<T>::kernel_argument matrix1, const typename ecuda::matrix<T>::kernel_argument matrix2, typename ecuda::array<int,1>::kernel_argument results ) { if( !threadIdx.x ) { const bool b = ecuda::lexicographical_compare( matrix1.begin(), matrix1.end(), matrix2.begin(), matrix2.end() ); ecuda::fill( results.begin(), results.end(), b ? 
1 : 0 ); } } template<typename T> __global__ void check_matrix_column_default_values_on_device( typename ecuda::matrix<T>::const_column_type column, typename ecuda::array<int,2>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); for( std::size_t i = 0; i < column.size(); ++i ) if( column[i] != T() ) results[0] = 0; for( typename ecuda::matrix<T>::const_column_type::const_iterator iter = column.begin(); iter != column.end(); ++iter ) if( *iter != T() ) results[1] = 0; } } template<typename T> __global__ void check_matrix_column_unique_values_on_device( typename ecuda::matrix<T>::const_column_type column, typename ecuda::array<int,2>::kernel_argument results ) { if( !threadIdx.x ) { ecuda::fill( results.begin(), results.end(), 1 ); for( std::size_t i = 0; i < column.size(); ++i ) if( column[i] != T(i,8) ) results[0] = 0; std::size_t counter = 0; for( typename ecuda::matrix<T>::const_column_type::const_iterator iter = column.begin(); iter != column.end(); ++iter, ++counter ) if( *iter != T(counter,8) ) results[1] = 0; } } #endif SCENARIO( "matrix functions correctly", "matrix" ) { GIVEN( "one default-intialized ecuda::matrix with a value of (66,99) and another ecuda::array with unique values from (0,0)-(99,33) inclusive" ) { ecuda::matrix<data_type> deviceMatrixWithDefaultValues(R,C); ecuda::matrix<data_type> deviceMatrixWithUniqueValues(R,C); data_type hostMatrixWithUniqueValues[R][C]; for( std::size_t i = 0; i < R; ++i ) for( std::size_t j = 0; j < C; ++j ) hostMatrixWithUniqueValues[i][j] = data_type(i,j); ecuda::copy( &hostMatrixWithUniqueValues[0][0], &hostMatrixWithUniqueValues[R][0], deviceMatrixWithUniqueValues.begin() ); WHEN( "the values of the former are inspected" ) { std::vector<data_type> v( R*C ); THEN( "they should all be default initialized" ) { REQUIRE( ecuda::equal( deviceMatrixWithDefaultValues.begin(), deviceMatrixWithDefaultValues.end(), v.begin() ) ); REQUIRE( ecuda::equal( v.begin(), v.end(), deviceMatrixWithDefaultValues.begin() ) ); } } AND_WHEN( "the values of the latter are inspected" ) { THEN( "they should all have expected values" ) { REQUIRE( ecuda::equal( deviceMatrixWithUniqueValues.begin(), deviceMatrixWithUniqueValues.end(), &hostMatrixWithUniqueValues[0][0] ) ); REQUIRE( ecuda::equal( &hostMatrixWithUniqueValues[0][0], &hostMatrixWithUniqueValues[R][0], deviceMatrixWithUniqueValues.begin() ) ); } } AND_WHEN( "another matrix is copy constructed" ) { ecuda::matrix<data_type> mat2( deviceMatrixWithDefaultValues ); THEN( "they should be equal" ) { REQUIRE( ecuda::equal( deviceMatrixWithDefaultValues.begin(), deviceMatrixWithDefaultValues.end(), mat2.begin() ) ); REQUIRE( ecuda::equal( mat2.begin(), mat2.end(), deviceMatrixWithDefaultValues.begin() ) ); } AND_THEN( "they should reside in different memory locations" ) { REQUIRE( deviceMatrixWithDefaultValues.data() != mat2.data() ); } } AND_WHEN( "another matrix is copy assigned") { ecuda::matrix<data_type> mat2( R, C ); mat2 = deviceMatrixWithDefaultValues; THEN( "they should be equal" ) { REQUIRE( ecuda::equal( deviceMatrixWithDefaultValues.begin(), deviceMatrixWithDefaultValues.end(), mat2.begin() ) ); REQUIRE( ecuda::equal( mat2.begin(), mat2.end(), deviceMatrixWithDefaultValues.begin() ) ); } AND_THEN( "they should reside in different memory locations" ) { REQUIRE( deviceMatrixWithDefaultValues.data() != mat2.data() ); } } #ifdef ECUDA_CPP11_AVAILABLE AND_WHEN( "another matrix is move constructed" ) { ecuda::matrix<data_type> mat2( 
std::move(deviceMatrixWithDefaultValues) ); THEN( "the original matrix should be invalid" ) { REQUIRE( !deviceMatrixWithDefaultValues.data() ); } AND_THEN( "the new matrix should have the original one's contents" ) { REQUIRE( ecuda::count( mat2.begin(), mat2.end(), data_type() ) == (R*C) ); } AND_THEN( "a move assignment operation should restore the original state" ) { deviceMatrixWithDefaultValues = std::move(mat2); REQUIRE( deviceMatrixWithDefaultValues.data() ); REQUIRE( !mat2.data() ); } } #endif #ifdef __CUDACC__ AND_WHEN( "the at, operator[], and operator() accessors are used in device code" ) { THEN( "the values should be as expected" ) { ecuda::array<int,3> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT( check_matrix_accessors_on_device<data_type><<<1,1>>>( deviceMatrixWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 3 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // at() REQUIRE( hostResultCodes[1] ); // operator[] REQUIRE( hostResultCodes[2] ); // operator() } } #else AND_WHEN( "the at, operator[], and operator() accessors are used in host code" ) { THEN( "the values should be as expected" ) { std::vector<int> resultCodes( 3, 1 ); for( std::size_t i = 0; i < R; ++i ) { for( std::size_t j = 0; j < C; ++j ) { if( deviceMatrixWithUniqueValues.at(i,j) != data_type(i,j) ) resultCodes[0] = 0; if( deviceMatrixWithUniqueValues[i][j] != data_type(i,j) ) resultCodes[1] = 0; if( deviceMatrixWithUniqueValues(i,j) != data_type(i,j) ) resultCodes[2] = 0; } } REQUIRE( resultCodes[0] ); REQUIRE( resultCodes[1] ); REQUIRE( resultCodes[2] ); bool exceptionThrown = false; try { deviceMatrixWithUniqueValues.at(R,C); } catch( std::out_of_range& ex ) { exceptionThrown = true; } REQUIRE(exceptionThrown); } } #endif // __CUDACC__ #ifdef __CUDACC__ AND_WHEN( "the front() and back() accessors are used in device code" ) { THEN( "the values should be as expected" ) { ecuda::array<int,2> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT( check_matrix_front_and_back_on_device<data_type><<<1,1>>>( deviceMatrixWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 2 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // front() REQUIRE( hostResultCodes[1] ); // back() } } #else AND_WHEN( "the front() and back() accessors are used in host code" ) { THEN( "the values should be as expected" ) { REQUIRE( deviceMatrixWithUniqueValues.front() == data_type(0,0) ); REQUIRE( deviceMatrixWithUniqueValues.back() == data_type(R-1,C-1) ); } } #endif #ifdef __CUDACC__ AND_WHEN( "the begin() and end() iterators are used to traverse the matrix in device code" ) { THEN( "the values should increase from 0,0 to R-1,C-1" ) { ecuda::array<int,1> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT( check_matrix_begin_and_end_on_device<data_type><<<1,1>>>( deviceMatrixWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 1 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values } } #else AND_WHEN( "the begin() and end() iterators are used to traverse the matrix in host code" ) { THEN( "the values should increase from 0,0 to R-1,C-1" ) { std::size_t counter = 0; bool correctValues = true; for( typename ecuda::matrix<data_type>::const_iterator iter = deviceMatrixWithUniqueValues.begin(); iter != deviceMatrixWithUniqueValues.end(); ++iter, 
++counter ) { if( *iter != data_type( counter/C, counter % C ) ) correctValues = false; } REQUIRE(correctValues); } } #endif #ifdef __CUDACC__ AND_WHEN( "the rbegin() and rend() iterators are used to traverse the matrix in device code" ) { THEN( "the values should decrease from R-1,C-1 to 0,0" ) { ecuda::array<int,1> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT( check_matrix_rbegin_and_rend_on_device<data_type><<<1,1>>>( deviceMatrixWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 1 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values } } #else AND_WHEN( "the rbegin() and rend() iterators are used to traverse the matrix in host code" ) { THEN( "the values should decrease from R-1,C-1 to 0,0" ) { std::size_t counter = R*C-1; bool correctValues = true; for( typename ecuda::matrix<data_type>::const_reverse_iterator iter = deviceMatrixWithUniqueValues.rbegin(); iter != deviceMatrixWithUniqueValues.rend(); ++iter, --counter ) { if( *iter != data_type( counter/C, counter % C) ) correctValues = false; } REQUIRE(correctValues); } } #endif #ifdef __CUDACC__ AND_WHEN( "the matrices are lexicographically compared in device code" ) { THEN( "the former matrix should compare less than the latter matrix (since the default value is (66,99))" ) { ecuda::array<int,1> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT( check_matrix_lexicographical_comparison_on_device<data_type><<<1,1>>>( deviceMatrixWithUniqueValues, deviceMatrixWithDefaultValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 1 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values } } #else AND_WHEN( "the matrices are lexicographically compared in host code" ) { THEN( "the former matrix should compare less than the latter matrix (since the default value is (66,99))" ) { REQUIRE( deviceMatrixWithUniqueValues < deviceMatrixWithDefaultValues ); REQUIRE( !( deviceMatrixWithUniqueValues > deviceMatrixWithDefaultValues ) ); REQUIRE( !( deviceMatrixWithUniqueValues >= deviceMatrixWithDefaultValues ) ); } } #endif } } SCENARIO( "matrix rows function correctly", "matrix_rows" ) { GIVEN( "the 16th row from: one default-initialized ecuda::matrix with a value of (66,99) and another ecuda::array with unique values from (0,0)-(99,33) inclusive" ) { ecuda::matrix<data_type> deviceMatrixWithDefaultValues(R,C); ecuda::matrix<data_type> deviceMatrixWithUniqueValues(R,C); data_type hostMatrixWithUniqueValues[R][C]; for( std::size_t i = 0; i < R; ++i ) for( std::size_t j = 0; j < C; ++j ) hostMatrixWithUniqueValues[i][j] = data_type(i,j); ecuda::copy( &hostMatrixWithUniqueValues[0][0], &hostMatrixWithUniqueValues[R][0], deviceMatrixWithUniqueValues.begin() ); typename ecuda::matrix<data_type>::row_type deviceRowWithDefaultValues = deviceMatrixWithDefaultValues.get_row(16); typename ecuda::matrix<data_type>::row_type deviceRowWithUniqueValues = deviceMatrixWithUniqueValues[16]; WHEN( "the size of the row is inspected" ) { THEN( "it should be equal to the number of matrix columns" ) { REQUIRE( deviceRowWithDefaultValues.size() == C ); } } AND_WHEN( "the values of the former are inspected" ) { std::vector<data_type> v( C ); THEN( "they should all be default initialized" ) { REQUIRE( ecuda::equal( deviceRowWithDefaultValues.begin(), deviceRowWithDefaultValues.end(), v.begin() ) ); REQUIRE( ecuda::equal( v.begin(), v.end(), 
deviceRowWithDefaultValues.begin() ) ); } } AND_WHEN( "the values of the latter are inspected" ) { THEN( "they should all have expected values" ) { REQUIRE( ecuda::equal( deviceRowWithUniqueValues.begin(), deviceRowWithUniqueValues.end(), &hostMatrixWithUniqueValues[16][0] ) ); REQUIRE( ecuda::equal( &hostMatrixWithUniqueValues[16][0], &hostMatrixWithUniqueValues[16][C], deviceRowWithUniqueValues.begin() ) ); } } } } SCENARIO( "matrix columns function correctly", "matrix_columns" ) { GIVEN( "the 8th column from: one default-initialized ecuda::matrix with a value of (66,99) and another ecuda::array with unique values from (0,0)-(99,33) inclusive" ) { ecuda::matrix<data_type> deviceMatrixWithDefaultValues(R,C); ecuda::matrix<data_type> deviceMatrixWithUniqueValues(R,C); data_type hostMatrixWithUniqueValues[R][C]; for( std::size_t i = 0; i < R; ++i ) for( std::size_t j = 0; j < C; ++j ) hostMatrixWithUniqueValues[i][j] = data_type(i,j); ecuda::copy( &hostMatrixWithUniqueValues[0][0], &hostMatrixWithUniqueValues[R][0], deviceMatrixWithUniqueValues.begin() ); typename ecuda::matrix<data_type>::column_type deviceColumnWithDefaultValues = deviceMatrixWithDefaultValues.get_column(8); typename ecuda::matrix<data_type>::column_type deviceColumnWithUniqueValues = deviceMatrixWithUniqueValues.get_column(8); WHEN( "the size of the row is inspected" ) { THEN( "it should be equal to the number of matrix rows" ) { REQUIRE( deviceColumnWithDefaultValues.size() == R ); } } #ifdef __CUDACC__ AND_WHEN( "the values of the former are inspected in device code" ) { std::vector<data_type> v( C ); THEN( "they should all be default initialized" ) { ecuda::array<int,2> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT( check_matrix_column_default_values_on_device<data_type><<<1,1>>>( deviceColumnWithDefaultValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 2 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values REQUIRE( hostResultCodes[1] ); // correct values } } AND_WHEN( "the values of the latter are inspected in device code" ) { THEN( "they should all have expected values" ) { ecuda::array<int,2> deviceResultCodes; CUDA_CALL_KERNEL_AND_WAIT( check_matrix_column_unique_values_on_device<data_type><<<1,1>>>( deviceColumnWithUniqueValues, deviceResultCodes ) ); std::vector<int> hostResultCodes( 2 ); ecuda::copy( deviceResultCodes.begin(), deviceResultCodes.end(), hostResultCodes.begin() ); REQUIRE( hostResultCodes[0] ); // correct values REQUIRE( hostResultCodes[1] ); // correct values } } #endif } }
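The scenarios above drive ecuda::matrix entirely through Catch assertions; the underlying host-side workflow is just construct, copy a host range onto the device, and compare the two ranges. A compressed sketch of that workflow using only calls that already appear in this test (ecuda::matrix, ecuda::copy, ecuda::equal); the element type, sizes, and include path are assumptions carried over from the test rather than a documented API reference:

#include <vector>
#include "../include/ecuda/ecuda.hpp"  // same relative include path as the test above

int main() {
  const std::size_t rows = 4, cols = 3;

  // Host data laid out row-major, matching how the test fills hostMatrixWithUniqueValues.
  std::vector<double> host(rows * cols);
  for (std::size_t i = 0; i < rows; ++i)
    for (std::size_t j = 0; j < cols; ++j)
      host[i * cols + j] = static_cast<double>(i * 10 + j);

  // Device-resident matrix; elements are default-constructed on allocation.
  ecuda::matrix<double> mat(rows, cols);

  // Host -> device element-wise transfer through ecuda iterators.
  ecuda::copy(host.begin(), host.end(), mat.begin());

  // Element-wise comparison between the device range and the host range,
  // the same check the REQUIRE( ecuda::equal(...) ) assertions perform.
  const bool same = ecuda::equal(mat.begin(), mat.end(), host.begin());
  return same ? 0 : 1;
}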
cc43a30a4cbdb11c88c2c671221da1f75b2152e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// // x - input, y - indices, z - output template<typename X, typename Y> __global__ static void gatherNDCuda(const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int xRank, yRank, zRank, maxRank, yLastDim; __shared__ Nd4jLong zLen, totalThreads, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); xRank = shape::rank(xShapeInfo); yRank = shape::rank(yShapeInfo); zRank = shape::rank(zShapeInfo); maxRank = sd::math::nd4j_max<int>(yRank, sd::math::nd4j_max<int>(xRank, zRank)); zLen = shape::length(zShapeInfo); yLastDim = yShapeInfo[yRank]; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coord = sharedMem + threadIdx.x * maxRank; Nd4jLong *zCoordStart, *xCoordStart; if(yLastDim == xRank) { zCoordStart = coord; xCoordStart = coord; } if(zRank >= xRank) { zCoordStart = coord; xCoordStart = coord + zRank - xRank; } else { zCoordStart = coord + xRank - zRank; xCoordStart = coord; } const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, zCoordStart); const auto zOffset = shape::getOffset(zShapeInfo, zCoordStart); // last y coordinate int coordToRestore; if(yLastDim != xRank) coordToRestore = static_cast<int>(zCoordStart[yRank - 1]); zCoordStart[yRank - 1] = 0; // last y coordinate const auto yOffset = shape::getOffset(yShapeInfo, zCoordStart); //restore z coordinate if(yLastDim != xRank) zCoordStart[yRank - 1] = coordToRestore; // construct coordinates for x for(uint j = 0; j < yLastDim; ++j) xCoordStart[j] = y[yOffset + j * yShapeInfo[2 * yRank]]; // last stride const auto xOffset = shape::getOffset(xShapeInfo, xCoordStart); z[zOffset] = x[xOffset]; // printf("z[%lld] = x[%lld] = %f\n", zOffset, xOffset, (float) z[zOffset]); } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void 
gatherNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { hipLaunchKernelGGL(( gatherNDCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// void gatherND(sd::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) { const int maxRank = sd::math::nd4j_max<int>(indices.rankOf(), sd::math::nd4j_max<int>(input.rankOf(), output.rankOf())); const int threadsPerBlock = 256; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 8 * threadsPerBlock * maxRank + 128; const auto xType = input.dataType(); const auto yType = indices.dataType(); PointersManager manager(context, "gatherND"); NDArray::prepareSpecialUse({&output}, {&input, &indices}); BUILD_DOUBLE_SELECTOR(xType, yType, gatherNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), indices.specialBuffer(), indices.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({&output}, {&input, &indices}); manager.synchronize(); } } } }
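A note on the launch configuration in the gatherND wrapper above: each thread carves out maxRank Nd4jLong coordinates from dynamic shared memory (coord = sharedMem + threadIdx.x * maxRank), which is where the host-side expression sharedMem = 8 * threadsPerBlock * maxRank + 128 comes from. The worked arithmetic below is only an illustration; it assumes Nd4jLong is 8 bytes and that the trailing 128 bytes are alignment slack, neither of which is spelled out in this file.

// Illustration of the dynamic shared-memory budget used by gatherND (not library code).
#include <cstdint>
#include <cstdio>

int main() {
    const int threadsPerBlock = 256;                                     // value chosen by gatherND above
    const int maxRank = 4;                                               // hypothetical rank for the example
    const size_t perThreadBytes = maxRank * sizeof(int64_t);             // one assumed-64-bit Nd4jLong per coordinate
    const size_t sharedMemBytes = 8 * threadsPerBlock * maxRank + 128;   // formula from the wrapper above
    std::printf("per-thread scratch: %zu B, block request: %zu B (256 * %zu B + 128 B slack)\n",
                perThreadBytes, sharedMemBytes, perThreadBytes);
    return 0;
}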
cc43a30a4cbdb11c88c2c671221da1f75b2152e4.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// // x - input, y - indices, z - output template<typename X, typename Y> __global__ static void gatherNDCuda(const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int xRank, yRank, zRank, maxRank, yLastDim; __shared__ Nd4jLong zLen, totalThreads, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); xRank = shape::rank(xShapeInfo); yRank = shape::rank(yShapeInfo); zRank = shape::rank(zShapeInfo); maxRank = sd::math::nd4j_max<int>(yRank, sd::math::nd4j_max<int>(xRank, zRank)); zLen = shape::length(zShapeInfo); yLastDim = yShapeInfo[yRank]; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coord = sharedMem + threadIdx.x * maxRank; Nd4jLong *zCoordStart, *xCoordStart; if(yLastDim == xRank) { zCoordStart = coord; xCoordStart = coord; } if(zRank >= xRank) { zCoordStart = coord; xCoordStart = coord + zRank - xRank; } else { zCoordStart = coord + xRank - zRank; xCoordStart = coord; } const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(i, zShapeInfo, zCoordStart); const auto zOffset = shape::getOffset(zShapeInfo, zCoordStart); // last y coordinate int coordToRestore; if(yLastDim != xRank) coordToRestore = static_cast<int>(zCoordStart[yRank - 1]); zCoordStart[yRank - 1] = 0; // last y coordinate const auto yOffset = shape::getOffset(yShapeInfo, zCoordStart); //restore z coordinate if(yLastDim != xRank) zCoordStart[yRank - 1] = coordToRestore; // construct coordinates for x for(uint j = 0; j < yLastDim; ++j) xCoordStart[j] = y[yOffset + j * yShapeInfo[2 * yRank]]; // last stride const auto xOffset = shape::getOffset(xShapeInfo, xCoordStart); z[zOffset] = x[xOffset]; // printf("z[%lld] = x[%lld] = %f\n", zOffset, xOffset, (float) z[zOffset]); } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void gatherNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int 
sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { gatherNDCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// void gatherND(sd::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) { const int maxRank = sd::math::nd4j_max<int>(indices.rankOf(), sd::math::nd4j_max<int>(input.rankOf(), output.rankOf())); const int threadsPerBlock = 256; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 8 * threadsPerBlock * maxRank + 128; const auto xType = input.dataType(); const auto yType = indices.dataType(); PointersManager manager(context, "gatherND"); NDArray::prepareSpecialUse({&output}, {&input, &indices}); BUILD_DOUBLE_SELECTOR(xType, yType, gatherNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), indices.specialBuffer(), indices.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({&output}, {&input, &indices}); manager.synchronize(); } } } }
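Comparing the .hip and .cu versions of this pair, the only substantive difference is the kernel-launch syntax that hipify rewrites. The sketch below shows that mapping on a deliberately tiny, hypothetical kernel (scaleKernel and launchScale are not part of the pair above); the HIP form is left as a comment so the snippet compiles as plain CUDA.

// Minimal illustration of the CUDA <<<>>> launch and its hipify translation.
#include <cuda_runtime.h>

__global__ void scaleKernel(float* data, float factor, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;   // one element per thread
    if (i < n) data[i] *= factor;
}

void launchScale(float* d_data, float factor, int n, cudaStream_t stream) {
    const int block = 256;
    const int grid = (n + block - 1) / block;
    // CUDA form, as in the .cu file above:
    scaleKernel<<<grid, block, 0, stream>>>(d_data, factor, n);
    // HIP form emitted by hipify, as in the .hip file above:
    //   hipLaunchKernelGGL(scaleKernel, dim3(grid), dim3(block), 0, stream, d_data, factor, n);
}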
9a3894c4251d690d8a36e8033cd11c4c680f7001.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "core/warp_solver/NodeGraphSmoothHandler.h" #include "common/Constants.h" #include "core/warp_solver/solver_constants.h" #include <device_launch_parameters.h> namespace surfelwarp { namespace device { __global__ void forwardWarpSmootherNodeKernel( DeviceArrayView<ushort2> node_graph, const float4* reference_node_array, const DualQuaternion* node_se3, float3* Ti_xj_array, float3* Tj_xj_array, unsigned char* validity_indicator_array ) { const auto idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < node_graph.Size()) { const ushort2 node_ij = node_graph[idx]; const auto xi = reference_node_array[node_ij.x]; const auto xj = reference_node_array[node_ij.y]; DualQuaternion dq_i = node_se3[node_ij.x]; DualQuaternion dq_j = node_se3[node_ij.y]; const mat34 Ti = dq_i.se3_matrix(); const mat34 Tj = dq_j.se3_matrix(); const auto Ti_xj = Ti.rot * xj + Ti.trans; const auto Tj_xj = Tj.rot * xj + Tj.trans; unsigned char validity_indicator = 1; #if defined(CLIP_FARAWAY_NODEGRAPH_PAIR) if (squared_norm_xyz(xi - xj) > 64 * d_node_radius_square) { validity_indicator = 0; } #endif //Save all the data Ti_xj_array[idx] = Ti_xj; Tj_xj_array[idx] = Tj_xj; validity_indicator_array[idx] = validity_indicator; } } } // device } // surfelwarp surfelwarp::NodeGraphSmoothHandler::NodeGraphSmoothHandler() { const auto num_smooth_terms = Constants::kMaxNumNodes * Constants::kNumNodeGraphNeigbours; Ti_xj_.AllocateBuffer(num_smooth_terms); Tj_xj_.AllocateBuffer(num_smooth_terms); m_pair_validity_indicator.AllocateBuffer(num_smooth_terms); } surfelwarp::NodeGraphSmoothHandler::~NodeGraphSmoothHandler() { Ti_xj_.ReleaseBuffer(); Tj_xj_.ReleaseBuffer(); m_pair_validity_indicator.ReleaseBuffer(); } void surfelwarp::NodeGraphSmoothHandler::SetInputs( const DeviceArrayView<DualQuaternion>& node_se3, const DeviceArrayView<ushort2>& node_graph, const DeviceArrayView<float4>& reference_nodes ) { m_node_se3 = node_se3; m_node_graph = node_graph; m_reference_node_coords = reference_nodes; } /* The method to build the term2jacobian */ void surfelwarp::NodeGraphSmoothHandler::forwardWarpSmootherNodes(hipStream_t stream) { Ti_xj_.ResizeArrayOrException(m_node_graph.Size()); Tj_xj_.ResizeArrayOrException(m_node_graph.Size()); m_pair_validity_indicator.ResizeArrayOrException(m_node_graph.Size()); dim3 blk(128); dim3 grid(divUp(m_node_graph.Size(), blk.x)); hipLaunchKernelGGL(( device::forwardWarpSmootherNodeKernel), dim3(grid), dim3(blk), 0, stream, m_node_graph, m_reference_node_coords.RawPtr(), m_node_se3.RawPtr(), Ti_xj_.Ptr(), Tj_xj_.Ptr(), m_pair_validity_indicator.Ptr() ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif } void surfelwarp::NodeGraphSmoothHandler::BuildTerm2Jacobian(hipStream_t stream) { forwardWarpSmootherNodes(stream); } surfelwarp::NodeGraphSmoothTerm2Jacobian surfelwarp::NodeGraphSmoothHandler::Term2JacobianMap() const { NodeGraphSmoothTerm2Jacobian map; map.node_se3 = m_node_se3; map.reference_node_coords = m_reference_node_coords; map.node_graph = m_node_graph; map.Ti_xj = Ti_xj_.ArrayView(); map.Tj_xj = Tj_xj_.ArrayView(); map.validity_indicator = m_pair_validity_indicator.ArrayView(); return map; }
9a3894c4251d690d8a36e8033cd11c4c680f7001.cu
#include "core/warp_solver/NodeGraphSmoothHandler.h" #include "common/Constants.h" #include "core/warp_solver/solver_constants.h" #include <device_launch_parameters.h> namespace surfelwarp { namespace device { __global__ void forwardWarpSmootherNodeKernel( DeviceArrayView<ushort2> node_graph, const float4* reference_node_array, const DualQuaternion* node_se3, float3* Ti_xj_array, float3* Tj_xj_array, unsigned char* validity_indicator_array ) { const auto idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < node_graph.Size()) { const ushort2 node_ij = node_graph[idx]; const auto xi = reference_node_array[node_ij.x]; const auto xj = reference_node_array[node_ij.y]; DualQuaternion dq_i = node_se3[node_ij.x]; DualQuaternion dq_j = node_se3[node_ij.y]; const mat34 Ti = dq_i.se3_matrix(); const mat34 Tj = dq_j.se3_matrix(); const auto Ti_xj = Ti.rot * xj + Ti.trans; const auto Tj_xj = Tj.rot * xj + Tj.trans; unsigned char validity_indicator = 1; #if defined(CLIP_FARAWAY_NODEGRAPH_PAIR) if (squared_norm_xyz(xi - xj) > 64 * d_node_radius_square) { validity_indicator = 0; } #endif //Save all the data Ti_xj_array[idx] = Ti_xj; Tj_xj_array[idx] = Tj_xj; validity_indicator_array[idx] = validity_indicator; } } } // device } // surfelwarp surfelwarp::NodeGraphSmoothHandler::NodeGraphSmoothHandler() { const auto num_smooth_terms = Constants::kMaxNumNodes * Constants::kNumNodeGraphNeigbours; Ti_xj_.AllocateBuffer(num_smooth_terms); Tj_xj_.AllocateBuffer(num_smooth_terms); m_pair_validity_indicator.AllocateBuffer(num_smooth_terms); } surfelwarp::NodeGraphSmoothHandler::~NodeGraphSmoothHandler() { Ti_xj_.ReleaseBuffer(); Tj_xj_.ReleaseBuffer(); m_pair_validity_indicator.ReleaseBuffer(); } void surfelwarp::NodeGraphSmoothHandler::SetInputs( const DeviceArrayView<DualQuaternion>& node_se3, const DeviceArrayView<ushort2>& node_graph, const DeviceArrayView<float4>& reference_nodes ) { m_node_se3 = node_se3; m_node_graph = node_graph; m_reference_node_coords = reference_nodes; } /* The method to build the term2jacobian */ void surfelwarp::NodeGraphSmoothHandler::forwardWarpSmootherNodes(cudaStream_t stream) { Ti_xj_.ResizeArrayOrException(m_node_graph.Size()); Tj_xj_.ResizeArrayOrException(m_node_graph.Size()); m_pair_validity_indicator.ResizeArrayOrException(m_node_graph.Size()); dim3 blk(128); dim3 grid(divUp(m_node_graph.Size(), blk.x)); device::forwardWarpSmootherNodeKernel<<<grid, blk, 0, stream>>>( m_node_graph, m_reference_node_coords.RawPtr(), m_node_se3.RawPtr(), Ti_xj_.Ptr(), Tj_xj_.Ptr(), m_pair_validity_indicator.Ptr() ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif } void surfelwarp::NodeGraphSmoothHandler::BuildTerm2Jacobian(cudaStream_t stream) { forwardWarpSmootherNodes(stream); } surfelwarp::NodeGraphSmoothTerm2Jacobian surfelwarp::NodeGraphSmoothHandler::Term2JacobianMap() const { NodeGraphSmoothTerm2Jacobian map; map.node_se3 = m_node_se3; map.reference_node_coords = m_reference_node_coords; map.node_graph = m_node_graph; map.Ti_xj = Ti_xj_.ArrayView(); map.Tj_xj = Tj_xj_.ArrayView(); map.validity_indicator = m_pair_validity_indicator.ArrayView(); return map; }
936750938091907b04681586eea3694f2e9226ed.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Example showing the use of CUFFT for fast 1D-convolution using FFT. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <hip/hip_runtime.h> #include <hipfft.h> #include <helper_functions.h> #include <helper_cuda.h> // Complex data type typedef float2 Complex; static __device__ __host__ inline Complex ComplexAdd(Complex, Complex); static __device__ __host__ inline Complex ComplexScale(Complex, float); static __device__ __host__ inline Complex ComplexMul(Complex, Complex); static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *, int, float); // Filtering functions void Convolve(const Complex *, int, const Complex *, int, Complex *); // Padding functions int PadData(const Complex *, Complex **, int, const Complex *, Complex **, int); //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); // The filter size is assumed to be a number smaller than the signal size #define SIGNAL_SIZE 50 #define FILTER_KERNEL_SIZE 11 //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { printf("[simpleCUFFT] is starting...\n"); findCudaDevice(argc, (const char **)argv); // Allocate host memory for the signal Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE); // Initialize the memory for the signal for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) // 50 { h_signal[i].x = rand() / (float)RAND_MAX; h_signal[i].y = 0; } // Allocate host memory for the filter Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE); // Initialize the memory for the filter for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) { h_filter_kernel[i].x = rand() / (float)RAND_MAX; h_filter_kernel[i].y = 0; } // Pad signal and filter kernel Complex *h_padded_signal; Complex *h_padded_filter_kernel; int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE, h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE); int mem_size = sizeof(Complex) * new_size; // Allocate device memory for signal Complex *d_signal; checkCudaErrors(hipMalloc((void **)&d_signal, mem_size)); // Copy host memory to device checkCudaErrors(hipMemcpy(d_signal, h_padded_signal, mem_size, hipMemcpyHostToDevice)); // Allocate device memory for filter kernel Complex *d_filter_kernel; checkCudaErrors(hipMalloc((void **)&d_filter_kernel, mem_size)); // Copy host memory to device checkCudaErrors(hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size, hipMemcpyHostToDevice)); // CUFFT plan hipfftHandle plan; checkCudaErrors(hipfftPlan1d(&plan, new_size, HIPFFT_C2C, 1)); // Transform signal and kernel printf("Transforming signal hipfftExecC2C\n"); checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD)); checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD)); // Multiply the coefficients together and normalize the result printf("Launching ComplexPointwiseMulAndScale<<< >>>\n"); hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(32), dim3(256), 0, 0, d_signal, d_filter_kernel, new_size, 1.0f / new_size); // Check if kernel execution generated and error getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]"); // Transform signal back printf("Transforming signal back hipfftExecC2C\n"); checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD)); // Copy device memory to host Complex *h_convolved_signal = h_padded_signal; checkCudaErrors(hipMemcpy(h_convolved_signal, d_signal, mem_size, hipMemcpyDeviceToHost)); // Allocate host memory for the convolution result Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE); // Convolve on the host Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE, h_convolved_signal_ref); // check result bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f); //Destroy CUFFT context checkCudaErrors(hipfftDestroy(plan)); // cleanup memory free(h_signal); free(h_filter_kernel); free(h_padded_signal); free(h_padded_filter_kernel); free(h_convolved_signal_ref); checkCudaErrors(hipFree(d_signal)); checkCudaErrors(hipFree(d_filter_kernel)); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. 
It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE); } // Pad data int PadData(const Complex *signal, Complex **padded_signal, int signal_size, const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; int new_size = signal_size + maxRadius; // Pad signal Complex *new_data = (Complex *)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, signal, signal_size * sizeof(Complex)); memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex)); *padded_signal = new_data; // Pad filter new_data = (Complex *)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex)); memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex)); memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex)); *padded_filter_kernel = new_data; return new_size; } //////////////////////////////////////////////////////////////////////////////// // Filtering operations //////////////////////////////////////////////////////////////////////////////// // Computes convolution on the host void Convolve(const Complex *signal, int signal_size, const Complex *filter_kernel, int filter_kernel_size, Complex *filtered_signal) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; // Loop over output element indices for (int i = 0; i < signal_size; ++i) { filtered_signal[i].x = filtered_signal[i].y = 0; // Loop over convolution indices for (int j = - maxRadius + 1; j <= minRadius; ++j) { int k = i + j; if (k >= 0 && k < signal_size) { filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j])); } } } } //////////////////////////////////////////////////////////////////////////////// // Complex operations //////////////////////////////////////////////////////////////////////////////// // Complex addition static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) { Complex c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } // Complex scale static __device__ __host__ inline Complex ComplexScale(Complex a, float s) { Complex c; c.x = s * a.x; c.y = s * a.y; return c; } // Complex multiplication static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // Complex pointwise multiplication static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b, int size, float scale) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < size; i += numThreads) { a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale); } }
936750938091907b04681586eea3694f2e9226ed.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Example showing the use of CUFFT for fast 1D-convolution using FFT. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cuda_runtime.h> #include <cufft.h> #include <helper_functions.h> #include <helper_cuda.h> // Complex data type typedef float2 Complex; static __device__ __host__ inline Complex ComplexAdd(Complex, Complex); static __device__ __host__ inline Complex ComplexScale(Complex, float); static __device__ __host__ inline Complex ComplexMul(Complex, Complex); static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *, int, float); // Filtering functions void Convolve(const Complex *, int, const Complex *, int, Complex *); // Padding functions int PadData(const Complex *, Complex **, int, const Complex *, Complex **, int); //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char **argv); // The filter size is assumed to be a number smaller than the signal size #define SIGNAL_SIZE 50 #define FILTER_KERNEL_SIZE 11 //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char **argv) { printf("[simpleCUFFT] is starting...\n"); findCudaDevice(argc, (const char **)argv); // Allocate host memory for the signal Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE); // Initialize the memory for the signal for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) // 50 { h_signal[i].x = rand() / (float)RAND_MAX; h_signal[i].y = 0; } // Allocate host memory for the filter Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE); // Initialize the memory for the filter for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) { h_filter_kernel[i].x = rand() / (float)RAND_MAX; h_filter_kernel[i].y = 0; } // Pad signal and filter kernel Complex *h_padded_signal; Complex *h_padded_filter_kernel; int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE, h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE); int mem_size = sizeof(Complex) * new_size; // Allocate device memory for signal Complex *d_signal; checkCudaErrors(cudaMalloc((void **)&d_signal, mem_size)); // Copy host memory to device checkCudaErrors(cudaMemcpy(d_signal, h_padded_signal, mem_size, cudaMemcpyHostToDevice)); // Allocate device memory for filter kernel Complex *d_filter_kernel; checkCudaErrors(cudaMalloc((void **)&d_filter_kernel, mem_size)); // Copy host memory to device checkCudaErrors(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size, cudaMemcpyHostToDevice)); // CUFFT plan cufftHandle plan; checkCudaErrors(cufftPlan1d(&plan, new_size, CUFFT_C2C, 1)); // Transform signal and kernel printf("Transforming signal cufftExecC2C\n"); checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD)); checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD)); // Multiply the coefficients together and normalize the result printf("Launching ComplexPointwiseMulAndScale<<< >>>\n"); ComplexPointwiseMulAndScale<<<32, 256>>>(d_signal, d_filter_kernel, new_size, 1.0f / new_size); // Check if kernel execution generated and error getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]"); // Transform signal back printf("Transforming signal back cufftExecC2C\n"); checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE)); // Copy device memory to host Complex *h_convolved_signal = h_padded_signal; checkCudaErrors(cudaMemcpy(h_convolved_signal, d_signal, mem_size, cudaMemcpyDeviceToHost)); // Allocate host memory for the convolution result Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE); // Convolve on the host Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE, h_convolved_signal_ref); // check result bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f); //Destroy CUFFT context checkCudaErrors(cufftDestroy(plan)); // cleanup memory free(h_signal); free(h_filter_kernel); free(h_padded_signal); free(h_padded_filter_kernel); free(h_convolved_signal_ref); checkCudaErrors(cudaFree(d_signal)); checkCudaErrors(cudaFree(d_filter_kernel)); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE); } // Pad data int PadData(const Complex *signal, Complex **padded_signal, int signal_size, const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; int new_size = signal_size + maxRadius; // Pad signal Complex *new_data = (Complex *)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, signal, signal_size * sizeof(Complex)); memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex)); *padded_signal = new_data; // Pad filter new_data = (Complex *)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex)); memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex)); memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex)); *padded_filter_kernel = new_data; return new_size; } //////////////////////////////////////////////////////////////////////////////// // Filtering operations //////////////////////////////////////////////////////////////////////////////// // Computes convolution on the host void Convolve(const Complex *signal, int signal_size, const Complex *filter_kernel, int filter_kernel_size, Complex *filtered_signal) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; // Loop over output element indices for (int i = 0; i < signal_size; ++i) { filtered_signal[i].x = filtered_signal[i].y = 0; // Loop over convolution indices for (int j = - maxRadius + 1; j <= minRadius; ++j) { int k = i + j; if (k >= 0 && k < signal_size) { filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j])); } } } } //////////////////////////////////////////////////////////////////////////////// // Complex operations //////////////////////////////////////////////////////////////////////////////// // Complex addition static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) { Complex c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } // Complex scale static __device__ __host__ inline Complex ComplexScale(Complex a, float s) { Complex c; c.x = s * a.x; c.y = s * a.y; return c; } // Complex multiplication static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // Complex pointwise multiplication static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b, int size, float scale) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < size; i += numThreads) { a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale); } }
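To summarize the FFT traffic in runTest above: both padded buffers are transformed forward, multiplied pointwise with a 1/new_size scale, and the signal is transformed back. The sketch below compresses that sequence using only cuFFT calls already present in this file; buffer setup, the pointwise kernel, and error checking are omitted. In the hipified twin, the cufft* names become hipfft* and CUFFT_INVERSE maps to HIPFFT_BACKWARD.

// Condensed view of the frequency-domain convolution performed by runTest (illustration only).
#include <cufft.h>

void fftConvolveInPlace(cufftComplex* d_signal, cufftComplex* d_kernel, int n) {
    cufftHandle plan;
    cufftPlan1d(&plan, n, CUFFT_C2C, 1);

    // Forward transforms of the padded signal and filter kernel.
    cufftExecC2C(plan, d_signal, d_signal, CUFFT_FORWARD);
    cufftExecC2C(plan, d_kernel, d_kernel, CUFFT_FORWARD);

    // Pointwise multiply and 1/n scaling happen here in the file above:
    //   ComplexPointwiseMulAndScale<<<32, 256>>>(d_signal, d_kernel, n, 1.0f / n);

    // Inverse transform returns the convolved signal to the time domain.
    cufftExecC2C(plan, d_signal, d_signal, CUFFT_INVERSE);
    cufftDestroy(plan);
}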
relu_bfloat16_kernel.hip
// !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <hip/hip_runtime.h>
#include "oneflow/core/ep/cuda/cuda_stream.h"

#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000

#include "oneflow/core/device/cuda_pseudo_bfloat16.h"

namespace oneflow {

namespace user_op {

namespace {

template<typename T>
__global__ void ReluForwardGpu(int64_t n, const T* in, T* out) {
  const T zero = static_cast<T>(0.0);
  CUDA_1D_KERNEL_LOOP(i, n) {
    const T in_i = in[i];
    T out_i = zero;
    if (in_i > zero) { out_i = in_i; }
    out[i] = out_i;
  }
}

template<typename T>
__global__ void ReluBackwardGpu(int64_t n, const T* y, const T* dy, T* dx) {
  const T zero = static_cast<T>(0.0);
  CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = y[i] > zero ? dy[i] : zero; }
}

}  // namespace

class ReluNvBFloat16Kernel final : public OpKernel {
 public:
  ReluNvBFloat16Kernel() = default;
  ~ReluNvBFloat16Kernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(KernelComputeContext* ctx) const override {
    const Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
    Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
    const int64_t n = in->shape().elem_cnt();
    hipLaunchKernelGGL(( ReluForwardGpu<nv_bfloat16>), dim3(BlocksNum4ThreadsNum(n)),
                       dim3(kCudaThreadsNumPerBlock), 0,
                       ctx->stream()->As<ep::CudaStream>()->cuda_stream(),
                       n, reinterpret_cast<const nv_bfloat16*>(in->dptr()),
                       reinterpret_cast<nv_bfloat16*>(out->mut_dptr()));
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};

class ReluGradNvBFloat16Kernel final : public OpKernel {
 public:
  ReluGradNvBFloat16Kernel() = default;
  ~ReluGradNvBFloat16Kernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(KernelComputeContext* ctx) const override {
    const Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
    const Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
    Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
    const int64_t n = y->shape().elem_cnt();
    hipLaunchKernelGGL(( ReluBackwardGpu<nv_bfloat16>), dim3(BlocksNum4ThreadsNum(n)),
                       dim3(kCudaThreadsNumPerBlock), 0,
                       ctx->stream()->As<ep::CudaStream>()->cuda_stream(),
                       n, reinterpret_cast<const nv_bfloat16*>(y->dptr()),
                       reinterpret_cast<const nv_bfloat16*>(dy->dptr()),
                       reinterpret_cast<nv_bfloat16*>(dx->mut_dptr()));
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};

REGISTER_USER_KERNEL("relu")
    .SetCreateFn<ReluNvBFloat16Kernel>()
    .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) &&
                     (user_op::HobDataType("out", 0) == DataType::kBFloat16))
    .SetInplaceProposalFn([](const user_op::InferContext&,
                             user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
      OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));
      return Maybe<void>::Ok();
    });

REGISTER_USER_KERNEL("relu_grad")
    .SetCreateFn<ReluGradNvBFloat16Kernel>()
    .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) &&
                     (user_op::HobDataType("dx", 0) == DataType::kBFloat16))
    .SetInplaceProposalFn([](const user_op::InferContext&,
                             user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
      OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true));
      return Maybe<void>::Ok();
    });

}  // namespace user_op

}  // namespace oneflow

#endif  // defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
relu_bfloat16_kernel.cu
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <cuda.h>
#include "oneflow/core/ep/cuda/cuda_stream.h"

#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000

#include "oneflow/core/device/cuda_pseudo_bfloat16.h"

namespace oneflow {

namespace user_op {

namespace {

template<typename T>
__global__ void ReluForwardGpu(int64_t n, const T* in, T* out) {
  const T zero = static_cast<T>(0.0);
  CUDA_1D_KERNEL_LOOP(i, n) {
    const T in_i = in[i];
    T out_i = zero;
    if (in_i > zero) { out_i = in_i; }
    out[i] = out_i;
  }
}

template<typename T>
__global__ void ReluBackwardGpu(int64_t n, const T* y, const T* dy, T* dx) {
  const T zero = static_cast<T>(0.0);
  CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = y[i] > zero ? dy[i] : zero; }
}

}  // namespace

class ReluNvBFloat16Kernel final : public OpKernel {
 public:
  ReluNvBFloat16Kernel() = default;
  ~ReluNvBFloat16Kernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(KernelComputeContext* ctx) const override {
    const Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
    Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
    const int64_t n = in->shape().elem_cnt();
    ReluForwardGpu<nv_bfloat16><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
                                  ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(
        n, reinterpret_cast<const nv_bfloat16*>(in->dptr()),
        reinterpret_cast<nv_bfloat16*>(out->mut_dptr()));
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};

class ReluGradNvBFloat16Kernel final : public OpKernel {
 public:
  ReluGradNvBFloat16Kernel() = default;
  ~ReluGradNvBFloat16Kernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(KernelComputeContext* ctx) const override {
    const Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
    const Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
    Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
    const int64_t n = y->shape().elem_cnt();
    ReluBackwardGpu<nv_bfloat16><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
                                   ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(
        n, reinterpret_cast<const nv_bfloat16*>(y->dptr()),
        reinterpret_cast<const nv_bfloat16*>(dy->dptr()),
        reinterpret_cast<nv_bfloat16*>(dx->mut_dptr()));
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};

REGISTER_USER_KERNEL("relu")
    .SetCreateFn<ReluNvBFloat16Kernel>()
    .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) &&
                     (user_op::HobDataType("out", 0) == DataType::kBFloat16))
    .SetInplaceProposalFn([](const user_op::InferContext&,
                             user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
      OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));
      return Maybe<void>::Ok();
    });

REGISTER_USER_KERNEL("relu_grad")
    .SetCreateFn<ReluGradNvBFloat16Kernel>()
    .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) &&
                     (user_op::HobDataType("dx", 0) == DataType::kBFloat16))
    .SetInplaceProposalFn([](const user_op::InferContext&,
                             user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
      OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true));
      return Maybe<void>::Ok();
    });

}  // namespace user_op

}  // namespace oneflow

#endif  // defined(CUDA_VERSION) && CUDA_VERSION >= 11000
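CUDA_1D_KERNEL_LOOP in the two relu kernels is a OneFlow macro whose definition is not reproduced here; it presumably expands to the standard grid-stride loop. The standalone kernel below writes that loop out by hand for a plain float relu, purely as a sketch of the assumed pattern.

// Hand-written grid-stride loop, the pattern CUDA_1D_KERNEL_LOOP is assumed to wrap.
#include <cuda_runtime.h>

__global__ void ReluForwardGridStride(int64_t n, const float* in, float* out) {
    const int64_t stride = static_cast<int64_t>(gridDim.x) * blockDim.x;
    for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        const float v = in[i];
        out[i] = v > 0.0f ? v : 0.0f;   // same zero clamp as ReluForwardGpu above
    }
}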
6c2174f65e17c4c68cde9b6bc737eeb48d370cf4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ float Hue_2_RGB_gpu( float v1, float v2, float vH )   //Function Hue_2_RGB
{
    if ( vH < 0 ) vH += 1;
    if ( vH > 1 ) vH -= 1;
    if ( ( 6 * vH ) < 1 ) return ( v1 + ( v2 - v1 ) * 6 * vH );
    if ( ( 2 * vH ) < 1 ) return ( v2 );
    if ( ( 3 * vH ) < 2 ) return ( v1 + ( v2 - v1 ) * ( ( 2.0f/3.0f ) - vH ) * 6 );
    return ( v1 );
}

__global__ void hsl2rgb_gpu_son(float * d_h , float * d_s ,unsigned char * d_l ,
                                unsigned char * d_r, unsigned char * d_g, unsigned char * d_b, int size)
{
    int x = threadIdx.x + blockDim.x*blockIdx.x;
    if (x >= size) return;

    float H = d_h[x];
    float S = d_s[x];
    float L = d_l[x]/255.0f;

    float var_1, var_2;
    unsigned char r,g,b;

    if ( S == 0 )
    {
        r = L * 255;
        g = L * 255;
        b = L * 255;
    }
    else
    {
        if ( L < 0.5 ) var_2 = L * ( 1 + S );
        else var_2 = ( L + S ) - ( S * L );

        var_1 = 2 * L - var_2;

        r = 255 * Hue_2_RGB_gpu( var_1, var_2, H + (1.0f/3.0f) );
        g = 255 * Hue_2_RGB_gpu( var_1, var_2, H );
        b = 255 * Hue_2_RGB_gpu( var_1, var_2, H - (1.0f/3.0f) );
    }

    d_r[x] = r;
    d_g[x] = g;
    d_b[x] = b;
}
6c2174f65e17c4c68cde9b6bc737eeb48d370cf4.cu
#include "includes.h" __device__ float Hue_2_RGB_gpu( float v1, float v2, float vH ) //Function Hue_2_RGB { if ( vH < 0 ) vH += 1; if ( vH > 1 ) vH -= 1; if ( ( 6 * vH ) < 1 ) return ( v1 + ( v2 - v1 ) * 6 * vH ); if ( ( 2 * vH ) < 1 ) return ( v2 ); if ( ( 3 * vH ) < 2 ) return ( v1 + ( v2 - v1 ) * ( ( 2.0f/3.0f ) - vH ) * 6 ); return ( v1 ); } __global__ void hsl2rgb_gpu_son(float * d_h , float * d_s ,unsigned char * d_l , unsigned char * d_r, unsigned char * d_g, unsigned char * d_b, int size) { int x = threadIdx.x + blockDim.x*blockIdx.x; if (x >= size) return; float H = d_h[x]; float S = d_s[x]; float L = d_l[x]/255.0f; float var_1, var_2; unsigned char r,g,b; if ( S == 0 ) { r = L * 255; g = L * 255; b = L * 255; } else { if ( L < 0.5 ) var_2 = L * ( 1 + S ); else var_2 = ( L + S ) - ( S * L ); var_1 = 2 * L - var_2; r = 255 * Hue_2_RGB_gpu( var_1, var_2, H + (1.0f/3.0f) ); g = 255 * Hue_2_RGB_gpu( var_1, var_2, H ); b = 255 * Hue_2_RGB_gpu( var_1, var_2, H - (1.0f/3.0f) ); } d_r[x] = r; d_g[x] = g; d_b[x] = b; }