Dataset columns:
  hip_filename:  string, 5 to 84 characters
  hip_content:   string, 79 to 9.69M characters
  cuda_filename: string, 4 to 83 characters
  cuda_content:  string, 19 to 9.69M characters
9394d821048ac12862beb695822be309206df216.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> s d c @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_z #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __device__ void ztranspose_device( int m, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *AT, int ldat) { __shared__ magmaDoubleComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ztranspose_kernel( int m, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *AT, int ldat) { ztranspose_device(m, n, A, lda, AT, ldat); } __global__ void ztranspose_kernel_batched( int m, int n, magmaDoubleComplex **dA_array, int lda, magmaDoubleComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ztranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- ztranspose copies and transposes a matrix dA to matrix dAT. Same as ztranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX_16 array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_ztranspose_q( magma_int_t m, magma_int_t n, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); hipLaunchKernelGGL(( ztranspose_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- ztranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ztranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX_16* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX_16* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_ztranspose_batched( magma_int_t m, magma_int_t n, magmaDoubleComplex **dA_array, magma_int_t ldda, magmaDoubleComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY, 1 ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); hipLaunchKernelGGL(( ztranspose_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA_array, ldda, dAT_array, lddat ); }
9394d821048ac12862beb695822be309206df216.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @precisions normal z -> s d c @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define PRECISION_z #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __device__ void ztranspose_device( int m, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *AT, int ldat) { __shared__ magmaDoubleComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void ztranspose_kernel( int m, int n, const magmaDoubleComplex *A, int lda, magmaDoubleComplex *AT, int ldat) { ztranspose_device(m, n, A, lda, AT, ldat); } __global__ void ztranspose_kernel_batched( int m, int n, magmaDoubleComplex **dA_array, int lda, magmaDoubleComplex **dAT_array, int ldat) { int batchid = blockIdx.z; ztranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /***************************************************************************//** Purpose ------- ztranspose copies and transposes a matrix dA to matrix dAT. Same as ztranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT COMPLEX_16 array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_ztranspose_q( magma_int_t m, magma_int_t n, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); ztranspose_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dAT, lddat ); } /***************************************************************************//** Purpose ------- ztranspose_batched copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as ztranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array COMPLEX_16* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array COMPLEX_16* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_transpose_batched *******************************************************************************/ extern "C" void magmablas_ztranspose_batched( magma_int_t m, magma_int_t n, magmaDoubleComplex **dA_array, magma_int_t ldda, magmaDoubleComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY, 1 ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); ztranspose_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA_array, ldda, dAT_array, lddat ); }
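The substantive difference between the .hip and .cu versions of this record is the kernel-launch syntax: the CUDA file launches with the triple-chevron form, and hipify rewrites each launch into a hipLaunchKernelGGL call taking dim3 arguments, the shared-memory size, and the stream. A minimal sketch of that mapping, using a hypothetical toy kernel scale() rather than the MAGMA ztranspose kernels:

#include <hip/hip_runtime.h>

// hypothetical toy kernel standing in for ztranspose_kernel
__global__ void scale(double *x, int n, double alpha)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= alpha;
}

void launch_scale(double *d_x, int n, double alpha, hipStream_t stream)
{
    dim3 threads(256);
    dim3 grid((n + 255) / 256);
    // CUDA source form:    scale<<< grid, threads, 0, stream >>>( d_x, n, alpha );
    // hipify output form:
    hipLaunchKernelGGL(scale, grid, threads, 0, stream, d_x, n, alpha);
}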
0ae17a9f949cf3973efc92b3c1a356af6606356c.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensor.hip" #else int THCTensor_(getDevice)(THCState* state, const THCTensor* tensor) { return THCTensor_getDevice(state, tensor); } #endif
0ae17a9f949cf3973efc92b3c1a356af6606356c.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensor.cu" #else int THCTensor_(getDevice)(THCState* state, const THCTensor* tensor) { return THCTensor_getDevice(state, tensor); } #endif
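This record uses the THC "generic file" guard: on direct inclusion the file only defines THC_GENERIC_FILE to its own path, and the body between #else and #endif compiles only when an including translation unit has already defined that macro, which the THC build does once per scalar type; hipify's visible change here is rewriting the path string and filename. A hedged sketch of the same re-inclusion idiom with invented names (vec_sum_generic.h, ELEM, SUFFIX, and driver.c are illustrative, not THC's real files):

/* vec_sum_generic.h : generic body, compiled only on re-inclusion */
#ifndef VEC_SUM_GENERIC
#define VEC_SUM_GENERIC "vec_sum_generic.h"
#else
ELEM CONCAT(vec_sum_, SUFFIX)(const ELEM *v, int n)
{
    ELEM s = 0;                        /* one instantiation per element type */
    for (int i = 0; i < n; ++i) s += v[i];
    return s;
}
#endif

/* driver.c : includes the generic body once per element type */
#define CONCAT_(a, b) a##b
#define CONCAT(a, b)  CONCAT_(a, b)

#include "vec_sum_generic.h"           /* first pass: only records the path */

#define ELEM float
#define SUFFIX f
#include VEC_SUM_GENERIC               /* emits vec_sum_f() */
#undef ELEM
#undef SUFFIX

#define ELEM double
#define SUFFIX d
#include VEC_SUM_GENERIC               /* emits vec_sum_d() */
#undef ELEM
#undef SUFFIX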
de7defe1b23571f09baa33545992222b22e7a99b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define MACRO_THREADS 1024 #define NOT_EVENT 0 #define INTERVAL 500 #define USE_NVTX // Following along from // https://developer.nvidia.com/blog/cuda-pro-tip-generate-custom-application-profile-timelines-nvtx/ #ifdef USE_NVTX #include "roctracer/roctx.h" const uint32_t colors[] = {0xff00ff00, 0xff0000ff, 0xffffff00, 0xffff00ff, 0xff00ffff, 0xffff0000, 0xffffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE roctxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define PPT 64 #define FILT_WINDOW 5 #define EVENTS 5 // 1 = text file // 2 = binary file #define READ_OPT 2 #include <assert.h> #include <fcntl.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <fstream> #include <iostream> int SIZE; // length of the data file going in, in units of points... #include "find_high_random.h" #include "find_transitions_c.h" #include "find_transitions_canny.h" #include "find_transitions_delta.h" #include "find_transitions_mean.h" #include "mean_filter_signal.h" float *h_values; float *h_values_raw; float *h_high_mean; float *d_values; int *d_transitions; float *d_high_mean; int *d_size; float *d_gradient; float *d_smoothed; int main(int argc, char **argv) { PUSH_RANGE("Read file and load data", 1) int expected_values = EVENTS; int inBinaryFile; const char *available_kernels[4]; available_kernels[0] = "mean"; available_kernels[1] = "delta"; available_kernels[2] = "canny"; available_kernels[3] = "c"; if (READ_OPT == 1) { printf("You are reading the CSV as an arg!\n"); char *filename = strdup(argv[2]); std::ifstream inTextFile; // Open the file and read data inTextFile.open(filename); if (!inTextFile) { printf("\nFailed to open file on csv run!\n"); return 1; } // Read SIZE from first line of the file inTextFile >> SIZE; } else { char filename[] = "signal.dat"; if ((inBinaryFile = open(filename, O_RDONLY)) < 0) { printf("\nFailed to open the file on dat run!\n"); return 1; } float *lines; lines = (float *)calloc(1, sizeof(float)); read(inBinaryFile, lines, sizeof(float)); SIZE = (*lines); } assert(SIZE > 0); printf("%d was the number of points you just passed in.\n", SIZE); const int THREADS = MACRO_THREADS; const int BLOCKS = floor((((float)SIZE / (float)THREADS)) / PPT); printf("Block count: %d.\n", BLOCKS); fflush(stdout); const int cropped_size = BLOCKS * THREADS * PPT; const int cropped_bytes = cropped_size * sizeof(float); printf("%d was as close as I could get.\n", cropped_size); fflush(stdout); assert(THREADS > 0); assert(BLOCKS > 0); assert(cropped_size > 0); assert(cropped_size < SIZE); // Now, copy the input and drop the last few points const int ARRAY_BYTES = SIZE * sizeof(float); h_values_raw = (float *)calloc(SIZE, sizeof(float)); if (READ_OPT == 1) { char *filename = strdup(argv[2]); std::ifstream inTextFile; // Open the file and read data inTextFile.open(filename); if (!inTextFile) { printf("\nFailed to open file"); return 1; } printf("You are pulling data from the 
CSV.\n"); for (int i = 0; i < SIZE; i++) inTextFile >> h_values_raw[i]; } else { read(inBinaryFile, h_values_raw, ARRAY_BYTES); } hipHostMalloc((void **)&h_values, cropped_bytes); for (int i = 1; i < cropped_size; i++) { h_values[i] = h_values_raw[i]; } free(h_values_raw); FILE *f; // Regardless of CPU or GPU, this is the file you're writing results // to. h_high_mean = (float *)calloc(1, sizeof(float)); if (argc == 1) { printf("Run with one of the arguments: "); for (int i = 0; i < sizeof(available_kernels) / sizeof(available_kernels[0]); i++) printf("%s ", available_kernels[i]); printf("\n"); return 1; } POP_RANGE if (strcmp(argv[1], "c") == 0) { PUSH_RANGE("Find with C", 2) printf("Using CPU.\n"); // Now you are not using the GPU at all, and are just on C on the CPU. // Run the relevant transition finder, using a multipass finder for now. *h_high_mean = find_high_random(h_values); float *h_transitions = (float *)calloc(cropped_size, sizeof(float)); int passes = 6; // How many passes do you want your multipass eventfinder to run on? find_transitions_c(h_values, h_transitions, *h_high_mean, passes, cropped_size); // This is going to modify h_values, so get rid of it for safety. hipFree(h_values); // open the correct guessed transition file for writing. f = fopen("transitions_guessed_c.csv", "w"); // Write the found transitions to the correct file you opened above. for (int i = 0; i < cropped_size; i++) fprintf(f, "%f\n", h_transitions[i]); fclose(f); printf("CPU run done.\n"); POP_RANGE } else { // You are in the GPU branch. // Allocate GPU memory PUSH_RANGE("Allocate GPU memory", 3) printf("Using GPU.\n"); hipMalloc((void **)&d_values, cropped_bytes); hipMalloc((void **)&d_smoothed, cropped_bytes); hipMalloc((void **)&d_transitions, sizeof(int) * BLOCKS); hipMalloc((void **)&d_high_mean, sizeof(float)); hipMalloc((void **)&d_size, sizeof(int)); hipMalloc((void **)&d_gradient, cropped_bytes); hipStream_t stream1; hipStreamCreate(&stream1); hipMemcpyAsync(d_values, h_values, cropped_bytes, hipMemcpyHostToDevice, stream1); printf("Host-to-device copy initiated.\n"); fflush(stdout); // Launch the kernel printf("All pre-kernel launch stuff OK.\n"); fflush(stdout); POP_RANGE if (strcmp(argv[1], "delta") == 0) { PUSH_RANGE("Find with delta", 2) // Transfer the array to GPU hipMemcpy(d_size, &cropped_size, sizeof(int), hipMemcpyHostToDevice); // Run the relevant transition finder hipLaunchKernelGGL(( find_transitions_delta), dim3(BLOCKS), dim3(THREADS), 0, stream1, d_values, d_transitions, PPT, MACRO_THREADS); // open the correct guessed transition file for writing. 
f = fopen("transitions_guessed_delta.csv", "w"); expected_values = EVENTS * 2; // copy the result back to CPU hipMemcpy(h_values, d_values, cropped_bytes, hipMemcpyDeviceToHost); POP_RANGE } else if (strcmp(argv[1], "mean") == 0) { PUSH_RANGE("Find with mean", 2) *h_high_mean = find_high_random(h_values); hipMemcpy(d_high_mean, h_high_mean, sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( find_transitions_mean), dim3(BLOCKS), dim3(THREADS), 0, stream1, d_values, d_transitions, PPT, MACRO_THREADS, d_high_mean); f = fopen("transitions_guessed_mean.csv", "w"); hipMemcpy(h_values, d_values, cropped_bytes, hipMemcpyDeviceToHost); POP_RANGE } else if (strcmp(argv[1], "canny") == 0) { PUSH_RANGE("Find with canny", 2) hipMemcpy(d_size, &cropped_size, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( mean_filter_signal), dim3(BLOCKS), dim3(THREADS), 0, stream1, d_values, PPT, FILT_WINDOW, d_size, d_smoothed); hipLaunchKernelGGL(( find_transitions_canny), dim3(BLOCKS), dim3(THREADS), 0, stream1, d_values, d_transitions, PPT, d_size, d_gradient); f = fopen("transitions_guessed_canny.csv", "w"); expected_values = EVENTS * 2; hipMemcpy(h_values, d_gradient, cropped_bytes, hipMemcpyDeviceToHost); POP_RANGE } else { printf("Run with one of the arguments: "); for (int i = 0; i < sizeof(available_kernels) / sizeof(available_kernels[0]); i++) printf("%s ", available_kernels[i]); printf("\n"); return 1; } // free GPU memory hipFree(d_values); hipStreamDestroy(stream1); // Write the found transitions to the correct file you opened above. for (int i = 0; i < cropped_size; i++) fprintf(f, "%f\n", h_values[i]); fclose(f); printf("GPU run done.\n"); } PUSH_RANGE("Count transitions", 4) char eventFlag = 'F'; int total_transitions = 0; for (int i = 0; i < cropped_size; i++) { if (h_values[i] == NOT_EVENT && eventFlag == 'F') { continue; // you're not in an event, and you pass } else if (h_values[i] != NOT_EVENT && eventFlag == 'F') { eventFlag = 'T'; // walked into event } else if (h_values[i] != NOT_EVENT && eventFlag == 'T') { continue; // moving along event } else if (h_values[i] == NOT_EVENT && eventFlag == 'T') { total_transitions++; // just left an event eventFlag = 'F'; } else { return 1; } } POP_RANGE printf("Computed with %s : ", argv[1]); printf("%d (%d expected for synthetically generated data.)\n", total_transitions, expected_values); return 0; }
de7defe1b23571f09baa33545992222b22e7a99b.cu
#define MACRO_THREADS 1024 #define NOT_EVENT 0 #define INTERVAL 500 #define USE_NVTX // Following along from // https://developer.nvidia.com/blog/cuda-pro-tip-generate-custom-application-profile-timelines-nvtx/ #ifdef USE_NVTX #include "nvToolsExt.h" const uint32_t colors[] = {0xff00ff00, 0xff0000ff, 0xffffff00, 0xffff00ff, 0xff00ffff, 0xffff0000, 0xffffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE nvtxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define PPT 64 #define FILT_WINDOW 5 #define EVENTS 5 // 1 = text file // 2 = binary file #define READ_OPT 2 #include <assert.h> #include <fcntl.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <fstream> #include <iostream> int SIZE; // length of the data file going in, in units of points... #include "find_high_random.h" #include "find_transitions_c.h" #include "find_transitions_canny.h" #include "find_transitions_delta.h" #include "find_transitions_mean.h" #include "mean_filter_signal.h" float *h_values; float *h_values_raw; float *h_high_mean; float *d_values; int *d_transitions; float *d_high_mean; int *d_size; float *d_gradient; float *d_smoothed; int main(int argc, char **argv) { PUSH_RANGE("Read file and load data", 1) int expected_values = EVENTS; int inBinaryFile; const char *available_kernels[4]; available_kernels[0] = "mean"; available_kernels[1] = "delta"; available_kernels[2] = "canny"; available_kernels[3] = "c"; if (READ_OPT == 1) { printf("You are reading the CSV as an arg!\n"); char *filename = strdup(argv[2]); std::ifstream inTextFile; // Open the file and read data inTextFile.open(filename); if (!inTextFile) { printf("\nFailed to open file on csv run!\n"); return 1; } // Read SIZE from first line of the file inTextFile >> SIZE; } else { char filename[] = "signal.dat"; if ((inBinaryFile = open(filename, O_RDONLY)) < 0) { printf("\nFailed to open the file on dat run!\n"); return 1; } float *lines; lines = (float *)calloc(1, sizeof(float)); read(inBinaryFile, lines, sizeof(float)); SIZE = (*lines); } assert(SIZE > 0); printf("%d was the number of points you just passed in.\n", SIZE); const int THREADS = MACRO_THREADS; const int BLOCKS = floor((((float)SIZE / (float)THREADS)) / PPT); printf("Block count: %d.\n", BLOCKS); fflush(stdout); const int cropped_size = BLOCKS * THREADS * PPT; const int cropped_bytes = cropped_size * sizeof(float); printf("%d was as close as I could get.\n", cropped_size); fflush(stdout); assert(THREADS > 0); assert(BLOCKS > 0); assert(cropped_size > 0); assert(cropped_size < SIZE); // Now, copy the input and drop the last few points const int ARRAY_BYTES = SIZE * sizeof(float); h_values_raw = (float *)calloc(SIZE, sizeof(float)); if (READ_OPT == 1) { char *filename = strdup(argv[2]); std::ifstream inTextFile; // Open the file and read data inTextFile.open(filename); if (!inTextFile) { printf("\nFailed to open file"); return 1; } printf("You are pulling data from the CSV.\n"); for (int i = 0; i < SIZE; i++) inTextFile >> h_values_raw[i]; } else { 
read(inBinaryFile, h_values_raw, ARRAY_BYTES); } cudaMallocHost((void **)&h_values, cropped_bytes); for (int i = 1; i < cropped_size; i++) { h_values[i] = h_values_raw[i]; } free(h_values_raw); FILE *f; // Regardless of CPU or GPU, this is the file you're writing results // to. h_high_mean = (float *)calloc(1, sizeof(float)); if (argc == 1) { printf("Run with one of the arguments: "); for (int i = 0; i < sizeof(available_kernels) / sizeof(available_kernels[0]); i++) printf("%s ", available_kernels[i]); printf("\n"); return 1; } POP_RANGE if (strcmp(argv[1], "c") == 0) { PUSH_RANGE("Find with C", 2) printf("Using CPU.\n"); // Now you are not using the GPU at all, and are just on C on the CPU. // Run the relevant transition finder, using a multipass finder for now. *h_high_mean = find_high_random(h_values); float *h_transitions = (float *)calloc(cropped_size, sizeof(float)); int passes = 6; // How many passes do you want your multipass eventfinder to run on? find_transitions_c(h_values, h_transitions, *h_high_mean, passes, cropped_size); // This is going to modify h_values, so get rid of it for safety. cudaFree(h_values); // open the correct guessed transition file for writing. f = fopen("transitions_guessed_c.csv", "w"); // Write the found transitions to the correct file you opened above. for (int i = 0; i < cropped_size; i++) fprintf(f, "%f\n", h_transitions[i]); fclose(f); printf("CPU run done.\n"); POP_RANGE } else { // You are in the GPU branch. // Allocate GPU memory PUSH_RANGE("Allocate GPU memory", 3) printf("Using GPU.\n"); cudaMalloc((void **)&d_values, cropped_bytes); cudaMalloc((void **)&d_smoothed, cropped_bytes); cudaMalloc((void **)&d_transitions, sizeof(int) * BLOCKS); cudaMalloc((void **)&d_high_mean, sizeof(float)); cudaMalloc((void **)&d_size, sizeof(int)); cudaMalloc((void **)&d_gradient, cropped_bytes); cudaStream_t stream1; cudaStreamCreate(&stream1); cudaMemcpyAsync(d_values, h_values, cropped_bytes, cudaMemcpyHostToDevice, stream1); printf("Host-to-device copy initiated.\n"); fflush(stdout); // Launch the kernel printf("All pre-kernel launch stuff OK.\n"); fflush(stdout); POP_RANGE if (strcmp(argv[1], "delta") == 0) { PUSH_RANGE("Find with delta", 2) // Transfer the array to GPU cudaMemcpy(d_size, &cropped_size, sizeof(int), cudaMemcpyHostToDevice); // Run the relevant transition finder find_transitions_delta<<<BLOCKS, THREADS, 0, stream1>>>( d_values, d_transitions, PPT, MACRO_THREADS); // open the correct guessed transition file for writing. 
f = fopen("transitions_guessed_delta.csv", "w"); expected_values = EVENTS * 2; // copy the result back to CPU cudaMemcpy(h_values, d_values, cropped_bytes, cudaMemcpyDeviceToHost); POP_RANGE } else if (strcmp(argv[1], "mean") == 0) { PUSH_RANGE("Find with mean", 2) *h_high_mean = find_high_random(h_values); cudaMemcpy(d_high_mean, h_high_mean, sizeof(float), cudaMemcpyHostToDevice); find_transitions_mean<<<BLOCKS, THREADS, 0, stream1>>>( d_values, d_transitions, PPT, MACRO_THREADS, d_high_mean); f = fopen("transitions_guessed_mean.csv", "w"); cudaMemcpy(h_values, d_values, cropped_bytes, cudaMemcpyDeviceToHost); POP_RANGE } else if (strcmp(argv[1], "canny") == 0) { PUSH_RANGE("Find with canny", 2) cudaMemcpy(d_size, &cropped_size, sizeof(int), cudaMemcpyHostToDevice); mean_filter_signal<<<BLOCKS, THREADS, 0, stream1>>>( d_values, PPT, FILT_WINDOW, d_size, d_smoothed); find_transitions_canny<<<BLOCKS, THREADS, 0, stream1>>>( d_values, d_transitions, PPT, d_size, d_gradient); f = fopen("transitions_guessed_canny.csv", "w"); expected_values = EVENTS * 2; cudaMemcpy(h_values, d_gradient, cropped_bytes, cudaMemcpyDeviceToHost); POP_RANGE } else { printf("Run with one of the arguments: "); for (int i = 0; i < sizeof(available_kernels) / sizeof(available_kernels[0]); i++) printf("%s ", available_kernels[i]); printf("\n"); return 1; } // free GPU memory cudaFree(d_values); cudaStreamDestroy(stream1); // Write the found transitions to the correct file you opened above. for (int i = 0; i < cropped_size; i++) fprintf(f, "%f\n", h_values[i]); fclose(f); printf("GPU run done.\n"); } PUSH_RANGE("Count transitions", 4) char eventFlag = 'F'; int total_transitions = 0; for (int i = 0; i < cropped_size; i++) { if (h_values[i] == NOT_EVENT && eventFlag == 'F') { continue; // you're not in an event, and you pass } else if (h_values[i] != NOT_EVENT && eventFlag == 'F') { eventFlag = 'T'; // walked into event } else if (h_values[i] != NOT_EVENT && eventFlag == 'T') { continue; // moving along event } else if (h_values[i] == NOT_EVENT && eventFlag == 'T') { total_transitions++; // just left an event eventFlag = 'F'; } else { return 1; } } POP_RANGE printf("Computed with %s : ", argv[1]); printf("%d (%d expected for synthetically generated data.)\n", total_transitions, expected_values); return 0; }
32f1f9a2db9fac0873ae401684efa27ea115469a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! \file VL_2D_cuda.cu * \brief Definitions of the cuda 2D VL algorithm functions. */ #ifdef CUDA #ifdef VL #include<stdio.h> #include<math.h> #include<cuda.h> #include"global.h" #include"global_cuda.h" #include"hydro_cuda.h" #include"VL_2D_cuda.h" #include"pcm_cuda.h" #include"plmp_cuda.h" #include"plmc_cuda.h" #include"ppmp_cuda.h" #include"ppmc_cuda.h" #include"exact_cuda.h" #include"roe_cuda.h" #include"hllc_cuda.h" #include"h_correction_2D_cuda.h" #include"cooling_cuda.h" #include"subgrid_routines_2D.h" __global__ void Update_Conserved_Variables_2D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x, Real *dev_F_y, int nx, int ny, int n_ghost, Real dx, Real dy, Real dt, Real gamma, int n_fields); Real VL_Algorithm_2D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int ny, int x_off, int y_off, int n_ghost, Real dx, Real dy, Real xbound, Real ybound, Real dt, int n_fields) { //Here, *host_conserved contains the entire //set of conserved variables on the grid //concatenated into a 1-d array //host_conserved0 contains the values at time n, //host_conserved1 will contain the values at time n+1 // Initialize dt values Real max_dti = 0; #ifdef COOLING_GPU Real min_dt = 1e10; #endif if ( !block_size ) { // calculate the dimensions for each subgrid block sub_dimensions_2D(nx, ny, n_ghost, &nx_s, &ny_s, &block1_tot, &block2_tot, &remainder1, &remainder2, n_fields); //printf("%d %d %d %d %d %d\n", nx_s, ny_s, block1_tot, block2_tot, remainder1, remainder2); nz_s = 1; block_tot = block1_tot*block2_tot; // number of cells in one subgrid block BLOCK_VOL = nx_s*ny_s*nz_s; // dimensions for the 1D GPU grid ngrid = (BLOCK_VOL + TPB - 1) / (TPB); #ifndef DYNAMIC_GPU_ALLOC block_size = true; #endif } // set values for GPU kernels // number of blocks per 1D grid dim3 dim2dGrid(ngrid, 1, 1); //number of threads per 1D block dim3 dim1dBlock(TPB, 1, 1); // Set up pointers for the location to copy from and to if (block_tot == 1) { tmp1 = host_conserved0; tmp2 = host_conserved1; } if ( !memory_allocated ) { // allocate buffer to copy conserved variable blocks from and to if (block_tot > 1) { if ( NULL == ( buffer = (Real *) malloc(n_fields*BLOCK_VOL*sizeof(Real)) ) ) { printf("Failed to allocate CPU buffer.\n"); } tmp1 = buffer; tmp2 = buffer; } // allocate an array on the CPU to hold max_dti returned from each thread block host_dti_array = (Real *) malloc(ngrid*sizeof(Real)); #ifdef COOLING_GPU host_dt_array = (Real *) malloc(ngrid*sizeof(Real)); #endif // allocate GPU arrays CudaSafeCall( hipMalloc((void**)&dev_conserved, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&dev_conserved_half, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Lx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Rx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Ly, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Ry, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&F_x, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&F_y, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) ); #ifdef COOLING_GPU CudaSafeCall( hipMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) ); #endif #ifndef DYNAMIC_GPU_ALLOC // If memory is single allocated: memory_allocated becomes true and succesive timesteps won't allocate memory. 
// If the memory is not single allocated: memory_allocated remains Null and memory is allocated every timestep. memory_allocated = true; #endif } // counter for which block we're on int block = 0; // START LOOP OVER SUBGRID BLOCKS HERE while (block < block_tot) { // copy the conserved variable block to the buffer host_copy_block_2D(nx, ny, nx_s, ny_s, n_ghost, block, block1_tot, block2_tot, remainder1, remainder2, BLOCK_VOL, host_conserved0, buffer, n_fields); // calculate the global x and y offsets of this subgrid block // (only needed for gravitational potential) get_offsets_2D(nx_s, ny_s, n_ghost, x_off, y_off, block, block1_tot, block2_tot, remainder1, remainder2, &x_off_s, &y_off_s); // copy the conserved variables onto the GPU CudaSafeCall( hipMemcpy(dev_conserved, tmp1, n_fields*BLOCK_VOL*sizeof(Real), hipMemcpyHostToDevice) ); // Step 1: Use PCM reconstruction to put conserved variables into interface arrays hipLaunchKernelGGL(( PCM_Reconstruction_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, nx_s, ny_s, n_ghost, gama, n_fields); CudaCheckError(); // Step 2: Calculate first-order upwind fluxes #ifdef EXACT hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef ROE hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef HLLC hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif CudaCheckError(); // Step 3: Update the conserved variables half a timestep hipLaunchKernelGGL(( Update_Conserved_Variables_2D_half), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, dev_conserved_half, F_x, F_y, nx_s, ny_s, n_ghost, dx, dy, 0.5*dt, gama, n_fields); CudaCheckError(); // Step 4: Construct left and right interface values using updated conserved variables #ifdef PLMP hipLaunchKernelGGL(( PLMP_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PLMP_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PLMC hipLaunchKernelGGL(( PLMC_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PLMC_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PPMP hipLaunchKernelGGL(( PPMP_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PPMP_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif //PPMP #ifdef PPMC 
hipLaunchKernelGGL(( PPMC_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PPMC_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif //PPMC CudaCheckError(); // Step 5: Calculate the fluxes again #ifdef EXACT hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef ROE hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef HLLC hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif CudaCheckError(); #ifdef DE // Compute the divergence of velocity before updating the conserved array, this solves syncronization issues when adding this term on Update_Conserved_Variables hipLaunchKernelGGL(( Partial_Update_Advected_Internal_Energy_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, nx_s, ny_s, n_ghost, dx, dy, dt, gama, n_fields ); #endif // Step 6: Update the conserved variable array hipLaunchKernelGGL(( Update_Conserved_Variables_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, F_x, F_y, nx_s, ny_s, x_off_s, y_off_s, n_ghost, dx, dy, xbound, ybound, dt, gama, n_fields); CudaCheckError(); #ifdef DE hipLaunchKernelGGL(( Select_Internal_Energy_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, n_ghost, n_fields); hipLaunchKernelGGL(( Sync_Energies_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, n_ghost, gama, n_fields); CudaCheckError(); #endif // Apply cooling #ifdef COOLING_GPU hipLaunchKernelGGL(( cooling_kernel), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields, dt, gama, dev_dt_array); CudaCheckError(); #endif // Step 7: Calculate the next timestep hipLaunchKernelGGL(( Calc_dt_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, n_ghost, dx, dy, dev_dti_array, gama); CudaCheckError(); // copy the conserved variable array back to the CPU CudaSafeCall( hipMemcpy(tmp2, dev_conserved, n_fields*BLOCK_VOL*sizeof(Real), hipMemcpyDeviceToHost) ); // copy the updated conserved variable array back into the host_conserved array on the CPU host_return_block_2D(nx, ny, nx_s, ny_s, n_ghost, block, block1_tot, block2_tot, remainder1, remainder2, BLOCK_VOL, host_conserved1, buffer, n_fields); // copy the dti array onto the CPU CudaSafeCall( hipMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) ); // iterate through to find the maximum inverse dt for this subgrid block for (int i=0; i<ngrid; i++) { max_dti = fmax(max_dti, host_dti_array[i]); } #ifdef COOLING_GPU // copy the dt array from cooling onto the CPU CudaSafeCall( hipMemcpy(host_dt_array, dev_dt_array, 
ngrid*sizeof(Real), hipMemcpyDeviceToHost) ); // iterate through to find the minimum dt for this subgrid block for (int i=0; i<ngrid; i++) { min_dt = fmin(min_dt, host_dt_array[i]); } if (min_dt < C_cfl/max_dti) { max_dti = C_cfl/min_dt; } #endif // add one to the counter block++; } #ifdef DYNAMIC_GPU_ALLOC // If memory is not single allocated then free the memory every timestep. Free_Memory_VL_2D(); #endif // return the maximum inverse timestep return max_dti; } void Free_Memory_VL_2D() { // free the CPU memory if (block_tot > 1) free(buffer); free(host_dti_array); #ifdef COOLING_GPU free(host_dt_array); #endif // free the GPU memory hipFree(dev_conserved); hipFree(dev_conserved_half); hipFree(Q_Lx); hipFree(Q_Rx); hipFree(Q_Ly); hipFree(Q_Ry); hipFree(F_x); hipFree(F_y); hipFree(dev_dti_array); #ifdef COOLING_GPU hipFree(dev_dt_array); #endif } __global__ void Update_Conserved_Variables_2D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x, Real *dev_F_y, int nx, int ny, int n_ghost, Real dx, Real dy, Real dt, Real gamma, int n_fields) { int id, xid, yid, n_cells; int imo, jmo; Real dtodx = dt/dx; Real dtody = dt/dy; n_cells = nx*ny; // get a global thread ID int blockId = blockIdx.x + blockIdx.y*gridDim.x; id = threadIdx.x + blockId * blockDim.x; yid = id / nx; xid = id - yid*nx; #ifdef DE Real d, d_inv, vx, vy, vz; Real vx_imo, vx_ipo, vy_jmo, vy_jpo, P; int ipo, jpo; #endif // all threads but one outer ring of ghost cells if (xid > 0 && xid < nx-1 && yid > 0 && yid < ny-1) { imo = xid-1 + yid*nx; jmo = xid + (yid-1)*nx; #ifdef DE d = dev_conserved[ id]; d_inv = 1.0 / d; vx = dev_conserved[1*n_cells + id] * d_inv; vy = dev_conserved[2*n_cells + id] * d_inv; vz = dev_conserved[3*n_cells + id] * d_inv; P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0); //if (d < 0.0 || d != d) printf("Negative density before half step update.\n"); //if (P < 0.0) printf("%d Negative pressure before half step update.\n", id); ipo = xid+1 + yid*nx; jpo = xid + (yid+1)*nx; vx_imo = dev_conserved[1*n_cells + imo] / dev_conserved[imo]; vx_ipo = dev_conserved[1*n_cells + ipo] / dev_conserved[ipo]; vy_jmo = dev_conserved[2*n_cells + jmo] / dev_conserved[jmo]; vy_jpo = dev_conserved[2*n_cells + jpo] / dev_conserved[jpo]; #endif // update the conserved variable array dev_conserved_half[ id] = dev_conserved[ id] + dtodx * (dev_F_x[ imo] - dev_F_x[ id]) + dtody * (dev_F_y[ jmo] - dev_F_y[ id]); dev_conserved_half[ n_cells + id] = dev_conserved[ n_cells + id] + dtodx * (dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id]) + dtody * (dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]); dev_conserved_half[2*n_cells + id] = dev_conserved[2*n_cells + id] + dtodx * (dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id]) + dtody * (dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]); dev_conserved_half[3*n_cells + id] = dev_conserved[3*n_cells + id] + dtodx * (dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id]) + dtody * (dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]); dev_conserved_half[4*n_cells + id] = dev_conserved[4*n_cells + id] + dtodx * (dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id]) + dtody * (dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_conserved_half[(5+i)*n_cells + id] = dev_conserved[(5+i)*n_cells + id] + dtodx * (dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id]) + dtody * (dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]); } #endif #ifdef DE dev_conserved_half[(n_fields-1)*n_cells 
+ id] = dev_conserved[(n_fields-1)*n_cells + id] + dtodx * (dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id]) + dtody * (dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id]) + 0.5*P*(dtodx*(vx_imo-vx_ipo) + dtody*(vy_jmo-vy_jpo)); #endif } } #endif //VL #endif //CUDA
32f1f9a2db9fac0873ae401684efa27ea115469a.cu
/*! \file VL_2D_cuda.cu * \brief Definitions of the cuda 2D VL algorithm functions. */ #ifdef CUDA #ifdef VL #include<stdio.h> #include<math.h> #include<cuda.h> #include"global.h" #include"global_cuda.h" #include"hydro_cuda.h" #include"VL_2D_cuda.h" #include"pcm_cuda.h" #include"plmp_cuda.h" #include"plmc_cuda.h" #include"ppmp_cuda.h" #include"ppmc_cuda.h" #include"exact_cuda.h" #include"roe_cuda.h" #include"hllc_cuda.h" #include"h_correction_2D_cuda.h" #include"cooling_cuda.h" #include"subgrid_routines_2D.h" __global__ void Update_Conserved_Variables_2D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x, Real *dev_F_y, int nx, int ny, int n_ghost, Real dx, Real dy, Real dt, Real gamma, int n_fields); Real VL_Algorithm_2D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int ny, int x_off, int y_off, int n_ghost, Real dx, Real dy, Real xbound, Real ybound, Real dt, int n_fields) { //Here, *host_conserved contains the entire //set of conserved variables on the grid //concatenated into a 1-d array //host_conserved0 contains the values at time n, //host_conserved1 will contain the values at time n+1 // Initialize dt values Real max_dti = 0; #ifdef COOLING_GPU Real min_dt = 1e10; #endif if ( !block_size ) { // calculate the dimensions for each subgrid block sub_dimensions_2D(nx, ny, n_ghost, &nx_s, &ny_s, &block1_tot, &block2_tot, &remainder1, &remainder2, n_fields); //printf("%d %d %d %d %d %d\n", nx_s, ny_s, block1_tot, block2_tot, remainder1, remainder2); nz_s = 1; block_tot = block1_tot*block2_tot; // number of cells in one subgrid block BLOCK_VOL = nx_s*ny_s*nz_s; // dimensions for the 1D GPU grid ngrid = (BLOCK_VOL + TPB - 1) / (TPB); #ifndef DYNAMIC_GPU_ALLOC block_size = true; #endif } // set values for GPU kernels // number of blocks per 1D grid dim3 dim2dGrid(ngrid, 1, 1); //number of threads per 1D block dim3 dim1dBlock(TPB, 1, 1); // Set up pointers for the location to copy from and to if (block_tot == 1) { tmp1 = host_conserved0; tmp2 = host_conserved1; } if ( !memory_allocated ) { // allocate buffer to copy conserved variable blocks from and to if (block_tot > 1) { if ( NULL == ( buffer = (Real *) malloc(n_fields*BLOCK_VOL*sizeof(Real)) ) ) { printf("Failed to allocate CPU buffer.\n"); } tmp1 = buffer; tmp2 = buffer; } // allocate an array on the CPU to hold max_dti returned from each thread block host_dti_array = (Real *) malloc(ngrid*sizeof(Real)); #ifdef COOLING_GPU host_dt_array = (Real *) malloc(ngrid*sizeof(Real)); #endif // allocate GPU arrays CudaSafeCall( cudaMalloc((void**)&dev_conserved, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&dev_conserved_half, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Lx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Rx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Ly, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Ry, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&F_x, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&F_y, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) ); #ifdef COOLING_GPU CudaSafeCall( cudaMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) ); #endif #ifndef DYNAMIC_GPU_ALLOC // If memory is single allocated: memory_allocated becomes true and succesive timesteps won't allocate memory. 
// If the memory is not single allocated: memory_allocated remains Null and memory is allocated every timestep. memory_allocated = true; #endif } // counter for which block we're on int block = 0; // START LOOP OVER SUBGRID BLOCKS HERE while (block < block_tot) { // copy the conserved variable block to the buffer host_copy_block_2D(nx, ny, nx_s, ny_s, n_ghost, block, block1_tot, block2_tot, remainder1, remainder2, BLOCK_VOL, host_conserved0, buffer, n_fields); // calculate the global x and y offsets of this subgrid block // (only needed for gravitational potential) get_offsets_2D(nx_s, ny_s, n_ghost, x_off, y_off, block, block1_tot, block2_tot, remainder1, remainder2, &x_off_s, &y_off_s); // copy the conserved variables onto the GPU CudaSafeCall( cudaMemcpy(dev_conserved, tmp1, n_fields*BLOCK_VOL*sizeof(Real), cudaMemcpyHostToDevice) ); // Step 1: Use PCM reconstruction to put conserved variables into interface arrays PCM_Reconstruction_2D<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, nx_s, ny_s, n_ghost, gama, n_fields); CudaCheckError(); // Step 2: Calculate first-order upwind fluxes #ifdef EXACT Calculate_Exact_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Exact_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef ROE Calculate_Roe_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Roe_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef HLLC Calculate_HLLC_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_HLLC_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif CudaCheckError(); // Step 3: Update the conserved variables half a timestep Update_Conserved_Variables_2D_half<<<dim2dGrid,dim1dBlock>>>(dev_conserved, dev_conserved_half, F_x, F_y, nx_s, ny_s, n_ghost, dx, dy, 0.5*dt, gama, n_fields); CudaCheckError(); // Step 4: Construct left and right interface values using updated conserved variables #ifdef PLMP PLMP_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PLMP_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PLMC PLMC_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PLMC_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PPMP PPMP_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PPMP_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif //PPMP #ifdef PPMC PPMC_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved_half, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PPMC_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved_half, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif //PPMC CudaCheckError(); // Step 5: Calculate the fluxes again #ifdef EXACT Calculate_Exact_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Exact_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, 
nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef ROE Calculate_Roe_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Roe_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef HLLC Calculate_HLLC_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_HLLC_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif CudaCheckError(); #ifdef DE // Compute the divergence of velocity before updating the conserved array, this solves syncronization issues when adding this term on Update_Conserved_Variables Partial_Update_Advected_Internal_Energy_2D<<<dim2dGrid,dim1dBlock>>>( dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, nx_s, ny_s, n_ghost, dx, dy, dt, gama, n_fields ); #endif // Step 6: Update the conserved variable array Update_Conserved_Variables_2D<<<dim2dGrid,dim1dBlock>>>(dev_conserved, F_x, F_y, nx_s, ny_s, x_off_s, y_off_s, n_ghost, dx, dy, xbound, ybound, dt, gama, n_fields); CudaCheckError(); #ifdef DE Select_Internal_Energy_2D<<<dim2dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, n_ghost, n_fields); Sync_Energies_2D<<<dim2dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, n_ghost, gama, n_fields); CudaCheckError(); #endif // Apply cooling #ifdef COOLING_GPU cooling_kernel<<<dim2dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields, dt, gama, dev_dt_array); CudaCheckError(); #endif // Step 7: Calculate the next timestep Calc_dt_2D<<<dim2dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, n_ghost, dx, dy, dev_dti_array, gama); CudaCheckError(); // copy the conserved variable array back to the CPU CudaSafeCall( cudaMemcpy(tmp2, dev_conserved, n_fields*BLOCK_VOL*sizeof(Real), cudaMemcpyDeviceToHost) ); // copy the updated conserved variable array back into the host_conserved array on the CPU host_return_block_2D(nx, ny, nx_s, ny_s, n_ghost, block, block1_tot, block2_tot, remainder1, remainder2, BLOCK_VOL, host_conserved1, buffer, n_fields); // copy the dti array onto the CPU CudaSafeCall( cudaMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) ); // iterate through to find the maximum inverse dt for this subgrid block for (int i=0; i<ngrid; i++) { max_dti = fmax(max_dti, host_dti_array[i]); } #ifdef COOLING_GPU // copy the dt array from cooling onto the CPU CudaSafeCall( cudaMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) ); // iterate through to find the minimum dt for this subgrid block for (int i=0; i<ngrid; i++) { min_dt = fmin(min_dt, host_dt_array[i]); } if (min_dt < C_cfl/max_dti) { max_dti = C_cfl/min_dt; } #endif // add one to the counter block++; } #ifdef DYNAMIC_GPU_ALLOC // If memory is not single allocated then free the memory every timestep. 
Free_Memory_VL_2D(); #endif // return the maximum inverse timestep return max_dti; } void Free_Memory_VL_2D() { // free the CPU memory if (block_tot > 1) free(buffer); free(host_dti_array); #ifdef COOLING_GPU free(host_dt_array); #endif // free the GPU memory cudaFree(dev_conserved); cudaFree(dev_conserved_half); cudaFree(Q_Lx); cudaFree(Q_Rx); cudaFree(Q_Ly); cudaFree(Q_Ry); cudaFree(F_x); cudaFree(F_y); cudaFree(dev_dti_array); #ifdef COOLING_GPU cudaFree(dev_dt_array); #endif } __global__ void Update_Conserved_Variables_2D_half(Real *dev_conserved, Real *dev_conserved_half, Real *dev_F_x, Real *dev_F_y, int nx, int ny, int n_ghost, Real dx, Real dy, Real dt, Real gamma, int n_fields) { int id, xid, yid, n_cells; int imo, jmo; Real dtodx = dt/dx; Real dtody = dt/dy; n_cells = nx*ny; // get a global thread ID int blockId = blockIdx.x + blockIdx.y*gridDim.x; id = threadIdx.x + blockId * blockDim.x; yid = id / nx; xid = id - yid*nx; #ifdef DE Real d, d_inv, vx, vy, vz; Real vx_imo, vx_ipo, vy_jmo, vy_jpo, P; int ipo, jpo; #endif // all threads but one outer ring of ghost cells if (xid > 0 && xid < nx-1 && yid > 0 && yid < ny-1) { imo = xid-1 + yid*nx; jmo = xid + (yid-1)*nx; #ifdef DE d = dev_conserved[ id]; d_inv = 1.0 / d; vx = dev_conserved[1*n_cells + id] * d_inv; vy = dev_conserved[2*n_cells + id] * d_inv; vz = dev_conserved[3*n_cells + id] * d_inv; P = (dev_conserved[4*n_cells + id] - 0.5*d*(vx*vx + vy*vy + vz*vz)) * (gamma - 1.0); //if (d < 0.0 || d != d) printf("Negative density before half step update.\n"); //if (P < 0.0) printf("%d Negative pressure before half step update.\n", id); ipo = xid+1 + yid*nx; jpo = xid + (yid+1)*nx; vx_imo = dev_conserved[1*n_cells + imo] / dev_conserved[imo]; vx_ipo = dev_conserved[1*n_cells + ipo] / dev_conserved[ipo]; vy_jmo = dev_conserved[2*n_cells + jmo] / dev_conserved[jmo]; vy_jpo = dev_conserved[2*n_cells + jpo] / dev_conserved[jpo]; #endif // update the conserved variable array dev_conserved_half[ id] = dev_conserved[ id] + dtodx * (dev_F_x[ imo] - dev_F_x[ id]) + dtody * (dev_F_y[ jmo] - dev_F_y[ id]); dev_conserved_half[ n_cells + id] = dev_conserved[ n_cells + id] + dtodx * (dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id]) + dtody * (dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]); dev_conserved_half[2*n_cells + id] = dev_conserved[2*n_cells + id] + dtodx * (dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id]) + dtody * (dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]); dev_conserved_half[3*n_cells + id] = dev_conserved[3*n_cells + id] + dtodx * (dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id]) + dtody * (dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]); dev_conserved_half[4*n_cells + id] = dev_conserved[4*n_cells + id] + dtodx * (dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id]) + dtody * (dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_conserved_half[(5+i)*n_cells + id] = dev_conserved[(5+i)*n_cells + id] + dtodx * (dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id]) + dtody * (dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]); } #endif #ifdef DE dev_conserved_half[(n_fields-1)*n_cells + id] = dev_conserved[(n_fields-1)*n_cells + id] + dtodx * (dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id]) + dtody * (dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id]) + 0.5*P*(dtodx*(vx_imo-vx_ipo) + dtody*(vy_jmo-vy_jpo)); #endif } } #endif //VL #endif //CUDA
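Beyond the launch rewrite, this pair shows the device-memory API mapping applied by hipify: cudaMalloc, cudaMemcpy, and cudaFree in the .cu file become hipMalloc, hipMemcpy, and hipFree in the .hip file with matching signatures (the copy-kind enums gain the hip prefix). A minimal, hedged sketch of that allocate/copy/free lifecycle in the HIP spelling; the buffer and its size are placeholders, not Cholla's conserved-variable arrays:

#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

int main()
{
    const int n = 1 << 20;
    std::vector<double> host(n, 1.0);
    double *dev = nullptr;

    // cudaMalloc(...) in the .cu version
    if (hipMalloc((void**)&dev, n * sizeof(double)) != hipSuccess) {
        std::fprintf(stderr, "device allocation failed\n");
        return 1;
    }

    // cudaMemcpy(..., cudaMemcpyHostToDevice) in the .cu version
    hipMemcpy(dev, host.data(), n * sizeof(double), hipMemcpyHostToDevice);

    // ... kernels would run here, as VL_Algorithm_2D_CUDA does between its copies ...

    hipMemcpy(host.data(), dev, n * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(dev);   // cudaFree in the .cu version
    return 0;
}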
5e58b52428e0291f496e7105db311410dfb87eea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <type_traits> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorAccessor.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/MemoryAccess.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <ATen/native/hip/PersistentSoftmax.cuh> #include <c10/hip/HIPMathCompat.h> namespace at { namespace native { namespace { Tensor gemm_nt(const Tensor& a, const Tensor& b) { auto a_ = a.view({a.size(0) * a.size(1), a.size(2)}); auto b_ = b.transpose(1, 0); auto c_ = at::native::matmul(a_, b_); return c_.view({a.size(0), a.size(1), b.size(0)}); } template <typename scalar_t, typename accscalar_t> __global__ void transform_bias_rescale_qkv_kernel( // [B, T, 3 * D] const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v) { // warp per DH. // so launch B * NH * T warps. auto B = q_k_v.size(1); auto NH = q_k_v.size(2); auto T = q_k_v.size(3); auto DH = q_k_v.size(4); auto t = blockIdx.x % T; auto b = blockIdx.x / T; auto D = NH * DH; constexpr int VEC = 4; using LoadT = memory::aligned_vector<scalar_t, VEC>; // FIXME: assert ((D % VEC) == 0) for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? 
for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) / static_cast<accscalar_t>(8)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } // compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv( const Tensor& qkv, const Tensor& qkv_bias) { auto B = qkv.size(0); auto T = qkv.size(1); auto _3D = qkv.size(2); auto D = _3D / 3; auto dim_per_head = 64; auto num_head = D / dim_per_head; auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv.options()); AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, qkv.scalar_type(), "transform_bias_rescale_qkv", [&] { using accscalar_t = acc_type<scalar_t, true>; auto threads = std::min<int32_t>(1024, D / 4); auto blocks = B * T; hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>()); C10_HIP_KERNEL_LAUNCH_CHECK(); }); auto q_k_v_s = at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0); return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]); } Tensor bmm_nt(const Tensor& a, const Tensor& b) { auto a_ = a.view({a.size(0) * a.size(1), a.size(2), a.size(3)}); auto b_ = b.view({b.size(0) * b.size(1), b.size(2), b.size(3)}); auto bt_ = b_.transpose(2, 1); // TODO: are these a single call to cublas batched matmul? auto c_ = at::matmul(a_, bt_); return c_.view({a.size(0), a.size(1), a.size(2), b.size(2)}); } template <typename T> __inline__ __device__ T WarpReduceMax(T val) { #pragma unroll for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { val = ::max(val, WARP_SHFL_DOWN(val, offset)); } return val; } template <typename T> __inline__ __device__ T WarpReduceSum(T val) { #pragma unroll for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { val += WARP_SHFL_DOWN(val, offset); } return val; } void masked_softmax_dropout( const Tensor& attn_scores, const c10::optional<Tensor>& attn_mask) { auto B = attn_scores.size(0); auto num_heads = attn_scores.size(1); auto T = attn_scores.size(2); if (attn_mask) { TORCH_CHECK(attn_mask->is_contiguous()); } AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, attn_scores.scalar_type(), "masked_softmax_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; // TODO: proper implementation with masking. 
dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, false, false>( attn_scores.data_ptr<scalar_t>(), attn_scores.data_ptr<scalar_t>(), T, T, B * num_heads * T ); }); } Tensor bmm_nn(const Tensor& a, const Tensor& b) { auto a_ = a.view({a.size(0) * a.size(1), a.size(2), a.size(3)}); auto b_ = b.view({b.size(0) * b.size(1), b.size(2), b.size(3)}); // TODO: are these a single call to cublas batched matmul? auto c_ = at::matmul(a_, b_); return c_.view({a.size(0), a.size(1), a.size(2), b.size(3)}); } Tensor transform_0213(const Tensor& a) { // TODO: check perf vs dedicated kernel. return a.permute({0, 2, 1, 3}) .contiguous() .view({a.size(0), a.size(2), a.size(1) * a.size(3)}); } Tensor gemm_nt_bias(const Tensor& a, const Tensor& b, const Tensor& c) { auto a_ = a.view({a.size(0) * a.size(1), a.size(2)}); // TODO: should be b.transpose(1, 0)? auto r_ = at::native::linear(a_, b, c); return r_.view({a.size(0), a.size(1), r_.size(1)}); } } // namespace Tensor multi_head_self_attention_cuda( const Tensor& query, const Tensor& qkv_weight, const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, const c10::optional<Tensor>& mask) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] // shape: [B, T, 3 x D] auto qkv = gemm_nt(query, qkv_weight); // shape: 3 x [B, num_head, T, dim_per_head] auto q_k_v = transform_bias_rescale_qkv(qkv, qkv_bias); auto q = std::get<0>(q_k_v); auto k = std::get<1>(q_k_v); auto v = std::get<2>(q_k_v); // shape: [B, num_head, T, T] auto qkt = bmm_nt(q, k); // shape: [B, num_head, T, T] masked_softmax_dropout(qkt, mask); // shape: [B, num_head, T, dim_per_head] auto attn_ctx = bmm_nn(qkt, v); // shape: [B, T, D] auto attn = transform_0213(attn_ctx); // shape: [B, T, D] auto proj = gemm_nt_bias(attn, proj_weight, proj_bias); return proj; } } // namespace native } // namespace at
5e58b52428e0291f496e7105db311410dfb87eea.cu
#include <type_traits> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorAccessor.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/MemoryAccess.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <ATen/native/cuda/PersistentSoftmax.cuh> #include <c10/cuda/CUDAMathCompat.h> namespace at { namespace native { namespace { Tensor gemm_nt(const Tensor& a, const Tensor& b) { auto a_ = a.view({a.size(0) * a.size(1), a.size(2)}); auto b_ = b.transpose(1, 0); auto c_ = at::native::matmul(a_, b_); return c_.view({a.size(0), a.size(1), b.size(0)}); } template <typename scalar_t, typename accscalar_t> __global__ void transform_bias_rescale_qkv_kernel( // [B, T, 3 * D] const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v) { // warp per DH. // so launch B * NH * T warps. auto B = q_k_v.size(1); auto NH = q_k_v.size(2); auto T = q_k_v.size(3); auto DH = q_k_v.size(4); auto t = blockIdx.x % T; auto b = blockIdx.x / T; auto D = NH * DH; constexpr int VEC = 4; using LoadT = memory::aligned_vector<scalar_t, VEC>; // FIXME: assert ((D % VEC) == 0) for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? 
for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) / static_cast<accscalar_t>(8)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } // compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv( const Tensor& qkv, const Tensor& qkv_bias) { auto B = qkv.size(0); auto T = qkv.size(1); auto _3D = qkv.size(2); auto D = _3D / 3; auto dim_per_head = 64; auto num_head = D / dim_per_head; auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv.options()); AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, qkv.scalar_type(), "transform_bias_rescale_qkv", [&] { using accscalar_t = acc_type<scalar_t, true>; auto threads = std::min<int32_t>(1024, D / 4); auto blocks = B * T; transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); auto q_k_v_s = at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0); return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]); } Tensor bmm_nt(const Tensor& a, const Tensor& b) { auto a_ = a.view({a.size(0) * a.size(1), a.size(2), a.size(3)}); auto b_ = b.view({b.size(0) * b.size(1), b.size(2), b.size(3)}); auto bt_ = b_.transpose(2, 1); // TODO: are these a single call to cublas batched matmul? auto c_ = at::matmul(a_, bt_); return c_.view({a.size(0), a.size(1), a.size(2), b.size(2)}); } template <typename T> __inline__ __device__ T WarpReduceMax(T val) { #pragma unroll for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { val = std::max(val, WARP_SHFL_DOWN(val, offset)); } return val; } template <typename T> __inline__ __device__ T WarpReduceSum(T val) { #pragma unroll for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) { val += WARP_SHFL_DOWN(val, offset); } return val; } void masked_softmax_dropout( const Tensor& attn_scores, const c10::optional<Tensor>& attn_mask) { auto B = attn_scores.size(0); auto num_heads = attn_scores.size(1); auto T = attn_scores.size(2); if (attn_mask) { TORCH_CHECK(attn_mask->is_contiguous()); } AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, attn_scores.scalar_type(), "masked_softmax_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; // TODO: proper implementation with masking. dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, false, false>( attn_scores.data_ptr<scalar_t>(), attn_scores.data_ptr<scalar_t>(), T, T, B * num_heads * T ); }); } Tensor bmm_nn(const Tensor& a, const Tensor& b) { auto a_ = a.view({a.size(0) * a.size(1), a.size(2), a.size(3)}); auto b_ = b.view({b.size(0) * b.size(1), b.size(2), b.size(3)}); // TODO: are these a single call to cublas batched matmul? 
auto c_ = at::matmul(a_, b_); return c_.view({a.size(0), a.size(1), a.size(2), b.size(3)}); } Tensor transform_0213(const Tensor& a) { // TODO: check perf vs dedicated kernel. return a.permute({0, 2, 1, 3}) .contiguous() .view({a.size(0), a.size(2), a.size(1) * a.size(3)}); } Tensor gemm_nt_bias(const Tensor& a, const Tensor& b, const Tensor& c) { auto a_ = a.view({a.size(0) * a.size(1), a.size(2)}); // TODO: should be b.transpose(1, 0)? auto r_ = at::native::linear(a_, b, c); return r_.view({a.size(0), a.size(1), r_.size(1)}); } } // namespace Tensor multi_head_self_attention_cuda( const Tensor& query, const Tensor& qkv_weight, const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, const c10::optional<Tensor>& mask) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] // shape: [B, T, 3 x D] auto qkv = gemm_nt(query, qkv_weight); // shape: 3 x [B, num_head, T, dim_per_head] auto q_k_v = transform_bias_rescale_qkv(qkv, qkv_bias); auto q = std::get<0>(q_k_v); auto k = std::get<1>(q_k_v); auto v = std::get<2>(q_k_v); // shape: [B, num_head, T, T] auto qkt = bmm_nt(q, k); // shape: [B, num_head, T, T] masked_softmax_dropout(qkt, mask); // shape: [B, num_head, T, dim_per_head] auto attn_ctx = bmm_nn(qkt, v); // shape: [B, T, D] auto attn = transform_0213(attn_ctx); // shape: [B, T, D] auto proj = gemm_nt_bias(attn, proj_weight, proj_bias); return proj; } } // namespace native } // namespace at
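// A minimal scalar sketch, independent of the kernels above, of the per-element
// transform that transform_bias_rescale_qkv_kernel applies: q is biased and then
// divided by sqrt(dim_per_head) (hard-coded to 64, so the divisor is 8), while k
// and v only get their bias added. The struct and function names here are
// illustrative assumptions, not ATen API.
struct QKVRef {
  float q, k, v;
};
static inline QKVRef transform_bias_rescale_qkv_ref(float q, float k, float v,
                                                    float q_bias, float k_bias,
                                                    float v_bias) {
  const float inv_sqrt_dim_per_head = 1.0f / 8.0f; // 1/sqrt(64)
  return {(q + q_bias) * inv_sqrt_dim_per_head, k + k_bias, v + v_bias};
}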
f440d84d361f8bbf2e95daf22c99155d216c3eba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/ztrtri_upper_batched.cu, normal z -> d, Mon Jun 25 18:24:14 2018 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar @author Ahmad Abdelfattah This file implements upper case, and is called by dtrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_BATCHED #include "dtrtri.cuh" #include "dtrtri_upper_device.cuh" /******************************************************************************/ __global__ void dtrtri_diag_upper_kernel_batched( magma_diag_t diag, int n, double const * const * dA_array, int lda, double **dinvA_array) { int batchid = blockIdx.z; dtrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]); } /******************************************************************************/ __global__ void triple_dgemm16_part1_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm16_part2_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm32_part1_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm32_part2_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm64_part1_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm64_part2_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part1_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } 
/******************************************************************************/ __global__ void triple_dgemm_above64_part2_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part3_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } // ============================================================================= // vbatched kernels /******************************************************************************/ __global__ void dtrtri_diag_upper_kernel_vbatched( magma_diag_t diag, magma_int_t* n, double const * const * dA_array, magma_int_t* lda, double **dinvA_array) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; if(blockIdx.x >= magma_ceildiv(my_n, IB)) return; dtrtri_diag_upper_device(diag, my_n, dA_array[batchid], (int)lda[batchid], dinvA_array[batchid]); } // The kernels below have 3D grids // grid.x and grid.y are independent from my_n // only grid.y is dependent on my_n, so terminating thread blocks is based on blockIdx.y /******************************************************************************/ __global__ void triple_dgemm16_part1_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm16_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm16_part2_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm16_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm32_part1_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm32_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm32_part2_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); 
if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm32_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm64_part1_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm64_part2_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part1_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm_above64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part2_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm_above64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part3_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm_above64_part3_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); }
f440d84d361f8bbf2e95daf22c99155d216c3eba.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/ztrtri_upper_batched.cu, normal z -> d, Mon Jun 25 18:24:14 2018 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar @author Ahmad Abdelfattah This file implements upper case, and is called by dtrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_BATCHED #include "dtrtri.cuh" #include "dtrtri_upper_device.cuh" /******************************************************************************/ __global__ void dtrtri_diag_upper_kernel_batched( magma_diag_t diag, int n, double const * const * dA_array, int lda, double **dinvA_array) { int batchid = blockIdx.z; dtrtri_diag_upper_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]); } /******************************************************************************/ __global__ void triple_dgemm16_part1_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm16_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm16_part2_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm16_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm32_part1_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm32_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm32_part2_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm32_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm64_part1_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm64_part2_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part1_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm_above64_part1_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void 
triple_dgemm_above64_part2_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm_above64_part2_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part3_upper_kernel_batched( int n, double const * const * Ain_array, int lda, double **dinvA_array, int jb, int npages) { int batchid = blockIdx.z; triple_dgemm_above64_part3_upper_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); } // ============================================================================= // vbatched kernels /******************************************************************************/ __global__ void dtrtri_diag_upper_kernel_vbatched( magma_diag_t diag, magma_int_t* n, double const * const * dA_array, magma_int_t* lda, double **dinvA_array) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; if(blockIdx.x >= magma_ceildiv(my_n, IB)) return; dtrtri_diag_upper_device(diag, my_n, dA_array[batchid], (int)lda[batchid], dinvA_array[batchid]); } // The kernels below have 3D grids // grid.x and grid.y are independent from my_n // only grid.y is dependent on my_n, so terminating thread blocks is based on blockIdx.y /******************************************************************************/ __global__ void triple_dgemm16_part1_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm16_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm16_part2_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm16_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm32_part1_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm32_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm32_part2_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm32_part2_upper_device( my_n, 
Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm64_part1_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm64_part2_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part1_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm_above64_part1_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part2_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm_above64_part2_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part3_upper_kernel_vbatched( magma_int_t* n, double const * const * Ain_array, magma_int_t* lda, double **dinvA_array, int jb, int npages) { const int batchid = blockIdx.z; const int my_n = (int)n[batchid]; if(my_n <= 0) return; const int my_npages = magma_ceildiv(my_n, jb*2); if(blockIdx.y >= my_npages*(jb/16) ) return; triple_dgemm_above64_part3_upper_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); }
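// A small self-contained sketch of the early-exit bound used by the vbatched
// kernels above: a matrix of size my_n spans my_npages = ceil(my_n/(2*jb)) pages,
// and thread blocks with blockIdx.y >= my_npages*(jb/16) simply return. The
// helper name is an illustrative assumption; magma_ceildiv is the routine the
// kernels actually call, reproduced here as plain integer arithmetic.
static inline int active_griddim_y_upper(int my_n, int jb)
{
    if (my_n <= 0) return 0;                          // whole batch entry is skipped
    const int my_npages = (my_n + 2*jb - 1) / (2*jb); // == magma_ceildiv(my_n, jb*2)
    return my_npages * (jb/16);                       // e.g. my_n=100, jb=16 -> 4*1 = 4
}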
2a62cc8b2c6f3b621ea336bed163f28d62b98b1d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//nvcc --compiler-options -Wall daxpy_cuda_c_1.cu funciones.c -o daxpy_cuda_c_1.out
// ./daxpy_cuda_c_1.out <vector dimension>
#include<stdio.h>
#include<stdlib.h> // atoi, calloc, free
#include<time.h>   // clock, clock_t, CLOCKS_PER_SEC

__global__ void daxpy(double *a, double *b, double *alpha, int *N){
  int tid = blockIdx.x;
  if(tid<*N)
    b[tid] = b[tid] + (*alpha)*a[tid];
}

int main(int argc, char *argv[]){
  double *a, *b;
  double *device_a, *device_b;
  int i;
  double al=3.5;
  double *d_al;
  int N;
  int *d_N;
  double time_spent;

  // dimensions (guard against a missing command-line argument)
  if(argc < 2){
    fprintf(stderr, "usage: %s <vector dimension>\n", argv[0]);
    return 1;
  }
  N=atoi(argv[1]);

  // allocate on host:
  a = (double *)calloc(N,sizeof(double));
  b = (double *)calloc(N,sizeof(double));

  // allocate on device
  hipMalloc((void **)&device_a, sizeof(double)*N);
  hipMalloc((void **)&device_b, sizeof(double)*N);
  hipMalloc((void **)&d_al, sizeof(double));
  hipMalloc((void **)&d_N, sizeof(int));

  // fill the arrays:
  for(i=0;i<N;i++){
    a[i]=i;
    b[i]=i*i;
  }

  // copy arrays a, b to the GPU
  hipMemcpy(device_a,a,N*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(device_b,b,N*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(d_al,&al,sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(d_N,&N,sizeof(int), hipMemcpyHostToDevice);

  // launch daxpy:
  hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
  clock_t begin = clock();

  hipLaunchKernelGGL(( daxpy), dim3(N),dim3(1), 0, 0, device_a,device_b,d_al,d_N); //N blocks of 1 thread

  hipDeviceSynchronize();
  clock_t end = clock();

  // compute time:
  time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
  printf("GPU compute time %.5f\n", time_spent);

  // copy the result back into array b:
  hipMemcpy(b,device_b,N*sizeof(double),hipMemcpyDeviceToHost);

  hipFree(device_a);
  hipFree(device_b);
  hipFree(d_al);
  hipFree(d_N);
  free(a);
  free(b);
  return 0;
}
2a62cc8b2c6f3b621ea336bed163f28d62b98b1d.cu
//nvcc --compiler-options -Wall daxpy_cuda_c_1.cu funciones.c -o daxpy_cuda_c_1.out
// ./daxpy_cuda_c_1.out <vector dimension>
#include<stdio.h>
#include<stdlib.h> // atoi, calloc, free
#include<time.h>   // clock, clock_t, CLOCKS_PER_SEC

__global__ void daxpy(double *a, double *b, double *alpha, int *N){
  int tid = blockIdx.x;
  if(tid<*N)
    b[tid] = b[tid] + (*alpha)*a[tid];
}

int main(int argc, char *argv[]){
  double *a, *b;
  double *device_a, *device_b;
  int i;
  double al=3.5;
  double *d_al;
  int N;
  int *d_N;
  double time_spent;

  // dimensions (guard against a missing command-line argument)
  if(argc < 2){
    fprintf(stderr, "usage: %s <vector dimension>\n", argv[0]);
    return 1;
  }
  N=atoi(argv[1]);

  // allocate on host:
  a = (double *)calloc(N,sizeof(double));
  b = (double *)calloc(N,sizeof(double));

  // allocate on device
  cudaMalloc((void **)&device_a, sizeof(double)*N);
  cudaMalloc((void **)&device_b, sizeof(double)*N);
  cudaMalloc((void **)&d_al, sizeof(double));
  cudaMalloc((void **)&d_N, sizeof(int));

  // fill the arrays:
  for(i=0;i<N;i++){
    a[i]=i;
    b[i]=i*i;
  }

  // copy arrays a, b to the GPU
  cudaMemcpy(device_a,a,N*sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(device_b,b,N*sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_al,&al,sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_N,&N,sizeof(int), cudaMemcpyHostToDevice);

  // launch daxpy:
  cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
  clock_t begin = clock();

  daxpy<<<N,1>>>(device_a,device_b,d_al,d_N); //N blocks of 1 thread

  cudaDeviceSynchronize();
  clock_t end = clock();

  // compute time:
  time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
  printf("GPU compute time %.5f\n", time_spent);

  // copy the result back into array b:
  cudaMemcpy(b,device_b,N*sizeof(double),cudaMemcpyDeviceToHost);

  cudaFree(device_a);
  cudaFree(device_b);
  cudaFree(d_al);
  cudaFree(d_N);
  free(a);
  free(b);
  return 0;
}
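// A short sketch of a more conventional launch configuration than the
// one-thread-per-block launch used above (<<<N,1>>>): a fixed block size with a
// grid-stride loop, so the grid no longer needs one block per element. This is
// an alternative illustration, not the configuration the program above uses;
// the kernel name and block size are assumptions for the example.
__global__ void daxpy_strided(double *a, double *b, double alpha, int n){
  // global thread id plus grid-stride loop over all n elements
  for(int idx = blockIdx.x*blockDim.x + threadIdx.x; idx < n; idx += gridDim.x*blockDim.x)
    b[idx] = b[idx] + alpha*a[idx];
}
// Possible launch from main(), with alpha and N passed by value instead of via
// device pointers:
//   int threads = 256;
//   int blocks  = (N + threads - 1) / threads;
//   daxpy_strided<<<blocks,threads>>>(device_a, device_b, al, N);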
525fe1db69f15baa3d073686e7ed42044273bda6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_mparticles.cuh" #include "cuda_bits.h" #include "psc_bits.h" #include "bs.hxx" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include "cuda_base.cuh" #include <cstdio> #include <cassert> // ---------------------------------------------------------------------- // ctor template <typename BS> cuda_mparticles<BS>::cuda_mparticles(const Grid_t& grid) : cuda_mparticles_base<BS>(grid) { cuda_base_init(); xb_by_patch.resize(this->n_patches()); for (int p = 0; p < this->n_patches(); p++) { xb_by_patch[p] = Real3(grid.patches[p].xb); } } // ---------------------------------------------------------------------- // resize // // the goal here is to have d_xi4, d_pxi4, d_bidx and d_id always // have the same size. template <typename BS> void cuda_mparticles<BS>::resize(uint n_prts) { cuda_mparticles_base<BS>::resize(n_prts); this->by_block_.d_idx.resize(n_prts); this->by_block_.d_id.resize(n_prts); } // ---------------------------------------------------------------------- // dump_by_patch template <typename BS> void cuda_mparticles<BS>::dump_by_patch(uint* n_prts_by_patch) { printf("cuda_mparticles_dump_by_patch: n_prts = %d\n", this->n_prts); uint off = 0; for (int p = 0; p < this->n_patches(); p++) { float* xb = &xb_by_patch[p][0]; for (int n = 0; n < n_prts_by_patch[p]; n++) { auto prt = this->storage.load(n + off); uint bidx = this->by_block_.d_idx[n + off], id = this->by_block_.d_id[n + off]; printf("cuda_mparticles_dump_by_patch: [%d/%d] %g %g %g // %d // %g %g " "%g // %g b_idx %d id %d\n", p, n, prt.x[0] + xb[0], prt.x[1] + xb[1], prt.x[2] + xb[2], prt.kind, prt.u[0], prt.u[1], prt.u[2], prt.qni_wni, bidx, id); } off += n_prts_by_patch[p]; } } // ---------------------------------------------------------------------- // dump template <typename BS> void cuda_mparticles<BS>::dump(const std::string& filename) const { FILE* file = fopen(filename.c_str(), "w"); assert(file); fprintf(file, "cuda_mparticles_dump: n_prts = %d\n", this->n_prts); uint off = 0; auto& d_off = this->by_block_.d_off; for (int b = 0; b < this->n_blocks; b++) { uint off_b = d_off[b], off_e = d_off[b + 1]; int p = b / this->n_blocks_per_patch; fprintf(file, "cuda_mparticles_dump: block %d: %d -> %d (patch %d)\n", b, off_b, off_e, p); assert(d_off[b] == off); for (int n = d_off[b]; n < d_off[b + 1]; n++) { auto prt = this->storage.load(n + off); uint bidx = this->by_block_.d_idx[n], id = this->by_block_.d_id[n]; fprintf(file, "mparticles_dump: [%d] %g %g %g // %d // %g %g %g // %g || bidx " "%d id %d %s\n", n, prt.x[0], prt.x[1], prt.x[2], prt.kind, prt.u[0], prt.u[1], prt.u[2], prt.qni_wni, bidx, id, b == bidx ? 
"" : "BIDX MISMATCH!"); } off += off_e - off_b; } fclose(file); } // ---------------------------------------------------------------------- // swap_alt template <typename BS> void cuda_mparticles<BS>::swap_alt() { this->storage.xi4.swap(alt_storage.xi4); // thrust::swap(this->storage.xi4, alt_storage.xi4); this->storage.pxi4.swap(alt_storage.pxi4); // thrust::swap(this->storage.pxi4, alt_storage.pxi4); } #define THREADS_PER_BLOCK 256 // ---------------------------------------------------------------------- // k_reorder_and_offsets template <typename BS> __global__ static void k_reorder_and_offsets(DMparticlesCuda<BS> dmprts, int nr_prts, const uint* d_bidx, const uint* d_ids, uint* d_off, int last_block) { int i = threadIdx.x + blockDim.x * blockIdx.x; for (; i <= nr_prts; i += blockDim.x * gridDim.x) { int block, prev_block; if (i < nr_prts) { dmprts.storage.xi4[i] = dmprts.alt_storage.xi4[d_ids[i]]; dmprts.storage.pxi4[i] = dmprts.alt_storage.pxi4[d_ids[i]]; block = d_bidx[i]; } else { // needed if there is no particle in the last block block = last_block; } // OPT: d_bidx[i-1] could use shmem // create offsets per block into particle array prev_block = -1; if (i > 0) { prev_block = d_bidx[i - 1]; } for (int b = prev_block + 1; b <= block; b++) { d_off[b] = i; } } } // ---------------------------------------------------------------------- // reorder_and_offsets template <typename BS> void cuda_mparticles<BS>::reorder_and_offsets( const psc::device_vector<uint>& d_idx, const psc::device_vector<uint>& d_id, psc::device_vector<uint>& d_off) { if (this->n_patches() == 0) { return; } swap_alt(); resize(this->n_prts); int n_blocks = (this->n_prts + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; if (n_blocks > 32768) n_blocks = 32768; dim3 dimGrid(n_blocks); dim3 dimBlock(THREADS_PER_BLOCK); hipLaunchKernelGGL(( k_reorder_and_offsets<BS>), dim3(dimGrid), dim3(dimBlock), 0, 0, *this, this->n_prts, d_idx.data().get(), d_id.data().get(), d_off.data().get(), this->n_blocks); cuda_sync_if_enabled(); need_reorder = false; } // ---------------------------------------------------------------------- // k_reorder template <typename BS> __global__ static void k_reorder(DMparticlesCuda<BS> dmprts, int n_prts, const uint* d_ids) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i < n_prts) { int j = d_ids[i]; dmprts.storage.xi4[i] = dmprts.alt_storage.xi4[j]; dmprts.storage.pxi4[i] = dmprts.alt_storage.pxi4[j]; } } // ---------------------------------------------------------------------- // reorder template <typename BS> void cuda_mparticles<BS>::reorder() { if (!need_reorder) { return; } reorder(this->by_block_.d_id); need_reorder = false; } // ---------------------------------------------------------------------- // reorder template <typename BS> void cuda_mparticles<BS>::reorder(const psc::device_vector<uint>& d_id) { if (this->n_prts == 0) { return; } swap_alt(); resize(this->n_prts); dim3 dimGrid((this->n_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK); hipLaunchKernelGGL(( k_reorder<BS>) , dim3(dimGrid), dim3(THREADS_PER_BLOCK), 0, 0, *this, this->n_prts, d_id.data().get()); cuda_sync_if_enabled(); } // ---------------------------------------------------------------------- // setup_internals template <typename BS> void cuda_mparticles<BS>::setup_internals() { // pre-condition: particles sorted by patch, d_off being used to // describe patch boundaries // assert(check_in_patch_unordered_slow()); this->by_block_.find_indices_ids(*this); // assert(check_bidx_id_unordered_slow()); 
this->by_block_.stable_sort(); this->by_block_.reorder_and_offsets(*this); // post-condition: // - particles now sorted by block // - d_off describes block boundaries // - UNUSED: d_bidx has each particle's block index // assert(check_ordered()); } // ---------------------------------------------------------------------- // size template <typename BS> uint cuda_mparticles<BS>::size() { return this->n_prts; } // ---------------------------------------------------------------------- // inject_initial // // adds particles initially, ie., into an empty cmprts // does not complete setting correct internal state // (setup_internal() needs to be called next) template <typename BS> void cuda_mparticles<BS>::inject_initial( const std::vector<Particle>& buf, const std::vector<uint>& n_prts_by_patch) { thrust::host_vector<uint> h_off(this->by_block_.d_off); assert(this->storage.xi4.size() == 0); assert(this->n_prts == 0); uint buf_n = 0; for (int p = 0; p < this->n_patches(); p++) { assert(h_off[p * this->n_blocks_per_patch] == 0); assert(h_off[(p + 1) * this->n_blocks_per_patch] == 0); buf_n += n_prts_by_patch[p]; } resize(buf_n); HMparticlesCudaStorage h_storage{buf_n}; auto it = buf.begin(); uint off = 0; for (int p = 0; p < this->n_patches(); p++) { auto n_prts = n_prts_by_patch[p]; h_off[p * this->n_blocks_per_patch] = off; h_off[(p + 1) * this->n_blocks_per_patch] = off + n_prts; for (int n = 0; n < n_prts; n++) { auto prt = *it++; this->checkInPatchMod(prt.x); h_storage.store(prt, off + n); } off += n_prts; } this->n_prts = off; thrust::copy(h_storage.xi4.begin(), h_storage.xi4.end(), this->storage.xi4.begin()); thrust::copy(h_storage.pxi4.begin(), h_storage.pxi4.end(), this->storage.pxi4.begin()); thrust::copy(h_off.begin(), h_off.end(), this->by_block_.d_off.begin()); } // ---------------------------------------------------------------------- // inject template <typename BS> void cuda_mparticles<BS>::inject(const std::vector<Particle>& buf, const std::vector<uint>& buf_n_by_patch) { if (this->n_prts == 0) { // if there are no particles yet, we basically just initialize from the // buffer inject_initial(buf, buf_n_by_patch); setup_internals(); return; } using Double3 = Vec3<double>; uint buf_n = 0; for (int p = 0; p < this->n_patches(); p++) { buf_n += buf_n_by_patch[p]; // printf("p %d buf_n_by_patch %d\n", p, buf_n_by_patch[p]); } // printf("buf_n %d\n", buf_n); HMparticlesCudaStorage h_storage(buf_n); thrust::host_vector<uint> h_bidx(buf_n); // thrust::host_vector<uint> h_id(buf_n); uint off = 0; for (int p = 0; p < this->n_patches(); p++) { for (int n = 0; n < buf_n_by_patch[p]; n++) { auto prt = buf[off + n]; h_storage.store(prt, off + n); auto bidx = this->blockIndex(prt, p); assert(bidx >= 0 && bidx < this->n_blocks); h_bidx[off + n] = bidx; ; // h_id[off + n] = this->n_prts + off + n; } off += buf_n_by_patch[p]; } assert(off == buf_n); if (need_reorder) { reorder(); } // assert(check_in_patch_unordered_slow()); this->by_block_.find_indices_ids(*this); // assert(check_bidx_id_unordered_slow()); resize(this->n_prts + buf_n); thrust::copy(h_storage.xi4.begin(), h_storage.xi4.end(), this->storage.xi4.begin() + this->n_prts); thrust::copy(h_storage.pxi4.begin(), h_storage.pxi4.end(), this->storage.pxi4.begin() + this->n_prts); thrust::copy(h_bidx.begin(), h_bidx.end(), this->by_block_.d_idx.begin() + this->n_prts); // thrust::copy(h_id.begin(), h_id.end(), d_id + n_prts); // FIXME, looks like ids up until n_prts have already been set above thrust::sequence(this->by_block_.d_id.data(), 
this->by_block_.d_id.data() + this->n_prts + buf_n); // for (int i = -5; i <= 5; i++) { // // float4 xi4 = d_xi4[cmprts->n_prts + i]; // uint bidx = d_bidx[cmprts->n_prts + i]; // uint id = d_id[cmprts->n_prts + i]; // printf("i %d bidx %d %d\n", i, bidx, id); // } // assert(check_ordered()); this->n_prts += buf_n; this->by_block_.stable_sort(); this->by_block_.reorder_and_offsets(*this); // assert(check_ordered()); } // ---------------------------------------------------------------------- // get_particles template <typename BS> std::vector<typename cuda_mparticles<BS>::Particle> cuda_mparticles<BS>::get_particles(int beg, int end) { int n_prts = end - beg; std::vector<Particle> prts; prts.reserve(n_prts); reorder(); // FIXME? by means of this, this function disturbs the state... thrust::host_vector<float4> xi4(&this->storage.xi4[beg], &this->storage.xi4[end]); thrust::host_vector<float4> pxi4(&this->storage.pxi4[beg], &this->storage.pxi4[end]); for (int n = 0; n < n_prts; n++) { int kind = cuda_float_as_int(xi4[n].w); prts.emplace_back(Real3{xi4[n].x, xi4[n].y, xi4[n].z}, Real3{pxi4[n].x, pxi4[n].y, pxi4[n].z}, pxi4[n].w, kind, psc::particle::Id(), psc::particle::Tag()); #if 0 uint b = blockIndex(xi4[n], p); assert(b < n_blocks); #endif } return prts; } // ---------------------------------------------------------------------- // get_particles template <typename BS> std::vector<uint> cuda_mparticles<BS>::get_offsets() const { thrust::host_vector<uint> h_off(this->by_block_.d_off); std::vector<uint> off(this->n_patches() + 1); for (int p = 0; p <= this->n_patches(); p++) { off[p] = h_off[p * this->n_blocks_per_patch]; } return off; } // ---------------------------------------------------------------------- // get_particles template <typename BS> std::vector<typename cuda_mparticles<BS>::Particle> cuda_mparticles<BS>::get_particles() { return get_particles(0, this->n_prts); } // ---------------------------------------------------------------------- // get_particles template <typename BS> std::vector<typename cuda_mparticles<BS>::Particle> cuda_mparticles<BS>::get_particles(int p) { // FIXME, doing the copy here all the time would be nice to avoid // making sure we actually have a valid d_off would't hurt, either thrust::host_vector<uint> h_off(this->by_block_.d_off); uint beg = h_off[p * this->n_blocks_per_patch]; uint end = h_off[(p + 1) * this->n_blocks_per_patch]; return get_particles(beg, end); } // ---------------------------------------------------------------------- // get_particle template <typename BS> typename cuda_mparticles<BS>::Particle cuda_mparticles<BS>::get_particle(int p, int n) { auto off = this->by_block_.d_off[p * this->n_blocks_per_patch]; auto cprts = get_particles(off + n, off + n + 1); return cprts[0]; } #include "cuda_mparticles_gold.cu" #include "cuda_mparticles_checks.cu" template struct cuda_mparticles<BS144>; template struct cuda_mparticles<BS444>;
525fe1db69f15baa3d073686e7ed42044273bda6.cu
#include "cuda_mparticles.cuh" #include "cuda_bits.h" #include "psc_bits.h" #include "bs.hxx" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include "cuda_base.cuh" #include <cstdio> #include <cassert> // ---------------------------------------------------------------------- // ctor template <typename BS> cuda_mparticles<BS>::cuda_mparticles(const Grid_t& grid) : cuda_mparticles_base<BS>(grid) { cuda_base_init(); xb_by_patch.resize(this->n_patches()); for (int p = 0; p < this->n_patches(); p++) { xb_by_patch[p] = Real3(grid.patches[p].xb); } } // ---------------------------------------------------------------------- // resize // // the goal here is to have d_xi4, d_pxi4, d_bidx and d_id always // have the same size. template <typename BS> void cuda_mparticles<BS>::resize(uint n_prts) { cuda_mparticles_base<BS>::resize(n_prts); this->by_block_.d_idx.resize(n_prts); this->by_block_.d_id.resize(n_prts); } // ---------------------------------------------------------------------- // dump_by_patch template <typename BS> void cuda_mparticles<BS>::dump_by_patch(uint* n_prts_by_patch) { printf("cuda_mparticles_dump_by_patch: n_prts = %d\n", this->n_prts); uint off = 0; for (int p = 0; p < this->n_patches(); p++) { float* xb = &xb_by_patch[p][0]; for (int n = 0; n < n_prts_by_patch[p]; n++) { auto prt = this->storage.load(n + off); uint bidx = this->by_block_.d_idx[n + off], id = this->by_block_.d_id[n + off]; printf("cuda_mparticles_dump_by_patch: [%d/%d] %g %g %g // %d // %g %g " "%g // %g b_idx %d id %d\n", p, n, prt.x[0] + xb[0], prt.x[1] + xb[1], prt.x[2] + xb[2], prt.kind, prt.u[0], prt.u[1], prt.u[2], prt.qni_wni, bidx, id); } off += n_prts_by_patch[p]; } } // ---------------------------------------------------------------------- // dump template <typename BS> void cuda_mparticles<BS>::dump(const std::string& filename) const { FILE* file = fopen(filename.c_str(), "w"); assert(file); fprintf(file, "cuda_mparticles_dump: n_prts = %d\n", this->n_prts); uint off = 0; auto& d_off = this->by_block_.d_off; for (int b = 0; b < this->n_blocks; b++) { uint off_b = d_off[b], off_e = d_off[b + 1]; int p = b / this->n_blocks_per_patch; fprintf(file, "cuda_mparticles_dump: block %d: %d -> %d (patch %d)\n", b, off_b, off_e, p); assert(d_off[b] == off); for (int n = d_off[b]; n < d_off[b + 1]; n++) { auto prt = this->storage.load(n + off); uint bidx = this->by_block_.d_idx[n], id = this->by_block_.d_id[n]; fprintf(file, "mparticles_dump: [%d] %g %g %g // %d // %g %g %g // %g || bidx " "%d id %d %s\n", n, prt.x[0], prt.x[1], prt.x[2], prt.kind, prt.u[0], prt.u[1], prt.u[2], prt.qni_wni, bidx, id, b == bidx ? 
"" : "BIDX MISMATCH!"); } off += off_e - off_b; } fclose(file); } // ---------------------------------------------------------------------- // swap_alt template <typename BS> void cuda_mparticles<BS>::swap_alt() { this->storage.xi4.swap(alt_storage.xi4); // thrust::swap(this->storage.xi4, alt_storage.xi4); this->storage.pxi4.swap(alt_storage.pxi4); // thrust::swap(this->storage.pxi4, alt_storage.pxi4); } #define THREADS_PER_BLOCK 256 // ---------------------------------------------------------------------- // k_reorder_and_offsets template <typename BS> __global__ static void k_reorder_and_offsets(DMparticlesCuda<BS> dmprts, int nr_prts, const uint* d_bidx, const uint* d_ids, uint* d_off, int last_block) { int i = threadIdx.x + blockDim.x * blockIdx.x; for (; i <= nr_prts; i += blockDim.x * gridDim.x) { int block, prev_block; if (i < nr_prts) { dmprts.storage.xi4[i] = dmprts.alt_storage.xi4[d_ids[i]]; dmprts.storage.pxi4[i] = dmprts.alt_storage.pxi4[d_ids[i]]; block = d_bidx[i]; } else { // needed if there is no particle in the last block block = last_block; } // OPT: d_bidx[i-1] could use shmem // create offsets per block into particle array prev_block = -1; if (i > 0) { prev_block = d_bidx[i - 1]; } for (int b = prev_block + 1; b <= block; b++) { d_off[b] = i; } } } // ---------------------------------------------------------------------- // reorder_and_offsets template <typename BS> void cuda_mparticles<BS>::reorder_and_offsets( const psc::device_vector<uint>& d_idx, const psc::device_vector<uint>& d_id, psc::device_vector<uint>& d_off) { if (this->n_patches() == 0) { return; } swap_alt(); resize(this->n_prts); int n_blocks = (this->n_prts + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; if (n_blocks > 32768) n_blocks = 32768; dim3 dimGrid(n_blocks); dim3 dimBlock(THREADS_PER_BLOCK); k_reorder_and_offsets<BS><<<dimGrid, dimBlock>>>( *this, this->n_prts, d_idx.data().get(), d_id.data().get(), d_off.data().get(), this->n_blocks); cuda_sync_if_enabled(); need_reorder = false; } // ---------------------------------------------------------------------- // k_reorder template <typename BS> __global__ static void k_reorder(DMparticlesCuda<BS> dmprts, int n_prts, const uint* d_ids) { int i = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x; if (i < n_prts) { int j = d_ids[i]; dmprts.storage.xi4[i] = dmprts.alt_storage.xi4[j]; dmprts.storage.pxi4[i] = dmprts.alt_storage.pxi4[j]; } } // ---------------------------------------------------------------------- // reorder template <typename BS> void cuda_mparticles<BS>::reorder() { if (!need_reorder) { return; } reorder(this->by_block_.d_id); need_reorder = false; } // ---------------------------------------------------------------------- // reorder template <typename BS> void cuda_mparticles<BS>::reorder(const psc::device_vector<uint>& d_id) { if (this->n_prts == 0) { return; } swap_alt(); resize(this->n_prts); dim3 dimGrid((this->n_prts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK); k_reorder<BS> <<<dimGrid, THREADS_PER_BLOCK>>>(*this, this->n_prts, d_id.data().get()); cuda_sync_if_enabled(); } // ---------------------------------------------------------------------- // setup_internals template <typename BS> void cuda_mparticles<BS>::setup_internals() { // pre-condition: particles sorted by patch, d_off being used to // describe patch boundaries // assert(check_in_patch_unordered_slow()); this->by_block_.find_indices_ids(*this); // assert(check_bidx_id_unordered_slow()); this->by_block_.stable_sort(); this->by_block_.reorder_and_offsets(*this); // 
post-condition: // - particles now sorted by block // - d_off describes block boundaries // - UNUSED: d_bidx has each particle's block index // assert(check_ordered()); } // ---------------------------------------------------------------------- // size template <typename BS> uint cuda_mparticles<BS>::size() { return this->n_prts; } // ---------------------------------------------------------------------- // inject_initial // // adds particles initially, ie., into an empty cmprts // does not complete setting correct internal state // (setup_internal() needs to be called next) template <typename BS> void cuda_mparticles<BS>::inject_initial( const std::vector<Particle>& buf, const std::vector<uint>& n_prts_by_patch) { thrust::host_vector<uint> h_off(this->by_block_.d_off); assert(this->storage.xi4.size() == 0); assert(this->n_prts == 0); uint buf_n = 0; for (int p = 0; p < this->n_patches(); p++) { assert(h_off[p * this->n_blocks_per_patch] == 0); assert(h_off[(p + 1) * this->n_blocks_per_patch] == 0); buf_n += n_prts_by_patch[p]; } resize(buf_n); HMparticlesCudaStorage h_storage{buf_n}; auto it = buf.begin(); uint off = 0; for (int p = 0; p < this->n_patches(); p++) { auto n_prts = n_prts_by_patch[p]; h_off[p * this->n_blocks_per_patch] = off; h_off[(p + 1) * this->n_blocks_per_patch] = off + n_prts; for (int n = 0; n < n_prts; n++) { auto prt = *it++; this->checkInPatchMod(prt.x); h_storage.store(prt, off + n); } off += n_prts; } this->n_prts = off; thrust::copy(h_storage.xi4.begin(), h_storage.xi4.end(), this->storage.xi4.begin()); thrust::copy(h_storage.pxi4.begin(), h_storage.pxi4.end(), this->storage.pxi4.begin()); thrust::copy(h_off.begin(), h_off.end(), this->by_block_.d_off.begin()); } // ---------------------------------------------------------------------- // inject template <typename BS> void cuda_mparticles<BS>::inject(const std::vector<Particle>& buf, const std::vector<uint>& buf_n_by_patch) { if (this->n_prts == 0) { // if there are no particles yet, we basically just initialize from the // buffer inject_initial(buf, buf_n_by_patch); setup_internals(); return; } using Double3 = Vec3<double>; uint buf_n = 0; for (int p = 0; p < this->n_patches(); p++) { buf_n += buf_n_by_patch[p]; // printf("p %d buf_n_by_patch %d\n", p, buf_n_by_patch[p]); } // printf("buf_n %d\n", buf_n); HMparticlesCudaStorage h_storage(buf_n); thrust::host_vector<uint> h_bidx(buf_n); // thrust::host_vector<uint> h_id(buf_n); uint off = 0; for (int p = 0; p < this->n_patches(); p++) { for (int n = 0; n < buf_n_by_patch[p]; n++) { auto prt = buf[off + n]; h_storage.store(prt, off + n); auto bidx = this->blockIndex(prt, p); assert(bidx >= 0 && bidx < this->n_blocks); h_bidx[off + n] = bidx; ; // h_id[off + n] = this->n_prts + off + n; } off += buf_n_by_patch[p]; } assert(off == buf_n); if (need_reorder) { reorder(); } // assert(check_in_patch_unordered_slow()); this->by_block_.find_indices_ids(*this); // assert(check_bidx_id_unordered_slow()); resize(this->n_prts + buf_n); thrust::copy(h_storage.xi4.begin(), h_storage.xi4.end(), this->storage.xi4.begin() + this->n_prts); thrust::copy(h_storage.pxi4.begin(), h_storage.pxi4.end(), this->storage.pxi4.begin() + this->n_prts); thrust::copy(h_bidx.begin(), h_bidx.end(), this->by_block_.d_idx.begin() + this->n_prts); // thrust::copy(h_id.begin(), h_id.end(), d_id + n_prts); // FIXME, looks like ids up until n_prts have already been set above thrust::sequence(this->by_block_.d_id.data(), this->by_block_.d_id.data() + this->n_prts + buf_n); // for (int i = -5; i <= 5; 
i++) { // // float4 xi4 = d_xi4[cmprts->n_prts + i]; // uint bidx = d_bidx[cmprts->n_prts + i]; // uint id = d_id[cmprts->n_prts + i]; // printf("i %d bidx %d %d\n", i, bidx, id); // } // assert(check_ordered()); this->n_prts += buf_n; this->by_block_.stable_sort(); this->by_block_.reorder_and_offsets(*this); // assert(check_ordered()); } // ---------------------------------------------------------------------- // get_particles template <typename BS> std::vector<typename cuda_mparticles<BS>::Particle> cuda_mparticles<BS>::get_particles(int beg, int end) { int n_prts = end - beg; std::vector<Particle> prts; prts.reserve(n_prts); reorder(); // FIXME? by means of this, this function disturbs the state... thrust::host_vector<float4> xi4(&this->storage.xi4[beg], &this->storage.xi4[end]); thrust::host_vector<float4> pxi4(&this->storage.pxi4[beg], &this->storage.pxi4[end]); for (int n = 0; n < n_prts; n++) { int kind = cuda_float_as_int(xi4[n].w); prts.emplace_back(Real3{xi4[n].x, xi4[n].y, xi4[n].z}, Real3{pxi4[n].x, pxi4[n].y, pxi4[n].z}, pxi4[n].w, kind, psc::particle::Id(), psc::particle::Tag()); #if 0 uint b = blockIndex(xi4[n], p); assert(b < n_blocks); #endif } return prts; } // ---------------------------------------------------------------------- // get_particles template <typename BS> std::vector<uint> cuda_mparticles<BS>::get_offsets() const { thrust::host_vector<uint> h_off(this->by_block_.d_off); std::vector<uint> off(this->n_patches() + 1); for (int p = 0; p <= this->n_patches(); p++) { off[p] = h_off[p * this->n_blocks_per_patch]; } return off; } // ---------------------------------------------------------------------- // get_particles template <typename BS> std::vector<typename cuda_mparticles<BS>::Particle> cuda_mparticles<BS>::get_particles() { return get_particles(0, this->n_prts); } // ---------------------------------------------------------------------- // get_particles template <typename BS> std::vector<typename cuda_mparticles<BS>::Particle> cuda_mparticles<BS>::get_particles(int p) { // FIXME, doing the copy here all the time would be nice to avoid // making sure we actually have a valid d_off would't hurt, either thrust::host_vector<uint> h_off(this->by_block_.d_off); uint beg = h_off[p * this->n_blocks_per_patch]; uint end = h_off[(p + 1) * this->n_blocks_per_patch]; return get_particles(beg, end); } // ---------------------------------------------------------------------- // get_particle template <typename BS> typename cuda_mparticles<BS>::Particle cuda_mparticles<BS>::get_particle(int p, int n) { auto off = this->by_block_.d_off[p * this->n_blocks_per_patch]; auto cprts = get_particles(off + n, off + n + 1); return cprts[0]; } #include "cuda_mparticles_gold.cu" #include "cuda_mparticles_checks.cu" template struct cuda_mparticles<BS144>; template struct cuda_mparticles<BS444>;
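The sorting pipeline used above (find_indices_ids, stable_sort, reorder_and_offsets) lives elsewhere in the PSC sources and is not shown in this record. Purely as a rough, self-contained sketch of the underlying idea, the example below sorts particle ids by block index with Thrust and derives per-block offsets with a vectorized lower_bound; every name and size in it is invented for the illustration and is not taken from the code above.

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/counting_iterator.h>

int main()
{
  // hypothetical input: 8 particles scattered over 4 blocks
  const int n_prts = 8, n_blocks = 4;
  unsigned h_bidx[n_prts] = {2, 0, 3, 1, 0, 2, 1, 3};

  thrust::device_vector<unsigned> d_bidx(h_bidx, h_bidx + n_prts);
  thrust::device_vector<unsigned> d_id(n_prts);
  thrust::sequence(d_id.begin(), d_id.end()); // original particle positions

  // sort ids by block index (the role played by by_block_.stable_sort())
  thrust::stable_sort_by_key(d_bidx.begin(), d_bidx.end(), d_id.begin());

  // d_off[b] = index of the first particle of block b; d_off[n_blocks] = n_prts
  thrust::device_vector<unsigned> d_off(n_blocks + 1);
  thrust::counting_iterator<unsigned> blocks(0);
  thrust::lower_bound(d_bidx.begin(), d_bidx.end(), blocks, blocks + n_blocks + 1,
                      d_off.begin());

  for (int b = 0; b <= n_blocks; b++)
    printf("off[%d] = %u\n", b, (unsigned)d_off[b]);
  return 0;
}

The real code in this record instead builds the offsets inside k_reorder_and_offsets while it gathers the particle data, which avoids a separate search pass over the sorted keys.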
7799e56806789741c8378d7792608b900f8efe9c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> __global__ void checkIndex(void) { /* in the kernel, each thread prints its own thread index, block index, block dimensions and grid dimensions */ printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) " "gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(int argc, char **argv) { // define the total amount of data int nElem = 6; // define grid and block structure dim3 block(3); dim3 grid((nElem + block.x - 1) / block.x); // check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); // 2 1 1 printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // 3 1 1 // check grid and block dimension from device side hipLaunchKernelGGL(( checkIndex) , dim3(grid), dim3(block), 0, 0, ); // reset device before you leave; hipDeviceReset(); return(0); }
7799e56806789741c8378d7792608b900f8efe9c.cu
#include <stdio.h> #include <cuda_runtime.h> __global__ void checkIndex(void) { /* in the kernel, each thread prints its own thread index, block index, block dimensions and grid dimensions */ printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) " "gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(int argc, char **argv) { // define the total amount of data int nElem = 6; // define grid and block structure dim3 block(3); dim3 grid((nElem + block.x - 1) / block.x); // check grid and block dimension from host side printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); // 2 1 1 printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); // 3 1 1 // check grid and block dimension from device side checkIndex <<<grid, block>>>(); // reset device before you leave; cudaDeviceReset(); return(0); }
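The pair above only prints the built-in index variables. As a small companion sketch (not part of the dataset; the kernel name and output array are made up), the snippet below shows the usual way those variables are combined into a global element index, using the same 2-block by 3-thread launch:

#include <stdio.h>
#include <cuda_runtime.h>

// each thread derives its global index from the built-ins printed above
__global__ void writeGlobalIndex(int *out, int n)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n) out[gid] = gid;
}

int main(void)
{
    const int n = 6;
    int h_out[n];
    int *d_out;
    cudaMalloc(&d_out, n * sizeof(int));

    dim3 block(3);
    dim3 grid((n + block.x - 1) / block.x); // 2 blocks of 3 threads, as above

    writeGlobalIndex<<<grid, block>>>(d_out, n);
    cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; i++) printf("out[%d] = %d\n", i, h_out[i]);

    cudaFree(d_out);
    cudaDeviceReset();
    return 0;
}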
0145ea6a1fe18368c28dc1cfb383427f22dd3ed9.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (C) 2018 ETH Zurich // Copyright (C) 2018 UT-Battelle, LLC // All rights reserved. // // See LICENSE for terms of usage. // See CITATION.md for citation guidelines, if DCA++ is used for scientific publications. // // Author: Giovanni Balduzzi ([email protected]) // Peter Staar ([email protected]) // Raffaele Solca' ([email protected]) // // This file implements kernels_gpu.hpp #include "dca/linalg/blas/kernels_gpu.hpp" #include <cassert> #include <hip/hip_complex.h> #include <hip/hip_runtime.h> #include "dca/linalg/util/complex_operators_cuda.cu.hpp" #include "dca/linalg/util/error_cuda.hpp" #include "dca/linalg/util/stream_functions.hpp" #include "dca/util/integer_division.hpp" namespace dca { namespace linalg { namespace blas { namespace kernels { // dca::linalg::blas::kernels:: constexpr int copy_block_size_x = 32; constexpr int copy_block_size_y = 8; constexpr int move_block_size_x = 32; constexpr int move_block_size_y = 8; constexpr int scale_block_size_x = 32; constexpr int scale_block_size_y = 32; constexpr int swap_block_size_x = 32; constexpr int swap_block_size_y = 32; template <typename Type> __global__ void copyRows(int row_size, int n_rows, const int* i_x, const Type* x, int ldx, const int* i_y, Type* y, int ldy) { assert(blockDim.y == 1); assert(blockDim.z == 1); assert(blockIdx.z == 0); // Work on BlockDim.x rows and copyrows_block_size_y cols. int ind_i = threadIdx.x + blockIdx.x * blockDim.x; int js = blockIdx.y * copy_block_size_y; int je = min(row_size, (blockIdx.y + 1) * copy_block_size_y); if (ind_i < n_rows) { int iy = i_y[ind_i]; int ix = i_x[ind_i]; for (int j = js; j < je; ++j) y[iy + j * ldy] = x[ix + j * ldx]; } } template <typename Type> __global__ void copyCols(int col_size, int n_cols, const int* j_x, const Type* x, int ldx, const int* j_y, Type* y, int ldy) { assert(blockDim.y == 1); assert(blockDim.z == 1); assert(blockIdx.z == 0); // Work on BlockDim.x rows and copyrows_block_size_y cols. 
int i = threadIdx.x + blockIdx.x * blockDim.x; int ind_js = blockIdx.y * copy_block_size_y; int ind_je = min(n_cols, (blockIdx.y + 1) * copy_block_size_y); if (i < col_size) { for (int ind_j = ind_js; ind_j < ind_je; ++ind_j) y[i + j_y[ind_j] * ldy] = x[i + j_x[ind_j] * ldx]; } } template <typename Type> __global__ void moveLeft(int m, int n, Type* a, int lda) { assert(blockDim.y == 1); assert(blockDim.z == 1); assert(blockIdx.y == 0); assert(blockIdx.z == 0); int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < m) { for (int j = 0; j < n - 1; ++j) a[i + j * lda] = a[i + (j + 1) * lda]; } } template <typename Type> __global__ void moveUp(int m, int n, Type* a, int lda) { assert(blockDim.x == move_block_size_x); assert(blockDim.y == 1); assert(blockDim.z == 1); assert(blockIdx.x == 0); assert(blockIdx.z == 0); __shared__ Type work[move_block_size_x * move_block_size_y]; int ldw = move_block_size_x; int idx = threadIdx.x; int js = blockIdx.y * move_block_size_y; int je = min(n, (blockIdx.y + 1) * move_block_size_y); int jd = je - js; a += lda * js; int m_div = (m - 1) / blockDim.x * blockDim.x; for (int i = 0; i < m_div; i += blockDim.x) { for (int j = 0; j < jd; ++j) work[idx + ldw * j] = a[i + 1 + idx + lda * j]; __syncthreads(); for (int j = 0; j < jd; ++j) a[i + idx + lda * j] = work[idx + ldw * j]; __syncthreads(); } int i = m_div; if (i + idx < m - 1) { for (int j = 0; j < jd; ++j) work[idx + ldw * j] = a[i + 1 + idx + lda * j]; __syncthreads(); for (int j = 0; j < jd; ++j) a[i + idx + lda * j] = work[idx + ldw * j]; } } template <typename Type> __global__ void scaleRows(int row_size, int n_rows, const int* i, const Type* alpha, Type* a, int lda) { const int ind_i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; if (ind_i < n_rows && j < row_size) { a[i[ind_i] + j * lda] *= alpha[ind_i]; } } template <typename Type> __global__ void swapRows(int row_size, int n_rows, const int* i1, const int* i2, Type* a, int lda) { const int ind_i = threadIdx.x + blockIdx.x * blockDim.x; const int ind_j = threadIdx.y + blockIdx.y * blockDim.y; if (ind_i < n_rows && ind_j < row_size) { const Type tmp = a[i1[ind_i] + ind_j * lda]; a[i1[ind_i] + ind_j * lda] = a[i2[ind_i] + ind_j * lda]; a[i2[ind_i] + ind_j * lda] = tmp; } } template <typename Type> __global__ void swapCols(int col_size, int n_cols, const int* j1, const int* j2, Type* a, int lda) { const int ind_i = threadIdx.x + blockIdx.x * blockDim.x; const int ind_j = threadIdx.y + blockIdx.y * blockDim.y; if (ind_i < col_size && ind_j < n_cols) { const Type tmp = a[ind_i + j1[ind_j] * lda]; a[ind_i + j1[ind_j] * lda] = a[ind_i + j2[ind_j] * lda]; a[ind_i + j2[ind_j] * lda] = tmp; } } } // kernels // dca::linalg::blas:: template <typename Type> void copyRows(int row_size, int n_rows, const int* i_x, const Type* x, int ldx, const int* i_y, Type* y, int ldy, int thread_id, int stream_id) { if (row_size > 0 && n_rows > 0) { checkErrorsCudaDebug(); int bl_x = dca::util::ceilDiv(n_rows, kernels::copy_block_size_x); int bl_y = dca::util::ceilDiv(row_size, kernels::copy_block_size_y); dim3 threads(kernels::copy_block_size_x); dim3 blocks(bl_x, bl_y); hipStream_t stream = dca::linalg::util::getStream(thread_id, stream_id); hipLaunchKernelGGL(( kernels::copyRows), dim3(blocks), dim3(threads), 0, stream, row_size, n_rows, i_x, x, ldx, i_y, y, ldy); checkErrorsCudaDebug(); } } template void copyRows(int row_size, int n_rows, const int* i_x, const float* x, int ldx, const int* i_y, float* y, int ldy, int thread_id, 
int stream_id); template void copyRows(int row_size, int n_rows, const int* i_x, const double* x, int ldx, const int* i_y, double* y, int ldy, int thread_id, int stream_id); template void copyRows(int row_size, int n_rows, const int* i_x, const hipComplex* x, int ldx, const int* i_y, hipComplex* y, int ldy, int thread_id, int stream_id); template void copyRows(int row_size, int n_rows, const int* i_x, const hipDoubleComplex* x, int ldx, const int* i_y, hipDoubleComplex* y, int ldy, int thread_id, int stream_id); template <typename Type> void copyCols(int col_size, int n_cols, const int* j_x, const Type* x, int ldx, const int* j_y, Type* y, int ldy, int thread_id, int stream_id) { if (col_size > 0 && n_cols > 0) { checkErrorsCudaDebug(); int bl_x = dca::util::ceilDiv(col_size, kernels::copy_block_size_x); int bl_y = dca::util::ceilDiv(n_cols, kernels::copy_block_size_y); dim3 threads(kernels::copy_block_size_x); dim3 blocks(bl_x, bl_y); hipStream_t stream = dca::linalg::util::getStream(thread_id, stream_id); hipLaunchKernelGGL(( kernels::copyCols), dim3(blocks), dim3(threads), 0, stream, col_size, n_cols, j_x, x, ldx, j_y, y, ldy); checkErrorsCudaDebug(); } } template void copyCols(int col_size, int n_cols, const int* j_x, const float* x, int ldx, const int* j_y, float* y, int ldy, int thread_id, int stream_id); template void copyCols(int col_size, int n_cols, const int* j_x, const double* x, int ldx, const int* j_y, double* y, int ldy, int thread_id, int stream_id); template void copyCols(int col_size, int n_cols, const int* j_x, const hipComplex* x, int ldx, const int* j_y, hipComplex* y, int ldy, int thread_id, int stream_id); template void copyCols(int col_size, int n_cols, const int* j_x, const hipDoubleComplex* x, int ldx, const int* j_y, hipDoubleComplex* y, int ldy, int thread_id, int stream_id); template <typename Type> void moveLeft(int m, int n, Type* a, int lda) { assert(lda >= m); if (m > 0 && n > 1) { checkErrorsCudaDebug(); int bl_x = dca::util::ceilDiv(m, kernels::move_block_size_x); dim3 threads(kernels::move_block_size_x); dim3 blocks(bl_x); hipLaunchKernelGGL(( kernels::moveLeft), dim3(blocks), dim3(threads), 0, 0, m, n, a, lda); checkErrorsCudaDebug(); } } template void moveLeft(int m, int n, float* a, int lda); template void moveLeft(int m, int n, double* a, int lda); template void moveLeft(int m, int n, hipComplex* a, int lda); template void moveLeft(int m, int n, hipDoubleComplex* a, int lda); template <typename Type> void moveUp(int m, int n, Type* a, int lda) { assert(lda >= m); if (m > 1 && n > 0) { checkErrorsCudaDebug(); int bl_y = dca::util::ceilDiv(n, kernels::move_block_size_y); dim3 threads(kernels::move_block_size_x); dim3 blocks(1, bl_y); hipLaunchKernelGGL(( kernels::moveUp), dim3(blocks), dim3(threads), 0, 0, m, n, a, lda); checkErrorsCudaDebug(); } } template void moveUp(int m, int n, float* a, int lda); template void moveUp(int m, int n, double* a, int lda); template void moveUp(int m, int n, hipComplex* a, int lda); template void moveUp(int m, int n, hipDoubleComplex* a, int lda); template <typename Type> void scaleRows(int row_size, int n_rows, const int* i, const Type* alpha, Type* a, int lda, int thread_id, int stream_id) { if (row_size > 0 && n_rows > 0) { checkErrorsCudaDebug(); int bl_x = dca::util::ceilDiv(n_rows, kernels::scale_block_size_x); int bl_y = dca::util::ceilDiv(row_size, kernels::scale_block_size_y); dim3 threads(kernels::scale_block_size_x, kernels::scale_block_size_y); dim3 blocks(bl_x, bl_y); hipStream_t stream = 
dca::linalg::util::getStream(thread_id, stream_id); hipLaunchKernelGGL(( kernels::scaleRows), dim3(blocks), dim3(threads), 0, stream, row_size, n_rows, i, alpha, a, lda); checkErrorsCudaDebug(); } } template void scaleRows(int row_size, int n_rows, const int* i, const float* alpha, float* a, int lda, int thread_id, int stream_id); template void scaleRows(int row_size, int n_rows, const int* i, const double* alpha, double* a, int lda, int thread_id, int stream_id); template void scaleRows(int row_size, int n_rows, const int* i, const hipComplex* alpha, hipComplex* a, int lda, int thread_id, int stream_id); template void scaleRows(int row_size, int n_rows, const int* i, const hipDoubleComplex* alpha, hipDoubleComplex* a, int lda, int thread_id, int stream_id); template <typename Type> void swapRows(int row_size, int n_rows, const int* i1, const int* i2, Type* a, int lda, int thread_id, int stream_id) { if (row_size > 0 && n_rows > 0) { checkErrorsCudaDebug(); const int bl_x = dca::util::ceilDiv(n_rows, kernels::swap_block_size_x); const int bl_y = dca::util::ceilDiv(row_size, kernels::swap_block_size_y); dim3 threads(kernels::swap_block_size_x, kernels::swap_block_size_y); dim3 blocks(bl_x, bl_y); hipStream_t stream = dca::linalg::util::getStream(thread_id, stream_id); hipLaunchKernelGGL(( kernels::swapRows), dim3(blocks), dim3(threads), 0, stream, row_size, n_rows, i1, i2, a, lda); checkErrorsCudaDebug(); } } template void swapRows(int row_size, int n_rows, const int* i1, const int* i2, float* a, int lda, int thread_id, int stream_id); template void swapRows(int row_size, int n_rows, const int* i1, const int* i2, double* a, int lda, int thread_id, int stream_id); template void swapRows(int row_size, int n_rows, const int* i1, const int* i2, hipComplex* a, int lda, int thread_id, int stream_id); template void swapRows(int row_size, int n_rows, const int* i1, const int* i2, hipDoubleComplex* a, int lda, int thread_id, int stream_id); template <typename Type> void swapCols(int col_size, int n_cols, const int* j1, const int* j2, Type* a, int lda, int thread_id, int stream_id) { if (col_size > 0 && n_cols > 0) { checkErrorsCudaDebug(); const int bl_x = dca::util::ceilDiv(col_size, kernels::swap_block_size_x); const int bl_y = dca::util::ceilDiv(n_cols, kernels::swap_block_size_y); dim3 threads(kernels::swap_block_size_x, kernels::swap_block_size_y); dim3 blocks(bl_x, bl_y); hipStream_t stream = dca::linalg::util::getStream(thread_id, stream_id); hipLaunchKernelGGL(( kernels::swapCols), dim3(blocks), dim3(threads), 0, stream, col_size, n_cols, j1, j2, a, lda); checkErrorsCudaDebug(); } } template void swapCols(int col_size, int n_cols, const int* j1, const int* j2, float* a, int lda, int thread_id, int stream_id); template void swapCols(int col_size, int n_cols, const int* j1, const int* j2, double* a, int lda, int thread_id, int stream_id); template void swapCols(int col_size, int n_cols, const int* j1, const int* j2, hipComplex* a, int lda, int thread_id, int stream_id); template void swapCols(int col_size, int n_cols, const int* j1, const int* j2, hipDoubleComplex* a, int lda, int thread_id, int stream_id); } // blas } // linalg } // dca
0145ea6a1fe18368c28dc1cfb383427f22dd3ed9.cu
// Copyright (C) 2018 ETH Zurich // Copyright (C) 2018 UT-Battelle, LLC // All rights reserved. // // See LICENSE for terms of usage. // See CITATION.md for citation guidelines, if DCA++ is used for scientific publications. // // Author: Giovanni Balduzzi ([email protected]) // Peter Staar ([email protected]) // Raffaele Solca' ([email protected]) // // This file implements kernels_gpu.hpp #include "dca/linalg/blas/kernels_gpu.hpp" #include <cassert> #include <cuComplex.h> #include <cuda_runtime.h> #include "dca/linalg/util/complex_operators_cuda.cu.hpp" #include "dca/linalg/util/error_cuda.hpp" #include "dca/linalg/util/stream_functions.hpp" #include "dca/util/integer_division.hpp" namespace dca { namespace linalg { namespace blas { namespace kernels { // dca::linalg::blas::kernels:: constexpr int copy_block_size_x = 32; constexpr int copy_block_size_y = 8; constexpr int move_block_size_x = 32; constexpr int move_block_size_y = 8; constexpr int scale_block_size_x = 32; constexpr int scale_block_size_y = 32; constexpr int swap_block_size_x = 32; constexpr int swap_block_size_y = 32; template <typename Type> __global__ void copyRows(int row_size, int n_rows, const int* i_x, const Type* x, int ldx, const int* i_y, Type* y, int ldy) { assert(blockDim.y == 1); assert(blockDim.z == 1); assert(blockIdx.z == 0); // Work on BlockDim.x rows and copyrows_block_size_y cols. int ind_i = threadIdx.x + blockIdx.x * blockDim.x; int js = blockIdx.y * copy_block_size_y; int je = min(row_size, (blockIdx.y + 1) * copy_block_size_y); if (ind_i < n_rows) { int iy = i_y[ind_i]; int ix = i_x[ind_i]; for (int j = js; j < je; ++j) y[iy + j * ldy] = x[ix + j * ldx]; } } template <typename Type> __global__ void copyCols(int col_size, int n_cols, const int* j_x, const Type* x, int ldx, const int* j_y, Type* y, int ldy) { assert(blockDim.y == 1); assert(blockDim.z == 1); assert(blockIdx.z == 0); // Work on BlockDim.x rows and copyrows_block_size_y cols. 
int i = threadIdx.x + blockIdx.x * blockDim.x; int ind_js = blockIdx.y * copy_block_size_y; int ind_je = min(n_cols, (blockIdx.y + 1) * copy_block_size_y); if (i < col_size) { for (int ind_j = ind_js; ind_j < ind_je; ++ind_j) y[i + j_y[ind_j] * ldy] = x[i + j_x[ind_j] * ldx]; } } template <typename Type> __global__ void moveLeft(int m, int n, Type* a, int lda) { assert(blockDim.y == 1); assert(blockDim.z == 1); assert(blockIdx.y == 0); assert(blockIdx.z == 0); int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < m) { for (int j = 0; j < n - 1; ++j) a[i + j * lda] = a[i + (j + 1) * lda]; } } template <typename Type> __global__ void moveUp(int m, int n, Type* a, int lda) { assert(blockDim.x == move_block_size_x); assert(blockDim.y == 1); assert(blockDim.z == 1); assert(blockIdx.x == 0); assert(blockIdx.z == 0); __shared__ Type work[move_block_size_x * move_block_size_y]; int ldw = move_block_size_x; int idx = threadIdx.x; int js = blockIdx.y * move_block_size_y; int je = min(n, (blockIdx.y + 1) * move_block_size_y); int jd = je - js; a += lda * js; int m_div = (m - 1) / blockDim.x * blockDim.x; for (int i = 0; i < m_div; i += blockDim.x) { for (int j = 0; j < jd; ++j) work[idx + ldw * j] = a[i + 1 + idx + lda * j]; __syncthreads(); for (int j = 0; j < jd; ++j) a[i + idx + lda * j] = work[idx + ldw * j]; __syncthreads(); } int i = m_div; if (i + idx < m - 1) { for (int j = 0; j < jd; ++j) work[idx + ldw * j] = a[i + 1 + idx + lda * j]; __syncthreads(); for (int j = 0; j < jd; ++j) a[i + idx + lda * j] = work[idx + ldw * j]; } } template <typename Type> __global__ void scaleRows(int row_size, int n_rows, const int* i, const Type* alpha, Type* a, int lda) { const int ind_i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; if (ind_i < n_rows && j < row_size) { a[i[ind_i] + j * lda] *= alpha[ind_i]; } } template <typename Type> __global__ void swapRows(int row_size, int n_rows, const int* i1, const int* i2, Type* a, int lda) { const int ind_i = threadIdx.x + blockIdx.x * blockDim.x; const int ind_j = threadIdx.y + blockIdx.y * blockDim.y; if (ind_i < n_rows && ind_j < row_size) { const Type tmp = a[i1[ind_i] + ind_j * lda]; a[i1[ind_i] + ind_j * lda] = a[i2[ind_i] + ind_j * lda]; a[i2[ind_i] + ind_j * lda] = tmp; } } template <typename Type> __global__ void swapCols(int col_size, int n_cols, const int* j1, const int* j2, Type* a, int lda) { const int ind_i = threadIdx.x + blockIdx.x * blockDim.x; const int ind_j = threadIdx.y + blockIdx.y * blockDim.y; if (ind_i < col_size && ind_j < n_cols) { const Type tmp = a[ind_i + j1[ind_j] * lda]; a[ind_i + j1[ind_j] * lda] = a[ind_i + j2[ind_j] * lda]; a[ind_i + j2[ind_j] * lda] = tmp; } } } // kernels // dca::linalg::blas:: template <typename Type> void copyRows(int row_size, int n_rows, const int* i_x, const Type* x, int ldx, const int* i_y, Type* y, int ldy, int thread_id, int stream_id) { if (row_size > 0 && n_rows > 0) { checkErrorsCudaDebug(); int bl_x = dca::util::ceilDiv(n_rows, kernels::copy_block_size_x); int bl_y = dca::util::ceilDiv(row_size, kernels::copy_block_size_y); dim3 threads(kernels::copy_block_size_x); dim3 blocks(bl_x, bl_y); cudaStream_t stream = dca::linalg::util::getStream(thread_id, stream_id); kernels::copyRows<<<blocks, threads, 0, stream>>>(row_size, n_rows, i_x, x, ldx, i_y, y, ldy); checkErrorsCudaDebug(); } } template void copyRows(int row_size, int n_rows, const int* i_x, const float* x, int ldx, const int* i_y, float* y, int ldy, int thread_id, int stream_id); template void 
copyRows(int row_size, int n_rows, const int* i_x, const double* x, int ldx, const int* i_y, double* y, int ldy, int thread_id, int stream_id); template void copyRows(int row_size, int n_rows, const int* i_x, const cuComplex* x, int ldx, const int* i_y, cuComplex* y, int ldy, int thread_id, int stream_id); template void copyRows(int row_size, int n_rows, const int* i_x, const cuDoubleComplex* x, int ldx, const int* i_y, cuDoubleComplex* y, int ldy, int thread_id, int stream_id); template <typename Type> void copyCols(int col_size, int n_cols, const int* j_x, const Type* x, int ldx, const int* j_y, Type* y, int ldy, int thread_id, int stream_id) { if (col_size > 0 && n_cols > 0) { checkErrorsCudaDebug(); int bl_x = dca::util::ceilDiv(col_size, kernels::copy_block_size_x); int bl_y = dca::util::ceilDiv(n_cols, kernels::copy_block_size_y); dim3 threads(kernels::copy_block_size_x); dim3 blocks(bl_x, bl_y); cudaStream_t stream = dca::linalg::util::getStream(thread_id, stream_id); kernels::copyCols<<<blocks, threads, 0, stream>>>(col_size, n_cols, j_x, x, ldx, j_y, y, ldy); checkErrorsCudaDebug(); } } template void copyCols(int col_size, int n_cols, const int* j_x, const float* x, int ldx, const int* j_y, float* y, int ldy, int thread_id, int stream_id); template void copyCols(int col_size, int n_cols, const int* j_x, const double* x, int ldx, const int* j_y, double* y, int ldy, int thread_id, int stream_id); template void copyCols(int col_size, int n_cols, const int* j_x, const cuComplex* x, int ldx, const int* j_y, cuComplex* y, int ldy, int thread_id, int stream_id); template void copyCols(int col_size, int n_cols, const int* j_x, const cuDoubleComplex* x, int ldx, const int* j_y, cuDoubleComplex* y, int ldy, int thread_id, int stream_id); template <typename Type> void moveLeft(int m, int n, Type* a, int lda) { assert(lda >= m); if (m > 0 && n > 1) { checkErrorsCudaDebug(); int bl_x = dca::util::ceilDiv(m, kernels::move_block_size_x); dim3 threads(kernels::move_block_size_x); dim3 blocks(bl_x); kernels::moveLeft<<<blocks, threads>>>(m, n, a, lda); checkErrorsCudaDebug(); } } template void moveLeft(int m, int n, float* a, int lda); template void moveLeft(int m, int n, double* a, int lda); template void moveLeft(int m, int n, cuComplex* a, int lda); template void moveLeft(int m, int n, cuDoubleComplex* a, int lda); template <typename Type> void moveUp(int m, int n, Type* a, int lda) { assert(lda >= m); if (m > 1 && n > 0) { checkErrorsCudaDebug(); int bl_y = dca::util::ceilDiv(n, kernels::move_block_size_y); dim3 threads(kernels::move_block_size_x); dim3 blocks(1, bl_y); kernels::moveUp<<<blocks, threads>>>(m, n, a, lda); checkErrorsCudaDebug(); } } template void moveUp(int m, int n, float* a, int lda); template void moveUp(int m, int n, double* a, int lda); template void moveUp(int m, int n, cuComplex* a, int lda); template void moveUp(int m, int n, cuDoubleComplex* a, int lda); template <typename Type> void scaleRows(int row_size, int n_rows, const int* i, const Type* alpha, Type* a, int lda, int thread_id, int stream_id) { if (row_size > 0 && n_rows > 0) { checkErrorsCudaDebug(); int bl_x = dca::util::ceilDiv(n_rows, kernels::scale_block_size_x); int bl_y = dca::util::ceilDiv(row_size, kernels::scale_block_size_y); dim3 threads(kernels::scale_block_size_x, kernels::scale_block_size_y); dim3 blocks(bl_x, bl_y); cudaStream_t stream = dca::linalg::util::getStream(thread_id, stream_id); kernels::scaleRows<<<blocks, threads, 0, stream>>>(row_size, n_rows, i, alpha, a, lda); 
checkErrorsCudaDebug(); } } template void scaleRows(int row_size, int n_rows, const int* i, const float* alpha, float* a, int lda, int thread_id, int stream_id); template void scaleRows(int row_size, int n_rows, const int* i, const double* alpha, double* a, int lda, int thread_id, int stream_id); template void scaleRows(int row_size, int n_rows, const int* i, const cuComplex* alpha, cuComplex* a, int lda, int thread_id, int stream_id); template void scaleRows(int row_size, int n_rows, const int* i, const cuDoubleComplex* alpha, cuDoubleComplex* a, int lda, int thread_id, int stream_id); template <typename Type> void swapRows(int row_size, int n_rows, const int* i1, const int* i2, Type* a, int lda, int thread_id, int stream_id) { if (row_size > 0 && n_rows > 0) { checkErrorsCudaDebug(); const int bl_x = dca::util::ceilDiv(n_rows, kernels::swap_block_size_x); const int bl_y = dca::util::ceilDiv(row_size, kernels::swap_block_size_y); dim3 threads(kernels::swap_block_size_x, kernels::swap_block_size_y); dim3 blocks(bl_x, bl_y); cudaStream_t stream = dca::linalg::util::getStream(thread_id, stream_id); kernels::swapRows<<<blocks, threads, 0, stream>>>(row_size, n_rows, i1, i2, a, lda); checkErrorsCudaDebug(); } } template void swapRows(int row_size, int n_rows, const int* i1, const int* i2, float* a, int lda, int thread_id, int stream_id); template void swapRows(int row_size, int n_rows, const int* i1, const int* i2, double* a, int lda, int thread_id, int stream_id); template void swapRows(int row_size, int n_rows, const int* i1, const int* i2, cuComplex* a, int lda, int thread_id, int stream_id); template void swapRows(int row_size, int n_rows, const int* i1, const int* i2, cuDoubleComplex* a, int lda, int thread_id, int stream_id); template <typename Type> void swapCols(int col_size, int n_cols, const int* j1, const int* j2, Type* a, int lda, int thread_id, int stream_id) { if (col_size > 0 && n_cols > 0) { checkErrorsCudaDebug(); const int bl_x = dca::util::ceilDiv(col_size, kernels::swap_block_size_x); const int bl_y = dca::util::ceilDiv(n_cols, kernels::swap_block_size_y); dim3 threads(kernels::swap_block_size_x, kernels::swap_block_size_y); dim3 blocks(bl_x, bl_y); cudaStream_t stream = dca::linalg::util::getStream(thread_id, stream_id); kernels::swapCols<<<blocks, threads, 0, stream>>>(col_size, n_cols, j1, j2, a, lda); checkErrorsCudaDebug(); } } template void swapCols(int col_size, int n_cols, const int* j1, const int* j2, float* a, int lda, int thread_id, int stream_id); template void swapCols(int col_size, int n_cols, const int* j1, const int* j2, double* a, int lda, int thread_id, int stream_id); template void swapCols(int col_size, int n_cols, const int* j1, const int* j2, cuComplex* a, int lda, int thread_id, int stream_id); template void swapCols(int col_size, int n_cols, const int* j1, const int* j2, cuDoubleComplex* a, int lda, int thread_id, int stream_id); } // blas } // linalg } // dca
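copyRows above gathers whole rows of a column-major matrix through two index arrays, one thread per selected row, with blockIdx.y splitting the columns into slabs. The standalone sketch below reproduces just that indexed row-gather, simplified so that each thread copies its full row; the kernel name, sizes and index values are assumptions made for the illustration and are not part of the DCA++ API.

#include <cstdio>
#include <cuda_runtime.h>

// y[i_y[r] + j*ldy] = x[i_x[r] + j*ldx] for each selected row r and column j
__global__ void copySelectedRows(int row_size, int n_rows, const int* i_x, const float* x,
                                 int ldx, const int* i_y, float* y, int ldy)
{
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < n_rows) {
        int ix = i_x[r], iy = i_y[r];
        for (int j = 0; j < row_size; ++j)
            y[iy + j * ldy] = x[ix + j * ldx];
    }
}

int main()
{
    const int lda = 4, n_cols = 3, n_rows = 2;
    float h_x[lda * n_cols];
    for (int k = 0; k < lda * n_cols; ++k) h_x[k] = (float)k;
    int h_ix[n_rows] = {0, 2}; // source rows
    int h_iy[n_rows] = {1, 3}; // destination rows

    float *d_x, *d_y;
    int *d_ix, *d_iy;
    cudaMalloc(&d_x, sizeof(h_x));
    cudaMalloc(&d_y, sizeof(h_x));
    cudaMalloc(&d_ix, sizeof(h_ix));
    cudaMalloc(&d_iy, sizeof(h_iy));
    cudaMemcpy(d_x, h_x, sizeof(h_x), cudaMemcpyHostToDevice);
    cudaMemset(d_y, 0, sizeof(h_x));
    cudaMemcpy(d_ix, h_ix, sizeof(h_ix), cudaMemcpyHostToDevice);
    cudaMemcpy(d_iy, h_iy, sizeof(h_iy), cudaMemcpyHostToDevice);

    copySelectedRows<<<1, 32>>>(n_cols, n_rows, d_ix, d_x, lda, d_iy, d_y, lda);

    float h_y[lda * n_cols];
    cudaMemcpy(h_y, d_y, sizeof(h_y), cudaMemcpyDeviceToHost);
    for (int r = 0; r < lda; ++r) {
        for (int j = 0; j < n_cols; ++j) printf("%5.1f ", h_y[r + j * lda]);
        printf("\n");
    }

    cudaFree(d_x); cudaFree(d_y); cudaFree(d_ix); cudaFree(d_iy);
    return 0;
}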
4c0ffd25d3126c039f07fcbdd956c699fa560f05.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <helper_cuda.h> #include <helper_timer.h> #include "kernel.h" #include "kernel1.h" int device = 0; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runCUDA( float *h_dataA, float* h_dataB, int width, int height, int passes, int threadsPerBlock, int shouldPrint); void runSerial( float * h_dataA, float * h_dataB, int width, int height, int passes, int shouldPrint); void printArray(float *arr, int rows, int cols, int shouldPrint); float * serial (float *a1, float*a2, int width, int height, int passes) ; void initializeArrays(float *a1, float *a2, int width, int height); void usage(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { // jacobi threadsperblock passes width height [p] if(argc < 5 ){ usage(); return 1; } int threadsPerBlock = atoi(argv[1]); int passes = atoi(argv[2]); int width = atoi(argv[3]); int height = atoi(argv[4]); int shouldPrint=0; if(argc == 6 ) { if (argv[5][0]=='p'){ shouldPrint=1; } else { usage(); return 1; } } float * h_dataA= (float *)malloc(width * height * sizeof(float)); float * h_dataB= (float *)malloc(width * height * sizeof(float)); initializeArrays(h_dataA, h_dataB, width, height); if (threadsPerBlock == 0){ runSerial(h_dataA, h_dataB, width, height, passes, shouldPrint); } else { runCUDA(h_dataA, h_dataB, width, height, passes, threadsPerBlock, shouldPrint); } // Clean up Memory free( h_dataA); free( h_dataB); } //////////////////////////////////////////////////////////////////////////////// //! Run the CUDA version //////////////////////////////////////////////////////////////////////////////// void runCUDA( float *h_dataA, float* h_dataB, int width, int height, int passes, int threadsPerBlock, int shouldPrint){ // Use card 0 (See top of file to make sure you are using your assigned device.) checkCudaErrors(hipSetDevice(device)); // To ensure alignment, we'll use the code below to pad rows of the arrays when they are // allocated on the device. size_t pitch; // allocate device memory for data A float* d_dataA; checkCudaErrors( hipMallocPitch( (void**) &d_dataA, &pitch, width * sizeof(float), height)); // copy host memory to device memory for image A checkCudaErrors( hipMemcpy2D( d_dataA, pitch, h_dataA, width * sizeof(float), width * sizeof(float), height, hipMemcpyHostToDevice) ); // repeat for second device array float* d_dataB; checkCudaErrors( hipMallocPitch( (void**) &d_dataB, &pitch, width * sizeof(float), height)); // copy host memory to device memory for image B checkCudaErrors( hipMemcpy2D( d_dataB, pitch, h_dataB, width * sizeof(float), width * sizeof(float), height, hipMemcpyHostToDevice) ); //*************************** // setup CUDA execution parameters int blockHeight; int blockWidth; // When testing with small arrays, this code might be useful. Feel free to change it. if (threadsPerBlock > width - 2 ){ blockWidth = 16 * (int) ceil((width - 2) / 16.0); blockHeight = 1; } else { blockWidth = threadsPerBlock; blockHeight = 1; } int gridWidth = (int) ceil( (width - 2) / (float) blockWidth); int gridHeight = (int) ceil( (height - 2) / (float) blockHeight); // number of blocks required to process all the data. 
int numBlocks = gridWidth * gridHeight; // Each block gets a shared memory region of this size. unsigned int shared_mem_size = ((blockWidth + 2) * 4) * sizeof(float); printf("blockDim.x=%d blockDim.y=%d grid = %d x %d\n", blockWidth, blockHeight, gridWidth, gridHeight); printf("numBlocks = %d, threadsPerBlock = %d shared_mem_size = %d\n", numBlocks, threadsPerBlock, shared_mem_size); if(gridWidth > 65536 || gridHeight > 65536) { fprintf(stderr, "****Error: a block dimension is too large.\n"); } if(threadsPerBlock > 1024) { fprintf(stderr, "****Error: number of threads per block is too large.\n"); } if(shared_mem_size > 49152) { fprintf(stderr, "****Error: shared memory per block is too large.\n"); } // Format the grid, which is a collection of blocks. dim3 grid( gridWidth, gridHeight, 1); // Format the blocks. dim3 threads( blockWidth, blockHeight, 1); printArray(h_dataA, height, width, shouldPrint); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); float * temp; for(int r=0; r<passes; r++){ //execute the kernel hipLaunchKernelGGL(( k1) , dim3(grid), dim3(threads), shared_mem_size , 0, d_dataA, d_dataB, pitch/sizeof(float), width); //TODO:uncomment the following line to use k0, the simple kernel, provived in kernel.cu //k0 <<< grid, threads >>>( d_dataA, d_dataB, pitch/sizeof(float), width); // swap the device data pointers temp = d_dataA; d_dataA = d_dataB; d_dataB = temp; } // check if kernel execution generated an error hipError_t code = hipGetLastError(); if (code != hipSuccess){ printf ("Cuda Kerel Launch error -- %s\n", hipGetErrorString(code)); } hipDeviceSynchronize(); sdkStopTimer(&timer); //checkCudaErrors( cutStopTimer( timer)); // copy result from device to host checkCudaErrors( hipMemcpy2D( h_dataA, width * sizeof(float), d_dataA, pitch, width * sizeof(float), height,hipMemcpyDeviceToHost) ); printArray(h_dataA, height, width, shouldPrint); printf( "Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); // cleanup memory checkCudaErrors(hipFree(d_dataA)); checkCudaErrors(hipFree(d_dataB)); } /* Run the serial jacobi code using the referenced arrays of floats with given width and height for * the specified number of passes. If the final parameter is non-zero, the initial and final states * of the arrays will be printed. In all cases, the execution time will be printed to stdout. * * For the first pass, values will be read from h_dataA and written to h_dataB. For subsequent * passes, the role of the arrays will be reversed. */ void runSerial( float * h_dataA, float * h_dataB, int width, int height, int passes, int shouldPrint){ printf("Running Serial Code.\n"); float * serialResult; printArray(h_dataA, height, width, shouldPrint); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); serialResult = serial(h_dataA, h_dataB, width, height, passes); sdkStopTimer(&timer); printArray(serialResult, height, width, shouldPrint); printf( "Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); } /* Performs the specified number of passes of jacobi iteration on two arrays * of the given width and height. For the first pass, values will be read from * a1 and written to a2. For subsequent passes, the role of the arrays will * be exchanged. In all cases, a pointer to the most recently changed array * is returned. * * For each element, this code computes a weighted average of the neighbors * and then reduces this value by 5% to simulate heat loss. 
There is nothing * mathematically or physically rigorous about this calculation, and it is * simply meant to provide an interesting parallel programming example. */ float * serial (float *a1, float*a2, int width, int height, int passes) { int i,j,p; float * old=a1; float * New=a2; float * temp; for(p=0; p<passes; p++){ for(i=1; i<height-1; i++){ for(j=1; j<width-1; j++){ New[i*width +j] = ( 0.2f * old[i*width + j] + 0.1f * old[(i-1) * width + j ] + //N 0.1f * old[(i-1) * width + (j+1)] + //NE 0.1f * old[ i * width + (j+1)] + //E 0.1f * old[(i+1) * width + (j+1)] + //SE 0.1f * old[(i+1) * width + j ] + //S 0.1f * old[(i+1) * width + (j-1)] + //SW 0.1f * old[ i * width + (j-1)] + //W 0.1f * old[(i-1) * width + (j-1)] //NW ) * 0.95f; } } temp = New; New = old; old = temp; } return old; } /* Initialize the two arrays referenced by the first two parameters in preparation for * jacobi iteration. The width and height of the arrays are given by the integer parameters. * Border elements are set to 5.0 for both arrays, and the interior elements of a1 are * set to 1.0. Interior elements of a2 are not initialized. */ void initializeArrays(float *a1, float *a2, int width, int height){ int i, j; for(i=0; i<height; i++){ for(j=0; j<width; j++){ if(i==0 || j ==0 || i==height-1 || j==width-1){ a1[i*width + j] = 5.0; a2[i*width + j] = 5.0; }else { a1[i*width + j] = 1.0; } } } } /* Print the 2D array of floats referenced by the first parameter. The second and third * parameters specify its dimensions, while the last argument indicates whether printing * is actually descired at all. No output is produced if shouldPrint == 0. */ void printArray(float *arr, int rows, int cols, int shouldPrint){ if (!shouldPrint) return; int i,j; for(i=0; i<rows; i++){ for(j=0; j<cols; j++){ printf("%04.2f ", arr[i*cols + j]); } printf("\n"); } printf("\n"); } /* Prints a short but informative message about program usage.*/ void usage(){ fprintf(stderr, "usage: jacobi threadsperblock passes width height [p]\n"); fprintf(stderr, " (if threadsperblock == 0, serial code is run)\n"); }
4c0ffd25d3126c039f07fcbdd956c699fa560f05.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <helper_cuda.h> #include <helper_timer.h> #include "kernel.h" #include "kernel1.h" int device = 0; //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runCUDA( float *h_dataA, float* h_dataB, int width, int height, int passes, int threadsPerBlock, int shouldPrint); void runSerial( float * h_dataA, float * h_dataB, int width, int height, int passes, int shouldPrint); void printArray(float *arr, int rows, int cols, int shouldPrint); float * serial (float *a1, float*a2, int width, int height, int passes) ; void initializeArrays(float *a1, float *a2, int width, int height); void usage(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { // jacobi threadsperblock passes width height [p] if(argc < 5 ){ usage(); return 1; } int threadsPerBlock = atoi(argv[1]); int passes = atoi(argv[2]); int width = atoi(argv[3]); int height = atoi(argv[4]); int shouldPrint=0; if(argc == 6 ) { if (argv[5][0]=='p'){ shouldPrint=1; } else { usage(); return 1; } } float * h_dataA= (float *)malloc(width * height * sizeof(float)); float * h_dataB= (float *)malloc(width * height * sizeof(float)); initializeArrays(h_dataA, h_dataB, width, height); if (threadsPerBlock == 0){ runSerial(h_dataA, h_dataB, width, height, passes, shouldPrint); } else { runCUDA(h_dataA, h_dataB, width, height, passes, threadsPerBlock, shouldPrint); } // Clean up Memory free( h_dataA); free( h_dataB); } //////////////////////////////////////////////////////////////////////////////// //! Run the CUDA version //////////////////////////////////////////////////////////////////////////////// void runCUDA( float *h_dataA, float* h_dataB, int width, int height, int passes, int threadsPerBlock, int shouldPrint){ // Use card 0 (See top of file to make sure you are using your assigned device.) checkCudaErrors(cudaSetDevice(device)); // To ensure alignment, we'll use the code below to pad rows of the arrays when they are // allocated on the device. size_t pitch; // allocate device memory for data A float* d_dataA; checkCudaErrors( cudaMallocPitch( (void**) &d_dataA, &pitch, width * sizeof(float), height)); // copy host memory to device memory for image A checkCudaErrors( cudaMemcpy2D( d_dataA, pitch, h_dataA, width * sizeof(float), width * sizeof(float), height, cudaMemcpyHostToDevice) ); // repeat for second device array float* d_dataB; checkCudaErrors( cudaMallocPitch( (void**) &d_dataB, &pitch, width * sizeof(float), height)); // copy host memory to device memory for image B checkCudaErrors( cudaMemcpy2D( d_dataB, pitch, h_dataB, width * sizeof(float), width * sizeof(float), height, cudaMemcpyHostToDevice) ); //*************************** // setup CUDA execution parameters int blockHeight; int blockWidth; // When testing with small arrays, this code might be useful. Feel free to change it. if (threadsPerBlock > width - 2 ){ blockWidth = 16 * (int) ceil((width - 2) / 16.0); blockHeight = 1; } else { blockWidth = threadsPerBlock; blockHeight = 1; } int gridWidth = (int) ceil( (width - 2) / (float) blockWidth); int gridHeight = (int) ceil( (height - 2) / (float) blockHeight); // number of blocks required to process all the data. 
int numBlocks = gridWidth * gridHeight; // Each block gets a shared memory region of this size. unsigned int shared_mem_size = ((blockWidth + 2) * 4) * sizeof(float); printf("blockDim.x=%d blockDim.y=%d grid = %d x %d\n", blockWidth, blockHeight, gridWidth, gridHeight); printf("numBlocks = %d, threadsPerBlock = %d shared_mem_size = %d\n", numBlocks, threadsPerBlock, shared_mem_size); if(gridWidth > 65536 || gridHeight > 65536) { fprintf(stderr, "****Error: a block dimension is too large.\n"); } if(threadsPerBlock > 1024) { fprintf(stderr, "****Error: number of threads per block is too large.\n"); } if(shared_mem_size > 49152) { fprintf(stderr, "****Error: shared memory per block is too large.\n"); } // Format the grid, which is a collection of blocks. dim3 grid( gridWidth, gridHeight, 1); // Format the blocks. dim3 threads( blockWidth, blockHeight, 1); printArray(h_dataA, height, width, shouldPrint); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); float * temp; for(int r=0; r<passes; r++){ //execute the kernel k1 <<< grid, threads, shared_mem_size >>>( d_dataA, d_dataB, pitch/sizeof(float), width); //TODO:uncomment the following line to use k0, the simple kernel, provived in kernel.cu //k0 <<< grid, threads >>>( d_dataA, d_dataB, pitch/sizeof(float), width); // swap the device data pointers temp = d_dataA; d_dataA = d_dataB; d_dataB = temp; } // check if kernel execution generated an error cudaError_t code = cudaGetLastError(); if (code != cudaSuccess){ printf ("Cuda Kerel Launch error -- %s\n", cudaGetErrorString(code)); } cudaThreadSynchronize(); sdkStopTimer(&timer); //checkCudaErrors( cutStopTimer( timer)); // copy result from device to host checkCudaErrors( cudaMemcpy2D( h_dataA, width * sizeof(float), d_dataA, pitch, width * sizeof(float), height,cudaMemcpyDeviceToHost) ); printArray(h_dataA, height, width, shouldPrint); printf( "Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); // cleanup memory checkCudaErrors(cudaFree(d_dataA)); checkCudaErrors(cudaFree(d_dataB)); } /* Run the serial jacobi code using the referenced arrays of floats with given width and height for * the specified number of passes. If the final parameter is non-zero, the initial and final states * of the arrays will be printed. In all cases, the execution time will be printed to stdout. * * For the first pass, values will be read from h_dataA and written to h_dataB. For subsequent * passes, the role of the arrays will be reversed. */ void runSerial( float * h_dataA, float * h_dataB, int width, int height, int passes, int shouldPrint){ printf("Running Serial Code.\n"); float * serialResult; printArray(h_dataA, height, width, shouldPrint); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); serialResult = serial(h_dataA, h_dataB, width, height, passes); sdkStopTimer(&timer); printArray(serialResult, height, width, shouldPrint); printf( "Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); } /* Performs the specified number of passes of jacobi iteration on two arrays * of the given width and height. For the first pass, values will be read from * a1 and written to a2. For subsequent passes, the role of the arrays will * be exchanged. In all cases, a pointer to the most recently changed array * is returned. * * For each element, this code computes a weighted average of the neighbors * and then reduces this value by 5% to simulate heat loss. 
There is nothing * mathematically or physically rigorous about this calculation, and it is * simply meant to provide an interesting parallel programming example. */ float * serial (float *a1, float*a2, int width, int height, int passes) { int i,j,p; float * old=a1; float * New=a2; float * temp; for(p=0; p<passes; p++){ for(i=1; i<height-1; i++){ for(j=1; j<width-1; j++){ New[i*width +j] = ( 0.2f * old[i*width + j] + 0.1f * old[(i-1) * width + j ] + //N 0.1f * old[(i-1) * width + (j+1)] + //NE 0.1f * old[ i * width + (j+1)] + //E 0.1f * old[(i+1) * width + (j+1)] + //SE 0.1f * old[(i+1) * width + j ] + //S 0.1f * old[(i+1) * width + (j-1)] + //SW 0.1f * old[ i * width + (j-1)] + //W 0.1f * old[(i-1) * width + (j-1)] //NW ) * 0.95f; } } temp = New; New = old; old = temp; } return old; } /* Initialize the two arrays referenced by the first two parameters in preparation for * jacobi iteration. The width and height of the arrays are given by the integer parameters. * Border elements are set to 5.0 for both arrays, and the interior elements of a1 are * set to 1.0. Interior elements of a2 are not initialized. */ void initializeArrays(float *a1, float *a2, int width, int height){ int i, j; for(i=0; i<height; i++){ for(j=0; j<width; j++){ if(i==0 || j ==0 || i==height-1 || j==width-1){ a1[i*width + j] = 5.0; a2[i*width + j] = 5.0; }else { a1[i*width + j] = 1.0; } } } } /* Print the 2D array of floats referenced by the first parameter. The second and third * parameters specify its dimensions, while the last argument indicates whether printing * is actually descired at all. No output is produced if shouldPrint == 0. */ void printArray(float *arr, int rows, int cols, int shouldPrint){ if (!shouldPrint) return; int i,j; for(i=0; i<rows; i++){ for(j=0; j<cols; j++){ printf("%04.2f ", arr[i*cols + j]); } printf("\n"); } printf("\n"); } /* Prints a short but informative message about program usage.*/ void usage(){ fprintf(stderr, "usage: jacobi threadsperblock passes width height [p]\n"); fprintf(stderr, " (if threadsperblock == 0, serial code is run)\n"); }
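kernel.h and kernel1.h are not part of this record, so the k0 and k1 kernels launched by runCUDA are not visible here. Purely as a sketch of what a simple global-memory version of the update described in serial() might look like (an assumption, not the project's actual k0 or k1), one interior jacobi pass could be written as:

#include <cuda_runtime.h>

// one pass over the interior points, reading from old and writing to New;
// weights match serial(): 0.2 on the centre, 0.1 on each of the eight
// neighbours, and the sum is scaled by 0.95 to model heat loss
__global__ void jacobiSimple(const float* old, float* New, int pitchFloats,
                             int width, int height)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x + 1; // column, skipping the border
    int i = blockIdx.y * blockDim.y + threadIdx.y + 1; // row, skipping the border
    if (i < height - 1 && j < width - 1) {
        New[i * pitchFloats + j] =
            (0.2f * old[i * pitchFloats + j] +
             0.1f * old[(i - 1) * pitchFloats + j] +        // N
             0.1f * old[(i - 1) * pitchFloats + (j + 1)] +  // NE
             0.1f * old[i * pitchFloats + (j + 1)] +        // E
             0.1f * old[(i + 1) * pitchFloats + (j + 1)] +  // SE
             0.1f * old[(i + 1) * pitchFloats + j] +        // S
             0.1f * old[(i + 1) * pitchFloats + (j - 1)] +  // SW
             0.1f * old[i * pitchFloats + (j - 1)] +        // W
             0.1f * old[(i - 1) * pitchFloats + (j - 1)]) * // NW
            0.95f;
    }
}

A driver like runCUDA would launch this over the (width-2) by (height-2) interior, pass pitch/sizeof(float) for pitchFloats, and swap the two device pointers between passes, just as the loop above does for k1.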
7dbe322bceca34b93f4d13b82d2da9dafe7e49be.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include "Matlab_like.cuh" #include "Utilities.cuh" /********/ /* MAIN */ /********/ int main() { const int N = 20; float a = 3.87f; float b = 7.11f; float *h_arr = (float *)malloc(N * sizeof(float)); float *d_arr = linspace(a, b, N); gpuErrchk(hipMemcpy(h_arr, d_arr, N * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < N; i++) printf("%f\n", h_arr[i]); return 0; }
7dbe322bceca34b93f4d13b82d2da9dafe7e49be.cu
#include <cstdio> #include "Matlab_like.cuh" #include "Utilities.cuh" /********/ /* MAIN */ /********/ int main() { const int N = 20; float a = 3.87f; float b = 7.11f; float *h_arr = (float *)malloc(N * sizeof(float)); float *d_arr = linspace(a, b, N); gpuErrchk(cudaMemcpy(h_arr, d_arr, N * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < N; i++) printf("%f\n", h_arr[i]); return 0; }
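Matlab_like.cuh and Utilities.cuh are not included in this record, so linspace and gpuErrchk are only used above, not defined. A minimal guess at what a device-side linspace could look like is sketched below; the kernel and helper names are invented, and the real implementation in Matlab_like.cuh may well differ.

#include <cstdio>
#include <cuda_runtime.h>

// out[k] = a + k * step, i.e. N evenly spaced samples on [a, b]
__global__ void linspaceKernel(float* out, float a, float step, int N)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k < N) out[k] = a + k * step;
}

float* linspaceSketch(float a, float b, int N)
{
    float* d_arr = nullptr;
    cudaMalloc(&d_arr, N * sizeof(float));
    float step = (N > 1) ? (b - a) / (float)(N - 1) : 0.f;
    linspaceKernel<<<(N + 255) / 256, 256>>>(d_arr, a, step, N);
    return d_arr;
}

int main()
{
    const int N = 20;
    float h_arr[N];
    float* d_arr = linspaceSketch(3.87f, 7.11f, N);
    cudaMemcpy(h_arr, d_arr, N * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) printf("%f\n", h_arr[i]);
    cudaFree(d_arr);
    return 0;
}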
94994c0885d9849fb351f22c8e8c87e3284f8289.hip
// !!! This is a file automatically generated by hipify!!! /** * @file tcam.cu * @brief CUDA implementation of TCAM * * @author cjeong */ #include <hip/hip_runtime.h> #include "tcam.h" __global__ void tcam_kernel(unsigned *dev_memx, unsigned *dev_memy, unsigned a, unsigned *dev_d, unsigned *dev_be, unsigned fe, unsigned bse, unsigned clk, unsigned read, unsigned write, unsigned search, unsigned colwrite, unsigned bitpos, unsigned *dev_q, unsigned *dev_mll) { __shared__ int cache[CUDA_NTHREADS]; int tid = threadIdx.x + blockIdx.x * blockDim.x; /* STEP #1: Initialize the cache entries to 0. */ cache[threadIdx.x] = 0; __syncthreads(); /* STEP #2: Each thread is responsible for checking one 32-bit word of a field in a TCAM word. In this step, we check whether these 32 bits of the memory word match the search data. */ unsigned thr_bse = bse & (1 << (blockDim.x / TCAM_NBANKWORDS)); if (tid < CUDA_N && thr_bse && clk) { unsigned thr_d = dev_d[threadIdx.x]; unsigned thr_be = dev_be[threadIdx.x]; unsigned thr_fe = fe & (1 << (threadIdx.x / TCAM_NFIELDS)); unsigned thr_memx = (tid < CUDA_N) ? dev_memx[tid] : (unsigned) -1; unsigned thr_memy = (tid < CUDA_N) ? dev_memy[tid] : (unsigned) -1; cache[threadIdx.x] = ((~thr_fe | ~thr_be | (~thr_memx & ~thr_memy) | (thr_be & ~thr_d & thr_memx & ~thr_memy) | (thr_be & thr_d & ~thr_memx & thr_memy)) == (unsigned) -1); } __syncthreads(); /* STEP #3: Perform reduction to determine MLL for this 32-bit word. After reduction, cache[0] will contain 1 iff MLL == 1 for this TCAM word. */ int cacheIdx = threadIdx.x; int i = blockDim.x/2; while (i != 0) { if (cacheIdx < i) cache[cacheIdx] = (cache[cacheIdx] == 1) && (cache[cacheIdx+i] == 1); __syncthreads(); i = i/2; } /* STEP #4: the first thread in the thread block writes MLL result to global mem. */ if (threadIdx.x == 0) dev_mll[blockIdx.x] = cache[0]; } void tcam(unsigned *dev_memx, unsigned *dev_memy, unsigned a, unsigned *dev_d, unsigned *dev_be, unsigned fe, unsigned bse, unsigned clk, unsigned read, unsigned write, unsigned search, unsigned colwrite, unsigned bitpos, unsigned *dev_q, unsigned *dev_mll) { hipLaunchKernelGGL(( tcam_kernel), dim3(CUDA_NBLOCKS), dim3(CUDA_NTHREADS), 0, 0, dev_memx, dev_memy, a, dev_d, dev_be, fe, bse, clk, read, write, search, colwrite, bitpos, dev_q, dev_mll); }
94994c0885d9849fb351f22c8e8c87e3284f8289.cu
/** * @file tcam.cu * @brief CUDA implementation of TCAM * * @author cjeong */ #include <cuda.h> #include "tcam.h" __global__ void tcam_kernel(unsigned *dev_memx, unsigned *dev_memy, unsigned a, unsigned *dev_d, unsigned *dev_be, unsigned fe, unsigned bse, unsigned clk, unsigned read, unsigned write, unsigned search, unsigned colwrite, unsigned bitpos, unsigned *dev_q, unsigned *dev_mll) { __shared__ int cache[CUDA_NTHREADS]; int tid = threadIdx.x + blockIdx.x * blockDim.x; /* STEP #1: Initialize the cache entries to 0. */ cache[threadIdx.x] = 0; __syncthreads(); /* STEP #2: Each thread is responsible for checking one 32-bit word of a field in a TCAM word. In this step, we check whether these 32 bits of the memory word match the search data. */ unsigned thr_bse = bse & (1 << (blockDim.x / TCAM_NBANKWORDS)); if (tid < CUDA_N && thr_bse && clk) { unsigned thr_d = dev_d[threadIdx.x]; unsigned thr_be = dev_be[threadIdx.x]; unsigned thr_fe = fe & (1 << (threadIdx.x / TCAM_NFIELDS)); unsigned thr_memx = (tid < CUDA_N) ? dev_memx[tid] : (unsigned) -1; unsigned thr_memy = (tid < CUDA_N) ? dev_memy[tid] : (unsigned) -1; cache[threadIdx.x] = ((~thr_fe | ~thr_be | (~thr_memx & ~thr_memy) | (thr_be & ~thr_d & thr_memx & ~thr_memy) | (thr_be & thr_d & ~thr_memx & thr_memy)) == (unsigned) -1); } __syncthreads(); /* STEP #3: Perform reduction to determine MLL for this 32-bit word. After reduction, cache[0] will contain 1 iff MLL == 1 for this TCAM word. */ int cacheIdx = threadIdx.x; int i = blockDim.x/2; while (i != 0) { if (cacheIdx < i) cache[cacheIdx] = (cache[cacheIdx] == 1) && (cache[cacheIdx+i] == 1); __syncthreads(); i = i/2; } /* STEP #4: the first thread in the thread block writes MLL result to global mem. */ if (threadIdx.x == 0) dev_mll[blockIdx.x] = cache[0]; } void tcam(unsigned *dev_memx, unsigned *dev_memy, unsigned a, unsigned *dev_d, unsigned *dev_be, unsigned fe, unsigned bse, unsigned clk, unsigned read, unsigned write, unsigned search, unsigned colwrite, unsigned bitpos, unsigned *dev_q, unsigned *dev_mll) { tcam_kernel<<<CUDA_NBLOCKS, CUDA_NTHREADS>>>(dev_memx, dev_memy, a, dev_d, dev_be, fe, bse, clk, read, write, search, colwrite, bitpos, dev_q, dev_mll); }
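STEP #3 of tcam_kernel is a standard block-level tree reduction with logical AND as the operator, so a single mismatching 32-bit slice clears the whole match line. The self-contained sketch below isolates just that reduction pattern; the kernel name, block size and test data are invented for the example.

#include <cstdio>
#include <cuda_runtime.h>

#define NTHREADS 256 // power of two, so the halving loop covers every element

// block-wide logical AND over per-thread match flags, one result per block
__global__ void andReduce(const int* flags, int n, int* blockResult)
{
    __shared__ int cache[NTHREADS];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    cache[threadIdx.x] = (tid < n) ? flags[tid] : 1; // pad with the neutral element
    __syncthreads();

    for (int i = blockDim.x / 2; i != 0; i /= 2) {
        if (threadIdx.x < i)
            cache[threadIdx.x] = cache[threadIdx.x] && cache[threadIdx.x + i];
        __syncthreads();
    }
    if (threadIdx.x == 0) blockResult[blockIdx.x] = cache[0];
}

int main()
{
    const int n = NTHREADS;
    int h_flags[n], h_res;
    for (int i = 0; i < n; i++) h_flags[i] = 1;
    h_flags[37] = 0; // one mismatch is enough to clear the match line

    int *d_flags, *d_res;
    cudaMalloc(&d_flags, n * sizeof(int));
    cudaMalloc(&d_res, sizeof(int));
    cudaMemcpy(d_flags, h_flags, n * sizeof(int), cudaMemcpyHostToDevice);

    andReduce<<<1, NTHREADS>>>(d_flags, n, d_res);
    cudaMemcpy(&h_res, d_res, sizeof(int), cudaMemcpyDeviceToHost);
    printf("match line = %d\n", h_res); // prints 0

    cudaFree(d_flags);
    cudaFree(d_res);
    return 0;
}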
5b44c817813e8edcab8fd408e6db9579e378c429.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* NOTES: 1) These functions expect a real input. Weights are length "length", input/outputs length "length * batchSize". 2) The tmp variable must have space 2 * length * batchSize. 3) There is still some performance work to be done. For questions/comments, please contact Jeremy Appleyard ([email protected]) */ // TODO: // Fusion for performance // Shared memory setup/final for smaller problems to avoid strided access // Get rid of static cuFFT plans. #include <lua.hpp> #include <luaT.h> #include "cutorch_state.h" #include "THH.h" #include <stdio.h> #include <hipfft.h> #include <hipfftXt.h> #include <stdio.h> #include <hipfft.h> #include <hipfftXt.h> // Useful to have #define PI 3.14159265359f #define ROOT2 1.4142135623730951f template<typename Dtype, int tblockSize, bool forward> __global__ void DCT_setup(int length, int batchSize, int groupSize, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ in, Dtype * __restrict__ out) { int batchID = blockIdx.x; int groupID = blockIdx.y; if (forward) in += length * batchID; else in += length * (batchID * groupSize + groupID); out += 2 * length * (batchID * groupSize + groupID); for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; hipfftComplex val; int index; if (element < length / 2) { index = element * 2; } else { index = length - 2 * (element - length / 2) - 1; } // For forward we're relying on cache hits for perf here. 
if (element < length / 2) { val.x = ((float*)(in))[element * 2]; } else { val.x = ((float*)(in))[length - 2 * (element - length / 2) - 1]; } val.y = 0.f; if (A != NULL) { val.x *= A[groupID * length + index]; if (Ab != NULL) { val.x += Ab[groupID * length + index]; } } ((hipfftComplex*)(out))[element] = val; } } template<typename Dtype, int tblockSize> __global__ void DCT_final(int length, int batchSize, int groupSize, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ in, Dtype * __restrict__ out) { int batchID = blockIdx.x; int groupID = blockIdx.y; in += 2 * length * (batchID * groupSize + groupID); out += length * (batchID * groupSize + groupID); for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; hipfftComplex val = ((hipfftComplex*)(in))[element]; hipfftComplex val2; hipfftComplex ret; __sincosf(element * PI / (2.f * (length)), &(val2.y), &(val2.x)); val2.y = -val2.y; ret.x = val.x * val2.x - val.y * val2.y; // Normalisation if (element == 0) { ret.x *= rsqrt((float)length); } else { ret.x *= ROOT2 * rsqrt((float)length); } if (A != NULL) { ret.x *= A[groupID * length + element]; if (Ab != NULL) { ret.x += Ab[groupID * length + element]; } } ((float*)(out))[element] = ret.x; } } /* template<typename Dtype, int tblockSize> __global__ void IDCT_setup(int length, int batchSize, int groupSize, const Dtype * __restrict__ D, const Dtype * __restrict__ in, Dtype * __restrict__ out) { int batchID = blockIdx.x; in += batchID * length; out += 2 * batchID * length; for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; hipfftComplex val; float re_in = ((float*)(in))[element]; if (D != NULL) { re_in *= D[element]; } // Un-normalisation if (element == 0) { re_in *= rsqrtf((float)length); } else { re_in *= ROOT2 * rsqrtf((float)length); } float2 val2; __sincosf(element * PI / (2.f * length), &(val2.y), &(val2.x)); val.x = re_in * val2.x; val.y = -re_in * val2.y; ((hipfftComplex*)(out))[element] = val; } } */ template<typename Dtype, int tblockSize, bool accumulate> __global__ void IDCT_final(int length, int batchSize, int groupSize, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ in, Dtype * __restrict__ out) { int batchID = blockIdx.x; int groupID = blockIdx.y; in += 2 * length * (batchID * groupSize + groupID); out += length * (batchID * groupSize + groupID); for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; int index; if (element < length / 2) { index = element * 2; } else { index = length - 2 * (element - length / 2) - 1; } hipfftComplex val = ((hipfftComplex*)(in))[element]; // "A" for backward pass if (A != NULL) { val.x *= A[groupID * length + index]; if (Ab != NULL) { val.x += Ab[groupID * length + index]; } } if (accumulate) { ((float*)(out))[index] += val.x; } else { ((float*)(out))[index] = val.x; } } } template<typename Dtype, int tblockSize, bool accumulate> __global__ void DCT_final_IDCT_setup( int length, int batchSize, int groupSize, const Dtype * __restrict__ D, const Dtype * __restrict__ Db, const Dtype * __restrict__ in, Dtype * __restrict__ out, Dtype * __restrict__ deltaMid) { int batchID = blockIdx.x; int groupID = blockIdx.y; in += 2 * length * (batchID * groupSize + groupID); out += 2 * length * (batchID * groupSize + groupID); if 
(deltaMid) deltaMid += length * (batchID * groupSize + groupID); for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; hipfftComplex val = ((hipfftComplex*)(in))[element]; hipfftComplex val2; hipfftComplex ret; __sincosf(element * PI / (2.f * (length)), &(val2.y), &(val2.x)); val2.y = -val2.y; ret.x = val.x * val2.x - val.y * val2.y; // Normalisation if (element == 0) { ret.x *= rsqrt((float)length); } else { ret.x *= ROOT2 * rsqrt((float)length); } float re_in = ret.x; if (D != NULL) { re_in *= D[groupID * length + element]; if (Db != NULL) { re_in += Db[groupID * length + element]; } } if (deltaMid) { if (accumulate) deltaMid[element] += re_in; else deltaMid[element] = re_in; } // Un-normalisation if (element == 0) { re_in *= rsqrtf((float)length); } else { re_in *= ROOT2 * rsqrtf((float)length); } __sincosf(element * PI / (2.f * length), &(val2.y), &(val2.x)); val.x = re_in * val2.x; val.y = -re_in * val2.y; ((hipfftComplex*)(out))[element] = val; } } template<typename Dtype, int tblockSize> __global__ void updateWeights(int length, int batchSize, int groupSize, const Dtype * __restrict__ D, const Dtype * __restrict__ input, const Dtype * __restrict__ gradOutput, Dtype * __restrict__ delta_D, Dtype * __restrict__ delta_Db) { int batchID = blockIdx.x; int groupID = blockIdx.y; input += length * batchID; gradOutput += length * (batchID * groupSize + groupID); D += length * groupID; delta_D += length * groupID; delta_Db += length * groupID; for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; float val = gradOutput[element] / D[element]; atomicAdd((float*)(&(delta_D[element])), (float)(val * input[element])); atomicAdd((float*)(&(delta_Db[element])), val); } } template<typename Dtype> void acdc_fp_fast2( hipStream_t stream, int length, int batchSize, int groupSize, const Dtype * __restrict__ in, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ D, const Dtype * __restrict__ Db, Dtype * __restrict__ out, Dtype * __restrict__ tmp1, Dtype * __restrict__ tmp2) { // This is awful. TODO: Store the plans more sensibly. static hipfftHandle plan; static int planLength = -1; static int planBatchSize = -1; if (planLength != length || planBatchSize != batchSize * groupSize) { if (planLength != -1 && planBatchSize != -1) { hipfftDestroy(plan); } hipfftPlan1d(&plan, length, HIPFFT_C2C, batchSize * groupSize); planLength = length; planBatchSize = batchSize * groupSize; } hipfftSetStream(plan, stream); const int blockSize = 128; dim3 blockDim; dim3 gridDim; blockDim.x = blockSize; gridDim.x = batchSize; gridDim.y = groupSize; // Two DCTs required. Inverse is handled in the custom setup. hipLaunchKernelGGL(( DCT_setup<Dtype, blockSize, true>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, A, Ab, in, tmp1); hipfftExecC2C(plan, (hipfftComplex*)tmp1, (hipfftComplex*)tmp2, HIPFFT_FORWARD); hipLaunchKernelGGL(( DCT_final_IDCT_setup<Dtype, blockSize, false>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL); hipfftExecC2C(plan, (hipfftComplex*)tmp1, (hipfftComplex*)tmp2, HIPFFT_FORWARD); hipLaunchKernelGGL(( IDCT_final<Dtype, blockSize, false>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, NULL, NULL, tmp2, out); } // NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in. 
template<typename Dtype> void acdc_bp_fast2( hipStream_t stream, int length, int batchSize, int groupSize, Dtype * __restrict__ delta_in, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ D, const Dtype * __restrict__ Db, const Dtype * __restrict__ delta_out, Dtype * __restrict__ delta_mid, Dtype * __restrict__ tmp1, Dtype * __restrict__ tmp2) { // This is awful. Don't do this. TODO: Store the plans more sensibly. static hipfftHandle plan; static int planLength = -1; static int planBatchSize = -1; if (planLength != length || planBatchSize != batchSize * groupSize) { if (planLength != -1 && planBatchSize != -1) { hipfftDestroy(plan); } hipfftPlan1d(&plan, length, HIPFFT_C2C, batchSize * groupSize); planLength = length; planBatchSize = batchSize * groupSize; } hipfftSetStream(plan, stream); const int blockSize = 128; dim3 blockDim; dim3 gridDim; blockDim.x = blockSize; gridDim.x = batchSize; gridDim.y = groupSize; // Backward through CD hipLaunchKernelGGL(( DCT_setup<Dtype, 128, false>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, NULL, NULL, delta_out, tmp1); hipfftExecC2C(plan, (hipfftComplex*)tmp1, (hipfftComplex*)tmp2, HIPFFT_FORWARD); hipLaunchKernelGGL(( DCT_final_IDCT_setup<Dtype, 128, false>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid); // Backward through CA hipfftExecC2C(plan, (hipfftComplex*)tmp1, (hipfftComplex*)tmp2, HIPFFT_FORWARD); hipLaunchKernelGGL(( IDCT_final<Dtype, 128, false>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, A, NULL, tmp2, delta_in); } template<typename Dtype> void acdc_bp_acc_fast2( hipStream_t stream, int length, int batchSize, int groupSize, Dtype * __restrict__ delta_in, Dtype * __restrict__ delta_mid, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ D, //const Dtype * __restrict__ Db, const Dtype * __restrict__ inputA, Dtype * __restrict__ inputD, Dtype * __restrict__ delta_A, Dtype * __restrict__ delta_Ab, Dtype * __restrict__ delta_D, Dtype * __restrict__ delta_Db, Dtype * __restrict__ tmp1, Dtype * __restrict__ tmp2) { // This is awful. Don't do this. TODO: Store the plans more sensibly. 
static hipfftHandle plan; static int planLength = -1; static int planBatchSize = -1; if (planLength != length || planBatchSize != batchSize * groupSize) { if (planLength != -1 && planBatchSize != -1) { hipfftDestroy(plan); } hipfftPlan1d(&plan, length, HIPFFT_C2C, batchSize * groupSize); planLength = length; planBatchSize = batchSize * groupSize; } hipfftSetStream(plan, stream); const int blockSize = 128; dim3 blockDim; dim3 gridDim; blockDim.x = blockSize; gridDim.x = batchSize; gridDim.y = groupSize; hipLaunchKernelGGL(( updateWeights<Dtype, 128>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab); // Forward thorugh AC to calculate input going into D hipLaunchKernelGGL(( DCT_setup<Dtype, 128, true>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, A, Ab, inputA, tmp1); hipfftExecC2C(plan, (hipfftComplex*)tmp1, (hipfftComplex*)tmp2, HIPFFT_FORWARD); hipLaunchKernelGGL(( DCT_final<Dtype, 128>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, NULL, NULL, tmp2, inputD); hipLaunchKernelGGL(( updateWeights<Dtype, 128>) , dim3(gridDim), dim3(blockDim), 0, stream , length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db); } #define Tensor THCudaTensor #define TensorTypename "torch.CudaTensor" #define Tensor_(fn) THCudaTensor_ ## fn int Tensor_(Fast_ACDC_updateOutput)(lua_State* L) { THCState *state = getCutorchState(L); Tensor* input = static_cast<Tensor*>( luaT_checkudata(L, 2, TensorTypename)); Tensor* A = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "A", TensorTypename)); Tensor* Ab = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Ab", TensorTypename)); Tensor* D = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "D", TensorTypename)); Tensor* Db = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Db", TensorTypename)); Tensor* tmp1 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp1", TensorTypename)); Tensor* tmp2 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp2", TensorTypename)); Tensor* output = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "output", TensorTypename)); int batch_size; int input_size; int group_size; if (Tensor_(nDimension)(state, input) == 1) { batch_size = 1; group_size = 1; input_size = Tensor_(size)(state, input, 0); } else if (Tensor_(nDimension)(state, input) == 2) { batch_size = Tensor_(size)(state, input, 0); group_size = 1; input_size = Tensor_(size)(state, input, 1); } else if (Tensor_(nDimension)(state, input) == 3) { batch_size = Tensor_(size)(state, input, 0); group_size = Tensor_(size)(state, output, 1); input_size = Tensor_(size)(state, input, 2); } else { luaL_error(L, "input must have 1 or 2 or 3 dimensions"); } hipStream_t stream = THCState_getCurrentStream(state); acdc_fp_fast2<float>( stream, input_size, batch_size, group_size, Tensor_(data)(state, input), Tensor_(data)(state, A), Tensor_(data)(state, Ab), Tensor_(data)(state, D), Tensor_(data)(state, Db), Tensor_(data)(state, output), Tensor_(data)(state, tmp1), Tensor_(data)(state, tmp2)); hipDeviceSynchronize(); return 1; } int Tensor_(Fast_ACDC_updateGradInput)(lua_State* L) { THCState *state = getCutorchState(L); Tensor* input = static_cast<Tensor*>( luaT_checkudata(L, 2, TensorTypename)); Tensor* gradOutput = static_cast<Tensor*>( luaT_checkudata(L, 3, TensorTypename)); Tensor* A = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "A", TensorTypename)); Tensor* Ab = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Ab", 
TensorTypename)); Tensor* D = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "D", TensorTypename)); Tensor* Db = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Db", TensorTypename)); Tensor* tmp1 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp1", TensorTypename)); Tensor* tmp2 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp2", TensorTypename)); Tensor* delta_mid = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "delta_mid", TensorTypename)); Tensor* gradInput = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradInput", TensorTypename)); int batch_size; int input_size; int group_size; if (Tensor_(nDimension)(state, gradOutput) == 1) { batch_size = 1; group_size = 1; input_size = Tensor_(size)(state, gradOutput, 0); } else if (Tensor_(nDimension)(state, gradOutput) == 2) { batch_size = Tensor_(size)(state, gradOutput, 0); group_size = 1; input_size = Tensor_(size)(state, gradOutput, 1); } else if (Tensor_(nDimension)(state, gradOutput) == 3) { batch_size = Tensor_(size)(state, gradOutput, 0); group_size = Tensor_(size)(state, gradOutput, 1); input_size = Tensor_(size)(state, gradOutput, 2); } else { luaL_error(L, "input must have 1 or 2 or 3 dimensions"); } hipStream_t stream = THCState_getCurrentStream(state); acdc_bp_fast2<float>( stream, input_size, batch_size, group_size, Tensor_(data)(state, gradInput), Tensor_(data)(state, A), Tensor_(data)(state, Ab), Tensor_(data)(state, D), Tensor_(data)(state, Db), Tensor_(data)(state, gradOutput), Tensor_(data)(state, delta_mid), Tensor_(data)(state, tmp1), Tensor_(data)(state, tmp2)); return 1; } int Tensor_(Fast_ACDC_accGradParams)(lua_State* L) { THCState *state = getCutorchState(L); Tensor* input = static_cast<Tensor*>( luaT_checkudata(L, 2, TensorTypename)); Tensor* gradOutput = static_cast<Tensor*>( luaT_checkudata(L, 3, TensorTypename)); Tensor* A = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "A", TensorTypename)); Tensor* Ab = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Ab", TensorTypename)); Tensor* D = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "D", TensorTypename)); Tensor* Db = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Db", TensorTypename)); Tensor* tmp1 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp1", TensorTypename)); Tensor* tmp2 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp2", TensorTypename)); Tensor* inputD = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "activationsD", TensorTypename)); Tensor* gradA = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradA", TensorTypename)); Tensor* gradD = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradD", TensorTypename)); Tensor* gradAb = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradAb", TensorTypename)); Tensor* gradDb = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradDb", TensorTypename)); Tensor* delta_mid = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "delta_mid", TensorTypename)); Tensor* gradInput = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradInput", TensorTypename)); int outputIdx = lua_gettop(L); int batch_size; int input_size; int group_size; if (Tensor_(nDimension)(state, gradInput) == 1) { batch_size = 1; group_size = 1; input_size = Tensor_(size)(state, gradInput, 0); } else if (Tensor_(nDimension)(state, gradInput) == 2) { batch_size = Tensor_(size)(state, gradInput, 0); group_size = 1; input_size = Tensor_(size)(state, gradInput, 1); } else if (Tensor_(nDimension)(state, gradInput) == 3) { batch_size = 
Tensor_(size)(state, gradInput, 0); group_size = Tensor_(size)(state, gradInput, 1); input_size = Tensor_(size)(state, gradInput, 2); } else { luaL_error(L, "input must have 1 or 2 or 3 dimensions"); } hipStream_t stream = THCState_getCurrentStream(state); acdc_bp_acc_fast2<float>( stream, input_size, batch_size, group_size, Tensor_(data)(state, gradInput), Tensor_(data)(state, delta_mid), Tensor_(data)(state, A), Tensor_(data)(state, Ab), Tensor_(data)(state, D), //Tensor_(data)(state, Db), Tensor_(data)(state, input), // inputA Tensor_(data)(state, inputD), Tensor_(data)(state, gradA), Tensor_(data)(state, gradAb), Tensor_(data)(state, gradD), Tensor_(data)(state, gradDb), Tensor_(data)(state, tmp1), Tensor_(data)(state, tmp2)); lua_pushvalue(L, outputIdx); return 1; } static const struct luaL_Reg Tensor_(Fast_ACDC_functions_)[] = { {"Fast_ACDC_updateOutput", Tensor_(Fast_ACDC_updateOutput)}, {"Fast_ACDC_updateGradInput", Tensor_(Fast_ACDC_updateGradInput)}, {"Fast_ACDC_accGradParams", Tensor_(Fast_ACDC_accGradParams)}, {NULL, NULL} }; namespace acdc { void Tensor_(initFastACDC)(lua_State* L) { luaT_pushmetatable(L, TensorTypename); luaT_registeratname(L, Tensor_(Fast_ACDC_functions_), "nn"); lua_pop(L, 1); } }
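The DCT_setup / DCT_final pair above implements the classic "DCT-II via a same-length complex FFT" scheme: even/odd reorder the input, take a length-N complex transform, multiply bin k by exp(-i*pi*k/(2N)), and keep the real part with orthonormal scaling. The CPU reference below is a cross-check of that arithmetic only; it is not part of the original source, and the naive O(N^2) DFT merely stands in for cufft/hipfft to keep the sketch self-contained.

#include <cmath>
#include <complex>
#include <cstdio>
#include <vector>

// Reference DCT-II computed the same way as DCT_setup + (FFT) + DCT_final.
static std::vector<float> dct2_reference(const std::vector<float>& x)
{
  const int n = static_cast<int>(x.size());
  const float PI_F = 3.14159265359f;
  std::vector<float> v(n), y(n);
  for (int k = 0; k < n / 2; ++k) {        // same index mapping as DCT_setup
    v[k] = x[2 * k];
    v[n - 1 - k] = x[2 * k + 1];
  }
  for (int k = 0; k < n; ++k) {
    std::complex<float> acc(0.f, 0.f);
    for (int j = 0; j < n; ++j)            // naive DFT stands in for the FFT call
      acc += v[j] * std::exp(std::complex<float>(0.f, -2.f * PI_F * j * k / n));
    const std::complex<float> tw = std::exp(std::complex<float>(0.f, -PI_F * k / (2.f * n)));
    const float scale = (k == 0) ? 1.f / std::sqrt(float(n)) : std::sqrt(2.f / n);
    y[k] = scale * (acc * tw).real();      // matches DCT_final's normalisation
  }
  return y;
}

int main()
{
  std::vector<float> x = {1, 2, 3, 4, 5, 6, 7, 8};
  for (float c : dct2_reference(x)) std::printf("%f\n", c);
  return 0;
}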
5b44c817813e8edcab8fd408e6db9579e378c429.cu
/* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* NOTES: 1) These functions expect a real input. Weights are length "length", input/outputs length "length * batchSize". 2) The tmp variable must have space 2 * length * batchSize. 3) There is still some performance work to be done. For questions/comments, please contact Jeremy Appleyard ([email protected]) */ // TODO: // Fusion for performance // Shared memory setup/final for smaller problems to avoid strided access // Get rid of static cuFFT plans. #include <lua.hpp> #include <luaT.h> #include "cutorch_state.h" #include "THC.h" #include <stdio.h> #include <cufft.h> #include <cufftXt.h> #include <stdio.h> #include <cufft.h> #include <cufftXt.h> // Useful to have #define PI 3.14159265359f #define ROOT2 1.4142135623730951f template<typename Dtype, int tblockSize, bool forward> __global__ void DCT_setup(int length, int batchSize, int groupSize, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ in, Dtype * __restrict__ out) { int batchID = blockIdx.x; int groupID = blockIdx.y; if (forward) in += length * batchID; else in += length * (batchID * groupSize + groupID); out += 2 * length * (batchID * groupSize + groupID); for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; cufftComplex val; int index; if (element < length / 2) { index = element * 2; } else { index = length - 2 * (element - length / 2) - 1; } // For forward we're relying on cache hits for perf here. 
if (element < length / 2) { val.x = ((float*)(in))[element * 2]; } else { val.x = ((float*)(in))[length - 2 * (element - length / 2) - 1]; } val.y = 0.f; if (A != NULL) { val.x *= A[groupID * length + index]; if (Ab != NULL) { val.x += Ab[groupID * length + index]; } } ((cufftComplex*)(out))[element] = val; } } template<typename Dtype, int tblockSize> __global__ void DCT_final(int length, int batchSize, int groupSize, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ in, Dtype * __restrict__ out) { int batchID = blockIdx.x; int groupID = blockIdx.y; in += 2 * length * (batchID * groupSize + groupID); out += length * (batchID * groupSize + groupID); for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; cufftComplex val = ((cufftComplex*)(in))[element]; cufftComplex val2; cufftComplex ret; __sincosf(element * PI / (2.f * (length)), &(val2.y), &(val2.x)); val2.y = -val2.y; ret.x = val.x * val2.x - val.y * val2.y; // Normalisation if (element == 0) { ret.x *= rsqrt((float)length); } else { ret.x *= ROOT2 * rsqrt((float)length); } if (A != NULL) { ret.x *= A[groupID * length + element]; if (Ab != NULL) { ret.x += Ab[groupID * length + element]; } } ((float*)(out))[element] = ret.x; } } /* template<typename Dtype, int tblockSize> __global__ void IDCT_setup(int length, int batchSize, int groupSize, const Dtype * __restrict__ D, const Dtype * __restrict__ in, Dtype * __restrict__ out) { int batchID = blockIdx.x; in += batchID * length; out += 2 * batchID * length; for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; cufftComplex val; float re_in = ((float*)(in))[element]; if (D != NULL) { re_in *= D[element]; } // Un-normalisation if (element == 0) { re_in *= rsqrtf((float)length); } else { re_in *= ROOT2 * rsqrtf((float)length); } float2 val2; __sincosf(element * PI / (2.f * length), &(val2.y), &(val2.x)); val.x = re_in * val2.x; val.y = -re_in * val2.y; ((cufftComplex*)(out))[element] = val; } } */ template<typename Dtype, int tblockSize, bool accumulate> __global__ void IDCT_final(int length, int batchSize, int groupSize, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ in, Dtype * __restrict__ out) { int batchID = blockIdx.x; int groupID = blockIdx.y; in += 2 * length * (batchID * groupSize + groupID); out += length * (batchID * groupSize + groupID); for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; int index; if (element < length / 2) { index = element * 2; } else { index = length - 2 * (element - length / 2) - 1; } cufftComplex val = ((cufftComplex*)(in))[element]; // "A" for backward pass if (A != NULL) { val.x *= A[groupID * length + index]; if (Ab != NULL) { val.x += Ab[groupID * length + index]; } } if (accumulate) { ((float*)(out))[index] += val.x; } else { ((float*)(out))[index] = val.x; } } } template<typename Dtype, int tblockSize, bool accumulate> __global__ void DCT_final_IDCT_setup( int length, int batchSize, int groupSize, const Dtype * __restrict__ D, const Dtype * __restrict__ Db, const Dtype * __restrict__ in, Dtype * __restrict__ out, Dtype * __restrict__ deltaMid) { int batchID = blockIdx.x; int groupID = blockIdx.y; in += 2 * length * (batchID * groupSize + groupID); out += 2 * length * (batchID * groupSize + groupID); if (deltaMid) 
deltaMid += length * (batchID * groupSize + groupID); for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; cufftComplex val = ((cufftComplex*)(in))[element]; cufftComplex val2; cufftComplex ret; __sincosf(element * PI / (2.f * (length)), &(val2.y), &(val2.x)); val2.y = -val2.y; ret.x = val.x * val2.x - val.y * val2.y; // Normalisation if (element == 0) { ret.x *= rsqrt((float)length); } else { ret.x *= ROOT2 * rsqrt((float)length); } float re_in = ret.x; if (D != NULL) { re_in *= D[groupID * length + element]; if (Db != NULL) { re_in += Db[groupID * length + element]; } } if (deltaMid) { if (accumulate) deltaMid[element] += re_in; else deltaMid[element] = re_in; } // Un-normalisation if (element == 0) { re_in *= rsqrtf((float)length); } else { re_in *= ROOT2 * rsqrtf((float)length); } __sincosf(element * PI / (2.f * length), &(val2.y), &(val2.x)); val.x = re_in * val2.x; val.y = -re_in * val2.y; ((cufftComplex*)(out))[element] = val; } } template<typename Dtype, int tblockSize> __global__ void updateWeights(int length, int batchSize, int groupSize, const Dtype * __restrict__ D, const Dtype * __restrict__ input, const Dtype * __restrict__ gradOutput, Dtype * __restrict__ delta_D, Dtype * __restrict__ delta_Db) { int batchID = blockIdx.x; int groupID = blockIdx.y; input += length * batchID; gradOutput += length * (batchID * groupSize + groupID); D += length * groupID; delta_D += length * groupID; delta_Db += length * groupID; for (int i = 0; i < (length + tblockSize - 1) / tblockSize; i++) { int element = i * tblockSize + threadIdx.x; if (element >= length) return; float val = gradOutput[element] / D[element]; atomicAdd((float*)(&(delta_D[element])), (float)(val * input[element])); atomicAdd((float*)(&(delta_Db[element])), val); } } template<typename Dtype> void acdc_fp_fast2( cudaStream_t stream, int length, int batchSize, int groupSize, const Dtype * __restrict__ in, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ D, const Dtype * __restrict__ Db, Dtype * __restrict__ out, Dtype * __restrict__ tmp1, Dtype * __restrict__ tmp2) { // This is awful. TODO: Store the plans more sensibly. static cufftHandle plan; static int planLength = -1; static int planBatchSize = -1; if (planLength != length || planBatchSize != batchSize * groupSize) { if (planLength != -1 && planBatchSize != -1) { cufftDestroy(plan); } cufftPlan1d(&plan, length, CUFFT_C2C, batchSize * groupSize); planLength = length; planBatchSize = batchSize * groupSize; } cufftSetStream(plan, stream); const int blockSize = 128; dim3 blockDim; dim3 gridDim; blockDim.x = blockSize; gridDim.x = batchSize; gridDim.y = groupSize; // Two DCTs required. Inverse is handled in the custom setup. DCT_setup<Dtype, blockSize, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, Ab, in, tmp1); cufftExecC2C(plan, (cufftComplex*)tmp1, (cufftComplex*)tmp2, CUFFT_FORWARD); DCT_final_IDCT_setup<Dtype, blockSize, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL); cufftExecC2C(plan, (cufftComplex*)tmp1, (cufftComplex*)tmp2, CUFFT_FORWARD); IDCT_final<Dtype, blockSize, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, tmp2, out); } // NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in. 
template<typename Dtype> void acdc_bp_fast2( cudaStream_t stream, int length, int batchSize, int groupSize, Dtype * __restrict__ delta_in, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ D, const Dtype * __restrict__ Db, const Dtype * __restrict__ delta_out, Dtype * __restrict__ delta_mid, Dtype * __restrict__ tmp1, Dtype * __restrict__ tmp2) { // This is awful. Don't do this. TODO: Store the plans more sensibly. static cufftHandle plan; static int planLength = -1; static int planBatchSize = -1; if (planLength != length || planBatchSize != batchSize * groupSize) { if (planLength != -1 && planBatchSize != -1) { cufftDestroy(plan); } cufftPlan1d(&plan, length, CUFFT_C2C, batchSize * groupSize); planLength = length; planBatchSize = batchSize * groupSize; } cufftSetStream(plan, stream); const int blockSize = 128; dim3 blockDim; dim3 gridDim; blockDim.x = blockSize; gridDim.x = batchSize; gridDim.y = groupSize; // Backward through CD DCT_setup<Dtype, 128, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, delta_out, tmp1); cufftExecC2C(plan, (cufftComplex*)tmp1, (cufftComplex*)tmp2, CUFFT_FORWARD); DCT_final_IDCT_setup<Dtype, 128, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid); // Backward through CA cufftExecC2C(plan, (cufftComplex*)tmp1, (cufftComplex*)tmp2, CUFFT_FORWARD); IDCT_final<Dtype, 128, false> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, NULL, tmp2, delta_in); } template<typename Dtype> void acdc_bp_acc_fast2( cudaStream_t stream, int length, int batchSize, int groupSize, Dtype * __restrict__ delta_in, Dtype * __restrict__ delta_mid, const Dtype * __restrict__ A, const Dtype * __restrict__ Ab, const Dtype * __restrict__ D, //const Dtype * __restrict__ Db, const Dtype * __restrict__ inputA, Dtype * __restrict__ inputD, Dtype * __restrict__ delta_A, Dtype * __restrict__ delta_Ab, Dtype * __restrict__ delta_D, Dtype * __restrict__ delta_Db, Dtype * __restrict__ tmp1, Dtype * __restrict__ tmp2) { // This is awful. Don't do this. TODO: Store the plans more sensibly. 
static cufftHandle plan; static int planLength = -1; static int planBatchSize = -1; if (planLength != length || planBatchSize != batchSize * groupSize) { if (planLength != -1 && planBatchSize != -1) { cufftDestroy(plan); } cufftPlan1d(&plan, length, CUFFT_C2C, batchSize * groupSize); planLength = length; planBatchSize = batchSize * groupSize; } cufftSetStream(plan, stream); const int blockSize = 128; dim3 blockDim; dim3 gridDim; blockDim.x = blockSize; gridDim.x = batchSize; gridDim.y = groupSize; updateWeights<Dtype, 128> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab); // Forward thorugh AC to calculate input going into D DCT_setup<Dtype, 128, true> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, A, Ab, inputA, tmp1); cufftExecC2C(plan, (cufftComplex*)tmp1, (cufftComplex*)tmp2, CUFFT_FORWARD); DCT_final<Dtype, 128> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, NULL, NULL, tmp2, inputD); updateWeights<Dtype, 128> <<< gridDim, blockDim, 0, stream >>> ( length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db); } #define Tensor THCudaTensor #define TensorTypename "torch.CudaTensor" #define Tensor_(fn) THCudaTensor_ ## fn int Tensor_(Fast_ACDC_updateOutput)(lua_State* L) { THCState *state = getCutorchState(L); Tensor* input = static_cast<Tensor*>( luaT_checkudata(L, 2, TensorTypename)); Tensor* A = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "A", TensorTypename)); Tensor* Ab = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Ab", TensorTypename)); Tensor* D = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "D", TensorTypename)); Tensor* Db = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Db", TensorTypename)); Tensor* tmp1 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp1", TensorTypename)); Tensor* tmp2 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp2", TensorTypename)); Tensor* output = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "output", TensorTypename)); int batch_size; int input_size; int group_size; if (Tensor_(nDimension)(state, input) == 1) { batch_size = 1; group_size = 1; input_size = Tensor_(size)(state, input, 0); } else if (Tensor_(nDimension)(state, input) == 2) { batch_size = Tensor_(size)(state, input, 0); group_size = 1; input_size = Tensor_(size)(state, input, 1); } else if (Tensor_(nDimension)(state, input) == 3) { batch_size = Tensor_(size)(state, input, 0); group_size = Tensor_(size)(state, output, 1); input_size = Tensor_(size)(state, input, 2); } else { luaL_error(L, "input must have 1 or 2 or 3 dimensions"); } cudaStream_t stream = THCState_getCurrentStream(state); acdc_fp_fast2<float>( stream, input_size, batch_size, group_size, Tensor_(data)(state, input), Tensor_(data)(state, A), Tensor_(data)(state, Ab), Tensor_(data)(state, D), Tensor_(data)(state, Db), Tensor_(data)(state, output), Tensor_(data)(state, tmp1), Tensor_(data)(state, tmp2)); cudaDeviceSynchronize(); return 1; } int Tensor_(Fast_ACDC_updateGradInput)(lua_State* L) { THCState *state = getCutorchState(L); Tensor* input = static_cast<Tensor*>( luaT_checkudata(L, 2, TensorTypename)); Tensor* gradOutput = static_cast<Tensor*>( luaT_checkudata(L, 3, TensorTypename)); Tensor* A = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "A", TensorTypename)); Tensor* Ab = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Ab", TensorTypename)); Tensor* D = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "D", TensorTypename)); Tensor* Db = 
static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Db", TensorTypename)); Tensor* tmp1 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp1", TensorTypename)); Tensor* tmp2 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp2", TensorTypename)); Tensor* delta_mid = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "delta_mid", TensorTypename)); Tensor* gradInput = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradInput", TensorTypename)); int batch_size; int input_size; int group_size; if (Tensor_(nDimension)(state, gradOutput) == 1) { batch_size = 1; group_size = 1; input_size = Tensor_(size)(state, gradOutput, 0); } else if (Tensor_(nDimension)(state, gradOutput) == 2) { batch_size = Tensor_(size)(state, gradOutput, 0); group_size = 1; input_size = Tensor_(size)(state, gradOutput, 1); } else if (Tensor_(nDimension)(state, gradOutput) == 3) { batch_size = Tensor_(size)(state, gradOutput, 0); group_size = Tensor_(size)(state, gradOutput, 1); input_size = Tensor_(size)(state, gradOutput, 2); } else { luaL_error(L, "input must have 1 or 2 or 3 dimensions"); } cudaStream_t stream = THCState_getCurrentStream(state); acdc_bp_fast2<float>( stream, input_size, batch_size, group_size, Tensor_(data)(state, gradInput), Tensor_(data)(state, A), Tensor_(data)(state, Ab), Tensor_(data)(state, D), Tensor_(data)(state, Db), Tensor_(data)(state, gradOutput), Tensor_(data)(state, delta_mid), Tensor_(data)(state, tmp1), Tensor_(data)(state, tmp2)); return 1; } int Tensor_(Fast_ACDC_accGradParams)(lua_State* L) { THCState *state = getCutorchState(L); Tensor* input = static_cast<Tensor*>( luaT_checkudata(L, 2, TensorTypename)); Tensor* gradOutput = static_cast<Tensor*>( luaT_checkudata(L, 3, TensorTypename)); Tensor* A = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "A", TensorTypename)); Tensor* Ab = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Ab", TensorTypename)); Tensor* D = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "D", TensorTypename)); Tensor* Db = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "Db", TensorTypename)); Tensor* tmp1 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp1", TensorTypename)); Tensor* tmp2 = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "tmp2", TensorTypename)); Tensor* inputD = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "activationsD", TensorTypename)); Tensor* gradA = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradA", TensorTypename)); Tensor* gradD = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradD", TensorTypename)); Tensor* gradAb = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradAb", TensorTypename)); Tensor* gradDb = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradDb", TensorTypename)); Tensor* delta_mid = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "delta_mid", TensorTypename)); Tensor* gradInput = static_cast<Tensor*>( luaT_getfieldcheckudata(L, 1, "gradInput", TensorTypename)); int outputIdx = lua_gettop(L); int batch_size; int input_size; int group_size; if (Tensor_(nDimension)(state, gradInput) == 1) { batch_size = 1; group_size = 1; input_size = Tensor_(size)(state, gradInput, 0); } else if (Tensor_(nDimension)(state, gradInput) == 2) { batch_size = Tensor_(size)(state, gradInput, 0); group_size = 1; input_size = Tensor_(size)(state, gradInput, 1); } else if (Tensor_(nDimension)(state, gradInput) == 3) { batch_size = Tensor_(size)(state, gradInput, 0); group_size = Tensor_(size)(state, gradInput, 1); input_size = Tensor_(size)(state, 
gradInput, 2); } else { luaL_error(L, "input must have 1 or 2 or 3 dimensions"); } cudaStream_t stream = THCState_getCurrentStream(state); acdc_bp_acc_fast2<float>( stream, input_size, batch_size, group_size, Tensor_(data)(state, gradInput), Tensor_(data)(state, delta_mid), Tensor_(data)(state, A), Tensor_(data)(state, Ab), Tensor_(data)(state, D), //Tensor_(data)(state, Db), Tensor_(data)(state, input), // inputA Tensor_(data)(state, inputD), Tensor_(data)(state, gradA), Tensor_(data)(state, gradAb), Tensor_(data)(state, gradD), Tensor_(data)(state, gradDb), Tensor_(data)(state, tmp1), Tensor_(data)(state, tmp2)); lua_pushvalue(L, outputIdx); return 1; } static const struct luaL_Reg Tensor_(Fast_ACDC_functions_)[] = { {"Fast_ACDC_updateOutput", Tensor_(Fast_ACDC_updateOutput)}, {"Fast_ACDC_updateGradInput", Tensor_(Fast_ACDC_updateGradInput)}, {"Fast_ACDC_accGradParams", Tensor_(Fast_ACDC_accGradParams)}, {NULL, NULL} }; namespace acdc { void Tensor_(initFastACDC)(lua_State* L) { luaT_pushmetatable(L, TensorTypename); luaT_registeratname(L, Tensor_(Fast_ACDC_functions_), "nn"); lua_pop(L, 1); } }
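The "This is awful ... Store the plans more sensibly" TODO repeated in the three host functions above refers to the file-static cufftHandle that is re-planned whenever (length, batchSize * groupSize) changes and never released at exit. One conventional fix, sketched here as an illustration rather than as code from the original module, is a plan cache keyed by those two values; the class name and structure are assumptions.

// Hedged sketch of a cuFFT plan cache; not part of the original source.
#include <cufft.h>
#include <map>
#include <utility>

struct PlanCache {
  std::map<std::pair<int, int>, cufftHandle> plans;

  cufftHandle get(int length, int batch) {
    const std::pair<int, int> key(length, batch);
    auto it = plans.find(key);
    if (it != plans.end()) return it->second;
    cufftHandle plan;
    cufftPlan1d(&plan, length, CUFFT_C2C, batch);   // same plan type the code uses
    plans[key] = plan;
    return plan;
  }

  ~PlanCache() {
    for (auto &kv : plans) cufftDestroy(kv.second);
  }
};

With a single shared instance, cufftSetStream(cache.get(length, batchSize * groupSize), stream) would replace the static-plan block in each of acdc_fp_fast2, acdc_bp_fast2, and acdc_bp_acc_fast2 while keeping the existing stream behaviour.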
bfe7518dbdf50f96736fe9f04ceac3201550c080.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// The translation unit for reduction `mean`

#include "reduction_functions_hip.cuh"
#include "compound.cuh"

gdf_scalar cudf::reduction::mean(gdf_column const& col, gdf_dtype const output_dtype,
                                 hipStream_t stream)
{
    using reducer =
        cudf::reduction::compound::element_type_dispatcher<cudf::reduction::op::mean>;
    return cudf::type_dispatcher(col.dtype, reducer(), col, output_dtype,
                                 /* ddof is not used for mean*/ 1, stream);
}
bfe7518dbdf50f96736fe9f04ceac3201550c080.cu
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// The translation unit for reduction `mean`

#include "reduction_functions.cuh"
#include "compound.cuh"

gdf_scalar cudf::reduction::mean(gdf_column const& col, gdf_dtype const output_dtype,
                                 cudaStream_t stream)
{
    using reducer =
        cudf::reduction::compound::element_type_dispatcher<cudf::reduction::op::mean>;
    return cudf::type_dispatcher(col.dtype, reducer(), col, output_dtype,
                                 /* ddof is not used for mean*/ 1, stream);
}
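For readers unfamiliar with the compound-reduction machinery being dispatched above: mean reduces to sum(valid elements) / valid_count, which is why ddof can be hard-coded to 1. The standalone Thrust sketch below illustrates that arithmetic for a null-free column of doubles; it deliberately bypasses the cudf dispatch path and its values are illustrative only.

// Hedged illustration, not cudf code.
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <cstdio>
#include <vector>

int main()
{
  std::vector<double> h = {1.0, 2.0, 3.0, 4.0};
  thrust::device_vector<double> col(h.begin(), h.end());
  double sum  = thrust::reduce(col.begin(), col.end(), 0.0);
  double mean = sum / static_cast<double>(col.size());   // ddof plays no role for mean
  std::printf("mean = %f\n", mean);                       // 2.5
  return 0;
}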
8a7c42448747b4a8a8dd50955f3330fccd142e8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lusol.h" __global__ void LEVEL_CSC_SYNC_0(int n, int *dp, int *jlev, int *tail); #define ZERO_ONE 0x0000000000000001 //[0, 1] /* in computers, it is stored as [low high] : [1, 0] */ __forceinline__ __device__ __host__ int *ull_low(unsigned long long int *x) { return ((int*) x); } __forceinline__ __device__ __host__ int *ull_high(unsigned long long int *x) { return ((int*) x + 1); } #if 0 __forceinline__ __device__ static void atomicInc_ULL_Low(unsigned long long int *address, int *old_low, int *old_high) { volatile unsigned long long *vaddr = address; unsigned long long old_ull = *vaddr, assumed_ull; int assumed_low, assumed_high; do { assumed_ull = old_ull; assumed_low = *(ull_low (&assumed_ull)); assumed_high = *(ull_high(&assumed_ull)); unsigned long long int new_ull = assumed_low < assumed_high ? assumed_ull + ZERO_ONE : assumed_ull; old_ull = atomicCAS(address, assumed_ull, new_ull); } while (old_ull != assumed_ull); *old_low = assumed_low; *old_high = assumed_high; //*old_low = *(ull_low (&old_ull)); //*old_high = *(ull_high(&old_ull)); } #else __forceinline__ __device__ static void atomicInc_ULL_Low(unsigned long long int *address, int *old_low, int *old_high) { volatile unsigned long long *vaddr = address; unsigned long long old_ull = *vaddr, assumed_ull; int assumed_low, assumed_high; do { assumed_ull = old_ull; assumed_low = *(ull_low (&assumed_ull)); assumed_high = *(ull_high(&assumed_ull)); if (assumed_low < assumed_high) { old_ull = atomicCAS(address, assumed_ull, assumed_ull + ZERO_ONE); } } while (old_ull != assumed_ull); *old_low = assumed_low; *old_high = assumed_high; //*old_low = *(ull_low (&old_ull)); //*old_high = *(ull_high(&old_ull)); } #endif __global__ void TOPO_CSC_L(int n, int *ib, int *jb, int *db, int *dp, int *jlev, unsigned long long *ull) { // thread lane in each warp const int lane = threadIdx.x & (WARP - 1); // local warp id const int wlane = threadIdx.x / WARP; int i, first, last; volatile __shared__ int s_first[BLOCKDIM / WARP]; volatile __shared__ int s_last [BLOCKDIM / WARP]; volatile int *vjlev = jlev; do { if (lane == 0) { atomicInc_ULL_Low(ull, &first, &last); s_first[wlane] = first; s_last [wlane] = last; } first = s_first[wlane]; last = s_last [wlane]; if (first < last) { while ((i = vjlev[first]) == 0); --i; int q1 = db[i] + 1; int q2 = ib[i+1]; for (int j = q1 + lane; j < q2; j += WARP) { int k = jb[j-1]-1; int old = atomicSub(&dp[k], 1); if (old == 1) { int p = atomicAdd(ull_high(ull), 1); vjlev[p] = k + 1; } } } } while (first < n); } __global__ void TOPO_CSC_U(int n, int *ib, int *jb, int *db, int *dp, int *jlev, unsigned long long *ull) { // thread lane in each warp const int lane = threadIdx.x & (WARP - 1); // local warp id const int wlane = threadIdx.x / WARP; int i, first, last; volatile __shared__ int s_first[BLOCKDIM / WARP]; volatile __shared__ int s_last [BLOCKDIM / WARP]; volatile int *vjlev = jlev; do { if (lane == 0) { atomicInc_ULL_Low(ull, &first, &last); s_first[wlane] = first; s_last [wlane] = last; } first = s_first[wlane]; last = s_last [wlane]; if (first < last) { while ((i = vjlev[first]) == 0); --i; int q1 = ib[i]; int q2 = db[i]; for (int j = q1 + lane; j < q2; j += WARP) { int k = jb[j-1]-1; int old = atomicSub(&dp[k], 1); if (old == 1) { int p = atomicAdd(ull_high(ull), 1); vjlev[p] = k + 1; } } } } while (first < n); } void makeTopoCSC(int n, int *d_ib, int *d_jb, int *d_db, int *d_dp, int *d_jlevL, int *d_jlevU) { int 
gDim; int *d_dpL = d_dp; int *d_dpU = d_dp + n; unsigned long long *d_ull; hipMalloc((void **)&d_ull, sizeof(unsigned long long)); int *d_last = ull_high(d_ull); int *d_first = ull_low (d_ull); int nthreads = 8 * WARP; // L hipMemset(d_ull, 0, sizeof(unsigned long long)); hipMemset(d_jlevL, 0, n*sizeof(int)); gDim = (n + BLOCKDIM - 1) / BLOCKDIM; hipLaunchKernelGGL(( LEVEL_CSC_SYNC_0), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_dpL, d_jlevL, d_last); gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM; hipLaunchKernelGGL(( TOPO_CSC_L), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ib, d_jb, d_db, d_dpL, d_jlevL, d_ull); // U hipMemset(d_ull, 0, sizeof(unsigned long long)); hipMemset(d_jlevU, 0, n*sizeof(int)); gDim = (n + BLOCKDIM - 1) / BLOCKDIM; hipLaunchKernelGGL(( LEVEL_CSC_SYNC_0), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_dpU, d_jlevU, d_last); gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM; hipLaunchKernelGGL(( TOPO_CSC_U), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ib, d_jb, d_db, d_dpU, d_jlevU, d_ull); hipFree(d_ull); } void checktopo(int n, int *ib, int *jb, int *db, int *d_jlevL, int *d_jlevU, int *d_dp) { int *jlevL = (int *) malloc(n*sizeof(int)); int *jlevU = (int *) malloc(n*sizeof(int)); int *dp = (int *) malloc(2*n*sizeof(int)); hipMemcpy(jlevL, d_jlevL, n*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(jlevU, d_jlevU, n*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(dp, d_dp, 2*n*sizeof(int), hipMemcpyDeviceToHost); int *dpL = dp; int *dpU = dp + n; //for (int i = 0; i < n; i++) { printf("%d ", jlevL[i]); } printf("\n"); //for (int i = 0; i < n; i++) { printf("%d ", jlevU[i]); } printf("\n"); for (int i = 0; i < n; i++) { int jl = jlevL[i]; int ju = jlevU[i]; if (jl < 1 || jl > n) { printf("topo error: jl = %d\n", jl); exit(0); } if (ju < 1 || ju > n) { printf("topo error: ju = %d\n", ju); exit(0); } if (dpL[jl-1] != 0) { printf("topo error: dpL[%d] = %d\n", jl-1, dpL[jl-1]); exit(0); } if (dpU[ju-1] != 0) { printf("topo error: dpU[%d] = %d\n", ju-1, dpU[ju-1]); exit(0); } for (int j = db[jl-1]+1; j < ib[jl]; j++) { int k = jb[j-1]-1; dpL[k]--; } for (int j = ib[ju-1]; j < db[ju-1]; j++) { int k = jb[j-1]-1; dpU[k]--; } } free(jlevL); free(jlevU); free(dp); }
8a7c42448747b4a8a8dd50955f3330fccd142e8d.cu
#include "lusol.h" __global__ void LEVEL_CSC_SYNC_0(int n, int *dp, int *jlev, int *tail); #define ZERO_ONE 0x0000000000000001 //[0, 1] /* in computers, it is stored as [low high] : [1, 0] */ __forceinline__ __device__ __host__ int *ull_low(unsigned long long int *x) { return ((int*) x); } __forceinline__ __device__ __host__ int *ull_high(unsigned long long int *x) { return ((int*) x + 1); } #if 0 __forceinline__ __device__ static void atomicInc_ULL_Low(unsigned long long int *address, int *old_low, int *old_high) { volatile unsigned long long *vaddr = address; unsigned long long old_ull = *vaddr, assumed_ull; int assumed_low, assumed_high; do { assumed_ull = old_ull; assumed_low = *(ull_low (&assumed_ull)); assumed_high = *(ull_high(&assumed_ull)); unsigned long long int new_ull = assumed_low < assumed_high ? assumed_ull + ZERO_ONE : assumed_ull; old_ull = atomicCAS(address, assumed_ull, new_ull); } while (old_ull != assumed_ull); *old_low = assumed_low; *old_high = assumed_high; //*old_low = *(ull_low (&old_ull)); //*old_high = *(ull_high(&old_ull)); } #else __forceinline__ __device__ static void atomicInc_ULL_Low(unsigned long long int *address, int *old_low, int *old_high) { volatile unsigned long long *vaddr = address; unsigned long long old_ull = *vaddr, assumed_ull; int assumed_low, assumed_high; do { assumed_ull = old_ull; assumed_low = *(ull_low (&assumed_ull)); assumed_high = *(ull_high(&assumed_ull)); if (assumed_low < assumed_high) { old_ull = atomicCAS(address, assumed_ull, assumed_ull + ZERO_ONE); } } while (old_ull != assumed_ull); *old_low = assumed_low; *old_high = assumed_high; //*old_low = *(ull_low (&old_ull)); //*old_high = *(ull_high(&old_ull)); } #endif __global__ void TOPO_CSC_L(int n, int *ib, int *jb, int *db, int *dp, int *jlev, unsigned long long *ull) { // thread lane in each warp const int lane = threadIdx.x & (WARP - 1); // local warp id const int wlane = threadIdx.x / WARP; int i, first, last; volatile __shared__ int s_first[BLOCKDIM / WARP]; volatile __shared__ int s_last [BLOCKDIM / WARP]; volatile int *vjlev = jlev; do { if (lane == 0) { atomicInc_ULL_Low(ull, &first, &last); s_first[wlane] = first; s_last [wlane] = last; } first = s_first[wlane]; last = s_last [wlane]; if (first < last) { while ((i = vjlev[first]) == 0); --i; int q1 = db[i] + 1; int q2 = ib[i+1]; for (int j = q1 + lane; j < q2; j += WARP) { int k = jb[j-1]-1; int old = atomicSub(&dp[k], 1); if (old == 1) { int p = atomicAdd(ull_high(ull), 1); vjlev[p] = k + 1; } } } } while (first < n); } __global__ void TOPO_CSC_U(int n, int *ib, int *jb, int *db, int *dp, int *jlev, unsigned long long *ull) { // thread lane in each warp const int lane = threadIdx.x & (WARP - 1); // local warp id const int wlane = threadIdx.x / WARP; int i, first, last; volatile __shared__ int s_first[BLOCKDIM / WARP]; volatile __shared__ int s_last [BLOCKDIM / WARP]; volatile int *vjlev = jlev; do { if (lane == 0) { atomicInc_ULL_Low(ull, &first, &last); s_first[wlane] = first; s_last [wlane] = last; } first = s_first[wlane]; last = s_last [wlane]; if (first < last) { while ((i = vjlev[first]) == 0); --i; int q1 = ib[i]; int q2 = db[i]; for (int j = q1 + lane; j < q2; j += WARP) { int k = jb[j-1]-1; int old = atomicSub(&dp[k], 1); if (old == 1) { int p = atomicAdd(ull_high(ull), 1); vjlev[p] = k + 1; } } } } while (first < n); } void makeTopoCSC(int n, int *d_ib, int *d_jb, int *d_db, int *d_dp, int *d_jlevL, int *d_jlevU) { int gDim; int *d_dpL = d_dp; int *d_dpU = d_dp + n; unsigned long long *d_ull; cudaMalloc((void 
**)&d_ull, sizeof(unsigned long long)); int *d_last = ull_high(d_ull); int *d_first = ull_low (d_ull); int nthreads = 8 * WARP; // L cudaMemset(d_ull, 0, sizeof(unsigned long long)); cudaMemset(d_jlevL, 0, n*sizeof(int)); gDim = (n + BLOCKDIM - 1) / BLOCKDIM; LEVEL_CSC_SYNC_0<<<gDim, BLOCKDIM>>>(n, d_dpL, d_jlevL, d_last); gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM; TOPO_CSC_L<<<gDim, BLOCKDIM>>>(n, d_ib, d_jb, d_db, d_dpL, d_jlevL, d_ull); // U cudaMemset(d_ull, 0, sizeof(unsigned long long)); cudaMemset(d_jlevU, 0, n*sizeof(int)); gDim = (n + BLOCKDIM - 1) / BLOCKDIM; LEVEL_CSC_SYNC_0<<<gDim, BLOCKDIM>>>(n, d_dpU, d_jlevU, d_last); gDim = (nthreads + BLOCKDIM - 1) / BLOCKDIM; TOPO_CSC_U<<<gDim, BLOCKDIM>>>(n, d_ib, d_jb, d_db, d_dpU, d_jlevU, d_ull); cudaFree(d_ull); } void checktopo(int n, int *ib, int *jb, int *db, int *d_jlevL, int *d_jlevU, int *d_dp) { int *jlevL = (int *) malloc(n*sizeof(int)); int *jlevU = (int *) malloc(n*sizeof(int)); int *dp = (int *) malloc(2*n*sizeof(int)); cudaMemcpy(jlevL, d_jlevL, n*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(jlevU, d_jlevU, n*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(dp, d_dp, 2*n*sizeof(int), cudaMemcpyDeviceToHost); int *dpL = dp; int *dpU = dp + n; //for (int i = 0; i < n; i++) { printf("%d ", jlevL[i]); } printf("\n"); //for (int i = 0; i < n; i++) { printf("%d ", jlevU[i]); } printf("\n"); for (int i = 0; i < n; i++) { int jl = jlevL[i]; int ju = jlevU[i]; if (jl < 1 || jl > n) { printf("topo error: jl = %d\n", jl); exit(0); } if (ju < 1 || ju > n) { printf("topo error: ju = %d\n", ju); exit(0); } if (dpL[jl-1] != 0) { printf("topo error: dpL[%d] = %d\n", jl-1, dpL[jl-1]); exit(0); } if (dpU[ju-1] != 0) { printf("topo error: dpU[%d] = %d\n", ju-1, dpU[ju-1]); exit(0); } for (int j = db[jl-1]+1; j < ib[jl]; j++) { int k = jb[j-1]-1; dpL[k]--; } for (int j = ib[ju-1]; j < db[ju-1]; j++) { int k = jb[j-1]-1; dpU[k]--; } } free(jlevL); free(jlevU); free(dp); }
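The TOPO_CSC_* kernels above use a single unsigned long long as a lock-free work-queue header: the low 32 bits hold the consumer cursor ("first") and the high 32 bits the producer cursor ("last"), so a warp leader can test first < last and claim an entry with one 64-bit atomicCAS in atomicInc_ULL_Low, while producers bump the tail with a 32-bit atomicAdd on the high half. The host-side sketch below only illustrates that bit layout under the same little-endian assumption as the "[low high] : [1, 0]" comment; it is not part of the original file.

// Hedged illustration of the packed first/last counter (mirrors ull_low/ull_high).
#include <cstdio>

int main()
{
  unsigned long long ull = 0;                        // packed [first | last]
  int *first = reinterpret_cast<int *>(&ull);        // low word, as in ull_low()
  int *last  = reinterpret_cast<int *>(&ull) + 1;    // high word, as in ull_high()

  *last = 3;                                         // producers have pushed 3 entries
  if (*first < *last)
    ull += 0x0000000000000001ULL;                    // ZERO_ONE: claim one entry

  std::printf("first=%d last=%d\n", *first, *last);  // prints: first=1 last=3
  return 0;
}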
d696df83ae974937fb9e0936ee5d56ee0b3d9289.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_4_left; int xdim0_update_halo_kernel2_zvel_plus_4_left_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_4_left; int ydim0_update_halo_kernel2_zvel_plus_4_left_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_4_left; int xdim1_update_halo_kernel2_zvel_plus_4_left_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_4_left; int ydim1_update_halo_kernel2_zvel_plus_4_left_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_4_left*(y)+xdim0_update_halo_kernel2_zvel_plus_4_left*ydim0_update_halo_kernel2_zvel_plus_4_left*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_4_left*(y)+xdim1_update_halo_kernel2_zvel_plus_4_left*ydim1_update_halo_kernel2_zvel_plus_4_left*(z)) //user function __device__ inline void update_halo_kernel2_zvel_plus_4_left(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(4,0,0)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(4,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_4_left( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_zvel_plus_4_left + idx_z * 1 * xdim0_update_halo_kernel2_zvel_plus_4_left * ydim0_update_halo_kernel2_zvel_plus_4_left; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_zvel_plus_4_left + idx_z * 1 * xdim1_update_halo_kernel2_zvel_plus_4_left * ydim1_update_halo_kernel2_zvel_plus_4_left; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_4_left(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_4_left(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(81,"update_halo_kernel2_zvel_plus_4_left"); OPS_kernels[81].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_left_h || ydim0 != 
ydim0_update_halo_kernel2_zvel_plus_4_left_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_left_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_left_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_4_left, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_plus_4_left_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_4_left, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_plus_4_left_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_4_left, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_zvel_plus_4_left_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_4_left, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_plus_4_left_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[81].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_4_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[81].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[81].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[81].transfer += ops_compute_transfer(dim, range, &arg1); }
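The OPS_ACC0/OPS_ACC1 macros above flatten a relative (x,y,z) stencil offset onto a pointer the kernel has already advanced to the current grid point, using the row pitch xdim and slab pitch xdim*ydim held in __constant__ memory. The sketch below reproduces that index arithmetic with assumed pitches; it is not OPS library code.

// Hedged illustration of the OPS_ACC flattening; pitches are assumptions.
#include <cstdio>

// Same flattening as OPS_ACC0: offset = x + xdim*y + xdim*ydim*z.
static int ops_acc(int x, int y, int z, int xdim, int ydim)
{
  return x + xdim * y + xdim * ydim * z;
}

int main()
{
  const int xdim0 = 16, ydim0 = 8;   // illustrative pitches only
  // The left-halo update writes (0,0,0) and reads (4,0,0), i.e. 4 elements to
  // the right in the same row; a (4,1,1) offset would additionally skip one
  // full row and one full xy-slab.
  std::printf("(0,0,0)->%d  (4,0,0)->%d  (4,1,1)->%d\n",
              ops_acc(0, 0, 0, xdim0, ydim0),
              ops_acc(4, 0, 0, xdim0, ydim0),
              ops_acc(4, 1, 1, xdim0, ydim0));   // prints: 0, 4, 148
  return 0;
}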
d696df83ae974937fb9e0936ee5d56ee0b3d9289.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_4_left; int xdim0_update_halo_kernel2_zvel_plus_4_left_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_4_left; int ydim0_update_halo_kernel2_zvel_plus_4_left_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_4_left; int xdim1_update_halo_kernel2_zvel_plus_4_left_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_4_left; int ydim1_update_halo_kernel2_zvel_plus_4_left_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_4_left*(y)+xdim0_update_halo_kernel2_zvel_plus_4_left*ydim0_update_halo_kernel2_zvel_plus_4_left*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_4_left*(y)+xdim1_update_halo_kernel2_zvel_plus_4_left*ydim1_update_halo_kernel2_zvel_plus_4_left*(z)) //user function __device__ inline void update_halo_kernel2_zvel_plus_4_left(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(4,0,0)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(4,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_4_left( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_zvel_plus_4_left + idx_z * 1 * xdim0_update_halo_kernel2_zvel_plus_4_left * ydim0_update_halo_kernel2_zvel_plus_4_left; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_zvel_plus_4_left + idx_z * 1 * xdim1_update_halo_kernel2_zvel_plus_4_left * ydim1_update_halo_kernel2_zvel_plus_4_left; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_4_left(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_4_left(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(81,"update_halo_kernel2_zvel_plus_4_left"); OPS_kernels[81].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_left_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_left_h || xdim1 != 
xdim1_update_halo_kernel2_zvel_plus_4_left_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_left_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_4_left, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_plus_4_left_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_4_left, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_plus_4_left_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_4_left, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_zvel_plus_4_left_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_4_left, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_plus_4_left_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[81].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_plus_4_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[81].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[81].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[81].transfer += ops_compute_transfer(dim, range, &arg1); }
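// A minimal, self-contained sketch (not part of the generated OPS code above) of the
// cached-__constant__ pattern the host stub uses: the host keeps a shadow copy (the
// *_h variable) and only re-issues cudaMemcpyToSymbol when the dimension changes.
// All names here (xdim_demo, demo_kernel, launch_demo) are illustrative only.
#include <cuda_runtime.h>

__constant__ int xdim_demo;      // device-side copy of the leading dimension
static int xdim_demo_h = -1;     // host-side shadow used to skip redundant copies

__global__ void demo_kernel(const double* in, double* out, int nx, int ny) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < nx && y < ny) out[x + y * xdim_demo] = in[x + y * xdim_demo];
}

static void launch_demo(const double* d_in, double* d_out, int nx, int ny, int xdim) {
    if (xdim != xdim_demo_h) {   // copy to constant memory only when the cached value is stale
        cudaMemcpyToSymbol(xdim_demo, &xdim, sizeof(int));
        xdim_demo_h = xdim;
    }
    dim3 block(32, 8, 1);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y, 1);
    demo_kernel<<<grid, block>>>(d_in, d_out, nx, ny);
}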
066d850f34bebd68f533b4eb5da268c6e7684036.hip
// !!! This is a file automatically generated by hipify!!! /* BIMODAL v1 * David W. Pearson * September 28, 2017 * * This version of the code will implement some improvements to make the model better fit non-linear * features present in the data. The algorithm is effectively that of Gil-Marin 2012/2015. */ #include <iostream> #include <fstream> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_vector_types.h> #include "include/gpuerrchk.h" #include "include/mcmc.h" #include "include/harppi.h" #include "include/make_spline.h" #include "include/pk_slope.h" int main(int argc, char *argv[]) { // Use HARPPI hidden in an object file to parse parameters parameters p(argv[1]); // Generate cubic splines of the input BAO and NW power spectra std::vector<float4> Pk = make_spline(p.gets("input_power")); // Copy the splines to the allocated GPU memory gpuErrchk(hipMemcpyToSymbol(d_Pk, Pk.data(), 128*sizeof(float4))); // Copy Gaussian Quadrature weights and evaluation point to GPU constant memory gpuErrchk(hipMemcpyToSymbol(d_wi, &w_i[0], 32*sizeof(float))); gpuErrchk(hipMemcpyToSymbol(d_xi, &x_i[0], 32*sizeof(float))); // Declare a pointer for the integration workspace and allocate memory on the GPU double *d_Bk; float4 *d_ks; gpuErrchk(hipMalloc((void **)&d_Bk, p.geti("num_data")*sizeof(double))); gpuErrchk(hipMalloc((void **)&d_ks, p.geti("num_data")*sizeof(float4))); std::vector<double> start_params; std::vector<bool> limit_params; std::vector<double> var_i; std::vector<double> min; std::vector<double> max; for (int i = 0; i < p.geti("num_params"); ++i) { start_params.push_back(p.getd("start_params", i)); limit_params.push_back(p.getb("limit_params", i)); var_i.push_back(p.getd("vars", i)); min.push_back(p.getd("min_params", i)); max.push_back(p.getd("max_params", i)); } // Initialize bkmcmc object bkmcmc bk_fit(p.gets("data_file"), p.gets("cov_file"), start_params, var_i, d_ks, d_Bk); // Check that the initialization worked bk_fit.check_init(); // Set any limits on the parameters bk_fit.set_param_limits(limit_params, min, max); // Run the MCMC chain bk_fit.run_chain(p.geti("num_draws"), p.geti("num_burn"), p.gets("reals_file"), d_ks, d_Bk, p.getb("new_chain")); // Free device pointers gpuErrchk(hipFree(d_Bk)); gpuErrchk(hipFree(d_ks)); return 0; }
066d850f34bebd68f533b4eb5da268c6e7684036.cu
/* BIMODAL v1 * David W. Pearson * September 28, 2017 * * This version of the code will implement some improvements to make the model better fit non-linear * features present in the data. The algorithm is effectively that of Gil-Marin 2012/2015. */ #include <iostream> #include <fstream> #include <vector> #include <cuda.h> #include <vector_types.h> #include "include/gpuerrchk.h" #include "include/mcmc.h" #include "include/harppi.h" #include "include/make_spline.h" #include "include/pk_slope.h" int main(int argc, char *argv[]) { // Use HARPPI hidden in an object file to parse parameters parameters p(argv[1]); // Generate cubic splines of the input BAO and NW power spectra std::vector<float4> Pk = make_spline(p.gets("input_power")); // Copy the splines to the allocated GPU memory gpuErrchk(cudaMemcpyToSymbol(d_Pk, Pk.data(), 128*sizeof(float4))); // Copy Gaussian Quadrature weights and evaluation point to GPU constant memory gpuErrchk(cudaMemcpyToSymbol(d_wi, &w_i[0], 32*sizeof(float))); gpuErrchk(cudaMemcpyToSymbol(d_xi, &x_i[0], 32*sizeof(float))); // Declare a pointer for the integration workspace and allocate memory on the GPU double *d_Bk; float4 *d_ks; gpuErrchk(cudaMalloc((void **)&d_Bk, p.geti("num_data")*sizeof(double))); gpuErrchk(cudaMalloc((void **)&d_ks, p.geti("num_data")*sizeof(float4))); std::vector<double> start_params; std::vector<bool> limit_params; std::vector<double> var_i; std::vector<double> min; std::vector<double> max; for (int i = 0; i < p.geti("num_params"); ++i) { start_params.push_back(p.getd("start_params", i)); limit_params.push_back(p.getb("limit_params", i)); var_i.push_back(p.getd("vars", i)); min.push_back(p.getd("min_params", i)); max.push_back(p.getd("max_params", i)); } // Initialize bkmcmc object bkmcmc bk_fit(p.gets("data_file"), p.gets("cov_file"), start_params, var_i, d_ks, d_Bk); // Check that the initialization worked bk_fit.check_init(); // Set any limits on the parameters bk_fit.set_param_limits(limit_params, min, max); // Run the MCMC chain bk_fit.run_chain(p.geti("num_draws"), p.geti("num_burn"), p.gets("reals_file"), d_ks, d_Bk, p.getb("new_chain")); // Free device pointers gpuErrchk(cudaFree(d_Bk)); gpuErrchk(cudaFree(d_ks)); return 0; }
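// "include/gpuerrchk.h" is not part of this listing; the calls above only assume a
// macro that checks a cudaError_t (or hipError_t in the hipified build).  A common
// minimal implementation looks like the following -- an assumption for illustration,
// not necessarily the project's actual header.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}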
19e1cfdb3106321728cec1d918567f0290872f9e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <cmath> #include <nestedtensor/csrc/cuda/transpose.h> #include <stdio.h> namespace nested_tensor { namespace cuda { template<typename T, int num_threads_sqrt> __global__ void transpose_nchw_nhwc( T* input, T* output, const int* block_offsets, const int* offsets, const int batch_size, const int num_channel) { __shared__ T tile[num_threads_sqrt][num_threads_sqrt + 1]; const int block_id = blockIdx.x; const int tid2 = threadIdx.x / 32; const int tid3 = threadIdx.x % 32; int batch_id = threadIdx.x % 32; bool found = false; while (batch_id < batch_size) { if (block_offsets[batch_id] <= block_id && block_id < block_offsets[batch_id + 1]) { found = true; break; } batch_id += 32; } if (!found) { batch_id = 0; } // TODO: Parameterize on warp size instead of assuming 32. for (int warp_offset = 16; warp_offset > 0; warp_offset /= 2) batch_id = batch_id | __shfl_down_sync(0xFFFFFFFF, batch_id, warp_offset); batch_id = __shfl_sync(0xFFFFFFFF, batch_id, 0, 32); const int grain_size = num_threads_sqrt; const int size2 = num_channel; const int block_offset = block_offsets[batch_id]; const int offset = offsets[batch_id]; const int next_offset = offsets[batch_id + 1]; const int size3 = (next_offset - offset) / num_channel; const int num_chunks_3 = (size3 + grain_size - 1) / grain_size; const int current_block = block_id - block_offset; const int current_block_mod = (current_block % num_chunks_3) * grain_size; const int current_block_div = (current_block / num_chunks_3) * grain_size; const int offset1_tid2 = (current_block_mod) + tid2; const int offset2_tid2 = (current_block_div) + tid2; const int offset1_tid3 = (current_block_mod) + tid3; const int offset2_tid3 = (current_block_div) + tid3; const int ii3 = offset1_tid3; #pragma unroll for (int sub = 0; sub < 4; sub++) { const int ii2 = offset2_tid2 + sub * 8; if (ii2 < size2 && ii3 < size3) { const int ii = ii2 * size3 + ii3; tile[tid2 + sub * 8][tid3] = input[offset + ii]; } } __syncthreads(); const int ii21 = offset2_tid3; #pragma unroll for (int sub = 0; sub < 4; sub++) { const int ii31 = offset1_tid2 + sub * 8; if (ii21 < size2 && ii31 < size3) { const int ii1 = ii21 * size3 + ii31; const int j = (ii1 % size3) * size2; const int i = (ii1 / size3); output[offset + j + i] = tile[tid3][tid2 + sub * 8]; } } } template <typename T> void transpose_nchw_nhwc_kernelLauncher( T* input, // [batch_size x None] T* output, // [batch_size x max(input.nested_size(1)) x inner_size] const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const hipStream_t stream) { dim3 grid; grid.x = block_numel; hipLaunchKernelGGL(( transpose_nchw_nhwc<T, 32>), dim3(grid), dim3(256), 0, stream, input, output, block_offsets, offsets, batch_size, num_channel); } template void transpose_nchw_nhwc_kernelLauncher<c10::Half>( c10::Half* input, c10::Half* output, const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const hipStream_t stream); template void transpose_nchw_nhwc_kernelLauncher<float>( float* input, float* output, const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const hipStream_t stream); template<typename T, int num_threads_sqrt> __global__ void transpose_nhwc_nchw( T* input, T* output, const int* block_offsets, const int* offsets, const int batch_size, const int num_channel, 
const int num_chunks) { __shared__ T tile[num_threads_sqrt][num_threads_sqrt + 1]; const int block_id = blockIdx.x; const int tid2 = threadIdx.x / 32; const int tid3 = threadIdx.x % 32; int batch_id = threadIdx.x % 32; bool found = false; while (batch_id < batch_size) { if (block_offsets[batch_id] <= block_id && block_id < block_offsets[batch_id + 1]) { found = true; break; } batch_id += 32; } if (!found) { batch_id = 0; } // TODO: Parameterize on warp size instead of assuming 32. for (int warp_offset = 16; warp_offset > 0; warp_offset /= 2) batch_id = batch_id | __shfl_down_sync(0xFFFFFFFF, batch_id, warp_offset); batch_id = __shfl_sync(0xFFFFFFFF, batch_id, 0, 32); const int block_offset = block_offsets[batch_id]; const int offset = offsets[batch_id]; const int next_offset = offsets[batch_id + 1]; const int image_numel = next_offset - offset; const int size2 = image_numel / num_channel; const int current_block = block_id - block_offset; const int current_block_mod = (current_block % num_chunks) * num_threads_sqrt; const int current_block_div = (current_block / num_chunks) * num_threads_sqrt; const int offset1_tid2 = (current_block_mod) + tid2; const int offset2_tid3 = (current_block_div) + tid3; int ii = offset + (current_block / num_chunks) * num_threads_sqrt * num_channel + tid2 * num_channel + (current_block_mod) + tid3; if (ii + 3 * 8 * num_channel < next_offset) { tile[tid2 + 0 * 8][tid3] = input[ii + 0 * 8 * num_channel]; tile[tid2 + 1 * 8][tid3] = input[ii + 1 * 8 * num_channel]; tile[tid2 + 2 * 8][tid3] = input[ii + 2 * 8 * num_channel]; tile[tid2 + 3 * 8][tid3] = input[ii + 3 * 8 * num_channel]; } else { #pragma unroll for (int sub = 0; sub < 4; sub++) { if (ii < next_offset) { tile[tid2 + sub * 8][tid3] = input[ii]; } ii += 8 * num_channel; } } __syncthreads(); int ii21 = offset2_tid3; if (ii21 < size2) { ii21 = ii21 * num_channel; if (offset1_tid2 + 3 * 8 < num_channel) { int ii1 = ii21 + offset1_tid2; #pragma unroll for (int sub = 0; sub < 4; sub++) { const int j = (ii1 % num_channel) * size2; const int i = (ii1 / num_channel); output[offset + j + i] = tile[tid3][tid2 + sub * 8]; ii1 += 8; } } else { #pragma unroll for (int sub = 0; sub < 4; sub++) { const int ii31 = offset1_tid2 + sub * 8; if (ii31 < num_channel) { const int ii1 = ii21 + ii31; const int j = (ii1 % num_channel) * size2; const int i = (ii1 / num_channel); output[offset + j + i] = tile[tid3][tid2 + sub * 8]; } } } } } template <typename T> void transpose_nhwc_nchw_kernelLauncher( T* input, // [batch_size x None] T* output, // [batch_size x max(input.nested_size(1)) x inner_size] const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const hipStream_t stream) { dim3 grid; grid.x = block_numel; const int num_chunks = (num_channel + 32 - 1) / 32; hipLaunchKernelGGL(( transpose_nhwc_nchw<T, 32>), dim3(grid), dim3(256), 0, stream, input, output, block_offsets, offsets, batch_size, num_channel, num_chunks); } template void transpose_nhwc_nchw_kernelLauncher<c10::Half>( c10::Half* input, c10::Half* output, const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const hipStream_t stream); template void transpose_nhwc_nchw_kernelLauncher<float>( float* input, float* output, const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const hipStream_t stream); } } // namespace nested_tensor
19e1cfdb3106321728cec1d918567f0290872f9e.cu
#include <cuda_runtime.h> #include <cuda_fp16.h> #include <cmath> #include <nestedtensor/csrc/cuda/transpose.h> #include <stdio.h> namespace nested_tensor { namespace cuda { template<typename T, int num_threads_sqrt> __global__ void transpose_nchw_nhwc( T* input, T* output, const int* block_offsets, const int* offsets, const int batch_size, const int num_channel) { __shared__ T tile[num_threads_sqrt][num_threads_sqrt + 1]; const int block_id = blockIdx.x; const int tid2 = threadIdx.x / 32; const int tid3 = threadIdx.x % 32; int batch_id = threadIdx.x % 32; bool found = false; while (batch_id < batch_size) { if (block_offsets[batch_id] <= block_id && block_id < block_offsets[batch_id + 1]) { found = true; break; } batch_id += 32; } if (!found) { batch_id = 0; } // TODO: Parameterize on warp size instead of assuming 32. for (int warp_offset = 16; warp_offset > 0; warp_offset /= 2) batch_id = batch_id | __shfl_down_sync(0xFFFFFFFF, batch_id, warp_offset); batch_id = __shfl_sync(0xFFFFFFFF, batch_id, 0, 32); const int grain_size = num_threads_sqrt; const int size2 = num_channel; const int block_offset = block_offsets[batch_id]; const int offset = offsets[batch_id]; const int next_offset = offsets[batch_id + 1]; const int size3 = (next_offset - offset) / num_channel; const int num_chunks_3 = (size3 + grain_size - 1) / grain_size; const int current_block = block_id - block_offset; const int current_block_mod = (current_block % num_chunks_3) * grain_size; const int current_block_div = (current_block / num_chunks_3) * grain_size; const int offset1_tid2 = (current_block_mod) + tid2; const int offset2_tid2 = (current_block_div) + tid2; const int offset1_tid3 = (current_block_mod) + tid3; const int offset2_tid3 = (current_block_div) + tid3; const int ii3 = offset1_tid3; #pragma unroll for (int sub = 0; sub < 4; sub++) { const int ii2 = offset2_tid2 + sub * 8; if (ii2 < size2 && ii3 < size3) { const int ii = ii2 * size3 + ii3; tile[tid2 + sub * 8][tid3] = input[offset + ii]; } } __syncthreads(); const int ii21 = offset2_tid3; #pragma unroll for (int sub = 0; sub < 4; sub++) { const int ii31 = offset1_tid2 + sub * 8; if (ii21 < size2 && ii31 < size3) { const int ii1 = ii21 * size3 + ii31; const int j = (ii1 % size3) * size2; const int i = (ii1 / size3); output[offset + j + i] = tile[tid3][tid2 + sub * 8]; } } } template <typename T> void transpose_nchw_nhwc_kernelLauncher( T* input, // [batch_size x None] T* output, // [batch_size x max(input.nested_size(1)) x inner_size] const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const cudaStream_t stream) { dim3 grid; grid.x = block_numel; transpose_nchw_nhwc<T, 32><<<grid, 256, 0, stream>>>( input, output, block_offsets, offsets, batch_size, num_channel); } template void transpose_nchw_nhwc_kernelLauncher<c10::Half>( c10::Half* input, c10::Half* output, const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const cudaStream_t stream); template void transpose_nchw_nhwc_kernelLauncher<float>( float* input, float* output, const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const cudaStream_t stream); template<typename T, int num_threads_sqrt> __global__ void transpose_nhwc_nchw( T* input, T* output, const int* block_offsets, const int* offsets, const int batch_size, const int num_channel, const int num_chunks) { __shared__ T tile[num_threads_sqrt][num_threads_sqrt + 1]; const int 
block_id = blockIdx.x; const int tid2 = threadIdx.x / 32; const int tid3 = threadIdx.x % 32; int batch_id = threadIdx.x % 32; bool found = false; while (batch_id < batch_size) { if (block_offsets[batch_id] <= block_id && block_id < block_offsets[batch_id + 1]) { found = true; break; } batch_id += 32; } if (!found) { batch_id = 0; } // TODO: Parameterize on warp size instead of assuming 32. for (int warp_offset = 16; warp_offset > 0; warp_offset /= 2) batch_id = batch_id | __shfl_down_sync(0xFFFFFFFF, batch_id, warp_offset); batch_id = __shfl_sync(0xFFFFFFFF, batch_id, 0, 32); const int block_offset = block_offsets[batch_id]; const int offset = offsets[batch_id]; const int next_offset = offsets[batch_id + 1]; const int image_numel = next_offset - offset; const int size2 = image_numel / num_channel; const int current_block = block_id - block_offset; const int current_block_mod = (current_block % num_chunks) * num_threads_sqrt; const int current_block_div = (current_block / num_chunks) * num_threads_sqrt; const int offset1_tid2 = (current_block_mod) + tid2; const int offset2_tid3 = (current_block_div) + tid3; int ii = offset + (current_block / num_chunks) * num_threads_sqrt * num_channel + tid2 * num_channel + (current_block_mod) + tid3; if (ii + 3 * 8 * num_channel < next_offset) { tile[tid2 + 0 * 8][tid3] = input[ii + 0 * 8 * num_channel]; tile[tid2 + 1 * 8][tid3] = input[ii + 1 * 8 * num_channel]; tile[tid2 + 2 * 8][tid3] = input[ii + 2 * 8 * num_channel]; tile[tid2 + 3 * 8][tid3] = input[ii + 3 * 8 * num_channel]; } else { #pragma unroll for (int sub = 0; sub < 4; sub++) { if (ii < next_offset) { tile[tid2 + sub * 8][tid3] = input[ii]; } ii += 8 * num_channel; } } __syncthreads(); int ii21 = offset2_tid3; if (ii21 < size2) { ii21 = ii21 * num_channel; if (offset1_tid2 + 3 * 8 < num_channel) { int ii1 = ii21 + offset1_tid2; #pragma unroll for (int sub = 0; sub < 4; sub++) { const int j = (ii1 % num_channel) * size2; const int i = (ii1 / num_channel); output[offset + j + i] = tile[tid3][tid2 + sub * 8]; ii1 += 8; } } else { #pragma unroll for (int sub = 0; sub < 4; sub++) { const int ii31 = offset1_tid2 + sub * 8; if (ii31 < num_channel) { const int ii1 = ii21 + ii31; const int j = (ii1 % num_channel) * size2; const int i = (ii1 / num_channel); output[offset + j + i] = tile[tid3][tid2 + sub * 8]; } } } } } template <typename T> void transpose_nhwc_nchw_kernelLauncher( T* input, // [batch_size x None] T* output, // [batch_size x max(input.nested_size(1)) x inner_size] const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const cudaStream_t stream) { dim3 grid; grid.x = block_numel; const int num_chunks = (num_channel + 32 - 1) / 32; transpose_nhwc_nchw<T, 32><<<grid, 256, 0, stream>>>( input, output, block_offsets, offsets, batch_size, num_channel, num_chunks); } template void transpose_nhwc_nchw_kernelLauncher<c10::Half>( c10::Half* input, c10::Half* output, const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const cudaStream_t stream); template void transpose_nhwc_nchw_kernelLauncher<float>( float* input, float* output, const int* block_offsets, const int* offsets, const int batch_size, const int block_numel, const int num_channel, const cudaStream_t stream); } } // namespace nested_tensor
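// Host-side sketch of how the offsets / block_offsets arrays consumed by
// transpose_nchw_nhwc_kernelLauncher could be built for a batch of variably sized
// NCHW images.  This is an inference from the kernel's indexing (32x32 tiles over
// H*W and the channel dimension), not code from the library; the vectors would be
// copied to device memory before invoking the launcher.
#include <vector>

struct TransposePlan {
    std::vector<int> offsets;        // batch_size + 1 running element offsets into the packed buffer
    std::vector<int> block_offsets;  // batch_size + 1 running tile (CUDA block) offsets per image
    int block_numel = 0;             // total tile count, i.e. grid.x for the launcher
};

inline TransposePlan make_nchw_nhwc_plan(const std::vector<int>& heights,
                                         const std::vector<int>& widths,
                                         int num_channel) {
    TransposePlan plan;
    plan.offsets.push_back(0);
    plan.block_offsets.push_back(0);
    for (size_t b = 0; b < heights.size(); ++b) {
        const int hw = heights[b] * widths[b];
        const int tiles = ((hw + 31) / 32) * ((num_channel + 31) / 32);  // 32x32 tiles per image
        plan.offsets.push_back(plan.offsets.back() + hw * num_channel);
        plan.block_offsets.push_back(plan.block_offsets.back() + tiles);
    }
    plan.block_numel = plan.block_offsets.back();
    return plan;
}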
1ec9f242ad766e3a2a2870c577fe77a32628b8eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> __global__ void barKernel() { printf("bar!\n"); } __declspec(dllexport) void bar(){ hipLaunchKernelGGL(( barKernel), dim3(1),dim3(1), 0, 0, ); std::cout << "bar done!\n"; }
1ec9f242ad766e3a2a2870c577fe77a32628b8eb.cu
#include <iostream> __global__ void barKernel() { printf("bar!\n"); } __declspec(dllexport) void bar(){ barKernel<<<1,1>>>(); std::cout << "bar done!\n"; }
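// Usage sketch from a hypothetical consumer of the DLL built from the file above
// (not part of either listing): the host program only needs the import declaration
// of the exported wrapper; the kernel launch stays hidden inside bar().
__declspec(dllimport) void bar();   // assumed import declaration on the consumer side

int main() {
    bar();   // launches barKernel<<<1,1>>> inside the DLL and prints "bar done!"
    return 0;
}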
6e90ac2b822c1bd78e91b9879e36d259a3491535.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #include "Open3D/ML/ContinuousConv/Detail/ContinuousConvCUDAKernels.h" #include "Open3D/Utility/Helper.h" using open3d::utility::DivUp; namespace open3d { namespace ml { namespace detail { /// Kernel for FillColumn template <class TReal, class TIndex, bool ALIGN_CORNERS, CoordinateMapping MAPPING, InterpolationMode INTERPOLATION> __global__ void FillColumnKernel( TReal* columns, int in_channels, TIndex begin_idx, TIndex end_idx, TIndex num_out, const TReal* const __restrict__ out_positions, TIndex num_inp, const TReal* const __restrict__ inp_positions, const TReal* const __restrict__ inp_features, const TReal* const __restrict__ inp_importance, size_t neighbors_index_size, const TIndex* const __restrict__ neighbors_index, const TReal* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const TReal* const __restrict__ extents, const TReal* const __restrict__ offsets, int filter_size_x, int filter_size_y, int filter_size_z, bool INDIVIDUAL_EXTENT, bool ISOTROPIC_EXTENT, bool NORMALIZE, bool POINT_IMPORTANCE, bool NEIGHBOR_IMPORTANCE) { TIndex out_idx = begin_idx + blockIdx.x; if (out_idx >= end_idx) return; const int NUM_INTERP_VALUES = (INTERPOLATION == InterpolationMode::LINEAR || INTERPOLATION == InterpolationMode::LINEAR_BORDER ? 
8 : 1); TReal interp_weights[NUM_INTERP_VALUES]; TIndex interp_indices[NUM_INTERP_VALUES]; TReal offset[3] = {offsets[0], offsets[1], offsets[2]}; const TIndex col_idx = out_idx - begin_idx; TReal* out_column = columns + filter_size_x * filter_size_y * filter_size_z * in_channels * col_idx; const int64_t neighbor_start = neighbors_row_splits[out_idx]; const int64_t neighbor_end = neighbors_row_splits[out_idx + 1]; TReal out_pos[3] = {out_positions[out_idx * 3 + 0], out_positions[out_idx * 3 + 1], out_positions[out_idx * 3 + 2]}; TReal inv_extents[3]; if (INDIVIDUAL_EXTENT) { if (ISOTROPIC_EXTENT) { inv_extents[0] = TReal(1) / extents[out_idx]; inv_extents[1] = inv_extents[0]; inv_extents[2] = inv_extents[0]; } else { inv_extents[0] = TReal(1) / extents[3 * out_idx + 0]; inv_extents[1] = TReal(1) / extents[3 * out_idx + 1]; inv_extents[2] = TReal(1) / extents[3 * out_idx + 2]; } } else { if (ISOTROPIC_EXTENT) { inv_extents[0] = TReal(1) / extents[0]; inv_extents[1] = inv_extents[0]; inv_extents[2] = inv_extents[0]; } else { inv_extents[0] = TReal(1) / extents[0]; inv_extents[1] = TReal(1) / extents[1]; inv_extents[2] = TReal(1) / extents[2]; } } TReal normalizer = TReal(0); if (NORMALIZE) { if (NEIGHBOR_IMPORTANCE) { for (int64_t n_idx = neighbor_start + threadIdx.x; n_idx < neighbor_end; n_idx += blockDim.x) { TReal n_importance = neighbors_importance[n_idx]; normalizer += n_importance; } unsigned int mask = __activemask(); for (int offset = blockDim.x / 2; offset > 0; offset /= 2) normalizer += __shfl_down_sync(mask, normalizer, offset); normalizer = __shfl_sync(mask, normalizer, 0); } else { int64_t num_neighbors = neighbor_end - neighbor_start; normalizer = num_neighbors; } } for (int64_t n_idx = neighbor_start; n_idx < neighbor_end; ++n_idx) { const TIndex inp_idx = neighbors_index[n_idx]; const TReal n_importance = NEIGHBOR_IMPORTANCE ? 
neighbors_importance[n_idx] : TReal(1); TReal x, y, z; x = inp_positions[inp_idx * 3 + 0] - out_pos[0]; y = inp_positions[inp_idx * 3 + 1] - out_pos[1]; z = inp_positions[inp_idx * 3 + 2] - out_pos[2]; ComputeFilterCoordinates<ALIGN_CORNERS, MAPPING>( x, y, z, filter_size_x, filter_size_y, filter_size_z, inv_extents[0], inv_extents[1], inv_extents[2], offset[0], offset[1], offset[2]); Interpolate<INTERPOLATION>(interp_weights, interp_indices, x, y, z, filter_size_x, filter_size_y, filter_size_z); TReal infeat = 0; TReal importance = 1; if (POINT_IMPORTANCE) importance = inp_importance[inp_idx]; if (NEIGHBOR_IMPORTANCE) importance *= n_importance; if (NORMALIZE && normalizer != 0) importance /= normalizer; for (int ic = threadIdx.x; ic < in_channels; ic += blockDim.x) { infeat = importance * inp_features[inp_idx * in_channels + ic]; for (int j = 0; j < NUM_INTERP_VALUES; ++j) { TReal value = interp_weights[j] * infeat; out_column[interp_indices[j] * in_channels + ic] += value; } } } // for n } template <class TReal, class TIndex> void FillColumn(const hipStream_t& stream, TReal* columns, int in_channels, TIndex begin_idx, TIndex end_idx, TIndex num_out, const TReal* const __restrict__ out_positions, TIndex num_inp, const TReal* const __restrict__ inp_positions, const TReal* const __restrict__ inp_features, const TReal* const __restrict__ inp_importance, size_t neighbors_index_size, const TIndex* const __restrict__ neighbors_index, const TReal* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const TReal* const __restrict__ extents, const TReal* const __restrict__ offsets, const std::vector<int>& filter_dims, InterpolationMode interpolation, CoordinateMapping coordinate_mapping, bool align_corners, bool individual_extent, bool isotropic_extent, bool normalize) { const int filter_size_z = filter_dims[0]; const int filter_size_y = filter_dims[1]; const int filter_size_x = filter_dims[2]; TIndex num_columns = end_idx - begin_idx; int filter_spatial_size = filter_size_x * filter_size_y * filter_size_z; hipMemsetAsync( columns, 0, sizeof(TReal) * filter_spatial_size * in_channels * num_columns, stream); const int BLOCKSIZE = 32; dim3 block(BLOCKSIZE, 1, 1); dim3 grid(0, 1, 1); grid.x = num_columns; #define FN_PARAMETERS \ columns, in_channels, begin_idx, end_idx, num_out, out_positions, num_inp, \ inp_positions, inp_features, inp_importance, neighbors_index_size, \ neighbors_index, neighbors_importance, neighbors_row_splits, \ extents, offsets, filter_size_x, filter_size_y, filter_size_z, \ individual_extent, isotropic_extent, normalize, \ inp_importance != nullptr, neighbors_importance != nullptr #define CALL_TEMPLATE(INTERPOLATION, MAPPING, ALIGN_CORNERS) \ if (INTERPOLATION == interpolation && MAPPING == coordinate_mapping && \ ALIGN_CORNERS == align_corners) \ hipLaunchKernelGGL(( FillColumnKernel<TReal, TIndex, ALIGN_CORNERS, MAPPING, INTERPOLATION>) \ , dim3(grid), dim3(block), 0, stream, FN_PARAMETERS); #define CALL_TEMPLATE2(INTERPOLATION, MAPPING) \ CALL_TEMPLATE(INTERPOLATION, MAPPING, true) \ CALL_TEMPLATE(INTERPOLATION, MAPPING, false) #define CALL_TEMPLATE3(INTERPOLATION) \ CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::BALL_TO_CUBE_RADIAL) \ CALL_TEMPLATE2(INTERPOLATION, \ CoordinateMapping::BALL_TO_CUBE_VOLUME_PRESERVING) \ CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::IDENTITY) #define CALL_TEMPLATE4 \ CALL_TEMPLATE3(InterpolationMode::LINEAR) \ CALL_TEMPLATE3(InterpolationMode::LINEAR_BORDER) \ 
CALL_TEMPLATE3(InterpolationMode::NEAREST_NEIGHBOR) if (grid.x) { CALL_TEMPLATE4 /*CHECK_CUDA_ERROR*/ } #undef CALL_TEMPLATE #undef CALL_TEMPLATE2 #undef CALL_TEMPLATE3 #undef CALL_TEMPLATE4 #undef FN_PARAMETERS } template void FillColumn<float, int32_t>( const hipStream_t& stream, float* columns, int in_channels, int32_t begin_idx, int32_t end_idx, int32_t num_out, const float* const __restrict__ out_positions, int32_t num_inp, const float* const __restrict__ inp_positions, const float* const __restrict__ inp_features, const float* const __restrict__ inp_importance, size_t neighbors_index_size, const int32_t* const __restrict__ neighbors_index, const float* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const float* const __restrict__ extents, const float* const __restrict__ offsets, const std::vector<int>& filter_dims, InterpolationMode interpolation, CoordinateMapping coordinate_mapping, bool align_corners, bool individual_extent, bool isotropic_extent, bool normalize); template <class TReal, class TIndex, bool ALIGN_CORNERS, CoordinateMapping MAPPING, InterpolationMode INTERPOLATION> __global__ void FillColumnTransposeKernel( TReal* columns, int in_channels, TIndex begin_idx, TIndex end_idx, TIndex num_out, const TReal* const __restrict__ out_positions, TIndex num_inp, const TReal* const __restrict__ inp_positions, const TReal* const __restrict__ inp_features, size_t neighbors_index_size, const TIndex* const __restrict__ neighbors_index, const TReal* const __restrict__ inp_neighbors_importance_sum, const int64_t* const __restrict__ inp_neighbors_prefix_sum, const TReal* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const TReal* const __restrict__ extents, const TReal* const __restrict__ offsets, int filter_size_x, int filter_size_y, int filter_size_z, bool INDIVIDUAL_EXTENT, bool ISOTROPIC_EXTENT, bool NORMALIZE, bool NEIGHBOR_IMPORTANCE) { TIndex out_idx = begin_idx + blockIdx.x; if (out_idx >= end_idx) return; const int NUM_INTERP_VALUES = (INTERPOLATION == InterpolationMode::LINEAR || INTERPOLATION == InterpolationMode::LINEAR_BORDER ? 
8 : 1); TReal interp_weights[NUM_INTERP_VALUES]; TIndex interp_indices[NUM_INTERP_VALUES]; TReal offset[3] = {offsets[0], offsets[1], offsets[2]}; const TIndex col_idx = out_idx - begin_idx; TReal* out_column = columns + filter_size_x * filter_size_y * filter_size_z * in_channels * col_idx; const int64_t neighbor_start = neighbors_row_splits[out_idx]; const int64_t neighbor_end = neighbors_row_splits[out_idx + 1]; TReal out_pos[3] = {out_positions[out_idx * 3 + 0], out_positions[out_idx * 3 + 1], out_positions[out_idx * 3 + 2]}; TReal inv_extents[3]; if (INDIVIDUAL_EXTENT == false) { if (ISOTROPIC_EXTENT) { inv_extents[0] = TReal(1) / extents[0]; inv_extents[1] = inv_extents[0]; inv_extents[2] = inv_extents[0]; } else { inv_extents[0] = TReal(1) / extents[0]; inv_extents[1] = TReal(1) / extents[1]; inv_extents[2] = TReal(1) / extents[2]; } } for (int64_t n_idx = neighbor_start; n_idx < neighbor_end; ++n_idx) { const TIndex inp_idx = neighbors_index[n_idx]; TReal x, y, z; x = out_pos[0] - inp_positions[inp_idx * 3 + 0]; y = out_pos[1] - inp_positions[inp_idx * 3 + 1]; z = out_pos[2] - inp_positions[inp_idx * 3 + 2]; if (INDIVIDUAL_EXTENT) { if (ISOTROPIC_EXTENT) { inv_extents[0] = TReal(1) / extents[inp_idx]; inv_extents[1] = inv_extents[0]; inv_extents[2] = inv_extents[0]; } else { inv_extents[0] = TReal(1) / extents[3 * inp_idx + 0]; inv_extents[1] = TReal(1) / extents[3 * inp_idx + 1]; inv_extents[2] = TReal(1) / extents[3 * inp_idx + 2]; } } TReal num_inp_neighbors_normalizer = 1; if (NORMALIZE) { if (NEIGHBOR_IMPORTANCE) { if (inp_neighbors_importance_sum[inp_idx] != 0) num_inp_neighbors_normalizer /= inp_neighbors_importance_sum[inp_idx]; } else { const int64_t inp_neighbor_start = inp_neighbors_prefix_sum[inp_idx]; const int64_t inp_neighbor_end = inp_idx + 1 < num_inp ? 
inp_neighbors_prefix_sum[inp_idx + 1] : neighbors_index_size; const size_t num_inp_neighbors = inp_neighbor_end - inp_neighbor_start; if (num_inp_neighbors > 0) num_inp_neighbors_normalizer /= num_inp_neighbors; } } ComputeFilterCoordinates<ALIGN_CORNERS, MAPPING>( x, y, z, filter_size_x, filter_size_y, filter_size_z, inv_extents[0], inv_extents[1], inv_extents[2], offset[0], offset[1], offset[2]); Interpolate<INTERPOLATION>(interp_weights, interp_indices, x, y, z, filter_size_x, filter_size_y, filter_size_z); TReal infeat = 0; for (int ic = threadIdx.x; ic < in_channels; ic += blockDim.x) { infeat = inp_features[inp_idx * in_channels + ic]; if (NEIGHBOR_IMPORTANCE) infeat *= neighbors_importance[n_idx]; if (NORMALIZE) infeat *= num_inp_neighbors_normalizer; for (int j = 0; j < NUM_INTERP_VALUES; ++j) { TReal value = interp_weights[j] * infeat; out_column[interp_indices[j] * in_channels + ic] += value; } } } // for n } template <class TReal, class TIndex> void FillColumnTranspose( const hipStream_t& stream, TReal* columns, int in_channels, TIndex begin_idx, TIndex end_idx, TIndex num_out, const TReal* const __restrict__ out_positions, TIndex num_inp, const TReal* const __restrict__ inp_positions, const TReal* const __restrict__ inp_features, const TReal* const __restrict__ inp_neighbors_importance_sum, const int64_t* const __restrict__ inp_neighbors_prefix_sum, size_t neighbors_index_size, const TIndex* const __restrict__ neighbors_index, const TReal* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const TReal* const __restrict__ extents, const TReal* const __restrict__ offsets, const std::vector<int>& filter_dims, InterpolationMode interpolation, CoordinateMapping coordinate_mapping, bool align_corners, bool individual_extent, bool isotropic_extent, bool normalize) { const bool has_neighbors_importance = inp_neighbors_importance_sum; const int filter_size_z = filter_dims[0]; const int filter_size_y = filter_dims[1]; const int filter_size_x = filter_dims[2]; TIndex num_columns = end_idx - begin_idx; int filter_spatial_size = filter_size_x * filter_size_y * filter_size_z; hipMemsetAsync( columns, 0, sizeof(TReal) * filter_spatial_size * in_channels * num_columns, stream); const int BLOCKSIZE = 32; dim3 block(BLOCKSIZE, 1, 1); dim3 grid(0, 1, 1); grid.x = num_columns; #define FN_PARAMETERS \ columns, in_channels, begin_idx, end_idx, num_out, out_positions, num_inp, \ inp_positions, inp_features, neighbors_index_size, \ neighbors_index, inp_neighbors_importance_sum, \ inp_neighbors_prefix_sum, neighbors_importance, \ neighbors_row_splits, extents, offsets, filter_size_x, \ filter_size_y, filter_size_z, individual_extent, isotropic_extent, \ normalize, has_neighbors_importance #define CALL_TEMPLATE(INTERPOLATION, MAPPING, ALIGN_CORNERS) \ if (INTERPOLATION == interpolation && MAPPING == coordinate_mapping && \ ALIGN_CORNERS == align_corners) \ hipLaunchKernelGGL(( FillColumnTransposeKernel<TReal, TIndex, ALIGN_CORNERS, MAPPING, \ INTERPOLATION>) \ , dim3(grid), dim3(block), 0, stream, FN_PARAMETERS); #define CALL_TEMPLATE2(INTERPOLATION, MAPPING) \ CALL_TEMPLATE(INTERPOLATION, MAPPING, true) \ CALL_TEMPLATE(INTERPOLATION, MAPPING, false) #define CALL_TEMPLATE3(INTERPOLATION) \ CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::BALL_TO_CUBE_RADIAL) \ CALL_TEMPLATE2(INTERPOLATION, \ CoordinateMapping::BALL_TO_CUBE_VOLUME_PRESERVING) \ CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::IDENTITY) #define CALL_TEMPLATE4 \ 
CALL_TEMPLATE3(InterpolationMode::LINEAR) \ CALL_TEMPLATE3(InterpolationMode::LINEAR_BORDER) \ CALL_TEMPLATE3(InterpolationMode::NEAREST_NEIGHBOR) if (grid.x) { CALL_TEMPLATE4 /*CHECK_CUDA_ERROR*/ } #undef CALL_TEMPLATE #undef CALL_TEMPLATE2 #undef CALL_TEMPLATE3 #undef CALL_TEMPLATE4 #undef FN_PARAMETERS } template void FillColumnTranspose<float, int32_t>( const hipStream_t& stream, float* columns, int in_channels, int32_t begin_idx, int32_t end_idx, int32_t num_out, const float* const __restrict__ out_positions, int32_t num_inp, const float* const __restrict__ inp_positions, const float* const __restrict__ inp_features, const float* const __restrict__ inp_neighbors_importance_sum, const int64_t* const __restrict__ inp_neighbors_prefix_sum, size_t neighbors_index_size, const int32_t* const __restrict__ neighbors_index, const float* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const float* const __restrict__ extents, const float* const __restrict__ offsets, const std::vector<int>& filter_dims, InterpolationMode interpolation, CoordinateMapping coordinate_mapping, bool align_corners, bool individual_extent, bool isotropic_extent, bool normalize); template <class T> __global__ void MultiplyColumnsKernel(size_t rows, size_t cols, T* __restrict__ col_major_matrix, const T* const __restrict__ vector) { size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= rows * cols) return; size_t col = idx / rows; T factor = vector[col]; col_major_matrix[idx] *= factor; } template <class T> void MultiplyColumns(const hipStream_t& stream, size_t rows, size_t cols, T* __restrict__ col_major_matrix, const T* const __restrict__ vector) { const int BLOCKSIZE = 128; dim3 block(BLOCKSIZE, 1, 1); dim3 grid(0, 1, 1); grid.x = DivUp(rows * cols, BLOCKSIZE); if (grid.x) { hipLaunchKernelGGL(( MultiplyColumnsKernel<T>), dim3(grid), dim3(block), 0, stream, rows, cols, col_major_matrix, vector); } } template void MultiplyColumns<float>(const hipStream_t& stream, size_t rows, size_t cols, float* __restrict__ col_major_matrix, const float* const __restrict__ vector); template <class T> __global__ void MultiplyAndCopyColumnsKernel( size_t rows, size_t cols, T* __restrict__ out_ptr, const T* const __restrict__ col_major_matrix, const T* const __restrict__ vector) { size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= rows * cols) return; size_t col = idx / rows; T factor = vector[col]; out_ptr[idx] = col_major_matrix[idx] * factor; } template <class T> void MultiplyAndCopyColumns(const hipStream_t& stream, size_t rows, size_t cols, T* __restrict__ out_ptr, const T* const __restrict__ col_major_matrix, const T* const __restrict__ vector) { const int BLOCKSIZE = 128; dim3 block(BLOCKSIZE, 1, 1); dim3 grid(0, 1, 1); grid.x = DivUp(rows * cols, BLOCKSIZE); if (grid.x) { hipLaunchKernelGGL(( MultiplyAndCopyColumnsKernel<T>), dim3(grid), dim3(block), 0, stream, rows, cols, out_ptr, col_major_matrix, vector); } } template void MultiplyAndCopyColumns<float>( const hipStream_t& stream, size_t rows, size_t cols, float* __restrict__ out_ptr, const float* const __restrict__ col_major_matrix, const float* const __restrict__ vector); } // namespace detail } // namespace ml } // namespace open3d
6e90ac2b822c1bd78e91b9879e36d259a3491535.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #include "Open3D/ML/ContinuousConv/Detail/ContinuousConvCUDAKernels.h" #include "Open3D/Utility/Helper.h" using open3d::utility::DivUp; namespace open3d { namespace ml { namespace detail { /// Kernel for FillColumn template <class TReal, class TIndex, bool ALIGN_CORNERS, CoordinateMapping MAPPING, InterpolationMode INTERPOLATION> __global__ void FillColumnKernel( TReal* columns, int in_channels, TIndex begin_idx, TIndex end_idx, TIndex num_out, const TReal* const __restrict__ out_positions, TIndex num_inp, const TReal* const __restrict__ inp_positions, const TReal* const __restrict__ inp_features, const TReal* const __restrict__ inp_importance, size_t neighbors_index_size, const TIndex* const __restrict__ neighbors_index, const TReal* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const TReal* const __restrict__ extents, const TReal* const __restrict__ offsets, int filter_size_x, int filter_size_y, int filter_size_z, bool INDIVIDUAL_EXTENT, bool ISOTROPIC_EXTENT, bool NORMALIZE, bool POINT_IMPORTANCE, bool NEIGHBOR_IMPORTANCE) { TIndex out_idx = begin_idx + blockIdx.x; if (out_idx >= end_idx) return; const int NUM_INTERP_VALUES = (INTERPOLATION == InterpolationMode::LINEAR || INTERPOLATION == InterpolationMode::LINEAR_BORDER ? 
8 : 1); TReal interp_weights[NUM_INTERP_VALUES]; TIndex interp_indices[NUM_INTERP_VALUES]; TReal offset[3] = {offsets[0], offsets[1], offsets[2]}; const TIndex col_idx = out_idx - begin_idx; TReal* out_column = columns + filter_size_x * filter_size_y * filter_size_z * in_channels * col_idx; const int64_t neighbor_start = neighbors_row_splits[out_idx]; const int64_t neighbor_end = neighbors_row_splits[out_idx + 1]; TReal out_pos[3] = {out_positions[out_idx * 3 + 0], out_positions[out_idx * 3 + 1], out_positions[out_idx * 3 + 2]}; TReal inv_extents[3]; if (INDIVIDUAL_EXTENT) { if (ISOTROPIC_EXTENT) { inv_extents[0] = TReal(1) / extents[out_idx]; inv_extents[1] = inv_extents[0]; inv_extents[2] = inv_extents[0]; } else { inv_extents[0] = TReal(1) / extents[3 * out_idx + 0]; inv_extents[1] = TReal(1) / extents[3 * out_idx + 1]; inv_extents[2] = TReal(1) / extents[3 * out_idx + 2]; } } else { if (ISOTROPIC_EXTENT) { inv_extents[0] = TReal(1) / extents[0]; inv_extents[1] = inv_extents[0]; inv_extents[2] = inv_extents[0]; } else { inv_extents[0] = TReal(1) / extents[0]; inv_extents[1] = TReal(1) / extents[1]; inv_extents[2] = TReal(1) / extents[2]; } } TReal normalizer = TReal(0); if (NORMALIZE) { if (NEIGHBOR_IMPORTANCE) { for (int64_t n_idx = neighbor_start + threadIdx.x; n_idx < neighbor_end; n_idx += blockDim.x) { TReal n_importance = neighbors_importance[n_idx]; normalizer += n_importance; } unsigned int mask = __activemask(); for (int offset = blockDim.x / 2; offset > 0; offset /= 2) normalizer += __shfl_down_sync(mask, normalizer, offset); normalizer = __shfl_sync(mask, normalizer, 0); } else { int64_t num_neighbors = neighbor_end - neighbor_start; normalizer = num_neighbors; } } for (int64_t n_idx = neighbor_start; n_idx < neighbor_end; ++n_idx) { const TIndex inp_idx = neighbors_index[n_idx]; const TReal n_importance = NEIGHBOR_IMPORTANCE ? 
neighbors_importance[n_idx] : TReal(1); TReal x, y, z; x = inp_positions[inp_idx * 3 + 0] - out_pos[0]; y = inp_positions[inp_idx * 3 + 1] - out_pos[1]; z = inp_positions[inp_idx * 3 + 2] - out_pos[2]; ComputeFilterCoordinates<ALIGN_CORNERS, MAPPING>( x, y, z, filter_size_x, filter_size_y, filter_size_z, inv_extents[0], inv_extents[1], inv_extents[2], offset[0], offset[1], offset[2]); Interpolate<INTERPOLATION>(interp_weights, interp_indices, x, y, z, filter_size_x, filter_size_y, filter_size_z); TReal infeat = 0; TReal importance = 1; if (POINT_IMPORTANCE) importance = inp_importance[inp_idx]; if (NEIGHBOR_IMPORTANCE) importance *= n_importance; if (NORMALIZE && normalizer != 0) importance /= normalizer; for (int ic = threadIdx.x; ic < in_channels; ic += blockDim.x) { infeat = importance * inp_features[inp_idx * in_channels + ic]; for (int j = 0; j < NUM_INTERP_VALUES; ++j) { TReal value = interp_weights[j] * infeat; out_column[interp_indices[j] * in_channels + ic] += value; } } } // for n } template <class TReal, class TIndex> void FillColumn(const cudaStream_t& stream, TReal* columns, int in_channels, TIndex begin_idx, TIndex end_idx, TIndex num_out, const TReal* const __restrict__ out_positions, TIndex num_inp, const TReal* const __restrict__ inp_positions, const TReal* const __restrict__ inp_features, const TReal* const __restrict__ inp_importance, size_t neighbors_index_size, const TIndex* const __restrict__ neighbors_index, const TReal* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const TReal* const __restrict__ extents, const TReal* const __restrict__ offsets, const std::vector<int>& filter_dims, InterpolationMode interpolation, CoordinateMapping coordinate_mapping, bool align_corners, bool individual_extent, bool isotropic_extent, bool normalize) { const int filter_size_z = filter_dims[0]; const int filter_size_y = filter_dims[1]; const int filter_size_x = filter_dims[2]; TIndex num_columns = end_idx - begin_idx; int filter_spatial_size = filter_size_x * filter_size_y * filter_size_z; cudaMemsetAsync( columns, 0, sizeof(TReal) * filter_spatial_size * in_channels * num_columns, stream); const int BLOCKSIZE = 32; dim3 block(BLOCKSIZE, 1, 1); dim3 grid(0, 1, 1); grid.x = num_columns; #define FN_PARAMETERS \ columns, in_channels, begin_idx, end_idx, num_out, out_positions, num_inp, \ inp_positions, inp_features, inp_importance, neighbors_index_size, \ neighbors_index, neighbors_importance, neighbors_row_splits, \ extents, offsets, filter_size_x, filter_size_y, filter_size_z, \ individual_extent, isotropic_extent, normalize, \ inp_importance != nullptr, neighbors_importance != nullptr #define CALL_TEMPLATE(INTERPOLATION, MAPPING, ALIGN_CORNERS) \ if (INTERPOLATION == interpolation && MAPPING == coordinate_mapping && \ ALIGN_CORNERS == align_corners) \ FillColumnKernel<TReal, TIndex, ALIGN_CORNERS, MAPPING, INTERPOLATION> \ <<<grid, block, 0, stream>>>(FN_PARAMETERS); #define CALL_TEMPLATE2(INTERPOLATION, MAPPING) \ CALL_TEMPLATE(INTERPOLATION, MAPPING, true) \ CALL_TEMPLATE(INTERPOLATION, MAPPING, false) #define CALL_TEMPLATE3(INTERPOLATION) \ CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::BALL_TO_CUBE_RADIAL) \ CALL_TEMPLATE2(INTERPOLATION, \ CoordinateMapping::BALL_TO_CUBE_VOLUME_PRESERVING) \ CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::IDENTITY) #define CALL_TEMPLATE4 \ CALL_TEMPLATE3(InterpolationMode::LINEAR) \ CALL_TEMPLATE3(InterpolationMode::LINEAR_BORDER) \ CALL_TEMPLATE3(InterpolationMode::NEAREST_NEIGHBOR) if 
(grid.x) { CALL_TEMPLATE4 /*CHECK_CUDA_ERROR*/ } #undef CALL_TEMPLATE #undef CALL_TEMPLATE2 #undef CALL_TEMPLATE3 #undef CALL_TEMPLATE4 #undef FN_PARAMETERS } template void FillColumn<float, int32_t>( const cudaStream_t& stream, float* columns, int in_channels, int32_t begin_idx, int32_t end_idx, int32_t num_out, const float* const __restrict__ out_positions, int32_t num_inp, const float* const __restrict__ inp_positions, const float* const __restrict__ inp_features, const float* const __restrict__ inp_importance, size_t neighbors_index_size, const int32_t* const __restrict__ neighbors_index, const float* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const float* const __restrict__ extents, const float* const __restrict__ offsets, const std::vector<int>& filter_dims, InterpolationMode interpolation, CoordinateMapping coordinate_mapping, bool align_corners, bool individual_extent, bool isotropic_extent, bool normalize); template <class TReal, class TIndex, bool ALIGN_CORNERS, CoordinateMapping MAPPING, InterpolationMode INTERPOLATION> __global__ void FillColumnTransposeKernel( TReal* columns, int in_channels, TIndex begin_idx, TIndex end_idx, TIndex num_out, const TReal* const __restrict__ out_positions, TIndex num_inp, const TReal* const __restrict__ inp_positions, const TReal* const __restrict__ inp_features, size_t neighbors_index_size, const TIndex* const __restrict__ neighbors_index, const TReal* const __restrict__ inp_neighbors_importance_sum, const int64_t* const __restrict__ inp_neighbors_prefix_sum, const TReal* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const TReal* const __restrict__ extents, const TReal* const __restrict__ offsets, int filter_size_x, int filter_size_y, int filter_size_z, bool INDIVIDUAL_EXTENT, bool ISOTROPIC_EXTENT, bool NORMALIZE, bool NEIGHBOR_IMPORTANCE) { TIndex out_idx = begin_idx + blockIdx.x; if (out_idx >= end_idx) return; const int NUM_INTERP_VALUES = (INTERPOLATION == InterpolationMode::LINEAR || INTERPOLATION == InterpolationMode::LINEAR_BORDER ? 
8 : 1); TReal interp_weights[NUM_INTERP_VALUES]; TIndex interp_indices[NUM_INTERP_VALUES]; TReal offset[3] = {offsets[0], offsets[1], offsets[2]}; const TIndex col_idx = out_idx - begin_idx; TReal* out_column = columns + filter_size_x * filter_size_y * filter_size_z * in_channels * col_idx; const int64_t neighbor_start = neighbors_row_splits[out_idx]; const int64_t neighbor_end = neighbors_row_splits[out_idx + 1]; TReal out_pos[3] = {out_positions[out_idx * 3 + 0], out_positions[out_idx * 3 + 1], out_positions[out_idx * 3 + 2]}; TReal inv_extents[3]; if (INDIVIDUAL_EXTENT == false) { if (ISOTROPIC_EXTENT) { inv_extents[0] = TReal(1) / extents[0]; inv_extents[1] = inv_extents[0]; inv_extents[2] = inv_extents[0]; } else { inv_extents[0] = TReal(1) / extents[0]; inv_extents[1] = TReal(1) / extents[1]; inv_extents[2] = TReal(1) / extents[2]; } } for (int64_t n_idx = neighbor_start; n_idx < neighbor_end; ++n_idx) { const TIndex inp_idx = neighbors_index[n_idx]; TReal x, y, z; x = out_pos[0] - inp_positions[inp_idx * 3 + 0]; y = out_pos[1] - inp_positions[inp_idx * 3 + 1]; z = out_pos[2] - inp_positions[inp_idx * 3 + 2]; if (INDIVIDUAL_EXTENT) { if (ISOTROPIC_EXTENT) { inv_extents[0] = TReal(1) / extents[inp_idx]; inv_extents[1] = inv_extents[0]; inv_extents[2] = inv_extents[0]; } else { inv_extents[0] = TReal(1) / extents[3 * inp_idx + 0]; inv_extents[1] = TReal(1) / extents[3 * inp_idx + 1]; inv_extents[2] = TReal(1) / extents[3 * inp_idx + 2]; } } TReal num_inp_neighbors_normalizer = 1; if (NORMALIZE) { if (NEIGHBOR_IMPORTANCE) { if (inp_neighbors_importance_sum[inp_idx] != 0) num_inp_neighbors_normalizer /= inp_neighbors_importance_sum[inp_idx]; } else { const int64_t inp_neighbor_start = inp_neighbors_prefix_sum[inp_idx]; const int64_t inp_neighbor_end = inp_idx + 1 < num_inp ? 
inp_neighbors_prefix_sum[inp_idx + 1] : neighbors_index_size; const size_t num_inp_neighbors = inp_neighbor_end - inp_neighbor_start; if (num_inp_neighbors > 0) num_inp_neighbors_normalizer /= num_inp_neighbors; } } ComputeFilterCoordinates<ALIGN_CORNERS, MAPPING>( x, y, z, filter_size_x, filter_size_y, filter_size_z, inv_extents[0], inv_extents[1], inv_extents[2], offset[0], offset[1], offset[2]); Interpolate<INTERPOLATION>(interp_weights, interp_indices, x, y, z, filter_size_x, filter_size_y, filter_size_z); TReal infeat = 0; for (int ic = threadIdx.x; ic < in_channels; ic += blockDim.x) { infeat = inp_features[inp_idx * in_channels + ic]; if (NEIGHBOR_IMPORTANCE) infeat *= neighbors_importance[n_idx]; if (NORMALIZE) infeat *= num_inp_neighbors_normalizer; for (int j = 0; j < NUM_INTERP_VALUES; ++j) { TReal value = interp_weights[j] * infeat; out_column[interp_indices[j] * in_channels + ic] += value; } } } // for n } template <class TReal, class TIndex> void FillColumnTranspose( const cudaStream_t& stream, TReal* columns, int in_channels, TIndex begin_idx, TIndex end_idx, TIndex num_out, const TReal* const __restrict__ out_positions, TIndex num_inp, const TReal* const __restrict__ inp_positions, const TReal* const __restrict__ inp_features, const TReal* const __restrict__ inp_neighbors_importance_sum, const int64_t* const __restrict__ inp_neighbors_prefix_sum, size_t neighbors_index_size, const TIndex* const __restrict__ neighbors_index, const TReal* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const TReal* const __restrict__ extents, const TReal* const __restrict__ offsets, const std::vector<int>& filter_dims, InterpolationMode interpolation, CoordinateMapping coordinate_mapping, bool align_corners, bool individual_extent, bool isotropic_extent, bool normalize) { const bool has_neighbors_importance = inp_neighbors_importance_sum; const int filter_size_z = filter_dims[0]; const int filter_size_y = filter_dims[1]; const int filter_size_x = filter_dims[2]; TIndex num_columns = end_idx - begin_idx; int filter_spatial_size = filter_size_x * filter_size_y * filter_size_z; cudaMemsetAsync( columns, 0, sizeof(TReal) * filter_spatial_size * in_channels * num_columns, stream); const int BLOCKSIZE = 32; dim3 block(BLOCKSIZE, 1, 1); dim3 grid(0, 1, 1); grid.x = num_columns; #define FN_PARAMETERS \ columns, in_channels, begin_idx, end_idx, num_out, out_positions, num_inp, \ inp_positions, inp_features, neighbors_index_size, \ neighbors_index, inp_neighbors_importance_sum, \ inp_neighbors_prefix_sum, neighbors_importance, \ neighbors_row_splits, extents, offsets, filter_size_x, \ filter_size_y, filter_size_z, individual_extent, isotropic_extent, \ normalize, has_neighbors_importance #define CALL_TEMPLATE(INTERPOLATION, MAPPING, ALIGN_CORNERS) \ if (INTERPOLATION == interpolation && MAPPING == coordinate_mapping && \ ALIGN_CORNERS == align_corners) \ FillColumnTransposeKernel<TReal, TIndex, ALIGN_CORNERS, MAPPING, \ INTERPOLATION> \ <<<grid, block, 0, stream>>>(FN_PARAMETERS); #define CALL_TEMPLATE2(INTERPOLATION, MAPPING) \ CALL_TEMPLATE(INTERPOLATION, MAPPING, true) \ CALL_TEMPLATE(INTERPOLATION, MAPPING, false) #define CALL_TEMPLATE3(INTERPOLATION) \ CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::BALL_TO_CUBE_RADIAL) \ CALL_TEMPLATE2(INTERPOLATION, \ CoordinateMapping::BALL_TO_CUBE_VOLUME_PRESERVING) \ CALL_TEMPLATE2(INTERPOLATION, CoordinateMapping::IDENTITY) #define CALL_TEMPLATE4 \ CALL_TEMPLATE3(InterpolationMode::LINEAR) \ 
CALL_TEMPLATE3(InterpolationMode::LINEAR_BORDER) \ CALL_TEMPLATE3(InterpolationMode::NEAREST_NEIGHBOR) if (grid.x) { CALL_TEMPLATE4 /*CHECK_CUDA_ERROR*/ } #undef CALL_TEMPLATE #undef CALL_TEMPLATE2 #undef CALL_TEMPLATE3 #undef CALL_TEMPLATE4 #undef FN_PARAMETERS } template void FillColumnTranspose<float, int32_t>( const cudaStream_t& stream, float* columns, int in_channels, int32_t begin_idx, int32_t end_idx, int32_t num_out, const float* const __restrict__ out_positions, int32_t num_inp, const float* const __restrict__ inp_positions, const float* const __restrict__ inp_features, const float* const __restrict__ inp_neighbors_importance_sum, const int64_t* const __restrict__ inp_neighbors_prefix_sum, size_t neighbors_index_size, const int32_t* const __restrict__ neighbors_index, const float* const __restrict__ neighbors_importance, const int64_t* const __restrict__ neighbors_row_splits, const float* const __restrict__ extents, const float* const __restrict__ offsets, const std::vector<int>& filter_dims, InterpolationMode interpolation, CoordinateMapping coordinate_mapping, bool align_corners, bool individual_extent, bool isotropic_extent, bool normalize); template <class T> __global__ void MultiplyColumnsKernel(size_t rows, size_t cols, T* __restrict__ col_major_matrix, const T* const __restrict__ vector) { size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= rows * cols) return; size_t col = idx / rows; T factor = vector[col]; col_major_matrix[idx] *= factor; } template <class T> void MultiplyColumns(const cudaStream_t& stream, size_t rows, size_t cols, T* __restrict__ col_major_matrix, const T* const __restrict__ vector) { const int BLOCKSIZE = 128; dim3 block(BLOCKSIZE, 1, 1); dim3 grid(0, 1, 1); grid.x = DivUp(rows * cols, BLOCKSIZE); if (grid.x) { MultiplyColumnsKernel<T><<<grid, block, 0, stream>>>( rows, cols, col_major_matrix, vector); } } template void MultiplyColumns<float>(const cudaStream_t& stream, size_t rows, size_t cols, float* __restrict__ col_major_matrix, const float* const __restrict__ vector); template <class T> __global__ void MultiplyAndCopyColumnsKernel( size_t rows, size_t cols, T* __restrict__ out_ptr, const T* const __restrict__ col_major_matrix, const T* const __restrict__ vector) { size_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= rows * cols) return; size_t col = idx / rows; T factor = vector[col]; out_ptr[idx] = col_major_matrix[idx] * factor; } template <class T> void MultiplyAndCopyColumns(const cudaStream_t& stream, size_t rows, size_t cols, T* __restrict__ out_ptr, const T* const __restrict__ col_major_matrix, const T* const __restrict__ vector) { const int BLOCKSIZE = 128; dim3 block(BLOCKSIZE, 1, 1); dim3 grid(0, 1, 1); grid.x = DivUp(rows * cols, BLOCKSIZE); if (grid.x) { MultiplyAndCopyColumnsKernel<T><<<grid, block, 0, stream>>>( rows, cols, out_ptr, col_major_matrix, vector); } } template void MultiplyAndCopyColumns<float>( const cudaStream_t& stream, size_t rows, size_t cols, float* __restrict__ out_ptr, const float* const __restrict__ col_major_matrix, const float* const __restrict__ vector); } // namespace detail } // namespace ml } // namespace open3d
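// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original files above. The
// CALL_TEMPLATE / CALL_TEMPLATE2 / CALL_TEMPLATE3 / CALL_TEMPLATE4 macro
// cascade used by FillColumn and FillColumnTranspose turns runtime options
// (interpolation, coordinate_mapping, align_corners) into compile-time
// template arguments by enumerating every combination and guarding each
// instantiation with an if-check. The minimal, self-contained example below
// shows the same dispatch idea with a single bool flag; ToyKernel and
// DispatchToy are hypothetical names invented for this sketch.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

template <bool FLAG>
__global__ void ToyKernel(float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    // FLAG is a template argument, so this branch is resolved at compile time.
    out[i] = FLAG ? 1.0f : 0.0f;
}

inline void DispatchToy(bool flag, float* out, int n, cudaStream_t stream) {
    dim3 block(128, 1, 1);
    dim3 grid((n + block.x - 1) / block.x, 1, 1);
    // Enumerate the template instantiations and pick the one matching the
    // runtime flag, mirroring the CALL_TEMPLATE pattern above.
#define CALL_TOY(FLAG)                                        \
    if (FLAG == flag)                                         \
        ToyKernel<FLAG><<<grid, block, 0, stream>>>(out, n);
    if (grid.x) {
        CALL_TOY(true)
        CALL_TOY(false)
    }
#undef CALL_TOY
}
// ---------------------------------------------------------------------------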
ab4140d7057f09d63adafc97716b4d974e5e3e01.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////// // // Copyright 2014 PMC-Sierra, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 Unless required by // applicable law or agreed to in writing, software distributed under the // License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for // the specific language governing permissions and limitations under the // License. // //////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// // // Author: Logan Gunthorpe // // Date: Oct 23 2014 // // Description: // Image CUDA Routines // //////////////////////////////////////////////////////////////////////// #include "image_cuda.h" #include "error.h" #include <assert.h> __global__ void rot180(image_px *src, size_t w, size_t h, size_t bufwidth) { extern __shared__ image_px block1[]; image_px *block2 = &block1[blockDim.x]; int tid = threadIdx.x; int x1 = tid + blockIdx.x * blockDim.x; int y1 = blockIdx.y * blockDim.y; int x2 = w - x1; int y2 = h - y1; block1[tid] = src[y1*bufwidth + x1]; block2[tid] = src[y2*bufwidth + x2]; __syncthreads(); if (x2 < 0 || y2 < 0) return; if (y1 == y2 && x1 > w /2) return; src[y1*bufwidth + x1] = block2[tid]; src[y2*bufwidth + x2] = block1[tid]; } hipError_t image_rot180_cuda(struct image *img) { size_t threads_per_block = 1024; while (threads_per_block > img->width) threads_per_block -= 32; dim3 block_size, grid_size; block_size.x = threads_per_block; grid_size.x = (img->width + threads_per_block - 1) / threads_per_block; grid_size.y = (img->height+1)/2; size_t shared_mem = threads_per_block * 2 * sizeof(image_px); hipLaunchKernelGGL(( rot180), dim3(grid_size), dim3(block_size), shared_mem, image_stream(img), img->buf, img->width-1, img->height-1, img->width); return hipPeekAtLastError(); }
ab4140d7057f09d63adafc97716b4d974e5e3e01.cu
//////////////////////////////////////////////////////////////////////// // // Copyright 2014 PMC-Sierra, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 Unless required by // applicable law or agreed to in writing, software distributed under the // License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for // the specific language governing permissions and limitations under the // License. // //////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// // // Author: Logan Gunthorpe // // Date: Oct 23 2014 // // Description: // Image CUDA Routines // //////////////////////////////////////////////////////////////////////// #include "image_cuda.h" #include "error.h" #include <assert.h> __global__ void rot180(image_px *src, size_t w, size_t h, size_t bufwidth) { extern __shared__ image_px block1[]; image_px *block2 = &block1[blockDim.x]; int tid = threadIdx.x; int x1 = tid + blockIdx.x * blockDim.x; int y1 = blockIdx.y * blockDim.y; int x2 = w - x1; int y2 = h - y1; block1[tid] = src[y1*bufwidth + x1]; block2[tid] = src[y2*bufwidth + x2]; __syncthreads(); if (x2 < 0 || y2 < 0) return; if (y1 == y2 && x1 > w /2) return; src[y1*bufwidth + x1] = block2[tid]; src[y2*bufwidth + x2] = block1[tid]; } cudaError_t image_rot180_cuda(struct image *img) { size_t threads_per_block = 1024; while (threads_per_block > img->width) threads_per_block -= 32; dim3 block_size, grid_size; block_size.x = threads_per_block; grid_size.x = (img->width + threads_per_block - 1) / threads_per_block; grid_size.y = (img->height+1)/2; size_t shared_mem = threads_per_block * 2 * sizeof(image_px); rot180<<<grid_size, block_size, shared_mem, image_stream(img)>>> (img->buf, img->width-1, img->height-1, img->width); return cudaPeekAtLastError(); }
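// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original files above. The rot180
// .hip/.cu pair differs only in the names and launch syntax that hipify
// rewrites: cudaError_t -> hipError_t, cudaPeekAtLastError ->
// hipPeekAtLastError, and kernel<<<grid, block, shmem, stream>>>(args) ->
// hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), shmem, stream, args).
// The tiny example below shows the same translation on a trivial kernel;
// fill_ones and launch_fill_ones are hypothetical names for this sketch.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstddef>

__global__ void fill_ones(float* buf, size_t n) {
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] = 1.0f;
}

cudaError_t launch_fill_ones(float* buf, size_t n, cudaStream_t stream) {
    dim3 block(256, 1, 1);
    dim3 grid((unsigned int)((n + block.x - 1) / block.x), 1, 1);
    // CUDA launch syntax (.cu); hipify rewrites this line to
    //   hipLaunchKernelGGL(fill_ones, grid, block, 0, stream, buf, n);
    fill_ones<<<grid, block, 0, stream>>>(buf, n);
    return cudaPeekAtLastError();
}
// ---------------------------------------------------------------------------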
a1628a6065fc3a434382af00839c4a0a8d5f2461.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "hip/hip_runtime.h" #include "functions.c" //compute a*b mod p safely __device__ unsigned int modprodC(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } // end while return ab; } //end modprodC function //compute a^b mod p safely __device__ unsigned int modExpC(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprodC(aExpb, z, p); z = modprodC(z, z, p); b /= 2; } // end while return aExpb; } // end modprodC function __global__ void findSecretKey(unsigned int g, unsigned int h, unsigned int p, unsigned int *d_a) { int threadId = threadIdx.x; int blockId = blockIdx.x; int Nblock = blockDim.x; int id = threadId+blockId*Nblock; if (id < (p-1)) { if(modExpC(g, id, p) == h) { *d_a = id; } // end inner if } // end outer if } // end findKey int main (int argc, char **argv) { /* Part 2. Start this program by first copying the contents of the main function from your completed decrypt.c main function. */ //declare storage for an ElGamal cryptosytem unsigned int n, p, g, h, x; unsigned int Nints; //get the secret key from the user printf("Enter the secret key (0 if unknown): "); fflush(stdout); char stat = scanf("%u",&x); printf("Reading file.\n"); /* Q3 Complete this function. Read in the public key data from public_key.txt and the cyphertexts from messages.txt. */ // for bonus just change the quotes to "bonus_public_key.txt", pretty self explanatory FILE* pk = fopen("public_key.txt","r"); // same for this except change quotes to "bonus_message.txt" FILE* msg = fopen("messages.txt", "r"); fscanf(pk, "%u\n%u\n%u\n%u\n", &n, &p, &g, &h); fclose(pk); fscanf(msg, "%u\n", &Nints); unsigned int *mhat = (unsigned int *) malloc(Nints*sizeof(unsigned int)); unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); for (int i = 0; i < Nints; i++) { fscanf(msg, "%u %u\n", &mhat[i], &a[i]); } // end for fclose(msg); // } // end main /* Q3 After finding the secret key, decrypt the message */ unsigned int found; unsigned int *d_a; hipMalloc(&d_a, sizeof(unsigned int)); if (x == 0 || modExp(g, x, p) != h) { printf("Finding the secret key ... \n"); } // end if double startTime = clock(); unsigned int Nthreads = 32; unsigned int Nblocks = (p-1)/Nthreads; hipLaunchKernelGGL(( findSecretKey) , dim3(Nblocks), dim3(Nthreads), 0, 0, g, h, p, d_a); hipDeviceSynchronize(); hipMemcpy(&found, d_a, sizeof(unsigned int), hipMemcpyDeviceToHost); double endTime = clock(); double totalTime = (endTime-startTime)/CLOCKS_PER_SEC; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); printf("Secret key found! x = %u \n", found); int Nchars = ((n-1)/8)*Nints; unsigned char *decrypted = (unsigned char *) malloc(1024*sizeof(unsigned char)); ElGamalDecrypt(mhat, a, Nints, p, found); convertZToString(mhat, Nints, decrypted, Nchars); printf("Decrypted message: %s\n", decrypted); hipFree(d_a); /* Q4 Make the search for the secret key parallel on the GPU using CUDA. */ return 0; }
a1628a6065fc3a434382af00839c4a0a8d5f2461.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "cuda.h" #include "functions.c" //compute a*b mod p safely __device__ unsigned int modprodC(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } // end while return ab; } //end modprodC function //compute a^b mod p safely __device__ unsigned int modExpC(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprodC(aExpb, z, p); z = modprodC(z, z, p); b /= 2; } // end while return aExpb; } // end modprodC function __global__ void findSecretKey(unsigned int g, unsigned int h, unsigned int p, unsigned int *d_a) { int threadId = threadIdx.x; int blockId = blockIdx.x; int Nblock = blockDim.x; int id = threadId+blockId*Nblock; if (id < (p-1)) { if(modExpC(g, id, p) == h) { *d_a = id; } // end inner if } // end outer if } // end findKey int main (int argc, char **argv) { /* Part 2. Start this program by first copying the contents of the main function from your completed decrypt.c main function. */ //declare storage for an ElGamal cryptosytem unsigned int n, p, g, h, x; unsigned int Nints; //get the secret key from the user printf("Enter the secret key (0 if unknown): "); fflush(stdout); char stat = scanf("%u",&x); printf("Reading file.\n"); /* Q3 Complete this function. Read in the public key data from public_key.txt and the cyphertexts from messages.txt. */ // for bonus just change the quotes to "bonus_public_key.txt", pretty self explanatory FILE* pk = fopen("public_key.txt","r"); // same for this except change quotes to "bonus_message.txt" FILE* msg = fopen("messages.txt", "r"); fscanf(pk, "%u\n%u\n%u\n%u\n", &n, &p, &g, &h); fclose(pk); fscanf(msg, "%u\n", &Nints); unsigned int *mhat = (unsigned int *) malloc(Nints*sizeof(unsigned int)); unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); for (int i = 0; i < Nints; i++) { fscanf(msg, "%u %u\n", &mhat[i], &a[i]); } // end for fclose(msg); // } // end main /* Q3 After finding the secret key, decrypt the message */ unsigned int found; unsigned int *d_a; cudaMalloc(&d_a, sizeof(unsigned int)); if (x == 0 || modExp(g, x, p) != h) { printf("Finding the secret key ... \n"); } // end if double startTime = clock(); unsigned int Nthreads = 32; unsigned int Nblocks = (p-1)/Nthreads; findSecretKey <<<Nblocks, Nthreads>>>(g, h, p, d_a); cudaDeviceSynchronize(); cudaMemcpy(&found, d_a, sizeof(unsigned int), cudaMemcpyDeviceToHost); double endTime = clock(); double totalTime = (endTime-startTime)/CLOCKS_PER_SEC; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); printf("Secret key found! x = %u \n", found); int Nchars = ((n-1)/8)*Nints; unsigned char *decrypted = (unsigned char *) malloc(1024*sizeof(unsigned char)); ElGamalDecrypt(mhat, a, Nints, p, found); convertZToString(mhat, Nints, decrypted, Nchars); printf("Decrypted message: %s\n", decrypted); cudaFree(d_a); /* Q4 Make the search for the secret key parallel on the GPU using CUDA. */ return 0; }
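// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original files above. The launch in
// this pair computes Nblocks = (p-1)/Nthreads with integer division, so when
// (p-1) is not a multiple of Nthreads the last partial block -- and with it
// up to Nthreads-1 candidate keys -- is never launched. Since the kernel
// already guards with `if (id < (p-1))`, a ceiling division would cover the
// full range. The helper below is a suggested adjustment, not what the
// original file does; div_up and launch_findSecretKey are hypothetical names.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

static inline unsigned int div_up(unsigned int a, unsigned int b) {
    return (a + b - 1) / b;
}

// Declaration of the kernel defined in the file above (assumes it is linked
// into the same program).
__global__ void findSecretKey(unsigned int g, unsigned int h, unsigned int p,
                              unsigned int* d_a);

void launch_findSecretKey(unsigned int g, unsigned int h, unsigned int p,
                          unsigned int* d_a) {
    const unsigned int Nthreads = 32;
    // Ceiling division also launches the final partial block; the kernel's
    // bounds check keeps the surplus threads from doing any work.
    const unsigned int Nblocks = div_up(p - 1, Nthreads);
    findSecretKey<<<Nblocks, Nthreads>>>(g, h, p, d_a);
    cudaDeviceSynchronize();
}
// ---------------------------------------------------------------------------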
4e46b9570399a0c00eb4a75fcfd2a1ac8cc17f4b.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (C) 2016 Yusuke Suzuki <[email protected]> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "host_memory.cuh" #include "utility.h" #include <hip/hip_runtime.h> namespace gloop { HostMemory::HostMemory(std::size_t size, unsigned flags) : m_size(size) , m_flags(flags) { GLOOP_CUDA_SAFE_CALL(hipHostMalloc(&m_hostPointer, size, flags)); } HostMemory::~HostMemory() { GLOOP_CUDA_SAFE_CALL(hipHostFree(m_hostPointer)); } } // namespace gloop
4e46b9570399a0c00eb4a75fcfd2a1ac8cc17f4b.cu
/* Copyright (C) 2016 Yusuke Suzuki <[email protected]> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "host_memory.cuh" #include "utility.h" #include <cuda_runtime.h> namespace gloop { HostMemory::HostMemory(std::size_t size, unsigned flags) : m_size(size) , m_flags(flags) { GLOOP_CUDA_SAFE_CALL(cudaHostAlloc(&m_hostPointer, size, flags)); } HostMemory::~HostMemory() { GLOOP_CUDA_SAFE_CALL(cudaFreeHost(m_hostPointer)); } } // namespace gloop
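// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original files above. This HostMemory
// pair shows the pinned host-memory mappings hipify applies:
// cuda_runtime.h -> hip/hip_runtime.h, cudaHostAlloc -> hipHostMalloc, and
// cudaFreeHost -> hipHostFree. The standalone example below uses the CUDA
// side with the hipified calls noted in comments; alloc_pinned and
// free_pinned are hypothetical names, and error handling is a bare assert
// instead of the GLOOP_CUDA_SAFE_CALL macro used in the file above.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cassert>
#include <cstddef>

void* alloc_pinned(std::size_t size, unsigned int flags) {
    void* p = nullptr;
    // Hipified form: hipHostMalloc(&p, size, flags);
    cudaError_t err = cudaHostAlloc(&p, size, flags);
    assert(err == cudaSuccess);
    (void)err;
    return p;
}

void free_pinned(void* p) {
    // Hipified form: hipHostFree(p);
    cudaError_t err = cudaFreeHost(p);
    assert(err == cudaSuccess);
    (void)err;
}
// ---------------------------------------------------------------------------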
9180e408868ba0dc5d632364ba1b460fb90e100c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <bits/stdc++.h> #include <cmath> #include "../include/common.h" #include "../include/functions.cuh" void convolutionOnHost(unsigned char *dst, unsigned char *src, float *kernel, int kernelSide, const int width, const int height, const int channels) { unsigned int margin = int((kernelSide - 1) / 2); // Loop through each pixel. for (int y = margin; y < width - margin; y++) { for (int x = margin; x < height - margin; x++) { // Loop through each element of the kernel. for (int dy = 0; dy < kernelSide; dy++) { for (int dx = 0; dx < kernelSide; dx++) { // Loop through the channels of the image. for (int c = 0; c < channels; c++) { int src_i = channels * ((x + (dx - margin)) * width + (y + (dy - margin))) + c; int ker_i = dx * kernelSide + dy; int dst_i = channels * (x * width + y) + c; // Reset dst element at the start of the conv. if (ker_i == 0) { dst[dst_i] = 0; } // Add result of multiplication. dst[dst_i] += int(src[src_i] * kernel[ker_i]); } } } } } } __global__ void convolutionOnDevice(unsigned char *dst, unsigned char *src, float *kernel, int kernelSide, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } unsigned int margin = int((kernelSide - 1) / 2); int x = (int)i / width; int y = (i % width); // Check for minimum padding. if (y < margin or y > width - margin - 1 or x < margin or x > height - margin - 1) { return; } // Loop through each element of the kernel. for (int dy = 0; dy < kernelSide; dy++) { for (int dx = 0; dx < kernelSide; dx++) { // Loop through the channels of the image. for (int c = 0; c < channels; c++) { int src_i = channels * ((x + (dx - margin)) * width + (y + (dy - margin))) + c; int ker_i = dx * kernelSide + dy; int dst_i = channels * i + c; // Reset dst element at the start of the conv. if (ker_i == 0) { dst[dst_i] = 0; } // Add result of multiplication. dst[dst_i] += int(src[src_i] * kernel[ker_i]); } } } } void drawLineOnHost(unsigned char *data, int x1, int y1, int x2, int y2, int radius, int *color, int colorSize, int width, int height, int channels) { for (int dy = min(y1, y2); dy < max(y1, y2); dy++) { for (int dx = min(x1, x2); dx < max(x1, x2); dx++) { int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1); if (interpolatedY - radius > dy or interpolatedY + radius < dy) { continue; } int index = (dx * width + dy) * channels; for (int c = 0; c < min(channels, colorSize); c++) { if (index + c < width * height * channels) { data[index + c] = color[c]; } } } } } __global__ void drawLineOnDevice(unsigned char *data, int x1, int y1, int x2, int y2, int radius, int *color, int colorSize, int width, int height, int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } int dx = (int)i / width; int dy = (i % width); // Check for boundaries. 
int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1); if (dx < min(x1, x2) or dx >= max(x1, x2) or dy < min(y1, y2) or dy >= max(y1, y2) or interpolatedY - radius > dy or interpolatedY + radius < dy) { return; } for (int c = 0; c < min(channels, colorSize); c++) { int index = channels * i; if (index + c < width * height * channels) { data[index + c] = color[c]; } } } void drawPointOnHost(unsigned char *data, int x, int y, int radius, int *color, int colorSize, int width, int height, int channels) { for (int dy = max(0, y - radius); dy < y + radius; dy++) { for (int dx = max(0, x - radius); dx < x + radius; dx++) { int index = (dx * width + dy) * channels; for (int c = 0; c < min(channels, colorSize); c++) { if (index + c < width * height * channels) { data[index + c] = color[c]; } } } } } __global__ void drawPointOnDevice(unsigned char *data, int x, int y, int radius, int *color, int colorSize, int width, int height, int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } int dx = (int)i / width; int dy = (i % width); // Check for point boundaries. if (dy < y - radius or dy >= y + radius or dx < x - radius or dx >= x + radius) { return; } for (int c = 0; c < min(channels, colorSize); c++) { int index = channels * i; if (index + c < width * height * channels) { data[index + c] = color[c]; } } } void differenceOnHost(unsigned char *dst, unsigned char *src, const int width, const int height, const int channels) { for (int y = 0; y < width; y++) { for (int x = 0; x < height; x++) { for (int c = 0; c < channels; c++) { int i = channels * (x * width + y) + c; if (dst[i] > src[i]) { dst[i] = dst[i] - src[i]; } else { dst[i] = src[i] - dst[i]; } } } } } __global__ void differenceOnDevice(unsigned char *dst, unsigned char *src, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height * channels) { return; } if (dst[i] > src[i]) { dst[i] = dst[i] - src[i]; } else { dst[i] = src[i] - dst[i]; } } void cornerScoreOnHost(unsigned char *gradX, unsigned char *gradY, float *R, int width, int height) { const int windowSide = 3; const int windowMargin = int((windowSide - 1) / 2); for (int i = 0; i < width * height; i++) { int x = (int)i / width; int y = (i % width); // Check for out-of-bound coordinates. R[i] = 0; if (x < windowMargin or y < windowMargin or x > height - windowMargin - 1 or y > width - windowMargin - 1) { continue; } // Create the windows Ix and Iy. float *Ix = new float[windowSide * windowSide]; float *Iy = new float[windowSide * windowSide]; for (int wi = 0; wi < windowSide * windowSide; wi++) { int dx = ((int)wi / windowSide) - windowMargin; int dy = (wi % windowSide) - windowMargin; int di = (x + dx) * width + (y + dy); Ix[wi] = (float)gradX[di] / PIXEL_VALUES; Iy[wi] = (float)gradY[di] / PIXEL_VALUES; } // Construct the structural matrix. float *M = new float[4]; sumOfMatmulOnHost(&M[0], Ix, Ix, windowSide); sumOfMatmulOnHost(&M[1], Ix, Iy, windowSide); sumOfMatmulOnHost(&M[2], Iy, Ix, windowSide); sumOfMatmulOnHost(&M[3], Iy, Iy, windowSide); // Evaluate the pixel score. float m = (M[0] + M[3]) / 2; float p = (M[0] * M[3]) - (M[1] * M[2]); float lambda1 = m + sqrt(m * m - p); float lambda2 = m - sqrt(m * m - p); R[i] = min(lambda1, lambda2); // Free memory. 
delete[] Ix; delete[] Iy; delete[] M; } } __global__ void cornerScoreOnDevice(unsigned char *gradX, unsigned char *gradY, float *R, int width, int height) { int i = blockIdx.x * blockDim.x + threadIdx.x; const int windowSide = 3; const int windowMargin = int((windowSide - 1) / 2); // Check for overflow. if (i >= width * height) { return; } int x = (int)i / width; int y = (i % width); // Check for out-of-bound coordinates. R[i] = 0; if (x < windowMargin or y < windowMargin or x > height - windowMargin - 1 or y > width - windowMargin - 1) { return; } // Create the windows Ix and Iy. float *Ix = new float[windowSide * windowSide]; float *Iy = new float[windowSide * windowSide]; for (int wi = 0; wi < windowSide * windowSide; wi++) { int dx = ((int)wi / windowSide) - windowMargin; int dy = (wi % windowSide) - windowMargin; int di = (x + dx) * width + (y + dy); Ix[wi] = (float)gradX[di] / PIXEL_VALUES; Iy[wi] = (float)gradY[di] / PIXEL_VALUES; } // Construct the structural matrix. float *M = new float[4]{0, 0, 0, 0}; sumOfMatmulOnDevice(&M[0], Ix, Ix, windowSide); sumOfMatmulOnDevice(&M[1], Ix, Iy, windowSide); sumOfMatmulOnDevice(&M[2], Iy, Ix, windowSide); sumOfMatmulOnDevice(&M[3], Iy, Iy, windowSide); // Evaluate the pixel score. float m = (M[0] + M[3]) / 2; float p = (M[0] * M[3]) - (M[1] * M[2]); float lambda1 = m + sqrt(m * m - p); float lambda2 = m - sqrt(m * m - p); R[i] = min(lambda1, lambda2); // Free memory. delete[] Ix; delete[] Iy; delete[] M; } void opticalFLowOnHost(int *currentCorners, int *corners, int maxCorners, unsigned char **currPyramidalScales, unsigned char **prevPyramidalScales, int levels, int width0, int height0) { const int patchSide = 5; const int windowSide = 9; const int windowMargin = int((windowSide - 1) / 2); unsigned char *prevPatch = new unsigned char[patchSide * patchSide]; unsigned char *currPatch = new unsigned char[patchSide * patchSide]; for (int l = levels - 1; l >= 0; l--) { int width = width0 / pow(2, l); int height = height0 / pow(2, l); float minSse; for (int i = 0; i < maxCorners; i++) { // Downscale corner from the previous frame. int lx = (corners[i] / width0) * pow(2, -l); int ly = (corners[i] % width0) * pow(2, -l); int prevCorner = int(lx * width + ly); minSse = 100; if (l == levels - 1) { currentCorners[i] = prevCorner; } else { // Upscale corner from the previous layer. 
int ux = int(currentCorners[i] / (width * 0.5)) * 2; int uy = (currentCorners[i] % int((width * 0.5))) * 2; currentCorners[i] = int(ux * width + uy); } extractPatchOnHost(prevPatch, prevPyramidalScales[l], prevCorner, patchSide, width, height); int x = (int)currentCorners[i] / width; int y = currentCorners[i] % width; for (int wi = 0; wi < windowSide * windowSide; wi++) { int dx = ((int)wi / windowSide) - windowMargin; int dy = (wi % windowSide) - windowMargin; int di = (x + dx) * width + (y + dy); extractPatchOnHost(currPatch, currPyramidalScales[l], di, patchSide, width, height); float sse = sumOfSquareDifferencesOnHost(prevPatch, currPatch, patchSide); if (sse < minSse) { currentCorners[i] = di; minSse = sse; } } } } } __global__ void opticalFLowOnDevice(int *currentCorners, int *corners, int maxCorners, unsigned char *currPyramidalScales, unsigned char *prevPyramidalScales, int levels, int offsetSize, int width0, int height0) { int i = blockIdx.x * blockDim.x + threadIdx.x; const int patchSide = 5; const int windowSide = 9; const int windowMargin = int((windowSide - 1) / 2); unsigned char *prevPatch = new unsigned char[patchSide * patchSide]; unsigned char *currPatch = new unsigned char[patchSide * patchSide]; for (int l = levels - 1; l >= 0; l--) { int width = width0 / pow(2, l); int height = height0 / pow(2, l); float minSse = 100; // Downscale corner from the previous frame. int lx = (corners[i] / width0) * pow(2, -l); int ly = (corners[i] % width0) * pow(2, -l); int prevCorner = int(lx * width + ly); if (l == levels - 1) { currentCorners[i] = prevCorner; } else { // Upscale corner from the previous layer. int ux = int(currentCorners[i] / (width * 0.5)) * 2; int uy = (currentCorners[i] % int((width * 0.5))) * 2; currentCorners[i] = int(ux * width + uy); } extractPatchOnDevice(prevPatch, prevPyramidalScales + l * offsetSize, prevCorner, patchSide, width, height); int x = (int)currentCorners[i] / width; int y = currentCorners[i] % width; for (int wi = 0; wi < windowSide * windowSide; wi++) { int dx = ((int)wi / windowSide) - windowMargin; int dy = (wi % windowSide) - windowMargin; int di = (x + dx) * width + (y + dy); extractPatchOnDevice(currPatch, currPyramidalScales + l * offsetSize, di, patchSide, width, height); float sse = sumOfSquareDifferencesOnDevice(prevPatch, currPatch, patchSide); if (sse < minSse) { currentCorners[i] = di; minSse = sse; } } } delete[] prevPatch; delete[] currPatch; } void rotateOnHost(unsigned char *dst, unsigned char *src, const double radian, const int width, const int height, const int channels) { for (int y = 0; y < width; y++) { for (int x = 0; x < height; x++) { // Evaluate the source pixels. int x_center = x - round(height / 2.0); int y_center = y - round(width / 2.0); double xa = x_center * cos(-radian) - y_center * sin(-radian) + round(height / 2.0); double ya = x_center * sin(-radian) + y_center * cos(-radian) + round(width / 2.0); // Check for out-of-bound coordinates. if (xa < 0 or xa > height or ya < 0 or ya > width) { // Set pixels to black and exit for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; dst[ib] = 0; } continue; } for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; // Evaluate the four pixels given xs and ys roundings. 
int ia[4] = { channels * (int(floor(xa)) * width + int(floor(ya))) + c, channels * (int(floor(xa)) * width + int(ceil(ya))) + c, channels * (int(ceil(xa)) * width + int(floor(ya))) + c, channels * (int(ceil(xa)) * width + int(ceil(ya))) + c}; // Evaluate the average value of the destination pixel. float sum = 0.0; int count = 0; for (int k = 0; k < 4; k++) { if (0 <= ia[k] and ia[k] <= width * height * channels) { sum += src[ia[k]]; count++; } } dst[ib] = int(sum / count); } } } } __global__ void rotateOnDevice(unsigned char *dst, unsigned char *src, const double radian, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } int x = (int)i / width; int y = (i % width); // Evaluate the source pixels. int x_center = x - round(height / 2.0); int y_center = y - round(width / 2.0); double xa = x_center * cos(-radian) - y_center * sin(-radian) + round(height / 2.0); double ya = x_center * sin(-radian) + y_center * cos(-radian) + round(width / 2.0); // Check for out-of-bound coordinates. if (xa < 0 or xa > height or ya < 0 or ya > width) { // Set pixels to black and exit for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; dst[ib] = 0; } return; } for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; // Evaluate the four pixels given xs and ys roundings. int ia[4] = {channels * (int(floor(xa)) * width + int(floor(ya))) + c, channels * (int(floor(xa)) * width + int(ceil(ya))) + c, channels * (int(ceil(xa)) * width + int(floor(ya))) + c, channels * (int(ceil(xa)) * width + int(ceil(ya))) + c}; // Evaluate the average value of the destination pixel. float sum = 0.0; int count = 0; for (int k = 0; k < 4; k++) { if (0 <= ia[k] and ia[k] <= width * height * channels) { sum += src[ia[k]]; count++; } } dst[ib] = int(sum / count); } } void scaleOnHost(unsigned char *dst, unsigned char *src, const double ratio, const int width, const int height, const int channels) { int newWidth = width * ratio; int newHeight = height * ratio; float inverseRatio = 1.0 / ratio; for (int y = 0; y < newWidth; y++) { for (int x = 0; x < newHeight; x++) { for (int c = 0; c < channels; c++) { int i = (x * newWidth + y) * channels + c; float tempValue = 0.0; for (int dy = -1; dy < 2; dy++) { for (int dx = -1; dx < 2; dx++) { int oldI = ((int(inverseRatio * x) + dx) * width + (int(inverseRatio * y) + dy)) * channels + c; float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy))); if (oldI < 0 or oldI > width * height * channels) { continue; } tempValue += weight * src[oldI]; } } dst[i] = tempValue; } } } } __global__ void scaleOnDevice(unsigned char *dst, unsigned char *src, const double ratio, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; int newWidth = width * ratio; int newHeight = height * ratio; float inverseRatio = 1.0 / ratio; // Check for overflow. 
if (i > newWidth * newHeight) { return; } int x = (int)i / newWidth; int y = (i % newWidth); for (int c = 0; c < channels; c++) { float tempValue = 0.0; for (int dy = -1; dy < 2; dy++) { for (int dx = -1; dx < 2; dx++) { int src_i = ((int(inverseRatio * x) + dx) * width + (int(inverseRatio * y) + dy)) * channels + c; float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy))); if (src_i < 0 or src_i > width * height * channels) { continue; } tempValue += weight * src[src_i]; } } dst[i * channels + c] = tempValue; } } void translateOnHost(unsigned char *dst, unsigned char *src, int px, int py, const int width, const int height, const int channels) { for (int y = 0; y < width; y++) { for (int x = 0; x < height; x++) { // Evaluate the source pixels. int xa = x - px; int ya = y - py; // Check for out-of-bound coordinates. if (xa < 0 or xa > height or ya < 0 or ya > width) { // Set pixels to black and exit for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; dst[ib] = 0; } continue; } for (int c = 0; c < channels; c++) { int ia = channels * (xa * width + ya) + c; int ib = channels * (x * width + y) + c; dst[ib] = src[ia]; } } } } __global__ void translateOnDevice(unsigned char *dst, unsigned char *src, int px, int py, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } int x = (int)i / width; int y = (i % width); // Evaluate the source pixels. int xa = x - px; int ya = y - py; // Check for out-of-bound coordinates. if (xa < 0 or xa > height or ya < 0 or ya > width) { // Set pixels to black and exit. for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; dst[ib] = 0; } return; } for (int c = 0; c < channels; c++) { int ia = channels * (xa * width + ya) + c; int ib = channels * (x * width + y) + c; dst[ib] = src[ia]; } } void transposeOnHost(unsigned char *data, const int width, const int height, const int channels) { for (int y = 0; y < width; y++) { for (int x = 0; x < height; x++) { for (int c = 0; c < channels; c++) { int ia = channels * (y * width + x) + c; int ib = channels * (x * height + y) + c; if (ia > ib) { continue; } unsigned char temp = data[ib]; data[ib] = data[ia]; data[ia] = temp; } } } } __global__ void transposeOnDevice(unsigned char *data, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. 
if (i >= width * height) { return; } for (int c = 0; c < channels; c++) { int ia = channels * i + c; int ib = channels * ((i % width) * height + ((int)i / width)) + c; if (ia > ib) { continue; } unsigned char temp = data[ib]; data[ib] = data[ia]; data[ia] = temp; } } void sumOfMatmulOnHost(float *total, float *A, float *B, int side) { float *C = new float[side * side]; *total = 0; for (int i = 0; i < side * side; i++) { int x = (int)i / side; int y = (i % side); C[i] = 0; for (int d = 0; d < side; d++) { int ia = x * side + d; int ib = d * side + y; C[i] += A[ia] * B[ib]; } *total += C[i]; } delete[] C; } __device__ void sumOfMatmulOnDevice(float *total, float *A, float *B, int side) { *total = 0; for (int i = 0; i < side * side; i++) { int x = (int)i / side; int y = (i % side); for (int d = 0; d < side; d++) { int ia = x * side + d; int ib = d * side + y; *total += A[ia] * B[ib]; } } } float sumOfSquareDifferencesOnHost(unsigned char *patch1, unsigned char *patch2, int patchSide) { float sse = 0.0; for (int i = 0; i < patchSide * patchSide; i++) { sse += pow(float(patch1[i] - patch2[i]), 2); } return sse; } __device__ float sumOfSquareDifferencesOnDevice(unsigned char *patch1, unsigned char *patch2, int patchSide) { float sse = 0.0; for (int i = 0; i < patchSide * patchSide; i++) { sse += pow(float(patch1[i] - patch2[i]), 2); } return sse; } void extractPatchOnHost(unsigned char *patch, unsigned char *data, int centerIndex, int patchSide, int width, int height) { const int patchMargin = int((patchSide - 1) / 2); for (int pi = 0; pi < patchSide * patchSide; pi++) { int x = (int)centerIndex / width; int y = centerIndex % width; int dx = ((int)pi / patchSide) - patchMargin; int dy = (pi % patchSide) - patchMargin; int di = (x + dx) * width + (y + dy); if (di < 0 or di > width * height) { patch[pi] = 0; } else { patch[pi] = data[di]; } } } __device__ void extractPatchOnDevice(unsigned char *patch, unsigned char *data, int centerIndex, int patchSide, int width, int height) { const int patchMargin = int((patchSide - 1) / 2); for (int pi = 0; pi < patchSide * patchSide; pi++) { int x = (int)centerIndex / width; int y = centerIndex % width; int dx = ((int)pi / patchSide) - patchMargin; int dy = (pi % patchSide) - patchMargin; int di = (x + dx) * width + (y + dy); if (di < 0 or di > width * height) { patch[pi] = 0; } else { patch[pi] = data[di]; } } } void findHomographyRANSACOnHost(float *matrices, float *scores, int maxIter, int *currentCorners, int *previousCorners, int maxCorners, int width, int height, float thresholdError, float minConfidence) { std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> uniform(0, maxCorners); const int N_POINTS = 3; const int SPACE_DIM = 2; // Create maxIter models. float *srcTriplet = new float[N_POINTS * SPACE_DIM]; float *dstTriplet = new float[N_POINTS * SPACE_DIM]; float *estPoint = new float[SPACE_DIM]; float *srcPoint = new float[SPACE_DIM]; float *dstPoint = new float[SPACE_DIM]; for (int n = 0; n < maxIter; n++) { int offset = n * (N_POINTS * (SPACE_DIM + 1)); scores[n] = INFINITY; // Select the minimum number of data points to estimate a model. for (int k = 0; k < N_POINTS; k++) { int i = uniform(gen); srcTriplet[k * SPACE_DIM] = (int)previousCorners[i] / width; srcTriplet[k * SPACE_DIM + 1] = previousCorners[i] % width; dstTriplet[k * SPACE_DIM] = (int)currentCorners[i] / width; dstTriplet[k * SPACE_DIM + 1] = currentCorners[i] % width; } // Estimate the model that fit the hypothetical inliers. 
estimateTransformOnHost(matrices + offset, srcTriplet, dstTriplet); // Count the points that fit the model and the total error. int nInliers = 0; float totalError = 0.0; for (int i = 0; i < maxCorners; i++) { srcPoint[0] = (int)previousCorners[i] / width; srcPoint[1] = previousCorners[i] % width; dstPoint[0] = (int)currentCorners[i] / width; dstPoint[1] = currentCorners[i] % width; // Apply the transform and evaluate the error. applyTransformOnHost(estPoint, srcPoint, matrices + offset); float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) + pow(int(estPoint[1] - dstPoint[1]), 2); nInliers += int(reprojError < thresholdError); totalError += reprojError; } // Set the matrix score to the error if the confidence is high // enough. float confidence = (float)nInliers / maxCorners; if (confidence >= minConfidence) { scores[n] = totalError; } } delete[] srcTriplet; delete[] dstTriplet; delete[] estPoint; delete[] srcPoint; delete[] dstPoint; } __global__ void findHomographyRANSACOnDevice( float *matrices, float *scores, int maxIter, int *currentCorners, int *previousCorners, int maxCorners, int *randomCornerIndices, int width, int height, float thresholdError, float minConfidence) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= maxIter) { return; } const int N_POINTS = 3; const int SPACE_DIM = 2; // Create maxIter models. float *srcTriplet = new float[N_POINTS * SPACE_DIM]; float *dstTriplet = new float[N_POINTS * SPACE_DIM]; float *estPoint = new float[SPACE_DIM]; float *srcPoint = new float[SPACE_DIM]; float *dstPoint = new float[SPACE_DIM]; int offset = i * (N_POINTS * (SPACE_DIM + 1)); scores[i] = INFINITY; // Select the minimum number of data points to estimate a model. for (int k = 0; k < N_POINTS; k++) { int index = randomCornerIndices[N_POINTS * i + k]; srcTriplet[k * SPACE_DIM] = (int)previousCorners[index] / width; srcTriplet[k * SPACE_DIM + 1] = previousCorners[index] % width; dstTriplet[k * SPACE_DIM] = (int)currentCorners[index] / width; dstTriplet[k * SPACE_DIM + 1] = currentCorners[index] % width; } // Estimate the model that fit the hypothetical inliers. estimateTransformOnDevice(matrices + offset, srcTriplet, dstTriplet); // Count the points that fit the model and the total error. int nInliers = 0; float totalError = 0.0; for (int index = 0; index < maxCorners; index++) { srcPoint[0] = (int)previousCorners[index] / width; srcPoint[1] = previousCorners[index] % width; dstPoint[0] = (int)currentCorners[index] / width; dstPoint[1] = currentCorners[index] % width; // Apply the transform and evaluate the error. applyTransformOnDevice(estPoint, srcPoint, matrices + offset); float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) + pow(int(estPoint[1] - dstPoint[1]), 2); nInliers += int(reprojError < thresholdError); totalError += reprojError; } // Set the matrix score to the error if the confidence is high // enough. float confidence = (float)nInliers / maxCorners; if (confidence >= minConfidence) { scores[i] = totalError; } delete[] srcTriplet; delete[] dstTriplet; delete[] estPoint; delete[] srcPoint; delete[] dstPoint; } void estimateTransformOnHost(float *A, float *Ui, float *vi) { const int N_POINTS = 3; const int SPACE_DIM = 2; // Create X and Y matrices. 
float *X = new float[N_POINTS * (SPACE_DIM + 1)]; float *Y = new float[N_POINTS * (SPACE_DIM + 1)]; for (int d = 0; d < SPACE_DIM + 1; d++) { for (int n = 0; n < N_POINTS; n++) { int i = d * (N_POINTS) + n; int j = n * (SPACE_DIM) + d; if (d == SPACE_DIM) { X[i] = 1; Y[i] = int(n >= N_POINTS - 1); } else { X[i] = Ui[j]; Y[i] = vi[j]; } } } float *Xi = new float[N_POINTS * (SPACE_DIM + 1)]; invert3x3MatrixOnHost(Xi, X); // Get the affine transformation matrix. matmulOnHost(A, Y, Xi, N_POINTS); delete[] X; delete[] Y; delete[] Xi; } __device__ void estimateTransformOnDevice(float *A, float *Ui, float *vi) { const int N_POINTS = 3; const int SPACE_DIM = 2; // Create X and Y matrices. float *X = new float[N_POINTS * (SPACE_DIM + 1)]; float *Y = new float[N_POINTS * (SPACE_DIM + 1)]; for (int d = 0; d < SPACE_DIM + 1; d++) { for (int n = 0; n < N_POINTS; n++) { int i = d * (N_POINTS) + n; int j = n * (SPACE_DIM) + d; if (d == SPACE_DIM) { X[i] = 1; Y[i] = int(n >= N_POINTS - 1); } else { X[i] = Ui[j]; Y[i] = vi[j]; } } } float *Xi = new float[N_POINTS * (SPACE_DIM + 1)]; invert3x3MatrixOnDevice(Xi, X); // Get the affine transformation matrix. matmulOnDevice(A, Y, Xi, N_POINTS); delete[] X; delete[] Y; delete[] Xi; } void invert3x3MatrixOnHost(float *Xi, float *X) { float det = X[0] * (X[4] * X[8] - X[5] * X[7]) - X[1] * (X[3] * X[8] - X[5] * X[6]) + X[2] * (X[3] * X[7] - X[4] * X[6]); Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det; Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det; Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det; Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det; Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det; Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det; Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det; Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det; Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det; } __device__ void invert3x3MatrixOnDevice(float *Xi, float *X) { float det = X[0] * (X[4] * X[8] - X[5] * X[7]) - X[1] * (X[3] * X[8] - X[5] * X[6]) + X[2] * (X[3] * X[7] - X[4] * X[6]); Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det; Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det; Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det; Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det; Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det; Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det; Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det; Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det; Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det; } void matmulOnHost(float *C, float *A, float *B, int side) { for (int i = 0; i < side * side; i++) { int x = (int)i / side; int y = (i % side); C[i] = 0; for (int d = 0; d < side; d++) { int ia = x * side + d; int ib = d * side + y; C[i] += A[ia] * B[ib]; } } } __device__ void matmulOnDevice(float *C, float *A, float *B, int side) { for (int i = 0; i < side * side; i++) { int x = (int)i / side; int y = (i % side); C[i] = 0; for (int d = 0; d < side; d++) { int ia = x * side + d; int ib = d * side + y; C[i] += A[ia] * B[ib]; } } } void applyTransformOnHost(float *dst, float *src, float *A) { const int SPACE_DIM = 2; for (int i = 0; i < SPACE_DIM; i++) { dst[i] = 0.0; dst[i] += src[0] * A[i * 3 + 0]; dst[i] += src[1] * A[i * 3 + 1]; } } __device__ void applyTransformOnDevice(float *dst, float *src, float *A) { const int SPACE_DIM = 2; for (int i = 0; i < SPACE_DIM; i++) { dst[i] = 0.0; dst[i] += src[0] * A[i * 3 + 0]; dst[i] += src[1] * A[i * 3 + 1]; } }
9180e408868ba0dc5d632364ba1b460fb90e100c.cu
#include <bits/stdc++.h> #include <cmath> #include "../include/common.h" #include "../include/functions.cuh" void convolutionOnHost(unsigned char *dst, unsigned char *src, float *kernel, int kernelSide, const int width, const int height, const int channels) { unsigned int margin = int((kernelSide - 1) / 2); // Loop through each pixel. for (int y = margin; y < width - margin; y++) { for (int x = margin; x < height - margin; x++) { // Loop through each element of the kernel. for (int dy = 0; dy < kernelSide; dy++) { for (int dx = 0; dx < kernelSide; dx++) { // Loop through the channels of the image. for (int c = 0; c < channels; c++) { int src_i = channels * ((x + (dx - margin)) * width + (y + (dy - margin))) + c; int ker_i = dx * kernelSide + dy; int dst_i = channels * (x * width + y) + c; // Reset dst element at the start of the conv. if (ker_i == 0) { dst[dst_i] = 0; } // Add result of multiplication. dst[dst_i] += int(src[src_i] * kernel[ker_i]); } } } } } } __global__ void convolutionOnDevice(unsigned char *dst, unsigned char *src, float *kernel, int kernelSide, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } unsigned int margin = int((kernelSide - 1) / 2); int x = (int)i / width; int y = (i % width); // Check for minimum padding. if (y < margin or y > width - margin - 1 or x < margin or x > height - margin - 1) { return; } // Loop through each element of the kernel. for (int dy = 0; dy < kernelSide; dy++) { for (int dx = 0; dx < kernelSide; dx++) { // Loop through the channels of the image. for (int c = 0; c < channels; c++) { int src_i = channels * ((x + (dx - margin)) * width + (y + (dy - margin))) + c; int ker_i = dx * kernelSide + dy; int dst_i = channels * i + c; // Reset dst element at the start of the conv. if (ker_i == 0) { dst[dst_i] = 0; } // Add result of multiplication. dst[dst_i] += int(src[src_i] * kernel[ker_i]); } } } } void drawLineOnHost(unsigned char *data, int x1, int y1, int x2, int y2, int radius, int *color, int colorSize, int width, int height, int channels) { for (int dy = min(y1, y2); dy < max(y1, y2); dy++) { for (int dx = min(x1, x2); dx < max(x1, x2); dx++) { int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1); if (interpolatedY - radius > dy or interpolatedY + radius < dy) { continue; } int index = (dx * width + dy) * channels; for (int c = 0; c < min(channels, colorSize); c++) { if (index + c < width * height * channels) { data[index + c] = color[c]; } } } } } __global__ void drawLineOnDevice(unsigned char *data, int x1, int y1, int x2, int y2, int radius, int *color, int colorSize, int width, int height, int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } int dx = (int)i / width; int dy = (i % width); // Check for boundaries. 
int interpolatedY = (y1 * (x2 - dx) + y2 * (dx - x1)) / (x2 - x1); if (dx < min(x1, x2) or dx >= max(x1, x2) or dy < min(y1, y2) or dy >= max(y1, y2) or interpolatedY - radius > dy or interpolatedY + radius < dy) { return; } for (int c = 0; c < min(channels, colorSize); c++) { int index = channels * i; if (index + c < width * height * channels) { data[index + c] = color[c]; } } } void drawPointOnHost(unsigned char *data, int x, int y, int radius, int *color, int colorSize, int width, int height, int channels) { for (int dy = max(0, y - radius); dy < y + radius; dy++) { for (int dx = max(0, x - radius); dx < x + radius; dx++) { int index = (dx * width + dy) * channels; for (int c = 0; c < min(channels, colorSize); c++) { if (index + c < width * height * channels) { data[index + c] = color[c]; } } } } } __global__ void drawPointOnDevice(unsigned char *data, int x, int y, int radius, int *color, int colorSize, int width, int height, int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } int dx = (int)i / width; int dy = (i % width); // Check for point boundaries. if (dy < y - radius or dy >= y + radius or dx < x - radius or dx >= x + radius) { return; } for (int c = 0; c < min(channels, colorSize); c++) { int index = channels * i; if (index + c < width * height * channels) { data[index + c] = color[c]; } } } void differenceOnHost(unsigned char *dst, unsigned char *src, const int width, const int height, const int channels) { for (int y = 0; y < width; y++) { for (int x = 0; x < height; x++) { for (int c = 0; c < channels; c++) { int i = channels * (x * width + y) + c; if (dst[i] > src[i]) { dst[i] = dst[i] - src[i]; } else { dst[i] = src[i] - dst[i]; } } } } } __global__ void differenceOnDevice(unsigned char *dst, unsigned char *src, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height * channels) { return; } if (dst[i] > src[i]) { dst[i] = dst[i] - src[i]; } else { dst[i] = src[i] - dst[i]; } } void cornerScoreOnHost(unsigned char *gradX, unsigned char *gradY, float *R, int width, int height) { const int windowSide = 3; const int windowMargin = int((windowSide - 1) / 2); for (int i = 0; i < width * height; i++) { int x = (int)i / width; int y = (i % width); // Check for out-of-bound coordinates. R[i] = 0; if (x < windowMargin or y < windowMargin or x > height - windowMargin - 1 or y > width - windowMargin - 1) { continue; } // Create the windows Ix and Iy. float *Ix = new float[windowSide * windowSide]; float *Iy = new float[windowSide * windowSide]; for (int wi = 0; wi < windowSide * windowSide; wi++) { int dx = ((int)wi / windowSide) - windowMargin; int dy = (wi % windowSide) - windowMargin; int di = (x + dx) * width + (y + dy); Ix[wi] = (float)gradX[di] / PIXEL_VALUES; Iy[wi] = (float)gradY[di] / PIXEL_VALUES; } // Construct the structural matrix. float *M = new float[4]; sumOfMatmulOnHost(&M[0], Ix, Ix, windowSide); sumOfMatmulOnHost(&M[1], Ix, Iy, windowSide); sumOfMatmulOnHost(&M[2], Iy, Ix, windowSide); sumOfMatmulOnHost(&M[3], Iy, Iy, windowSide); // Evaluate the pixel score. float m = (M[0] + M[3]) / 2; float p = (M[0] * M[3]) - (M[1] * M[2]); float lambda1 = m + sqrt(m * m - p); float lambda2 = m - sqrt(m * m - p); R[i] = min(lambda1, lambda2); // Free memory. 
delete[] Ix; delete[] Iy; delete[] M; } } __global__ void cornerScoreOnDevice(unsigned char *gradX, unsigned char *gradY, float *R, int width, int height) { int i = blockIdx.x * blockDim.x + threadIdx.x; const int windowSide = 3; const int windowMargin = int((windowSide - 1) / 2); // Check for overflow. if (i >= width * height) { return; } int x = (int)i / width; int y = (i % width); // Check for out-of-bound coordinates. R[i] = 0; if (x < windowMargin or y < windowMargin or x > height - windowMargin - 1 or y > width - windowMargin - 1) { return; } // Create the windows Ix and Iy. float *Ix = new float[windowSide * windowSide]; float *Iy = new float[windowSide * windowSide]; for (int wi = 0; wi < windowSide * windowSide; wi++) { int dx = ((int)wi / windowSide) - windowMargin; int dy = (wi % windowSide) - windowMargin; int di = (x + dx) * width + (y + dy); Ix[wi] = (float)gradX[di] / PIXEL_VALUES; Iy[wi] = (float)gradY[di] / PIXEL_VALUES; } // Construct the structural matrix. float *M = new float[4]{0, 0, 0, 0}; sumOfMatmulOnDevice(&M[0], Ix, Ix, windowSide); sumOfMatmulOnDevice(&M[1], Ix, Iy, windowSide); sumOfMatmulOnDevice(&M[2], Iy, Ix, windowSide); sumOfMatmulOnDevice(&M[3], Iy, Iy, windowSide); // Evaluate the pixel score. float m = (M[0] + M[3]) / 2; float p = (M[0] * M[3]) - (M[1] * M[2]); float lambda1 = m + sqrt(m * m - p); float lambda2 = m - sqrt(m * m - p); R[i] = min(lambda1, lambda2); // Free memory. delete[] Ix; delete[] Iy; delete[] M; } void opticalFLowOnHost(int *currentCorners, int *corners, int maxCorners, unsigned char **currPyramidalScales, unsigned char **prevPyramidalScales, int levels, int width0, int height0) { const int patchSide = 5; const int windowSide = 9; const int windowMargin = int((windowSide - 1) / 2); unsigned char *prevPatch = new unsigned char[patchSide * patchSide]; unsigned char *currPatch = new unsigned char[patchSide * patchSide]; for (int l = levels - 1; l >= 0; l--) { int width = width0 / pow(2, l); int height = height0 / pow(2, l); float minSse; for (int i = 0; i < maxCorners; i++) { // Downscale corner from the previous frame. int lx = (corners[i] / width0) * pow(2, -l); int ly = (corners[i] % width0) * pow(2, -l); int prevCorner = int(lx * width + ly); minSse = 100; if (l == levels - 1) { currentCorners[i] = prevCorner; } else { // Upscale corner from the previous layer. 
int ux = int(currentCorners[i] / (width * 0.5)) * 2; int uy = (currentCorners[i] % int((width * 0.5))) * 2; currentCorners[i] = int(ux * width + uy); } extractPatchOnHost(prevPatch, prevPyramidalScales[l], prevCorner, patchSide, width, height); int x = (int)currentCorners[i] / width; int y = currentCorners[i] % width; for (int wi = 0; wi < windowSide * windowSide; wi++) { int dx = ((int)wi / windowSide) - windowMargin; int dy = (wi % windowSide) - windowMargin; int di = (x + dx) * width + (y + dy); extractPatchOnHost(currPatch, currPyramidalScales[l], di, patchSide, width, height); float sse = sumOfSquareDifferencesOnHost(prevPatch, currPatch, patchSide); if (sse < minSse) { currentCorners[i] = di; minSse = sse; } } } } } __global__ void opticalFLowOnDevice(int *currentCorners, int *corners, int maxCorners, unsigned char *currPyramidalScales, unsigned char *prevPyramidalScales, int levels, int offsetSize, int width0, int height0) { int i = blockIdx.x * blockDim.x + threadIdx.x; const int patchSide = 5; const int windowSide = 9; const int windowMargin = int((windowSide - 1) / 2); unsigned char *prevPatch = new unsigned char[patchSide * patchSide]; unsigned char *currPatch = new unsigned char[patchSide * patchSide]; for (int l = levels - 1; l >= 0; l--) { int width = width0 / pow(2, l); int height = height0 / pow(2, l); float minSse = 100; // Downscale corner from the previous frame. int lx = (corners[i] / width0) * pow(2, -l); int ly = (corners[i] % width0) * pow(2, -l); int prevCorner = int(lx * width + ly); if (l == levels - 1) { currentCorners[i] = prevCorner; } else { // Upscale corner from the previous layer. int ux = int(currentCorners[i] / (width * 0.5)) * 2; int uy = (currentCorners[i] % int((width * 0.5))) * 2; currentCorners[i] = int(ux * width + uy); } extractPatchOnDevice(prevPatch, prevPyramidalScales + l * offsetSize, prevCorner, patchSide, width, height); int x = (int)currentCorners[i] / width; int y = currentCorners[i] % width; for (int wi = 0; wi < windowSide * windowSide; wi++) { int dx = ((int)wi / windowSide) - windowMargin; int dy = (wi % windowSide) - windowMargin; int di = (x + dx) * width + (y + dy); extractPatchOnDevice(currPatch, currPyramidalScales + l * offsetSize, di, patchSide, width, height); float sse = sumOfSquareDifferencesOnDevice(prevPatch, currPatch, patchSide); if (sse < minSse) { currentCorners[i] = di; minSse = sse; } } } delete[] prevPatch; delete[] currPatch; } void rotateOnHost(unsigned char *dst, unsigned char *src, const double radian, const int width, const int height, const int channels) { for (int y = 0; y < width; y++) { for (int x = 0; x < height; x++) { // Evaluate the source pixels. int x_center = x - round(height / 2.0); int y_center = y - round(width / 2.0); double xa = x_center * cos(-radian) - y_center * sin(-radian) + round(height / 2.0); double ya = x_center * sin(-radian) + y_center * cos(-radian) + round(width / 2.0); // Check for out-of-bound coordinates. if (xa < 0 or xa > height or ya < 0 or ya > width) { // Set pixels to black and exit for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; dst[ib] = 0; } continue; } for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; // Evaluate the four pixels given xs and ys roundings. 
int ia[4] = { channels * (int(floor(xa)) * width + int(floor(ya))) + c, channels * (int(floor(xa)) * width + int(ceil(ya))) + c, channels * (int(ceil(xa)) * width + int(floor(ya))) + c, channels * (int(ceil(xa)) * width + int(ceil(ya))) + c}; // Evaluate the average value of the destination pixel. float sum = 0.0; int count = 0; for (int k = 0; k < 4; k++) { if (0 <= ia[k] and ia[k] <= width * height * channels) { sum += src[ia[k]]; count++; } } dst[ib] = int(sum / count); } } } } __global__ void rotateOnDevice(unsigned char *dst, unsigned char *src, const double radian, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } int x = (int)i / width; int y = (i % width); // Evaluate the source pixels. int x_center = x - round(height / 2.0); int y_center = y - round(width / 2.0); double xa = x_center * cos(-radian) - y_center * sin(-radian) + round(height / 2.0); double ya = x_center * sin(-radian) + y_center * cos(-radian) + round(width / 2.0); // Check for out-of-bound coordinates. if (xa < 0 or xa > height or ya < 0 or ya > width) { // Set pixels to black and exit for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; dst[ib] = 0; } return; } for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; // Evaluate the four pixels given xs and ys roundings. int ia[4] = {channels * (int(floor(xa)) * width + int(floor(ya))) + c, channels * (int(floor(xa)) * width + int(ceil(ya))) + c, channels * (int(ceil(xa)) * width + int(floor(ya))) + c, channels * (int(ceil(xa)) * width + int(ceil(ya))) + c}; // Evaluate the average value of the destination pixel. float sum = 0.0; int count = 0; for (int k = 0; k < 4; k++) { if (0 <= ia[k] and ia[k] <= width * height * channels) { sum += src[ia[k]]; count++; } } dst[ib] = int(sum / count); } } void scaleOnHost(unsigned char *dst, unsigned char *src, const double ratio, const int width, const int height, const int channels) { int newWidth = width * ratio; int newHeight = height * ratio; float inverseRatio = 1.0 / ratio; for (int y = 0; y < newWidth; y++) { for (int x = 0; x < newHeight; x++) { for (int c = 0; c < channels; c++) { int i = (x * newWidth + y) * channels + c; float tempValue = 0.0; for (int dy = -1; dy < 2; dy++) { for (int dx = -1; dx < 2; dx++) { int oldI = ((int(inverseRatio * x) + dx) * width + (int(inverseRatio * y) + dy)) * channels + c; float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy))); if (oldI < 0 or oldI > width * height * channels) { continue; } tempValue += weight * src[oldI]; } } dst[i] = tempValue; } } } } __global__ void scaleOnDevice(unsigned char *dst, unsigned char *src, const double ratio, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; int newWidth = width * ratio; int newHeight = height * ratio; float inverseRatio = 1.0 / ratio; // Check for overflow. 
if (i > newWidth * newHeight) { return; } int x = (int)i / newWidth; int y = (i % newWidth); for (int c = 0; c < channels; c++) { float tempValue = 0.0; for (int dy = -1; dy < 2; dy++) { for (int dx = -1; dx < 2; dx++) { int src_i = ((int(inverseRatio * x) + dx) * width + (int(inverseRatio * y) + dy)) * channels + c; float weight = 1 / (pow(2, 2 + abs(dx) + abs(dy))); if (src_i < 0 or src_i > width * height * channels) { continue; } tempValue += weight * src[src_i]; } } dst[i * channels + c] = tempValue; } } void translateOnHost(unsigned char *dst, unsigned char *src, int px, int py, const int width, const int height, const int channels) { for (int y = 0; y < width; y++) { for (int x = 0; x < height; x++) { // Evaluate the source pixels. int xa = x - px; int ya = y - py; // Check for out-of-bound coordinates. if (xa < 0 or xa > height or ya < 0 or ya > width) { // Set pixels to black and exit for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; dst[ib] = 0; } continue; } for (int c = 0; c < channels; c++) { int ia = channels * (xa * width + ya) + c; int ib = channels * (x * width + y) + c; dst[ib] = src[ia]; } } } } __global__ void translateOnDevice(unsigned char *dst, unsigned char *src, int px, int py, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. if (i >= width * height) { return; } int x = (int)i / width; int y = (i % width); // Evaluate the source pixels. int xa = x - px; int ya = y - py; // Check for out-of-bound coordinates. if (xa < 0 or xa > height or ya < 0 or ya > width) { // Set pixels to black and exit. for (int c = 0; c < channels; c++) { int ib = channels * (x * width + y) + c; dst[ib] = 0; } return; } for (int c = 0; c < channels; c++) { int ia = channels * (xa * width + ya) + c; int ib = channels * (x * width + y) + c; dst[ib] = src[ia]; } } void transposeOnHost(unsigned char *data, const int width, const int height, const int channels) { for (int y = 0; y < width; y++) { for (int x = 0; x < height; x++) { for (int c = 0; c < channels; c++) { int ia = channels * (y * width + x) + c; int ib = channels * (x * height + y) + c; if (ia > ib) { continue; } unsigned char temp = data[ib]; data[ib] = data[ia]; data[ia] = temp; } } } } __global__ void transposeOnDevice(unsigned char *data, const int width, const int height, const int channels) { int i = blockIdx.x * blockDim.x + threadIdx.x; // Check for overflow. 
if (i >= width * height) { return; } for (int c = 0; c < channels; c++) { int ia = channels * i + c; int ib = channels * ((i % width) * height + ((int)i / width)) + c; if (ia > ib) { continue; } unsigned char temp = data[ib]; data[ib] = data[ia]; data[ia] = temp; } } void sumOfMatmulOnHost(float *total, float *A, float *B, int side) { float *C = new float[side * side]; *total = 0; for (int i = 0; i < side * side; i++) { int x = (int)i / side; int y = (i % side); C[i] = 0; for (int d = 0; d < side; d++) { int ia = x * side + d; int ib = d * side + y; C[i] += A[ia] * B[ib]; } *total += C[i]; } delete[] C; } __device__ void sumOfMatmulOnDevice(float *total, float *A, float *B, int side) { *total = 0; for (int i = 0; i < side * side; i++) { int x = (int)i / side; int y = (i % side); for (int d = 0; d < side; d++) { int ia = x * side + d; int ib = d * side + y; *total += A[ia] * B[ib]; } } } float sumOfSquareDifferencesOnHost(unsigned char *patch1, unsigned char *patch2, int patchSide) { float sse = 0.0; for (int i = 0; i < patchSide * patchSide; i++) { sse += pow(float(patch1[i] - patch2[i]), 2); } return sse; } __device__ float sumOfSquareDifferencesOnDevice(unsigned char *patch1, unsigned char *patch2, int patchSide) { float sse = 0.0; for (int i = 0; i < patchSide * patchSide; i++) { sse += pow(float(patch1[i] - patch2[i]), 2); } return sse; } void extractPatchOnHost(unsigned char *patch, unsigned char *data, int centerIndex, int patchSide, int width, int height) { const int patchMargin = int((patchSide - 1) / 2); for (int pi = 0; pi < patchSide * patchSide; pi++) { int x = (int)centerIndex / width; int y = centerIndex % width; int dx = ((int)pi / patchSide) - patchMargin; int dy = (pi % patchSide) - patchMargin; int di = (x + dx) * width + (y + dy); if (di < 0 or di > width * height) { patch[pi] = 0; } else { patch[pi] = data[di]; } } } __device__ void extractPatchOnDevice(unsigned char *patch, unsigned char *data, int centerIndex, int patchSide, int width, int height) { const int patchMargin = int((patchSide - 1) / 2); for (int pi = 0; pi < patchSide * patchSide; pi++) { int x = (int)centerIndex / width; int y = centerIndex % width; int dx = ((int)pi / patchSide) - patchMargin; int dy = (pi % patchSide) - patchMargin; int di = (x + dx) * width + (y + dy); if (di < 0 or di > width * height) { patch[pi] = 0; } else { patch[pi] = data[di]; } } } void findHomographyRANSACOnHost(float *matrices, float *scores, int maxIter, int *currentCorners, int *previousCorners, int maxCorners, int width, int height, float thresholdError, float minConfidence) { std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution<> uniform(0, maxCorners); const int N_POINTS = 3; const int SPACE_DIM = 2; // Create maxIter models. float *srcTriplet = new float[N_POINTS * SPACE_DIM]; float *dstTriplet = new float[N_POINTS * SPACE_DIM]; float *estPoint = new float[SPACE_DIM]; float *srcPoint = new float[SPACE_DIM]; float *dstPoint = new float[SPACE_DIM]; for (int n = 0; n < maxIter; n++) { int offset = n * (N_POINTS * (SPACE_DIM + 1)); scores[n] = INFINITY; // Select the minimum number of data points to estimate a model. for (int k = 0; k < N_POINTS; k++) { int i = uniform(gen); srcTriplet[k * SPACE_DIM] = (int)previousCorners[i] / width; srcTriplet[k * SPACE_DIM + 1] = previousCorners[i] % width; dstTriplet[k * SPACE_DIM] = (int)currentCorners[i] / width; dstTriplet[k * SPACE_DIM + 1] = currentCorners[i] % width; } // Estimate the model that fit the hypothetical inliers. 
estimateTransformOnHost(matrices + offset, srcTriplet, dstTriplet); // Count the points that fit the model and the total error. int nInliers = 0; float totalError = 0.0; for (int i = 0; i < maxCorners; i++) { srcPoint[0] = (int)previousCorners[i] / width; srcPoint[1] = previousCorners[i] % width; dstPoint[0] = (int)currentCorners[i] / width; dstPoint[1] = currentCorners[i] % width; // Apply the transform and evaluate the error. applyTransformOnHost(estPoint, srcPoint, matrices + offset); float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) + pow(int(estPoint[1] - dstPoint[1]), 2); nInliers += int(reprojError < thresholdError); totalError += reprojError; } // Set the matrix score to the error if the confidence is high // enough. float confidence = (float)nInliers / maxCorners; if (confidence >= minConfidence) { scores[n] = totalError; } } delete[] srcTriplet; delete[] dstTriplet; delete[] estPoint; delete[] srcPoint; delete[] dstPoint; } __global__ void findHomographyRANSACOnDevice( float *matrices, float *scores, int maxIter, int *currentCorners, int *previousCorners, int maxCorners, int *randomCornerIndices, int width, int height, float thresholdError, float minConfidence) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= maxIter) { return; } const int N_POINTS = 3; const int SPACE_DIM = 2; // Create maxIter models. float *srcTriplet = new float[N_POINTS * SPACE_DIM]; float *dstTriplet = new float[N_POINTS * SPACE_DIM]; float *estPoint = new float[SPACE_DIM]; float *srcPoint = new float[SPACE_DIM]; float *dstPoint = new float[SPACE_DIM]; int offset = i * (N_POINTS * (SPACE_DIM + 1)); scores[i] = INFINITY; // Select the minimum number of data points to estimate a model. for (int k = 0; k < N_POINTS; k++) { int index = randomCornerIndices[N_POINTS * i + k]; srcTriplet[k * SPACE_DIM] = (int)previousCorners[index] / width; srcTriplet[k * SPACE_DIM + 1] = previousCorners[index] % width; dstTriplet[k * SPACE_DIM] = (int)currentCorners[index] / width; dstTriplet[k * SPACE_DIM + 1] = currentCorners[index] % width; } // Estimate the model that fit the hypothetical inliers. estimateTransformOnDevice(matrices + offset, srcTriplet, dstTriplet); // Count the points that fit the model and the total error. int nInliers = 0; float totalError = 0.0; for (int index = 0; index < maxCorners; index++) { srcPoint[0] = (int)previousCorners[index] / width; srcPoint[1] = previousCorners[index] % width; dstPoint[0] = (int)currentCorners[index] / width; dstPoint[1] = currentCorners[index] % width; // Apply the transform and evaluate the error. applyTransformOnDevice(estPoint, srcPoint, matrices + offset); float reprojError = pow(int(estPoint[0] - dstPoint[0]), 2) + pow(int(estPoint[1] - dstPoint[1]), 2); nInliers += int(reprojError < thresholdError); totalError += reprojError; } // Set the matrix score to the error if the confidence is high // enough. float confidence = (float)nInliers / maxCorners; if (confidence >= minConfidence) { scores[i] = totalError; } delete[] srcTriplet; delete[] dstTriplet; delete[] estPoint; delete[] srcPoint; delete[] dstPoint; } void estimateTransformOnHost(float *A, float *Ui, float *vi) { const int N_POINTS = 3; const int SPACE_DIM = 2; // Create X and Y matrices. 
float *X = new float[N_POINTS * (SPACE_DIM + 1)]; float *Y = new float[N_POINTS * (SPACE_DIM + 1)]; for (int d = 0; d < SPACE_DIM + 1; d++) { for (int n = 0; n < N_POINTS; n++) { int i = d * (N_POINTS) + n; int j = n * (SPACE_DIM) + d; if (d == SPACE_DIM) { X[i] = 1; Y[i] = int(n >= N_POINTS - 1); } else { X[i] = Ui[j]; Y[i] = vi[j]; } } } float *Xi = new float[N_POINTS * (SPACE_DIM + 1)]; invert3x3MatrixOnHost(Xi, X); // Get the affine transformation matrix. matmulOnHost(A, Y, Xi, N_POINTS); delete[] X; delete[] Y; delete[] Xi; } __device__ void estimateTransformOnDevice(float *A, float *Ui, float *vi) { const int N_POINTS = 3; const int SPACE_DIM = 2; // Create X and Y matrices. float *X = new float[N_POINTS * (SPACE_DIM + 1)]; float *Y = new float[N_POINTS * (SPACE_DIM + 1)]; for (int d = 0; d < SPACE_DIM + 1; d++) { for (int n = 0; n < N_POINTS; n++) { int i = d * (N_POINTS) + n; int j = n * (SPACE_DIM) + d; if (d == SPACE_DIM) { X[i] = 1; Y[i] = int(n >= N_POINTS - 1); } else { X[i] = Ui[j]; Y[i] = vi[j]; } } } float *Xi = new float[N_POINTS * (SPACE_DIM + 1)]; invert3x3MatrixOnDevice(Xi, X); // Get the affine transformation matrix. matmulOnDevice(A, Y, Xi, N_POINTS); delete[] X; delete[] Y; delete[] Xi; } void invert3x3MatrixOnHost(float *Xi, float *X) { float det = X[0] * (X[4] * X[8] - X[5] * X[7]) - X[1] * (X[3] * X[8] - X[5] * X[6]) + X[2] * (X[3] * X[7] - X[4] * X[6]); Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det; Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det; Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det; Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det; Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det; Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det; Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det; Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det; Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det; } __device__ void invert3x3MatrixOnDevice(float *Xi, float *X) { float det = X[0] * (X[4] * X[8] - X[5] * X[7]) - X[1] * (X[3] * X[8] - X[5] * X[6]) + X[2] * (X[3] * X[7] - X[4] * X[6]); Xi[0] = +float(X[4] * X[8] - X[5] * X[7]) / det; Xi[1] = -float(X[1] * X[8] - X[2] * X[7]) / det; Xi[2] = +float(X[1] * X[5] - X[2] * X[4]) / det; Xi[3] = -float(X[3] * X[8] - X[5] * X[6]) / det; Xi[4] = +float(X[0] * X[8] - X[2] * X[6]) / det; Xi[5] = -float(X[0] * X[5] - X[2] * X[3]) / det; Xi[6] = +float(X[3] * X[7] - X[4] * X[6]) / det; Xi[7] = -float(X[0] * X[7] - X[1] * X[6]) / det; Xi[8] = +float(X[0] * X[4] - X[1] * X[3]) / det; } void matmulOnHost(float *C, float *A, float *B, int side) { for (int i = 0; i < side * side; i++) { int x = (int)i / side; int y = (i % side); C[i] = 0; for (int d = 0; d < side; d++) { int ia = x * side + d; int ib = d * side + y; C[i] += A[ia] * B[ib]; } } } __device__ void matmulOnDevice(float *C, float *A, float *B, int side) { for (int i = 0; i < side * side; i++) { int x = (int)i / side; int y = (i % side); C[i] = 0; for (int d = 0; d < side; d++) { int ia = x * side + d; int ib = d * side + y; C[i] += A[ia] * B[ib]; } } } void applyTransformOnHost(float *dst, float *src, float *A) { const int SPACE_DIM = 2; for (int i = 0; i < SPACE_DIM; i++) { dst[i] = 0.0; dst[i] += src[0] * A[i * 3 + 0]; dst[i] += src[1] * A[i * 3 + 1]; } } __device__ void applyTransformOnDevice(float *dst, float *src, float *A) { const int SPACE_DIM = 2; for (int i = 0; i < SPACE_DIM; i++) { dst[i] = 0.0; dst[i] += src[0] * A[i * 3 + 0]; dst[i] += src[1] * A[i * 3 + 1]; } }
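/* --- Illustrative sketch (not part of the file above) ---
   A minimal host-side launcher for the cornerScoreOnDevice kernel defined above,
   assuming it is compiled in the same translation unit and written against the
   CUDA runtime (swap cuda*/hip* calls for the HIP variant). The block size and
   the absence of error checking are assumptions made for brevity; nothing below
   comes from the original file. */
#include <cuda_runtime.h>

void cornerScore(const unsigned char *gradX, const unsigned char *gradY,
                 float *R, int width, int height)
{
    size_t nPixels = (size_t)width * height;

    unsigned char *dGradX, *dGradY;
    float *dR;
    cudaMalloc((void**)&dGradX, nPixels);
    cudaMalloc((void**)&dGradY, nPixels);
    cudaMalloc((void**)&dR, nPixels * sizeof(float));
    cudaMemcpy(dGradX, gradX, nPixels, cudaMemcpyHostToDevice);
    cudaMemcpy(dGradY, gradY, nPixels, cudaMemcpyHostToDevice);

    // The kernel allocates its 3x3 windows with device-side new; for large
    // images the device heap may need to be enlarged beforehand, e.g.
    // cudaDeviceSetLimit(cudaLimitMallocHeapSize, 64 << 20).

    // One thread per pixel; the kernel itself guards against the tail overflow.
    int block = 256;
    int grid = (int)((nPixels + block - 1) / block);
    cornerScoreOnDevice<<<grid, block>>>(dGradX, dGradY, dR, width, height);

    cudaMemcpy(R, dR, nPixels * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dGradX);
    cudaFree(dGradY);
    cudaFree(dR);
}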
77079463367d1039aefb5507ff780360efaa5928.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include "common.h"

static void HandleError( hipError_t err, const char *file, int line ) {
    if (err != hipSuccess) {
        printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

#define HANDLE_NULL( a ) {if (a == NULL) { \
    printf( "Host memory failed in %s at line %d\n", \
            __FILE__, __LINE__ ); \
    exit( EXIT_FAILURE );}}

extern "C" __global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x;    // this thread handles the data at its thread id
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

extern "C" void add_wrapper(int *a, int *b, int *c) {
    hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, a, b, c);
}
77079463367d1039aefb5507ff780360efaa5928.cu
/*
 * Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */

#include "common.h"

static void HandleError( cudaError_t err, const char *file, int line ) {
    if (err != cudaSuccess) {
        printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

#define HANDLE_NULL( a ) {if (a == NULL) { \
    printf( "Host memory failed in %s at line %d\n", \
            __FILE__, __LINE__ ); \
    exit( EXIT_FAILURE );}}

extern "C" __global__ void add( int *a, int *b, int *c ) {
    int tid = blockIdx.x;    // this thread handles the data at its thread id
    if (tid < N)
        c[tid] = a[tid] + b[tid];
}

extern "C" void add_wrapper(int *a, int *b, int *c) {
    add<<<N,1>>>(a, b, c);
}
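/* --- Illustrative sketch (not part of the file pair above) ---
   One possible host-side driver for add_wrapper(). It assumes it is compiled
   into the same translation unit as the file above, so N (from common.h),
   HANDLE_ERROR and add_wrapper are all in scope; none of this driver exists in
   the original source. */
#include <stdio.h>
#include <stdlib.h>

int main( void ) {
    int *a = (int*)malloc( N * sizeof(int) );
    int *b = (int*)malloc( N * sizeof(int) );
    int *c = (int*)malloc( N * sizeof(int) );
    for (int i = 0; i < N; i++) { a[i] = i; b[i] = 2 * i; }

    int *dev_a, *dev_b, *dev_c;
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice ) );

    add_wrapper( dev_a, dev_b, dev_c );   // launches add<<<N,1>>> internally

    HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost ) );
    printf( "c[0]=%d  c[N-1]=%d\n", c[0], c[N-1] );

    cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_c );
    free( a ); free( b ); free( c );
    return 0;
}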
ae4d1c9522aa64d4f810c2178713e842d8e43de7.hip
// !!! This is a file automatically generated by hipify!!! // Copyright Oliver Kowalke 2013. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <chrono> #include <cstdlib> #include <iostream> #include <memory> #include <random> #include <tuple> #include <hip/hip_runtime.h> #include <boost/assert.hpp> #include <boost/bind.hpp> #include <boost/intrusive_ptr.hpp> #include <boost/fiber/all.hpp> #include <boost/fiber/cuda/waitfor.hpp> __global__ void vector_add( int * a, int * b, int * c, int size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx < size) { c[idx] = a[idx] + b[idx]; } } int main() { try { bool done = false; boost::fibers::fiber f1( [&done]{ std::cout << "f1: entered" << std::endl; try { hipStream_t stream0, stream1; hipStreamCreate( & stream0); hipStreamCreate( & stream1); int size = 1024 * 1024; int full_size = 20 * size; int * host_a, * host_b, * host_c; hipHostMalloc( & host_a, full_size * sizeof( int), hipHostMallocDefault); hipHostMalloc( & host_b, full_size * sizeof( int), hipHostMallocDefault); hipHostMalloc( & host_c, full_size * sizeof( int), hipHostMallocDefault); int * dev_a0, * dev_b0, * dev_c0; int * dev_a1, * dev_b1, * dev_c1; hipMalloc( & dev_a0, size * sizeof( int) ); hipMalloc( & dev_b0, size * sizeof( int) ); hipMalloc( & dev_c0, size * sizeof( int) ); hipMalloc( & dev_a1, size * sizeof( int) ); hipMalloc( & dev_b1, size * sizeof( int) ); hipMalloc( & dev_c1, size * sizeof( int) ); std::minstd_rand generator; std::uniform_int_distribution<> distribution(1, 6); for ( int i = 0; i < full_size; ++i) { host_a[i] = distribution( generator); host_b[i] = distribution( generator); } for ( int i = 0; i < full_size; i += 2 * size) { hipMemcpyAsync( dev_a0, host_a + i, size * sizeof( int), hipMemcpyHostToDevice, stream0); hipMemcpyAsync( dev_a1, host_a + i + size, size * sizeof( int), hipMemcpyHostToDevice, stream1); hipMemcpyAsync( dev_b0, host_b + i, size * sizeof( int), hipMemcpyHostToDevice, stream0); hipMemcpyAsync( dev_b1, host_b + i + size, size * sizeof( int), hipMemcpyHostToDevice, stream1); hipLaunchKernelGGL(( vector_add), dim3(size / 256), dim3(256), 0, stream0 , dev_a0, dev_b0, dev_c0, size); hipLaunchKernelGGL(( vector_add), dim3(size / 256), dim3(256), 0, stream1 , dev_a1, dev_b1, dev_c1, size); hipMemcpyAsync( host_c + i, dev_c0, size * sizeof( int), hipMemcpyDeviceToHost, stream0); hipMemcpyAsync( host_c + i + size, dev_c1, size * sizeof( int), hipMemcpyDeviceToHost, stream1); } auto results = boost::fibers::cuda::waitfor_all( stream0, stream1); for ( auto & result : results) { BOOST_ASSERT( stream0 == std::get< 0 >( result) || stream1 == std::get< 0 >( result) ); BOOST_ASSERT( hipSuccess == std::get< 1 >( result) ); } std::cout << "f1: GPU computation finished" << std::endl; hipHostFree( host_a); hipHostFree( host_b); hipHostFree( host_c); hipFree( dev_a0); hipFree( dev_b0); hipFree( dev_c0); hipFree( dev_a1); hipFree( dev_b1); hipFree( dev_c1); hipStreamDestroy( stream0); hipStreamDestroy( stream1); done = true; } catch ( std::exception const& ex) { std::cerr << "exception: " << ex.what() << std::endl; } std::cout << "f1: leaving" << std::endl; }); boost::fibers::fiber f2([&done]{ std::cout << "f2: entered" << std::endl; while ( ! done) { std::cout << "f2: sleeping" << std::endl; boost::this_fiber::sleep_for( std::chrono::milliseconds( 1 ) ); } std::cout << "f2: leaving" << std::endl; }); f1.join(); f2.join(); std::cout << "done." 
<< std::endl; return EXIT_SUCCESS; } catch ( std::exception const& e) { std::cerr << "exception: " << e.what() << std::endl; } catch (...) { std::cerr << "unhandled exception" << std::endl; } return EXIT_FAILURE; }
ae4d1c9522aa64d4f810c2178713e842d8e43de7.cu
// Copyright Oliver Kowalke 2013. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <chrono> #include <cstdlib> #include <iostream> #include <memory> #include <random> #include <tuple> #include <cuda.h> #include <boost/assert.hpp> #include <boost/bind.hpp> #include <boost/intrusive_ptr.hpp> #include <boost/fiber/all.hpp> #include <boost/fiber/cuda/waitfor.hpp> __global__ void vector_add( int * a, int * b, int * c, int size) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if ( idx < size) { c[idx] = a[idx] + b[idx]; } } int main() { try { bool done = false; boost::fibers::fiber f1( [&done]{ std::cout << "f1: entered" << std::endl; try { cudaStream_t stream0, stream1; cudaStreamCreate( & stream0); cudaStreamCreate( & stream1); int size = 1024 * 1024; int full_size = 20 * size; int * host_a, * host_b, * host_c; cudaHostAlloc( & host_a, full_size * sizeof( int), cudaHostAllocDefault); cudaHostAlloc( & host_b, full_size * sizeof( int), cudaHostAllocDefault); cudaHostAlloc( & host_c, full_size * sizeof( int), cudaHostAllocDefault); int * dev_a0, * dev_b0, * dev_c0; int * dev_a1, * dev_b1, * dev_c1; cudaMalloc( & dev_a0, size * sizeof( int) ); cudaMalloc( & dev_b0, size * sizeof( int) ); cudaMalloc( & dev_c0, size * sizeof( int) ); cudaMalloc( & dev_a1, size * sizeof( int) ); cudaMalloc( & dev_b1, size * sizeof( int) ); cudaMalloc( & dev_c1, size * sizeof( int) ); std::minstd_rand generator; std::uniform_int_distribution<> distribution(1, 6); for ( int i = 0; i < full_size; ++i) { host_a[i] = distribution( generator); host_b[i] = distribution( generator); } for ( int i = 0; i < full_size; i += 2 * size) { cudaMemcpyAsync( dev_a0, host_a + i, size * sizeof( int), cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync( dev_a1, host_a + i + size, size * sizeof( int), cudaMemcpyHostToDevice, stream1); cudaMemcpyAsync( dev_b0, host_b + i, size * sizeof( int), cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync( dev_b1, host_b + i + size, size * sizeof( int), cudaMemcpyHostToDevice, stream1); vector_add<<< size / 256, 256, 0, stream0 >>>( dev_a0, dev_b0, dev_c0, size); vector_add<<< size / 256, 256, 0, stream1 >>>( dev_a1, dev_b1, dev_c1, size); cudaMemcpyAsync( host_c + i, dev_c0, size * sizeof( int), cudaMemcpyDeviceToHost, stream0); cudaMemcpyAsync( host_c + i + size, dev_c1, size * sizeof( int), cudaMemcpyDeviceToHost, stream1); } auto results = boost::fibers::cuda::waitfor_all( stream0, stream1); for ( auto & result : results) { BOOST_ASSERT( stream0 == std::get< 0 >( result) || stream1 == std::get< 0 >( result) ); BOOST_ASSERT( cudaSuccess == std::get< 1 >( result) ); } std::cout << "f1: GPU computation finished" << std::endl; cudaFreeHost( host_a); cudaFreeHost( host_b); cudaFreeHost( host_c); cudaFree( dev_a0); cudaFree( dev_b0); cudaFree( dev_c0); cudaFree( dev_a1); cudaFree( dev_b1); cudaFree( dev_c1); cudaStreamDestroy( stream0); cudaStreamDestroy( stream1); done = true; } catch ( std::exception const& ex) { std::cerr << "exception: " << ex.what() << std::endl; } std::cout << "f1: leaving" << std::endl; }); boost::fibers::fiber f2([&done]{ std::cout << "f2: entered" << std::endl; while ( ! done) { std::cout << "f2: sleeping" << std::endl; boost::this_fiber::sleep_for( std::chrono::milliseconds( 1 ) ); } std::cout << "f2: leaving" << std::endl; }); f1.join(); f2.join(); std::cout << "done." 
<< std::endl; return EXIT_SUCCESS; } catch ( std::exception const& e) { std::cerr << "exception: " << e.what() << std::endl; } catch (...) { std::cerr << "unhandled exception" << std::endl; } return EXIT_FAILURE; }
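/* --- Illustrative sketch (not part of the file pair above) ---
   The same chunked copy/compute/copy pipeline written against the plain CUDA
   runtime, using cudaStreamSynchronize() instead of the fiber-aware wait. The
   point of contrast: cudaStreamSynchronize() blocks the whole OS thread,
   whereas boost::fibers::cuda::waitfor_all() above suspends only the calling
   fiber, letting the sibling fiber (f2) keep running. Chunk size, the launch
   shape and the single stream are assumptions for brevity; for the async
   copies to actually overlap, the host buffers should be pinned with
   cudaHostAlloc() as in the example above. */
#include <cuda_runtime.h>

__global__ void vector_add_sketch(int *a, int *b, int *c, int size) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size) {
        c[idx] = a[idx] + b[idx];
    }
}

void chunked_add(const int *host_a, const int *host_b, int *host_c,
                 int full_size, int size) {   // size assumed to divide full_size and 256
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, size * sizeof(int));
    cudaMalloc((void**)&dev_b, size * sizeof(int));
    cudaMalloc((void**)&dev_c, size * sizeof(int));

    for (int i = 0; i < full_size; i += size) {
        cudaMemcpyAsync(dev_a, host_a + i, size * sizeof(int), cudaMemcpyHostToDevice, stream);
        cudaMemcpyAsync(dev_b, host_b + i, size * sizeof(int), cudaMemcpyHostToDevice, stream);
        vector_add_sketch<<<size / 256, 256, 0, stream>>>(dev_a, dev_b, dev_c, size);
        cudaMemcpyAsync(host_c + i, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost, stream);
    }

    cudaStreamSynchronize(stream);   // blocks this thread until all chunks finish
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cudaStreamDestroy(stream);
}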
6f713ff3389ad3287b845bc3ecb6b8e1aef66ac7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <helper_cuda.h> #include <helper_math.h> typedef unsigned int uint; typedef unsigned char uchar; hipArray *d_volumeArray = 0; hipArray *d_transferFuncArray; typedef unsigned char VolumeType; //typedef unsigned short VolumeType; texture<VolumeType, 3, hipReadModeNormalizedFloat> tex; // 3D texture texture<float4, 1, hipReadModeElementType> transferTex; // 1D transfer function texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255); } __device__ void BinSingle(float input, uint* histogram, size_t size) { uint bin_count = size/sizeof(uint); float step = 1.f/bin_count; uint idx = (uint)(input/step); atomicAdd(&histogram[idx], 1); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, uint* pVolumeDataHist, size_t histSize) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, 
make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); //sample *= 64.0f; // scale for 10-bit data BinSingle(sample, pVolumeDataHist, histSize); __syncthreads(); // lookup in transfer function texture float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale); col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint; } extern "C" void initCuda(void *h_volume, hipExtent volumeSize) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>(); checkCudaErrors(hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize)); // copy data to 3D array hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; checkCudaErrors(hipMemcpy3D(&copyParams)); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = hipFilterModeLinear; // linear interpolation tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(tex, d_volumeArray, channelDesc)); // create transfer function texture float4 transferFunc[] = { { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 1.0, 1.0, }, }; hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray *d_transferFuncArray; checkCudaErrors(hipMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTex.filterMode = hipFilterModePoint; transferTex.normalized = true; // access with normalized texture coordinates transferTex.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTex, d_transferFuncArray, channelDesc2)); } extern "C" void freeCudaBuffers() { checkCudaErrors(hipFreeArray(d_volumeArray)); checkCudaErrors(hipFreeArray(d_transferFuncArray)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, 
float brightness, float transferOffset, float transferScale, uint* pVolumeDataHist, size_t histSize) { hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, imageW, imageH, density, brightness, transferOffset, transferScale, pVolumeDataHist, histSize); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { checkCudaErrors(hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix)); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
6f713ff3389ad3287b845bc3ecb6b8e1aef66ac7.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <helper_cuda.h> #include <helper_math.h> typedef unsigned int uint; typedef unsigned char uchar; cudaArray *d_volumeArray = 0; cudaArray *d_transferFuncArray; typedef unsigned char VolumeType; //typedef unsigned short VolumeType; texture<VolumeType, 3, cudaReadModeNormalizedFloat> tex; // 3D texture texture<float4, 1, cudaReadModeElementType> transferTex; // 1D transfer function texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255); } __device__ void BinSingle(float input, uint* histogram, size_t size) { uint bin_count = size/sizeof(uint); float step = 1.f/bin_count; uint idx = (uint)(input/step); atomicAdd(&histogram[idx], 1); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, uint* pVolumeDataHist, size_t histSize) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d 
= mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); //sample *= 64.0f; // scale for 10-bit data BinSingle(sample, pVolumeDataHist, histSize); __syncthreads(); // lookup in transfer function texture float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale); col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint; } extern "C" void initCuda(void *h_volume, cudaExtent volumeSize) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>(); checkCudaErrors(cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize)); // copy data to 3D array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; checkCudaErrors(cudaMemcpy3D(&copyParams)); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = cudaFilterModeLinear; // linear interpolation tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates tex.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(tex, d_volumeArray, channelDesc)); // create transfer function texture float4 transferFunc[] = { { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 1.0, 1.0, }, }; cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray *d_transferFuncArray; checkCudaErrors(cudaMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTex.filterMode = cudaFilterModePoint; transferTex.normalized = true; // access with normalized texture coordinates transferTex.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTex, d_transferFuncArray, channelDesc2)); } extern "C" void freeCudaBuffers() { checkCudaErrors(cudaFreeArray(d_volumeArray)); checkCudaErrors(cudaFreeArray(d_transferFuncArray)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, uint* 
pVolumeDataHist, size_t histSize) { d_render<<<gridSize, blockSize>>>(d_output, imageW, imageH, density, brightness, transferOffset, transferScale, pVolumeDataHist, histSize); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { checkCudaErrors(cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix)); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
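/* --- Illustrative sketch (not part of the file pair above) ---
   One possible host-side call sequence for the extern "C" entry points defined
   above (initCuda, copyInvViewMatrix, render_kernel, freeCudaBuffers). The
   volume and image dimensions, the render parameters, the histogram bin count
   and the view-matrix values are all illustrative assumptions; error checking
   is omitted. */
#include <cuda_runtime.h>

typedef unsigned int uint;

extern "C" void initCuda(void *h_volume, cudaExtent volumeSize);
extern "C" void freeCudaBuffers();
extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output,
                              uint imageW, uint imageH, float density,
                              float brightness, float transferOffset,
                              float transferScale, uint *pVolumeDataHist,
                              size_t histSize);
extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix);

void renderOneFrame(void *h_volume)
{
    const uint imageW = 512, imageH = 512;
    cudaExtent volumeSize = make_cudaExtent(32, 32, 32);   // assumed volume dims
    initCuda(h_volume, volumeSize);

    // Row-major 3x4 inverse view matrix: identity rotation, eye pulled back to
    // z = +4 (the 4th element of each row is the translation applied by the
    // float4 overload of mul() when computing the eye-ray origin).
    float invViewMatrix[12] = { 1, 0, 0, 0,
                                0, 1, 0, 0,
                                0, 0, 1, 4 };
    copyInvViewMatrix(invViewMatrix, sizeof(invViewMatrix));

    uint *d_output = 0, *d_hist = 0;
    const size_t histSize = 64 * sizeof(uint);   // 64 bins; note BinSingle would
                                                 // index one past the last bin
                                                 // for a sample of exactly 1.0
    cudaMalloc((void**)&d_output, imageW * imageH * sizeof(uint));
    cudaMalloc((void**)&d_hist, histSize);
    cudaMemset(d_hist, 0, histSize);

    dim3 blockSize(16, 16);
    dim3 gridSize((imageW + blockSize.x - 1) / blockSize.x,
                  (imageH + blockSize.y - 1) / blockSize.y);
    render_kernel(gridSize, blockSize, d_output, imageW, imageH,
                  0.05f /*density*/, 1.0f /*brightness*/,
                  0.0f /*transferOffset*/, 1.0f /*transferScale*/,
                  d_hist, histSize);
    cudaDeviceSynchronize();

    // d_output now holds packed RGBA pixels, d_hist the sample histogram.
    cudaFree(d_output);
    cudaFree(d_hist);
    freeCudaBuffers();
}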
b8ef33b813f7b67645ae260d18d8a9b7a526aacc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include "rocblas.h" #include <stdio.h> #include <iostream> #include <iomanip> #include <cmath> #include <chrono> // Input size int const BATCH = 1; //Must be 1 in this program int const DEPTH = 1; int const WIDTH = 32; int const LENGTH = 32; // Kernel characteristics int const ZPADX = 0; int const ZPADY = 0; int const STRIDEX = 1; int const STRIDEY = 1; int const CONV_RECP_SIZEX = 3; int const CONV_RECP_SIZEY = 3; int const NUM_OF_KERNELS = 1; // Convolution output characteristics int const convLayerSizeX = ((WIDTH - CONV_RECP_SIZEX + 2 * ZPADX) / STRIDEX + 1); int const convLayerSizeY = ((LENGTH - CONV_RECP_SIZEY + 2 * ZPADY) / STRIDEY + 1); // transformation matrix characteristics int const transformSizeY = convLayerSizeY * convLayerSizeX; int const transformSizeX = CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH; int const CONV_FINAL_SIZE = convLayerSizeX * convLayerSizeY * NUM_OF_KERNELS; __global__ void transformToMul(float* inputMatrix, float* reducedMatrix) { int Y = blockIdx.y * blockDim.y + threadIdx.y; int X = blockIdx.x * blockDim.x + threadIdx.x; if (Y < transformSizeY) { int inputX = (Y % convLayerSizeX) * STRIDEX + X % CONV_RECP_SIZEY; int inputY = (Y / convLayerSizeX) * STRIDEY + (X % (CONV_RECP_SIZEX * CONV_RECP_SIZEY)) / CONV_RECP_SIZEX; int inputZ = X / (CONV_RECP_SIZEX * CONV_RECP_SIZEY); if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1))) { reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX]; } else { reducedMatrix[(Y * transformSizeX) + X] = 0; } } } __global__ void rowMul(float* A, float* B, float* C) { int X = blockIdx.x * blockDim.x + threadIdx.x; int N = X % transformSizeY; int n = X / transformSizeY; float sum = 0; if (X < CONV_FINAL_SIZE) { for (int i = 0; i < transformSizeX; i++) { sum += A[n * transformSizeX + i] * B[N * transformSizeX + i]; } C[X] = sum; } } void generateFlat4DData(float* matrix, int x, int y, int z, int d, double type, double jump) { double w = jump; for (int b = 0; b < d; b++) { for (int c = 0; c < z; c++) { //std::cout << "slice: " << c + 1 << "\n"; for (int j = 0; j < y; j++) { for (int i = 0; i < x; i++) { if (type == -1) { matrix[((b * z + c) * y + j) * x + i] = rand() % 10; } else if (type == 0) { matrix[((b * z + c) * y + j) * x + i] = jump; } else { matrix[((b * z + c) * y + j) * x + i] = w; w += jump; } //std::cout << std::setprecision(1) << std::fixed << matrix[((b * z + c) * y + j) * x + i] << " , "; } //std::cout << "\n"; } //std::cout << "\n"; } //std::cout << "\n"; } } int main() { // Performance test variables hipEvent_t start1, stop1, start2, stop2; float time1,time2; hipEventCreate(&start1); hipEventCreate(&stop1); hipEventCreate(&start2); hipEventCreate(&stop2); hipError_t cudaStatus; // Initialize Host data, kernel and output float* hostInputMatrix = new float[BATCH * DEPTH * LENGTH * WIDTH]; float* hostTransformedInput = new float[transformSizeY * transformSizeX](); float* hostConvResult = new float[CONV_FINAL_SIZE](); float* hostConvLayerWeights = new float[NUM_OF_KERNELS * DEPTH * CONV_RECP_SIZEY * CONV_RECP_SIZEX]; // GENERATING INPUT std::cout << "Inputs:\n"; generateFlat4DData(hostInputMatrix, WIDTH, LENGTH, DEPTH, BATCH, 1, 0.1); generateFlat4DData(hostConvLayerWeights, CONV_RECP_SIZEX, CONV_RECP_SIZEY, DEPTH, 
NUM_OF_KERNELS, 1, 0.1); // Initializing and allocating Device data, kernels and output float* deviceInputMatrix; float* deviceTransformedInput; float* deviceConvLayerWeights; float* deviceConvResult; cudaStatus = hipMalloc((void **)&deviceConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void **)&deviceInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void **)&deviceTransformedInput, (transformSizeY * transformSizeX) * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void **)&deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMemcpy(deviceInputMatrix, hostInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(deviceConvLayerWeights, hostConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Initializing sizes of grid and block of threads dim3 threadsPerBlock(transformSizeX, transformSizeY); dim3 blocksPerGrid(1, 1); if (transformSizeY * transformSizeX > 1024) { threadsPerBlock.x = transformSizeX; threadsPerBlock.y = 1024 / transformSizeX; blocksPerGrid.x = ceil(double(transformSizeX) / double(threadsPerBlock.x)); blocksPerGrid.y = ceil(double(transformSizeY) / double(threadsPerBlock.y)); } // Run the kernel function and meassure time hipEventRecord(start1, 0); transformToMul << < blocksPerGrid, threadsPerBlock >> > (deviceInputMatrix, deviceTransformedInput); cudaStatus = cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "Transform addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipEventRecord(stop1, 0); if (cudaStatus != hipSuccess) { fprintf(stderr, "EventRecord failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipEventSynchronize(stop1); if (cudaStatus != hipSuccess) { fprintf(stderr, "EventSynchronize failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipEventElapsedTime(&time1, start1, stop1); if (cudaStatus != hipSuccess) { fprintf(stderr, "ElapsedTime failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } time1 = time1 * 1000; cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "DeviceSynchronize failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } dim3 threadsPerBlockMul(CONV_FINAL_SIZE); dim3 blocksPerGridMul(1); if (CONV_FINAL_SIZE > 1024) { threadsPerBlockMul.x = 1024; blocksPerGridMul.x = ceil(double(CONV_FINAL_SIZE) / double(threadsPerBlock.x)); } // Run the kernel function and meassure time hipEventRecord(start2, 0); rowMul << < blocksPerGridMul, threadsPerBlockMul >> > (deviceConvLayerWeights, deviceTransformedInput, deviceConvResult); cudaStatus = hipEventRecord(stop2, 0); if (cudaStatus != hipSuccess) { fprintf(stderr, "EventRecord failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "Mul 
addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipEventSynchronize(stop2); if (cudaStatus != hipSuccess) { fprintf(stderr, "EventSynchronize failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipEventElapsedTime(&time2, start2, stop2); if (cudaStatus != hipSuccess) { fprintf(stderr, "ElapsedTime failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } time2 = time2 * 1000; cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "DeviceSynchronize failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // Get the results from device cudaStatus = hipMemcpy(hostTransformedInput, deviceTransformedInput, (transformSizeX * transformSizeY) * sizeof(float), hipMemcpyDeviceToHost); // Not relevant to this program if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(hostConvResult, deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float), hipMemcpyDeviceToHost); // Not relevant to this program if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // PRINTING RESULTS std::cout << "Transformed matrix:\n"; for (int k = 0; k < transformSizeY; k++) { //for (int j = 0; j < transformSizeX; j++) //{ //std::cout << std::setprecision(1) << std::fixed << hostTransformedInput[k * transformSizeX + j] << " "; //} //std::cout << "\n"; } std::cout << "Convolution result:\n"; for (int k = 0; k < CONV_FINAL_SIZE; k++) { if (k % convLayerSizeX == 0) { //printf("\n"); } if (k % (convLayerSizeX * convLayerSizeY) == 0) { //printf("Depth=%d\n", k / (convLayerSizeX * convLayerSizeY)); } //std::cout << std::setprecision(1) << std::fixed << hostConvResult[k] << " "; } printf( "\n\n"); // CLEAN UP printf("Time for the kernel transform: %f us\n", time1); printf("Time for the kernel mul: %f us\n", time2); Error: hipFree(deviceInputMatrix); hipFree(deviceTransformedInput); hipFree(deviceConvLayerWeights); return 0; }
b8ef33b813f7b67645ae260d18d8a9b7a526aacc.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include "cublas_v2.h" #include <stdio.h> #include <iostream> #include <iomanip> #include <cmath> #include <chrono> // Input size int const BATCH = 1; //Must be 1 in this program int const DEPTH = 1; int const WIDTH = 32; int const LENGTH = 32; // Kernel characteristics int const ZPADX = 0; int const ZPADY = 0; int const STRIDEX = 1; int const STRIDEY = 1; int const CONV_RECP_SIZEX = 3; int const CONV_RECP_SIZEY = 3; int const NUM_OF_KERNELS = 1; // Convolution output characteristics int const convLayerSizeX = ((WIDTH - CONV_RECP_SIZEX + 2 * ZPADX) / STRIDEX + 1); int const convLayerSizeY = ((LENGTH - CONV_RECP_SIZEY + 2 * ZPADY) / STRIDEY + 1); // transformation matrix characteristics int const transformSizeY = convLayerSizeY * convLayerSizeX; int const transformSizeX = CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH; int const CONV_FINAL_SIZE = convLayerSizeX * convLayerSizeY * NUM_OF_KERNELS; __global__ void transformToMul(float* inputMatrix, float* reducedMatrix) { int Y = blockIdx.y * blockDim.y + threadIdx.y; int X = blockIdx.x * blockDim.x + threadIdx.x; if (Y < transformSizeY) { int inputX = (Y % convLayerSizeX) * STRIDEX + X % CONV_RECP_SIZEY; int inputY = (Y / convLayerSizeX) * STRIDEY + (X % (CONV_RECP_SIZEX * CONV_RECP_SIZEY)) / CONV_RECP_SIZEX; int inputZ = X / (CONV_RECP_SIZEX * CONV_RECP_SIZEY); if ((inputX >= ZPADX && inputX <= (ZPADX + WIDTH - 1)) && (inputY >= ZPADY && inputY <= (ZPADY + LENGTH - 1))) { reducedMatrix[(Y * transformSizeX) + X] = inputMatrix[(inputZ * LENGTH + inputY - ZPADY) * WIDTH + inputX - ZPADX]; } else { reducedMatrix[(Y * transformSizeX) + X] = 0; } } } __global__ void rowMul(float* A, float* B, float* C) { int X = blockIdx.x * blockDim.x + threadIdx.x; int N = X % transformSizeY; int n = X / transformSizeY; float sum = 0; if (X < CONV_FINAL_SIZE) { for (int i = 0; i < transformSizeX; i++) { sum += A[n * transformSizeX + i] * B[N * transformSizeX + i]; } C[X] = sum; } } void generateFlat4DData(float* matrix, int x, int y, int z, int d, double type, double jump) { double w = jump; for (int b = 0; b < d; b++) { for (int c = 0; c < z; c++) { //std::cout << "slice: " << c + 1 << "\n"; for (int j = 0; j < y; j++) { for (int i = 0; i < x; i++) { if (type == -1) { matrix[((b * z + c) * y + j) * x + i] = rand() % 10; } else if (type == 0) { matrix[((b * z + c) * y + j) * x + i] = jump; } else { matrix[((b * z + c) * y + j) * x + i] = w; w += jump; } //std::cout << std::setprecision(1) << std::fixed << matrix[((b * z + c) * y + j) * x + i] << " , "; } //std::cout << "\n"; } //std::cout << "\n"; } //std::cout << "\n"; } } int main() { // Performance test variables cudaEvent_t start1, stop1, start2, stop2; float time1,time2; cudaEventCreate(&start1); cudaEventCreate(&stop1); cudaEventCreate(&start2); cudaEventCreate(&stop2); cudaError_t cudaStatus; // Initialize Host data, kernel and output float* hostInputMatrix = new float[BATCH * DEPTH * LENGTH * WIDTH]; float* hostTransformedInput = new float[transformSizeY * transformSizeX](); float* hostConvResult = new float[CONV_FINAL_SIZE](); float* hostConvLayerWeights = new float[NUM_OF_KERNELS * DEPTH * CONV_RECP_SIZEY * CONV_RECP_SIZEX]; // GENERATING INPUT std::cout << "Inputs:\n"; generateFlat4DData(hostInputMatrix, WIDTH, LENGTH, DEPTH, BATCH, 1, 0.1); generateFlat4DData(hostConvLayerWeights, CONV_RECP_SIZEX, CONV_RECP_SIZEY, DEPTH, NUM_OF_KERNELS, 1, 0.1); // Initializing and allocating 
Device data, kernels and output float* deviceInputMatrix; float* deviceTransformedInput; float* deviceConvLayerWeights; float* deviceConvResult; cudaStatus = cudaMalloc((void **)&deviceConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void **)&deviceInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void **)&deviceTransformedInput, (transformSizeY * transformSizeX) * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void **)&deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMemcpy(deviceInputMatrix, hostInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(deviceConvLayerWeights, hostConvLayerWeights, (CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH * NUM_OF_KERNELS) * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Initializing sizes of grid and block of threads dim3 threadsPerBlock(transformSizeX, transformSizeY); dim3 blocksPerGrid(1, 1); if (transformSizeY * transformSizeX > 1024) { threadsPerBlock.x = transformSizeX; threadsPerBlock.y = 1024 / transformSizeX; blocksPerGrid.x = ceil(double(transformSizeX) / double(threadsPerBlock.x)); blocksPerGrid.y = ceil(double(transformSizeY) / double(threadsPerBlock.y)); } // Run the kernel function and meassure time cudaEventRecord(start1, 0); transformToMul << < blocksPerGrid, threadsPerBlock >> > (deviceInputMatrix, deviceTransformedInput); cudaStatus = cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Transform addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaEventRecord(stop1, 0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "EventRecord failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaEventSynchronize(stop1); if (cudaStatus != cudaSuccess) { fprintf(stderr, "EventSynchronize failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaEventElapsedTime(&time1, start1, stop1); if (cudaStatus != cudaSuccess) { fprintf(stderr, "ElapsedTime failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } time1 = time1 * 1000; cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "DeviceSynchronize failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } dim3 threadsPerBlockMul(CONV_FINAL_SIZE); dim3 blocksPerGridMul(1); if (CONV_FINAL_SIZE > 1024) { threadsPerBlockMul.x = 1024; blocksPerGridMul.x = ceil(double(CONV_FINAL_SIZE) / double(threadsPerBlock.x)); } // Run the kernel function and meassure time cudaEventRecord(start2, 0); rowMul << < blocksPerGridMul, threadsPerBlockMul >> > (deviceConvLayerWeights, deviceTransformedInput, deviceConvResult); cudaStatus = cudaEventRecord(stop2, 0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "EventRecord failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Mul addKernel launch 
failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaEventSynchronize(stop2); if (cudaStatus != cudaSuccess) { fprintf(stderr, "EventSynchronize failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaEventElapsedTime(&time2, start2, stop2); if (cudaStatus != cudaSuccess) { fprintf(stderr, "ElapsedTime failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } time2 = time2 * 1000; cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "DeviceSynchronize failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // Get the results from device cudaStatus = cudaMemcpy(hostTransformedInput, deviceTransformedInput, (transformSizeX * transformSizeY) * sizeof(float), cudaMemcpyDeviceToHost); // Not relevant to this program if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(hostConvResult, deviceConvResult, (CONV_FINAL_SIZE) * sizeof(float), cudaMemcpyDeviceToHost); // Not relevant to this program if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // PRINTING RESULTS std::cout << "Transformed matrix:\n"; for (int k = 0; k < transformSizeY; k++) { //for (int j = 0; j < transformSizeX; j++) //{ //std::cout << std::setprecision(1) << std::fixed << hostTransformedInput[k * transformSizeX + j] << " "; //} //std::cout << "\n"; } std::cout << "Convolution result:\n"; for (int k = 0; k < CONV_FINAL_SIZE; k++) { if (k % convLayerSizeX == 0) { //printf("\n"); } if (k % (convLayerSizeX * convLayerSizeY) == 0) { //printf("Depth=%d\n", k / (convLayerSizeX * convLayerSizeY)); } //std::cout << std::setprecision(1) << std::fixed << hostConvResult[k] << " "; } printf( "\n\n"); // CLEAN UP printf("Time for the kernel transform: %f us\n", time1); printf("Time for the kernel mul: %f us\n", time2); Error: cudaFree(deviceInputMatrix); cudaFree(deviceTransformedInput); cudaFree(deviceConvLayerWeights); return 0; }
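A quick way to sanity-check the program above is to recompute the 3x3, stride-1, unpadded convolution directly on the host and compare it against hostConvResult. The helper below is a minimal sketch, not part of the original program: it assumes it is compiled into the same translation unit (it reuses the size constants and headers already defined above), and the name verifyConvOnHost is hypothetical.

// Hypothetical host-side reference check for the DEPTH = 1, NUM_OF_KERNELS = 1 case.
bool verifyConvOnHost(const float* input, const float* weights, const float* gpuResult)
{
    for (int y = 0; y < convLayerSizeY; ++y) {
        for (int x = 0; x < convLayerSizeX; ++x) {
            float ref = 0.0f;
            // Direct convolution: slide the 3x3 receptive field over the input.
            for (int ky = 0; ky < CONV_RECP_SIZEY; ++ky)
                for (int kx = 0; kx < CONV_RECP_SIZEX; ++kx)
                    ref += input[(y * STRIDEY + ky) * WIDTH + (x * STRIDEX + kx)] *
                           weights[ky * CONV_RECP_SIZEX + kx];
            float gpu = gpuResult[y * convLayerSizeX + x];
            if (std::fabs(ref - gpu) > 1e-3f * (std::fabs(ref) + 1.0f)) {
                printf("Mismatch at (%d,%d): cpu=%f gpu=%f\n", x, y, ref, gpu);
                return false;
            }
        }
    }
    return true;
}

A call such as verifyConvOnHost(hostInputMatrix, hostConvLayerWeights, hostConvResult) placed just before the cleanup section would flag any indexing mistake in transformToMul or rowMul.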
eed559fc9d605eb7c6a5c8dc6a528f8c5357bfba.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <svd.hpp> #include <err_common.hpp> #include <cusolverDnManager.hpp> #include "transpose.hpp" #include <memory.hpp> #include <copy.hpp> #include <math.hpp> #include <err_common.hpp> #if defined(WITH_CUDA_LINEAR_ALGEBRA) #include <cusolverDnManager.hpp> namespace cuda { using cusolver::getDnHandle; template<typename T> cusolverStatus_t gesvd_buf_func(hipsolverDnHandle_t handle, int m, int n, int *Lwork) { return CUSOLVER_STATUS_ARCH_MISMATCH; } template<typename T, typename Tr> cusolverStatus_t gesvd_func(hipsolverDnHandle_t handle, char jobu, char jobvt, int m, int n, T *A, int lda, Tr *S, T *U, int ldu, T *VT, int ldvt, T *Work, int Lwork, Tr *rwork, int *devInfo) { return CUSOLVER_STATUS_ARCH_MISMATCH; } #define SVD_SPECIALIZE(T, Tr, X) \ template<> cusolverStatus_t \ gesvd_buf_func<T>(hipsolverDnHandle_t handle, \ int m, int n, int *Lwork) \ { \ return cusolverDn##X##gesvd_bufferSize(handle, m, n, Lwork); \ } \ SVD_SPECIALIZE(float , float , S); SVD_SPECIALIZE(double , double, D); SVD_SPECIALIZE(cfloat , float , C); SVD_SPECIALIZE(cdouble, double, Z); #undef SVD_SPECIALIZE #define SVD_SPECIALIZE(T, Tr, X) \ template<> cusolverStatus_t \ gesvd_func<T, Tr>(hipsolverDnHandle_t handle, \ char jobu, char jobvt, \ int m, int n, \ T *A, int lda, \ Tr *S, \ T *U, int ldu, \ T *VT, int ldvt, \ T *Work, int Lwork, \ Tr *rwork, int *devInfo) \ { \ return cusolverDn##X##gesvd(handle, jobu, jobvt, \ m, n, A, lda, S, U, ldu, VT, ldvt, \ Work, Lwork, rwork, devInfo); \ } \ SVD_SPECIALIZE(float , float , S); SVD_SPECIALIZE(double , double, D); SVD_SPECIALIZE(cfloat , float , C); SVD_SPECIALIZE(cdouble, double, Z); template <typename T, typename Tr> void svdInPlace(Array<Tr> &s, Array<T> &u, Array<T> &vt, Array<T> &in) { dim4 iDims = in.dims(); int M = iDims[0]; int N = iDims[1]; int lwork = 0; CUSOLVER_CHECK(gesvd_buf_func<T>(getDnHandle(), M, N, &lwork)); T *lWorkspace = memAlloc<T >(lwork); Tr *rWorkspace = memAlloc<Tr>(5 * ::min(M, N)); int *info = memAlloc<int>(1); gesvd_func<T, Tr>(getDnHandle(), 'A', 'A', M, N, in.get(), M, s.get(), u.get(), M, vt.get(), N, lWorkspace, lwork, rWorkspace, info); memFree(info); memFree(lWorkspace); memFree(rWorkspace); } template <typename T, typename Tr> void svd(Array<Tr> &s, Array<T> &u, Array<T> &vt, const Array<T> &in) { dim4 iDims = in.dims(); int M = iDims[0]; int N = iDims[1]; if (M >= N) { Array<T> in_copy = copyArray(in); svdInPlace(s, u, vt, in_copy); } else { Array<T> in_trans = transpose(in, true); svdInPlace(s, vt, u, in_trans); transpose_inplace(vt, true); transpose_inplace(u, true); } } } #elif defined(WITH_CPU_LINEAR_ALGEBRA) #include <cpu_lapack/cpu_svd.hpp> namespace cuda { template<typename T, typename Tr> void svd(Array<Tr> &s, Array<T> &u, Array<T> &vt, const Array<T> &in) { return cpu::svd<T, Tr>(s, u, vt, in); } template<typename T, typename Tr> void svdInPlace(Array<Tr> &s, Array<T> &u, Array<T> &vt, Array<T> &in) { return cpu::svdInPlace<T, Tr>(s, u, vt, in); } } #else namespace cuda { template<typename T, typename Tr> void svd(Array<Tr> &s, Array<T> &u, Array<T> &vt, const Array<T> &in) { AF_ERROR("CUDA cusolver not available. 
Linear Algebra is disabled", AF_ERR_NOT_CONFIGURED); } template<typename T, typename Tr> void svdInPlace(Array<Tr> &s, Array<T> &u, Array<T> &vt, Array<T> &in) { AF_ERROR("CUDA cusolver not available. Linear Algebra is disabled", AF_ERR_NOT_CONFIGURED); } } #endif namespace cuda { #define INSTANTIATE(T, Tr) \ template void svd<T, Tr>(Array<Tr> &s, Array<T> &u, Array<T> &vt, const Array<T> &in); \ template void svdInPlace<T, Tr>(Array<Tr> &s, Array<T> &u, Array<T> &vt, Array<T> &in); INSTANTIATE(float, float) INSTANTIATE(double, double) INSTANTIATE(cfloat, float) INSTANTIATE(cdouble, double) }
eed559fc9d605eb7c6a5c8dc6a528f8c5357bfba.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <svd.hpp> #include <err_common.hpp> #include <cusolverDnManager.hpp> #include "transpose.hpp" #include <memory.hpp> #include <copy.hpp> #include <math.hpp> #include <err_common.hpp> #if defined(WITH_CUDA_LINEAR_ALGEBRA) #include <cusolverDnManager.hpp> namespace cuda { using cusolver::getDnHandle; template<typename T> cusolverStatus_t gesvd_buf_func(cusolverDnHandle_t handle, int m, int n, int *Lwork) { return CUSOLVER_STATUS_ARCH_MISMATCH; } template<typename T, typename Tr> cusolverStatus_t gesvd_func(cusolverDnHandle_t handle, char jobu, char jobvt, int m, int n, T *A, int lda, Tr *S, T *U, int ldu, T *VT, int ldvt, T *Work, int Lwork, Tr *rwork, int *devInfo) { return CUSOLVER_STATUS_ARCH_MISMATCH; } #define SVD_SPECIALIZE(T, Tr, X) \ template<> cusolverStatus_t \ gesvd_buf_func<T>(cusolverDnHandle_t handle, \ int m, int n, int *Lwork) \ { \ return cusolverDn##X##gesvd_bufferSize(handle, m, n, Lwork); \ } \ SVD_SPECIALIZE(float , float , S); SVD_SPECIALIZE(double , double, D); SVD_SPECIALIZE(cfloat , float , C); SVD_SPECIALIZE(cdouble, double, Z); #undef SVD_SPECIALIZE #define SVD_SPECIALIZE(T, Tr, X) \ template<> cusolverStatus_t \ gesvd_func<T, Tr>(cusolverDnHandle_t handle, \ char jobu, char jobvt, \ int m, int n, \ T *A, int lda, \ Tr *S, \ T *U, int ldu, \ T *VT, int ldvt, \ T *Work, int Lwork, \ Tr *rwork, int *devInfo) \ { \ return cusolverDn##X##gesvd(handle, jobu, jobvt, \ m, n, A, lda, S, U, ldu, VT, ldvt, \ Work, Lwork, rwork, devInfo); \ } \ SVD_SPECIALIZE(float , float , S); SVD_SPECIALIZE(double , double, D); SVD_SPECIALIZE(cfloat , float , C); SVD_SPECIALIZE(cdouble, double, Z); template <typename T, typename Tr> void svdInPlace(Array<Tr> &s, Array<T> &u, Array<T> &vt, Array<T> &in) { dim4 iDims = in.dims(); int M = iDims[0]; int N = iDims[1]; int lwork = 0; CUSOLVER_CHECK(gesvd_buf_func<T>(getDnHandle(), M, N, &lwork)); T *lWorkspace = memAlloc<T >(lwork); Tr *rWorkspace = memAlloc<Tr>(5 * std::min(M, N)); int *info = memAlloc<int>(1); gesvd_func<T, Tr>(getDnHandle(), 'A', 'A', M, N, in.get(), M, s.get(), u.get(), M, vt.get(), N, lWorkspace, lwork, rWorkspace, info); memFree(info); memFree(lWorkspace); memFree(rWorkspace); } template <typename T, typename Tr> void svd(Array<Tr> &s, Array<T> &u, Array<T> &vt, const Array<T> &in) { dim4 iDims = in.dims(); int M = iDims[0]; int N = iDims[1]; if (M >= N) { Array<T> in_copy = copyArray(in); svdInPlace(s, u, vt, in_copy); } else { Array<T> in_trans = transpose(in, true); svdInPlace(s, vt, u, in_trans); transpose_inplace(vt, true); transpose_inplace(u, true); } } } #elif defined(WITH_CPU_LINEAR_ALGEBRA) #include <cpu_lapack/cpu_svd.hpp> namespace cuda { template<typename T, typename Tr> void svd(Array<Tr> &s, Array<T> &u, Array<T> &vt, const Array<T> &in) { return cpu::svd<T, Tr>(s, u, vt, in); } template<typename T, typename Tr> void svdInPlace(Array<Tr> &s, Array<T> &u, Array<T> &vt, Array<T> &in) { return cpu::svdInPlace<T, Tr>(s, u, vt, in); } } #else namespace cuda { template<typename T, typename Tr> void svd(Array<Tr> &s, Array<T> &u, Array<T> &vt, const Array<T> &in) { AF_ERROR("CUDA cusolver not available. 
Linear Algebra is disabled", AF_ERR_NOT_CONFIGURED); } template<typename T, typename Tr> void svdInPlace(Array<Tr> &s, Array<T> &u, Array<T> &vt, Array<T> &in) { AF_ERROR("CUDA cusolver not available. Linear Algebra is disabled", AF_ERR_NOT_CONFIGURED); } } #endif namespace cuda { #define INSTANTIATE(T, Tr) \ template void svd<T, Tr>(Array<Tr> &s, Array<T> &u, Array<T> &vt, const Array<T> &in); \ template void svdInPlace<T, Tr>(Array<Tr> &s, Array<T> &u, Array<T> &vt, Array<T> &in); INSTANTIATE(float, float) INSTANTIATE(double, double) INSTANTIATE(cfloat, float) INSTANTIATE(cdouble, double) }
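svdInPlace above follows the usual cuSOLVER dense-solver pattern: query the workspace size, allocate a device workspace and a devInfo flag, call gesvd, then release everything. The standalone sketch below is not ArrayFire code; it runs the same sequence for a small single-precision matrix with arbitrary contents. cuSOLVER's gesvd only supports m >= n, which is why the svd wrapper above transposes its input when M < N.

// Standalone cuSOLVER gesvd sketch (not ArrayFire code).
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <cstdio>
#include <vector>

int main()
{
    const int m = 4, n = 3;                                // gesvd needs m >= n
    std::vector<float> hA(m * n);
    for (int i = 0; i < m * n; ++i) hA[i] = float(i + 1);  // arbitrary column-major data

    cusolverDnHandle_t handle;
    cusolverDnCreate(&handle);

    float *dA, *dS, *dU, *dVT, *dWork;
    int *devInfo;
    cudaMalloc(&dA, sizeof(float) * m * n);
    cudaMalloc(&dS, sizeof(float) * n);
    cudaMalloc(&dU, sizeof(float) * m * m);
    cudaMalloc(&dVT, sizeof(float) * n * n);
    cudaMalloc(&devInfo, sizeof(int));
    cudaMemcpy(dA, hA.data(), sizeof(float) * m * n, cudaMemcpyHostToDevice);

    int lwork = 0;                                         // workspace query, as in svdInPlace
    cusolverDnSgesvd_bufferSize(handle, m, n, &lwork);
    cudaMalloc(&dWork, sizeof(float) * lwork);

    // 'A','A' -> full m x m U and n x n V^T; rwork is left null here
    // (the wrapper above allocates one explicitly).
    cusolverDnSgesvd(handle, 'A', 'A', m, n, dA, m, dS, dU, m, dVT, n,
                     dWork, lwork, nullptr, devInfo);

    int info = 0;
    cudaMemcpy(&info, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("gesvd devInfo = %d (0 means the factorization converged)\n", info);

    cudaFree(dA); cudaFree(dS); cudaFree(dU); cudaFree(dVT);
    cudaFree(dWork); cudaFree(devInfo);
    cusolverDnDestroy(handle);
    return 0;
}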
13708bbfca40795cec3d4351ba4bf6824f1e3287.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pointwise_hist2.cuh" #include "split_properties_helpers.cuh" #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <cstdlib> namespace NKernel { template <int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCK_SIZE> struct TPointHist { volatile float* Buffer; int BlockId; __forceinline__ __device__ int SliceOffset() { const int warpOffset = 1024 * (threadIdx.x / 32); const int blocks = 4 >> INNER_HIST_BITS_COUNT; const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3))); return warpOffset + innerHistStart; } __device__ TPointHist(float* buff) { const int HIST_SIZE = 32 * BLOCK_SIZE; for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) buff[i] = 0; __syncthreads(); Buffer = buff + SliceOffset(); BlockId = (threadIdx.x / 32) & ((1 << OUTER_HIST_BITS_COUNT) - 1); } __device__ void AddPoint(ui32 ci, const float t, const float w) { const bool flag = threadIdx.x & 1; #pragma unroll for (int i = 0; i < 4; i++) { short f = ((threadIdx.x & 7) + (i << 1)) & 6; short bin = bfe(ci, 24 - (f << 2), 8); short pass = (bin >> (5 + INNER_HIST_BITS_COUNT)) == BlockId; int offset0 = f + flag; int offset1 = f + !flag; const int mask = (1 << INNER_HIST_BITS_COUNT) - 1; const int tmp = (((bin >> INNER_HIST_BITS_COUNT) & 31) << 5) + 8 * (bin & mask); offset0 += tmp; offset1 += tmp; if (INNER_HIST_BITS_COUNT > 0) { #pragma unroll for (int k = 0; k < (1 << INNER_HIST_BITS_COUNT); ++k) { if (((threadIdx.x >> 3) & ((1 << INNER_HIST_BITS_COUNT) - 1)) == k) { Buffer[offset0] += (flag ? t : w) * pass; Buffer[offset1] += (flag ? w : t) * pass; } } } else { Buffer[offset0] += (flag ? t : w) * pass; Buffer[offset1] += (flag ? 
w : t) * pass; } } } //After reduce we store histograms by blocks: 256 floats (4 x 2 x 32) // for first 32 bins; than 256 floats for second 32 bins, etc __device__ void Reduce() { Buffer -= SliceOffset(); const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT; const int warpCount = BLOCK_SIZE >> 5; const int warpHistCount = warpCount >> OUTER_HIST_BITS_COUNT; const int fold = (threadIdx.x >> 3) & 31; const int mask = (1 << INNER_HIST_BITS_COUNT) - 1; const int binOffset = ((fold >> INNER_HIST_BITS_COUNT) << 5) + 8 * (fold & mask); const int offset = (threadIdx.x & 7) + binOffset; #pragma unroll for (int outerBits = 0; outerBits < 1 << (OUTER_HIST_BITS_COUNT); ++outerBits) { for (int innerBits = 0; innerBits < (1 << (INNER_HIST_BITS_COUNT)); ++innerBits) { float sum = 0.0; const int innerOffset = innerBits << (10 - INNER_HIST_BITS_COUNT); if (threadIdx.x < 256) { #pragma unroll for (int hist = 0; hist < warpHistCount; ++hist) { const int warpOffset = ((hist << OUTER_HIST_BITS_COUNT) + outerBits) * 1024; #pragma unroll for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) { sum += Buffer[offset + warpOffset + innerOffset + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))]; } } } __syncthreads(); if (threadIdx.x < 256) { Buffer[threadIdx.x + 256 * (innerBits | (outerBits << INNER_HIST_BITS_COUNT))] = sum; } } } __syncthreads(); } }; template <int STRIPE_SIZE, int HIST_BLOCK_COUNT, int N, int BLOCKS_PER_FEATURE, typename THist> __forceinline__ __device__ void ComputeHistogram( const ui32* __restrict indices, int dsSize, const float* __restrict target, const float* __restrict weight, const ui32* __restrict cindex, float* result) { indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE; target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE; weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE; dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE, 0); const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE; THist hist(result); if (dsSize) { int i = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32; int iteration_count = (dsSize - i + (stripe - 1)) / stripe; int blocked_iteration_count = ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N; weight += i; target += i; indices += i; #pragma unroll 4 for (int j = 0; j < blocked_iteration_count; ++j) { ui32 local_index[N]; #pragma unroll for (int k = 0; k < N; k++) { local_index[k] = __ldg(indices + stripe * k); } ui32 local_ci[N]; float local_w[N]; float local_wt[N]; #pragma unroll for (int k = 0; k < N; ++k) { local_ci[k] = __ldg(cindex + local_index[k]); local_w[k] = __ldg(weight + stripe * k); local_wt[k] = __ldg(target + stripe * k); } #pragma unroll for (int k = 0; k < N; ++k) { hist.AddPoint(local_ci[k], local_wt[k], local_w[k]); } i += stripe * N; indices += stripe * N; target += stripe * N; weight += stripe * N; } for (int k = blocked_iteration_count * N; k < iteration_count; ++k) { const int index = __ldg(indices); ui32 ci = __ldg(cindex + index); float w = __ldg(weight); float wt = __ldg(target); hist.AddPoint(ci, wt, w); i += stripe; indices += stripe; target += stripe; weight += stripe; } __syncthreads(); hist.Reduce(); } } template <int BLOCK_SIZE, int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int N, int BLOCKS_PER_FEATURE> __forceinline__ __device__ void ComputeSplitPropertiesPass(const TCFeature* __restrict feature, const ui32* __restrict cindex, const float* __restrict target, const float* __restrict weight, const ui32* __restrict indices, const TDataPartition* __restrict partition, 
int fCount, float* binSumsForPart, float* smem) { using THist = TPointHist < OUTER_HIST_BITS_COUNT, INNER_HIST_BITS_COUNT, BLOCK_SIZE >; const int stripeSize = BLOCK_SIZE >> OUTER_HIST_BITS_COUNT; const int histBlockCount = 1 << OUTER_HIST_BITS_COUNT; ComputeHistogram<stripeSize, histBlockCount, N, BLOCKS_PER_FEATURE, THist >(indices + partition->Offset, partition->Size, target + partition->Offset, weight + partition->Offset, cindex, smem); __syncthreads(); int fid = (threadIdx.x / 64); int fold = (threadIdx.x / 2) & 31; for (int upperBits = 0; upperBits < (1 << (OUTER_HIST_BITS_COUNT + INNER_HIST_BITS_COUNT)); ++upperBits) { const int binOffset = upperBits << 5; if (fid < fCount && fold < min((int)feature[fid].Folds - binOffset, 32)) { int w = threadIdx.x & 1; if (BLOCKS_PER_FEATURE > 1) { atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold + binOffset) * 2 + w, smem[fold * 8 + 2 * fid + w + 256 * upperBits]); } else { binSumsForPart[(feature[fid].FirstFoldIndex + fold + binOffset) * 2 + w] = smem[fold * 8 + 2 * fid + w + 256 * upperBits]; } } } __syncthreads(); } #define DECLARE_PASS(O, I, N, M) \ ComputeSplitPropertiesPass<BLOCK_SIZE, O, I, N, M>(feature, cindex, target, weight, indices, partition, fCount, binSums, &counters[0]); template <int BLOCK_SIZE, bool FULL_PASS, int M> #if __CUDA_ARCH__ >= 520 __launch_bounds__(BLOCK_SIZE, 2) #else __launch_bounds__(BLOCK_SIZE, 1) #endif __global__ void ComputeSplitPropertiesNBImpl( const TCFeature* __restrict feature, int fCount, const ui32* __restrict cindex, const float* __restrict target, const float* __restrict weight, int dsSize, const ui32* __restrict indices, const TDataPartition* __restrict partition, float* binSums, const int totalFeatureCount) { TPartOffsetsHelper helper(gridDim.z); if (FULL_PASS) { partition += helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z); binSums += helper.GetHistogramOffset(blockIdx.y, blockIdx.z) * 2 * totalFeatureCount; } else { const ui64 leftPartOffset = helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z); const ui64 rightPartOffset = helper.GetDataPartitionOffset(gridDim.y | blockIdx.y, blockIdx.z); const int leftPartSize = partition[leftPartOffset].Size; const int rightPartSize = partition[rightPartOffset].Size; partition += (leftPartSize < rightPartSize) ? 
leftPartOffset : rightPartOffset; binSums += 2 * totalFeatureCount * helper.GetHistogramOffset(gridDim.y | blockIdx.y, blockIdx.z); } feature += (blockIdx.x / M) * 4; cindex += feature->Offset * ((size_t)dsSize); fCount = min(fCount - (blockIdx.x / M) * 4, 4); // __shared__ float counters[32 * BLOCK_SIZE]; const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]); __syncthreads(); if (partition->Size) { if (maxBinCount <= 32) { DECLARE_PASS(0, 0, 8, M); } else if (maxBinCount <= 64) { DECLARE_PASS(0, 1, 4, M); } else if (maxBinCount <= 128) { DECLARE_PASS(0, 2, 4, M); } else { DECLARE_PASS(1, 2, 4, M); } } } template <int BIN_COUNT, int BLOCK_SIZE, bool FULL_PASS, int M> __launch_bounds__(BLOCK_SIZE, 1) __global__ void ComputeSplitPropertiesBImpl( const TCFeature* __restrict feature, int fCount, const ui32* __restrict cindex, const float* __restrict target, const float* __restrict weight, int dsSize, const ui32* __restrict indices, const TDataPartition* __restrict partition, float* __restrict binSums, int totalFeatureCount) { TPartOffsetsHelper helper(gridDim.z); if (FULL_PASS) { partition += helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z); binSums += helper.GetHistogramOffset(blockIdx.y, blockIdx.z) * 2 * totalFeatureCount; } else { const ui64 leftPartOffset = helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z); const ui64 rightPartOffset = helper.GetDataPartitionOffset(gridDim.y | blockIdx.y, blockIdx.z); const int leftPartSize = partition[leftPartOffset].Size; const int rightPartSize = partition[rightPartOffset].Size; partition += (leftPartSize < rightPartSize) ? leftPartOffset : rightPartOffset; binSums += 2 * totalFeatureCount * helper.GetHistogramOffset(gridDim.y | blockIdx.y, blockIdx.z); } feature += (blockIdx.x / M) * 20; cindex += feature->Offset * ((size_t)dsSize); fCount = min(fCount - (blockIdx.x / M) * 20, 20); __shared__ float counters[BIN_COUNT * BLOCK_SIZE]; if (partition->Size) { ComputeHistogram < BLOCK_SIZE, 1, 8, M, TPointHist<0, 0, BLOCK_SIZE> > (indices + partition->Offset, partition->Size, target + partition->Offset, weight + partition->Offset, cindex, &counters[0]); uchar fold = (threadIdx.x >> 1) & 1; uchar fid = (threadIdx.x >> 2); if (fid < fCount && fold < feature[fid].Folds) { uchar w = threadIdx.x & 1; uchar fMask = 1 << (4 - fid % 5); float sum = 0.f; #pragma uroll for (int i = 0; i < 32; i++) { if (!(i & fMask) || fold) sum += counters[i * 8 + 2 * (fid / 5) + w]; } if (M > 1) { atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, sum); } else { binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = sum; } } } } template <int BLOCK_SIZE, int BLOCKS_PER_FEATURE_COUNT> inline void RunComputeHist2NonBinaryKernel(const TCFeature* nbFeatures, int nbCount, const ui32* cindex, int dsSize, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, float* binSums, const int binFeatureCount, bool fullPass, TCudaStream stream, dim3 numBlocks) { if (fullPass) { ComputeSplitPropertiesNBImpl < BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, dsSize, indices, partition, binSums, binFeatureCount ); } else { ComputeSplitPropertiesNBImpl < BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT> << <numBlocks, BLOCK_SIZE, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, dsSize, indices, partition, binSums, binFeatureCount); } } inline ui32 EstimateBlockPerFeatureMultiplier(dim3 numBlocks, ui32 dsSize) { ui32 multiplier = 
1; while ((numBlocks.x * numBlocks.y * min(numBlocks.z, 4) * multiplier < TArchProps::SMCount()) && ((dsSize / multiplier) > 15000) && (multiplier < 64)) { multiplier *= 2; } return multiplier; } void ComputeHist2NonBinary(const TCFeature* nbFeatures, int nbCount, const ui32* cindex, int dsSize, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, ui32 partCount, ui32 foldCount, float* binSums, const int binFeatureCount, bool fullPass, TCudaStream stream) { if (nbCount) { dim3 numBlocks; numBlocks.x = (nbCount + 3) / 4; const int histPartCount = (fullPass ? partCount : partCount / 2); numBlocks.y = histPartCount; numBlocks.z = foldCount; const int blockSize = 384; const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, dsSize), 64); numBlocks.x *= multiplier; if (multiplier == 1) { RunComputeHist2NonBinaryKernel<blockSize, 1>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 2) { RunComputeHist2NonBinaryKernel<blockSize, 2>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 4) { RunComputeHist2NonBinaryKernel<blockSize, 4>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 8) { RunComputeHist2NonBinaryKernel<blockSize, 8>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 16) { RunComputeHist2NonBinaryKernel<blockSize, 16>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 32) { RunComputeHist2NonBinaryKernel<blockSize, 32>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 64) { RunComputeHist2NonBinaryKernel<blockSize, 64>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else { exit(1); } const int scanBlockSize = 256; dim3 scanBlocks; scanBlocks.x = (nbCount * 32 + scanBlockSize - 1) / scanBlockSize; scanBlocks.y = histPartCount; scanBlocks.z = foldCount; const int scanOffset = fullPass ? 
0 : ((partCount / 2) * binFeatureCount * 2) * foldCount; hipLaunchKernelGGL(( ScanHistogramsImpl<scanBlockSize, 2>), dim3(scanBlocks), dim3(scanBlockSize), 0, stream, nbFeatures, nbCount, binFeatureCount, binSums + scanOffset); if (!fullPass) { UpdatePointwiseHistograms(binSums, binFeatureCount, partCount, foldCount, 2, partition, stream); } } } template <int BLOCK_SIZE, int BLOCKS_PER_FEATURE_COUNT> void RunComputeHist2BinaryKernel(const TCFeature* bFeatures, int bCount, const ui32* cindex, int dsSize, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, float* binSums, bool fullPass, TCudaStream stream, dim3 numBlocks) { if (fullPass) { ComputeSplitPropertiesBImpl < 32, BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>( bFeatures, bCount, cindex, target, weight, dsSize, indices, partition, binSums, bCount ); } else { ComputeSplitPropertiesBImpl < 32, BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>( bFeatures, bCount, cindex, target, weight, dsSize, indices, partition, binSums, bCount ); } }; void ComputeHist2Binary(const TCFeature* bFeatures, int bCount, const ui32* cindex, int dsSize, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, ui32 partsCount, ui32 foldCount, float* binSums, bool fullPass, TCudaStream stream) { dim3 numBlocks; numBlocks.x = (bCount + 19) / 20; const int histCount = fullPass ? partsCount : partsCount / 2; numBlocks.y = histCount; numBlocks.z = foldCount; const int blockSize = 384; const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, dsSize), 64); numBlocks.x *= multiplier; if (bCount) { if (multiplier == 1) { RunComputeHist2BinaryKernel<blockSize, 1>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 2) { RunComputeHist2BinaryKernel<blockSize, 2>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 4) { RunComputeHist2BinaryKernel<blockSize, 4>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 8) { RunComputeHist2BinaryKernel<blockSize, 8>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 16) { RunComputeHist2BinaryKernel<blockSize, 16>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 32) { RunComputeHist2BinaryKernel<blockSize, 32>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 64) { RunComputeHist2BinaryKernel<blockSize, 64>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else { exit(1); } if (!fullPass) { UpdatePointwiseHistograms(binSums, bCount, partsCount, foldCount, 2, partition, stream); } } } __global__ void UpdateBinsImpl(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size, ui32 loadBit, ui32 foldBits) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { const ui32 idx = LdgWithFallback(docIndices, i); const ui32 bit = (LdgWithFallback(bins, idx) >> loadBit) & 1; dstBins[i] = dstBins[i] | (bit << (loadBit + foldBits)); } } void UpdateFoldBins(ui32* dstBins, const ui32* bins, 
const ui32* docIndices, ui32 size, ui32 loadBit, ui32 foldBits, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = CeilDivide(size, blockSize); hipLaunchKernelGGL(( UpdateBinsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, dstBins, bins, docIndices, size, loadBit, foldBits); } }
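Apart from header renames, the main systematic difference between this hipified file and the original .cu that follows is the launch syntax: hipify rewrites CUDA's kernel<<<grid, block, sharedMem, stream>>>(args...) as hipLaunchKernelGGL(kernel, grid, block, sharedMem, stream, args...). A minimal, self-contained illustration follows; the kernel and sizes are arbitrary stand-ins, not CatBoost code.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scaleKernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main()
{
    const int n = 256;
    float* d = nullptr;
    hipMalloc(&d, n * sizeof(float));
    hipMemset(d, 0, n * sizeof(float));

    // Portable HIP form, as emitted by hipify:
    hipLaunchKernelGGL(scaleKernel, dim3(1), dim3(256), 0, 0, d, 2.0f, n);
    // The equivalent CUDA spelling is: scaleKernel<<<1, 256, 0, 0>>>(d, 2.0f, n);

    hipDeviceSynchronize();
    hipFree(d);
    std::printf("done\n");
    return 0;
}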
13708bbfca40795cec3d4351ba4bf6824f1e3287.cu
#include "pointwise_hist2.cuh" #include "split_properties_helpers.cuh" #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <cstdlib> namespace NKernel { template <int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCK_SIZE> struct TPointHist { volatile float* Buffer; int BlockId; __forceinline__ __device__ int SliceOffset() { const int warpOffset = 1024 * (threadIdx.x / 32); const int blocks = 4 >> INNER_HIST_BITS_COUNT; const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3))); return warpOffset + innerHistStart; } __device__ TPointHist(float* buff) { const int HIST_SIZE = 32 * BLOCK_SIZE; for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) buff[i] = 0; __syncthreads(); Buffer = buff + SliceOffset(); BlockId = (threadIdx.x / 32) & ((1 << OUTER_HIST_BITS_COUNT) - 1); } __device__ void AddPoint(ui32 ci, const float t, const float w) { const bool flag = threadIdx.x & 1; #pragma unroll for (int i = 0; i < 4; i++) { short f = ((threadIdx.x & 7) + (i << 1)) & 6; short bin = bfe(ci, 24 - (f << 2), 8); short pass = (bin >> (5 + INNER_HIST_BITS_COUNT)) == BlockId; int offset0 = f + flag; int offset1 = f + !flag; const int mask = (1 << INNER_HIST_BITS_COUNT) - 1; const int tmp = (((bin >> INNER_HIST_BITS_COUNT) & 31) << 5) + 8 * (bin & mask); offset0 += tmp; offset1 += tmp; if (INNER_HIST_BITS_COUNT > 0) { #pragma unroll for (int k = 0; k < (1 << INNER_HIST_BITS_COUNT); ++k) { if (((threadIdx.x >> 3) & ((1 << INNER_HIST_BITS_COUNT) - 1)) == k) { Buffer[offset0] += (flag ? t : w) * pass; Buffer[offset1] += (flag ? w : t) * pass; } } } else { Buffer[offset0] += (flag ? t : w) * pass; Buffer[offset1] += (flag ? 
w : t) * pass; } } } //After reduce we store histograms by blocks: 256 floats (4 x 2 x 32) // for first 32 bins; than 256 floats for second 32 bins, etc __device__ void Reduce() { Buffer -= SliceOffset(); const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT; const int warpCount = BLOCK_SIZE >> 5; const int warpHistCount = warpCount >> OUTER_HIST_BITS_COUNT; const int fold = (threadIdx.x >> 3) & 31; const int mask = (1 << INNER_HIST_BITS_COUNT) - 1; const int binOffset = ((fold >> INNER_HIST_BITS_COUNT) << 5) + 8 * (fold & mask); const int offset = (threadIdx.x & 7) + binOffset; #pragma unroll for (int outerBits = 0; outerBits < 1 << (OUTER_HIST_BITS_COUNT); ++outerBits) { for (int innerBits = 0; innerBits < (1 << (INNER_HIST_BITS_COUNT)); ++innerBits) { float sum = 0.0; const int innerOffset = innerBits << (10 - INNER_HIST_BITS_COUNT); if (threadIdx.x < 256) { #pragma unroll for (int hist = 0; hist < warpHistCount; ++hist) { const int warpOffset = ((hist << OUTER_HIST_BITS_COUNT) + outerBits) * 1024; #pragma unroll for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) { sum += Buffer[offset + warpOffset + innerOffset + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))]; } } } __syncthreads(); if (threadIdx.x < 256) { Buffer[threadIdx.x + 256 * (innerBits | (outerBits << INNER_HIST_BITS_COUNT))] = sum; } } } __syncthreads(); } }; template <int STRIPE_SIZE, int HIST_BLOCK_COUNT, int N, int BLOCKS_PER_FEATURE, typename THist> __forceinline__ __device__ void ComputeHistogram( const ui32* __restrict indices, int dsSize, const float* __restrict target, const float* __restrict weight, const ui32* __restrict cindex, float* result) { indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE; target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE; weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE; dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE, 0); const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE; THist hist(result); if (dsSize) { int i = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32; int iteration_count = (dsSize - i + (stripe - 1)) / stripe; int blocked_iteration_count = ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N; weight += i; target += i; indices += i; #pragma unroll 4 for (int j = 0; j < blocked_iteration_count; ++j) { ui32 local_index[N]; #pragma unroll for (int k = 0; k < N; k++) { local_index[k] = __ldg(indices + stripe * k); } ui32 local_ci[N]; float local_w[N]; float local_wt[N]; #pragma unroll for (int k = 0; k < N; ++k) { local_ci[k] = __ldg(cindex + local_index[k]); local_w[k] = __ldg(weight + stripe * k); local_wt[k] = __ldg(target + stripe * k); } #pragma unroll for (int k = 0; k < N; ++k) { hist.AddPoint(local_ci[k], local_wt[k], local_w[k]); } i += stripe * N; indices += stripe * N; target += stripe * N; weight += stripe * N; } for (int k = blocked_iteration_count * N; k < iteration_count; ++k) { const int index = __ldg(indices); ui32 ci = __ldg(cindex + index); float w = __ldg(weight); float wt = __ldg(target); hist.AddPoint(ci, wt, w); i += stripe; indices += stripe; target += stripe; weight += stripe; } __syncthreads(); hist.Reduce(); } } template <int BLOCK_SIZE, int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int N, int BLOCKS_PER_FEATURE> __forceinline__ __device__ void ComputeSplitPropertiesPass(const TCFeature* __restrict feature, const ui32* __restrict cindex, const float* __restrict target, const float* __restrict weight, const ui32* __restrict indices, const TDataPartition* __restrict partition, 
int fCount, float* binSumsForPart, float* smem) { using THist = TPointHist < OUTER_HIST_BITS_COUNT, INNER_HIST_BITS_COUNT, BLOCK_SIZE >; const int stripeSize = BLOCK_SIZE >> OUTER_HIST_BITS_COUNT; const int histBlockCount = 1 << OUTER_HIST_BITS_COUNT; ComputeHistogram<stripeSize, histBlockCount, N, BLOCKS_PER_FEATURE, THist >(indices + partition->Offset, partition->Size, target + partition->Offset, weight + partition->Offset, cindex, smem); __syncthreads(); int fid = (threadIdx.x / 64); int fold = (threadIdx.x / 2) & 31; for (int upperBits = 0; upperBits < (1 << (OUTER_HIST_BITS_COUNT + INNER_HIST_BITS_COUNT)); ++upperBits) { const int binOffset = upperBits << 5; if (fid < fCount && fold < min((int)feature[fid].Folds - binOffset, 32)) { int w = threadIdx.x & 1; if (BLOCKS_PER_FEATURE > 1) { atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold + binOffset) * 2 + w, smem[fold * 8 + 2 * fid + w + 256 * upperBits]); } else { binSumsForPart[(feature[fid].FirstFoldIndex + fold + binOffset) * 2 + w] = smem[fold * 8 + 2 * fid + w + 256 * upperBits]; } } } __syncthreads(); } #define DECLARE_PASS(O, I, N, M) \ ComputeSplitPropertiesPass<BLOCK_SIZE, O, I, N, M>(feature, cindex, target, weight, indices, partition, fCount, binSums, &counters[0]); template <int BLOCK_SIZE, bool FULL_PASS, int M> #if __CUDA_ARCH__ >= 520 __launch_bounds__(BLOCK_SIZE, 2) #else __launch_bounds__(BLOCK_SIZE, 1) #endif __global__ void ComputeSplitPropertiesNBImpl( const TCFeature* __restrict feature, int fCount, const ui32* __restrict cindex, const float* __restrict target, const float* __restrict weight, int dsSize, const ui32* __restrict indices, const TDataPartition* __restrict partition, float* binSums, const int totalFeatureCount) { TPartOffsetsHelper helper(gridDim.z); if (FULL_PASS) { partition += helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z); binSums += helper.GetHistogramOffset(blockIdx.y, blockIdx.z) * 2 * totalFeatureCount; } else { const ui64 leftPartOffset = helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z); const ui64 rightPartOffset = helper.GetDataPartitionOffset(gridDim.y | blockIdx.y, blockIdx.z); const int leftPartSize = partition[leftPartOffset].Size; const int rightPartSize = partition[rightPartOffset].Size; partition += (leftPartSize < rightPartSize) ? 
leftPartOffset : rightPartOffset; binSums += 2 * totalFeatureCount * helper.GetHistogramOffset(gridDim.y | blockIdx.y, blockIdx.z); } feature += (blockIdx.x / M) * 4; cindex += feature->Offset * ((size_t)dsSize); fCount = min(fCount - (blockIdx.x / M) * 4, 4); // __shared__ float counters[32 * BLOCK_SIZE]; const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]); __syncthreads(); if (partition->Size) { if (maxBinCount <= 32) { DECLARE_PASS(0, 0, 8, M); } else if (maxBinCount <= 64) { DECLARE_PASS(0, 1, 4, M); } else if (maxBinCount <= 128) { DECLARE_PASS(0, 2, 4, M); } else { DECLARE_PASS(1, 2, 4, M); } } } template <int BIN_COUNT, int BLOCK_SIZE, bool FULL_PASS, int M> __launch_bounds__(BLOCK_SIZE, 1) __global__ void ComputeSplitPropertiesBImpl( const TCFeature* __restrict feature, int fCount, const ui32* __restrict cindex, const float* __restrict target, const float* __restrict weight, int dsSize, const ui32* __restrict indices, const TDataPartition* __restrict partition, float* __restrict binSums, int totalFeatureCount) { TPartOffsetsHelper helper(gridDim.z); if (FULL_PASS) { partition += helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z); binSums += helper.GetHistogramOffset(blockIdx.y, blockIdx.z) * 2 * totalFeatureCount; } else { const ui64 leftPartOffset = helper.GetDataPartitionOffset(blockIdx.y, blockIdx.z); const ui64 rightPartOffset = helper.GetDataPartitionOffset(gridDim.y | blockIdx.y, blockIdx.z); const int leftPartSize = partition[leftPartOffset].Size; const int rightPartSize = partition[rightPartOffset].Size; partition += (leftPartSize < rightPartSize) ? leftPartOffset : rightPartOffset; binSums += 2 * totalFeatureCount * helper.GetHistogramOffset(gridDim.y | blockIdx.y, blockIdx.z); } feature += (blockIdx.x / M) * 20; cindex += feature->Offset * ((size_t)dsSize); fCount = min(fCount - (blockIdx.x / M) * 20, 20); __shared__ float counters[BIN_COUNT * BLOCK_SIZE]; if (partition->Size) { ComputeHistogram < BLOCK_SIZE, 1, 8, M, TPointHist<0, 0, BLOCK_SIZE> > (indices + partition->Offset, partition->Size, target + partition->Offset, weight + partition->Offset, cindex, &counters[0]); uchar fold = (threadIdx.x >> 1) & 1; uchar fid = (threadIdx.x >> 2); if (fid < fCount && fold < feature[fid].Folds) { uchar w = threadIdx.x & 1; uchar fMask = 1 << (4 - fid % 5); float sum = 0.f; #pragma uroll for (int i = 0; i < 32; i++) { if (!(i & fMask) || fold) sum += counters[i * 8 + 2 * (fid / 5) + w]; } if (M > 1) { atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, sum); } else { binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = sum; } } } } template <int BLOCK_SIZE, int BLOCKS_PER_FEATURE_COUNT> inline void RunComputeHist2NonBinaryKernel(const TCFeature* nbFeatures, int nbCount, const ui32* cindex, int dsSize, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, float* binSums, const int binFeatureCount, bool fullPass, TCudaStream stream, dim3 numBlocks) { if (fullPass) { ComputeSplitPropertiesNBImpl < BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, dsSize, indices, partition, binSums, binFeatureCount ); } else { ComputeSplitPropertiesNBImpl < BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT> << <numBlocks, BLOCK_SIZE, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, dsSize, indices, partition, binSums, binFeatureCount); } } inline ui32 EstimateBlockPerFeatureMultiplier(dim3 numBlocks, ui32 dsSize) { ui32 multiplier = 
1; while ((numBlocks.x * numBlocks.y * min(numBlocks.z, 4) * multiplier < TArchProps::SMCount()) && ((dsSize / multiplier) > 15000) && (multiplier < 64)) { multiplier *= 2; } return multiplier; } void ComputeHist2NonBinary(const TCFeature* nbFeatures, int nbCount, const ui32* cindex, int dsSize, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, ui32 partCount, ui32 foldCount, float* binSums, const int binFeatureCount, bool fullPass, TCudaStream stream) { if (nbCount) { dim3 numBlocks; numBlocks.x = (nbCount + 3) / 4; const int histPartCount = (fullPass ? partCount : partCount / 2); numBlocks.y = histPartCount; numBlocks.z = foldCount; const int blockSize = 384; const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, dsSize), 64); numBlocks.x *= multiplier; if (multiplier == 1) { RunComputeHist2NonBinaryKernel<blockSize, 1>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 2) { RunComputeHist2NonBinaryKernel<blockSize, 2>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 4) { RunComputeHist2NonBinaryKernel<blockSize, 4>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 8) { RunComputeHist2NonBinaryKernel<blockSize, 8>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 16) { RunComputeHist2NonBinaryKernel<blockSize, 16>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 32) { RunComputeHist2NonBinaryKernel<blockSize, 32>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else if (multiplier == 64) { RunComputeHist2NonBinaryKernel<blockSize, 64>(nbFeatures, nbCount, cindex, dsSize, target, weight, indices, partition, binSums, binFeatureCount, fullPass, stream, numBlocks); } else { exit(1); } const int scanBlockSize = 256; dim3 scanBlocks; scanBlocks.x = (nbCount * 32 + scanBlockSize - 1) / scanBlockSize; scanBlocks.y = histPartCount; scanBlocks.z = foldCount; const int scanOffset = fullPass ? 
0 : ((partCount / 2) * binFeatureCount * 2) * foldCount; ScanHistogramsImpl<scanBlockSize, 2><<<scanBlocks, scanBlockSize, 0, stream>>>(nbFeatures, nbCount, binFeatureCount, binSums + scanOffset); if (!fullPass) { UpdatePointwiseHistograms(binSums, binFeatureCount, partCount, foldCount, 2, partition, stream); } } } template <int BLOCK_SIZE, int BLOCKS_PER_FEATURE_COUNT> void RunComputeHist2BinaryKernel(const TCFeature* bFeatures, int bCount, const ui32* cindex, int dsSize, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, float* binSums, bool fullPass, TCudaStream stream, dim3 numBlocks) { if (fullPass) { ComputeSplitPropertiesBImpl < 32, BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>( bFeatures, bCount, cindex, target, weight, dsSize, indices, partition, binSums, bCount ); } else { ComputeSplitPropertiesBImpl < 32, BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>( bFeatures, bCount, cindex, target, weight, dsSize, indices, partition, binSums, bCount ); } }; void ComputeHist2Binary(const TCFeature* bFeatures, int bCount, const ui32* cindex, int dsSize, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, ui32 partsCount, ui32 foldCount, float* binSums, bool fullPass, TCudaStream stream) { dim3 numBlocks; numBlocks.x = (bCount + 19) / 20; const int histCount = fullPass ? partsCount : partsCount / 2; numBlocks.y = histCount; numBlocks.z = foldCount; const int blockSize = 384; const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, dsSize), 64); numBlocks.x *= multiplier; if (bCount) { if (multiplier == 1) { RunComputeHist2BinaryKernel<blockSize, 1>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 2) { RunComputeHist2BinaryKernel<blockSize, 2>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 4) { RunComputeHist2BinaryKernel<blockSize, 4>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 8) { RunComputeHist2BinaryKernel<blockSize, 8>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 16) { RunComputeHist2BinaryKernel<blockSize, 16>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 32) { RunComputeHist2BinaryKernel<blockSize, 32>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else if (multiplier == 64) { RunComputeHist2BinaryKernel<blockSize, 64>(bFeatures, bCount, cindex, dsSize, target, weight, indices, partition, binSums, fullPass, stream, numBlocks); } else { exit(1); } if (!fullPass) { UpdatePointwiseHistograms(binSums, bCount, partsCount, foldCount, 2, partition, stream); } } } __global__ void UpdateBinsImpl(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size, ui32 loadBit, ui32 foldBits) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { const ui32 idx = LdgWithFallback(docIndices, i); const ui32 bit = (LdgWithFallback(bins, idx) >> loadBit) & 1; dstBins[i] = dstBins[i] | (bit << (loadBit + foldBits)); } } void UpdateFoldBins(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size, 
ui32 loadBit, ui32 foldBits, TCudaStream stream) { const ui32 blockSize = 256; const ui32 numBlocks = CeilDivide(size, blockSize); UpdateBinsImpl<<<numBlocks, blockSize, 0, stream>>>(dstBins, bins, docIndices, size, loadBit, foldBits); } }
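EstimateBlockPerFeatureMultiplier always returns a power of two in [1, 64], and ComputeHist2NonBinary / ComputeHist2Binary turn that runtime value into a compile-time template argument through the chain of if/else branches above (keeping BLOCKS_PER_FEATURE a compile-time constant lets the compiler simplify the blockIdx.x % BLOCKS_PER_FEATURE arithmetic, among other things). The same dispatch can be written once with a small recursive helper; the sketch below (C++17 for if constexpr) is a generic illustration with hypothetical names, not CatBoost code.

#include <cstdio>

// Map a runtime power-of-two multiplier onto a template instantiation, as the
// if/else chains above do by hand.
template <template <int> class Func, int M = 1>
bool DispatchPow2(int multiplier)
{
    if (multiplier == M) { Func<M>::Run(); return true; }
    if constexpr (M < 64) {
        return DispatchPow2<Func, M * 2>(multiplier);
    } else {
        return false;   // unsupported value, analogous to the exit(1) fallback above
    }
}

template <int M>
struct PrintMultiplier
{
    static void Run() { std::printf("instantiated with M = %d\n", M); }
};

int main()
{
    const int multipliers[] = {1, 2, 4, 8, 16, 32, 64};
    for (int m : multipliers) DispatchPow2<PrintMultiplier>(m);
    return 0;
}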
2b75284c55d77275f936684cef0e6c9b3c14ad10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "harmonicRepulsion.cuh" /*! \file harmonicRepulsion.cu \addtogroup forceKernels @{ */ #include "functions.h" /*! calculate the force per particle given harmonic repulsions */ __global__ void gpu_harmonic_repulsion_kernel(dVec *d_force,unsigned int *d_neighborsPerParticle,int *d_neighbors,dVec *d_neighborVectors,int *particleType,scalar *d_radii,scalar *d_params,Index2D neighborIndexer,Index2D particleTypeIndexer,int N,bool zeroForce) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; int neighs = d_neighborsPerParticle[idx]; if(zeroForce) d_force[idx] = make_dVec(0.0); for (int nn = 0; nn < neighs; ++nn) { int nIdx = neighborIndexer(nn,idx); int p2 = d_neighbors[nIdx]; dVec relativeDistance = d_neighborVectors[nIdx]; //get parameters scalar K = d_params[particleTypeIndexer(particleType[p2],particleType[idx])]; scalar sigma0 = d_radii[idx]+d_radii[p2]; //compute force scalar dnorm = norm(relativeDistance); if (dnorm <= sigma0) d_force[idx] += K*(1.0/sigma0)*(1.0-dnorm/sigma0)*(1.0/dnorm)*relativeDistance; }; }; __global__ void gpu_harmonic_repulsion_monodisperse_kernel(dVec *d_force,unsigned int *d_neighborsPerParticle,int *d_neighbors,dVec *d_neighborVectors,Index2D neighborIndexer,int N,bool zeroForce) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; int neighs = d_neighborsPerParticle[idx]; if(zeroForce) d_force[idx] = make_dVec(0.0); for (int nn = 0; nn < neighs; ++nn) { int nIdx = neighborIndexer(nn,idx); dVec relativeDistance = d_neighborVectors[nIdx]; //compute force scalar dnorm = norm(relativeDistance); if (dnorm <= 1.0) d_force[idx] += 1.0*(1.0-dnorm)*(1.0/dnorm)*relativeDistance; }; }; __global__ void gpu_harmonic_repulsion_allPairs_kernel(dVec *d_force,dVec *d_pos,int *particleType, scalar *d_radii,scalar *d_params,Index2D particleTypeIndexer,periodicBoundaryConditions Box, int N,bool zeroForce) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; dVec disp; scalar dnorm; scalar r1 = d_radii[idx]; if(zeroForce) d_force[idx] = make_dVec(0.0); for (int nn = 0; nn < N; ++nn) { if(nn == idx) continue; Box.minDist(d_pos[idx],d_pos[nn],disp); dnorm = norm(disp); scalar sigma0 = r1 + d_radii[nn]; if(dnorm < sigma0) { scalar K = d_params[particleTypeIndexer(particleType[nn],particleType[idx])]; d_force[idx] += K*(1.0/sigma0)*(1.0-dnorm/sigma0)*(1.0/dnorm)*disp; }; }; }; /*! Calculate harmonic repulsion forces, launching one thread per particle */ bool gpu_harmonic_repulsion_monodisperse_calculation(dVec *d_force,unsigned int *d_neighborsPerParticle,int *d_neighbors,dVec *d_neighborVectors,Index2D neighborIndexer,int N,bool zeroForce) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; hipLaunchKernelGGL(( gpu_harmonic_repulsion_monodisperse_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_force, d_neighborsPerParticle, d_neighbors, d_neighborVectors, neighborIndexer, N, zeroForce); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; /*! 
Calculate harmonic repulsion forces, launching one thread per particle */ bool gpu_harmonic_repulsion_calculation(dVec *d_force,unsigned int *d_neighborsPerParticle,int *d_neighbors,dVec *d_neighborVectors,int *particleType,scalar *d_radii,scalar *d_params,Index2D neighborIndexer,Index2D particleTypeIndexer,int N,bool zeroForce) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; hipLaunchKernelGGL(( gpu_harmonic_repulsion_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_force, d_neighborsPerParticle, d_neighbors, d_neighborVectors, particleType, d_radii, d_params, neighborIndexer, particleTypeIndexer, N, zeroForce); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; /*! Calculate harmonic repulsion forces, launching one thread per particle, by brute force */ bool gpu_harmonic_repulsion_allPairs(dVec *d_force,dVec *d_pos,int *particleType, scalar *d_radii,scalar *d_params,Index2D particleTypeIndexer,periodicBoundaryConditions &Box, int N, bool zeroForce) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; hipLaunchKernelGGL(( gpu_harmonic_repulsion_allPairs_kernel), dim3(nblocks),dim3(block_size), 0, 0, d_force, d_pos, particleType, d_radii, d_params, particleTypeIndexer, Box, N, zeroForce); HANDLE_ERROR(hipGetLastError()); return hipSuccess; }; /** @} */ //end of group declaration
2b75284c55d77275f936684cef0e6c9b3c14ad10.cu
#include "harmonicRepulsion.cuh" /*! \file harmonicRepulsion.cu \addtogroup forceKernels @{ */ #include "functions.h" /*! calculate the force per particle given harmonic repulsions */ __global__ void gpu_harmonic_repulsion_kernel(dVec *d_force,unsigned int *d_neighborsPerParticle,int *d_neighbors,dVec *d_neighborVectors,int *particleType,scalar *d_radii,scalar *d_params,Index2D neighborIndexer,Index2D particleTypeIndexer,int N,bool zeroForce) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; int neighs = d_neighborsPerParticle[idx]; if(zeroForce) d_force[idx] = make_dVec(0.0); for (int nn = 0; nn < neighs; ++nn) { int nIdx = neighborIndexer(nn,idx); int p2 = d_neighbors[nIdx]; dVec relativeDistance = d_neighborVectors[nIdx]; //get parameters scalar K = d_params[particleTypeIndexer(particleType[p2],particleType[idx])]; scalar sigma0 = d_radii[idx]+d_radii[p2]; //compute force scalar dnorm = norm(relativeDistance); if (dnorm <= sigma0) d_force[idx] += K*(1.0/sigma0)*(1.0-dnorm/sigma0)*(1.0/dnorm)*relativeDistance; }; }; __global__ void gpu_harmonic_repulsion_monodisperse_kernel(dVec *d_force,unsigned int *d_neighborsPerParticle,int *d_neighbors,dVec *d_neighborVectors,Index2D neighborIndexer,int N,bool zeroForce) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; int neighs = d_neighborsPerParticle[idx]; if(zeroForce) d_force[idx] = make_dVec(0.0); for (int nn = 0; nn < neighs; ++nn) { int nIdx = neighborIndexer(nn,idx); dVec relativeDistance = d_neighborVectors[nIdx]; //compute force scalar dnorm = norm(relativeDistance); if (dnorm <= 1.0) d_force[idx] += 1.0*(1.0-dnorm)*(1.0/dnorm)*relativeDistance; }; }; __global__ void gpu_harmonic_repulsion_allPairs_kernel(dVec *d_force,dVec *d_pos,int *particleType, scalar *d_radii,scalar *d_params,Index2D particleTypeIndexer,periodicBoundaryConditions Box, int N,bool zeroForce) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; dVec disp; scalar dnorm; scalar r1 = d_radii[idx]; if(zeroForce) d_force[idx] = make_dVec(0.0); for (int nn = 0; nn < N; ++nn) { if(nn == idx) continue; Box.minDist(d_pos[idx],d_pos[nn],disp); dnorm = norm(disp); scalar sigma0 = r1 + d_radii[nn]; if(dnorm < sigma0) { scalar K = d_params[particleTypeIndexer(particleType[nn],particleType[idx])]; d_force[idx] += K*(1.0/sigma0)*(1.0-dnorm/sigma0)*(1.0/dnorm)*disp; }; }; }; /*! Calculate harmonic repulsion forces, launching one thread per particle */ bool gpu_harmonic_repulsion_monodisperse_calculation(dVec *d_force,unsigned int *d_neighborsPerParticle,int *d_neighbors,dVec *d_neighborVectors,Index2D neighborIndexer,int N,bool zeroForce) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; gpu_harmonic_repulsion_monodisperse_kernel<<<nblocks,block_size>>>(d_force, d_neighborsPerParticle, d_neighbors, d_neighborVectors, neighborIndexer, N, zeroForce); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; /*! 
Calculate harmonic repulsion forces, launching one thread per particle */ bool gpu_harmonic_repulsion_calculation(dVec *d_force,unsigned int *d_neighborsPerParticle,int *d_neighbors,dVec *d_neighborVectors,int *particleType,scalar *d_radii,scalar *d_params,Index2D neighborIndexer,Index2D particleTypeIndexer,int N,bool zeroForce) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; gpu_harmonic_repulsion_kernel<<<nblocks,block_size>>>(d_force, d_neighborsPerParticle, d_neighbors, d_neighborVectors, particleType, d_radii, d_params, neighborIndexer, particleTypeIndexer, N, zeroForce); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; /*! Calculate harmonic repulsion forces, launching one thread per particle, by brute force */ bool gpu_harmonic_repulsion_allPairs(dVec *d_force,dVec *d_pos,int *particleType, scalar *d_radii,scalar *d_params,Index2D particleTypeIndexer,periodicBoundaryConditions &Box, int N, bool zeroForce) { unsigned int block_size = 128; if (N < 128) block_size = 32; unsigned int nblocks = N/block_size + 1; gpu_harmonic_repulsion_allPairs_kernel<<<nblocks,block_size>>>(d_force, d_pos, particleType, d_radii, d_params, particleTypeIndexer, Box, N, zeroForce); HANDLE_ERROR(cudaGetLastError()); return cudaSuccess; }; /** @} */ //end of group declaration
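Both wrappers above use the same one-thread-per-particle launch configuration: a block size of 128 (dropping to 32 when N < 128) and N/block_size + 1 blocks. That rounding launches one extra, completely idle block whenever N is an exact multiple of the block size; the idx >= N guard at the top of each kernel keeps this harmless. The small host-only sketch below simply tabulates that choice next to the usual ceil-divide form.

#include <cstdio>

unsigned int blocksFor(unsigned int N, unsigned int blockSize)
{
    return N / blockSize + 1;                  // rounding used by the wrappers above
}

unsigned int blocksForCeil(unsigned int N, unsigned int blockSize)
{
    return (N + blockSize - 1) / blockSize;    // common ceil-divide alternative
}

int main()
{
    const unsigned int sizes[] = {100, 127, 128, 1000, 1024};
    for (unsigned int N : sizes) {
        unsigned int bs = (N < 128) ? 32u : 128u;   // same block-size rule as above
        std::printf("N=%4u block=%3u blocks(file)=%2u blocks(ceil)=%2u\n",
                    N, bs, blocksFor(N, bs), blocksForCeil(N, bs));
    }
    return 0;
}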
ba2064f8a6c321806fcc0b4886921bc46cd019ee.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpuSD.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *vectsA = NULL; hipMalloc(&vectsA, XSIZE*YSIZE); size_t na = 1; const float *vectsB = NULL; hipMalloc(&vectsB, XSIZE*YSIZE); size_t nb = 1; size_t dim = 2; const float *means = NULL; hipMalloc(&means, XSIZE*YSIZE); const float *numPairs = NULL; hipMalloc(&numPairs, XSIZE*YSIZE); float *sds = NULL; hipMalloc(&sds, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpuSD), dim3(gridBlock),dim3(threadBlock), 0, 0, vectsA,na,vectsB,nb,dim,means,numPairs,sds); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpuSD), dim3(gridBlock),dim3(threadBlock), 0, 0, vectsA,na,vectsB,nb,dim,means,numPairs,sds); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpuSD), dim3(gridBlock),dim3(threadBlock), 0, 0, vectsA,na,vectsB,nb,dim,means,numPairs,sds); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ba2064f8a6c321806fcc0b4886921bc46cd019ee.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpuSD.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *vectsA = NULL; cudaMalloc(&vectsA, XSIZE*YSIZE); size_t na = 1; const float *vectsB = NULL; cudaMalloc(&vectsB, XSIZE*YSIZE); size_t nb = 1; size_t dim = 2; const float *means = NULL; cudaMalloc(&means, XSIZE*YSIZE); const float *numPairs = NULL; cudaMalloc(&numPairs, XSIZE*YSIZE); float *sds = NULL; cudaMalloc(&sds, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpuSD<<<gridBlock,threadBlock>>>(vectsA,na,vectsB,nb,dim,means,numPairs,sds); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpuSD<<<gridBlock,threadBlock>>>(vectsA,na,vectsB,nb,dim,means,numPairs,sds); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpuSD<<<gridBlock,threadBlock>>>(vectsA,na,vectsB,nb,dim,means,numPairs,sds); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
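/*
 * The benchmark above wraps asynchronous kernel launches in steady_clock without a
 * device synchronize after the timed loop, so the measured interval can reflect launch
 * overhead rather than kernel execution time. Below is a sketch of event-based timing
 * for the same launch; it assumes the gpuSD signature and the grid/block setup used
 * above and is not part of the generated harness.
 */
float timeGpuSD(dim3 grid, dim3 block, const float *vectsA, size_t na,
                const float *vectsB, size_t nb, size_t dim,
                const float *means, const float *numPairs, float *sds, int iters)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);                              // enqueued on the default stream
    for (int i = 0; i < iters; ++i)
        gpuSD<<<grid, block>>>(vectsA, na, vectsB, nb, dim, means, numPairs, sds);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                          // wait for all timed launches to finish
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);              // milliseconds between the two events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / iters;                                   // average time per launch
}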
1b4625b086ab7f952f6140641de372f9231e21d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_poisson_kernel_error; int xdim0_poisson_kernel_error_h = -1; int ydim0_poisson_kernel_error_h = -1; __constant__ int xdim1_poisson_kernel_error; int xdim1_poisson_kernel_error_h = -1; int ydim1_poisson_kernel_error_h = -1; #define OPS_ACC0(x,y) (x+xdim0_poisson_kernel_error*(y)) #define OPS_ACC1(x,y) (x+xdim1_poisson_kernel_error*(y)) //user function __device__ void poisson_kernel_error(const double *u, const double *ref, double *err) { *err = *err + (u[OPS_ACC0(0,0)]-ref[OPS_ACC1(0,0)])*(u[OPS_ACC0(0,0)]-ref[OPS_ACC1(0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_poisson_kernel_error( const double* __restrict arg0, const double* __restrict arg1, double* __restrict arg2, int size0, int size1 ){ double arg2_l[1]; for (int d=0; d<1; d++) arg2_l[d] = ZERO_double; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_poisson_kernel_error; arg1 += idx_x * 1 + idx_y * 1 * xdim1_poisson_kernel_error; if (idx_x < size0 && idx_y < size1) { poisson_kernel_error(arg0, arg1, arg2_l); } for (int d=0; d<1; d++) ops_reduction_cuda<OPS_INC>(&arg2[d+blockIdx.x + blockIdx.y*gridDim.x],arg2_l[d]); } // host stub function void ops_par_loop_poisson_kernel_error(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(4,"poisson_kernel_error"); OPS_kernels[4].count++; //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_poisson_kernel_error_h || xdim1 != xdim1_poisson_kernel_error_h) { hipMemcpyToSymbol( xdim0_poisson_kernel_error, &xdim0, sizeof(int) ); xdim0_poisson_kernel_error_h = xdim0; hipMemcpyToSymbol( xdim1_poisson_kernel_error, &xdim1, sizeof(int) ); xdim1_poisson_kernel_error_h = xdim1; } #ifdef OPS_MPI double *arg2h = (double *)(((ops_reduction)args[2].data)->data + ((ops_reduction)args[2].data)->size * block->index); #else //OPS_MPI double *arg2h = (double *)(((ops_reduction)args[2].data)->data); #endif //OPS_MPI dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1); int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = 
MAX(reduct_size,sizeof(double)*1); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg2.data = OPS_reduct_h + reduct_bytes; arg2.data_d = OPS_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((double *)arg2.data)[d+b*1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[4].mpi_time += t1-t2; int nshared = 0; int nthread = OPS_block_size_x*OPS_block_size_y; nshared = MAX(nshared,sizeof(double)*1); nshared = MAX(nshared*nthread,reduct_size*nthread); //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_poisson_kernel_error), dim3(grid), dim3(tblock), nshared , 0, (double *)p_a[0], (double *)p_a[1], (double *)arg2.data_d,x_size, y_size); mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg2h[d] = arg2h[d] + ((double *)arg2.data)[d+b*1]; } } arg2.data = (char *)arg2h; if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[4].time += t2-t1; ops_set_dirtybit_device(args, 3); //Update kernel record OPS_kernels[4].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[4].transfer += ops_compute_transfer(dim, range, &arg1); }
1b4625b086ab7f952f6140641de372f9231e21d6.cu
// // auto-generated by ops.py // __constant__ int xdim0_poisson_kernel_error; int xdim0_poisson_kernel_error_h = -1; int ydim0_poisson_kernel_error_h = -1; __constant__ int xdim1_poisson_kernel_error; int xdim1_poisson_kernel_error_h = -1; int ydim1_poisson_kernel_error_h = -1; #define OPS_ACC0(x,y) (x+xdim0_poisson_kernel_error*(y)) #define OPS_ACC1(x,y) (x+xdim1_poisson_kernel_error*(y)) //user function __device__ void poisson_kernel_error(const double *u, const double *ref, double *err) { *err = *err + (u[OPS_ACC0(0,0)]-ref[OPS_ACC1(0,0)])*(u[OPS_ACC0(0,0)]-ref[OPS_ACC1(0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_poisson_kernel_error( const double* __restrict arg0, const double* __restrict arg1, double* __restrict arg2, int size0, int size1 ){ double arg2_l[1]; for (int d=0; d<1; d++) arg2_l[d] = ZERO_double; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_poisson_kernel_error; arg1 += idx_x * 1 + idx_y * 1 * xdim1_poisson_kernel_error; if (idx_x < size0 && idx_y < size1) { poisson_kernel_error(arg0, arg1, arg2_l); } for (int d=0; d<1; d++) ops_reduction_cuda<OPS_INC>(&arg2[d+blockIdx.x + blockIdx.y*gridDim.x],arg2_l[d]); } // host stub function void ops_par_loop_poisson_kernel_error(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(4,"poisson_kernel_error"); OPS_kernels[4].count++; //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_poisson_kernel_error_h || xdim1 != xdim1_poisson_kernel_error_h) { cudaMemcpyToSymbol( xdim0_poisson_kernel_error, &xdim0, sizeof(int) ); xdim0_poisson_kernel_error_h = xdim0; cudaMemcpyToSymbol( xdim1_poisson_kernel_error, &xdim1, sizeof(int) ); xdim1_poisson_kernel_error_h = xdim1; } #ifdef OPS_MPI double *arg2h = (double *)(((ops_reduction)args[2].data)->data + ((ops_reduction)args[2].data)->size * block->index); #else //OPS_MPI double *arg2h = (double *)(((ops_reduction)args[2].data)->data); #endif //OPS_MPI dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1); int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)*1); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg2.data 
= OPS_reduct_h + reduct_bytes; arg2.data_d = OPS_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((double *)arg2.data)[d+b*1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[4].mpi_time += t1-t2; int nshared = 0; int nthread = OPS_block_size_x*OPS_block_size_y; nshared = MAX(nshared,sizeof(double)*1); nshared = MAX(nshared*nthread,reduct_size*nthread); //call kernel wrapper function, passing in pointers to data ops_poisson_kernel_error<<<grid, tblock, nshared >>> ( (double *)p_a[0], (double *)p_a[1], (double *)arg2.data_d,x_size, y_size); mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<1; d++ ){ arg2h[d] = arg2h[d] + ((double *)arg2.data)[d+b*1]; } } arg2.data = (char *)arg2h; if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[4].time += t2-t1; ops_set_dirtybit_device(args, 3); //Update kernel record OPS_kernels[4].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[4].transfer += ops_compute_transfer(dim, range, &arg1); }
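/*
 * The host stub above reads back one partial result per CUDA block (mvReductArraysToHost)
 * and finishes the OPS_INC reduction on the CPU. A standalone sketch of that pattern is
 * shown here; the kernel and buffer names are hypothetical and independent of the OPS
 * runtime, and the block size is assumed to be a power of two. Launch as
 * blockSumSketch<<<nblocks, nthreads, nthreads * sizeof(double)>>>(in, blockSums, n).
 */
__global__ void blockSumSketch(const double *in, double *blockSums, int n)
{
    extern __shared__ double s[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    s[tid] = (i < n) ? in[i] : 0.0;                      // each thread loads one element (or 0 past the end)
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
            s[tid] += s[tid + stride];                   // pairwise tree reduction in shared memory
        __syncthreads();
    }
    if (tid == 0)
        blockSums[blockIdx.x] = s[0];                    // one partial sum per block; the host adds these up
}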
339bbea2a3537525dbd5e59a05ebc3e13ba9dadd.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "matrix/math.h" #include "random/rng.h" #include "stats/mean.h" #include "stats/stddev.h" #include "test_utils.h" namespace MLCommon { namespace Stats { template <typename T> struct StdDevInputs { T tolerance, mean, stddev; int rows, cols; bool sample, rowMajor; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const StdDevInputs<T> &dims) { return os; } template <typename T> class StdDevTest : public ::testing::TestWithParam<StdDevInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<StdDevInputs<T>>::GetParam(); Random::Rng r(params.seed); int rows = params.rows, cols = params.cols; int len = rows * cols; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); allocate(data, len); allocate(mean_act, cols); allocate(stddev_act, cols); allocate(vars_act, cols); r.normal(data, len, params.mean, params.stddev, stream); stdVarSGtest(data, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void stdVarSGtest(T *data, hipStream_t stream) { int rows = params.rows, cols = params.cols; mean(mean_act, data, cols, rows, params.sample, params.rowMajor, stream); stddev(stddev_act, data, mean_act, cols, rows, params.sample, params.rowMajor, stream); vars(vars_act, data, mean_act, cols, rows, params.sample, params.rowMajor, stream); Matrix::seqRoot(vars_act, T(1), cols, stream); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(mean_act)); CUDA_CHECK(hipFree(stddev_act)); CUDA_CHECK(hipFree(vars_act)); } protected: StdDevInputs<T> params; T *data, *mean_act, *stddev_act, *vars_act; }; const std::vector<StdDevInputs<float>> inputsf = { {0.1f, 1.f, 2.f, 1024, 32, true, false, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 64, true, false, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 128, true, false, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 256, true, false, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 32, false, false, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 64, false, false, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 128, false, false, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 256, false, false, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 32, true, true, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 64, true, true, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 128, true, true, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 256, true, true, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 32, false, true, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 64, false, true, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 128, false, true, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 256, false, true, 1234ULL}}; const std::vector<StdDevInputs<double>> inputsd = { {0.1, 1.0, 2.0, 1024, 32, true, false, 1234ULL}, {0.1, 1.0, 2.0, 1024, 64, true, false, 1234ULL}, {0.1, 1.0, 2.0, 1024, 128, true, false, 1234ULL}, {0.1, 1.0, 2.0, 1024, 256, true, false, 1234ULL}, {0.1, -1.0, 2.0, 1024, 32, false, false, 1234ULL}, {0.1, -1.0, 2.0, 1024, 64, false, false, 1234ULL}, {0.1, -1.0, 2.0, 1024, 128, false, false, 1234ULL}, 
{0.1, -1.0, 2.0, 1024, 256, false, false, 1234ULL}, {0.1, 1.0, 2.0, 1024, 32, true, true, 1234ULL}, {0.1, 1.0, 2.0, 1024, 64, true, true, 1234ULL}, {0.1, 1.0, 2.0, 1024, 128, true, true, 1234ULL}, {0.1, 1.0, 2.0, 1024, 256, true, true, 1234ULL}, {0.1, -1.0, 2.0, 1024, 32, false, true, 1234ULL}, {0.1, -1.0, 2.0, 1024, 64, false, true, 1234ULL}, {0.1, -1.0, 2.0, 1024, 128, false, true, 1234ULL}, {0.1, -1.0, 2.0, 1024, 256, false, true, 1234ULL}}; typedef StdDevTest<float> StdDevTestF; TEST_P(StdDevTestF, Result) { ASSERT_TRUE(devArrMatch(params.stddev, stddev_act, params.cols, CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch(stddev_act, vars_act, params.cols, CompareApprox<float>(params.tolerance))); } typedef StdDevTest<double> StdDevTestD; TEST_P(StdDevTestD, Result) { ASSERT_TRUE(devArrMatch(params.stddev, stddev_act, params.cols, CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch(stddev_act, vars_act, params.cols, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(StdDevTests, StdDevTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(StdDevTests, StdDevTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
339bbea2a3537525dbd5e59a05ebc3e13ba9dadd.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "matrix/math.h" #include "random/rng.h" #include "stats/mean.h" #include "stats/stddev.h" #include "test_utils.h" namespace MLCommon { namespace Stats { template <typename T> struct StdDevInputs { T tolerance, mean, stddev; int rows, cols; bool sample, rowMajor; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const StdDevInputs<T> &dims) { return os; } template <typename T> class StdDevTest : public ::testing::TestWithParam<StdDevInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<StdDevInputs<T>>::GetParam(); Random::Rng r(params.seed); int rows = params.rows, cols = params.cols; int len = rows * cols; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); allocate(data, len); allocate(mean_act, cols); allocate(stddev_act, cols); allocate(vars_act, cols); r.normal(data, len, params.mean, params.stddev, stream); stdVarSGtest(data, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void stdVarSGtest(T *data, cudaStream_t stream) { int rows = params.rows, cols = params.cols; mean(mean_act, data, cols, rows, params.sample, params.rowMajor, stream); stddev(stddev_act, data, mean_act, cols, rows, params.sample, params.rowMajor, stream); vars(vars_act, data, mean_act, cols, rows, params.sample, params.rowMajor, stream); Matrix::seqRoot(vars_act, T(1), cols, stream); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(mean_act)); CUDA_CHECK(cudaFree(stddev_act)); CUDA_CHECK(cudaFree(vars_act)); } protected: StdDevInputs<T> params; T *data, *mean_act, *stddev_act, *vars_act; }; const std::vector<StdDevInputs<float>> inputsf = { {0.1f, 1.f, 2.f, 1024, 32, true, false, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 64, true, false, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 128, true, false, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 256, true, false, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 32, false, false, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 64, false, false, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 128, false, false, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 256, false, false, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 32, true, true, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 64, true, true, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 128, true, true, 1234ULL}, {0.1f, 1.f, 2.f, 1024, 256, true, true, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 32, false, true, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 64, false, true, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 128, false, true, 1234ULL}, {0.1f, -1.f, 2.f, 1024, 256, false, true, 1234ULL}}; const std::vector<StdDevInputs<double>> inputsd = { {0.1, 1.0, 2.0, 1024, 32, true, false, 1234ULL}, {0.1, 1.0, 2.0, 1024, 64, true, false, 1234ULL}, {0.1, 1.0, 2.0, 1024, 128, true, false, 1234ULL}, {0.1, 1.0, 2.0, 1024, 256, true, false, 1234ULL}, {0.1, -1.0, 2.0, 1024, 32, false, false, 1234ULL}, {0.1, -1.0, 2.0, 1024, 64, false, false, 1234ULL}, {0.1, -1.0, 2.0, 1024, 128, false, false, 1234ULL}, {0.1, -1.0, 2.0, 1024, 256, false, false, 1234ULL}, 
{0.1, 1.0, 2.0, 1024, 32, true, true, 1234ULL}, {0.1, 1.0, 2.0, 1024, 64, true, true, 1234ULL}, {0.1, 1.0, 2.0, 1024, 128, true, true, 1234ULL}, {0.1, 1.0, 2.0, 1024, 256, true, true, 1234ULL}, {0.1, -1.0, 2.0, 1024, 32, false, true, 1234ULL}, {0.1, -1.0, 2.0, 1024, 64, false, true, 1234ULL}, {0.1, -1.0, 2.0, 1024, 128, false, true, 1234ULL}, {0.1, -1.0, 2.0, 1024, 256, false, true, 1234ULL}}; typedef StdDevTest<float> StdDevTestF; TEST_P(StdDevTestF, Result) { ASSERT_TRUE(devArrMatch(params.stddev, stddev_act, params.cols, CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch(stddev_act, vars_act, params.cols, CompareApprox<float>(params.tolerance))); } typedef StdDevTest<double> StdDevTestD; TEST_P(StdDevTestD, Result) { ASSERT_TRUE(devArrMatch(params.stddev, stddev_act, params.cols, CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch(stddev_act, vars_act, params.cols, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(StdDevTests, StdDevTestF, ::testing::ValuesIn(inputsf)); INSTANTIATE_TEST_CASE_P(StdDevTests, StdDevTestD, ::testing::ValuesIn(inputsd)); } // end namespace Stats } // end namespace MLCommon
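/*
 * For reference only: the tests above compare the device stddev against sqrt(variance)
 * and against the generator's nominal sigma. A host-side sample standard deviation over
 * one column would look like the sketch below (Bessel-corrected, i.e. the sample==true
 * case); it is illustrative and not part of the test harness.
 */
#include <cmath>
#include <vector>
template <typename T>
T sampleStdDev(const std::vector<T> &x)
{
    T mean = T(0);
    for (T v : x) mean += v;
    mean /= T(x.size());
    T ss = T(0);
    for (T v : x) ss += (v - mean) * (v - mean);         // sum of squared deviations from the mean
    return std::sqrt(ss / T(x.size() - 1));              // divide by n-1 for the sample estimate
}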
35a52c4b3c0a06e5d8087476846c3d109fb2d05f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <set> #include <vector> #include <assert.h> #include <rocblas.h> #include <cutil_inline.h> #include <stdlib.h> #include <stdio.h> #include <fstream> #include <iostream> #include <algorithm> #include <typeinfo> #include <nvmatrix.cuh> #include <nvmatrix_operators.cuh> #include <map> #include "common/logging.h" using namespace std; /* * Device random number generator pointers. */ //map<int,hiprandGenerator_t> NVMatrix::rndGen; map<int,hiprandState_t*> NVMatrix::rndDevStates; pthread_mutex_t* NVMatrix::_rndMutex = makeMutex(); pthread_mutex_t* NVMatrix::makeMutex() { pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(m, NULL); return m; } void NVMatrix::_init(int numRows, int numCols, int stride, bool isTrans) { _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _ownsData = true; _isTrans = isTrans; _devData = NULL; _maxElements = _numElements; if (_numElements > 0) { hipblasAlloc(_numElements, sizeof(float), (void**) &_devData); checkCublasError("!!!! device memory allocation error\n"); // Log_Info("Allocated to %p", _devData); } _stride = stride < 0 ? getLeadingDim() : stride; } NVMatrix::NVMatrix() { _init(0, 0, -1, false); } NVMatrix::NVMatrix(bool isTrans) { _init(0, 0, -1, isTrans); } NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) { _init(numRows, numCols, -1, isTrans); } NVMatrix::NVMatrix(const Matrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { copyFromHost(like); } } NVMatrix::NVMatrix(const NVMatrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { like.copy(*this); } } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. */ NVMatrix::NVMatrix(const NVMatrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. 
*/ NVMatrix::NVMatrix(const Matrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, false); } NVMatrix::NVMatrix(float* devData, int numRows, int numCols, int stride, bool isTrans) : _numRows(numRows), _numCols(numCols), _numElements(numRows*numCols), _ownsData(false), _devData(devData), _isTrans(isTrans) { _stride = stride < 0 ? getLeadingDim() : stride; } NVMatrix::~NVMatrix() { if(_ownsData && _numElements > 0) { // Log_Info("Freeing: %p", _devData); cublasStatus status = hipblasFree(_devData); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! memory free error\n"); abort(); } } } void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) { if (resizeDeviceMatrix) { resize(hostMatrix); } copyFromHost(hostMatrix); } void NVMatrix::copyFromHost(const Matrix& hostMatrix) { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); setTrans(hostMatrix.isTrans()); if (getNumElements() > 0) { cublasStatus status = hipblasSetMatrix(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float), hostMatrix.getData(), hostMatrix.getLeadingDim(), _devData, _stride); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device access error (write) %d\n", status); abort(); } } } void NVMatrix::copyToHost(Matrix& hostMatrix) const { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); hostMatrix.setTrans(_isTrans); if (getNumElements() > 0) { // printf("rows: %d, cols: %d, stride: %d\n", getNumRows(), getNumCols(), getStride()); cublasStatus status = hipblasGetMatrix(getLeadingDim(),getFollowingDim(), sizeof(float), _devData, getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim()); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device access error (read)\n"); abort(); } } } void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const { if (resizeTarget) { hostMatrix.resize(_numRows, _numCols); } copyToHost(hostMatrix); } void NVMatrix::copy(NVMatrix& dest) const { dest.resize(*this); copy(dest, 0, -1, 0, -1, 0, 0); } NVMatrix& NVMatrix::copy() const { NVMatrix* c = new NVMatrix(); copy(*c); return *c; } void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const { assert(isContiguous() && b.isContiguous() && target.isContiguous()); // assert(&target != &b); assert(_numCols == b.getNumRows()); if(&target != this) { target.resize(_numRows, b.getNumCols()); target.setTrans(true); } assert(target.getNumRows() == _numRows); assert(target.getNumCols() == b.getNumCols()); if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer."); } hipblasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols, scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(), 0, target.getDevData(), getNumRows()); checkCublasError("hipblasSgemm failed"); // hipDeviceSynchronize(); } void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) { rightMult(b, scaleAB, *this); } void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const { rightMult(b, 1, target); } /* * This will only work if this matrix is in column-major order! In other words, * if isTrans() returns true. 
*/ void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) { if (scaleThis == 0) { a.rightMult(b, scaleAB, *this); return; } assert(isContiguous()); assert(a.getNumCols() == b.getNumRows()); assert(this->getNumRows() == a.getNumRows()); assert(this->getNumCols() == b.getNumCols()); assert(_isTrans); if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer."); } hipblasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(), scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(), scaleThis, _devData, getLeadingDim()); checkCublasError("hipblasSgemm failed"); // hipDeviceSynchronize(); } void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) { addProduct(a, b, 1, 1); } void NVMatrix::zero() { apply(NVMatrixOps::Zero()); } template <class Randomizer> void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && target.isContiguous()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); hipLaunchKernelGGL(( kUnaryRandomize), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kUnaryRandomize: Kernel execution failed"); } template <class Randomizer> void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && data2.isContiguous() && target.isContiguous()); assert(isSameDims(data2)); assert(isTrans() == data2.isTrans()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); hipLaunchKernelGGL(( kBinaryRandomize), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getDevData(), data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kBinaryRandomize: Kernel execution failed"); } void NVMatrix::initRandom(unsigned long long seed) { assert(!isRndInitialized()); pthread_mutex_lock(_rndMutex); int d = getDeviceID(); rndDevStates[d] = NULL; CUDA_CALL(hipMalloc((void **)&rndDevStates[d], NUM_RND_STREAMS * sizeof(hiprandState_t))); pthread_mutex_unlock(_rndMutex); hipLaunchKernelGGL(( kSetupCurand), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one cutilCheckMsg("initRandom: Kernel execution failed"); } void NVMatrix::initRandom() { NVMatrix::initRandom(time(0)); } hiprandState_t* NVMatrix::getCurandState() { pthread_mutex_lock(_rndMutex); int d = getDeviceID(); assert(rndDevStates.count(d) != 0); hiprandState_t* r = rndDevStates[d]; pthread_mutex_unlock(_rndMutex); return r; } int NVMatrix::getDeviceID() { int d; hipGetDevice(&d); return d; } bool NVMatrix::isRndInitialized() { pthread_mutex_lock(_rndMutex); bool b = rndDevStates.count(getDeviceID()) != 0; pthread_mutex_unlock(_rndMutex); return b; } void NVMatrix::destroyRandom() { assert(isRndInitialized()); int d = getDeviceID(); pthread_mutex_lock(_rndMutex); CUDA_CALL(hipFree(rndDevStates[d])); rndDevStates.erase(d); pthread_mutex_unlock(_rndMutex); } void NVMatrix::binarizeProbs() { binarizeProbs(*this); } void NVMatrix::binarizeProbs(NVMatrix& target) { _unaryRandomize(target, BinarizeUnaryRandomizer()); } void NVMatrix::randomizeUniform() { assert(isContiguous()); 
assert(isRndInitialized()); // CURAND_CALL(hiprandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, UniformUnaryRandomizer()); } void NVMatrix::randomizeGaussian() { randomizeGaussian(1); } void NVMatrix::randomizeGaussian(float stdev) { randomizeGaussian(0, stdev); } void NVMatrix::randomizeGaussian(float mean, float stdev) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(hiprandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev)); _unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev)); } /* * Kind of a hack since we don't actually need the contents of this matrix for it, * so we don't really need a binary randomizer. */ void NVMatrix::randomizeGaussian(NVMatrix& stdevs) { _binaryRandomize(stdevs, *this, GaussianBinaryRandomizer()); } void NVMatrix::addGaussianNoise() { addGaussianNoise(1); } void NVMatrix::addGaussianNoise(float stdev) { addGaussianNoise(stdev, *this); } void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) { _unaryRandomize(target, AddGaussianUnaryRandomizer(stdev)); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) { addGaussianNoise(stdevs, var, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs) { addGaussianNoise(stdevs, false, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) { if (var) { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>()); } else { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>()); } } void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target); } void NVMatrix::biggerThan(NVMatrix& b) { biggerThan(b, *this); } void NVMatrix::equals(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Equals(), b, target); } void NVMatrix::equals(NVMatrix& m) { equals(m, *this); } void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target); } void NVMatrix::biggerThanVector(NVMatrix& vec) { biggerThanVector(vec, *this); } void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const { assert(startRow >= 0 && startRow < _numRows); assert(endRow > startRow && endRow <= _numRows); assert(startCol >= 0 && startCol < _numCols); assert(endCol > startCol && endCol <= _numCols); } /* * The only place where stride is supported for now! * Will ALWAYS return a view of the original data, sometimes non-contiguous. */ NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); if (!isTrans()) { return *new NVMatrix(this->_devData + startRow * _stride + startCol, endRow - startRow, endCol - startCol, _stride, false); } return *new NVMatrix(this->_devData + startCol * _stride + startRow, endRow - startRow, endCol - startCol, _stride, true); } /* this will NEVER return a view */ void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? 
this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); int sliceRows = endRow - startRow, sliceCols = endCol - startCol; if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) { target.resize(sliceRows, sliceCols); } this->copy(target, startRow, endRow, startCol, endCol, 0, 0); } NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const { return slice(startRow, endRow, 0, -1); } void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const { slice(startRow, endRow, 0, -1, target); } NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const { return slice(0, -1, startCol, endCol); } void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const { slice(0, -1, startCol, endCol, target); } /* * Guaranteed to not change the data if the number of elements doesn't change. * So you can use this to "reshape" a matrix. */ bool NVMatrix::resize(int numRows, int numCols) { bool reallocated = false; if (numRows != _numRows || numCols != _numCols) { assert(_ownsData); if (_numElements != numRows * numCols) { if (_maxElements < numRows * numCols) { Log_Info("NVMatrix::resize %d", numRows * numCols); _maxElements = numRows * numCols; if (_numElements > 0) { // free old memory cublasStatus status = hipblasFree(_devData); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! memory free error: %X\n", status); abort(); } } if (numRows * numCols > 0) { // allocate new memory cublasStatus status = hipblasAlloc(numCols * numRows, sizeof(float), (void**) &_devData); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device memory allocation error\n"); abort(); } //Log_Info("Allocated to %p", _devData); } else { _devData = NULL; } } reallocated = true; } _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _stride = getLeadingDim(); } return reallocated; } bool NVMatrix::resize(const NVMatrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } bool NVMatrix::resize(const Matrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } void NVMatrix::reshape(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); _numRows = numRows; _numCols = numCols; _stride = getLeadingDim(); } NVMatrix& NVMatrix::reshaped(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); return *new NVMatrix(_devData, numRows, numCols, -1, _isTrans); } void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow, int srcStartCol, int srcEndCol, int destStartRow, int destStartCol) const { srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow; srcEndCol = srcEndCol < 0 ? 
_numCols : srcEndCol; NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol); NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol); srcSlice->apply(NVMatrixOps::Identity(), *destSlice); delete srcSlice; delete destSlice; } NVMatrix& NVMatrix::getTranspose() { return *new NVMatrix(_devData, _numCols, _numRows, _stride, !_isTrans);; } void NVMatrix::transpose(NVMatrix& target) { flipTrans(target); target.setTrans(!target.isTrans()); target.reshape(target.getNumCols(), target.getNumRows()); } void NVMatrix::transpose() { int tmp = _numCols; _numCols = _numRows; _numRows = tmp; _isTrans = !_isTrans; } bool NVMatrix::transpose(bool trans) { bool oldTrans = _isTrans; if (oldTrans != trans) { transpose(); } return oldTrans; } /* * Flips the ordering of the matrix from row-major to column-major and vice versa. * This creates temporary storage -- not a cheap operation. * * This is not equivalent to a "hard transpose". The resultant matrix still has * the same dimensions, its layout in memory just changes. */ NVMatrix& NVMatrix::flipTrans() { NVMatrix* meTrans = new NVMatrix(*this); flipTrans(*meTrans); return *meTrans; } void NVMatrix::flipTrans(NVMatrix& target) { assert(&target != this); target.resize(_numRows, _numCols); target.setTrans(!isTrans()); apply(NVMatrixOps::Identity(), target); } void NVMatrix::squaredDiff(NVMatrix& b) { squaredDiff(b, *this); } void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) { if (scaleA == 0) { b.scale(scaleB, target); return; } if (scaleA == 1 && scaleB == 1) { // slight optimization applyBinary(NVMatrixBinaryOps::Add(), b, target); } else { applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target); } } void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) { add(b, 1, scaleB, target); } void NVMatrix::add(NVMatrix& b, NVMatrix& target) { add(b, 1, target); } void NVMatrix::add(NVMatrix& b, float scaleB) { add(b, scaleB, *this); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) { add(b, scaleA, scaleB, *this); } void NVMatrix::add(NVMatrix& b) { add(b, 1, *this); } void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) { add(b, -1, target); } void NVMatrix::subtract(NVMatrix& b) { add(b, -1); } void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Multiply(), b, target); } void NVMatrix::eltwiseMult(NVMatrix& b) { eltwiseMult(b, *this); } void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Divide(), b, target); } void NVMatrix::eltwiseDivide(NVMatrix& b) { eltwiseDivide(b, *this); } void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) { assert(isContiguous() && target.isContiguous()); assert(timesX > 0 && timesY > 0); target.resize(_numRows*timesY, _numCols*timesX); target.setTrans(_isTrans); if(!isTrans()) { hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numCols, _numRows, target._numCols, target._numRows); } else { hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numRows, _numCols, target._numRows, target._numCols); } cutilCheckMsg("Kernel execution failed"); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) { 
applyBinaryV(NVMatrixBinaryOps::WeightedAdd(1, scaleVec), vec, target); } void NVMatrix::addVector(NVMatrix& vec) { addVector(vec, 1, *this); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec) { addVector(vec, scaleVec, *this); } void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) { addVector(vec, 1, target); } void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target); } void NVMatrix::equalsVector(NVMatrix& vec) { equalsVector(vec, *this); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec) { eltwiseMultByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) { eltwiseDivideByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target); } /* * num threads per block is ignored when summing rows (axis=1) because * it has to be a power of 2. * * TODO: this is a mess, fix it. it works pretty fast but it's too ugly. * TODO: this function is _really_ bad for very long aggregations of few columns. */ template<class Agg, class BinaryOp> void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp op) { assert(axis == 0 || axis == 1); assert(isContiguous() && target.isContiguous()); assert(&target != this); int width = _isTrans ? _numRows : _numCols; int height = _isTrans ? _numCols : _numRows; target.setTrans(_isTrans); assert(width > 0); assert(height > 0); if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1); int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK); assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width); assert(numBlocks < NUM_BLOCKS_MAX); hipLaunchKernelGGL(( kDumbAggCols<Agg, BinaryOp>), dim3(numBlocks),dim3(NUM_SUM_COLS_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, width, height, agg, op); cutilCheckMsg("kDumbAggCols: Kernel execution failed"); } else { // row sum target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1); if (width > 1) { if (height >= 16384) { // linear aggregation int numBlocksX = 1; int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y); int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 
16 : AGG_SHORT_ROWS_THREADS_X; int numThreadsY = AGG_SHORT_ROWS_THREADS_Y; while (numBlocksY > NUM_BLOCKS_MAX) { numBlocksY = DIVUP(numBlocksY,2); numBlocksX *= 2; } dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); if(width <= 16) { if(width <= 4) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 4>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 8) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 8>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 12) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 12>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 16>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } } else if(width <= 32) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 48){ hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 64){ hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else { hipLaunchKernelGGL(( kAggShortRows2<Agg, BinaryOp>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } } else { if (width >= 512) { dim3 threads(AWR_NUM_THREADS); dim3 blocks(1, ::min(1024, height)); hipLaunchKernelGGL(( kAggRows_wholerow_nosync), dim3(blocks), dim3(threads), 0, 0, _devData, target._devData, width, height, agg, op); // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, ::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); } else { // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, ::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); NVMatrix *prevSum = this; while (prevSum->getLeadingDim() > 1) { int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512))); int numThreadsY = 1; int numBlocksX = DIVUP(width, 2*numThreadsX); int numBlocksY = ::min(height, NUM_BLOCKS_MAX); NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? 
&target : new NVMatrix(height, numBlocksX, false); dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); assert(numBlocksX <= NUM_BLOCKS_MAX); assert(numBlocksY <= NUM_BLOCKS_MAX); if(width <= 64) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 32>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 128) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 64>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 256) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 128>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 512) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 256>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 512>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } cutilCheckMsg("agg rows: Kernel execution failed"); hipDeviceSynchronize(); width = numBlocksX; // only true in reduction agg, but for linear agg this doesn't matter anyway if (prevSum != this) { delete prevSum; } prevSum = nvSumAccum; } } } } else { copy(target); } } } void NVMatrix::inRangeInc(float lower, float upper) { inRangeInc(lower, upper, *this); } void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<false>(lower, upper), target); } void NVMatrix::inRangeExc(float lower, float upper) { inRangeExc(lower, upper, *this); } void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<true>(lower, upper), target); } void NVMatrix::biggerThanScalar(float scalar) { biggerThanScalar(scalar, *this); } void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::BiggerThanScalar(scalar), target); } void NVMatrix::smallerThanScalar(float scalar) { smallerThanScalar(scalar, *this); } void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::SmallerThanScalar(scalar), target); } void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) { apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target); } void NVMatrix::addScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::AddScalar(scalar), target); } void NVMatrix::addScalar(float scalar) { addScalar(scalar, *this); } void NVMatrix::minWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MinWithScalar(scalar), target); } void NVMatrix::minWithScalar(float scalar) { minWithScalar(scalar, *this); } void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MaxWithScalar(scalar), target); } void NVMatrix::maxWithScalar(float scalar) { maxWithScalar(scalar, *this); } void NVMatrix::pow(float p, NVMatrix& target) { apply(NVMatrixOps::Pow(p), target); } void NVMatrix::pow(float p) { pow(p, *this); } void NVMatrix::scale(float _scale) { scale(_scale, *this); } void NVMatrix::scale(float _scale, NVMatrix& target) { if (_scale != 1 || &target != this) { // optimize away scale by 1 apply(NVMatrixOps::MultByScalar(_scale), target); } } template<class Agg, class BinaryOp> NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp op) { NVMatrix *sumVec = new NVMatrix(); 
_aggregate<Agg, BinaryOp>(axis, *sumVec, agg, op); return *sumVec; } void NVMatrix::max(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) { if (scaleThis != 0) { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum)); } else { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum)); } } void NVMatrix::sum(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } void NVMatrix::min(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::max(int axis) { return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::sum(int axis) { return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::min(int axis) { return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads, int* numCols) { int logn = int(ceil(log(double(n)) / log(2))); *numCols = DIVUP(n, logn); int numThreads = *numCols; *blocks = dim3(DIVUP(numThreads, DP_BLOCKSIZE)); *threads = dim3(DP_BLOCKSIZE); } float NVMatrix::mean() { return sum() / getNumElements(); } float NVMatrix::sum() { return _totalAgg(NVMatrixAggs::Sum()); } float NVMatrix::max() { return _totalAgg(NVMatrixAggs::Max()); } float NVMatrix::min() { return _totalAgg(NVMatrixAggs::Min()); } template<class Agg> float NVMatrix::_totalAgg(Agg agg) { assert(isContiguous()); dim3 blocks, threads; int numCols; // Sum most of it on GPU NVMatrix* src = this; for (NVMatrix* target = NULL; src->getNumElements() > CPUSUM_MAX; src = target) { _sum_setParams(src->getNumElements(), &blocks, &threads, &numCols); target = new NVMatrix(1, blocks.x); hipLaunchKernelGGL(( kTotalAgg), dim3(blocks), dim3(threads), 0, 0, src->getDevData(), target->getDevData(), numCols, src->getNumElements(), agg); cutilCheckMsg("kTotalAgg: Kernel execution failed"); hipDeviceSynchronize(); // not really necessary? delete (src == this ? NULL : src); } Matrix srcCPU(src->getNumRows(), src->getNumCols()); src->copyToHost(srcCPU); if (src->getNumElements() > 1) { // Sum remainder on CPU delete (src == this ? NULL : src); if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) { return srcCPU.sum(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) { return srcCPU.max(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) { return srcCPU.min(); } else { assert(false); } } return srcCPU(0,0); } /* * Fast dot product only for matrices with same transposedness. */ float NVMatrix::dotProduct(NVMatrix& b) { assert(isContiguous() && b.isContiguous()); assert(isSameDims(b)); assert(isTrans() == b.isTrans()); // see? 
dim3 blocks, threads; int numCols; _sum_setParams(getNumElements(), &blocks, &threads, &numCols); NVMatrix target(1, blocks.x); hipLaunchKernelGGL(( kDotProduct_r), dim3(blocks), dim3(threads), 0, 0, getDevData(), b.getDevData(), target.getDevData(), numCols, getNumElements()); cutilCheckMsg("kDotProduct: Kernel execution failed"); hipDeviceSynchronize(); return target.sum(); } float NVMatrix::norm2() { return dotProduct(*this); } float NVMatrix::norm() { return sqrt(norm2()); } void NVMatrix::print(int startRow, int rows, int startCol, int cols) const { hipDeviceSynchronize(); Matrix hm = Matrix(_numRows, _numCols); copyToHost(hm); hm.print(startRow, rows, startCol, cols); } void NVMatrix::print(int rows, int cols) const { print(0, rows, 0, cols); } void NVMatrix::printShape(const char* name) const { printf("%s: %dx%d\n", name, _numRows, _numCols); }
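// --- Illustrative sketch (not part of the corpus) ---------------------------
// NVMatrix::_totalAgg above reduces a whole matrix in two stages: repeated GPU
// partial reductions (kTotalAgg) until only a few elements remain, then a CPU
// finish on the copied-back remainder. The standalone CUDA sketch below shows
// the same two-stage pattern with a plain sum; kPartialSum and CPU_THRESHOLD
// are hypothetical names for this sketch, not identifiers from the corpus.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void kPartialSum(const float* in, float* out, int n) {
    extern __shared__ float s[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x * 2 + tid;       // each block covers 2*blockDim inputs
    float v = 0.f;
    if (i < n)              v += in[i];
    if (i + blockDim.x < n) v += in[i + blockDim.x];
    s[tid] = v;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) s[tid] += s[tid + stride]; // shared-memory tree reduction
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = s[0];            // one partial sum per block
}

int main() {
    const int N = 1 << 20, THREADS = 256, CPU_THRESHOLD = 4096;
    std::vector<float> h(N, 1.0f);
    float *dIn, *dOut;
    cudaMalloc(&dIn, N * sizeof(float));
    cudaMemcpy(dIn, h.data(), N * sizeof(float), cudaMemcpyHostToDevice);
    int n = N;
    while (n > CPU_THRESHOLD) {                      // GPU stage, like _totalAgg's loop
        int blocks = (n + THREADS * 2 - 1) / (THREADS * 2);
        cudaMalloc(&dOut, blocks * sizeof(float));
        kPartialSum<<<blocks, THREADS, THREADS * sizeof(float)>>>(dIn, dOut, n);
        cudaDeviceSynchronize();
        cudaFree(dIn);                               // drop the previous intermediate buffer
        dIn = dOut;
        n = blocks;
    }
    std::vector<float> partial(n);                   // CPU stage finishes the remainder
    cudaMemcpy(partial.data(), dIn, n * sizeof(float), cudaMemcpyDeviceToHost);
    double total = 0;
    for (float p : partial) total += p;
    printf("sum = %.0f (expected %d)\n", total, N);
    cudaFree(dIn);
    return 0;
}
// ----------------------------------------------------------------------------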
35a52c4b3c0a06e5d8087476846c3d109fb2d05f.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <set> #include <vector> #include <assert.h> #include <cublas.h> #include <cutil_inline.h> #include <stdlib.h> #include <stdio.h> #include <fstream> #include <iostream> #include <algorithm> #include <typeinfo> #include <nvmatrix.cuh> #include <nvmatrix_operators.cuh> #include <map> #include "common/logging.h" using namespace std; /* * Device random number generator pointers. */ //map<int,curandGenerator_t> NVMatrix::rndGen; map<int,curandState*> NVMatrix::rndDevStates; pthread_mutex_t* NVMatrix::_rndMutex = makeMutex(); pthread_mutex_t* NVMatrix::makeMutex() { pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(m, NULL); return m; } void NVMatrix::_init(int numRows, int numCols, int stride, bool isTrans) { _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _ownsData = true; _isTrans = isTrans; _devData = NULL; _maxElements = _numElements; if (_numElements > 0) { cublasAlloc(_numElements, sizeof(float), (void**) &_devData); checkCublasError("!!!! device memory allocation error\n"); // Log_Info("Allocated to %p", _devData); } _stride = stride < 0 ? getLeadingDim() : stride; } NVMatrix::NVMatrix() { _init(0, 0, -1, false); } NVMatrix::NVMatrix(bool isTrans) { _init(0, 0, -1, isTrans); } NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) { _init(numRows, numCols, -1, isTrans); } NVMatrix::NVMatrix(const Matrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { copyFromHost(like); } } NVMatrix::NVMatrix(const NVMatrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { like.copy(*this); } } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. */ NVMatrix::NVMatrix(const NVMatrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. 
*/ NVMatrix::NVMatrix(const Matrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, false); } NVMatrix::NVMatrix(float* devData, int numRows, int numCols, int stride, bool isTrans) : _numRows(numRows), _numCols(numCols), _numElements(numRows*numCols), _ownsData(false), _devData(devData), _isTrans(isTrans) { _stride = stride < 0 ? getLeadingDim() : stride; } NVMatrix::~NVMatrix() { if(_ownsData && _numElements > 0) { // Log_Info("Freeing: %p", _devData); cublasStatus status = cublasFree(_devData); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! memory free error\n"); abort(); } } } void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) { if (resizeDeviceMatrix) { resize(hostMatrix); } copyFromHost(hostMatrix); } void NVMatrix::copyFromHost(const Matrix& hostMatrix) { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); setTrans(hostMatrix.isTrans()); if (getNumElements() > 0) { cublasStatus status = cublasSetMatrix(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float), hostMatrix.getData(), hostMatrix.getLeadingDim(), _devData, _stride); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device access error (write) %d\n", status); abort(); } } } void NVMatrix::copyToHost(Matrix& hostMatrix) const { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); hostMatrix.setTrans(_isTrans); if (getNumElements() > 0) { // printf("rows: %d, cols: %d, stride: %d\n", getNumRows(), getNumCols(), getStride()); cublasStatus status = cublasGetMatrix(getLeadingDim(),getFollowingDim(), sizeof(float), _devData, getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim()); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device access error (read)\n"); abort(); } } } void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const { if (resizeTarget) { hostMatrix.resize(_numRows, _numCols); } copyToHost(hostMatrix); } void NVMatrix::copy(NVMatrix& dest) const { dest.resize(*this); copy(dest, 0, -1, 0, -1, 0, 0); } NVMatrix& NVMatrix::copy() const { NVMatrix* c = new NVMatrix(); copy(*c); return *c; } void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const { assert(isContiguous() && b.isContiguous() && target.isContiguous()); // assert(&target != &b); assert(_numCols == b.getNumRows()); if(&target != this) { target.resize(_numRows, b.getNumCols()); target.setTrans(true); } assert(target.getNumRows() == _numRows); assert(target.getNumCols() == b.getNumCols()); if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer."); } cublasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols, scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(), 0, target.getDevData(), getNumRows()); checkCublasError("cublasSgemm failed"); // cudaThreadSynchronize(); } void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) { rightMult(b, scaleAB, *this); } void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const { rightMult(b, 1, target); } /* * This will only work if this matrix is in column-major order! In other words, * if isTrans() returns true. 
*/ void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) { if (scaleThis == 0) { a.rightMult(b, scaleAB, *this); return; } assert(isContiguous()); assert(a.getNumCols() == b.getNumRows()); assert(this->getNumRows() == a.getNumRows()); assert(this->getNumCols() == b.getNumCols()); assert(_isTrans); if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer."); } cublasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(), scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(), scaleThis, _devData, getLeadingDim()); checkCublasError("cublasSgemm failed"); // cudaThreadSynchronize(); } void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) { addProduct(a, b, 1, 1); } void NVMatrix::zero() { apply(NVMatrixOps::Zero()); } template <class Randomizer> void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && target.isContiguous()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); kUnaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kUnaryRandomize: Kernel execution failed"); } template <class Randomizer> void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && data2.isContiguous() && target.isContiguous()); assert(isSameDims(data2)); assert(isTrans() == data2.isTrans()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); kBinaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(getDevData(), data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kBinaryRandomize: Kernel execution failed"); } void NVMatrix::initRandom(unsigned long long seed) { assert(!isRndInitialized()); pthread_mutex_lock(_rndMutex); int d = getDeviceID(); rndDevStates[d] = NULL; CUDA_CALL(cudaMalloc((void **)&rndDevStates[d], NUM_RND_STREAMS * sizeof(curandState))); pthread_mutex_unlock(_rndMutex); kSetupCurand<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one cutilCheckMsg("initRandom: Kernel execution failed"); } void NVMatrix::initRandom() { NVMatrix::initRandom(time(0)); } curandState* NVMatrix::getCurandState() { pthread_mutex_lock(_rndMutex); int d = getDeviceID(); assert(rndDevStates.count(d) != 0); curandState* r = rndDevStates[d]; pthread_mutex_unlock(_rndMutex); return r; } int NVMatrix::getDeviceID() { int d; cudaGetDevice(&d); return d; } bool NVMatrix::isRndInitialized() { pthread_mutex_lock(_rndMutex); bool b = rndDevStates.count(getDeviceID()) != 0; pthread_mutex_unlock(_rndMutex); return b; } void NVMatrix::destroyRandom() { assert(isRndInitialized()); int d = getDeviceID(); pthread_mutex_lock(_rndMutex); CUDA_CALL(cudaFree(rndDevStates[d])); rndDevStates.erase(d); pthread_mutex_unlock(_rndMutex); } void NVMatrix::binarizeProbs() { binarizeProbs(*this); } void NVMatrix::binarizeProbs(NVMatrix& target) { _unaryRandomize(target, BinarizeUnaryRandomizer()); } void NVMatrix::randomizeUniform() { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(curandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, 
UniformUnaryRandomizer()); } void NVMatrix::randomizeGaussian() { randomizeGaussian(1); } void NVMatrix::randomizeGaussian(float stdev) { randomizeGaussian(0, stdev); } void NVMatrix::randomizeGaussian(float mean, float stdev) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(curandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev)); _unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev)); } /* * Kind of a hack since we don't actually need the contents of this matrix for it, * so we don't really need a binary randomizer. */ void NVMatrix::randomizeGaussian(NVMatrix& stdevs) { _binaryRandomize(stdevs, *this, GaussianBinaryRandomizer()); } void NVMatrix::addGaussianNoise() { addGaussianNoise(1); } void NVMatrix::addGaussianNoise(float stdev) { addGaussianNoise(stdev, *this); } void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) { _unaryRandomize(target, AddGaussianUnaryRandomizer(stdev)); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) { addGaussianNoise(stdevs, var, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs) { addGaussianNoise(stdevs, false, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) { if (var) { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>()); } else { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>()); } } void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target); } void NVMatrix::biggerThan(NVMatrix& b) { biggerThan(b, *this); } void NVMatrix::equals(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Equals(), b, target); } void NVMatrix::equals(NVMatrix& m) { equals(m, *this); } void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target); } void NVMatrix::biggerThanVector(NVMatrix& vec) { biggerThanVector(vec, *this); } void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const { assert(startRow >= 0 && startRow < _numRows); assert(endRow > startRow && endRow <= _numRows); assert(startCol >= 0 && startCol < _numCols); assert(endCol > startCol && endCol <= _numCols); } /* * The only place where stride is supported for now! * Will ALWAYS return a view of the original data, sometimes non-contiguous. */ NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); if (!isTrans()) { return *new NVMatrix(this->_devData + startRow * _stride + startCol, endRow - startRow, endCol - startCol, _stride, false); } return *new NVMatrix(this->_devData + startCol * _stride + startRow, endRow - startRow, endCol - startCol, _stride, true); } /* this will NEVER return a view */ void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? 
this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); int sliceRows = endRow - startRow, sliceCols = endCol - startCol; if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) { target.resize(sliceRows, sliceCols); } this->copy(target, startRow, endRow, startCol, endCol, 0, 0); } NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const { return slice(startRow, endRow, 0, -1); } void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const { slice(startRow, endRow, 0, -1, target); } NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const { return slice(0, -1, startCol, endCol); } void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const { slice(0, -1, startCol, endCol, target); } /* * Guaranteed to not change the data if the number of elements doesn't change. * So you can use this to "reshape" a matrix. */ bool NVMatrix::resize(int numRows, int numCols) { bool reallocated = false; if (numRows != _numRows || numCols != _numCols) { assert(_ownsData); if (_numElements != numRows * numCols) { if (_maxElements < numRows * numCols) { Log_Info("NVMatrix::resize %d", numRows * numCols); _maxElements = numRows * numCols; if (_numElements > 0) { // free old memory cublasStatus status = cublasFree(_devData); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! memory free error: %X\n", status); abort(); } } if (numRows * numCols > 0) { // allocate new memory cublasStatus status = cublasAlloc(numCols * numRows, sizeof(float), (void**) &_devData); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device memory allocation error\n"); abort(); } //Log_Info("Allocated to %p", _devData); } else { _devData = NULL; } } reallocated = true; } _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _stride = getLeadingDim(); } return reallocated; } bool NVMatrix::resize(const NVMatrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } bool NVMatrix::resize(const Matrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } void NVMatrix::reshape(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); _numRows = numRows; _numCols = numCols; _stride = getLeadingDim(); } NVMatrix& NVMatrix::reshaped(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); return *new NVMatrix(_devData, numRows, numCols, -1, _isTrans); } void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow, int srcStartCol, int srcEndCol, int destStartRow, int destStartCol) const { srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow; srcEndCol = srcEndCol < 0 ? 
_numCols : srcEndCol; NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol); NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol); srcSlice->apply(NVMatrixOps::Identity(), *destSlice); delete srcSlice; delete destSlice; } NVMatrix& NVMatrix::getTranspose() { return *new NVMatrix(_devData, _numCols, _numRows, _stride, !_isTrans);; } void NVMatrix::transpose(NVMatrix& target) { flipTrans(target); target.setTrans(!target.isTrans()); target.reshape(target.getNumCols(), target.getNumRows()); } void NVMatrix::transpose() { int tmp = _numCols; _numCols = _numRows; _numRows = tmp; _isTrans = !_isTrans; } bool NVMatrix::transpose(bool trans) { bool oldTrans = _isTrans; if (oldTrans != trans) { transpose(); } return oldTrans; } /* * Flips the ordering of the matrix from row-major to column-major and vice versa. * This creates temporary storage -- not a cheap operation. * * This is not equivalent to a "hard transpose". The resultant matrix still has * the same dimensions, its layout in memory just changes. */ NVMatrix& NVMatrix::flipTrans() { NVMatrix* meTrans = new NVMatrix(*this); flipTrans(*meTrans); return *meTrans; } void NVMatrix::flipTrans(NVMatrix& target) { assert(&target != this); target.resize(_numRows, _numCols); target.setTrans(!isTrans()); apply(NVMatrixOps::Identity(), target); } void NVMatrix::squaredDiff(NVMatrix& b) { squaredDiff(b, *this); } void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) { if (scaleA == 0) { b.scale(scaleB, target); return; } if (scaleA == 1 && scaleB == 1) { // slight optimization applyBinary(NVMatrixBinaryOps::Add(), b, target); } else { applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target); } } void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) { add(b, 1, scaleB, target); } void NVMatrix::add(NVMatrix& b, NVMatrix& target) { add(b, 1, target); } void NVMatrix::add(NVMatrix& b, float scaleB) { add(b, scaleB, *this); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) { add(b, scaleA, scaleB, *this); } void NVMatrix::add(NVMatrix& b) { add(b, 1, *this); } void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) { add(b, -1, target); } void NVMatrix::subtract(NVMatrix& b) { add(b, -1); } void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Multiply(), b, target); } void NVMatrix::eltwiseMult(NVMatrix& b) { eltwiseMult(b, *this); } void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Divide(), b, target); } void NVMatrix::eltwiseDivide(NVMatrix& b) { eltwiseDivide(b, *this); } void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) { assert(isContiguous() && target.isContiguous()); assert(timesX > 0 && timesY > 0); target.resize(_numRows*timesY, _numCols*timesX); target.setTrans(_isTrans); if(!isTrans()) { kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK>>>(_devData, target._devData, _numCols, _numRows, target._numCols, target._numRows); } else { kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK>>>(_devData, target._devData, _numRows, _numCols, target._numRows, target._numCols); } cutilCheckMsg("Kernel execution failed"); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::WeightedAdd(1, scaleVec), vec, target); } void 
NVMatrix::addVector(NVMatrix& vec) { addVector(vec, 1, *this); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec) { addVector(vec, scaleVec, *this); } void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) { addVector(vec, 1, target); } void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target); } void NVMatrix::equalsVector(NVMatrix& vec) { equalsVector(vec, *this); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec) { eltwiseMultByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) { eltwiseDivideByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target); } /* * num threads per block is ignored when summing rows (axis=1) because * it has to be a power of 2. * * TODO: this is a mess, fix it. it works pretty fast but it's too ugly. * TODO: this function is _really_ bad for very long aggregations of few columns. */ template<class Agg, class BinaryOp> void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp op) { assert(axis == 0 || axis == 1); assert(isContiguous() && target.isContiguous()); assert(&target != this); int width = _isTrans ? _numRows : _numCols; int height = _isTrans ? _numCols : _numRows; target.setTrans(_isTrans); assert(width > 0); assert(height > 0); if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1); int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK); assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width); assert(numBlocks < NUM_BLOCKS_MAX); kDumbAggCols<Agg, BinaryOp><<<numBlocks,NUM_SUM_COLS_THREADS_PER_BLOCK>>>(_devData, target._devData, width, height, agg, op); cutilCheckMsg("kDumbAggCols: Kernel execution failed"); } else { // row sum target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1); if (width > 1) { if (height >= 16384) { // linear aggregation int numBlocksX = 1; int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y); int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 
16 : AGG_SHORT_ROWS_THREADS_X; int numThreadsY = AGG_SHORT_ROWS_THREADS_Y; while (numBlocksY > NUM_BLOCKS_MAX) { numBlocksY = DIVUP(numBlocksY,2); numBlocksX *= 2; } dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); if(width <= 16) { if(width <= 4) { kAggShortRows<Agg, BinaryOp, 1, 4><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else if(width <= 8) { kAggShortRows<Agg, BinaryOp, 1, 8><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else if(width <= 12) { kAggShortRows<Agg, BinaryOp, 1, 12><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else { kAggShortRows<Agg, BinaryOp, 1, 16><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } } else if(width <= 32) { kAggShortRows<Agg, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else if(width <= 48){ kAggShortRows<Agg, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else if(width <= 64){ kAggShortRows<Agg, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else { kAggShortRows2<Agg, BinaryOp><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } } else { if (width >= 512) { dim3 threads(AWR_NUM_THREADS); dim3 blocks(1, std::min(1024, height)); kAggRows_wholerow_nosync<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, std::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); } else { // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, std::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); NVMatrix *prevSum = this; while (prevSum->getLeadingDim() > 1) { int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512))); int numThreadsY = 1; int numBlocksX = DIVUP(width, 2*numThreadsX); int numBlocksY = std::min(height, NUM_BLOCKS_MAX); NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? 
&target : new NVMatrix(height, numBlocksX, false); dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); assert(numBlocksX <= NUM_BLOCKS_MAX); assert(numBlocksY <= NUM_BLOCKS_MAX); if(width <= 64) { kAggRows<Agg, BinaryOp, 32><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 128) { kAggRows<Agg, BinaryOp, 64><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 256) { kAggRows<Agg, BinaryOp, 128><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 512) { kAggRows<Agg, BinaryOp, 256><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else { kAggRows<Agg, BinaryOp, 512><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } cutilCheckMsg("agg rows: Kernel execution failed"); cudaThreadSynchronize(); width = numBlocksX; // only true in reduction agg, but for linear agg this doesn't matter anyway if (prevSum != this) { delete prevSum; } prevSum = nvSumAccum; } } } } else { copy(target); } } } void NVMatrix::inRangeInc(float lower, float upper) { inRangeInc(lower, upper, *this); } void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<false>(lower, upper), target); } void NVMatrix::inRangeExc(float lower, float upper) { inRangeExc(lower, upper, *this); } void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<true>(lower, upper), target); } void NVMatrix::biggerThanScalar(float scalar) { biggerThanScalar(scalar, *this); } void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::BiggerThanScalar(scalar), target); } void NVMatrix::smallerThanScalar(float scalar) { smallerThanScalar(scalar, *this); } void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::SmallerThanScalar(scalar), target); } void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) { apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target); } void NVMatrix::addScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::AddScalar(scalar), target); } void NVMatrix::addScalar(float scalar) { addScalar(scalar, *this); } void NVMatrix::minWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MinWithScalar(scalar), target); } void NVMatrix::minWithScalar(float scalar) { minWithScalar(scalar, *this); } void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MaxWithScalar(scalar), target); } void NVMatrix::maxWithScalar(float scalar) { maxWithScalar(scalar, *this); } void NVMatrix::pow(float p, NVMatrix& target) { apply(NVMatrixOps::Pow(p), target); } void NVMatrix::pow(float p) { pow(p, *this); } void NVMatrix::scale(float _scale) { scale(_scale, *this); } void NVMatrix::scale(float _scale, NVMatrix& target) { if (_scale != 1 || &target != this) { // optimize away scale by 1 apply(NVMatrixOps::MultByScalar(_scale), target); } } template<class Agg, class BinaryOp> NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp op) { NVMatrix *sumVec = new NVMatrix(); _aggregate<Agg, BinaryOp>(axis, *sumVec, agg, op); return *sumVec; } void NVMatrix::max(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } 
void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) { if (scaleThis != 0) { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum)); } else { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum)); } } void NVMatrix::sum(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } void NVMatrix::min(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::max(int axis) { return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::sum(int axis) { return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::min(int axis) { return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads, int* numCols) { int logn = int(ceil(log(double(n)) / log(2))); *numCols = DIVUP(n, logn); int numThreads = *numCols; *blocks = dim3(DIVUP(numThreads, DP_BLOCKSIZE)); *threads = dim3(DP_BLOCKSIZE); } float NVMatrix::mean() { return sum() / getNumElements(); } float NVMatrix::sum() { return _totalAgg(NVMatrixAggs::Sum()); } float NVMatrix::max() { return _totalAgg(NVMatrixAggs::Max()); } float NVMatrix::min() { return _totalAgg(NVMatrixAggs::Min()); } template<class Agg> float NVMatrix::_totalAgg(Agg agg) { assert(isContiguous()); dim3 blocks, threads; int numCols; // Sum most of it on GPU NVMatrix* src = this; for (NVMatrix* target = NULL; src->getNumElements() > CPUSUM_MAX; src = target) { _sum_setParams(src->getNumElements(), &blocks, &threads, &numCols); target = new NVMatrix(1, blocks.x); kTotalAgg<<<blocks, threads>>>(src->getDevData(), target->getDevData(), numCols, src->getNumElements(), agg); cutilCheckMsg("kTotalAgg: Kernel execution failed"); cudaThreadSynchronize(); // not really necessary? delete (src == this ? NULL : src); } Matrix srcCPU(src->getNumRows(), src->getNumCols()); src->copyToHost(srcCPU); if (src->getNumElements() > 1) { // Sum remainder on CPU delete (src == this ? NULL : src); if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) { return srcCPU.sum(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) { return srcCPU.max(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) { return srcCPU.min(); } else { assert(false); } } return srcCPU(0,0); } /* * Fast dot product only for matrices with same transposedness. */ float NVMatrix::dotProduct(NVMatrix& b) { assert(isContiguous() && b.isContiguous()); assert(isSameDims(b)); assert(isTrans() == b.isTrans()); // see? dim3 blocks, threads; int numCols; _sum_setParams(getNumElements(), &blocks, &threads, &numCols); NVMatrix target(1, blocks.x); kDotProduct_r<<<blocks, threads>>>(getDevData(), b.getDevData(), target.getDevData(), numCols, getNumElements()); cutilCheckMsg("kDotProduct: Kernel execution failed"); cudaThreadSynchronize(); return target.sum(); } float NVMatrix::norm2() { return dotProduct(*this); } float NVMatrix::norm() { return sqrt(norm2()); } void NVMatrix::print(int startRow, int rows, int startCol, int cols) const { cudaThreadSynchronize(); Matrix hm = Matrix(_numRows, _numCols); copyToHost(hm); hm.print(startRow, rows, startCol, cols); } void NVMatrix::print(int rows, int cols) const { print(0, rows, 0, cols); } void NVMatrix::printShape(const char* name) const { printf("%s: %dx%d\n", name, _numRows, _numCols); }
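// --- Illustrative sketch (not part of the corpus) ---------------------------
// The two files above are the same NVMatrix source: the first is the
// hipify-generated HIP form (hipLaunchKernelGGL, hipDeviceSynchronize), the
// second the original CUDA form (<<<...>>>, cudaThreadSynchronize). The small
// CUDA program below shows that launch mapping on a hypothetical kernel
// (kScale); the hipify equivalents are noted in trailing comments and are the
// only HIP calls referenced here.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void kScale(float* data, float alpha, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= alpha;                     // simple elementwise scale
}

int main() {
    const int N = 1024;
    float* d;
    cudaMalloc(&d, N * sizeof(float));               // hipMalloc(&d, N * sizeof(float))
    cudaMemset(d, 0, N * sizeof(float));             // hipMemset(d, 0, N * sizeof(float))
    dim3 grid((N + 255) / 256), block(256);
    kScale<<<grid, block>>>(d, 2.0f, N);             // CUDA triple-chevron launch
    // hipLaunchKernelGGL(kScale, grid, block, 0 /*sharedMem*/, 0 /*stream*/, d, 2.0f, N);
    cudaDeviceSynchronize();                         // hipDeviceSynchronize()
    cudaFree(d);                                     // hipFree(d)
    printf("launched %u blocks of %u threads\n", grid.x, block.x);
    return 0;
}
// ----------------------------------------------------------------------------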
4d846c195988ce51bbd173c07db39e0731894583.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Author: Liangji * Email: [email protected] */ #include <vector> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/gaterecurrent2d_layer.hpp" #include "caffe/util/io.hpp" namespace caffe { __device__ void get_gate_idx(int h1,int w1,int h2,int w2, int * out,bool horizontal, bool reverse) { if(horizontal && ! reverse) // left -> right { if(w1>w2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } if(horizontal && reverse) // right -> left { if(w1<w2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } if(!horizontal && !reverse) // top -> bottom { if(h1>h2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } if(!horizontal && reverse) // bottom -> top { if(h1<h2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } } template <typename Dtype> __device__ Dtype get_data(Dtype * data, int num, int channels,int height, int width,int n,int c,int h,int w) { if(h<0 || h >=height) return 0; if(w<0 || w >= width) return 0; return data[n*channels*height*width + c * height*width + h * width + w]; } template <typename Dtype> __device__ void set_data(Dtype * data, int num, int channels,int height, int width,int n,int c,int h,int w,Dtype v) { if(h<0 || h >=height) return ; if(w<0 || w >= width) return ; data[n*channels*height*width + c * height*width + h * width + w]=v; } template <typename Dtype> __device__ Dtype get_gate(Dtype * data, int num, int channels,int height, int width,int n,int c,int h1,int w1,int h2,int w2,bool horizontal,bool reverse) { if(h1<0 || h1 >=height) return 0; if(w1<0 || w1 >= width) return 0; if(h2<0 || h2 >=height) return 0; if(w2<0 || w2 >= width) return 0; int idx[2]; get_gate_idx(h1,w1,h2,w2, idx,horizontal, reverse); int h = idx[0]; int w = idx[1]; return data[n*channels*height*width + c * height*width + h * width + w]; } template <typename Dtype> __device__ void set_gate(Dtype * data, int num, int channels,int height, int width,int n,int c,int h1,int w1,int h2,int w2,bool horizontal,bool reverse,Dtype v) { if(h1<0 || h1 >=height) return ; if(w1<0 || w1 >= width) return ; if(h2<0 || h2 >=height) return ; if(w2<0 || w2 >= width) return ; int idx[2]; get_gate_idx(h1,w1,h2,w2, idx,horizontal, reverse); int h = idx[0]; int w = idx[1]; data[n*channels*height*width + c * height*width + h * width + w]=v; } template <typename Dtype> __device__ void set_gate_add(Dtype * data, int num, int channels,int height, int width,int n,int c,int h1,int w1,int h2,int w2,bool horizontal,bool reverse,Dtype v) { if(h1<0 || h1 >=height) return ; if(w1<0 || w1 >= width) return ; if(h2<0 || h2 >=height) return ; if(w2<0 || w2 >= width) return ; int idx[2]; get_gate_idx(h1,w1,h2,w2, idx,horizontal, reverse); int h = idx[0]; int w = idx[1]; atomicAdd((float *)(data + n*channels*height*width + c * height*width + h * width + w),float(v)); } template <typename Dtype> __global__ void forward_one_col_left_right(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, Dtype* H,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); Dtype g1_idx = 
get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w-1,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w-1,horizontal,reverse); Dtype g_data_1 = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype h_minus1_data_1 = get_data(H,num,channels,height,width,n,c,h-1,w-1); Dtype h1 = (1-g_data_1)*x_data + g_data_1 * h_minus1_data_1; Dtype g_data_2 = get_gate(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); Dtype h_minus1_data_2 = get_data(H,num,channels,height,width,n,c,h,w-1); Dtype h2 = (1-g_data_2)*x_data + g_data_2 * h_minus1_data_2; Dtype g_data_3 = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype h_minus1_data_3 = get_data(H,num,channels,height,width,n,c,h+1,w-1); Dtype h3 = (1-g_data_3)*x_data + g_data_3 * h_minus1_data_3; Dtype h_data = h1*g1_idx + h2 * g2_idx + h3*g3_idx; set_data(H,num,channels,height,width,n,c,h,w,h_data); } } template <typename Dtype> __global__ void forward_one_col_right_left(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, Dtype* H,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w+1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w+1,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse); Dtype g_data_1 = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype h_minus1_data_1 = get_data(H,num,channels,height,width,n,c,h-1,w+1); Dtype h1 = (1-g_data_1)*x_data + g_data_1 * h_minus1_data_1; Dtype g_data_2 = get_gate(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); Dtype h_minus1_data_2 = get_data(H,num,channels,height,width,n,c,h,w+1); Dtype h2 = (1-g_data_2)*x_data + g_data_2 * h_minus1_data_2; Dtype g_data_3 = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); Dtype h_minus1_data_3 = get_data(H,num,channels,height,width,n,c,h+1,w+1); Dtype h3 = (1-g_data_3)*x_data + g_data_3 * h_minus1_data_3; Dtype h_data = h1*g1_idx + h2 * g2_idx + h3*g3_idx; set_data(H,num,channels,height,width,n,c,h,w,h_data); } } template <typename Dtype> __global__ void forward_one_row_top_bottom(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, Dtype* H,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h-1,w,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w+1,horizontal,reverse); Dtype g_data_1 = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype h_minus1_data_1 = get_data(H,num,channels,height,width,n,c,h-1,w-1); Dtype h1 = (1-g_data_1)*x_data + 
g_data_1 * h_minus1_data_1; Dtype g_data_2 = get_gate(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); Dtype h_minus1_data_2 = get_data(H,num,channels,height,width,n,c,h-1,w); Dtype h2 = (1-g_data_2)*x_data + g_data_2 * h_minus1_data_2; Dtype g_data_3 = get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype h_minus1_data_3 = get_data(H,num,channels,height,width,n,c,h-1,w+1); Dtype h3 = (1-g_data_3)*x_data + g_data_3 * h_minus1_data_3; Dtype h_data = h1*g1_idx + h2 * g2_idx + h3*g3_idx; set_data(H,num,channels,height,width,n,c,h,w,h_data); } } template <typename Dtype> __global__ void forward_one_row_bottom_top(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, Dtype* H,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h+1,w,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse); Dtype g_data_1 = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype h_minus1_data_1 = get_data(H,num,channels,height,width,n,c,h+1,w-1); Dtype h1 = (1-g_data_1)*x_data + g_data_1 * h_minus1_data_1; Dtype g_data_2 = get_gate(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); Dtype h_minus1_data_2 = get_data(H,num,channels,height,width,n,c,h+1,w); Dtype h2 = (1-g_data_2)*x_data + g_data_2 * h_minus1_data_2; Dtype g_data_3 = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); Dtype h_minus1_data_3 = get_data(H,num,channels,height,width,n,c,h+1,w+1); Dtype h3 = (1-g_data_3)*x_data + g_data_3 * h_minus1_data_3; Dtype h_data = h1*g1_idx + h2 * g2_idx + h3*g3_idx; set_data(H,num,channels,height,width,n,c,h,w,h_data); } } template <typename Dtype> __global__ void backward_one_col_left_right(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, const Dtype* H, Dtype * X_diff, Dtype * G1_diff,Dtype* G2_diff,Dtype * G3_diff,Dtype * Idx_diff, Dtype * Hdiff,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff Dtype h_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T Dtype add1_g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w+1,horizontal,reverse); Dtype add1_h3_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w+1); Dtype add1_g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype add1_g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w+1,horizontal,reverse); Dtype add1_h2_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w+1); Dtype add1_g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); Dtype add1_g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w+1,horizontal,reverse); Dtype add1_h1_diff = 
get_data(Hdiff,num,channels,height,width,n,c,h+1,w+1); Dtype add1_g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); h_diff = h_diff + add1_g3_idx * add1_h3_diff * add1_g3_data + add1_g2_idx * add1_h2_diff * add1_g2_data + add1_g1_idx * add1_h1_diff * add1_g1_data ; //Hdiff[n*channels*height*width + c*height*width + h*width + w]=0; set_data(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-g(t))*h(t)_diff Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w-1,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w-1,horizontal,reverse); Dtype g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); Dtype g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype x_diff = (1-g1_data)*h_diff*g1_idx + (1-g2_data)*h_diff*g2_idx + (1-g3_data)*h_diff*g3_idx; set_data(X_diff,num,channels,height,width,n,c,h,w,x_diff); //g(t)_diff = h(t)_diff * x(t) * -1 //g(t)_diff+=h(t)_diff * h(t-1)if t>0 Dtype g1_diff = h_diff * g1_idx * x_data * -1; Dtype h1_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w-1); g1_diff = g1_diff + h_diff * g1_idx*h1_minus1_data; set_gate(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse,g1_diff); Dtype g2_diff = h_diff * g2_idx * x_data * -1; Dtype h2_minus1_data = get_data(H,num,channels,height,width,n,c,h,w-1); g2_diff = g2_diff + h_diff * g2_idx*h2_minus1_data; set_gate(G2_diff,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse,g2_diff); Dtype g3_diff = h_diff * g3_idx * x_data * -1; Dtype h3_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w-1); g3_diff = g3_diff + h_diff * g3_idx*h3_minus1_data; set_gate(G3_diff,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse,g3_diff); //idx_diff = h_diff*( (1-g(t))*x(t) + g(t)*h(t-1) ) Dtype g1_idx_diff = h_diff * ( (1-g1_data)*x_data + g1_data*h1_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse,g1_idx_diff); Dtype g2_idx_diff = h_diff * ( (1-g2_data)*x_data + g2_data*h2_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,1,h,w,h,w-1,horizontal,reverse,g2_idx_diff); Dtype g3_idx_diff = h_diff * ( (1-g3_data)*x_data + g3_data*h3_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,2,h,w,h+1,w-1,horizontal,reverse,g3_idx_diff); } } template <typename Dtype> __global__ void backward_one_col_right_left(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, const Dtype* H, Dtype * X_diff, Dtype * G1_diff,Dtype* G2_diff,Dtype * G3_diff,Dtype * Idx_diff, Dtype * Hdiff,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff Dtype h_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T Dtype add1_g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w-1,horizontal,reverse); Dtype add1_h3_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w-1); Dtype add1_g3_data = 
get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype add1_g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w-1,horizontal,reverse); Dtype add1_h2_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w-1); Dtype add1_g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); Dtype add1_g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w-1,horizontal,reverse); Dtype add1_h1_diff = get_data(Hdiff,num,channels,height,width,n,c,h+1,w-1); Dtype add1_g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); h_diff = h_diff + add1_g3_idx * add1_h3_diff * add1_g3_data + add1_g2_idx * add1_h2_diff * add1_g2_data + add1_g1_idx * add1_h1_diff * add1_g1_data ; //Hdiff[n*channels*height*width + c*height*width + h*width + w]=0; set_data(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-g(t))*h(t)_diff Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w+1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w+1,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse); Dtype g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); Dtype g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); Dtype x_diff = (1-g1_data)*h_diff*g1_idx + (1-g2_data)*h_diff*g2_idx + (1-g3_data)*h_diff*g3_idx; set_data(X_diff,num,channels,height,width,n,c,h,w,x_diff); //g(t)_diff = h(t)_diff * x(t) * -1 //g(t)_diff+=h(t)_diff * h(t-1)if t>0 Dtype g1_diff = h_diff * g1_idx * x_data * -1; Dtype h1_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w+1); g1_diff = g1_diff + h_diff * g1_idx*h1_minus1_data; set_gate(G1_diff,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse,g1_diff); Dtype g2_diff = h_diff * g2_idx * x_data * -1; Dtype h2_minus1_data = get_data(H,num,channels,height,width,n,c,h,w+1); g2_diff = g2_diff + h_diff * g2_idx*h2_minus1_data; set_gate(G2_diff,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse,g2_diff); Dtype g3_diff = h_diff * g3_idx * x_data * -1; Dtype h3_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w+1); g3_diff = g3_diff + h_diff * g3_idx*h3_minus1_data; set_gate(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse,g3_diff); //idx_diff = h_diff*( (1-g(t))*x(t) + g(t)*h(t-1) ) Dtype g1_idx_diff = h_diff * ( (1-g1_data)*x_data + g1_data*h1_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,0,h,w,h-1,w+1,horizontal,reverse,g1_idx_diff); Dtype g2_idx_diff = h_diff * ( (1-g2_data)*x_data + g2_data*h2_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,1,h,w,h,w+1,horizontal,reverse,g2_idx_diff); Dtype g3_idx_diff = h_diff * ( (1-g3_data)*x_data + g3_data*h3_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse,g3_idx_diff); } } template <typename Dtype> __global__ void backward_one_row_top_bottom(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, const Dtype* H, Dtype * X_diff, Dtype * G1_diff,Dtype* G2_diff,Dtype * G3_diff,Dtype * Idx_diff, Dtype * Hdiff,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; Dtype x_data 
= get_data(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff Dtype h_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T Dtype add1_g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w-1,horizontal,reverse); Dtype add1_h3_diff = get_data(Hdiff,num,channels,height,width,n,c,h+1,w-1); Dtype add1_g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype add1_g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h+1,w,horizontal,reverse); Dtype add1_h2_diff = get_data(Hdiff,num,channels,height,width,n,c,h+1,w); Dtype add1_g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); Dtype add1_g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w+1,horizontal,reverse); Dtype add1_h1_diff = get_data(Hdiff,num,channels,height,width,n,c,h+1,w+1); Dtype add1_g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); h_diff = h_diff + add1_g3_idx * add1_h3_diff * add1_g3_data + add1_g2_idx * add1_h2_diff * add1_g2_data + add1_g1_idx * add1_h1_diff * add1_g1_data ; //Hdiff[n*channels*height*width + c*height*width + h*width + w]=0; set_data(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-g(t))*h(t)_diff Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h-1,w,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w+1,horizontal,reverse); Dtype g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); Dtype g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype x_diff = (1-g1_data)*h_diff*g1_idx + (1-g2_data)*h_diff*g2_idx + (1-g3_data)*h_diff*g3_idx; set_data(X_diff,num,channels,height,width,n,c,h,w,x_diff); //g(t)_diff = h(t)_diff * x(t) * -1 //g(t)_diff+=h(t)_diff * h(t-1)if t>0 Dtype g1_diff = h_diff * g1_idx * x_data * -1; Dtype h1_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w-1); g1_diff = g1_diff + h_diff * g1_idx*h1_minus1_data; set_gate(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse,g1_diff); Dtype g2_diff = h_diff * g2_idx * x_data * -1; Dtype h2_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w); g2_diff = g2_diff + h_diff * g2_idx*h2_minus1_data; set_gate(G2_diff,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse,g2_diff); Dtype g3_diff = h_diff * g3_idx * x_data * -1; Dtype h3_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w+1); g3_diff = g3_diff + h_diff * g3_idx*h3_minus1_data; set_gate(G3_diff,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse,g3_diff); //idx_diff = h_diff*( (1-g(t))*x(t) + g(t)*h(t-1) ) Dtype g1_idx_diff = h_diff * ( (1-g1_data)*x_data + g1_data*h1_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse,g1_idx_diff); Dtype g2_idx_diff = h_diff * ( (1-g2_data)*x_data + g2_data*h2_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,1,h,w,h-1,w,horizontal,reverse,g2_idx_diff); Dtype g3_idx_diff = h_diff * ( (1-g3_data)*x_data + g3_data*h3_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,2,h,w,h-1,w+1,horizontal,reverse,g3_idx_diff); } } template <typename Dtype> __global__ void backward_one_row_bottom_top(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* 
G3,const Dtype* Idx, const Dtype* H, Dtype * X_diff, Dtype * G1_diff,Dtype* G2_diff,Dtype * G3_diff,Dtype * Idx_diff, Dtype * Hdiff,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff Dtype h_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T Dtype add1_g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w-1,horizontal,reverse); Dtype add1_h3_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w-1); Dtype add1_g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype add1_g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h-1,w,horizontal,reverse); Dtype add1_h2_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w); Dtype add1_g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); Dtype add1_g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w+1,horizontal,reverse); Dtype add1_h1_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w+1); Dtype add1_g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); h_diff = h_diff + add1_g3_idx * add1_h3_diff * add1_g3_data + add1_g2_idx * add1_h2_diff * add1_g2_data + add1_g1_idx * add1_h1_diff * add1_g1_data ; //Hdiff[n*channels*height*width + c*height*width + h*width + w]=0; set_data(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-g(t))*h(t)_diff Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h+1,w,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse); Dtype g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); Dtype g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); Dtype x_diff = (1-g1_data)*h_diff*g1_idx + (1-g2_data)*h_diff*g2_idx + (1-g3_data)*h_diff*g3_idx; set_data(X_diff,num,channels,height,width,n,c,h,w,x_diff); //g(t)_diff = h(t)_diff * x(t) * -1 //g(t)_diff+=h(t)_diff * h(t-1)if t>0 Dtype g1_diff = h_diff * g1_idx * x_data * -1; Dtype h1_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w-1); g1_diff = g1_diff + h_diff * g1_idx*h1_minus1_data; set_gate(G1_diff,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse,g1_diff); Dtype g2_diff = h_diff * g2_idx * x_data * -1; Dtype h2_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w); g2_diff = g2_diff + h_diff * g2_idx*h2_minus1_data; set_gate(G2_diff,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse,g2_diff); Dtype g3_diff = h_diff * g3_idx * x_data * -1; Dtype h3_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w+1); g3_diff = g3_diff + h_diff * g3_idx*h3_minus1_data; set_gate(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse,g3_diff); //idx_diff = h_diff*( (1-g(t))*x(t) + g(t)*h(t-1) ) Dtype g1_idx_diff = h_diff * ( (1-g1_data)*x_data + g1_data*h1_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,0,h,w,h+1,w-1,horizontal,reverse,g1_idx_diff); Dtype g2_idx_diff = h_diff * ( (1-g2_data)*x_data + g2_data*h2_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,1,h,w,h+1,w,horizontal,reverse,g2_idx_diff); 
Dtype g3_idx_diff = h_diff * ( (1-g3_data)*x_data + g3_data*h3_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse,g3_idx_diff); } } template <typename Dtype> void GateRecurrent2dLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* X = bottom[0]->gpu_data(); const Dtype* G1 = bottom[1]->gpu_data(); const Dtype* G2 = bottom[2]->gpu_data(); const Dtype* G3 = bottom[3]->gpu_data(); const Dtype* Idx = bottom[4]->gpu_data(); Dtype * H = top[0]->mutable_gpu_data(); if(horizontal_ && !reverse_) // left to right { const int count = height_ * channels_ * num_; for(int t=0;t<width_;t++) { hipLaunchKernelGGL(( forward_one_col_left_right<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,horizontal_,reverse_); CUDA_POST_KERNEL_CHECK; } } else if(horizontal_ && reverse_) // right to left { const int count = height_ * channels_ * num_; for(int t=width_ - 1; t>=0; t--) { hipLaunchKernelGGL(( forward_one_col_right_left<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,horizontal_,reverse_); CUDA_POST_KERNEL_CHECK; } } else if(!horizontal_ && !reverse_) // top to bottom { const int count = width_ * channels_ * num_; for(int t=0; t< height_; t++) { hipLaunchKernelGGL(( forward_one_row_top_bottom<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,horizontal_,reverse_); CUDA_POST_KERNEL_CHECK; } } else //bottom to top { const int count = width_ * channels_ * num_; for(int t=height_-1; t>=0; t--) { hipLaunchKernelGGL(( forward_one_row_bottom_top<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,horizontal_,reverse_); CUDA_POST_KERNEL_CHECK; } } } template <typename Dtype> void GateRecurrent2dLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* X = bottom[0]->gpu_data(); const Dtype* G1 = bottom[1]->gpu_data(); const Dtype* G2 = bottom[2]->gpu_data(); const Dtype* G3 = bottom[3]->gpu_data(); const Dtype* Idx = bottom[4]->gpu_data(); const Dtype * H = top[0]->gpu_data(); Dtype * H_diff = H_.mutable_gpu_diff(); caffe_copy(top[0]->count(),top[0]->gpu_diff(),H_diff); Dtype * X_diff = bottom[0]->mutable_gpu_diff(); Dtype * G1_diff = bottom[1]->mutable_gpu_diff(); Dtype * G2_diff = bottom[2]->mutable_gpu_diff(); Dtype * G3_diff = bottom[3]->mutable_gpu_diff(); Dtype * Idx_diff = bottom[4]->mutable_gpu_diff(); Dtype * H_cpudiff = H_.mutable_cpu_diff(); //SaveArray("topdiff.txt", H_.mutable_cpu_diff(),top[0]->count()); if(horizontal_ && ! 
reverse_) //left to right { const int count = height_ * channels_ * num_; for(int t = width_ -1; t>=0; t--) { hipLaunchKernelGGL(( backward_one_col_left_right<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,X_diff,G1_diff,G2_diff,G3_diff,Idx_diff,H_diff,horizontal_, reverse_); CUDA_POST_KERNEL_CHECK; } } else if(horizontal_ && reverse_) //right to left { const int count = height_ * channels_ * num_; for(int t = 0; t<width_; t++) { hipLaunchKernelGGL(( backward_one_col_right_left<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,X_diff,G1_diff,G2_diff,G3_diff,Idx_diff,H_diff,horizontal_, reverse_); CUDA_POST_KERNEL_CHECK; } } else if(!horizontal_ && !reverse_) //top to bottom { const int count = width_ * channels_ * num_; for(int t = height_-1; t>=0; t--) { hipLaunchKernelGGL(( backward_one_row_top_bottom<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,X_diff,G1_diff,G2_diff,G3_diff,Idx_diff,H_diff,horizontal_, reverse_); CUDA_POST_KERNEL_CHECK; } } else //bottom to top { const int count = width_ * channels_ * num_; for(int t = 0; t<height_; t++) { hipLaunchKernelGGL(( backward_one_row_bottom_top<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,X_diff,G1_diff,G2_diff,G3_diff,Idx_diff,H_diff,horizontal_, reverse_); CUDA_POST_KERNEL_CHECK; } } } INSTANTIATE_LAYER_GPU_FUNCS(GateRecurrent2dLayer); } // namespace caffe
4d846c195988ce51bbd173c07db39e0731894583.cu
/* * Author: Liangji * Email: [email protected] */ #include <vector> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/gaterecurrent2d_layer.hpp" #include "caffe/util/io.hpp" namespace caffe { __device__ void get_gate_idx(int h1,int w1,int h2,int w2, int * out,bool horizontal, bool reverse) { if(horizontal && ! reverse) // left -> right { if(w1>w2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } if(horizontal && reverse) // right -> left { if(w1<w2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } if(!horizontal && !reverse) // top -> bottom { if(h1>h2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } if(!horizontal && reverse) // bottom -> top { if(h1<h2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } } template <typename Dtype> __device__ Dtype get_data(Dtype * data, int num, int channels,int height, int width,int n,int c,int h,int w) { if(h<0 || h >=height) return 0; if(w<0 || w >= width) return 0; return data[n*channels*height*width + c * height*width + h * width + w]; } template <typename Dtype> __device__ void set_data(Dtype * data, int num, int channels,int height, int width,int n,int c,int h,int w,Dtype v) { if(h<0 || h >=height) return ; if(w<0 || w >= width) return ; data[n*channels*height*width + c * height*width + h * width + w]=v; } template <typename Dtype> __device__ Dtype get_gate(Dtype * data, int num, int channels,int height, int width,int n,int c,int h1,int w1,int h2,int w2,bool horizontal,bool reverse) { if(h1<0 || h1 >=height) return 0; if(w1<0 || w1 >= width) return 0; if(h2<0 || h2 >=height) return 0; if(w2<0 || w2 >= width) return 0; int idx[2]; get_gate_idx(h1,w1,h2,w2, idx,horizontal, reverse); int h = idx[0]; int w = idx[1]; return data[n*channels*height*width + c * height*width + h * width + w]; } template <typename Dtype> __device__ void set_gate(Dtype * data, int num, int channels,int height, int width,int n,int c,int h1,int w1,int h2,int w2,bool horizontal,bool reverse,Dtype v) { if(h1<0 || h1 >=height) return ; if(w1<0 || w1 >= width) return ; if(h2<0 || h2 >=height) return ; if(w2<0 || w2 >= width) return ; int idx[2]; get_gate_idx(h1,w1,h2,w2, idx,horizontal, reverse); int h = idx[0]; int w = idx[1]; data[n*channels*height*width + c * height*width + h * width + w]=v; } template <typename Dtype> __device__ void set_gate_add(Dtype * data, int num, int channels,int height, int width,int n,int c,int h1,int w1,int h2,int w2,bool horizontal,bool reverse,Dtype v) { if(h1<0 || h1 >=height) return ; if(w1<0 || w1 >= width) return ; if(h2<0 || h2 >=height) return ; if(w2<0 || w2 >= width) return ; int idx[2]; get_gate_idx(h1,w1,h2,w2, idx,horizontal, reverse); int h = idx[0]; int w = idx[1]; atomicAdd((float *)(data + n*channels*height*width + c * height*width + h * width + w),float(v)); } template <typename Dtype> __global__ void forward_one_col_left_right(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, Dtype* H,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse); Dtype g2_idx = 
get_gate(Idx,num,3,height,width,n,1,h,w,h,w-1,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w-1,horizontal,reverse); Dtype g_data_1 = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype h_minus1_data_1 = get_data(H,num,channels,height,width,n,c,h-1,w-1); Dtype h1 = (1-g_data_1)*x_data + g_data_1 * h_minus1_data_1; Dtype g_data_2 = get_gate(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); Dtype h_minus1_data_2 = get_data(H,num,channels,height,width,n,c,h,w-1); Dtype h2 = (1-g_data_2)*x_data + g_data_2 * h_minus1_data_2; Dtype g_data_3 = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype h_minus1_data_3 = get_data(H,num,channels,height,width,n,c,h+1,w-1); Dtype h3 = (1-g_data_3)*x_data + g_data_3 * h_minus1_data_3; Dtype h_data = h1*g1_idx + h2 * g2_idx + h3*g3_idx; set_data(H,num,channels,height,width,n,c,h,w,h_data); } } template <typename Dtype> __global__ void forward_one_col_right_left(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, Dtype* H,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w+1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w+1,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse); Dtype g_data_1 = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype h_minus1_data_1 = get_data(H,num,channels,height,width,n,c,h-1,w+1); Dtype h1 = (1-g_data_1)*x_data + g_data_1 * h_minus1_data_1; Dtype g_data_2 = get_gate(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); Dtype h_minus1_data_2 = get_data(H,num,channels,height,width,n,c,h,w+1); Dtype h2 = (1-g_data_2)*x_data + g_data_2 * h_minus1_data_2; Dtype g_data_3 = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); Dtype h_minus1_data_3 = get_data(H,num,channels,height,width,n,c,h+1,w+1); Dtype h3 = (1-g_data_3)*x_data + g_data_3 * h_minus1_data_3; Dtype h_data = h1*g1_idx + h2 * g2_idx + h3*g3_idx; set_data(H,num,channels,height,width,n,c,h,w,h_data); } } template <typename Dtype> __global__ void forward_one_row_top_bottom(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, Dtype* H,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h-1,w,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w+1,horizontal,reverse); Dtype g_data_1 = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype h_minus1_data_1 = get_data(H,num,channels,height,width,n,c,h-1,w-1); Dtype h1 = (1-g_data_1)*x_data + g_data_1 * h_minus1_data_1; Dtype g_data_2 = 
get_gate(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); Dtype h_minus1_data_2 = get_data(H,num,channels,height,width,n,c,h-1,w); Dtype h2 = (1-g_data_2)*x_data + g_data_2 * h_minus1_data_2; Dtype g_data_3 = get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype h_minus1_data_3 = get_data(H,num,channels,height,width,n,c,h-1,w+1); Dtype h3 = (1-g_data_3)*x_data + g_data_3 * h_minus1_data_3; Dtype h_data = h1*g1_idx + h2 * g2_idx + h3*g3_idx; set_data(H,num,channels,height,width,n,c,h,w,h_data); } } template <typename Dtype> __global__ void forward_one_row_bottom_top(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, Dtype* H,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h+1,w,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse); Dtype g_data_1 = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype h_minus1_data_1 = get_data(H,num,channels,height,width,n,c,h+1,w-1); Dtype h1 = (1-g_data_1)*x_data + g_data_1 * h_minus1_data_1; Dtype g_data_2 = get_gate(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); Dtype h_minus1_data_2 = get_data(H,num,channels,height,width,n,c,h+1,w); Dtype h2 = (1-g_data_2)*x_data + g_data_2 * h_minus1_data_2; Dtype g_data_3 = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); Dtype h_minus1_data_3 = get_data(H,num,channels,height,width,n,c,h+1,w+1); Dtype h3 = (1-g_data_3)*x_data + g_data_3 * h_minus1_data_3; Dtype h_data = h1*g1_idx + h2 * g2_idx + h3*g3_idx; set_data(H,num,channels,height,width,n,c,h,w,h_data); } } template <typename Dtype> __global__ void backward_one_col_left_right(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, const Dtype* H, Dtype * X_diff, Dtype * G1_diff,Dtype* G2_diff,Dtype * G3_diff,Dtype * Idx_diff, Dtype * Hdiff,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff Dtype h_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T Dtype add1_g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w+1,horizontal,reverse); Dtype add1_h3_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w+1); Dtype add1_g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype add1_g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w+1,horizontal,reverse); Dtype add1_h2_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w+1); Dtype add1_g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); Dtype add1_g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w+1,horizontal,reverse); Dtype add1_h1_diff = get_data(Hdiff,num,channels,height,width,n,c,h+1,w+1); Dtype 
add1_g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); h_diff = h_diff + add1_g3_idx * add1_h3_diff * add1_g3_data + add1_g2_idx * add1_h2_diff * add1_g2_data + add1_g1_idx * add1_h1_diff * add1_g1_data ; //Hdiff[n*channels*height*width + c*height*width + h*width + w]=0; set_data(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-g(t))*h(t)_diff Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w-1,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w-1,horizontal,reverse); Dtype g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); Dtype g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype x_diff = (1-g1_data)*h_diff*g1_idx + (1-g2_data)*h_diff*g2_idx + (1-g3_data)*h_diff*g3_idx; set_data(X_diff,num,channels,height,width,n,c,h,w,x_diff); //g(t)_diff = h(t)_diff * x(t) * -1 //g(t)_diff+=h(t)_diff * h(t-1)if t>0 Dtype g1_diff = h_diff * g1_idx * x_data * -1; Dtype h1_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w-1); g1_diff = g1_diff + h_diff * g1_idx*h1_minus1_data; set_gate(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse,g1_diff); Dtype g2_diff = h_diff * g2_idx * x_data * -1; Dtype h2_minus1_data = get_data(H,num,channels,height,width,n,c,h,w-1); g2_diff = g2_diff + h_diff * g2_idx*h2_minus1_data; set_gate(G2_diff,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse,g2_diff); Dtype g3_diff = h_diff * g3_idx * x_data * -1; Dtype h3_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w-1); g3_diff = g3_diff + h_diff * g3_idx*h3_minus1_data; set_gate(G3_diff,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse,g3_diff); //idx_diff = h_diff*( (1-g(t))*x(t) + g(t)*h(t-1) ) Dtype g1_idx_diff = h_diff * ( (1-g1_data)*x_data + g1_data*h1_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse,g1_idx_diff); Dtype g2_idx_diff = h_diff * ( (1-g2_data)*x_data + g2_data*h2_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,1,h,w,h,w-1,horizontal,reverse,g2_idx_diff); Dtype g3_idx_diff = h_diff * ( (1-g3_data)*x_data + g3_data*h3_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,2,h,w,h+1,w-1,horizontal,reverse,g3_idx_diff); } } template <typename Dtype> __global__ void backward_one_col_right_left(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, const Dtype* H, Dtype * X_diff, Dtype * G1_diff,Dtype* G2_diff,Dtype * G3_diff,Dtype * Idx_diff, Dtype * Hdiff,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff Dtype h_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T Dtype add1_g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w-1,horizontal,reverse); Dtype add1_h3_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w-1); Dtype add1_g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype add1_g2_idx = 
get_gate(Idx,num,3,height,width,n,1,h,w,h,w-1,horizontal,reverse); Dtype add1_h2_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w-1); Dtype add1_g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); Dtype add1_g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w-1,horizontal,reverse); Dtype add1_h1_diff = get_data(Hdiff,num,channels,height,width,n,c,h+1,w-1); Dtype add1_g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); h_diff = h_diff + add1_g3_idx * add1_h3_diff * add1_g3_data + add1_g2_idx * add1_h2_diff * add1_g2_data + add1_g1_idx * add1_h1_diff * add1_g1_data ; //Hdiff[n*channels*height*width + c*height*width + h*width + w]=0; set_data(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-g(t))*h(t)_diff Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w+1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h,w+1,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse); Dtype g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); Dtype g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); Dtype x_diff = (1-g1_data)*h_diff*g1_idx + (1-g2_data)*h_diff*g2_idx + (1-g3_data)*h_diff*g3_idx; set_data(X_diff,num,channels,height,width,n,c,h,w,x_diff); //g(t)_diff = h(t)_diff * x(t) * -1 //g(t)_diff+=h(t)_diff * h(t-1)if t>0 Dtype g1_diff = h_diff * g1_idx * x_data * -1; Dtype h1_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w+1); g1_diff = g1_diff + h_diff * g1_idx*h1_minus1_data; set_gate(G1_diff,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse,g1_diff); Dtype g2_diff = h_diff * g2_idx * x_data * -1; Dtype h2_minus1_data = get_data(H,num,channels,height,width,n,c,h,w+1); g2_diff = g2_diff + h_diff * g2_idx*h2_minus1_data; set_gate(G2_diff,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse,g2_diff); Dtype g3_diff = h_diff * g3_idx * x_data * -1; Dtype h3_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w+1); g3_diff = g3_diff + h_diff * g3_idx*h3_minus1_data; set_gate(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse,g3_diff); //idx_diff = h_diff*( (1-g(t))*x(t) + g(t)*h(t-1) ) Dtype g1_idx_diff = h_diff * ( (1-g1_data)*x_data + g1_data*h1_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,0,h,w,h-1,w+1,horizontal,reverse,g1_idx_diff); Dtype g2_idx_diff = h_diff * ( (1-g2_data)*x_data + g2_data*h2_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,1,h,w,h,w+1,horizontal,reverse,g2_idx_diff); Dtype g3_idx_diff = h_diff * ( (1-g3_data)*x_data + g3_data*h3_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse,g3_idx_diff); } } template <typename Dtype> __global__ void backward_one_row_top_bottom(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, const Dtype* H, Dtype * X_diff, Dtype * G1_diff,Dtype* G2_diff,Dtype * G3_diff,Dtype * Idx_diff, Dtype * Hdiff,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff Dtype h_diff = 
get_data(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T Dtype add1_g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w-1,horizontal,reverse); Dtype add1_h3_diff = get_data(Hdiff,num,channels,height,width,n,c,h+1,w-1); Dtype add1_g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype add1_g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h+1,w,horizontal,reverse); Dtype add1_h2_diff = get_data(Hdiff,num,channels,height,width,n,c,h+1,w); Dtype add1_g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); Dtype add1_g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w+1,horizontal,reverse); Dtype add1_h1_diff = get_data(Hdiff,num,channels,height,width,n,c,h+1,w+1); Dtype add1_g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); h_diff = h_diff + add1_g3_idx * add1_h3_diff * add1_g3_data + add1_g2_idx * add1_h2_diff * add1_g2_data + add1_g1_idx * add1_h1_diff * add1_g1_data ; //Hdiff[n*channels*height*width + c*height*width + h*width + w]=0; set_data(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-g(t))*h(t)_diff Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h-1,w,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w+1,horizontal,reverse); Dtype g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); Dtype g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); Dtype x_diff = (1-g1_data)*h_diff*g1_idx + (1-g2_data)*h_diff*g2_idx + (1-g3_data)*h_diff*g3_idx; set_data(X_diff,num,channels,height,width,n,c,h,w,x_diff); //g(t)_diff = h(t)_diff * x(t) * -1 //g(t)_diff+=h(t)_diff * h(t-1)if t>0 Dtype g1_diff = h_diff * g1_idx * x_data * -1; Dtype h1_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w-1); g1_diff = g1_diff + h_diff * g1_idx*h1_minus1_data; set_gate(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse,g1_diff); Dtype g2_diff = h_diff * g2_idx * x_data * -1; Dtype h2_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w); g2_diff = g2_diff + h_diff * g2_idx*h2_minus1_data; set_gate(G2_diff,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse,g2_diff); Dtype g3_diff = h_diff * g3_idx * x_data * -1; Dtype h3_minus1_data = get_data(H,num,channels,height,width,n,c,h-1,w+1); g3_diff = g3_diff + h_diff * g3_idx*h3_minus1_data; set_gate(G3_diff,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse,g3_diff); //idx_diff = h_diff*( (1-g(t))*x(t) + g(t)*h(t-1) ) Dtype g1_idx_diff = h_diff * ( (1-g1_data)*x_data + g1_data*h1_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,0,h,w,h-1,w-1,horizontal,reverse,g1_idx_diff); Dtype g2_idx_diff = h_diff * ( (1-g2_data)*x_data + g2_data*h2_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,1,h,w,h-1,w,horizontal,reverse,g2_idx_diff); Dtype g3_idx_diff = h_diff * ( (1-g3_data)*x_data + g3_data*h3_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,2,h,w,h-1,w+1,horizontal,reverse,g3_idx_diff); } } template <typename Dtype> __global__ void backward_one_row_bottom_top(const int count, int T, int num,int channels, int height, int width,const Dtype* X,const Dtype* G1, const Dtype* G2,const Dtype* G3,const Dtype* Idx, const Dtype* H, Dtype * X_diff, Dtype * G1_diff,Dtype* G2_diff,Dtype * 
G3_diff,Dtype * Idx_diff, Dtype * Hdiff,bool horizontal,bool reverse) { CUDA_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; Dtype x_data = get_data(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff Dtype h_diff = get_data(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T Dtype add1_g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h-1,w-1,horizontal,reverse); Dtype add1_h3_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w-1); Dtype add1_g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); Dtype add1_g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h-1,w,horizontal,reverse); Dtype add1_h2_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w); Dtype add1_g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); Dtype add1_g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h-1,w+1,horizontal,reverse); Dtype add1_h1_diff = get_data(Hdiff,num,channels,height,width,n,c,h-1,w+1); Dtype add1_g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); h_diff = h_diff + add1_g3_idx * add1_h3_diff * add1_g3_data + add1_g2_idx * add1_h2_diff * add1_g2_data + add1_g1_idx * add1_h1_diff * add1_g1_data ; //Hdiff[n*channels*height*width + c*height*width + h*width + w]=0; set_data(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-g(t))*h(t)_diff Dtype g1_idx = get_gate(Idx,num,3,height,width,n,0,h,w,h+1,w-1,horizontal,reverse); Dtype g2_idx = get_gate(Idx,num,3,height,width,n,1,h,w,h+1,w,horizontal,reverse); Dtype g3_idx = get_gate(Idx,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse); Dtype g1_data = get_gate(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); Dtype g2_data = get_gate(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); Dtype g3_data = get_gate(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); Dtype x_diff = (1-g1_data)*h_diff*g1_idx + (1-g2_data)*h_diff*g2_idx + (1-g3_data)*h_diff*g3_idx; set_data(X_diff,num,channels,height,width,n,c,h,w,x_diff); //g(t)_diff = h(t)_diff * x(t) * -1 //g(t)_diff+=h(t)_diff * h(t-1)if t>0 Dtype g1_diff = h_diff * g1_idx * x_data * -1; Dtype h1_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w-1); g1_diff = g1_diff + h_diff * g1_idx*h1_minus1_data; set_gate(G1_diff,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse,g1_diff); Dtype g2_diff = h_diff * g2_idx * x_data * -1; Dtype h2_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w); g2_diff = g2_diff + h_diff * g2_idx*h2_minus1_data; set_gate(G2_diff,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse,g2_diff); Dtype g3_diff = h_diff * g3_idx * x_data * -1; Dtype h3_minus1_data = get_data(H,num,channels,height,width,n,c,h+1,w+1); g3_diff = g3_diff + h_diff * g3_idx*h3_minus1_data; set_gate(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse,g3_diff); //idx_diff = h_diff*( (1-g(t))*x(t) + g(t)*h(t-1) ) Dtype g1_idx_diff = h_diff * ( (1-g1_data)*x_data + g1_data*h1_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,0,h,w,h+1,w-1,horizontal,reverse,g1_idx_diff); Dtype g2_idx_diff = h_diff * ( (1-g2_data)*x_data + g2_data*h2_minus1_data); set_gate_add(Idx_diff,num,3,height,width,n,1,h,w,h+1,w,horizontal,reverse,g2_idx_diff); Dtype g3_idx_diff = h_diff * ( (1-g3_data)*x_data + g3_data*h3_minus1_data); 
set_gate_add(Idx_diff,num,3,height,width,n,2,h,w,h+1,w+1,horizontal,reverse,g3_idx_diff); } } template <typename Dtype> void GateRecurrent2dLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* X = bottom[0]->gpu_data(); const Dtype* G1 = bottom[1]->gpu_data(); const Dtype* G2 = bottom[2]->gpu_data(); const Dtype* G3 = bottom[3]->gpu_data(); const Dtype* Idx = bottom[4]->gpu_data(); Dtype * H = top[0]->mutable_gpu_data(); if(horizontal_ && !reverse_) // left to right { const int count = height_ * channels_ * num_; for(int t=0;t<width_;t++) { forward_one_col_left_right<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,horizontal_,reverse_); CUDA_POST_KERNEL_CHECK; } } else if(horizontal_ && reverse_) // right to left { const int count = height_ * channels_ * num_; for(int t=width_ - 1; t>=0; t--) { forward_one_col_right_left<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,horizontal_,reverse_); CUDA_POST_KERNEL_CHECK; } } else if(!horizontal_ && !reverse_) // top to bottom { const int count = width_ * channels_ * num_; for(int t=0; t< height_; t++) { forward_one_row_top_bottom<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,horizontal_,reverse_); CUDA_POST_KERNEL_CHECK; } } else //bottom to top { const int count = width_ * channels_ * num_; for(int t=height_-1; t>=0; t--) { forward_one_row_bottom_top<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,horizontal_,reverse_); CUDA_POST_KERNEL_CHECK; } } } template <typename Dtype> void GateRecurrent2dLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* X = bottom[0]->gpu_data(); const Dtype* G1 = bottom[1]->gpu_data(); const Dtype* G2 = bottom[2]->gpu_data(); const Dtype* G3 = bottom[3]->gpu_data(); const Dtype* Idx = bottom[4]->gpu_data(); const Dtype * H = top[0]->gpu_data(); Dtype * H_diff = H_.mutable_gpu_diff(); caffe_copy(top[0]->count(),top[0]->gpu_diff(),H_diff); Dtype * X_diff = bottom[0]->mutable_gpu_diff(); Dtype * G1_diff = bottom[1]->mutable_gpu_diff(); Dtype * G2_diff = bottom[2]->mutable_gpu_diff(); Dtype * G3_diff = bottom[3]->mutable_gpu_diff(); Dtype * Idx_diff = bottom[4]->mutable_gpu_diff(); Dtype * H_cpudiff = H_.mutable_cpu_diff(); //SaveArray("topdiff.txt", H_.mutable_cpu_diff(),top[0]->count()); if(horizontal_ && ! 
reverse_) //left to right { const int count = height_ * channels_ * num_; for(int t = width_ -1; t>=0; t--) { backward_one_col_left_right<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,X_diff,G1_diff,G2_diff,G3_diff,Idx_diff,H_diff,horizontal_, reverse_); CUDA_POST_KERNEL_CHECK; } } else if(horizontal_ && reverse_) //right to left { const int count = height_ * channels_ * num_; for(int t = 0; t<width_; t++) { backward_one_col_right_left<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,X_diff,G1_diff,G2_diff,G3_diff,Idx_diff,H_diff,horizontal_, reverse_); CUDA_POST_KERNEL_CHECK; } } else if(!horizontal_ && !reverse_) //top to bottom { const int count = width_ * channels_ * num_; for(int t = height_-1; t>=0; t--) { backward_one_row_top_bottom<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,X_diff,G1_diff,G2_diff,G3_diff,Idx_diff,H_diff,horizontal_, reverse_); CUDA_POST_KERNEL_CHECK; } } else //bottom to top { const int count = width_ * channels_ * num_; for(int t = 0; t<height_; t++) { backward_one_row_bottom_top<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count,t, num_,channels_,height_,width_,X,G1,G2,G3,Idx,H,X_diff,G1_diff,G2_diff,G3_diff,Idx_diff,H_diff,horizontal_, reverse_); CUDA_POST_KERNEL_CHECK; } } } INSTANTIATE_LAYER_GPU_FUNCS(GateRecurrent2dLayer); } // namespace caffe
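// Each forward kernel above computes, per output pixel, a gated blend over three
// neighbours in the previous row or column,
//   h(t) = sum_k idx_k * ( (1 - g_k) * x(t) + g_k * h_k(t-1) ),
// and the backward kernels differentiate exactly that expression. The following is a
// simplified one-dimensional, single-neighbour host-side sketch of the same recurrence
// and its gradients; all names here are illustrative and do not come from the layer.
#include <vector>

void gated_forward_1d(const std::vector<float>& x, const std::vector<float>& g,
                      const std::vector<float>& idx, std::vector<float>& h) {
  for (size_t t = 0; t < x.size(); ++t) {
    float h_prev = (t == 0) ? 0.f : h[t - 1];
    h[t] = idx[t] * ((1.f - g[t]) * x[t] + g[t] * h_prev);
  }
}

void gated_backward_1d(const std::vector<float>& x, const std::vector<float>& g,
                       const std::vector<float>& idx, const std::vector<float>& h,
                       std::vector<float>& h_diff,   // seeded with the top gradient dL/dh
                       std::vector<float>& x_diff, std::vector<float>& g_diff,
                       std::vector<float>& idx_diff) {
  for (int t = (int)x.size() - 1; t >= 0; --t) {
    float h_prev = (t == 0) ? 0.f : h[t - 1];
    float hd = h_diff[t];
    x_diff[t]   = idx[t] * (1.f - g[t]) * hd;           // x(t)_diff = (1-g)*h(t)_diff
    g_diff[t]   = idx[t] * hd * (h_prev - x[t]);        // g(t)_diff = h_diff*(h(t-1) - x(t))
    idx_diff[t] = hd * ((1.f - g[t]) * x[t] + g[t] * h_prev);
    if (t > 0) h_diff[t - 1] += idx[t] * g[t] * hd;     // h(t-1)_diff += h(t)_diff * g(t)
  }
}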
e28705893504c735cfb12338a68c96cf7339bbda.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> // error checking macro #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) // const size_t N = 8ULL*1024ULL*1024ULL; // data size // const size_t N = 640ULL * 256UL; // data size const size_t N = 32ULL*1024ULL*1024ULL;;// data size //const size_t N = 256*640; // data size const int BLOCK_SIZE = 256; // CUDA maximum is 1024 // naive atomic reduction kernel __global__ void atomic_red(const float *gdata, float *out){ size_t idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx < N) atomicAdd(out, gdata[idx]); } __global__ void reduce(float *gdata, float *out){ __shared__ float sdata[BLOCK_SIZE]; int tid = threadIdx.x; sdata[tid] = 0.0f; size_t idx = threadIdx.x+blockDim.x*blockIdx.x; while (idx < N) { // grid stride loop to load data sdata[tid] += gdata[idx]; idx += gridDim.x*blockDim.x; } for (unsigned int s=blockDim.x/2; s>0; s>>=1) { __syncthreads(); if (tid < s) // parallel sweep reduction sdata[tid] += sdata[tid + s]; } if (tid == 0) out[blockIdx.x] = sdata[0]; } __global__ void reduce_a(float *gdata, float *out){ __shared__ float sdata[BLOCK_SIZE]; int tid = threadIdx.x; sdata[tid] = 0.0f; size_t idx = threadIdx.x+blockDim.x*blockIdx.x; while (idx < N) { // grid stride loop to load data sdata[tid] += gdata[idx]; idx += gridDim.x*blockDim.x; } for (unsigned int s=blockDim.x/2; s>0; s>>=1) { __syncthreads(); if (tid < s) // parallel sweep reduction sdata[tid] += sdata[tid + s]; } if (tid == 0) atomicAdd(out, sdata[0]); } __global__ void reduce_ws(float *gdata, float *out){ __shared__ float sdata[32]; int tid = threadIdx.x; int idx = threadIdx.x+blockDim.x*blockIdx.x; float val = 0.0f; unsigned mask = 0xFFFFFFFFU; int lane = threadIdx.x % warpSize; int warpID = threadIdx.x / warpSize; while (idx < N) { // grid stride loop to load val += gdata[idx]; idx += gridDim.x*blockDim.x; } // 1st warp-shuffle reduction for (int offset = warpSize/2; offset > 0; offset >>= 1) val += __shfl_down_sync(mask, val, offset); if (lane == 0) sdata[warpID] = val; __syncthreads(); // put warp results in shared mem // hereafter, just warp 0 if (warpID == 0){ // reload val from shared mem if warp existed val = (tid < blockDim.x/warpSize)?sdata[lane]:0; // final warp-shuffle reduction for (int offset = warpSize/2; offset > 0; offset >>= 1) val += __shfl_down_sync(mask, val, offset); if (tid == 0) atomicAdd(out, val); } } int main(){ float *h_A, *h_sum, *d_A, *d_sum; h_A = new float[N]; // allocate space for data in host memory h_sum = new float; for (int i = 0; i < N; i++) // initialize matrix in host memory h_A[i] = 1.0f; hipMalloc(&d_A, N*sizeof(float)); // allocate device space for A hipMalloc(&d_sum, sizeof(float)); // allocate device space for sum cudaCheckErrors("hipMalloc failure"); // error checking // copy matrix A to device: hipMemcpy(d_A, h_A, N*sizeof(float), hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy H2D failure"); hipMemset(d_sum, 0, sizeof(float)); cudaCheckErrors("hipMemset failure"); //cuda processing sequence step 1 is complete hipLaunchKernelGGL(( atomic_red), dim3((N+BLOCK_SIZE-1)/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_A, d_sum); cudaCheckErrors("atomic reduction kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums 
from device to host: hipMemcpy(h_sum, d_sum, sizeof(float), hipMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("atomic reduction kernel execution failure or hipMemcpy H2D failure"); if (*h_sum != (float)N) {printf("atomic sum reduction incorrect!\n"); return -1;} printf("atomic sum reduction correct!\n"); const int blocks = 640; hipMemset(d_sum, 0, sizeof(float)); cudaCheckErrors("hipMemset failure"); //cuda processing sequence step 1 is complete hipLaunchKernelGGL(( reduce_a), dim3(blocks), dim3(BLOCK_SIZE), 0, 0, d_A, d_sum); cudaCheckErrors("reduction w/atomic kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: hipMemcpy(h_sum, d_sum, sizeof(float), hipMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("reduction w/atomic kernel execution failure or hipMemcpy H2D failure"); if (*h_sum != (float)N) {printf("reduction w/atomic sum incorrect!\n"); return -1;} printf("reduction w/atomic sum correct!\n"); hipMemset(d_sum, 0, sizeof(float)); cudaCheckErrors("hipMemset failure"); //cuda processing sequence step 1 is complete hipLaunchKernelGGL(( reduce_ws), dim3(blocks), dim3(BLOCK_SIZE), 0, 0, d_A, d_sum); cudaCheckErrors("reduction warp shuffle kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: hipMemcpy(h_sum, d_sum, sizeof(float), hipMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("reduction warp shuffle kernel execution failure or hipMemcpy H2D failure"); if (*h_sum != (float)N) {printf("reduction warp shuffle sum incorrect!\n"); return -1;} printf("reduction warp shuffle sum correct!\n"); return 0; }
e28705893504c735cfb12338a68c96cf7339bbda.cu
#include <stdio.h> // error checking macro #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) // const size_t N = 8ULL*1024ULL*1024ULL; // data size // const size_t N = 640ULL * 256UL; // data size const size_t N = 32ULL*1024ULL*1024ULL;;// data size //const size_t N = 256*640; // data size const int BLOCK_SIZE = 256; // CUDA maximum is 1024 // naive atomic reduction kernel __global__ void atomic_red(const float *gdata, float *out){ size_t idx = threadIdx.x+blockDim.x*blockIdx.x; if (idx < N) atomicAdd(out, gdata[idx]); } __global__ void reduce(float *gdata, float *out){ __shared__ float sdata[BLOCK_SIZE]; int tid = threadIdx.x; sdata[tid] = 0.0f; size_t idx = threadIdx.x+blockDim.x*blockIdx.x; while (idx < N) { // grid stride loop to load data sdata[tid] += gdata[idx]; idx += gridDim.x*blockDim.x; } for (unsigned int s=blockDim.x/2; s>0; s>>=1) { __syncthreads(); if (tid < s) // parallel sweep reduction sdata[tid] += sdata[tid + s]; } if (tid == 0) out[blockIdx.x] = sdata[0]; } __global__ void reduce_a(float *gdata, float *out){ __shared__ float sdata[BLOCK_SIZE]; int tid = threadIdx.x; sdata[tid] = 0.0f; size_t idx = threadIdx.x+blockDim.x*blockIdx.x; while (idx < N) { // grid stride loop to load data sdata[tid] += gdata[idx]; idx += gridDim.x*blockDim.x; } for (unsigned int s=blockDim.x/2; s>0; s>>=1) { __syncthreads(); if (tid < s) // parallel sweep reduction sdata[tid] += sdata[tid + s]; } if (tid == 0) atomicAdd(out, sdata[0]); } __global__ void reduce_ws(float *gdata, float *out){ __shared__ float sdata[32]; int tid = threadIdx.x; int idx = threadIdx.x+blockDim.x*blockIdx.x; float val = 0.0f; unsigned mask = 0xFFFFFFFFU; int lane = threadIdx.x % warpSize; int warpID = threadIdx.x / warpSize; while (idx < N) { // grid stride loop to load val += gdata[idx]; idx += gridDim.x*blockDim.x; } // 1st warp-shuffle reduction for (int offset = warpSize/2; offset > 0; offset >>= 1) val += __shfl_down_sync(mask, val, offset); if (lane == 0) sdata[warpID] = val; __syncthreads(); // put warp results in shared mem // hereafter, just warp 0 if (warpID == 0){ // reload val from shared mem if warp existed val = (tid < blockDim.x/warpSize)?sdata[lane]:0; // final warp-shuffle reduction for (int offset = warpSize/2; offset > 0; offset >>= 1) val += __shfl_down_sync(mask, val, offset); if (tid == 0) atomicAdd(out, val); } } int main(){ float *h_A, *h_sum, *d_A, *d_sum; h_A = new float[N]; // allocate space for data in host memory h_sum = new float; for (int i = 0; i < N; i++) // initialize matrix in host memory h_A[i] = 1.0f; cudaMalloc(&d_A, N*sizeof(float)); // allocate device space for A cudaMalloc(&d_sum, sizeof(float)); // allocate device space for sum cudaCheckErrors("cudaMalloc failure"); // error checking // copy matrix A to device: cudaMemcpy(d_A, h_A, N*sizeof(float), cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy H2D failure"); cudaMemset(d_sum, 0, sizeof(float)); cudaCheckErrors("cudaMemset failure"); //cuda processing sequence step 1 is complete atomic_red<<<(N+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>>(d_A, d_sum); cudaCheckErrors("atomic reduction kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: cudaMemcpy(h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost); //cuda processing sequence 
step 3 is complete cudaCheckErrors("atomic reduction kernel execution failure or cudaMemcpy H2D failure"); if (*h_sum != (float)N) {printf("atomic sum reduction incorrect!\n"); return -1;} printf("atomic sum reduction correct!\n"); const int blocks = 640; cudaMemset(d_sum, 0, sizeof(float)); cudaCheckErrors("cudaMemset failure"); //cuda processing sequence step 1 is complete reduce_a<<<blocks, BLOCK_SIZE>>>(d_A, d_sum); cudaCheckErrors("reduction w/atomic kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: cudaMemcpy(h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("reduction w/atomic kernel execution failure or cudaMemcpy H2D failure"); if (*h_sum != (float)N) {printf("reduction w/atomic sum incorrect!\n"); return -1;} printf("reduction w/atomic sum correct!\n"); cudaMemset(d_sum, 0, sizeof(float)); cudaCheckErrors("cudaMemset failure"); //cuda processing sequence step 1 is complete reduce_ws<<<blocks, BLOCK_SIZE>>>(d_A, d_sum); cudaCheckErrors("reduction warp shuffle kernel launch failure"); //cuda processing sequence step 2 is complete // copy vector sums from device to host: cudaMemcpy(h_sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost); //cuda processing sequence step 3 is complete cudaCheckErrors("reduction warp shuffle kernel execution failure or cudaMemcpy H2D failure"); if (*h_sum != (float)N) {printf("reduction warp shuffle sum incorrect!\n"); return -1;} printf("reduction warp shuffle sum correct!\n"); return 0; }
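// Two notes on the reduction file above, plus an illustrative completion.
// (1) The block-local reduce() kernel, which writes one partial sum per block to out[],
//     is defined but never launched in main(). One common way to finish it is a second
//     pass over the per-block partial sums, e.g. on the host as sketched below; the names
//     d_partial / h_partial are illustrative and not part of the original file.
// (2) With N = 32M elements of value 1.0f, the naive atomic_red() accumulator stalls at
//     2^24 = 16777216 (adding 1.0f to a float of that magnitude rounds back down), so its
//     correctness check is expected to fail at this problem size and the program returns
//     early; the commented-out N = 8M stays below that limit.

  // --- sketch: to be placed inside main() after d_A has been filled and blocks defined ---
  float *d_partial, *h_partial;
  h_partial = new float[blocks];
  cudaMalloc(&d_partial, blocks * sizeof(float));
  cudaCheckErrors("cudaMalloc failure");

  reduce<<<blocks, BLOCK_SIZE>>>(d_A, d_partial);            // stage 1: one sum per block
  cudaCheckErrors("two-stage reduction kernel launch failure");

  cudaMemcpy(h_partial, d_partial, blocks * sizeof(float), cudaMemcpyDeviceToHost);
  cudaCheckErrors("cudaMemcpy D2H failure");

  double total = 0.0;                                        // stage 2: finish on the host
  for (int i = 0; i < blocks; i++) total += h_partial[i];
  if (total != (double)N) printf("two-stage sum reduction incorrect!\n");
  else printf("two-stage sum reduction correct!\n");

  cudaFree(d_partial);
  delete[] h_partial;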
fb9efdf0679fa0a55785661cf2634d23c1c57089.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <cstring>

// checkCuda() is called but not defined in this fragment; it is assumed to be the
// usual error-checking helper defined elsewhere in the original project.

// Renamed from main() (the fragment defined main() twice, which cannot compile);
// the name pageable_copy_demo is introduced here, not taken from the original.
int pageable_copy_demo() {
  const int blockSize = 256, nStreams = 4;
  const unsigned int N = 1048576;
  const unsigned int bytes = N * sizeof(int);
  int *h_a = (int*)malloc(bytes);
  int *d_a;
  hipMalloc((int**)&d_a, bytes);
  memset(h_a, 0, bytes);
  hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
  hipMemcpy(h_a, d_a, bytes, hipMemcpyDeviceToHost);
  return 0;
}

int main() {
  const int blockSize = 256, nStreams = 4;
  const int n = 4 * 1024 * blockSize;
  const int streamSize = n / nStreams;
  const int streamBytes = streamSize * sizeof(float);
  const int bytes = n * sizeof(float);
  float *a, *d_a;
  hipStream_t stream[nStreams];   // was stream[nStream]: undeclared identifier
  for (int i = 0; i < nStreams; ++i) {
    checkCuda(hipStreamCreate(&stream[i]));
  }
  checkCuda(hipHostMalloc((void**)&a, bytes));
  checkCuda(hipMalloc((void**)&d_a, bytes));
  memset(a, 0, bytes);            // moved after the allocation it depends on
  for (int i = 0; i < nStreams; ++i) {
    int offset = i * streamSize;
    // The original left this call without arguments; the per-stream chunked
    // host-to-device copy is the standard completion of this pattern.
    checkCuda(hipMemcpyAsync(&d_a[offset], &a[offset], streamBytes,
                             hipMemcpyHostToDevice, stream[i]));
    // The kernel launch and device-to-host copy were left unfinished here.
  }
  return 0;
}
fb9efdf0679fa0a55785661cf2634d23c1c57089.cu
#include <cstdlib>
#include <cstring>

// checkCuda() is called but not defined in this fragment; it is assumed to be the
// usual error-checking helper defined elsewhere in the original project.

// Renamed from main() (the fragment defined main() twice, which cannot compile);
// the name pageable_copy_demo is introduced here, not taken from the original.
int pageable_copy_demo() {
  const int blockSize = 256, nStreams = 4;
  const unsigned int N = 1048576;
  const unsigned int bytes = N * sizeof(int);
  int *h_a = (int*)malloc(bytes);
  int *d_a;
  cudaMalloc((int**)&d_a, bytes);
  memset(h_a, 0, bytes);
  cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost);
  return 0;
}

int main() {
  const int blockSize = 256, nStreams = 4;
  const int n = 4 * 1024 * blockSize;
  const int streamSize = n / nStreams;
  const int streamBytes = streamSize * sizeof(float);
  const int bytes = n * sizeof(float);
  float *a, *d_a;
  cudaStream_t stream[nStreams];  // was stream[nStream]: undeclared identifier
  for (int i = 0; i < nStreams; ++i) {
    checkCuda(cudaStreamCreate(&stream[i]));
  }
  checkCuda(cudaMallocHost((void**)&a, bytes));
  checkCuda(cudaMalloc((void**)&d_a, bytes));
  memset(a, 0, bytes);            // moved after the allocation it depends on
  for (int i = 0; i < nStreams; ++i) {
    int offset = i * streamSize;
    // The original left this call without arguments; the per-stream chunked
    // host-to-device copy is the standard completion of this pattern.
    checkCuda(cudaMemcpyAsync(&d_a[offset], &a[offset], streamBytes,
                              cudaMemcpyHostToDevice, stream[i]));
    // The kernel launch and device-to-host copy were left unfinished here.
  }
  return 0;
}
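// The stream example above (in both its HIP and CUDA forms) stops before the kernel
// launch and the copy back. Below is a self-contained sketch of the copy/compute
// overlap pattern it appears to be building toward. The increment kernel, the
// checkCuda helper, and the clean-up calls are assumptions made for this sketch and
// are not taken from the original fragment.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstring>

inline cudaError_t checkCuda(cudaError_t result) {
  if (result != cudaSuccess)
    fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(result));
  return result;
}

// Placeholder per-element kernel so each stream has work to overlap with its copies.
__global__ void increment(float *a, int offset) {
  int i = offset + blockIdx.x * blockDim.x + threadIdx.x;
  a[i] += 1.0f;
}

int main() {
  const int blockSize = 256, nStreams = 4;
  const int n = 4 * 1024 * blockSize;
  const int streamSize = n / nStreams;
  const int streamBytes = streamSize * sizeof(float);
  const int bytes = n * sizeof(float);

  float *a, *d_a;
  checkCuda(cudaMallocHost((void**)&a, bytes));  // pinned host memory: needed for real async overlap
  checkCuda(cudaMalloc((void**)&d_a, bytes));
  memset(a, 0, bytes);

  cudaStream_t stream[nStreams];
  for (int i = 0; i < nStreams; ++i)
    checkCuda(cudaStreamCreate(&stream[i]));

  // Each stream copies its chunk in, processes it, and copies it back; chunks in
  // different streams can overlap with one another.
  for (int i = 0; i < nStreams; ++i) {
    int offset = i * streamSize;
    checkCuda(cudaMemcpyAsync(&d_a[offset], &a[offset], streamBytes,
                              cudaMemcpyHostToDevice, stream[i]));
    increment<<<streamSize / blockSize, blockSize, 0, stream[i]>>>(d_a, offset);
    checkCuda(cudaMemcpyAsync(&a[offset], &d_a[offset], streamBytes,
                              cudaMemcpyDeviceToHost, stream[i]));
  }
  checkCuda(cudaDeviceSynchronize());

  for (int i = 0; i < nStreams; ++i)
    checkCuda(cudaStreamDestroy(stream[i]));
  checkCuda(cudaFree(d_a));
  checkCuda(cudaFreeHost(a));
  return 0;
}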
00fffdace126745669ff7fb20e50a11eb0713aaf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Kernel definition
__global__ void VecAdd(float* A, float* B, float* C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

int main()
{
    ...
    // Kernel invocation with N threads
    hipLaunchKernelGGL(( VecAdd), dim3(1), dim3(N), 0, 0, A, B, C);
    ...
}
00fffdace126745669ff7fb20e50a11eb0713aaf.cu
// Kernel definition
__global__ void VecAdd(float* A, float* B, float* C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

int main()
{
    ...
    // Kernel invocation with N threads
    VecAdd<<<1, N>>>(A, B, C);
    ...
}
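// The two "..." placeholders above are elided in the original (it resembles the
// introductory VecAdd example from the CUDA Programming Guide) and are left untouched.
// For reference, a self-contained variant with the host-side setup filled in under
// common assumptions (N chosen arbitrarily, a single block, error checking omitted);
// none of this host code comes from the original file.
#include <cuda_runtime.h>

__global__ void VecAdd(float* A, float* B, float* C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

int main()
{
    const int N = 256;                      // illustrative size: one block of N threads
    const size_t bytes = N * sizeof(float);

    float h_A[N], h_B[N], h_C[N];
    for (int i = 0; i < N; ++i) { h_A[i] = float(i); h_B[i] = 2.0f * i; }

    float *A, *B, *C;
    cudaMalloc(&A, bytes);  cudaMalloc(&B, bytes);  cudaMalloc(&C, bytes);
    cudaMemcpy(A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(B, h_B, bytes, cudaMemcpyHostToDevice);

    // Kernel invocation with N threads
    VecAdd<<<1, N>>>(A, B, C);

    cudaMemcpy(h_C, C, bytes, cudaMemcpyDeviceToHost);
    cudaFree(A);  cudaFree(B);  cudaFree(C);
    return 0;
}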
e545b237caff13e1e6b9968fd585a8edaba90869.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> void CUDA_CALL(hipError_t result) { if (result != hipSuccess) { std::cout << "Error " << result << "\n"; std::cout << hipGetErrorString(result) << "\n"; exit(1); } } texture<float, 2> texRef2; // Simple transformation kernel __global__ void transformKernel(float* output, int width, int height, int output_width, int output_height, float theta) { // Calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; float tu = ((float)width * x) / output_width; float tv = ((float)height * y) / output_height; output[y * output_width + x] = tex2D(texRef2, tu, tv); } // Host code int main() { int width = 32; int height = 16; int size = width * height; float * h_data = (float*) malloc(width * height * sizeof(float)); for (int i = 0 ; i < height; i++) for (int j = 0; j < width; j++) h_data[i * width + j] = i * width + j + 1; printf ("\n Original array \n"); for (int i = 0; i < height; i++) { for (int j = 0 ; j < width; j++) printf ("%f ", h_data[i*width + j]); printf ("\n"); } float * d_data; CUDA_CALL( hipMalloc(&d_data, size * sizeof(float)) ); // Copy to device memory some data located at address h_data // in host memory CUDA_CALL( hipMemcpy(d_data, h_data, size * sizeof(float), hipMemcpyHostToDevice) ); // Set texture parameters //texRef.addressMode[0] = hipAddressModeWrap; //texRef.addressMode[1] = hipAddressModeWrap; //texRef.filterMode = hipFilterModeLinear; //texRef.normalized = true; // Bind the array to the texture reference hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipArray* cuArray; CUDA_CALL( hipMallocArray(&cuArray, &texRef2.channelDesc, width, height) ); CUDA_CALL( hipMemcpy2DToArray(cuArray, 0, 0, d_data, sizeof(float) * width, sizeof(float) * width, height, hipMemcpyDeviceToDevice) ); hipBindTextureToArray(texRef2, cuArray); hipDeviceSynchronize(); int output_width = 32; int output_height = 16; // Set up block dims. dim3 dimBlock(4, 4); dim3 dimGrid((output_width - 1)/dimBlock.x + 1, (output_height - 1)/dimBlock.y + 1); printf("blockDim = %d, %d\n", dimBlock.x, dimBlock.y); printf("gridDim = %d, %d\n", dimGrid.x, dimGrid.y); // Allocate result of transformation in device memory int output_size = dimGrid.x * dimBlock.x * dimGrid.y * dimBlock.y; printf("output_size %d\n", output_size); float* output; CUDA_CALL( hipMalloc(&output, output_size * sizeof(float)) ); float * h_output = (float*)malloc(output_size * sizeof(float)); hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, width, height, output_width, output_height, 90); hipDeviceSynchronize(); CUDA_CALL( hipMemcpy(h_output, output, output_size * sizeof(float), hipMemcpyDeviceToHost) ); printf ("\nAfter operation\n"); for (int i = 0; i < output_height; i++) { for (int j = 0; j < output_width; j++) printf ("%d %f\n", i*output_width + j, h_output[i*output_width + j]); printf ("\n"); } system ("pause"); // Free device memory hipFree(d_data); hipFree(output); return 0; }
e545b237caff13e1e6b9968fd585a8edaba90869.cu
#include <iostream> #include <stdio.h> void CUDA_CALL(cudaError_t result) { if (result != cudaSuccess) { std::cout << "Error " << result << "\n"; std::cout << cudaGetErrorString(result) << "\n"; exit(1); } } texture<float, 2> texRef2; // Simple transformation kernel __global__ void transformKernel(float* output, int width, int height, int output_width, int output_height, float theta) { // Calculate normalized texture coordinates unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; float tu = ((float)width * x) / output_width; float tv = ((float)height * y) / output_height; output[y * output_width + x] = tex2D(texRef2, tu, tv); } // Host code int main() { int width = 32; int height = 16; int size = width * height; float * h_data = (float*) malloc(width * height * sizeof(float)); for (int i = 0 ; i < height; i++) for (int j = 0; j < width; j++) h_data[i * width + j] = i * width + j + 1; printf ("\n Original array \n"); for (int i = 0; i < height; i++) { for (int j = 0 ; j < width; j++) printf ("%f ", h_data[i*width + j]); printf ("\n"); } float * d_data; CUDA_CALL( cudaMalloc(&d_data, size * sizeof(float)) ); // Copy to device memory some data located at address h_data // in host memory CUDA_CALL( cudaMemcpy(d_data, h_data, size * sizeof(float), cudaMemcpyHostToDevice) ); // Set texture parameters //texRef.addressMode[0] = cudaAddressModeWrap; //texRef.addressMode[1] = cudaAddressModeWrap; //texRef.filterMode = cudaFilterModeLinear; //texRef.normalized = true; // Bind the array to the texture reference cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaArray* cuArray; CUDA_CALL( cudaMallocArray(&cuArray, &texRef2.channelDesc, width, height) ); CUDA_CALL( cudaMemcpy2DToArray(cuArray, 0, 0, d_data, sizeof(float) * width, sizeof(float) * width, height, cudaMemcpyDeviceToDevice) ); cudaBindTextureToArray(texRef2, cuArray); cudaDeviceSynchronize(); int output_width = 32; int output_height = 16; // Set up block dims. dim3 dimBlock(4, 4); dim3 dimGrid((output_width - 1)/dimBlock.x + 1, (output_height - 1)/dimBlock.y + 1); printf("blockDim = %d, %d\n", dimBlock.x, dimBlock.y); printf("gridDim = %d, %d\n", dimGrid.x, dimGrid.y); // Allocate result of transformation in device memory int output_size = dimGrid.x * dimBlock.x * dimGrid.y * dimBlock.y; printf("output_size %d\n", output_size); float* output; CUDA_CALL( cudaMalloc(&output, output_size * sizeof(float)) ); float * h_output = (float*)malloc(output_size * sizeof(float)); transformKernel<<<dimGrid, dimBlock>>>(output, width, height, output_width, output_height, 90); cudaDeviceSynchronize(); CUDA_CALL( cudaMemcpy(h_output, output, output_size * sizeof(float), cudaMemcpyDeviceToHost) ); printf ("\nAfter operation\n"); for (int i = 0; i < output_height; i++) { for (int j = 0; j < output_width; j++) printf ("%d %f\n", i*output_width + j, h_output[i*output_width + j]); printf ("\n"); } system ("pause"); // Free device memory cudaFree(d_data); cudaFree(output); return 0; }
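// The transformation example above relies on the legacy texture-reference API
// (texture<float, 2>, cudaBindTextureToArray), which is deprecated in newer CUDA
// releases and removed in CUDA 12. A sketch of the equivalent setup with the
// texture-object API follows; the helper and kernel names are chosen here for
// illustration, and the descriptor settings assume the defaults the original
// reference relied on (clamp addressing, point filtering, unnormalized coordinates).
__global__ void transformKernelObj(cudaTextureObject_t tex, float* output,
                                   int width, int height,
                                   int output_width, int output_height)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    float tu = ((float)width  * x) / output_width;
    float tv = ((float)height * y) / output_height;
    output[y * output_width + x] = tex2D<float>(tex, tu, tv);
}

// Builds a texture object over an already-populated cudaArray; call from main()
// in place of cudaBindTextureToArray(texRef2, cuArray), pass the handle to the
// kernel, and release it afterwards with cudaDestroyTextureObject().
cudaTextureObject_t makeTexObj(cudaArray* cuArray)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = cuArray;

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0]   = cudaAddressModeClamp;
    texDesc.addressMode[1]   = cudaAddressModeClamp;
    texDesc.filterMode       = cudaFilterModePoint;
    texDesc.readMode         = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
    return texObj;
}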
a34942e665e650393472d67d79ed2272b4f1c078.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 19-Oct-2012 16:21:07 // // user function __device__ #include "res_calc.h" // CUDA kernel function __global__ void op_cuda_res_calc( float *ind_arg0, float *ind_arg1, float *ind_arg2, float *ind_arg3, int *ind_map, short *arg_map, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg6_l[4]; float arg7_l[4]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ int *ind_arg2_map, ind_arg2_size; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ float *ind_arg0_s; __shared__ float *ind_arg1_s; __shared__ float *ind_arg2_s; __shared__ float *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*4]; ind_arg1_size = ind_arg_sizes[1+blockId*4]; ind_arg2_size = ind_arg_sizes[2+blockId*4]; ind_arg3_size = ind_arg_sizes[3+blockId*4]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4]; ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4]; ind_arg2_map = &ind_map[4*set_size] + ind_arg_offs[2+blockId*4]; ind_arg3_map = &ind_map[6*set_size] + ind_arg_offs[3+blockId*4]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2); ind_arg1_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4); ind_arg2_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1); ind_arg3_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x) ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4]; for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x) ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3_s[n] = ZERO_float; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<4; d++) arg6_l[d] = ZERO_float; for (int d=0; d<4; d++) arg7_l[d] = ZERO_float; // user-supplied kernel call res_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg1_s+arg_map[2*set_size+n+offset_b]*4, ind_arg1_s+arg_map[3*set_size+n+offset_b]*4, ind_arg2_s+arg_map[4*set_size+n+offset_b]*1, ind_arg2_s+arg_map[5*set_size+n+offset_b]*1, arg6_l, arg7_l ); col2 = colors[n+offset_b]; } // store local variables int arg6_map; int arg7_map; if (col2>=0) { arg6_map = arg_map[6*set_size+n+offset_b]; arg7_map = arg_map[7*set_size+n+offset_b]; } for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<4; d++) ind_arg3_s[d+arg6_map*4] += arg6_l[d]; for (int d=0; d<4; d++) ind_arg3_s[d+arg7_map*4] += arg7_l[d]; } __syncthreads(); } 
} // apply pointered write/increment for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } // host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7 ){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } // get plan #ifdef OP_PART_SIZE_2 int part_size = OP_PART_SIZE_2; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(2); OP_kernels[2].name = name; OP_kernels[2].count += 1; if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); op_timers_core(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all_cuda(nargs,args); #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0, (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg4.data_d, (float *)arg6.data_d, Plan->ind_map, Plan->loc_map, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_res_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(2); OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
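// The increment section of op_cuda_res_calc above avoids shared-memory races by staging
// writes per thread colour: threads whose elements might touch the same cell are given
// different colours by the OP2 plan, and each colour's increments are applied in a
// separate pass with __syncthreads() between passes (threads with no element, col2 == -1,
// still reach every barrier). A stripped-down, single-block sketch of that pattern is
// shown below; the kernel and parameter names are illustrative and are not part of the
// generated code.
__global__ void colour_staged_increment(float *out, const int *target,
                                        const int *colour, int nelem, int ncolour)
{
  int n = threadIdx.x;                 // one block; the colouring guarantees that two
  float contribution = 0.0f;           // same-coloured elements never share a target
  int mycolour = -1;
  if (n < nelem) {
    contribution = 1.0f;               // stands in for the value computed by res_calc()
    mycolour = colour[n];
  }
  for (int col = 0; col < ncolour; ++col) {
    if (mycolour == col)
      out[target[n]] += contribution;  // only one colour writes in any given pass
    __syncthreads();                   // every thread reaches the barrier each pass
  }
}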
a34942e665e650393472d67d79ed2272b4f1c078.cu
// // auto-generated by op2.m on 19-Oct-2012 16:21:07 // // user function __device__ #include "res_calc.h" // CUDA kernel function __global__ void op_cuda_res_calc( float *ind_arg0, float *ind_arg1, float *ind_arg2, float *ind_arg3, int *ind_map, short *arg_map, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg6_l[4]; float arg7_l[4]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ int *ind_arg2_map, ind_arg2_size; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ float *ind_arg0_s; __shared__ float *ind_arg1_s; __shared__ float *ind_arg2_s; __shared__ float *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*4]; ind_arg1_size = ind_arg_sizes[1+blockId*4]; ind_arg2_size = ind_arg_sizes[2+blockId*4]; ind_arg3_size = ind_arg_sizes[3+blockId*4]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*4]; ind_arg1_map = &ind_map[2*set_size] + ind_arg_offs[1+blockId*4]; ind_arg2_map = &ind_map[4*set_size] + ind_arg_offs[2+blockId*4]; ind_arg3_map = &ind_map[6*set_size] + ind_arg_offs[3+blockId*4]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2); ind_arg1_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4); ind_arg2_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1); ind_arg3_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x) ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4]; for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x) ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x) ind_arg3_s[n] = ZERO_float; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<4; d++) arg6_l[d] = ZERO_float; for (int d=0; d<4; d++) arg7_l[d] = ZERO_float; // user-supplied kernel call res_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg1_s+arg_map[2*set_size+n+offset_b]*4, ind_arg1_s+arg_map[3*set_size+n+offset_b]*4, ind_arg2_s+arg_map[4*set_size+n+offset_b]*1, ind_arg2_s+arg_map[5*set_size+n+offset_b]*1, arg6_l, arg7_l ); col2 = colors[n+offset_b]; } // store local variables int arg6_map; int arg7_map; if (col2>=0) { arg6_map = arg_map[6*set_size+n+offset_b]; arg7_map = arg_map[7*set_size+n+offset_b]; } for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<4; d++) ind_arg3_s[d+arg6_map*4] += arg6_l[d]; for (int d=0; d<4; d++) ind_arg3_s[d+arg7_map*4] += arg7_l[d]; } __syncthreads(); } } // apply pointered write/increment for (int n=threadIdx.x; n<ind_arg3_size*4; 
n+=blockDim.x) ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } // host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7 ){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } // get plan #ifdef OP_PART_SIZE_2 int part_size = OP_PART_SIZE_2; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(2); OP_kernels[2].name = name; OP_kernels[2].count += 1; if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); op_timers_core(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all_cuda(nargs,args); #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; op_cuda_res_calc<<<nblocks,nthread,nshared>>>( (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg4.data_d, (float *)arg6.data_d, Plan->ind_map, Plan->loc_map, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(cudaDeviceSynchronize()); cutilCheckMsg("op_cuda_res_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(2); OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
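// The .hip and .cu variants of this file differ essentially only in the
// runtime header and the launch syntax (hipLaunchKernelGGL(...) versus the
// <<<...>>> form used above). Below is a small sketch of the grid-sizing
// trick both share: when a colour needs more than 65535 blocks (the classic
// per-dimension grid limit), the count is folded into a 2D grid and the
// kernel rebuilds a linear block id as blockIdx.x + blockIdx.y*gridDim.x.
// make_grid and nblk are illustrative names, not from the source.
static dim3 make_grid(int nblk) {
  dim3 grid(nblk >= (1 << 16) ? 65535 : nblk,
            nblk >= (1 << 16) ? (nblk - 1) / 65535 + 1 : 1,
            1);
  return grid;                                     // grid.x * grid.y >= nblk
}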
b75cf6f2102dda81ba83a39c292b44607fd05e3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 08.11.2018 // @author [email protected] // #include <system/op_boilerplate.h> #include <types/types.h> #include "../legacy_ops.h" #include "../scalar_bool.h" using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_KERNEL void scalarAlongDimension(void const* x, sd::LongType const* xShapeInfo, void* extraParams, void* z, sd::LongType const* zShapeInfo, void const* scalars, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { functions::scalar::ScalarBoolTransform<X, Z>::template transformCuda<OpType>( x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_KERNEL void scalarSimpleShaped(void const* x, void const* y, sd::LongType const* xShapeInfo, void* params, void* z, sd::LongType const* zShapeInfo, int* allocationBuffer) { functions::scalar::ScalarBoolTransform<X, Z>::template transformCuda<OpType>(y, x, xShapeInfo, params, z, zShapeInfo, allocationBuffer); } // *********************************************************************// // *********************************************************************// namespace functions { namespace scalar { //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ScalarBoolTransform<X, Z>::transformCuda(void const* vscalar, void const* vy, sd::LongType const* yShapeInfo, void* vparams, void* vz, sd::LongType const* zShapeInfo, int* allocationBuffer) { auto scalar = reinterpret_cast<X const*>(vscalar)[0]; auto y = reinterpret_cast<X const*>(vy); auto params = reinterpret_cast<X*>(vparams); auto z = reinterpret_cast<Z*>(vz); auto yRank = shape::rank(yShapeInfo); auto yEWS = shape::elementWiseStride(yShapeInfo); auto yShape = shape::shapeOf(yShapeInfo); auto yStride = shape::stride(yShapeInfo); auto zRank = shape::rank(zShapeInfo); auto zEWS = shape::elementWiseStride(zShapeInfo); auto zShape = shape::shapeOf(zShapeInfo); auto zStride = shape::stride(zShapeInfo); int totalThreads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int len; if (threadIdx.x == 0) len = shape::length(yShapeInfo); __syncthreads(); if (yEWS >= 1 && zEWS >= 1 && shape::order(yShapeInfo) 
== shape::order(zShapeInfo)) { transformCuda<OpType>(len, vscalar, vy, yEWS, vparams, vz, zEWS, allocationBuffer); } else { for (sd::LongType i = tid; i < len; i += totalThreads) z[shape::getIndexOffset(i, zShapeInfo)] = OpType::op(y[shape::getIndexOffset(i, yShapeInfo)], scalar, params); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ScalarBoolTransform<X, Z>::transformCuda(sd::LongType len, void const* vx, void const* vy, sd::LongType yEWS, void* vparams, void* vz, sd::LongType zEWS, int* allocationBuffer) { auto x = reinterpret_cast<X const*>(vx)[0]; auto y = reinterpret_cast<X const*>(vy); auto z = reinterpret_cast<Z*>(vz); auto params = reinterpret_cast<X*>(vparams); int totalThreads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; sd::LongType i = tid; if (yEWS == 1 && zEWS == 1) { for (; i < len; i += totalThreads) z[i] = OpType::op(y[i], x, params); } else { for (; i < len; i += totalThreads) z[i * zEWS] = OpType::op(y[i * yEWS], x, params); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ScalarBoolTransform<X, Z>::transformCuda( void const* vx, sd::LongType const* xShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo, void const* vscalars, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto x = reinterpret_cast<X const*>(vx); auto scalars = reinterpret_cast<X const*>(vscalars); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); if (tadShapeInfoZ == nullptr) { tadShapeInfoZ = tadShapeInfo; tadOffsetsZ = tadOffsets; } // tad preparation auto tadEws = shape::elementWiseStride(tadShapeInfo); auto zEws = shape::elementWiseStride(tadShapeInfoZ); auto tadLength = shape::length(tadShapeInfo); // shape::tadLength(xShapeInfo, dimension, dimensionLength); auto numTads = shape::length(xShapeInfo) / tadLength; if (tadEws > 0 && zEws > 0 && shape::order(tadShapeInfo) == shape::order(zShapeInfo)) { // main loop, rolling over tads for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Z* oZ = z + tadOffsetsZ[r]; auto oX = x + tadOffsets[r]; auto s = scalars[r]; for (int f = threadIdx.x; f < tadLength; f += blockDim.x) oZ[f * zEws] = OpType::op(oX[f * tadEws], s, extraParams); } } else { // main loop, rolling over tads for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Z* oZ = z + tadOffsetsZ[r]; auto oX = x + tadOffsets[r]; auto s = scalars[r]; for (int f = threadIdx.x; f < tadLength; f += blockDim.x) oZ[shape::getIndexOffset(f, tadShapeInfoZ)] = OpType::op(oX[shape::getIndexOffset(f, tadShapeInfo)], s, extraParams); } } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_HOST void ScalarBoolTransform<X, Z>::intermediateAlongDimension( dim3& launchDims, hipStream_t* stream, void const* x, sd::LongType const* xShapeInfo, void* z, sd::LongType const* zShapeInfo, void const* scalars, void* extraParams, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { hipLaunchKernelGGL(( scalarAlongDimension<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, 
xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); sd::DebugHelper::checkErrorCode(stream, "scalarAlongDim(...) failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> void SD_HOST ScalarBoolTransform<X, Z>::intermediateShaped(dim3& launchDims, hipStream_t* stream, void const* vx, sd::LongType const* xShapeInfo, void* vz, sd::LongType const* zShapeInfo, void const* vscalar, void* vextraParams, int* allocPointer) { hipLaunchKernelGGL(( scalarSimpleShaped<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, vscalar, xShapeInfo, vextraParams, vz, zShapeInfo, allocPointer); sd::DebugHelper::checkErrorCode(stream, "scalarSimpleShaped(...) failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> void ScalarBoolTransform<X, Y>::executeCudaShaped(dim3& launchDims, hipStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo, void* vz, sd::LongType const* zShapeInfo, void const* vscalar, void const* vextraParams) { if (sd::Environment::getInstance().isDebugAndVerbose()) printf("H14 opNum:[%i]\n", opNum); DISPATCH_BY_OPNUM_TT( intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalar, const_cast<void*>(vextraParams), nullptr), SCALAR_BOOL_OPS); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> void ScalarBoolTransform<X, Y>::executeCudaAlongDimension( dim3& launchDims, hipStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo, void* vz, sd::LongType const* zShapeInfo, void const* vscalars, void* vextraParams, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { DISPATCH_BY_OPNUM_TT(intermediateAlongDimension, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalars, vextraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SCALAR_BOOL_OPS); } BUILD_DOUBLE_TEMPLATE(template class ScalarBoolTransform, , SD_COMMON_TYPES, SD_BOOL_TYPES); } // namespace scalar } // namespace functions
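// A minimal sketch of the TAD traversal used by the along-dimension transform
// above (the ews == 1 fast path): blocks stride over whole sub-tensors (tads),
// threads stride over the elements of one tad, and a per-tad scalar is
// applied. The comparison stands in for OpType::op; all names here are
// illustrative.
__global__ void per_tad_scalar(const float *x, const long long *tadOffsets,
                               long long tadLength, long long numTads,
                               const float *scalars, float *z) {
  for (long long r = blockIdx.x; r < numTads; r += gridDim.x) {   // one tad per block
    const float *oX = x + tadOffsets[r];
    float *oZ = z + tadOffsets[r];
    float s = scalars[r];                                         // scalar for this tad
    for (long long f = threadIdx.x; f < tadLength; f += blockDim.x)
      oZ[f] = oX[f] > s ? 1.f : 0.f;                              // stand-in for OpType::op
  }
}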
b75cf6f2102dda81ba83a39c292b44607fd05e3e.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 08.11.2018 // @author [email protected] // #include <system/op_boilerplate.h> #include <types/types.h> #include "../legacy_ops.h" #include "../scalar_bool.h" using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_KERNEL void scalarAlongDimension(void const* x, sd::LongType const* xShapeInfo, void* extraParams, void* z, sd::LongType const* zShapeInfo, void const* scalars, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { functions::scalar::ScalarBoolTransform<X, Z>::template transformCuda<OpType>( x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z, typename OpType> SD_KERNEL void scalarSimpleShaped(void const* x, void const* y, sd::LongType const* xShapeInfo, void* params, void* z, sd::LongType const* zShapeInfo, int* allocationBuffer) { functions::scalar::ScalarBoolTransform<X, Z>::template transformCuda<OpType>(y, x, xShapeInfo, params, z, zShapeInfo, allocationBuffer); } // *********************************************************************// // *********************************************************************// namespace functions { namespace scalar { //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ScalarBoolTransform<X, Z>::transformCuda(void const* vscalar, void const* vy, sd::LongType const* yShapeInfo, void* vparams, void* vz, sd::LongType const* zShapeInfo, int* allocationBuffer) { auto scalar = reinterpret_cast<X const*>(vscalar)[0]; auto y = reinterpret_cast<X const*>(vy); auto params = reinterpret_cast<X*>(vparams); auto z = reinterpret_cast<Z*>(vz); auto yRank = shape::rank(yShapeInfo); auto yEWS = shape::elementWiseStride(yShapeInfo); auto yShape = shape::shapeOf(yShapeInfo); auto yStride = shape::stride(yShapeInfo); auto zRank = shape::rank(zShapeInfo); auto zEWS = shape::elementWiseStride(zShapeInfo); auto zShape = shape::shapeOf(zShapeInfo); auto zStride = shape::stride(zShapeInfo); int totalThreads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int len; if (threadIdx.x == 0) len = shape::length(yShapeInfo); __syncthreads(); if (yEWS >= 1 && zEWS >= 1 && shape::order(yShapeInfo) == shape::order(zShapeInfo)) { transformCuda<OpType>(len, vscalar, vy, yEWS, vparams, 
vz, zEWS, allocationBuffer); } else { for (sd::LongType i = tid; i < len; i += totalThreads) z[shape::getIndexOffset(i, zShapeInfo)] = OpType::op(y[shape::getIndexOffset(i, yShapeInfo)], scalar, params); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ScalarBoolTransform<X, Z>::transformCuda(sd::LongType len, void const* vx, void const* vy, sd::LongType yEWS, void* vparams, void* vz, sd::LongType zEWS, int* allocationBuffer) { auto x = reinterpret_cast<X const*>(vx)[0]; auto y = reinterpret_cast<X const*>(vy); auto z = reinterpret_cast<Z*>(vz); auto params = reinterpret_cast<X*>(vparams); int totalThreads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; sd::LongType i = tid; if (yEWS == 1 && zEWS == 1) { for (; i < len; i += totalThreads) z[i] = OpType::op(y[i], x, params); } else { for (; i < len; i += totalThreads) z[i * zEWS] = OpType::op(y[i * yEWS], x, params); } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_DEVICE void ScalarBoolTransform<X, Z>::transformCuda( void const* vx, sd::LongType const* xShapeInfo, void* vextraParams, void* vz, sd::LongType const* zShapeInfo, void const* vscalars, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { auto x = reinterpret_cast<X const*>(vx); auto scalars = reinterpret_cast<X const*>(vscalars); auto z = reinterpret_cast<Z*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); if (tadShapeInfoZ == nullptr) { tadShapeInfoZ = tadShapeInfo; tadOffsetsZ = tadOffsets; } // tad preparation auto tadEws = shape::elementWiseStride(tadShapeInfo); auto zEws = shape::elementWiseStride(tadShapeInfoZ); auto tadLength = shape::length(tadShapeInfo); // shape::tadLength(xShapeInfo, dimension, dimensionLength); auto numTads = shape::length(xShapeInfo) / tadLength; if (tadEws > 0 && zEws > 0 && shape::order(tadShapeInfo) == shape::order(zShapeInfo)) { // main loop, rolling over tads for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Z* oZ = z + tadOffsetsZ[r]; auto oX = x + tadOffsets[r]; auto s = scalars[r]; for (int f = threadIdx.x; f < tadLength; f += blockDim.x) oZ[f * zEws] = OpType::op(oX[f * tadEws], s, extraParams); } } else { // main loop, rolling over tads for (int r = blockIdx.x; r < numTads; r += gridDim.x) { Z* oZ = z + tadOffsetsZ[r]; auto oX = x + tadOffsets[r]; auto s = scalars[r]; for (int f = threadIdx.x; f < tadLength; f += blockDim.x) oZ[shape::getIndexOffset(f, tadShapeInfoZ)] = OpType::op(oX[shape::getIndexOffset(f, tadShapeInfo)], s, extraParams); } } } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> SD_HOST void ScalarBoolTransform<X, Z>::intermediateAlongDimension( dim3& launchDims, cudaStream_t* stream, void const* x, sd::LongType const* xShapeInfo, void* z, sd::LongType const* zShapeInfo, void const* scalars, void* extraParams, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { scalarAlongDimension<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>( x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, 
tadShapeInfoZ, tadOffsetsZ); sd::DebugHelper::checkErrorCode(stream, "scalarAlongDim(...) failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Z> template <typename OpType> void SD_HOST ScalarBoolTransform<X, Z>::intermediateShaped(dim3& launchDims, cudaStream_t* stream, void const* vx, sd::LongType const* xShapeInfo, void* vz, sd::LongType const* zShapeInfo, void const* vscalar, void* vextraParams, int* allocPointer) { scalarSimpleShaped<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>( vx, vscalar, xShapeInfo, vextraParams, vz, zShapeInfo, allocPointer); sd::DebugHelper::checkErrorCode(stream, "scalarSimpleShaped(...) failed"); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> void ScalarBoolTransform<X, Y>::executeCudaShaped(dim3& launchDims, cudaStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo, void* vz, sd::LongType const* zShapeInfo, void const* vscalar, void const* vextraParams) { if (sd::Environment::getInstance().isDebugAndVerbose()) printf("H14 opNum:[%i]\n", opNum); DISPATCH_BY_OPNUM_TT( intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalar, const_cast<void*>(vextraParams), nullptr), SCALAR_BOOL_OPS); } //////////////////////////////////////////////////////////////////////// template <typename X, typename Y> void ScalarBoolTransform<X, Y>::executeCudaAlongDimension( dim3& launchDims, cudaStream_t* stream, int opNum, void const* vx, sd::LongType const* xShapeInfo, void* vz, sd::LongType const* zShapeInfo, void const* vscalars, void* vextraParams, int* dimension, int dimensionLength, sd::LongType const* tadShapeInfo, sd::LongType const* tadOffsets, sd::LongType const* tadShapeInfoZ, sd::LongType const* tadOffsetsZ) { DISPATCH_BY_OPNUM_TT(intermediateAlongDimension, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalars, vextraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SCALAR_BOOL_OPS); } BUILD_DOUBLE_TEMPLATE(template class ScalarBoolTransform, , SD_COMMON_TYPES, SD_BOOL_TYPES); } // namespace scalar } // namespace functions
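// A minimal sketch of the grid-stride loop that transformCuda uses for the
// flat (element-wise stride) case: each thread starts at its global index and
// advances by the total number of launched threads, so one fixed launch
// covers any length. The float/bool types and the comparison are assumptions
// standing in for the X/Z template parameters and OpType::op.
__global__ void scalar_greater(const float *y, float scalar, bool *z,
                               long long len) {
  long long i = (long long)blockIdx.x * blockDim.x + threadIdx.x;
  long long step = (long long)gridDim.x * blockDim.x;   // total threads in the grid
  for (; i < len; i += step)                            // grid-stride loop
    z[i] = y[i] > scalar;
}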
fbac7dd391a51a1352fce0bce90b7c8420d6546d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Kernels.cu * * Created on: Oct 19, 2013 * Author: reid */ #include "Kernels.h" #include "util.h" extern __host__ __device__ void printLongSizes(); dim3 gDefaultMatProdBlock = dim3(16,16); __device__ clock_t global_now; // todo redundant to util<T> ::eps template<> __host__ __device__ float Epsilon() { return 1e-6; } template<> __host__ __device__ double Epsilon() { return 1e-10; } template<> __host__ __device__ ulong Epsilon() { return 1; } __global__ void warmup() { #if __CUDA_ARCH__ == 610 prlocf("\n\nwarmup<<<>>> on cc 6.1 device\n\n\n"); #elif __CUDA_ARCH__ == 520 prlocf("\n\nwarmup<<<>>> on cc 5.2 device\n\n\n"); #elif __CUDA_ARCH__ == 500 prlocf("\n\nwarmup<<<>>> on cc 5 device\n\n\n"); #elif __CUDA_ARCH__ == 350 prlocf("\n\nwarmup<<<>>> on cc 3.5 device\n\n\n"); #elif __CUDA_ARCH__ == 300 prlocf("\n\nwarmup<<<>>> on cc 3 device\n\n\n"); #else prlocf("\n\nwarmup<<<>>> on cc UNKNOWN device\n\n\n"); #endif printLongSizes(); } __global__ void slep(long slepMs) { clock_t start = clock(); clock_t now; for (;;) { now = clock(); clock_t cycles = now > start ? now - start : now + (0xffffffff - start); if (cycles >= slepMs) { break; } } // Stored "now" in global memory here to prevent the // compiler from optimizing away the entire loop. global_now = now; } template <typename T> __host__ CUDART_DEVICE void setL(T* elements, int m, int n, int p, int row, int col, T val) { if(row > m) { printf("rowOutOfBounds()\n"); return;} if(col > n) { printf("columnOutOfBounds()\n"); return; } hipLaunchKernelGGL(( setKernel), dim3(1),dim3(1), 0, 0, elements, row, col, p, row * p + col, val); } template void setL<float>(float*, int, int, int, int, int, float); template void setL<double>(double*, int, int, int, int, int, double); template void setL<long>(long*, int, int, int, int, int, long); template void setL<ulong>(ulong*, int, int, int, int, int, ulong); template void setL<int>(int*, int, int, int, int, int, int); template void setL<uint>(uint*, int, int, int, int, int, uint); template <typename T> __global__ void setKernel(T* elements, int m, int n, int p, long l, T val) { if(n == p) { elements[l] = val; } else { // todo simplify this uint div = l /n; uint idx = div * p; idx += l - div * n; //printf("offset l %u -> %u\n", l, idx); elements[idx ] = val; } } template <typename T> __host__ CUDART_DEVICE void setL(T* elements, int m, int n, int p, long l, T val) { if(l > m * p) {printf("outOfBounds()\n"); return;} hipLaunchKernelGGL(( setKernel), dim3(1),dim3(1), 0, 0, elements, m, n, p, l,val); } template void setL<float>(float*, int, int, int, long, float); template void setL<double>(double*, int, int, int, long, double); template void setL<long>(long*, int, int, int, long, long); template void setL<ulong>(ulong*, int, int, int, long, ulong); template void setL<int>(int*, int, int, int, long, int); template void setL<uint>(uint*, int, int, int, long, uint); // filluers /* template __global__ void fillOpKernel<float, stepFiller<float> >(stepFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, constFiller<float> >(constFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, sinFiller<float> >(sinFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, cosFiller<float> >(cosFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, randFiller<float> >(randFiller<float>, float*, int, 
int, int, bool); template __global__ void fillOpKernel<float, sequenceFiller<float> >(sequenceFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, seqModFiller<float> >(seqModFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, diagonalFiller<float> >(diagonalFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, increasingColumnsFiller<float> >(increasingColumnsFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, increasingRowsFiller<float> >(increasingRowsFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, sequenceScaleFiller<float> >(sequenceScaleFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, spanFiller<float> >(spanFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<double, stepFiller<double> >(stepFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, constFiller<double> >(constFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, sinFiller<double> >(sinFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, cosFiller<double> >(cosFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, randFiller<double> >(randFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, sequenceFiller<double> >(sequenceFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, seqModFiller<double> >(seqModFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, diagonalFiller<double> >(diagonalFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, increasingColumnsFiller<double> >(increasingColumnsFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, increasingRowsFiller<double> >(increasingRowsFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, sequenceScaleFiller<double> >(sequenceScaleFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, spanFiller<double> >(spanFiller<double>, double*, int, int, int, bool); */ /* template<typename T> __global__ void fill_Kernel( T* trg, int height, int width, int pitch, bool colMajor) { uint xIndex = blockIdx.x * blockDim.x + threadIdx.x; uint yIndex = blockIdx.y * blockDim.y + threadIdx.y; uint indexOut = colMajor ? 
xIndex * pitch + yIndex : yIndex * pitch + xIndex; if(xIndex < width && yIndex < height) trg[indexOut] = indexOut; } */ template <typename T> __global__ void fillKernel(T* elements, T val, long n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < n) { elements[idx] = val; } } template __global__ void fillKernel<float>(float*, float, long); template __global__ void fillKernel<double>(double*, double, long); template __global__ void fillKernel<int>(int*, int, long); template __global__ void fillKernel<uint>(uint*, uint, long); template __global__ void fillKernel<long>(long*, long, long); template __global__ void fillKernel<ulong>(ulong*, ulong, long); // Non-Square Block version, to amortize index calcs template<typename T, typename FillOp> __global__ void fillOpNsbKernel( FillOp op, T* trg, int height, int width, int pitch, bool colMajor) { uint xIndex = blockIdx.x * blockDim.x + threadIdx.x; uint yIndex = blockIdx.y * blockDim.x + threadIdx.y; uint indexOut = colMajor ? xIndex * pitch + yIndex : yIndex * pitch + xIndex; uint ip; if( xIndex < width ) for(int i = 0; i < blockDim.x; i+= blockDim.y) { if( i + yIndex < height) { ip = i * pitch; trg[ip + indexOut] = op(indexOut + ip); } } } template __global__ void fillOpNsbKernel<float, oneOverFiller<float> >(oneOverFiller<float>, float*, int, int, int, bool); template __global__ void fillOpNsbKernel<double, oneOverFiller<double> >(oneOverFiller<double>, double*, int, int, int, bool); __global__ void setup_kernel ( hiprandState_t * state, int width, int pitch ) { int xIdx = threadIdx.x + blockIdx.x * blockDim.x; int yIdx = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIdx + yIdx * pitch; /* Each thread gets same seed, a different sequence number, no offset */ hiprand_init (1234, offset, 0, & state [ offset ]) ; } template __global__ void generate_kernel ( hiprandState_t * state, float* result, int height, int width ); template __global__ void generate_kernel ( hiprandState_t * state, double* result, int height, int width ); template <typename T> __global__ void generate_kernel ( hiprandState_t * state, T* result, int height, int width ) { int xIdx = threadIdx.x + blockIdx.x * blockDim.x; int yIdx = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIdx + yIdx * width; if(xIdx < width && yIdx < height) { /* Copy state to local memory for efficiency */ hiprandState_t localState = state [ offset ]; /* Generate pseudo - random unsigned ints */ result[offset]= static_cast<T>(hiprand (&localState) / RAND_MAX); /* Copy state back to global memory */ state [ offset ] = localState ; } } template __global__ void generate_uniform_kernel ( hiprandState_t * state, float* result, float epsilon, int height, int width, int ); template __global__ void generate_uniform_kernel ( hiprandState_t * state, double* result, double epsilon, int height, int width, int ); template __global__ void generate_uniform_kernel ( hiprandState_t * state, long* result, long epsilon, int height, int width, int ); template __global__ void generate_uniform_kernel ( hiprandState_t * state, ulong* result, ulong epsilon, int height, int width, int ); template __global__ void generate_uniform_kernel ( hiprandState_t * state, uint* result, uint epsilon, int height, int width , int); template __global__ void generate_uniform_kernel ( hiprandState_t * state, int* result, int epsilon, int height, int width, int ); template <typename T> __global__ void generate_uniform_kernel ( hiprandState_t * state, T* result, T epsilon, int height, int width, int pitch ) { int xIdx = 
threadIdx.x + blockIdx.x * blockDim.x; int yIdx = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIdx + yIdx * pitch; if(xIdx < width && yIdx < height) { /* Copy state to local memory for efficiency */ hiprandState_t localState = state [ offset ]; /* Generate pseudo - random uniforms */ result[offset] = (2 * hiprand_uniform (& localState ) - 1) * epsilon; /* Copy state back to global memory */ state [ offset ] = localState ; } } template __global__ void generate_uniform_kernel_mod ( hiprandState_t * state, float* result, float epsilon, int height, int width, int , int ); template __global__ void generate_uniform_kernel_mod ( hiprandState_t * state, double* result, double epsilon, int height, int width, int , int ); template __global__ void generate_uniform_kernel_mod ( hiprandState_t * state, long* result, long epsilon, int height, int width, int, int ); template __global__ void generate_uniform_kernel_mod ( hiprandState_t * state, ulong* result, ulong epsilon, int height, int width, int , int ); template __global__ void generate_uniform_kernel_mod ( hiprandState_t * state, uint* result, uint epsilon, int height, int width , int, int ); template __global__ void generate_uniform_kernel_mod ( hiprandState_t * state, int* result, int epsilon, int height, int width, int, int ); template <typename T> __global__ void generate_uniform_kernel_mod ( hiprandState_t * state, T* result, T epsilon, int height, int width, int pitch, int mod ) { int xIdx = threadIdx.x + blockIdx.x * blockDim.x; int yIdx = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIdx + yIdx * pitch; if(xIdx < width && yIdx < height) { /* Copy state to local memory for efficiency */ hiprandState_t localState = state [ offset ]; /* Generate pseudo - random uniforms */ result[offset] = (T) ( (int)((2 * hiprand_uniform (& localState ) - 1) * epsilon) % mod); /* Copy state back to global memory */ state [ offset ] = localState ; } }
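// A possible host-side launch for fillKernel above; the block size of 256 and
// the rounded-up block count are assumptions, not taken from the file. Shown
// with the HIP launch macro to match this hipified variant of the source.
void fill_device(float *d_elements, float val, long n) {
  int threads = 256;
  int blocks = (int)((n + threads - 1) / threads);      // one thread per element
  hipLaunchKernelGGL(fillKernel<float>, dim3(blocks), dim3(threads), 0, 0,
                     d_elements, val, n);
}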
fbac7dd391a51a1352fce0bce90b7c8420d6546d.cu
/* * Kernels.cu * * Created on: Oct 19, 2013 * Author: reid */ #include "Kernels.h" #include "util.h" extern __host__ __device__ void printLongSizes(); dim3 gDefaultMatProdBlock = dim3(16,16); __device__ clock_t global_now; // todo redundant to util<T> ::eps template<> __host__ __device__ float Epsilon() { return 1e-6; } template<> __host__ __device__ double Epsilon() { return 1e-10; } template<> __host__ __device__ ulong Epsilon() { return 1; } __global__ void warmup() { #if __CUDA_ARCH__ == 610 prlocf("\n\nwarmup<<<>>> on cc 6.1 device\n\n\n"); #elif __CUDA_ARCH__ == 520 prlocf("\n\nwarmup<<<>>> on cc 5.2 device\n\n\n"); #elif __CUDA_ARCH__ == 500 prlocf("\n\nwarmup<<<>>> on cc 5 device\n\n\n"); #elif __CUDA_ARCH__ == 350 prlocf("\n\nwarmup<<<>>> on cc 3.5 device\n\n\n"); #elif __CUDA_ARCH__ == 300 prlocf("\n\nwarmup<<<>>> on cc 3 device\n\n\n"); #else prlocf("\n\nwarmup<<<>>> on cc UNKNOWN device\n\n\n"); #endif printLongSizes(); } __global__ void slep(long slepMs) { clock_t start = clock(); clock_t now; for (;;) { now = clock(); clock_t cycles = now > start ? now - start : now + (0xffffffff - start); if (cycles >= slepMs) { break; } } // Stored "now" in global memory here to prevent the // compiler from optimizing away the entire loop. global_now = now; } template <typename T> __host__ CUDART_DEVICE void setL(T* elements, int m, int n, int p, int row, int col, T val) { if(row > m) { printf("rowOutOfBounds()\n"); return;} if(col > n) { printf("columnOutOfBounds()\n"); return; } setKernel<<<1,1>>>(elements, row, col, p, row * p + col, val); } template void setL<float>(float*, int, int, int, int, int, float); template void setL<double>(double*, int, int, int, int, int, double); template void setL<long>(long*, int, int, int, int, int, long); template void setL<ulong>(ulong*, int, int, int, int, int, ulong); template void setL<int>(int*, int, int, int, int, int, int); template void setL<uint>(uint*, int, int, int, int, int, uint); template <typename T> __global__ void setKernel(T* elements, int m, int n, int p, long l, T val) { if(n == p) { elements[l] = val; } else { // todo simplify this uint div = l /n; uint idx = div * p; idx += l - div * n; //printf("offset l %u -> %u\n", l, idx); elements[idx ] = val; } } template <typename T> __host__ CUDART_DEVICE void setL(T* elements, int m, int n, int p, long l, T val) { if(l > m * p) {printf("outOfBounds()\n"); return;} setKernel<<<1,1>>>(elements, m, n, p, l,val); } template void setL<float>(float*, int, int, int, long, float); template void setL<double>(double*, int, int, int, long, double); template void setL<long>(long*, int, int, int, long, long); template void setL<ulong>(ulong*, int, int, int, long, ulong); template void setL<int>(int*, int, int, int, long, int); template void setL<uint>(uint*, int, int, int, long, uint); // filluers /* template __global__ void fillOpKernel<float, stepFiller<float> >(stepFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, constFiller<float> >(constFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, sinFiller<float> >(sinFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, cosFiller<float> >(cosFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, randFiller<float> >(randFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, sequenceFiller<float> >(sequenceFiller<float>, float*, int, int, int, bool); template __global__ void 
fillOpKernel<float, seqModFiller<float> >(seqModFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, diagonalFiller<float> >(diagonalFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, increasingColumnsFiller<float> >(increasingColumnsFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, increasingRowsFiller<float> >(increasingRowsFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, sequenceScaleFiller<float> >(sequenceScaleFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<float, spanFiller<float> >(spanFiller<float>, float*, int, int, int, bool); template __global__ void fillOpKernel<double, stepFiller<double> >(stepFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, constFiller<double> >(constFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, sinFiller<double> >(sinFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, cosFiller<double> >(cosFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, randFiller<double> >(randFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, sequenceFiller<double> >(sequenceFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, seqModFiller<double> >(seqModFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, diagonalFiller<double> >(diagonalFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, increasingColumnsFiller<double> >(increasingColumnsFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, increasingRowsFiller<double> >(increasingRowsFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, sequenceScaleFiller<double> >(sequenceScaleFiller<double>, double*, int, int, int, bool); template __global__ void fillOpKernel<double, spanFiller<double> >(spanFiller<double>, double*, int, int, int, bool); */ /* template<typename T> __global__ void fill_Kernel( T* trg, int height, int width, int pitch, bool colMajor) { uint xIndex = blockIdx.x * blockDim.x + threadIdx.x; uint yIndex = blockIdx.y * blockDim.y + threadIdx.y; uint indexOut = colMajor ? xIndex * pitch + yIndex : yIndex * pitch + xIndex; if(xIndex < width && yIndex < height) trg[indexOut] = indexOut; } */ template <typename T> __global__ void fillKernel(T* elements, T val, long n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if(idx < n) { elements[idx] = val; } } template __global__ void fillKernel<float>(float*, float, long); template __global__ void fillKernel<double>(double*, double, long); template __global__ void fillKernel<int>(int*, int, long); template __global__ void fillKernel<uint>(uint*, uint, long); template __global__ void fillKernel<long>(long*, long, long); template __global__ void fillKernel<ulong>(ulong*, ulong, long); // Non-Square Block version, to amortize index calcs template<typename T, typename FillOp> __global__ void fillOpNsbKernel( FillOp op, T* trg, int height, int width, int pitch, bool colMajor) { uint xIndex = blockIdx.x * blockDim.x + threadIdx.x; uint yIndex = blockIdx.y * blockDim.x + threadIdx.y; uint indexOut = colMajor ? 
xIndex * pitch + yIndex : yIndex * pitch + xIndex; uint ip; if( xIndex < width ) for(int i = 0; i < blockDim.x; i+= blockDim.y) { if( i + yIndex < height) { ip = i * pitch; trg[ip + indexOut] = op(indexOut + ip); } } } template __global__ void fillOpNsbKernel<float, oneOverFiller<float> >(oneOverFiller<float>, float*, int, int, int, bool); template __global__ void fillOpNsbKernel<double, oneOverFiller<double> >(oneOverFiller<double>, double*, int, int, int, bool); __global__ void setup_kernel ( curandState * state, int width, int pitch ) { int xIdx = threadIdx.x + blockIdx.x * blockDim.x; int yIdx = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIdx + yIdx * pitch; /* Each thread gets same seed, a different sequence number, no offset */ curand_init (1234, offset, 0, & state [ offset ]) ; } template __global__ void generate_kernel ( curandState * state, float* result, int height, int width ); template __global__ void generate_kernel ( curandState * state, double* result, int height, int width ); template <typename T> __global__ void generate_kernel ( curandState * state, T* result, int height, int width ) { int xIdx = threadIdx.x + blockIdx.x * blockDim.x; int yIdx = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIdx + yIdx * width; if(xIdx < width && yIdx < height) { /* Copy state to local memory for efficiency */ curandState localState = state [ offset ]; /* Generate pseudo - random unsigned ints */ result[offset]= static_cast<T>(curand (&localState) / RAND_MAX); /* Copy state back to global memory */ state [ offset ] = localState ; } } template __global__ void generate_uniform_kernel ( curandState * state, float* result, float epsilon, int height, int width, int ); template __global__ void generate_uniform_kernel ( curandState * state, double* result, double epsilon, int height, int width, int ); template __global__ void generate_uniform_kernel ( curandState * state, long* result, long epsilon, int height, int width, int ); template __global__ void generate_uniform_kernel ( curandState * state, ulong* result, ulong epsilon, int height, int width, int ); template __global__ void generate_uniform_kernel ( curandState * state, uint* result, uint epsilon, int height, int width , int); template __global__ void generate_uniform_kernel ( curandState * state, int* result, int epsilon, int height, int width, int ); template <typename T> __global__ void generate_uniform_kernel ( curandState * state, T* result, T epsilon, int height, int width, int pitch ) { int xIdx = threadIdx.x + blockIdx.x * blockDim.x; int yIdx = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIdx + yIdx * pitch; if(xIdx < width && yIdx < height) { /* Copy state to local memory for efficiency */ curandState localState = state [ offset ]; /* Generate pseudo - random uniforms */ result[offset] = (2 * curand_uniform (& localState ) - 1) * epsilon; /* Copy state back to global memory */ state [ offset ] = localState ; } } template __global__ void generate_uniform_kernel_mod ( curandState * state, float* result, float epsilon, int height, int width, int , int ); template __global__ void generate_uniform_kernel_mod ( curandState * state, double* result, double epsilon, int height, int width, int , int ); template __global__ void generate_uniform_kernel_mod ( curandState * state, long* result, long epsilon, int height, int width, int, int ); template __global__ void generate_uniform_kernel_mod ( curandState * state, ulong* result, ulong epsilon, int height, int width, int , int ); template __global__ void 
generate_uniform_kernel_mod ( curandState * state, uint* result, uint epsilon, int height, int width , int, int ); template __global__ void generate_uniform_kernel_mod ( curandState * state, int* result, int epsilon, int height, int width, int, int ); template <typename T> __global__ void generate_uniform_kernel_mod ( curandState * state, T* result, T epsilon, int height, int width, int pitch, int mod ) { int xIdx = threadIdx.x + blockIdx.x * blockDim.x; int yIdx = threadIdx.y + blockIdx.y * blockDim.y; int offset = xIdx + yIdx * pitch; if(xIdx < width && yIdx < height) { /* Copy state to local memory for efficiency */ curandState localState = state [ offset ]; /* Generate pseudo - random uniforms */ result[offset] = (T) ( (int)((2 * curand_uniform (& localState ) - 1) * epsilon) % mod); /* Copy state back to global memory */ state [ offset ] = localState ; } }
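// A possible host-side driver for the curand kernels above (a sketch: the
// 16x16 block shape, the requirement that width and height are multiples of
// it, and the lack of error handling are assumptions, not taken from the
// file). setup_kernel has no bounds check, so the grid is sized exactly and
// one curandState is allocated per element; pitch is taken equal to width.
#include <curand_kernel.h>

void fill_uniform(float *d_result, int height, int width, float epsilon) {
  curandState *d_states;
  cudaMalloc(&d_states, sizeof(curandState) * (size_t)height * width);

  dim3 block(16, 16);
  dim3 grid(width / block.x, height / block.y);         // exact cover assumed

  setup_kernel<<<grid, block>>>(d_states, width, width);
  generate_uniform_kernel<<<grid, block>>>(d_states, d_result, epsilon,
                                           height, width, width);
  cudaDeviceSynchronize();                              // values now lie in (-epsilon, epsilon]
  cudaFree(d_states);
}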
803b01a2dcb3b29cb263e9b6151e8eacd98d9a95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <functional> #include "caffe2/core/context_gpu.h" #include "csc_op.h" namespace caffe2 { namespace { template <typename T> __global__ void kernel_show(const T* Xdata, const int batch_size, const int channels, const int height, const int width, const int ndim, const int gpu_id, const int uuid) { printf("uuid=%d gpu=%d ndim=%d b = %d c = %d h = %d w = %d\n", uuid, gpu_id, ndim, batch_size, channels, height, width); for (int b = 0; b < batch_size; b++) { for (int c = 0; c < channels; c++) { for (int h = 0; h < height; h++) { for (int w = 0; w < width; w++) { int index_X = ((b * channels + c) * height + h) * width + w; printf("b = %d c = %d h = %d w = %d %.32f\n", b, c, h, w, Xdata[index_X]); } } } } } template <typename T> __global__ void kernel_show_c(const T* Xdata, const int batch_size, const int channels, const int height, const int width, const int ndim, const int gpu_id, const int uuid, const int c) { printf("uuid=%d gpu=%d ndim=%d b = %d c = %d h = %d w = %d\n", uuid, gpu_id, ndim, batch_size, channels, height, width); for (int b = 0; b < batch_size; b++) { // for (int c = 0; c < channels; c++) { for (int h = 0; h < height; h++) { for (int w = 0; w < width; w++) { int index_X = ((b * channels + c) * height + h) * width + w; printf("b = %d c = %d h = %d w = %d %.32f\n", b, c, h, w, Xdata[index_X]); } } //} } } template <typename T> __global__ void binary_kernel(const int nthreads, const T* const x, T* const y, const T threshold) { CUDA_1D_KERNEL_LOOP(index, nthreads) { if (x[index] >= threshold) { y[index] = 1; } else { y[index] = 0; } } } template <typename T> void integral_cpu(const T* src, T* sum, const int height, const int width) { T s = 0; for (int x = 0; x < width; x++) { s += src[x]; sum[x] = s; } src += width; sum += width; for (int y = 1; y < height; y++, src += width, sum += width) { s = 0; for (int x = 0; x < width; x++) { s += src[x]; sum[x] = sum[x - width] + s; } } } template <typename T> void binary_and_integral_cpu(const T* src, T* sum, const int height, const int width, const T threshold) { T s = 0; for (int x = 0; x < width; x++) { if (src[x] >= threshold) { s += 1; } else { s += 0; } sum[x] = s; } src += width; sum += width; for (int y = 1; y < height; y++, src += width, sum += width) { s = 0; for (int x = 0; x < width; x++) { if (src[x] >= threshold) { s += 1; } else { s += 0; } sum[x] = sum[x - width] + s; } } } template <typename T> T get_sum(const int N, const T* data) { T sum_val = 0; for (int i = 0; i < N; i++) { sum_val += *data; data += 1; } return sum_val; } template <typename T> T get_max(const int N, const T* data) { T max_val = -FLT_MAX; for (int i = 0; i < N; i++) { if (*data > max_val) { max_val = *data; } data += 1; } return max_val; } template <typename T> __global__ void CSCPool(const int nthreads, const T* cpg_data, const int height_im, const int width_im, const T* rois_data, const int num_class, const int cls_id, const T min_density, const T min_mass, const bool area_sqrt, const T context_scale, T* const top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int rois_index = index; rois_data += 5 * rois_index; int wstart = round(rois_data[1]); int hstart = round(rois_data[2]); int wend = round(rois_data[3]); int hend = round(rois_data[4]); // if (wstart < 0) wstart = 0; // if (wstart >= width_im) wstart = width_im - 1; // if (hstart < 0) hstart = 0; // if (hstart >= height_im) hstart = height_im - 1; // Check RoI if (wstart >= 0 && hstart 
>= 0 && wstart < wend && hstart < hend && wend < width_im && hend < height_im) { } else { top_data[rois_index * num_class + cls_id] = kMIN_SCORE; // forreturn continue; } // caculate the inner and outer RoI coordinate T width_roi = wend - wstart; T height_roi = hend - hstart; // T context_scale = 1.8; // T context_scale = sqrtf(2.0); T width_roi_inner = 1.0 * width_roi / context_scale; T height_roi_inner = 1.0 * height_roi / context_scale; T width_roi_outer = 1.0 * width_roi * context_scale; T height_roi_outer = 1.0 * height_roi * context_scale; T wcenter = 1.0 * (wend + wstart) / 2.0; T hcenter = 1.0 * (hend + hstart) / 2.0; int wstart_inner = round(wcenter - width_roi_inner / 2.0); int hstart_inner = round(hcenter - height_roi_inner / 2.0); int wend_inner = round(wcenter + width_roi_inner / 2.0); int hend_inner = round(hcenter + height_roi_inner / 2.0); int wstart_outer = round(max(wcenter - width_roi_outer / 2.0, 0.0)); int hstart_outer = round(max(hcenter - height_roi_outer / 2.0, 0.0)); int wend_outer = round(min(wcenter + width_roi_outer / 2.0, width_im - 1.0)); int hend_outer = round(min(hcenter + height_roi_outer / 2.0, height_im - 1.0)); width_roi = wend - wstart + 1; height_roi = hend - hstart + 1; width_roi_inner = wend_inner - wstart_inner + 1; height_roi_inner = hend_inner - hstart_inner + 1; width_roi_outer = wend_outer - wstart_outer + 1; height_roi_outer = hend_outer - hstart_outer + 1; // a1-a2-a3+a4 T a1, a2, a3, a4; // CPG sum of RoI a1 = cpg_data[hend * width_im + wend]; a2 = (wstart - 1 >= 0) ? cpg_data[hend * width_im + (wstart - 1)] : 0; a3 = (hstart - 1 >= 0) ? cpg_data[(hstart - 1) * width_im + wend] : 0; a4 = (hstart - 1 >= 0 && wstart - 1 >= 0) ? cpg_data[(hstart - 1) * width_im + (wstart - 1)] : 0; T sum_roi = a1 - a2 - a3 + a4; // CPG sum of inner RoI a1 = cpg_data[hend_inner * width_im + wend_inner]; a2 = (wstart_inner - 1 >= 0) ? cpg_data[hend_inner * width_im + (wstart_inner - 1)] : 0; a3 = (hstart_inner - 1 >= 0) ? cpg_data[(hstart_inner - 1) * width_im + wend_inner] : 0; a4 = (hstart_inner - 1 >= 0 && wstart_inner - 1 >= 0) ? cpg_data[(hstart_inner - 1) * width_im + (wstart_inner - 1)] : 0; T sum_inner = a1 - a2 - a3 + a4; // CPG sum of outer RoI a1 = cpg_data[hend_outer * width_im + wend_outer]; a2 = (wstart_outer - 1 >= 0) ? cpg_data[hend_outer * width_im + (wstart_outer - 1)] : 0; a3 = (hstart_outer - 1 >= 0) ? cpg_data[(hstart_outer - 1) * width_im + wend_outer] : 0; a4 = (hstart_outer - 1 >= 0 && wstart_outer - 1 >= 0) ? 
cpg_data[(hstart_outer - 1) * width_im + (wstart_outer - 1)] : 0; T sum_outer = a1 - a2 - a3 + a4; // area size T area_roi = height_roi * width_roi; T area_inner = height_roi_inner * width_roi_inner; T area_outer = height_roi_outer * width_roi_outer; T area_frame = max(area_roi - area_inner, T(1)); T area_context = max(area_outer - area_roi, T(1)); //----------------------------------------------------------------------- T score; T sum_frame = sum_roi - sum_inner; T sum_context = sum_outer - sum_roi; // current best if (area_sqrt) { score = sum_frame / sqrt(area_frame) - sum_context / sqrt(area_context); } else { score = sum_frame / area_frame - sum_context / area_context; } // bad at test debug // T score = (sum_roi - sum_inner) - (sum_outer - sum_roi); // (msra 0223): // T score = ((sum_roi - 2.0 * (sum_outer - sum_roi)) * //(2.0 * (sum_roi - sum_inner) - sum_inner)) / // area_roi; // if ((sum_roi - 2.0 * (sum_outer - sum_roi)) < 0 && //(2.0 * (sum_roi - sum_inner) - sum_inner) < 0) { // score = -1.0 * score; //} // (msra 0101): bad // T score = sqrt((sum_roi - sum_inner) / area_frame) - // sqrt((sum_outer - sum_roi) / area_context); // (msra 12.30): very bad // T score = // (sum_roi - sum_inner) / area_frame - (sum_outer - sum_roi) / // area_context; // (msra 12.29): bad // T score = ((sum_roi - sum_inner) - (sum_outer - sum_roi)) / // area_frame; // (msra 0105): bad than (msra 12.29) // T score = ((sum_roi - sum_inner) - (sum_outer - sum_roi)) / // sqrt(area_frame); //----------------------------------------------------------------------- // if (sum_roi < min_mass) score = kMIN_SCORE; top_data[rois_index * num_class + cls_id] = score; } } } // namespace template <> bool CSCOp<float, CUDAContext>::RunOnDevice() { const auto& M = Input(0); const auto& X = Input(1); const auto& Y = Input(2); const auto& R = Input(3); CAFFE_ENFORCE_EQ(M.dim(), 4); CAFFE_ENFORCE_EQ(X.dim(), 2); CAFFE_ENFORCE_EQ(Y.dim(), 2); CAFFE_ENFORCE_EQ(R.dim(), 2); CAFFE_ENFORCE_EQ(X.dim32(0), Y.dim32(0)); CAFFE_ENFORCE_EQ(X.dim32(0), M.dim32(0)); CAFFE_ENFORCE_EQ(X.dim32(1), Y.dim32(1)); CAFFE_ENFORCE_EQ(X.dim32(1), M.dim32(1)); CAFFE_ENFORCE_EQ(R.dim32(1), 5); const int batch_size = X.dim32(0); const int num_classes = X.dim32(1); const int num_rois = R.dim32(0); const int cpg_height = M.dim32(2); const int cpg_width = M.dim32(3); auto* W = Output(0); W->Resize(num_rois, num_classes); math::Set<float, CUDAContext>(W->numel(), 1.f, W->mutable_data<float>(), &context_); auto* PL = Output(1); PL->ResizeLike(X); PL->CopyFrom(X, false); context_.FinishDeviceComputation(); auto* NL = Output(2); NL->ResizeLike(X); math::Set<float, CUDAContext>(NL->numel(), 0.f, NL->mutable_data<float>(), &context_); if (cur_iter_ >= max_iter_) { return true; } const int gpu_id = context_.device_id(); int uuid; if (debug_info_) { srand(time(NULL)); uuid = rand(); } Tensor Xcpu = Tensor(X, caffe2::CPU); context_.FinishDeviceComputation(); const float* Xcpudata = Xcpu.data<float>(); Tensor Ycpu = Tensor(Y, caffe2::CPU); context_.FinishDeviceComputation(); const float* Ycpudata = Ycpu.data<float>(); for (int b = 0; b < batch_size; b++) { for (int c = 0; c < num_classes; c++) { int label_idx = b * num_classes + c; float label_value = Xcpudata[label_idx]; float pred_value = Ycpudata[label_idx]; if (debug_info_) { printf("uuid %d gpu %d b %d c %d: %.32f %.32f\n", uuid, gpu_id, b, c, label_value, pred_value); } if (label_value < 0.5) { continue; } // if (pred_value < tau_) { // continue; //} // Get CPG map Tensor m = Tensor(caffe2::CUDA); 
m.Resize(cpg_height, cpg_width); math::Abs<float, CUDAContext>( m.numel(), M.data<float>() + cpg_height * cpg_width * label_idx, m.mutable_data<float>(), &context_); // Get max value Tensor mcpu = Tensor(m, caffe2::CPU); context_.FinishDeviceComputation(); // float max_val = get_max<float>(mcpu.numel(), mcpu.data<float>()); float max_val = 1.; if (debug_info_) { printf("uuid %d gpu %d max_val %.32f\n", uuid, gpu_id, max_val); } float im_mass = 0; float im_density = 0; // im_mass = get_sum<float>(mcpu.numel(), mcpu.data<float>()); // im_density = 1.0 * im_mass / cpg_height / cpg_width; if (debug_info_) { printf("uuid %d gpu %d im_mass %.32f im_density %.32f\n", uuid, gpu_id, im_mass, im_density); } // Get Integral map Tensor icpu = Tensor(caffe2::CPU); icpu.ResizeLike(mcpu); binary_and_integral_cpu(mcpu.data<float>(), icpu.mutable_data<float>(), cpg_height, cpg_width, max_val * fg_threshold_); // CAFFE_ENFORCE_EQ(icpu.data<float>()[cpg_height * cpg_width - 1], // im_mass); if (debug_info_) { printf("uuid %d gpu %d im_mass in icpu %.32f im_mass %.32f\n", uuid, gpu_id, icpu.data<float>()[cpg_height * cpg_width - 1], im_mass); } m.CopyFrom(icpu, false); context_.FinishDeviceComputation(); // CSC Pooling hipLaunchKernelGGL(( CSCPool<float>), dim3(CAFFE_GET_BLOCKS(num_rois)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), num_rois, m.data<float>(), cpg_height, cpg_width, R.data<float>(), num_classes, c, im_density * density_threshold_, im_mass * mass_threshold_, area_sqrt_, context_scale_, W->mutable_data<float>()); Tensor Wcpu = Tensor(*W, caffe2::CPU); context_.FinishDeviceComputation(); // normalization max value to |1| float* Wcpudata = Wcpu.mutable_data<float>(); float max_value = 0; float min_value = 0; for (int r = 0; r < num_rois; r++) { float value = Wcpudata[r * num_classes + c]; if (value > max_value) { max_value = value; } if (value < min_value && value != kMIN_SCORE) { min_value = value; } } if (max_value > 0 && min_value < 0) { for (int r = 0; r < num_rois; r++) { float value = Wcpudata[r * num_classes + c]; if (value == kMIN_SCORE) { value = -1; } else { value = value > 0 ? value / max_value : value / (-min_value); } // value = value > 0 ? value / max_value : -1; Wcpudata[r * num_classes + c] = value; } } else if (max_value > 0 && min_value == 0) { for (int r = 0; r < num_rois; r++) { float value = Wcpudata[r * num_classes + c]; if (value == kMIN_SCORE) { value = -1; } else { value = value / max_value; } Wcpudata[r * num_classes + c] = value; } } else { for (int r = 0; r < num_rois; r++) { Wcpudata[r * num_classes + c] = 1.0; } } for (int r = 0; r < num_rois; r++) { Wcpudata[r * num_classes + c] = pred_value * Wcpudata[r * num_classes + c] + (1 - pred_value) * 1; } W->CopyFrom(Wcpu, &context_); context_.FinishDeviceComputation(); if (debug_info_) { hipLaunchKernelGGL(( kernel_show_c<float>) , dim3(CAFFE_GET_BLOCKS(1)), dim3(1), 0, context_.cuda_stream(), W->data<float>(), num_rois, num_classes, 1, 1, W->dim(), gpu_id, uuid, c); } } } cur_iter_++; return true; } REGISTER_CUDA_OPERATOR(CSC, CSCOp<float, CUDAContext>); } // namespace caffe2
803b01a2dcb3b29cb263e9b6151e8eacd98d9a95.cu
#include <cfloat> #include <functional> #include "caffe2/core/context_gpu.h" #include "csc_op.h" namespace caffe2 { namespace { template <typename T> __global__ void kernel_show(const T* Xdata, const int batch_size, const int channels, const int height, const int width, const int ndim, const int gpu_id, const int uuid) { printf("uuid=%d gpu=%d ndim=%d b = %d c = %d h = %d w = %d\n", uuid, gpu_id, ndim, batch_size, channels, height, width); for (int b = 0; b < batch_size; b++) { for (int c = 0; c < channels; c++) { for (int h = 0; h < height; h++) { for (int w = 0; w < width; w++) { int index_X = ((b * channels + c) * height + h) * width + w; printf("b = %d c = %d h = %d w = %d %.32f\n", b, c, h, w, Xdata[index_X]); } } } } } template <typename T> __global__ void kernel_show_c(const T* Xdata, const int batch_size, const int channels, const int height, const int width, const int ndim, const int gpu_id, const int uuid, const int c) { printf("uuid=%d gpu=%d ndim=%d b = %d c = %d h = %d w = %d\n", uuid, gpu_id, ndim, batch_size, channels, height, width); for (int b = 0; b < batch_size; b++) { // for (int c = 0; c < channels; c++) { for (int h = 0; h < height; h++) { for (int w = 0; w < width; w++) { int index_X = ((b * channels + c) * height + h) * width + w; printf("b = %d c = %d h = %d w = %d %.32f\n", b, c, h, w, Xdata[index_X]); } } //} } } template <typename T> __global__ void binary_kernel(const int nthreads, const T* const x, T* const y, const T threshold) { CUDA_1D_KERNEL_LOOP(index, nthreads) { if (x[index] >= threshold) { y[index] = 1; } else { y[index] = 0; } } } template <typename T> void integral_cpu(const T* src, T* sum, const int height, const int width) { T s = 0; for (int x = 0; x < width; x++) { s += src[x]; sum[x] = s; } src += width; sum += width; for (int y = 1; y < height; y++, src += width, sum += width) { s = 0; for (int x = 0; x < width; x++) { s += src[x]; sum[x] = sum[x - width] + s; } } } template <typename T> void binary_and_integral_cpu(const T* src, T* sum, const int height, const int width, const T threshold) { T s = 0; for (int x = 0; x < width; x++) { if (src[x] >= threshold) { s += 1; } else { s += 0; } sum[x] = s; } src += width; sum += width; for (int y = 1; y < height; y++, src += width, sum += width) { s = 0; for (int x = 0; x < width; x++) { if (src[x] >= threshold) { s += 1; } else { s += 0; } sum[x] = sum[x - width] + s; } } } template <typename T> T get_sum(const int N, const T* data) { T sum_val = 0; for (int i = 0; i < N; i++) { sum_val += *data; data += 1; } return sum_val; } template <typename T> T get_max(const int N, const T* data) { T max_val = -FLT_MAX; for (int i = 0; i < N; i++) { if (*data > max_val) { max_val = *data; } data += 1; } return max_val; } template <typename T> __global__ void CSCPool(const int nthreads, const T* cpg_data, const int height_im, const int width_im, const T* rois_data, const int num_class, const int cls_id, const T min_density, const T min_mass, const bool area_sqrt, const T context_scale, T* const top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int rois_index = index; rois_data += 5 * rois_index; int wstart = round(rois_data[1]); int hstart = round(rois_data[2]); int wend = round(rois_data[3]); int hend = round(rois_data[4]); // if (wstart < 0) wstart = 0; // if (wstart >= width_im) wstart = width_im - 1; // if (hstart < 0) hstart = 0; // if (hstart >= height_im) hstart = height_im - 1; // Check RoI if (wstart >= 0 && hstart >= 0 && wstart < wend && hstart < hend && wend < width_im && hend < height_im) { } else 
{ top_data[rois_index * num_class + cls_id] = kMIN_SCORE; // this runs inside a for loop; a return here would abort the remaining iterations continue; } // calculate the inner and outer RoI coordinates T width_roi = wend - wstart; T height_roi = hend - hstart; // T context_scale = 1.8; // T context_scale = sqrtf(2.0); T width_roi_inner = 1.0 * width_roi / context_scale; T height_roi_inner = 1.0 * height_roi / context_scale; T width_roi_outer = 1.0 * width_roi * context_scale; T height_roi_outer = 1.0 * height_roi * context_scale; T wcenter = 1.0 * (wend + wstart) / 2.0; T hcenter = 1.0 * (hend + hstart) / 2.0; int wstart_inner = round(wcenter - width_roi_inner / 2.0); int hstart_inner = round(hcenter - height_roi_inner / 2.0); int wend_inner = round(wcenter + width_roi_inner / 2.0); int hend_inner = round(hcenter + height_roi_inner / 2.0); int wstart_outer = round(max(wcenter - width_roi_outer / 2.0, 0.0)); int hstart_outer = round(max(hcenter - height_roi_outer / 2.0, 0.0)); int wend_outer = round(min(wcenter + width_roi_outer / 2.0, width_im - 1.0)); int hend_outer = round(min(hcenter + height_roi_outer / 2.0, height_im - 1.0)); width_roi = wend - wstart + 1; height_roi = hend - hstart + 1; width_roi_inner = wend_inner - wstart_inner + 1; height_roi_inner = hend_inner - hstart_inner + 1; width_roi_outer = wend_outer - wstart_outer + 1; height_roi_outer = hend_outer - hstart_outer + 1; // a1-a2-a3+a4 T a1, a2, a3, a4; // CPG sum of RoI a1 = cpg_data[hend * width_im + wend]; a2 = (wstart - 1 >= 0) ? cpg_data[hend * width_im + (wstart - 1)] : 0; a3 = (hstart - 1 >= 0) ? cpg_data[(hstart - 1) * width_im + wend] : 0; a4 = (hstart - 1 >= 0 && wstart - 1 >= 0) ? cpg_data[(hstart - 1) * width_im + (wstart - 1)] : 0; T sum_roi = a1 - a2 - a3 + a4; // CPG sum of inner RoI a1 = cpg_data[hend_inner * width_im + wend_inner]; a2 = (wstart_inner - 1 >= 0) ? cpg_data[hend_inner * width_im + (wstart_inner - 1)] : 0; a3 = (hstart_inner - 1 >= 0) ? cpg_data[(hstart_inner - 1) * width_im + wend_inner] : 0; a4 = (hstart_inner - 1 >= 0 && wstart_inner - 1 >= 0) ? cpg_data[(hstart_inner - 1) * width_im + (wstart_inner - 1)] : 0; T sum_inner = a1 - a2 - a3 + a4; // CPG sum of outer RoI a1 = cpg_data[hend_outer * width_im + wend_outer]; a2 = (wstart_outer - 1 >= 0) ? cpg_data[hend_outer * width_im + (wstart_outer - 1)] : 0; a3 = (hstart_outer - 1 >= 0) ? cpg_data[(hstart_outer - 1) * width_im + wend_outer] : 0; a4 = (hstart_outer - 1 >= 0 && wstart_outer - 1 >= 0) ? 
cpg_data[(hstart_outer - 1) * width_im + (wstart_outer - 1)] : 0; T sum_outer = a1 - a2 - a3 + a4; // area size T area_roi = height_roi * width_roi; T area_inner = height_roi_inner * width_roi_inner; T area_outer = height_roi_outer * width_roi_outer; T area_frame = max(area_roi - area_inner, T(1)); T area_context = max(area_outer - area_roi, T(1)); //----------------------------------------------------------------------- T score; T sum_frame = sum_roi - sum_inner; T sum_context = sum_outer - sum_roi; // current best if (area_sqrt) { score = sum_frame / sqrt(area_frame) - sum_context / sqrt(area_context); } else { score = sum_frame / area_frame - sum_context / area_context; } // bad at test debug // T score = (sum_roi - sum_inner) - (sum_outer - sum_roi); // (msra 0223): // T score = ((sum_roi - 2.0 * (sum_outer - sum_roi)) * //(2.0 * (sum_roi - sum_inner) - sum_inner)) / // area_roi; // if ((sum_roi - 2.0 * (sum_outer - sum_roi)) < 0 && //(2.0 * (sum_roi - sum_inner) - sum_inner) < 0) { // score = -1.0 * score; //} // (msra 0101): bad // T score = sqrt((sum_roi - sum_inner) / area_frame) - // sqrt((sum_outer - sum_roi) / area_context); // (msra 12.30): very bad // T score = // (sum_roi - sum_inner) / area_frame - (sum_outer - sum_roi) / // area_context; // (msra 12.29): bad // T score = ((sum_roi - sum_inner) - (sum_outer - sum_roi)) / // area_frame; // (msra 0105): bad than (msra 12.29) // T score = ((sum_roi - sum_inner) - (sum_outer - sum_roi)) / // sqrt(area_frame); //----------------------------------------------------------------------- // if (sum_roi < min_mass) score = kMIN_SCORE; top_data[rois_index * num_class + cls_id] = score; } } } // namespace template <> bool CSCOp<float, CUDAContext>::RunOnDevice() { const auto& M = Input(0); const auto& X = Input(1); const auto& Y = Input(2); const auto& R = Input(3); CAFFE_ENFORCE_EQ(M.dim(), 4); CAFFE_ENFORCE_EQ(X.dim(), 2); CAFFE_ENFORCE_EQ(Y.dim(), 2); CAFFE_ENFORCE_EQ(R.dim(), 2); CAFFE_ENFORCE_EQ(X.dim32(0), Y.dim32(0)); CAFFE_ENFORCE_EQ(X.dim32(0), M.dim32(0)); CAFFE_ENFORCE_EQ(X.dim32(1), Y.dim32(1)); CAFFE_ENFORCE_EQ(X.dim32(1), M.dim32(1)); CAFFE_ENFORCE_EQ(R.dim32(1), 5); const int batch_size = X.dim32(0); const int num_classes = X.dim32(1); const int num_rois = R.dim32(0); const int cpg_height = M.dim32(2); const int cpg_width = M.dim32(3); auto* W = Output(0); W->Resize(num_rois, num_classes); math::Set<float, CUDAContext>(W->numel(), 1.f, W->mutable_data<float>(), &context_); auto* PL = Output(1); PL->ResizeLike(X); PL->CopyFrom(X, false); context_.FinishDeviceComputation(); auto* NL = Output(2); NL->ResizeLike(X); math::Set<float, CUDAContext>(NL->numel(), 0.f, NL->mutable_data<float>(), &context_); if (cur_iter_ >= max_iter_) { return true; } const int gpu_id = context_.device_id(); int uuid; if (debug_info_) { srand(time(NULL)); uuid = rand(); } Tensor Xcpu = Tensor(X, caffe2::CPU); context_.FinishDeviceComputation(); const float* Xcpudata = Xcpu.data<float>(); Tensor Ycpu = Tensor(Y, caffe2::CPU); context_.FinishDeviceComputation(); const float* Ycpudata = Ycpu.data<float>(); for (int b = 0; b < batch_size; b++) { for (int c = 0; c < num_classes; c++) { int label_idx = b * num_classes + c; float label_value = Xcpudata[label_idx]; float pred_value = Ycpudata[label_idx]; if (debug_info_) { printf("uuid %d gpu %d b %d c %d: %.32f %.32f\n", uuid, gpu_id, b, c, label_value, pred_value); } if (label_value < 0.5) { continue; } // if (pred_value < tau_) { // continue; //} // Get CPG map Tensor m = Tensor(caffe2::CUDA); 
m.Resize(cpg_height, cpg_width); math::Abs<float, CUDAContext>( m.numel(), M.data<float>() + cpg_height * cpg_width * label_idx, m.mutable_data<float>(), &context_); // Get max value Tensor mcpu = Tensor(m, caffe2::CPU); context_.FinishDeviceComputation(); // float max_val = get_max<float>(mcpu.numel(), mcpu.data<float>()); float max_val = 1.; if (debug_info_) { printf("uuid %d gpu %d max_val %.32f\n", uuid, gpu_id, max_val); } float im_mass = 0; float im_density = 0; // im_mass = get_sum<float>(mcpu.numel(), mcpu.data<float>()); // im_density = 1.0 * im_mass / cpg_height / cpg_width; if (debug_info_) { printf("uuid %d gpu %d im_mass %.32f im_density %.32f\n", uuid, gpu_id, im_mass, im_density); } // Get Integral map Tensor icpu = Tensor(caffe2::CPU); icpu.ResizeLike(mcpu); binary_and_integral_cpu(mcpu.data<float>(), icpu.mutable_data<float>(), cpg_height, cpg_width, max_val * fg_threshold_); // CAFFE_ENFORCE_EQ(icpu.data<float>()[cpg_height * cpg_width - 1], // im_mass); if (debug_info_) { printf("uuid %d gpu %d im_mass in icpu %.32f im_mass %.32f\n", uuid, gpu_id, icpu.data<float>()[cpg_height * cpg_width - 1], im_mass); } m.CopyFrom(icpu, false); context_.FinishDeviceComputation(); // CSC Pooling CSCPool<float><<<CAFFE_GET_BLOCKS(num_rois), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( num_rois, m.data<float>(), cpg_height, cpg_width, R.data<float>(), num_classes, c, im_density * density_threshold_, im_mass * mass_threshold_, area_sqrt_, context_scale_, W->mutable_data<float>()); Tensor Wcpu = Tensor(*W, caffe2::CPU); context_.FinishDeviceComputation(); // normalization max value to |1| float* Wcpudata = Wcpu.mutable_data<float>(); float max_value = 0; float min_value = 0; for (int r = 0; r < num_rois; r++) { float value = Wcpudata[r * num_classes + c]; if (value > max_value) { max_value = value; } if (value < min_value && value != kMIN_SCORE) { min_value = value; } } if (max_value > 0 && min_value < 0) { for (int r = 0; r < num_rois; r++) { float value = Wcpudata[r * num_classes + c]; if (value == kMIN_SCORE) { value = -1; } else { value = value > 0 ? value / max_value : value / (-min_value); } // value = value > 0 ? value / max_value : -1; Wcpudata[r * num_classes + c] = value; } } else if (max_value > 0 && min_value == 0) { for (int r = 0; r < num_rois; r++) { float value = Wcpudata[r * num_classes + c]; if (value == kMIN_SCORE) { value = -1; } else { value = value / max_value; } Wcpudata[r * num_classes + c] = value; } } else { for (int r = 0; r < num_rois; r++) { Wcpudata[r * num_classes + c] = 1.0; } } for (int r = 0; r < num_rois; r++) { Wcpudata[r * num_classes + c] = pred_value * Wcpudata[r * num_classes + c] + (1 - pred_value) * 1; } W->CopyFrom(Wcpu, &context_); context_.FinishDeviceComputation(); if (debug_info_) { kernel_show_c<float> <<<CAFFE_GET_BLOCKS(1), 1, 0, context_.cuda_stream()>>>( W->data<float>(), num_rois, num_classes, 1, 1, W->dim(), gpu_id, uuid, c); } } } cur_iter_++; return true; } REGISTER_CUDA_OPERATOR(CSC, CSCOp<float, CUDAContext>); } // namespace caffe2
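Both the HIP and the CUDA version of the CSCPool kernel above score an RoI from three box sums (full RoI, inner frame, outer context) over a binarized CPG map, and each box sum costs O(1) because binary_and_integral_cpu precomputes a summed-area (integral) image. A minimal sketch of that lookup, with box_sum as a hypothetical helper that is not part of csc_op.cu (inclusive coordinates, row-major integral image):

// Hypothetical helper mirroring the a1 - a2 - a3 + a4 pattern in CSCPool:
// sum of the binarized map over the inclusive box [hs..he] x [ws..we].
template <typename T>
T box_sum(const T* integral, int width, int hs, int ws, int he, int we) {
  T a1 = integral[he * width + we];
  T a2 = (ws - 1 >= 0) ? integral[he * width + (ws - 1)] : T(0);
  T a3 = (hs - 1 >= 0) ? integral[(hs - 1) * width + we] : T(0);
  T a4 = (hs - 1 >= 0 && ws - 1 >= 0) ? integral[(hs - 1) * width + (ws - 1)] : T(0);
  return a1 - a2 - a3 + a4;  // inclusion-exclusion over the four corners
}

The default score then combines the frame and context masses as sum_frame / sqrt(area_frame) - sum_context / sqrt(area_context), which is the area_sqrt branch of the kernel.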
9eab532949a26534c8c1a25cf8030add66112f27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Name: Jin Pyo Jeon // CPSC375 // Times: // B T Time // 100 16 13.84 // 100 32 7.43 // 100 40 6.27 // 100 64 3.86 // 100 128 3.1 // 100 256 2.92 // 100 1024 2.83 // 1000 32 5.99 // 1000 256 2.76 // 1000 512 2.73 // 1000 1024 2.83 // 1024 1024 2.75 // 10000 32 5.78 // 10000 128 2.74 // 10000 200 2.94 // 10000 256 2.56 // 10000 512 2.73 // 10000 1024 2.88 // 32768 126 2.66 // 32768 256 2.65 // 32768 512 2.68 // 65535 32 5.59 // 65535 128 2.64 // 65535 256 2.63 // 65535 400 2.92 // 65535 512 2.69 // 65535 768 3.0 // 65535 1024 3.8 // Discussion: From these experimental value, it seems that the optimal value for block size and // thread size is 10000 blocks and 256 threads per block. Beyond the most optimal, one thing that // is evident is the fact that the optimal number of threads must be divisible by the warp size; // in every instance where the thread number is not divisible by 32, the time suffered compared // to the times adjacent to it. Furthermore, it seems that the size of the number range that // each thread is assigned to does not correlate linearly for the most part . // For example, the B/T pair (65535, 512) and (10000, 128) have similar times // despite the threads of first pair checking only 3 numbers and the latter around 78. // Furthermore, runs with small thread sizes suffered much more significant delay than others, // probably due to the fact that with small thread sizes ( t < 128 ), 8 blocks (per SM) // did not fill out the maximum number of threads possible (2048) and thus failed to fully // use the GPU. #include <stdio.h> #include <stdlib.h> #include <math.h> #define N 100000000 __global__ void testCollatz(long n, long blockNum, long* counterEx) { long numPerBlock = ceil(n * 1.0 / blockNum); long numPerThread = ceil(n * 1.0 / blockNum / blockDim.x); long lowRange = (numPerBlock * blockIdx.x) + (threadIdx.x * numPerThread); long highRange = (numPerBlock * blockIdx.x) + ((threadIdx.x + 1) * numPerThread); long i; for (i = lowRange; i < highRange && i < N; i++) { long temp = i; int iteration = 0; if (temp == 0) continue; while (temp != 1) { iteration++; if (iteration >= 1000) { *counterEx = i; break; } if (temp % 2 == 0) temp = temp / 2; else temp = (3 * temp) + 1; } } } int main(int argc, char**argv){ long B, T; long* h_counterEx, *d_counterEx; if (argc >= 2) { B = strtol(argv[1], NULL, 10); T = strtol(argv[2], NULL, 10); } else { return -1; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); h_counterEx = (long*)malloc(sizeof(long)); *h_counterEx = -1; hipMalloc((void**) &d_counterEx, sizeof(long)); hipMemcpy(d_counterEx, h_counterEx, sizeof(long), hipMemcpyHostToDevice); hipLaunchKernelGGL(( testCollatz), dim3(B),dim3(T), 0, 0, N, B, d_counterEx); hipMemcpy(h_counterEx, d_counterEx, sizeof(long), hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime = -1; hipEventElapsedTime(&elapsedTime,start, stop); if (*h_counterEx == -1) { printf("Verifying %ld took %f s\n", (long) N, elapsedTime / 1000.0); } else { printf("Found a counterexample: %ld\n", *h_counterEx); } hipEventDestroy(start); hipEventDestroy(stop); }
9eab532949a26534c8c1a25cf8030add66112f27.cu
// Name: Jin Pyo Jeon // CPSC375 // Times: // B T Time // 100 16 13.84 // 100 32 7.43 // 100 40 6.27 // 100 64 3.86 // 100 128 3.1 // 100 256 2.92 // 100 1024 2.83 // 1000 32 5.99 // 1000 256 2.76 // 1000 512 2.73 // 1000 1024 2.83 // 1024 1024 2.75 // 10000 32 5.78 // 10000 128 2.74 // 10000 200 2.94 // 10000 256 2.56 // 10000 512 2.73 // 10000 1024 2.88 // 32768 126 2.66 // 32768 256 2.65 // 32768 512 2.68 // 65535 32 5.59 // 65535 128 2.64 // 65535 256 2.63 // 65535 400 2.92 // 65535 512 2.69 // 65535 768 3.0 // 65535 1024 3.8 // Discussion: From these experimental value, it seems that the optimal value for block size and // thread size is 10000 blocks and 256 threads per block. Beyond the most optimal, one thing that // is evident is the fact that the optimal number of threads must be divisible by the warp size; // in every instance where the thread number is not divisible by 32, the time suffered compared // to the times adjacent to it. Furthermore, it seems that the size of the number range that // each thread is assigned to does not correlate linearly for the most part . // For example, the B/T pair (65535, 512) and (10000, 128) have similar times // despite the threads of first pair checking only 3 numbers and the latter around 78. // Furthermore, runs with small thread sizes suffered much more significant delay than others, // probably due to the fact that with small thread sizes ( t < 128 ), 8 blocks (per SM) // did not fill out the maximum number of threads possible (2048) and thus failed to fully // use the GPU. #include <stdio.h> #include <stdlib.h> #include <math.h> #define N 100000000 __global__ void testCollatz(long n, long blockNum, long* counterEx) { long numPerBlock = ceil(n * 1.0 / blockNum); long numPerThread = ceil(n * 1.0 / blockNum / blockDim.x); long lowRange = (numPerBlock * blockIdx.x) + (threadIdx.x * numPerThread); long highRange = (numPerBlock * blockIdx.x) + ((threadIdx.x + 1) * numPerThread); long i; for (i = lowRange; i < highRange && i < N; i++) { long temp = i; int iteration = 0; if (temp == 0) continue; while (temp != 1) { iteration++; if (iteration >= 1000) { *counterEx = i; break; } if (temp % 2 == 0) temp = temp / 2; else temp = (3 * temp) + 1; } } } int main(int argc, char**argv){ long B, T; long* h_counterEx, *d_counterEx; if (argc >= 2) { B = strtol(argv[1], NULL, 10); T = strtol(argv[2], NULL, 10); } else { return -1; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); h_counterEx = (long*)malloc(sizeof(long)); *h_counterEx = -1; cudaMalloc((void**) &d_counterEx, sizeof(long)); cudaMemcpy(d_counterEx, h_counterEx, sizeof(long), cudaMemcpyHostToDevice); testCollatz<<<B,T>>>(N, B, d_counterEx); cudaMemcpy(h_counterEx, d_counterEx, sizeof(long), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime = -1; cudaEventElapsedTime(&elapsedTime,start, stop); if (*h_counterEx == -1) { printf("Verifying %ld took %f s\n", (long) N, elapsedTime / 1000.0); } else { printf("Found a counterexample: %ld\n", *h_counterEx); } cudaEventDestroy(start); cudaEventDestroy(stop); }
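The block/thread timing table above is easier to reason about with the kernel's range split written out: each block owns ceil(N/B) numbers and each thread ceil(N/B/T) of those, so the (65535, 512) run gives every thread roughly 3 numbers while (10000, 128) gives it closer to 80, yet their times are similar. A small host-side sketch (assumed, not part of either file) that reproduces the kernel's index arithmetic:

#include <math.h>
#include <stdio.h>

// Print the half-open range a given (block, thread) pair scans, using the
// same ceil-based split as testCollatz.
static void show_range(long n, long B, long T, long block, long thread) {
  long numPerBlock  = (long)ceil(n * 1.0 / B);
  long numPerThread = (long)ceil(n * 1.0 / B / T);
  long lo = numPerBlock * block + thread * numPerThread;
  long hi = numPerBlock * block + (thread + 1) * numPerThread;
  printf("block %ld thread %ld checks [%ld, %ld)\n", block, thread, lo, hi);
}

int main(void) {
  show_range(100000000L, 65535, 512, 0, 0);  // about 3 numbers per thread
  show_range(100000000L, 10000, 128, 0, 0);  // about 79 numbers per thread
  return 0;
}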
a651a5c4b2f16324bbcecfb82cd23dfa94ffae45.hip
// !!! This is a file automatically generated by hipify!!! #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include <doctest.h> #include <taskflow/taskflow.hpp> #include <taskflow/cuda/cudaflow.hpp> #include <taskflow/cuda/algorithm/find.hpp> // ---------------------------------------------------------------------------- // cuda_find_if // ---------------------------------------------------------------------------- template <typename T> void cuda_find_if() { tf::Taskflow taskflow; tf::Executor executor; for(int n=0; n<=1234567; n = (n<=100) ? n+1 : n*2 + 1) { taskflow.emplace([n](){ tf::cudaStream stream; tf::cudaDefaultExecutionPolicy policy(stream); // gpu data auto gdata = tf::cuda_malloc_shared<T>(n); auto gfind = tf::cuda_malloc_shared<unsigned>(1); // cpu data auto hdata = std::vector<T>(n); // initialize the data for(int i=0; i<n; i++) { T k = rand()% 100; gdata[i] = k; hdata[i] = k; } // -------------------------------------------------------------------------- // GPU find // -------------------------------------------------------------------------- tf::cudaStream s; tf::cudaDefaultExecutionPolicy p(s); tf::cuda_find_if( p, gdata, gdata+n, gfind, []__device__(T v) { return v == (T)50; } ); s.synchronize(); // -------------------------------------------------------------------------- // CPU find // -------------------------------------------------------------------------- auto hiter = std::find_if( hdata.begin(), hdata.end(), [=](T v) { return v == (T)50; } ); // -------------------------------------------------------------------------- // verify the result // -------------------------------------------------------------------------- unsigned hfind = std::distance(hdata.begin(), hiter); REQUIRE(*gfind == hfind); REQUIRE(hipFree(gdata) == hipSuccess); REQUIRE(hipFree(gfind) == hipSuccess); }); } executor.run(taskflow).wait(); } TEST_CASE("cuda_find_if.int" * doctest::timeout(300)) { cuda_find_if<int>(); } TEST_CASE("cuda_find_if.float" * doctest::timeout(300)) { cuda_find_if<float>(); }
a651a5c4b2f16324bbcecfb82cd23dfa94ffae45.cu
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN #include <doctest.h> #include <taskflow/taskflow.hpp> #include <taskflow/cuda/cudaflow.hpp> #include <taskflow/cuda/algorithm/find.hpp> // ---------------------------------------------------------------------------- // cuda_find_if // ---------------------------------------------------------------------------- template <typename T> void cuda_find_if() { tf::Taskflow taskflow; tf::Executor executor; for(int n=0; n<=1234567; n = (n<=100) ? n+1 : n*2 + 1) { taskflow.emplace([n](){ tf::cudaStream stream; tf::cudaDefaultExecutionPolicy policy(stream); // gpu data auto gdata = tf::cuda_malloc_shared<T>(n); auto gfind = tf::cuda_malloc_shared<unsigned>(1); // cpu data auto hdata = std::vector<T>(n); // initialize the data for(int i=0; i<n; i++) { T k = rand()% 100; gdata[i] = k; hdata[i] = k; } // -------------------------------------------------------------------------- // GPU find // -------------------------------------------------------------------------- tf::cudaStream s; tf::cudaDefaultExecutionPolicy p(s); tf::cuda_find_if( p, gdata, gdata+n, gfind, []__device__(T v) { return v == (T)50; } ); s.synchronize(); // -------------------------------------------------------------------------- // CPU find // -------------------------------------------------------------------------- auto hiter = std::find_if( hdata.begin(), hdata.end(), [=](T v) { return v == (T)50; } ); // -------------------------------------------------------------------------- // verify the result // -------------------------------------------------------------------------- unsigned hfind = std::distance(hdata.begin(), hiter); REQUIRE(*gfind == hfind); REQUIRE(cudaFree(gdata) == cudaSuccess); REQUIRE(cudaFree(gfind) == cudaSuccess); }); } executor.run(taskflow).wait(); } TEST_CASE("cuda_find_if.int" * doctest::timeout(300)) { cuda_find_if<int>(); } TEST_CASE("cuda_find_if.float" * doctest::timeout(300)) { cuda_find_if<float>(); }
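The doctest above sweeps problem sizes and checks tf::cuda_find_if against std::find_if. Stripped of the test harness, the call pattern it exercises looks roughly like the sketch below (assumed usage, built only from the calls visible in the file; compiling the __device__ lambda still requires nvcc's extended-lambda support):

#include <taskflow/cuda/cudaflow.hpp>
#include <taskflow/cuda/algorithm/find.hpp>

int main() {
  const int n = 1000;
  auto data = tf::cuda_malloc_shared<int>(n);       // host-visible unified memory
  auto pos  = tf::cuda_malloc_shared<unsigned>(1);  // receives the found index
  for (int i = 0; i < n; i++) data[i] = i;

  tf::cudaStream stream;
  tf::cudaDefaultExecutionPolicy policy(stream);
  tf::cuda_find_if(policy, data, data + n, pos,
                   [] __device__ (int v) { return v == 50; });
  stream.synchronize();  // *pos == 50 here; the test compares it to the
                         // std::distance result of std::find_if, so a miss maps to n

  cudaFree(data);
  cudaFree(pos);
}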
8d74cac923d37438f6581fccd1189558906e39e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> // inform that the function should run on device instead of the host __global__ void add_kernel(int a, int b, int *c) { *c = a + b; } int main(int argc, char *argv[]) { // get device info int count; hipGetDeviceCount(&count); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); std::cout << "Device Count: " << count << "\n"; std::cout << "Name: " << prop.name << "\n"; std::cout << "\t mem: " << prop.maxThreadsPerBlock << "\n"; int c; int *dev_c; // allocate memory on the device // returned pointer should not be dereferenced hipMalloc((void**)&dev_c, sizeof(int)); // used to send device code to device compiler // angle brackets denote arguments we plan to pass for the device hipLaunchKernelGGL(( add_kernel), dim3(1), dim3(1) , 0, 0, 2, 7, dev_c); hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost); std::cout << "SUM: " << c << "\n"; hipFree(dev_c); std::cout << "HELLO WORLD!" << "\n"; return 0; }
8d74cac923d37438f6581fccd1189558906e39e5.cu
#include <iostream> // inform that the function should run on device instead of the host __global__ void add_kernel(int a, int b, int *c) { *c = a + b; } int main(int argc, char *argv[]) { // get device info int count; cudaGetDeviceCount(&count); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); std::cout << "Device Count: " << count << "\n"; std::cout << "Name: " << prop.name << "\n"; std::cout << "\t mem: " << prop.maxThreadsPerBlock << "\n"; int c; int *dev_c; // allocate memory on the device // returned pointer should not be dereferenced cudaMalloc((void**)&dev_c, sizeof(int)); // used to send device code to device compiler // angle brackets denote arguments we plan to pass for the device add_kernel<<< 1, 1 >>> (2, 7, dev_c); cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); std::cout << "SUM: " << c << "\n"; cudaFree(dev_c); std::cout << "HELLO WORLD!" << "\n"; return 0; }
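Neither version above checks whether the launch or the copy succeeded, so a failure would silently print an uninitialized sum. A minimal sketch of launch checking (assuming the same c and dev_c declared in main(); this replaces only the launch-and-copy lines):

add_kernel<<<1, 1>>>(2, 7, dev_c);
cudaError_t err = cudaGetLastError();  // reports launch/configuration errors
if (err != cudaSuccess) {
  std::cerr << "launch failed: " << cudaGetErrorString(err) << "\n";
}
err = cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);  // blocks until the kernel is done
if (err != cudaSuccess) {
  std::cerr << "copy failed: " << cudaGetErrorString(err) << "\n";
}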
19b459059971bb0fa1953947e50a2580f7d95f27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef RELU_LAYER_CUH_ #define RELU_LAYER_CUH_ #include <assert.h> #include "basics/layer.hpp" #include "basics/tensor.cu" #include "basics/session.hpp" #define BLOCKDIM 32 namespace ReluGPUKernels { template<class Dtype> __global__ void ForwardGPU3(Tensor<Dtype> * bottom, Tensor<Dtype> * top) { int idx = blockDim.x * blockIdx.x + threadIdx.x; Dtype* b_data = bottom->GetDataPtr(); Dtype* t_data = top->GetDataPtr(); if(idx < 0 || idx >= bottom->size()) return; t_data[idx] = (b_data[idx] >= 0 ? b_data[idx] : 0); } template <class Dtype> __global__ void ForwardGPU(Tensor<Dtype> * bottom, Tensor<Dtype> * top) { size_t size = bottom->size(); hipLaunchKernelGGL(( ReluGPUKernels::ForwardGPU3<Dtype>), dim3(size/(BLOCKDIM*BLOCKDIM) + 1), dim3(BLOCKDIM*BLOCKDIM), 0, 0, bottom, top); } template <class Dtype> __global__ void BackwardGPU(Tensor<Dtype>* top, Tensor<Dtype>* top_diff, Tensor<Dtype>* bottom, Tensor<Dtype>* bottom_diff) { int batch_idx = threadIdx.x; for (int h = 0; h < top->GetDims()[1]; ++h) { for (int w = 0; w < top->GetDims()[2]; ++w) { for (int c = 0; c < top->GetDims()[3]; ++c) { if (bottom->at(batch_idx,h,w,c) <= 0) { bottom_diff->at(batch_idx,h,w,c) = 0; } else { bottom_diff->at(batch_idx,h,w,c) = top_diff->at(batch_idx,h,w,c); } } } } } } template <class Dtype> class Relu: public Layer<Dtype> { public: void GetTopsDims(const std::vector<size_t*> &bottoms_dims, const std::vector<size_t*> &tops_dims); void Forward(const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &tops); void Backward(const std::vector<Tensor<Dtype>*> &tops, const std::vector<Tensor<Dtype>*> &tops_diff, const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &bottoms_diff); }; template<class Dtype> void Relu<Dtype>::GetTopsDims(const std::vector<size_t*> &bottoms_dims, const std::vector<size_t*> &tops_dims) { assert(bottoms_dims.size()); assert(tops_dims.size()); size_t * b_dims = bottoms_dims[0]; size_t * t_dims = tops_dims[0]; t_dims[0] = b_dims[0]; t_dims[1] = b_dims[1]; t_dims[2] = b_dims[2]; t_dims[3] = b_dims[3]; } template<class Dtype> void Relu<Dtype>::Forward(const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &tops) { assert(bottoms.size()==1); assert(tops.size()==1); Tensor<Dtype> * bottom = bottoms[0]; Tensor<Dtype> * top = tops[0]; if (Session::GetSession()->gpu) { hipLaunchKernelGGL(( ReluGPUKernels::ForwardGPU), dim3(1),dim3(1), 0, 0, bottom, top); } else { for(int b = 0; b < bottom->GetDims()[0]; b++) { for(int o = 0; o < bottom->GetDims()[3]; o++) { for(int x = 0; x < bottom->GetDims()[2]; x += 1) { for(int y = 0; y < bottom->GetDims()[1]; y += 1) { Dtype val = bottom->at(b, y, x, o); top->at(b, y, x, o) = (val >= 0 ? 
val : 0); } } } } } } template <class Dtype> void Relu<Dtype>::Backward(const std::vector<Tensor<Dtype>*> &tops, const std::vector<Tensor<Dtype>*> &tops_diff, const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &bottoms_diff) { assert(tops.size() == 1); assert(tops_diff.size() == 1); assert(bottoms.size() == 1); assert(bottoms_diff.size() == 1); Tensor<Dtype>* top = tops[0]; Tensor<Dtype>* top_diff = tops_diff[0]; Tensor<Dtype>* bottom = bottoms[0]; Tensor<Dtype>* bottom_diff = bottoms_diff[0]; Session* S = Session::GetSession(); int batch_size = S->batch_size; if (S->gpu) { hipLaunchKernelGGL(( ReluGPUKernels::BackwardGPU<Dtype>), dim3(1),dim3(batch_size), 0, 0, top, top_diff, bottom, bottom_diff); } else { for (int b = 0; b < top->GetDims()[0]; ++b) { for (int h = 0; h < top->GetDims()[1]; ++h) { for (int w = 0; w < top->GetDims()[2]; ++w) { for (int c = 0; c < top->GetDims()[3]; ++c) { if (bottom->at(b,h,w,c) <= 0) { bottom_diff->at(b,h,w,c) = 0; } else { bottom_diff->at(b,h,w,c) = top_diff->at(b,h,w,c); } } } } } } } #endif
19b459059971bb0fa1953947e50a2580f7d95f27.cu
#ifndef RELU_LAYER_CUH_ #define RELU_LAYER_CUH_ #include <assert.h> #include "basics/layer.hpp" #include "basics/tensor.cu" #include "basics/session.hpp" #define BLOCKDIM 32 namespace ReluGPUKernels { template<class Dtype> __global__ void ForwardGPU3(Tensor<Dtype> * bottom, Tensor<Dtype> * top) { int idx = blockDim.x * blockIdx.x + threadIdx.x; Dtype* b_data = bottom->GetDataPtr(); Dtype* t_data = top->GetDataPtr(); if(idx < 0 || idx >= bottom->size()) return; t_data[idx] = (b_data[idx] >= 0 ? b_data[idx] : 0); } template <class Dtype> __global__ void ForwardGPU(Tensor<Dtype> * bottom, Tensor<Dtype> * top) { size_t size = bottom->size(); ReluGPUKernels::ForwardGPU3<Dtype><<<size/(BLOCKDIM*BLOCKDIM) + 1, BLOCKDIM*BLOCKDIM>>>(bottom, top); } template <class Dtype> __global__ void BackwardGPU(Tensor<Dtype>* top, Tensor<Dtype>* top_diff, Tensor<Dtype>* bottom, Tensor<Dtype>* bottom_diff) { int batch_idx = threadIdx.x; for (int h = 0; h < top->GetDims()[1]; ++h) { for (int w = 0; w < top->GetDims()[2]; ++w) { for (int c = 0; c < top->GetDims()[3]; ++c) { if (bottom->at(batch_idx,h,w,c) <= 0) { bottom_diff->at(batch_idx,h,w,c) = 0; } else { bottom_diff->at(batch_idx,h,w,c) = top_diff->at(batch_idx,h,w,c); } } } } } } template <class Dtype> class Relu: public Layer<Dtype> { public: void GetTopsDims(const std::vector<size_t*> &bottoms_dims, const std::vector<size_t*> &tops_dims); void Forward(const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &tops); void Backward(const std::vector<Tensor<Dtype>*> &tops, const std::vector<Tensor<Dtype>*> &tops_diff, const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &bottoms_diff); }; template<class Dtype> void Relu<Dtype>::GetTopsDims(const std::vector<size_t*> &bottoms_dims, const std::vector<size_t*> &tops_dims) { assert(bottoms_dims.size()); assert(tops_dims.size()); size_t * b_dims = bottoms_dims[0]; size_t * t_dims = tops_dims[0]; t_dims[0] = b_dims[0]; t_dims[1] = b_dims[1]; t_dims[2] = b_dims[2]; t_dims[3] = b_dims[3]; } template<class Dtype> void Relu<Dtype>::Forward(const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &tops) { assert(bottoms.size()==1); assert(tops.size()==1); Tensor<Dtype> * bottom = bottoms[0]; Tensor<Dtype> * top = tops[0]; if (Session::GetSession()->gpu) { ReluGPUKernels::ForwardGPU<<<1,1>>>(bottom, top); } else { for(int b = 0; b < bottom->GetDims()[0]; b++) { for(int o = 0; o < bottom->GetDims()[3]; o++) { for(int x = 0; x < bottom->GetDims()[2]; x += 1) { for(int y = 0; y < bottom->GetDims()[1]; y += 1) { Dtype val = bottom->at(b, y, x, o); top->at(b, y, x, o) = (val >= 0 ? 
val : 0); } } } } } } template <class Dtype> void Relu<Dtype>::Backward(const std::vector<Tensor<Dtype>*> &tops, const std::vector<Tensor<Dtype>*> &tops_diff, const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &bottoms_diff) { assert(tops.size() == 1); assert(tops_diff.size() == 1); assert(bottoms.size() == 1); assert(bottoms_diff.size() == 1); Tensor<Dtype>* top = tops[0]; Tensor<Dtype>* top_diff = tops_diff[0]; Tensor<Dtype>* bottom = bottoms[0]; Tensor<Dtype>* bottom_diff = bottoms_diff[0]; Session* S = Session::GetSession(); int batch_size = S->batch_size; if (S->gpu) { ReluGPUKernels::BackwardGPU<Dtype><<<1,batch_size>>>(top, top_diff, bottom, bottom_diff); } else { for (int b = 0; b < top->GetDims()[0]; ++b) { for (int h = 0; h < top->GetDims()[1]; ++h) { for (int w = 0; w < top->GetDims()[2]; ++w) { for (int c = 0; c < top->GetDims()[3]; ++c) { if (bottom->at(b,h,w,c) <= 0) { bottom_diff->at(b,h,w,c) = 0; } else { bottom_diff->at(b,h,w,c) = top_diff->at(b,h,w,c); } } } } } } } #endif
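Both Backward paths above implement the same elementwise rule: the upstream gradient passes through wherever the forward input was positive and is zeroed elsewhere. A tiny CPU reference of that rule (hypothetical, not part of the layer; flattened over all n*h*w*c elements):

#include <cstddef>
#include <vector>

std::vector<float> relu_backward_ref(const std::vector<float>& bottom,
                                     const std::vector<float>& top_diff) {
  std::vector<float> bottom_diff(bottom.size());
  for (std::size_t i = 0; i < bottom.size(); ++i)
    bottom_diff[i] = (bottom[i] > 0.f) ? top_diff[i] : 0.f;  // mask by the forward input
  return bottom_diff;
}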
3da0f9f9dfe5875e6d66355a88e7afe85cdb49f5.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2017-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the license found in the * LICENSE file in the root directory of this source tree. */ #include "edit_dist.h" #include <THH/THH.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <utility> // std::pair template <typename scalar_t> __global__ void generate_deletion_label_kernel( const scalar_t* __restrict__ source, const size_t source_size, const size_t operation_size, int* __restrict__ operations, int* __restrict__ labels) { const int index = blockIdx.x; const int offset = index * operation_size; const int offset_label = index * source_size; for (int i = 0; i < source_size; i++) { labels[offset_label + i] = 0; } int k = 0; for (int i = 0; i < operation_size; i++){ if (operations[offset + i] == 0){ break; } else if (operations[offset + i] == 1){ continue; } else { labels[offset_label + k] = 3 - operations[offset + i]; k++; } } } template <typename scalar_t> __global__ void generate_insertion_label_kernel( const scalar_t* __restrict__ target, const size_t target_size, const size_t operation_size, int* __restrict__ operations, int* __restrict__ labels, int* __restrict__ masks) { const int index = blockIdx.x; const int offset = index * operation_size; const int offset_label = index * target_size; int k = 0; int u = 0; int m = 0; for (int i = 0; i < target_size; i++) { labels[offset_label + i] = 0; masks[offset_label + i] = 0; } for (int i = 0; i < operation_size-1; i++){ if (operations[offset + i] == 0){ break; } else if (operations[offset + i] == 2){ continue; } else if (operations[offset + i] == 1){ masks[offset_label + m] = 1; u++; m++; } else { labels[offset_label + k] = u; masks[offset_label + m] = 0; k++; m++; u = 0; } } } template <typename scalar_t> __global__ void levenshtein_distance_kernel( const scalar_t* __restrict__ source, const scalar_t* __restrict__ target, const int* __restrict__ source_length, const int* __restrict__ target_length, const size_t source_size, const size_t target_size, int* __restrict__ operations, int* __restrict__ errors_curr) { const int index = blockIdx.x; const int offset = index * (source_size + target_size); const int d = index * (source_size + 1) * (target_size + 1); const int t = target_size + 1; auto err_idx = [d, t](int i, int j) { return d + i * t + j; }; auto opt_idx = [offset](int k) { return offset + k; }; const int hyp_len = source_length[index]; const int ref_len = target_length[index]; const scalar_t* hyp_begin = source + index * source_size; const scalar_t* ref_begin = target + index * target_size; // dynamic programming for (int i = 0; i <= hyp_len; i++){ errors_curr[err_idx(i, 0)] = i; } for (int j = 0; j <= ref_len; j++){ errors_curr[err_idx(0, j)] = j; } for (int i = 1; i <= hyp_len; i++){ for (int j = 1; j <= ref_len; j++){ errors_curr[err_idx(i, j)] = min( min( errors_curr[err_idx(i-1, j)], errors_curr[err_idx(i, j-1)] ) + 1, errors_curr[err_idx(i-1, j-1)] + 2 * ( *(hyp_begin+i-1) == *(ref_begin+j-1) ? 
0 : 1 ) ); } } // back-tracing int i = hyp_len; int j = ref_len; int o = hyp_len + ref_len; for (int k = 0; k < source_size + target_size; k++) { operations[opt_idx(k)] = 0; } while ((i >= 0) && (j >= 0)) { if ((i == 0) && (j == 0)) { break; } if ((j > 0) && (errors_curr[err_idx(i, j-1)] < errors_curr[err_idx(i, j)])) { o--; operations[opt_idx(o)] = 1; j--; // insertion } else if ((i > 0) && (errors_curr[err_idx(i-1, j)] < errors_curr[err_idx(i, j)])) { o--; operations[opt_idx(o)] = 2; i--; // deletion } else { o--; operations[opt_idx(o)] = 3; i--; j--; // do nothing } } // moving to the left for (int k = 0; k < hyp_len + ref_len; k++) { if (k + o < hyp_len + ref_len){ operations[opt_idx(k)] = operations[opt_idx(k+o)]; } else{ operations[opt_idx(k)] = 0; // padding } } } template <typename scalar_t> __global__ void faster_levenshtein_distance_kernel( const scalar_t* __restrict__ source, const scalar_t* __restrict__ target, const int* __restrict__ source_length, const int* __restrict__ target_length, const size_t source_size, const size_t target_size, int* __restrict__ operations) { extern __shared__ short errors[]; auto errors_curr = errors; const int index = blockIdx.x; const int offset = index * (source_size + target_size); const int t = target_size + 1; auto err_idx = [t](int i, int j) { return i * t + j; }; auto opt_idx = [offset](int k) { return offset + k; }; const int hyp_len = source_length[index]; const int ref_len = target_length[index]; const scalar_t* hyp_begin = source + index * source_size; const scalar_t* ref_begin = target + index * target_size; // dynamic programming for (int i = 0; i <= hyp_len; i++){ errors_curr[err_idx(i, 0)] = i; } for (int j = 0; j <= ref_len; j++){ errors_curr[err_idx(0, j)] = j; } for (int i = 1; i <= hyp_len; i++){ for (int j = 1; j <= ref_len; j++){ errors_curr[err_idx(i, j)] = min( min( errors_curr[err_idx(i-1, j)], errors_curr[err_idx(i, j-1)] ) + 1, errors_curr[err_idx(i-1, j-1)] + 2 * ( *(hyp_begin+i-1) == *(ref_begin+j-1) ? 
0 : 1 ) ); } } // back-tracing int i = hyp_len; int j = ref_len; int o = hyp_len + ref_len; for (int k = 0; k < source_size + target_size; k++) { operations[opt_idx(k)] = 0; } while ((i >= 0) && (j >= 0)) { if ((i == 0) && (j == 0)) { break; } if ((j > 0) && (errors_curr[err_idx(i, j-1)] < errors_curr[err_idx(i, j)])) { o--; operations[opt_idx(o)] = 1; j--; // insertion } else if ((i > 0) && (errors_curr[err_idx(i-1, j)] < errors_curr[err_idx(i, j)])) { o--; operations[opt_idx(o)] = 2; i--; // deletion } else { o--; operations[opt_idx(o)] = 3; i--; j--; // do nothing } } // moving to the left for (int k = 0; k < hyp_len + ref_len; k++) { if (k + o < hyp_len + ref_len){ operations[opt_idx(k)] = operations[opt_idx(k+o)]; } else{ operations[opt_idx(k)] = 0; // padding } } } torch::Tensor GenerateDeletionLabelCuda( torch::Tensor source, torch::Tensor operations) { const auto batch_size = source.size(0); at::TensorOptions options(source.device()); options = options.dtype(at::ScalarType::Int); auto labels = torch::empty({batch_size, source.size(1)}, options); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(source.device().index()); AT_DISPATCH_ALL_TYPES(source.scalar_type(), "generate_deletion_labels", ([&] { hipLaunchKernelGGL(( generate_deletion_label_kernel<scalar_t>), dim3(batch_size), dim3(1), 0, stream, source.data<scalar_t>(), source.size(1), operations.size(1), operations.data<int>(), labels.data<int>()); })); return labels; } std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda( torch::Tensor target, torch::Tensor operations) { const auto batch_size = target.size(0); at::TensorOptions options(target.device()); options = options.dtype(at::ScalarType::Int); auto labels = torch::empty({batch_size, target.size(1)}, options); auto masks = torch::empty({batch_size, target.size(1)}, options); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(target.device().index()); AT_DISPATCH_ALL_TYPES(target.scalar_type(), "generate_insertion_labels", ([&] { hipLaunchKernelGGL(( generate_insertion_label_kernel<scalar_t>), dim3(batch_size), dim3(1), 0, stream, target.data<scalar_t>(), target.size(1), operations.size(1), operations.data<int>(), labels.data<int>(), masks.data<int>()); })); return std::make_pair(labels, masks); } torch::Tensor LevenshteinDistanceCuda( torch::Tensor source, torch::Tensor target, torch::Tensor source_length, torch::Tensor target_length) { const auto batch_size = source.size(0); const auto shared_size = (source.size(1) + 1) * (target.size(1) + 1) * sizeof(short); at::TensorOptions options(source.device()); options = options.dtype(at::ScalarType::Int); auto operations = torch::empty({batch_size, source.size(1) + target.size(1)}, options); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(source.device().index()); if (shared_size > 40000) { auto distances = torch::empty({batch_size, (source.size(1) + 1) * (target.size(1) + 1)}, options); AT_DISPATCH_ALL_TYPES(source.scalar_type(), "levenshtein_distance", ([&] { hipLaunchKernelGGL(( levenshtein_distance_kernel<scalar_t>), dim3(batch_size), dim3(1), 0, stream, source.data<scalar_t>(), target.data<scalar_t>(), source_length.data<int>(), target_length.data<int>(), source.size(1), target.size(1), operations.data<int>(), distances.data<int>()); })); } else { AT_DISPATCH_ALL_TYPES(source.scalar_type(), "faster_levenshtein_distance", ([&] { hipLaunchKernelGGL(( faster_levenshtein_distance_kernel<scalar_t>), dim3(batch_size), dim3(1), shared_size, stream, source.data<scalar_t>(), 
target.data<scalar_t>(), source_length.data<int>(), target_length.data<int>(), source.size(1), target.size(1), operations.data<int>()); })); } return operations; }
3da0f9f9dfe5875e6d66355a88e7afe85cdb49f5.cu
/** * Copyright 2017-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the license found in the * LICENSE file in the root directory of this source tree. */ #include "edit_dist.h" #include <THC/THC.h> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <utility> // std::pair template <typename scalar_t> __global__ void generate_deletion_label_kernel( const scalar_t* __restrict__ source, const size_t source_size, const size_t operation_size, int* __restrict__ operations, int* __restrict__ labels) { const int index = blockIdx.x; const int offset = index * operation_size; const int offset_label = index * source_size; for (int i = 0; i < source_size; i++) { labels[offset_label + i] = 0; } int k = 0; for (int i = 0; i < operation_size; i++){ if (operations[offset + i] == 0){ break; } else if (operations[offset + i] == 1){ continue; } else { labels[offset_label + k] = 3 - operations[offset + i]; k++; } } } template <typename scalar_t> __global__ void generate_insertion_label_kernel( const scalar_t* __restrict__ target, const size_t target_size, const size_t operation_size, int* __restrict__ operations, int* __restrict__ labels, int* __restrict__ masks) { const int index = blockIdx.x; const int offset = index * operation_size; const int offset_label = index * target_size; int k = 0; int u = 0; int m = 0; for (int i = 0; i < target_size; i++) { labels[offset_label + i] = 0; masks[offset_label + i] = 0; } for (int i = 0; i < operation_size-1; i++){ if (operations[offset + i] == 0){ break; } else if (operations[offset + i] == 2){ continue; } else if (operations[offset + i] == 1){ masks[offset_label + m] = 1; u++; m++; } else { labels[offset_label + k] = u; masks[offset_label + m] = 0; k++; m++; u = 0; } } } template <typename scalar_t> __global__ void levenshtein_distance_kernel( const scalar_t* __restrict__ source, const scalar_t* __restrict__ target, const int* __restrict__ source_length, const int* __restrict__ target_length, const size_t source_size, const size_t target_size, int* __restrict__ operations, int* __restrict__ errors_curr) { const int index = blockIdx.x; const int offset = index * (source_size + target_size); const int d = index * (source_size + 1) * (target_size + 1); const int t = target_size + 1; auto err_idx = [d, t](int i, int j) { return d + i * t + j; }; auto opt_idx = [offset](int k) { return offset + k; }; const int hyp_len = source_length[index]; const int ref_len = target_length[index]; const scalar_t* hyp_begin = source + index * source_size; const scalar_t* ref_begin = target + index * target_size; // dynamic programming for (int i = 0; i <= hyp_len; i++){ errors_curr[err_idx(i, 0)] = i; } for (int j = 0; j <= ref_len; j++){ errors_curr[err_idx(0, j)] = j; } for (int i = 1; i <= hyp_len; i++){ for (int j = 1; j <= ref_len; j++){ errors_curr[err_idx(i, j)] = min( min( errors_curr[err_idx(i-1, j)], errors_curr[err_idx(i, j-1)] ) + 1, errors_curr[err_idx(i-1, j-1)] + 2 * ( *(hyp_begin+i-1) == *(ref_begin+j-1) ? 
0 : 1 ) ); } } // back-tracing int i = hyp_len; int j = ref_len; int o = hyp_len + ref_len; for (int k = 0; k < source_size + target_size; k++) { operations[opt_idx(k)] = 0; } while ((i >= 0) && (j >= 0)) { if ((i == 0) && (j == 0)) { break; } if ((j > 0) && (errors_curr[err_idx(i, j-1)] < errors_curr[err_idx(i, j)])) { o--; operations[opt_idx(o)] = 1; j--; // insertion } else if ((i > 0) && (errors_curr[err_idx(i-1, j)] < errors_curr[err_idx(i, j)])) { o--; operations[opt_idx(o)] = 2; i--; // deletion } else { o--; operations[opt_idx(o)] = 3; i--; j--; // do nothing } } // moving to the left for (int k = 0; k < hyp_len + ref_len; k++) { if (k + o < hyp_len + ref_len){ operations[opt_idx(k)] = operations[opt_idx(k+o)]; } else{ operations[opt_idx(k)] = 0; // padding } } } template <typename scalar_t> __global__ void faster_levenshtein_distance_kernel( const scalar_t* __restrict__ source, const scalar_t* __restrict__ target, const int* __restrict__ source_length, const int* __restrict__ target_length, const size_t source_size, const size_t target_size, int* __restrict__ operations) { extern __shared__ short errors[]; auto errors_curr = errors; const int index = blockIdx.x; const int offset = index * (source_size + target_size); const int t = target_size + 1; auto err_idx = [t](int i, int j) { return i * t + j; }; auto opt_idx = [offset](int k) { return offset + k; }; const int hyp_len = source_length[index]; const int ref_len = target_length[index]; const scalar_t* hyp_begin = source + index * source_size; const scalar_t* ref_begin = target + index * target_size; // dynamic programming for (int i = 0; i <= hyp_len; i++){ errors_curr[err_idx(i, 0)] = i; } for (int j = 0; j <= ref_len; j++){ errors_curr[err_idx(0, j)] = j; } for (int i = 1; i <= hyp_len; i++){ for (int j = 1; j <= ref_len; j++){ errors_curr[err_idx(i, j)] = min( min( errors_curr[err_idx(i-1, j)], errors_curr[err_idx(i, j-1)] ) + 1, errors_curr[err_idx(i-1, j-1)] + 2 * ( *(hyp_begin+i-1) == *(ref_begin+j-1) ? 
0 : 1 ) ); } } // back-tracing int i = hyp_len; int j = ref_len; int o = hyp_len + ref_len; for (int k = 0; k < source_size + target_size; k++) { operations[opt_idx(k)] = 0; } while ((i >= 0) && (j >= 0)) { if ((i == 0) && (j == 0)) { break; } if ((j > 0) && (errors_curr[err_idx(i, j-1)] < errors_curr[err_idx(i, j)])) { o--; operations[opt_idx(o)] = 1; j--; // insertion } else if ((i > 0) && (errors_curr[err_idx(i-1, j)] < errors_curr[err_idx(i, j)])) { o--; operations[opt_idx(o)] = 2; i--; // deletion } else { o--; operations[opt_idx(o)] = 3; i--; j--; // do nothing } } // moving to the left for (int k = 0; k < hyp_len + ref_len; k++) { if (k + o < hyp_len + ref_len){ operations[opt_idx(k)] = operations[opt_idx(k+o)]; } else{ operations[opt_idx(k)] = 0; // padding } } } torch::Tensor GenerateDeletionLabelCuda( torch::Tensor source, torch::Tensor operations) { const auto batch_size = source.size(0); at::TensorOptions options(source.device()); options = options.dtype(at::ScalarType::Int); auto labels = torch::empty({batch_size, source.size(1)}, options); auto stream = at::cuda::getCurrentCUDAStream(source.device().index()); AT_DISPATCH_ALL_TYPES(source.scalar_type(), "generate_deletion_labels", ([&] { generate_deletion_label_kernel<scalar_t><<<batch_size, 1, 0, stream>>>( source.data<scalar_t>(), source.size(1), operations.size(1), operations.data<int>(), labels.data<int>()); })); return labels; } std::pair<torch::Tensor, torch::Tensor> GenerateInsertionLabelCuda( torch::Tensor target, torch::Tensor operations) { const auto batch_size = target.size(0); at::TensorOptions options(target.device()); options = options.dtype(at::ScalarType::Int); auto labels = torch::empty({batch_size, target.size(1)}, options); auto masks = torch::empty({batch_size, target.size(1)}, options); auto stream = at::cuda::getCurrentCUDAStream(target.device().index()); AT_DISPATCH_ALL_TYPES(target.scalar_type(), "generate_insertion_labels", ([&] { generate_insertion_label_kernel<scalar_t><<<batch_size, 1, 0, stream>>>( target.data<scalar_t>(), target.size(1), operations.size(1), operations.data<int>(), labels.data<int>(), masks.data<int>()); })); return std::make_pair(labels, masks); } torch::Tensor LevenshteinDistanceCuda( torch::Tensor source, torch::Tensor target, torch::Tensor source_length, torch::Tensor target_length) { const auto batch_size = source.size(0); const auto shared_size = (source.size(1) + 1) * (target.size(1) + 1) * sizeof(short); at::TensorOptions options(source.device()); options = options.dtype(at::ScalarType::Int); auto operations = torch::empty({batch_size, source.size(1) + target.size(1)}, options); auto stream = at::cuda::getCurrentCUDAStream(source.device().index()); if (shared_size > 40000) { auto distances = torch::empty({batch_size, (source.size(1) + 1) * (target.size(1) + 1)}, options); AT_DISPATCH_ALL_TYPES(source.scalar_type(), "levenshtein_distance", ([&] { levenshtein_distance_kernel<scalar_t><<<batch_size, 1, 0, stream>>>( source.data<scalar_t>(), target.data<scalar_t>(), source_length.data<int>(), target_length.data<int>(), source.size(1), target.size(1), operations.data<int>(), distances.data<int>()); })); } else { AT_DISPATCH_ALL_TYPES(source.scalar_type(), "faster_levenshtein_distance", ([&] { faster_levenshtein_distance_kernel<scalar_t><<<batch_size, 1, shared_size, stream>>>( source.data<scalar_t>(), target.data<scalar_t>(), source_length.data<int>(), target_length.data<int>(), source.size(1), target.size(1), operations.data<int>()); })); } return operations; }
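Both kernels above fill the classic edit-distance DP table, but with a substitution cost of 2 so that a mismatch is never cheaper than a deletion plus an insertion; the back-tracing then records insertions (1), deletions (2), and matches/substitutions (3) in that order of preference. A hypothetical CPU reference of the same recurrence (not part of edit_dist.cu):

#include <algorithm>
#include <vector>

int levenshtein_ref(const std::vector<int>& hyp, const std::vector<int>& ref) {
  const int H = static_cast<int>(hyp.size());
  const int R = static_cast<int>(ref.size());
  std::vector<std::vector<int>> e(H + 1, std::vector<int>(R + 1, 0));
  for (int i = 0; i <= H; ++i) e[i][0] = i;  // delete everything
  for (int j = 0; j <= R; ++j) e[0][j] = j;  // insert everything
  for (int i = 1; i <= H; ++i)
    for (int j = 1; j <= R; ++j)
      e[i][j] = std::min(std::min(e[i - 1][j], e[i][j - 1]) + 1,  // delete / insert
                         e[i - 1][j - 1] + 2 * (hyp[i - 1] == ref[j - 1] ? 0 : 1));
  return e[H][R];
}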
f824d9adf2c835baa5688716dbf94b5b4edd34a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void matrixMul(int *a, int *b, int *c, int ROW, int COLUMNS, int temp) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < COLUMNS && row < ROW) { for(int i = 0; i < temp; i++) { sum += a[row * temp + i] * b[i * COLUMNS + col]; } c[row * COLUMNS + col] = sum; } }
f824d9adf2c835baa5688716dbf94b5b4edd34a6.cu
#include "includes.h" __global__ void matrixMul(int *a, int *b, int *c, int ROW, int COLUMNS, int temp) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if( col < COLUMNS && row < ROW) { for(int i = 0; i < temp; i++) { sum += a[row * temp + i] * b[i * COLUMNS + col]; } c[row * COLUMNS + col] = sum; } }
dac5f5ce9507c2cf06b22343c2bb199c05bea964.hip
// !!! This is a file automatically generated by hipify!!! #include "cross_structs.cuh" #include "cross_kernal_v2.cuh" #include "cuda_utils.cuh" #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> #include <cmath> #include <hip/hip_complex.h> #include <thrust/complex.h> #include <hipcub/hipcub.hpp> #include "defines.h" __constant__ cross_section_data cross_constants; //Computing temperature and pressure __device__ double humlick(double x, double y){ thrust::complex<double> T = thrust::complex<double>(y, -x); thrust::complex<double> humlic1; // double complex T = y - x*I; double S = fabs(x) + y; if (S >= 15) { // Region I humlic1 = T*0.5641896/(0.5+T*T); //fprintf(stdout, "I"); }else if (S >= 5.5) { // Region II thrust::complex<double> U = T * T; humlic1 = T * (1.410474 + U*.5641896)/(.75 + U*(3.+U)); //fprintf(stdout, "II"); }else if (y >= 0.195 * fabs(x) - 0.176) { // Region III humlic1 = (16.4955+T*(20.20933+T*(11.96482 +T*(3.778987+T*.5642236)))) / (16.4955+T*(38.82363 +T*(39.27121+T*(21.69274+T*(6.699398+T))))); //fprintf(stdout, "III"); }else{ // Region IV thrust::complex<double> U = T * T; //double complex humlic1; humlic1 = thrust::exp(U)-T*(36183.31-U*(3321.9905-U*(1540.787-U*(219.0313-U* (35.76683-U*(1.320522-U*.56419))))))/(32066.6-U*(24322.84-U* (9022.228-U*(2186.181-U*(364.2191-U*(61.57037-U*(1.841439-U))))))); //fprintf(stdout, "IV"); } return humlic1.real(); }; __device__ float humlickf(float x, float y){ thrust::complex<float> T = thrust::complex<float>(y, -x); thrust::complex<float> humlic1; // double complex T = y - x*I; float S = fabs(x) + y; if (S >= 15.0f) { // Region I humlic1 = T*0.5641896f/(0.5f+T*T); //fprintf(stdout, "I"); }else if (S >= 5.5f) { // Region II thrust::complex<float> U = T * T; humlic1 = T * (1.410474f + U*0.5641896f)/(0.75f + U*(3.0f+U)); //fprintf(stdout, "II"); }else if (y >= 0.195f * fabs(x) - 0.176f) { // Region III humlic1 = (16.4955f+T*(20.20933f+T*(11.96482f +T*(3.778987f+T*0.5642236f)))) / (16.4955f+T*(38.82363f +T*(39.27121f+T*(21.69274f+T*(6.699398f+T))))); //fprintf(stdout, "III"); }else{ // Region IV thrust::complex<float> U = T * T; //double complex humlic1; humlic1 = thrust::exp(U)-T*(36183.31f-U*(3321.9905f-U*(1540.787f-U*(219.0313f-U* (35.76683f-U*(1.320522f-U*0.56419f))))))/(32066.6f-U*(24322.84f-U* (9022.228f-U*(2186.181f-U*(364.2191f-U*(61.57037f-U*(1.841439f-U))))))); //fprintf(stdout, "IV"); } return humlic1.real(); }; __device__ double voigt_threegausshermite(double x, double y,double xxyy){ return 1.1181635900*y*IPI/(xxyy) + 2.0*IPI*y*(xxyy + 1.499988068)/( (xxyy+1.499988068)*(xxyy+1.499988068) - 4*x*x*1.499988068); }; __inline__ __device__ int warpAllReduceSum(int val) { for (int mask = warpSize/2; mask > 0; mask /= 2) val += __shfl_xor(val, mask); return val; } __host__ void copy_intensity_info(cross_section_data* cross_inf) { //void* ptr; //hipGetSymbolAddress ( &ptr, int_info ); hipMemcpyToSymbol(cross_constants, (void*)cross_inf, sizeof(cross_section_data),0,hipMemcpyHostToDevice); }; //--------------------------------------Compute spectroscopic quantities----------------------------------------------- __global__ void device_compute_abscoefs(const double* g_energies,const int* g_gns,const double* g_nu,const double* g_aif,double* g_abscoef,const double temperature,const double partition, const int N_ener){ //The stored shared data //Get the global and local thread number int g_idx = blockIdx.x * blockDim.x + threadIdx.x; double ei,gns,nu_if,aif,abscoef; double beta = 
-PLANCK*VELLGT/(BOLTZ*temperature); //if(g_idx == 0) printf("partition = %12.6f\n",cross_constants.partition); if(g_idx < N_ener){ //Store values in local memory ei = g_energies[g_idx]; gns = g_gns[g_idx]; nu_if = g_nu[g_idx]; aif = g_aif[g_idx]; abscoef= CM_COEF*aif*gns *exp(beta*ei)*(1.0-exp(beta*nu_if))/ (nu_if*nu_if*partition); g_abscoef[g_idx] = abscoef; } } __global__ void device_compute_pressure(double* g_gamma,const double* g_n,const double temperature,const double pressure, const int N_ener){ int g_idx = blockIdx.x * blockDim.x + threadIdx.x; double gammaL; if(g_idx < N_ener){ gammaL = g_gamma[g_idx]*pow(cross_constants.ref_temp/temperature,g_n[g_idx])*(pressure*cross_constants.ref_press); g_gamma[g_idx] = gammaL; } } __global__ void device_compute_lorentzian(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,double hw,const int N,const int N_ener,const int start_idx) { //The stored shared data volatile __shared__ double l_nu[SHARED_SIZE]; volatile __shared__ double l_abscoef[SHARED_SIZE]; //volatile __shared__ int l_leave[SHARED_SIZE]; //__shared__ int leave[BLOCK_SIZE]; int g_idx = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number //int b_idx = blockIdx.x; int l_idx = threadIdx.x; int w_idx; int b_start = (threadIdx.x/32)*32; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double de = 0.0; int leave=0; if(g_idx < N){ freq = g_freq[start_idx+g_idx]; } for(int i = 0; i < N_ener; i+=WARP_SIZE){ l_nu[l_idx] = 1e100; l_abscoef[l_idx] = 0.0; //l_leave[l_idx] = 0; w_idx = i + l_idx; if(i + l_idx < N_ener) { l_nu[l_idx] = g_nu[w_idx]; dfreq_ = freq-nu; if(dfreq_ > 10.0*hw){ leave = 1; } //Do we have any transitions within the warp range? 
if(warpAllReduceSum(leave)==WARP_SIZE) continue; l_abscoef[l_idx] = g_abscoef[w_idx]; } //l_nu[l_idx+ BLOCK_SIZE] = 1.0; //l_abscoef[l_idx+ BLOCK_SIZE] = 0.0; //if(i + l_idx + BLOCK_SIZE < N_ener) // l_nu[l_idx+BLOCK_SIZE] = g_nu[i + l_idx+BLOCK_SIZE]; // l_abscoef[l_idx+BLOCK_SIZE] = g_abscoef[i + l_idx+BLOCK_SIZE]; //} //__syncthreads(); for(int j = 0; j < WARP_SIZE; j++){ //nu_if = ; //Read value of nu nu = l_nu[b_start+j]; dfreq_ = freq-nu; //if(dfreq_ > hw*10.0) // continue; if(dfreq_<-hw*10.0){ //l_leave[l_idx] = 1; leave=1; } //gammaG de =dfreq_*dfreq_ + hw*hw; cs_val+=l_abscoef[b_start+j]*IPI*hw/de; //*__expf(temp_3*dfreq_*dfreq_); } if(warpAllReduceSum(leave)==WARP_SIZE) break; //__syncthreads(); } if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val; } __global__ void device_compute_doppler(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double temperature,const int N,const int N_ener,const int start_idx) { typedef hipcub::WarpReduce<int> WarpReduceI; //The stored shared data volatile __shared__ double l_nu[SHARED_SIZE]; volatile __shared__ double l_abscoef[SHARED_SIZE]; //volatile __shared__ double l_freq[SHARED_SIZE]; //volatile __shared__ double l_result[SHARED_SIZE]; __shared__ typename WarpReduceI::TempStorage leave_warp[4]; //__shared__ int leave[BLOCK_SIZE]; int g_idx = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number //int b_idx = blockIdx.x; int l_idx = threadIdx.x; int w_idx; int warp_id = threadIdx.x/32; int b_start = (threadIdx.x/32)*32; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double gammaG; double x0= cross_constants.dfreq*0.5; double dpwcoeff = sqrt(2.0*BOLTZ*temperature*NA/((cross_constants.mean_mass)))/VELLGT; int leave = 0; if(g_idx < N){ //freq = g_freq[start_idx+g_idx]; freq = g_freq[start_idx+g_idx]; //cs_val = g_cs[start_idx+g_idx]; } //if(g_idx==9999) printf("%12.6f\n",freq); for(int i = 0; i < N_ener; i+=WARP_SIZE){ l_nu[l_idx] = 1e100; l_abscoef[l_idx] = 0.0; leave=0; w_idx = i + l_idx; if(i + l_idx < N_ener) { l_nu[l_idx] = g_nu[w_idx]; l_abscoef[l_idx] = g_abscoef[w_idx]; dfreq_ = freq-l_nu[l_idx]; if(dfreq_ > DOPPLER_CUTOFF){ leave = 1; } }else{ leave = 1; } if(WarpReduceI(leave_warp[warp_id]).Sum(leave)>=31) continue; leave = 0; for(int j = 0; j < WARP_SIZE; j++){ nu = l_nu[b_start+j]; dfreq_ = freq-nu; //if(dfreq_ > DOPPLER_CUTOFF) // continue; //Do we have any transitions left within the warp range? 
gammaG= SQRTLN2/(nu*dpwcoeff); double xp,xm,de; xp = gammaG*(dfreq_+x0); xm = gammaG*(dfreq_-x0); de = erf(xp)-erf(xm); cs_val+=l_abscoef[b_start+j]*de; //*__expf(temp_3*dfreq_*dfreq_); } if(dfreq_<-DOPPLER_CUTOFF){ //l_abscoef[l_idx] = 0.0; leave=1; //break; } if(WarpReduceI(leave_warp[warp_id]).Sum(leave)>=31) break; } if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val; } __global__ void device_compute_doppler_block(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double temperature,const int N,const int N_ener,const int start_idx) { typedef hipcub::BlockReduce<double, DOPPLER_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number int b_idx = blockIdx.x; int l_idx = threadIdx.x; double cs_val = 0.0; double final_cs = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double abscoef = 0.0; double dpwcoeff = (temperature*cross_constants.dpwcoeff); double gammaG; double x0; freq = g_freq[start_idx + b_idx]; //cs_val = g_cs[start_idx+g_idx]; //if(g_idx==9999) printf("%12.6f\n",freq); //l_cs_result[l_idx] = 0.0; for(int i = l_idx; i < N_ener; i+=DOPPLER_SIZE){ nu = 0.0; abscoef = 0.0; //Read value of nu nu = g_nu[i]; dfreq_ = freq-nu; if(dfreq_ > DOPPLER_CUTOFF) continue; if(dfreq_<-DOPPLER_CUTOFF) break; gammaG = SQRTLN2/(dpwcoeff*nu); x0 = gammaG*cross_constants.dfreq*0.5; double xp,xm,de; xp = gammaG*(dfreq_)+x0; xm = gammaG*(dfreq_)-x0; de = erf(xp)-erf(xm); //do work // if(dfreq_<hw)continue; // cs_val+=g_abscoef[i]*de; } //Store results into shared memory //l_cs_result[l_idx] = cs_val; //cs_val = 0; //Wait for everone to finish nicely __syncthreads(); final_cs = BlockReduce(temp_storage).Sum(cs_val); if(l_idx == 0){ //for(int i = 0; i < BLOCK_SIZE; i++) // cs_val+=l_cs_result[i]; g_cs[start_idx+b_idx]+=final_cs*0.5/cross_constants.dfreq; } } __global__ void device_compute_voigt(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const double temperature,const double pressure,const int N,const int N_ener,const int start_idx){ //The stored shared data typedef hipcub::BlockReduce<double, VOIGT_BLOCK> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; //__shared__ double l_cs_result[VOIGT_BLOCK]; //__shared__ double l_correction[VOIGT_BLOCK]; //Get the global and local thread number int b_idx = blockIdx.x; int l_idx = threadIdx.x; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double gammaG=0.05; double x,y; double abscoef; double aggregate = 0.0; double dpwcoeff = temperature*cross_constants.dpwcoeff; freq = g_freq[start_idx + b_idx]; //Lets find which energy we deal with //if(g_idx==9999) printf("%12.6f\n",freq); //l_cs_result[l_idx] = 0.0; //l_correction[l_idx] = 0.0; for(int i = l_idx; i < N_ener; i+=VOIGT_BLOCK){ nu = 0.0; //Read value of nu nu = g_nu[i]; dfreq_ = nu-freq; if(dfreq_ < -LORENTZ_CUTOFF) continue; if(dfreq_ > LORENTZ_CUTOFF) break; //We are done here let another queued block do something //abscoef = g_abscoef[i]; gammaG = SQRTLN2/(nu*dpwcoeff); x = dfreq_*gammaG; y =g_gamma[i]*gammaG; cs_val+= g_abscoef[i]*humlickf(x,y)*gammaG; } //Wait for everone to finish nicely aggregate = BlockReduce(temp_storage).Sum(cs_val); if(l_idx==0)g_cs[start_idx+b_idx]+=aggregate*ISQRTPI; //cs_val; } __global__ void device_compute_voigt_exact(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const double 
temperature,const double pressure,const int N,const int N_ener,const int start_idx){ //The stored shared data typedef hipcub::BlockReduce<double, VOIGT_BLOCK> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; //__shared__ double l_cs_result[VOIGT_BLOCK]; //__shared__ double l_correction[VOIGT_BLOCK]; //Get the global and local thread number int b_idx = blockIdx.x; int l_idx = threadIdx.x; volatile double cs_val = 0.0; volatile double dfreq_=0.0; volatile double freq = 0.0; volatile double nu = 0.0; volatile double gammaD=0.05; volatile double gammaL=0.01; volatile double x,y,z,fp,voigt; volatile double abscoef; volatile double aggregate = 0.0; volatile double dpwcoeff = temperature*cross_constants.dpwcoeff; freq = g_freq[start_idx + b_idx]; //Lets find which energy we deal with //if(g_idx==9999) printf("%12.6f\n",freq); //l_cs_result[l_idx] = 0.0; //l_correction[l_idx] = 0.0; for(int i = l_idx; i < N_ener; i+=VOIGT_BLOCK){ nu = 0.0; //Read value of nu nu = g_nu[i]; dfreq_ = nu-freq; if(dfreq_ < -LORENTZ_CUTOFF) continue; if(dfreq_ > LORENTZ_CUTOFF) break; //We are done here let another queued block do something //abscoef = g_abscoef[i]; gammaD = SQRTLN2/(nu*dpwcoeff); //gammaL = g_gamma[i]; //volatile double v1 = exp(4.0*PI*gammaL*gammaL/(gammaD*gammaD)); //volatile double v2 = exp(pdfreq*pdfreq/(gammaD*gammaD)); //volatile double v3 = cos((4.0*PI*gammaL*gammaL*SQRTLN2*pdfreq*pdfreq)/(gammaD*gammaD)); x =dfreq_*gammaD; y =g_gamma[i]*gammaD; z = (nu + freq)*gammaD; fp = SQRTPI*gammaD; volatile double ex2 = exp(-x * x); if(x==0){ voigt = erfcx(y); }else if (y==0){ voigt = ex2; }else{ volatile double ez2 = exp(-z * z); volatile double ey2 = exp(4.0*PI*y * y); volatile double v1 = ey2*cos(y*z); volatile double v2 = ey2*cos(x*z); volatile double v3 = v1/ex2 + v2/ez2; voigt = fp*v3; } /*volatile double yy = y*y; volatile double xx = x*x; volatile double zz = z*z; volatile double v1 = exp(4.0*PI*yy - zz); volatile double v2 = cos(4.0*PI*y*z); volatile double v3 = exp(4.0*PI*yy - xx); volatile double v4 = exp(4.0*PI*yy - xx); voigt = v1*v2 + v3*v4; if(voigt != voigt){ voigt = 0.0; } */ //Exact form cs_val+= g_abscoef[i]*voigt; //x = cs_val + y; //correction = (x - cs_val) - y; //cs_val=x; } //Store results into shared memory //l_cs_result[l_idx] = cs_val; //l_correction[l_idx] = correction; //cs_val //Wait for everone to finish nicely __syncthreads(); //if(l_idx == 0){ //cs_val = g_cs[start_idx+b_idx]; //correction = 0; //for(int i = 0; i < VOIGT_BLOCK; i++) // correction += l_correction[i]; /*for(int i = 0; i < VOIGT_BLOCK; i++){ y = l_cs_result[i] - correction; x = cs_val + y; correction = (x - cs_val) - y; cs_val=x; }*/ //} aggregate = BlockReduce(temp_storage).Sum(cs_val); if(l_idx==0)g_cs[start_idx+b_idx]+=aggregate*cross_constants.dfreq; //cs_val; } __global__ void device_compute_voigt_II(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const double temperature,const double pressure,const int N,const int N_ener,const int start_idx){ //The stored shared data //typedef hipcub::WarpReduce<int> WarpReduceI; __shared__ double l_nu[VOIGT_SHARED_SIZE]; __shared__ double l_abscoef[VOIGT_SHARED_SIZE]; __shared__ double l_gamma[VOIGT_SHARED_SIZE]; //__shared__ int l_leave[VOIGT_BLOCK]; //__shared__ int l_continue[VOIGT_BLOCK]; //typedef hipcub::BlockReduce<int, VOIGT_BLOCK> BlockReduce; //__shared__ typename BlockReduce::TempStorage temp_storage; //volatile __shared__ double l_abcissa[SHARED_SIZE]; //volatile __shared__ double 
l_abcissa[SHARED_SIZE]; //__shared__ int leave[BLOCK_SIZE]; int g_idx = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number //int b_idx = blockIdx.x; int l_idx = threadIdx.x; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double gammaG; double dpwcoeff = temperature*cross_constants.dpwcoeff; // int leave = 0; // int continue_cal = 0; double x,y; if(g_idx < N){ freq = g_freq[start_idx+g_idx]; //cs_val = g_cs[start_idx+g_idx]; } //if(g_idx==9999) printf("%12.6f\n",freq); for(int i = 0; i < N_ener; i+=VOIGT_SHARED_SIZE){ l_nu[l_idx] = 1e100; l_abscoef[l_idx] = 0.0; //leave=1; int w_idx = i + l_idx; //l_leave[l_idx] = 0; // if(w_idx < N_ener) { l_nu[l_idx] = g_nu[w_idx]; //l_leave[l_idx] = 1; //l_continue[l_idx] = 1; //dfreq_ = freq-nu; /* if(dfreq_ < -LORENTZ_CUTOFF){ l_leave[l_idx] = 0; }else if (dfreq_ > LORENTZ_CUTOFF){ l_continue[l_idx] = 0; }else{ //Do we have any transitions within the warp range? //if(warpAllReduceSum(leave)==WARP_SIZE) // continue; }*/ l_abscoef[l_idx] = g_abscoef[w_idx]; l_gamma[l_idx] = g_gamma[w_idx]; } //if(BlockReduce(temp_storage).Sum(leave)==0) // break; __syncthreads(); /*leave = 0; continue_cal = 0; for(int j = 0; j < VOIGT_BLOCK; j++){ continue_cal += l_continue[j]; leave+=l_leave[j]; } if(leave == 0) break; if(continue_cal==0) continue; */ for(int j = 0; j < VOIGT_SHARED_SIZE; j++){ nu = l_nu[j]; dfreq_ = freq-nu; if(dfreq_ < -LORENTZ_CUTOFF) break; if(dfreq_ > LORENTZ_CUTOFF) continue; //Do we have any transitions left within the warp range? //if(dfreq_>-lorentz_cutoff){ // l_abscoef[l_idx] = 0.0; // leave=0; // //break; //} gammaG= SQRTLN2/(nu*dpwcoeff); x =abs(dfreq_)*gammaG; y =l_gamma[j]*gammaG; cs_val+=l_abscoef[j]*humlickf(x,y)*gammaG; //*__expf(temp_3*dfreq_*dfreq_); } //if(WarpReduceI(leave_warp[warp_id]).Sum(leave)==31) // break; __syncthreads(); } if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val*ISQRTPI; } __global__ void device_compute_voigt_quad(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const double temperature,const double pressure,const double lorentz_cutoff,const int N,const int N_ener,const int start_idx){ //The stored shared data typedef hipcub::WarpReduce<int> WarpReduceI; typedef hipcub::WarpReduce<double> WarpReduceD; volatile __shared__ double l_nu[SHARED_SIZE]; volatile __shared__ double l_abscoef[SHARED_SIZE]; volatile __shared__ double l_gamma[SHARED_SIZE]; __shared__ typename WarpReduceI::TempStorage leave_warp[4]; volatile __shared__ double l_abcissa[SHARED_SIZE]; volatile __shared__ double l_weight[SHARED_SIZE]; __shared__ typename WarpReduceD::TempStorage cs_warp[4]; //__shared__ int leave[BLOCK_SIZE]; int g_idx = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number //int b_idx = blockIdx.x; int l_idx = threadIdx.x; int w_idx; int b_start = (threadIdx.x/32)*32; int warp_id = threadIdx.x/32; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double gammaG; double warp_cs = 0.0; double dpwcoeff = sqrt(2.0*BOLTZ*temperature*NA/((cross_constants.mean_mass)))/VELLGT; int leave = 0; double x,y; if(g_idx < N){ freq = g_freq[start_idx+g_idx]; //cs_val = g_cs[start_idx+g_idx]; } //if(g_idx==9999) printf("%12.6f\n",freq); for(int i = 0; i < N_ener; i+=WARP_SIZE){ l_nu[l_idx] = 1e100; l_abscoef[l_idx] = 0.0; leave=0; w_idx = i + l_idx; if(i + l_idx < N_ener) { l_nu[l_idx] = g_nu[w_idx]; 
dfreq_ = freq-nu; if(dfreq_ > DOPPLER_CUTOFF){ leave = 1; } //Do we have any transitions within the warp range? if(WarpReduceI(leave_warp[warp_id]).Sum(leave)==WARP_SIZE) continue; l_abscoef[l_idx] = g_abscoef[w_idx]; l_gamma[l_idx] = g_gamma[w_idx]; } leave = 0; for(int j = 0; j < WARP_SIZE; j++){ warp_cs = 0.0; nu = l_nu[b_start+j]; dfreq_ = freq-nu; //if(dfreq_ > DOPPLER_CUTOFF) // continue; //Do we have any transitions left within the warp range? if(dfreq_<-lorentz_cutoff){ //l_abscoef[l_idx] = 0.0; leave=1; //break; } gammaG= SQRTLN2/(nu*dpwcoeff); x =abs(dfreq_)*gammaG; y =l_gamma[b_start+j]*gammaG; cs_val+=l_abscoef[b_start+j]*humlick(x,y)*gammaG*ISQRTPI; //*__expf(temp_3*dfreq_*dfreq_); } if(WarpReduceI(leave_warp[warp_id]).Sum(leave)>=WARP_SIZE) break; //__syncthreads(); } if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val; } //Lorentzian __host__ void gpu_compute_lorentzian_profile(double* g_freq, double* g_intens, double* g_energies, double* g_nu, int* g_gns,double* g_aif,double* g_abs,double temp,double part,double hw, int Npoints,int N_ener,int start_idx,hipStream_t stream){ int blockSize = 1024; int gridSize = (int)ceil((float)N_ener/blockSize); hipLaunchKernelGGL(( device_compute_abscoefs), dim3(gridSize),dim3(blockSize),0,stream, g_energies,g_gns,g_nu,g_aif,g_abs,temp,part,N_ener); blockSize = SHARED_SIZE; gridSize = (int)ceil((float)Npoints/blockSize); hipLaunchKernelGGL(( device_compute_lorentzian), dim3(gridSize),dim3(blockSize),0,stream, g_freq, g_intens,g_nu,g_abs,hw,Npoints,N_ener,start_idx); } //Gaussian __host__ void gpu_compute_gaussian_profile(double* g_freq, double* g_intens, double* g_energies, double* g_nu, int* g_gns,double* g_aif,double* g_abs,double temp,double part,double hw, int Npoints,int N_ener,int start_idx,hipStream_t stream){ } //Doppler __host__ void gpu_compute_doppler_profile(double* g_freq, double* g_intens, double* g_energies, double* g_nu, int* g_gns,double* g_aif,double* g_abs,double temp,double part, int Npoints,int N_ener,int start_idx,hipStream_t stream){ int blockSize = 1024; int gridSize = (int)ceil((float)N_ener/blockSize); hipLaunchKernelGGL(( device_compute_abscoefs), dim3(gridSize),dim3(blockSize),0,stream, g_energies,g_gns,g_nu,g_aif,g_abs,temp,part,N_ener); blockSize = DOPPLER_SIZE; gridSize = Npoints;//(int)ceil((float)Npoints/blockSize); hipFuncSetCacheConfig(device_compute_doppler_block, hipFuncCachePreferL1); hipLaunchKernelGGL(( device_compute_doppler_block), dim3(gridSize),dim3(blockSize),0,stream, g_freq, g_intens,g_nu,g_abs,sqrt(temp),Npoints,N_ener,start_idx); } //Voigt __host__ void gpu_compute_voigt_profile(double* g_freq, double* g_intens, double* g_energies, double* g_nu, int* g_gns,double* g_aif,double* g_abs,double* g_gamma,double* g_n ,double temp,double press,double part,int Npoints,int N_ener,int start_idx,hipStream_t stream){ int blockSize = 1024; int gridSize = (int)ceil((float)N_ener/(float)blockSize); hipLaunchKernelGGL(( device_compute_abscoefs), dim3(gridSize),dim3(blockSize),0,stream, g_energies,g_gns,g_nu,g_aif,g_abs,temp,part,N_ener); hipLaunchKernelGGL(( device_compute_pressure), dim3(gridSize),dim3(blockSize),0,stream, g_gamma,g_n ,temp,press,N_ener); //device_compute_abscoefs<<<gridSize,blockSize,0,stream>>>(g_energies,g_gns,g_nu,g_aif,g_abs,temp,part,N_ener); //blockSize = VOIGT_SHARED_SIZE; //gridSize = (int)ceil((float)Npoints/(float)blockSize); //device_compute_voigt_II<<<gridSize,blockSize,0,stream>>>(g_freq, g_intens,g_nu,g_abs,g_gamma,sqrt(temp),press,Npoints,N_ener,start_idx); // blockSize = 
VOIGT_BLOCK; gridSize = Npoints; //hipDeviceSetSharedMemConfig() hipFuncSetCacheConfig(device_compute_voigt, hipFuncCachePreferL1); hipLaunchKernelGGL(( device_compute_voigt), dim3(gridSize),dim3(blockSize),0,stream, g_freq, g_intens,g_nu,g_abs,g_gamma,sqrt(temp),press,Npoints,N_ener,start_idx); }
dac5f5ce9507c2cf06b22343c2bb199c05bea964.cu
#include "cross_structs.cuh" #include "cross_kernal_v2.cuh" #include "cuda_utils.cuh" #include <cuda_runtime_api.h> #include <cuda.h> #include<stdio.h> #include<stdlib.h> #include <cmath> #include <cuComplex.h> #include <thrust/complex.h> #include <cub/cub.cuh> #include "defines.h" __constant__ cross_section_data cross_constants; //Computing temperature and pressure __device__ double humlick(double x, double y){ thrust::complex<double> T = thrust::complex<double>(y, -x); thrust::complex<double> humlic1; // double complex T = y - x*I; double S = fabs(x) + y; if (S >= 15) { // Region I humlic1 = T*0.5641896/(0.5+T*T); //fprintf(stdout, "I"); }else if (S >= 5.5) { // Region II thrust::complex<double> U = T * T; humlic1 = T * (1.410474 + U*.5641896)/(.75 + U*(3.+U)); //fprintf(stdout, "II"); }else if (y >= 0.195 * fabs(x) - 0.176) { // Region III humlic1 = (16.4955+T*(20.20933+T*(11.96482 +T*(3.778987+T*.5642236)))) / (16.4955+T*(38.82363 +T*(39.27121+T*(21.69274+T*(6.699398+T))))); //fprintf(stdout, "III"); }else{ // Region IV thrust::complex<double> U = T * T; //double complex humlic1; humlic1 = thrust::exp(U)-T*(36183.31-U*(3321.9905-U*(1540.787-U*(219.0313-U* (35.76683-U*(1.320522-U*.56419))))))/(32066.6-U*(24322.84-U* (9022.228-U*(2186.181-U*(364.2191-U*(61.57037-U*(1.841439-U))))))); //fprintf(stdout, "IV"); } return humlic1.real(); }; __device__ float humlickf(float x, float y){ thrust::complex<float> T = thrust::complex<float>(y, -x); thrust::complex<float> humlic1; // double complex T = y - x*I; float S = fabs(x) + y; if (S >= 15.0f) { // Region I humlic1 = T*0.5641896f/(0.5f+T*T); //fprintf(stdout, "I"); }else if (S >= 5.5f) { // Region II thrust::complex<float> U = T * T; humlic1 = T * (1.410474f + U*0.5641896f)/(0.75f + U*(3.0f+U)); //fprintf(stdout, "II"); }else if (y >= 0.195f * fabs(x) - 0.176f) { // Region III humlic1 = (16.4955f+T*(20.20933f+T*(11.96482f +T*(3.778987f+T*0.5642236f)))) / (16.4955f+T*(38.82363f +T*(39.27121f+T*(21.69274f+T*(6.699398f+T))))); //fprintf(stdout, "III"); }else{ // Region IV thrust::complex<float> U = T * T; //double complex humlic1; humlic1 = thrust::exp(U)-T*(36183.31f-U*(3321.9905f-U*(1540.787f-U*(219.0313f-U* (35.76683f-U*(1.320522f-U*0.56419f))))))/(32066.6f-U*(24322.84f-U* (9022.228f-U*(2186.181f-U*(364.2191f-U*(61.57037f-U*(1.841439f-U))))))); //fprintf(stdout, "IV"); } return humlic1.real(); }; __device__ double voigt_threegausshermite(double x, double y,double xxyy){ return 1.1181635900*y*IPI/(xxyy) + 2.0*IPI*y*(xxyy + 1.499988068)/( (xxyy+1.499988068)*(xxyy+1.499988068) - 4*x*x*1.499988068); }; __inline__ __device__ int warpAllReduceSum(int val) { for (int mask = warpSize/2; mask > 0; mask /= 2) val += __shfl_xor(val, mask); return val; } __host__ void copy_intensity_info(cross_section_data* cross_inf) { //void* ptr; //cudaGetSymbolAddress ( &ptr, int_info ); cudaMemcpyToSymbol(cross_constants, (void*)cross_inf, sizeof(cross_section_data),0,cudaMemcpyHostToDevice); }; //--------------------------------------Compute spectroscopic quantities----------------------------------------------- __global__ void device_compute_abscoefs(const double* g_energies,const int* g_gns,const double* g_nu,const double* g_aif,double* g_abscoef,const double temperature,const double partition, const int N_ener){ //The stored shared data //Get the global and local thread number int g_idx = blockIdx.x * blockDim.x + threadIdx.x; double ei,gns,nu_if,aif,abscoef; double beta = -PLANCK*VELLGT/(BOLTZ*temperature); //if(g_idx == 0) printf("partition = 
%12.6f\n",cross_constants.partition); if(g_idx < N_ener){ //Store values in local memory ei = g_energies[g_idx]; gns = g_gns[g_idx]; nu_if = g_nu[g_idx]; aif = g_aif[g_idx]; abscoef= CM_COEF*aif*gns *exp(beta*ei)*(1.0-exp(beta*nu_if))/ (nu_if*nu_if*partition); g_abscoef[g_idx] = abscoef; } } __global__ void device_compute_pressure(double* g_gamma,const double* g_n,const double temperature,const double pressure, const int N_ener){ int g_idx = blockIdx.x * blockDim.x + threadIdx.x; double gammaL; if(g_idx < N_ener){ gammaL = g_gamma[g_idx]*pow(cross_constants.ref_temp/temperature,g_n[g_idx])*(pressure*cross_constants.ref_press); g_gamma[g_idx] = gammaL; } } __global__ void device_compute_lorentzian(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,double hw,const int N,const int N_ener,const int start_idx) { //The stored shared data volatile __shared__ double l_nu[SHARED_SIZE]; volatile __shared__ double l_abscoef[SHARED_SIZE]; //volatile __shared__ int l_leave[SHARED_SIZE]; //__shared__ int leave[BLOCK_SIZE]; int g_idx = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number //int b_idx = blockIdx.x; int l_idx = threadIdx.x; int w_idx; int b_start = (threadIdx.x/32)*32; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double de = 0.0; int leave=0; if(g_idx < N){ freq = g_freq[start_idx+g_idx]; } for(int i = 0; i < N_ener; i+=WARP_SIZE){ l_nu[l_idx] = 1e100; l_abscoef[l_idx] = 0.0; //l_leave[l_idx] = 0; w_idx = i + l_idx; if(i + l_idx < N_ener) { l_nu[l_idx] = g_nu[w_idx]; dfreq_ = freq-nu; if(dfreq_ > 10.0*hw){ leave = 1; } //Do we have any transitions within the warp range? if(warpAllReduceSum(leave)==WARP_SIZE) continue; l_abscoef[l_idx] = g_abscoef[w_idx]; } //l_nu[l_idx+ BLOCK_SIZE] = 1.0; //l_abscoef[l_idx+ BLOCK_SIZE] = 0.0; //if(i + l_idx + BLOCK_SIZE < N_ener) // l_nu[l_idx+BLOCK_SIZE] = g_nu[i + l_idx+BLOCK_SIZE]; // l_abscoef[l_idx+BLOCK_SIZE] = g_abscoef[i + l_idx+BLOCK_SIZE]; //} //__syncthreads(); for(int j = 0; j < WARP_SIZE; j++){ //nu_if = ; //Read value of nu nu = l_nu[b_start+j]; dfreq_ = freq-nu; //if(dfreq_ > hw*10.0) // continue; if(dfreq_<-hw*10.0){ //l_leave[l_idx] = 1; leave=1; } //gammaG de =dfreq_*dfreq_ + hw*hw; cs_val+=l_abscoef[b_start+j]*IPI*hw/de; //*__expf(temp_3*dfreq_*dfreq_); } if(warpAllReduceSum(leave)==WARP_SIZE) break; //__syncthreads(); } if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val; } __global__ void device_compute_doppler(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double temperature,const int N,const int N_ener,const int start_idx) { typedef cub::WarpReduce<int> WarpReduceI; //The stored shared data volatile __shared__ double l_nu[SHARED_SIZE]; volatile __shared__ double l_abscoef[SHARED_SIZE]; //volatile __shared__ double l_freq[SHARED_SIZE]; //volatile __shared__ double l_result[SHARED_SIZE]; __shared__ typename WarpReduceI::TempStorage leave_warp[4]; //__shared__ int leave[BLOCK_SIZE]; int g_idx = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number //int b_idx = blockIdx.x; int l_idx = threadIdx.x; int w_idx; int warp_id = threadIdx.x/32; int b_start = (threadIdx.x/32)*32; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double gammaG; double x0= cross_constants.dfreq*0.5; double dpwcoeff = sqrt(2.0*BOLTZ*temperature*NA/((cross_constants.mean_mass)))/VELLGT; int leave = 0; if(g_idx < 
N){ //freq = g_freq[start_idx+g_idx]; freq = g_freq[start_idx+g_idx]; //cs_val = g_cs[start_idx+g_idx]; } //if(g_idx==9999) printf("%12.6f\n",freq); for(int i = 0; i < N_ener; i+=WARP_SIZE){ l_nu[l_idx] = 1e100; l_abscoef[l_idx] = 0.0; leave=0; w_idx = i + l_idx; if(i + l_idx < N_ener) { l_nu[l_idx] = g_nu[w_idx]; l_abscoef[l_idx] = g_abscoef[w_idx]; dfreq_ = freq-l_nu[l_idx]; if(dfreq_ > DOPPLER_CUTOFF){ leave = 1; } }else{ leave = 1; } if(WarpReduceI(leave_warp[warp_id]).Sum(leave)>=31) continue; leave = 0; for(int j = 0; j < WARP_SIZE; j++){ nu = l_nu[b_start+j]; dfreq_ = freq-nu; //if(dfreq_ > DOPPLER_CUTOFF) // continue; //Do we have any transitions left within the warp range? gammaG= SQRTLN2/(nu*dpwcoeff); double xp,xm,de; xp = gammaG*(dfreq_+x0); xm = gammaG*(dfreq_-x0); de = erf(xp)-erf(xm); cs_val+=l_abscoef[b_start+j]*de; //*__expf(temp_3*dfreq_*dfreq_); } if(dfreq_<-DOPPLER_CUTOFF){ //l_abscoef[l_idx] = 0.0; leave=1; //break; } if(WarpReduceI(leave_warp[warp_id]).Sum(leave)>=31) break; } if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val; } __global__ void device_compute_doppler_block(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double temperature,const int N,const int N_ener,const int start_idx) { typedef cub::BlockReduce<double, DOPPLER_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number int b_idx = blockIdx.x; int l_idx = threadIdx.x; double cs_val = 0.0; double final_cs = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double abscoef = 0.0; double dpwcoeff = (temperature*cross_constants.dpwcoeff); double gammaG; double x0; freq = g_freq[start_idx + b_idx]; //cs_val = g_cs[start_idx+g_idx]; //if(g_idx==9999) printf("%12.6f\n",freq); //l_cs_result[l_idx] = 0.0; for(int i = l_idx; i < N_ener; i+=DOPPLER_SIZE){ nu = 0.0; abscoef = 0.0; //Read value of nu nu = g_nu[i]; dfreq_ = freq-nu; if(dfreq_ > DOPPLER_CUTOFF) continue; if(dfreq_<-DOPPLER_CUTOFF) break; gammaG = SQRTLN2/(dpwcoeff*nu); x0 = gammaG*cross_constants.dfreq*0.5; double xp,xm,de; xp = gammaG*(dfreq_)+x0; xm = gammaG*(dfreq_)-x0; de = erf(xp)-erf(xm); //do work // if(dfreq_<hw)continue; // cs_val+=g_abscoef[i]*de; } //Store results into shared memory //l_cs_result[l_idx] = cs_val; //cs_val = 0; //Wait for everone to finish nicely __syncthreads(); final_cs = BlockReduce(temp_storage).Sum(cs_val); if(l_idx == 0){ //for(int i = 0; i < BLOCK_SIZE; i++) // cs_val+=l_cs_result[i]; g_cs[start_idx+b_idx]+=final_cs*0.5/cross_constants.dfreq; } } __global__ void device_compute_voigt(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const double temperature,const double pressure,const int N,const int N_ener,const int start_idx){ //The stored shared data typedef cub::BlockReduce<double, VOIGT_BLOCK> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; //__shared__ double l_cs_result[VOIGT_BLOCK]; //__shared__ double l_correction[VOIGT_BLOCK]; //Get the global and local thread number int b_idx = blockIdx.x; int l_idx = threadIdx.x; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double gammaG=0.05; double x,y; double abscoef; double aggregate = 0.0; double dpwcoeff = temperature*cross_constants.dpwcoeff; freq = g_freq[start_idx + b_idx]; //Lets find which energy we deal with //if(g_idx==9999) printf("%12.6f\n",freq); //l_cs_result[l_idx] = 0.0; //l_correction[l_idx] = 0.0; for(int 
i = l_idx; i < N_ener; i+=VOIGT_BLOCK){ nu = 0.0; //Read value of nu nu = g_nu[i]; dfreq_ = nu-freq; if(dfreq_ < -LORENTZ_CUTOFF) continue; if(dfreq_ > LORENTZ_CUTOFF) break; //We are done here let another queued block do something //abscoef = g_abscoef[i]; gammaG = SQRTLN2/(nu*dpwcoeff); x = dfreq_*gammaG; y =g_gamma[i]*gammaG; cs_val+= g_abscoef[i]*humlickf(x,y)*gammaG; } //Wait for everone to finish nicely aggregate = BlockReduce(temp_storage).Sum(cs_val); if(l_idx==0)g_cs[start_idx+b_idx]+=aggregate*ISQRTPI; //cs_val; } __global__ void device_compute_voigt_exact(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const double temperature,const double pressure,const int N,const int N_ener,const int start_idx){ //The stored shared data typedef cub::BlockReduce<double, VOIGT_BLOCK> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; //__shared__ double l_cs_result[VOIGT_BLOCK]; //__shared__ double l_correction[VOIGT_BLOCK]; //Get the global and local thread number int b_idx = blockIdx.x; int l_idx = threadIdx.x; volatile double cs_val = 0.0; volatile double dfreq_=0.0; volatile double freq = 0.0; volatile double nu = 0.0; volatile double gammaD=0.05; volatile double gammaL=0.01; volatile double x,y,z,fp,voigt; volatile double abscoef; volatile double aggregate = 0.0; volatile double dpwcoeff = temperature*cross_constants.dpwcoeff; freq = g_freq[start_idx + b_idx]; //Lets find which energy we deal with //if(g_idx==9999) printf("%12.6f\n",freq); //l_cs_result[l_idx] = 0.0; //l_correction[l_idx] = 0.0; for(int i = l_idx; i < N_ener; i+=VOIGT_BLOCK){ nu = 0.0; //Read value of nu nu = g_nu[i]; dfreq_ = nu-freq; if(dfreq_ < -LORENTZ_CUTOFF) continue; if(dfreq_ > LORENTZ_CUTOFF) break; //We are done here let another queued block do something //abscoef = g_abscoef[i]; gammaD = SQRTLN2/(nu*dpwcoeff); //gammaL = g_gamma[i]; //volatile double v1 = exp(4.0*PI*gammaL*gammaL/(gammaD*gammaD)); //volatile double v2 = exp(pdfreq*pdfreq/(gammaD*gammaD)); //volatile double v3 = cos((4.0*PI*gammaL*gammaL*SQRTLN2*pdfreq*pdfreq)/(gammaD*gammaD)); x =dfreq_*gammaD; y =g_gamma[i]*gammaD; z = (nu + freq)*gammaD; fp = SQRTPI*gammaD; volatile double ex2 = exp(-x * x); if(x==0){ voigt = erfcx(y); }else if (y==0){ voigt = ex2; }else{ volatile double ez2 = exp(-z * z); volatile double ey2 = exp(4.0*PI*y * y); volatile double v1 = ey2*cos(y*z); volatile double v2 = ey2*cos(x*z); volatile double v3 = v1/ex2 + v2/ez2; voigt = fp*v3; } /*volatile double yy = y*y; volatile double xx = x*x; volatile double zz = z*z; volatile double v1 = exp(4.0*PI*yy - zz); volatile double v2 = cos(4.0*PI*y*z); volatile double v3 = exp(4.0*PI*yy - xx); volatile double v4 = exp(4.0*PI*yy - xx); voigt = v1*v2 + v3*v4; if(voigt != voigt){ voigt = 0.0; } */ //Exact form cs_val+= g_abscoef[i]*voigt; //x = cs_val + y; //correction = (x - cs_val) - y; //cs_val=x; } //Store results into shared memory //l_cs_result[l_idx] = cs_val; //l_correction[l_idx] = correction; //cs_val //Wait for everone to finish nicely __syncthreads(); //if(l_idx == 0){ //cs_val = g_cs[start_idx+b_idx]; //correction = 0; //for(int i = 0; i < VOIGT_BLOCK; i++) // correction += l_correction[i]; /*for(int i = 0; i < VOIGT_BLOCK; i++){ y = l_cs_result[i] - correction; x = cs_val + y; correction = (x - cs_val) - y; cs_val=x; }*/ //} aggregate = BlockReduce(temp_storage).Sum(cs_val); if(l_idx==0)g_cs[start_idx+b_idx]+=aggregate*cross_constants.dfreq; //cs_val; } __global__ void device_compute_voigt_II(const 
double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const double temperature,const double pressure,const int N,const int N_ener,const int start_idx){ //The stored shared data //typedef cub::WarpReduce<int> WarpReduceI; __shared__ double l_nu[VOIGT_SHARED_SIZE]; __shared__ double l_abscoef[VOIGT_SHARED_SIZE]; __shared__ double l_gamma[VOIGT_SHARED_SIZE]; //__shared__ int l_leave[VOIGT_BLOCK]; //__shared__ int l_continue[VOIGT_BLOCK]; //typedef cub::BlockReduce<int, VOIGT_BLOCK> BlockReduce; //__shared__ typename BlockReduce::TempStorage temp_storage; //volatile __shared__ double l_abcissa[SHARED_SIZE]; //volatile __shared__ double l_abcissa[SHARED_SIZE]; //__shared__ int leave[BLOCK_SIZE]; int g_idx = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number //int b_idx = blockIdx.x; int l_idx = threadIdx.x; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double gammaG; double dpwcoeff = temperature*cross_constants.dpwcoeff; // int leave = 0; // int continue_cal = 0; double x,y; if(g_idx < N){ freq = g_freq[start_idx+g_idx]; //cs_val = g_cs[start_idx+g_idx]; } //if(g_idx==9999) printf("%12.6f\n",freq); for(int i = 0; i < N_ener; i+=VOIGT_SHARED_SIZE){ l_nu[l_idx] = 1e100; l_abscoef[l_idx] = 0.0; //leave=1; int w_idx = i + l_idx; //l_leave[l_idx] = 0; // if(w_idx < N_ener) { l_nu[l_idx] = g_nu[w_idx]; //l_leave[l_idx] = 1; //l_continue[l_idx] = 1; //dfreq_ = freq-nu; /* if(dfreq_ < -LORENTZ_CUTOFF){ l_leave[l_idx] = 0; }else if (dfreq_ > LORENTZ_CUTOFF){ l_continue[l_idx] = 0; }else{ //Do we have any transitions within the warp range? //if(warpAllReduceSum(leave)==WARP_SIZE) // continue; }*/ l_abscoef[l_idx] = g_abscoef[w_idx]; l_gamma[l_idx] = g_gamma[w_idx]; } //if(BlockReduce(temp_storage).Sum(leave)==0) // break; __syncthreads(); /*leave = 0; continue_cal = 0; for(int j = 0; j < VOIGT_BLOCK; j++){ continue_cal += l_continue[j]; leave+=l_leave[j]; } if(leave == 0) break; if(continue_cal==0) continue; */ for(int j = 0; j < VOIGT_SHARED_SIZE; j++){ nu = l_nu[j]; dfreq_ = freq-nu; if(dfreq_ < -LORENTZ_CUTOFF) break; if(dfreq_ > LORENTZ_CUTOFF) continue; //Do we have any transitions left within the warp range? 
//if(dfreq_>-lorentz_cutoff){ // l_abscoef[l_idx] = 0.0; // leave=0; // //break; //} gammaG= SQRTLN2/(nu*dpwcoeff); x =abs(dfreq_)*gammaG; y =l_gamma[j]*gammaG; cs_val+=l_abscoef[j]*humlickf(x,y)*gammaG; //*__expf(temp_3*dfreq_*dfreq_); } //if(WarpReduceI(leave_warp[warp_id]).Sum(leave)==31) // break; __syncthreads(); } if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val*ISQRTPI; } __global__ void device_compute_voigt_quad(const double* g_freq, double* g_cs,const double* g_nu,const double* g_abscoef,const double* g_gamma,const double temperature,const double pressure,const double lorentz_cutoff,const int N,const int N_ener,const int start_idx){ //The stored shared data typedef cub::WarpReduce<int> WarpReduceI; typedef cub::WarpReduce<double> WarpReduceD; volatile __shared__ double l_nu[SHARED_SIZE]; volatile __shared__ double l_abscoef[SHARED_SIZE]; volatile __shared__ double l_gamma[SHARED_SIZE]; __shared__ typename WarpReduceI::TempStorage leave_warp[4]; volatile __shared__ double l_abcissa[SHARED_SIZE]; volatile __shared__ double l_weight[SHARED_SIZE]; __shared__ typename WarpReduceD::TempStorage cs_warp[4]; //__shared__ int leave[BLOCK_SIZE]; int g_idx = blockIdx.x * blockDim.x + threadIdx.x; //__shared__ double l_cs_result[BLOCK_SIZE]; //Get the global and local thread number //int b_idx = blockIdx.x; int l_idx = threadIdx.x; int w_idx; int b_start = (threadIdx.x/32)*32; int warp_id = threadIdx.x/32; double cs_val = 0.0; double dfreq_=0.0; double freq = 0.0; double nu = 0.0; double gammaG; double warp_cs = 0.0; double dpwcoeff = sqrt(2.0*BOLTZ*temperature*NA/((cross_constants.mean_mass)))/VELLGT; int leave = 0; double x,y; if(g_idx < N){ freq = g_freq[start_idx+g_idx]; //cs_val = g_cs[start_idx+g_idx]; } //if(g_idx==9999) printf("%12.6f\n",freq); for(int i = 0; i < N_ener; i+=WARP_SIZE){ l_nu[l_idx] = 1e100; l_abscoef[l_idx] = 0.0; leave=0; w_idx = i + l_idx; if(i + l_idx < N_ener) { l_nu[l_idx] = g_nu[w_idx]; dfreq_ = freq-nu; if(dfreq_ > DOPPLER_CUTOFF){ leave = 1; } //Do we have any transitions within the warp range? if(WarpReduceI(leave_warp[warp_id]).Sum(leave)==WARP_SIZE) continue; l_abscoef[l_idx] = g_abscoef[w_idx]; l_gamma[l_idx] = g_gamma[w_idx]; } leave = 0; for(int j = 0; j < WARP_SIZE; j++){ warp_cs = 0.0; nu = l_nu[b_start+j]; dfreq_ = freq-nu; //if(dfreq_ > DOPPLER_CUTOFF) // continue; //Do we have any transitions left within the warp range? 
if(dfreq_<-lorentz_cutoff){ //l_abscoef[l_idx] = 0.0; leave=1; //break; } gammaG= SQRTLN2/(nu*dpwcoeff); x =abs(dfreq_)*gammaG; y =l_gamma[b_start+j]*gammaG; cs_val+=l_abscoef[b_start+j]*humlick(x,y)*gammaG*ISQRTPI; //*__expf(temp_3*dfreq_*dfreq_); } if(WarpReduceI(leave_warp[warp_id]).Sum(leave)>=WARP_SIZE) break; //__syncthreads(); } if(g_idx < N) g_cs[start_idx+g_idx]+=cs_val; } //Lorentzian __host__ void gpu_compute_lorentzian_profile(double* g_freq, double* g_intens, double* g_energies, double* g_nu, int* g_gns,double* g_aif,double* g_abs,double temp,double part,double hw, int Npoints,int N_ener,int start_idx,cudaStream_t stream){ int blockSize = 1024; int gridSize = (int)ceil((float)N_ener/blockSize); device_compute_abscoefs<<<gridSize,blockSize,0,stream>>>(g_energies,g_gns,g_nu,g_aif,g_abs,temp,part,N_ener); blockSize = SHARED_SIZE; gridSize = (int)ceil((float)Npoints/blockSize); device_compute_lorentzian<<<gridSize,blockSize,0,stream>>>(g_freq, g_intens,g_nu,g_abs,hw,Npoints,N_ener,start_idx); } //Gaussian __host__ void gpu_compute_gaussian_profile(double* g_freq, double* g_intens, double* g_energies, double* g_nu, int* g_gns,double* g_aif,double* g_abs,double temp,double part,double hw, int Npoints,int N_ener,int start_idx,cudaStream_t stream){ } //Doppler __host__ void gpu_compute_doppler_profile(double* g_freq, double* g_intens, double* g_energies, double* g_nu, int* g_gns,double* g_aif,double* g_abs,double temp,double part, int Npoints,int N_ener,int start_idx,cudaStream_t stream){ int blockSize = 1024; int gridSize = (int)ceil((float)N_ener/blockSize); device_compute_abscoefs<<<gridSize,blockSize,0,stream>>>(g_energies,g_gns,g_nu,g_aif,g_abs,temp,part,N_ener); blockSize = DOPPLER_SIZE; gridSize = Npoints;//(int)ceil((float)Npoints/blockSize); cudaFuncSetCacheConfig(device_compute_doppler_block, cudaFuncCachePreferL1); device_compute_doppler_block<<<gridSize,blockSize,0,stream>>>(g_freq, g_intens,g_nu,g_abs,sqrt(temp),Npoints,N_ener,start_idx); } //Voigt __host__ void gpu_compute_voigt_profile(double* g_freq, double* g_intens, double* g_energies, double* g_nu, int* g_gns,double* g_aif,double* g_abs,double* g_gamma,double* g_n ,double temp,double press,double part,int Npoints,int N_ener,int start_idx,cudaStream_t stream){ int blockSize = 1024; int gridSize = (int)ceil((float)N_ener/(float)blockSize); device_compute_abscoefs<<<gridSize,blockSize,0,stream>>>(g_energies,g_gns,g_nu,g_aif,g_abs,temp,part,N_ener); device_compute_pressure<<<gridSize,blockSize,0,stream>>>(g_gamma,g_n ,temp,press,N_ener); //device_compute_abscoefs<<<gridSize,blockSize,0,stream>>>(g_energies,g_gns,g_nu,g_aif,g_abs,temp,part,N_ener); //blockSize = VOIGT_SHARED_SIZE; //gridSize = (int)ceil((float)Npoints/(float)blockSize); //device_compute_voigt_II<<<gridSize,blockSize,0,stream>>>(g_freq, g_intens,g_nu,g_abs,g_gamma,sqrt(temp),press,Npoints,N_ener,start_idx); // blockSize = VOIGT_BLOCK; gridSize = Npoints; //cudaDeviceSetSharedMemConfig() cudaFuncSetCacheConfig(device_compute_voigt, cudaFuncCachePreferL1); device_compute_voigt<<<gridSize,blockSize,0,stream>>>(g_freq, g_intens,g_nu,g_abs,g_gamma,sqrt(temp),press,Npoints,N_ener,start_idx); }
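The .hip and .cu members of the pair above differ only by mechanical renames (cuda* -> hip*, cub -> hipcub, cuComplex.h -> hip/hip_complex.h) and by the kernel-launch syntax. Below is a minimal sketch of that mapping, written in CUDA with the HIP spellings shown as comments; the `scale` kernel is hypothetical and used only for illustration, not part of either file.

// Minimal sketch of the CUDA -> HIP renames visible in the pair above.
#include <cuda_runtime.h>            // HIP: #include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(double *v, double a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // thread indexing is unchanged by hipify
    if (i < n) v[i] *= a;
}

int main()
{
    const int n = 1 << 10;
    double *d_v = nullptr;
    cudaMalloc(&d_v, n * sizeof(double));            // HIP: hipMalloc
    cudaMemset(d_v, 0, n * sizeof(double));          // HIP: hipMemset

    dim3 block(256), grid((n + 255) / 256);
    // CUDA triple-chevron launch; hipify rewrites it as
    // hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d_v, 2.0, n);
    scale<<<grid, block, 0, 0>>>(d_v, 2.0, n);

    cudaDeviceSynchronize();                         // HIP: hipDeviceSynchronize
    cudaFree(d_v);                                   // HIP: hipFree
    return 0;
}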
7386c8df81a8689c2a17821eeaff69a78d678ff1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

__global__ void my_first_kernel(float *x)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;
  x[tid] = (float)threadIdx.x;
}

int main(int argc, char **argv)
{
  float *h_x, *d_x;
  int nblocks, nthreads, nsize, n;

  nblocks = 2;
  nthreads = 8;
  nsize = nblocks*nthreads;

  h_x = (float *)malloc(nsize*sizeof(float));
  hipMalloc((void **)&d_x, nsize*sizeof(float));

  my_first_kernel << <nblocks, nthreads >> >(d_x);

  hipMemcpy(h_x, d_x, nsize*sizeof(float), hipMemcpyDeviceToHost);

  for (n = 0; n<nsize; n++) printf(" n, x = %d %f \n", n, h_x[n]);

  hipFree(d_x);
  free(h_x);

  return 0;
}

// Output:
/*
 n, x = 0 0.0
 n, x = 1 1.0
 n, x = 2 2.0
 n, x = 3 3.0
 n, x = 4 4.0
 n, x = 5 5.0
 n, x = 6 6.0
 n, x = 7 7.0
 n, x = 8 0.0
 n, x = 9 1.0
 n, x = 10 2.0
 n, x = 11 3.0
 n, x = 12 4.0
 n, x = 13 5.0
 n, x = 14 6.0
 n, x = 15 7.0
*/
7386c8df81a8689c2a17821eeaff69a78d678ff1.cu
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

__global__ void my_first_kernel(float *x)
{
  int tid = threadIdx.x + blockDim.x*blockIdx.x;
  x[tid] = (float)threadIdx.x;
}

int main(int argc, char **argv)
{
  float *h_x, *d_x;
  int nblocks, nthreads, nsize, n;

  nblocks = 2;
  nthreads = 8;
  nsize = nblocks*nthreads;

  h_x = (float *)malloc(nsize*sizeof(float));
  cudaMalloc((void **)&d_x, nsize*sizeof(float));

  my_first_kernel << <nblocks, nthreads >> >(d_x);

  cudaMemcpy(h_x, d_x, nsize*sizeof(float), cudaMemcpyDeviceToHost);

  for (n = 0; n<nsize; n++) printf(" n, x = %d %f \n", n, h_x[n]);

  cudaFree(d_x);
  free(h_x);

  return 0;
}

// Output:
/*
 n, x = 0 0.0
 n, x = 1 1.0
 n, x = 2 2.0
 n, x = 3 3.0
 n, x = 4 4.0
 n, x = 5 5.0
 n, x = 6 6.0
 n, x = 7 7.0
 n, x = 8 0.0
 n, x = 9 1.0
 n, x = 10 2.0
 n, x = 11 3.0
 n, x = 12 4.0
 n, x = 13 5.0
 n, x = 14 6.0
 n, x = 15 7.0
*/
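For reference only: a minimal sketch of the same hello-world example with basic status checks added. The `check` helper below is hypothetical and not part of the original pair; the kernel and launch configuration are unchanged.

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Hypothetical helper: abort with a message if a CUDA call failed.
static void check(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

__global__ void my_first_kernel(float *x)
{
    int tid = threadIdx.x + blockDim.x*blockIdx.x;
    x[tid] = (float)threadIdx.x;
}

int main(void)
{
    const int nblocks = 2, nthreads = 8, nsize = nblocks*nthreads;
    float *h_x = (float *)malloc(nsize*sizeof(float));
    float *d_x = NULL;

    check(cudaMalloc((void **)&d_x, nsize*sizeof(float)), "cudaMalloc");

    my_first_kernel<<<nblocks, nthreads>>>(d_x);
    check(cudaGetLastError(), "kernel launch");          // catches launch-configuration errors
    check(cudaDeviceSynchronize(), "kernel execution");  // catches asynchronous runtime errors

    check(cudaMemcpy(h_x, d_x, nsize*sizeof(float), cudaMemcpyDeviceToHost), "cudaMemcpy");

    for (int n = 0; n < nsize; n++) printf(" n, x = %d %f \n", n, h_x[n]);

    cudaFree(d_x);
    free(h_x);
    return 0;
}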
d37c642c29b490312de1b976fee148eaf17eb37d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Implements the Landau kernel */ #include <petscconf.h> #include <petsc/private/dmpleximpl.h> /*I "dmpleximpl.h" I*/ #include <petsclandau.h> #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <../src/mat/impls/aij/seq/aij.h> #include <petscmat.h> #include <petsccublas.h> // hack to avoid configure problems in CI. Delete when resolved #if !defined (PETSC_HAVE_CUDA_ATOMIC) #define atomicAdd(e, f) (*e) += f #endif #define PETSC_DEVICE_FUNC_DECL __device__ #include "../land_tensors.h" #include <petscaijdevice.h> #define CHECK_LAUNCH_ERROR() \ do { \ /* Check synchronous errors, i.e. pre-launch */ \ hipError_t err = hipGetLastError(); \ if (hipSuccess != err) { \ SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",hipGetErrorString(err)); \ } \ /* Check asynchronous errors, i.e. kernel failed (ULF) */ \ err = hipDeviceSynchronize(); \ if (hipSuccess != err) { \ SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",hipGetErrorString(err)); \ } \ } while (0) PETSC_EXTERN PetscErrorCode LandauCUDACreateMatMaps(P4estVertexMaps *maps, pointInterpolationP4est (*points)[LANDAU_MAX_Q_FACE], PetscInt Nf, PetscInt Nq) { P4estVertexMaps h_maps; hipError_t cerr; PetscFunctionBegin; h_maps.num_elements =maps->num_elements; h_maps.num_face = maps->num_face; h_maps.num_reduced = maps->num_reduced; h_maps.deviceType = maps->deviceType; h_maps.Nf = Nf; h_maps.Nq = Nq; cerr = hipMalloc((void **)&h_maps.c_maps, maps->num_reduced * sizeof *points);CHKERRCUDA(cerr); cerr = hipMemcpy( h_maps.c_maps, maps->c_maps, maps->num_reduced * sizeof *points, hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMalloc((void **)&h_maps.gIdx, maps->num_elements * sizeof *maps->gIdx);CHKERRCUDA(cerr); cerr = hipMemcpy( h_maps.gIdx, maps->gIdx, maps->num_elements * sizeof *maps->gIdx, hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMalloc((void **)&maps->data, sizeof(P4estVertexMaps));CHKERRCUDA(cerr); cerr = hipMemcpy( maps->data, &h_maps, sizeof(P4estVertexMaps), hipMemcpyHostToDevice);CHKERRCUDA(cerr); PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode LandauCUDADestroyMatMaps(P4estVertexMaps *pMaps) { P4estVertexMaps *d_maps = pMaps->data, h_maps; hipError_t cerr; PetscFunctionBegin; cerr = hipMemcpy(&h_maps, d_maps, sizeof(P4estVertexMaps), hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = hipFree(h_maps.c_maps);CHKERRCUDA(cerr); cerr = hipFree(h_maps.gIdx);CHKERRCUDA(cerr); cerr = hipFree(d_maps);CHKERRCUDA(cerr); PetscFunctionReturn(0); } // The GPU Landau kernel // __global__ void landau_form_fdf(const PetscInt nip, const PetscInt dim, const PetscInt Nf, const PetscInt Nb, const PetscReal invJ_a[], const PetscReal * const BB, const PetscReal * const DD, LandauIPReal *IPDataRaw, LandauIPReal d_f[], LandauIPReal d_dfdx[], LandauIPReal d_dfdy[], #if LANDAU_DIM==3 LandauIPReal d_dfdz[], #endif PetscErrorCode *ierr) // output { const PetscInt Nq = blockDim.x, myelem = blockIdx.x; const PetscInt myQi = threadIdx.x; const PetscInt jpidx = myQi + myelem * Nq; const PetscReal *invJ = &invJ_a[jpidx*dim*dim]; const PetscReal *Bq = &BB[myQi*Nb], *Dq = &DD[myQi*Nb*dim]; // un pack IPData LandauIPReal *IPData_coefs = &IPDataRaw[nip*(dim+1)]; LandauIPReal *coef = &IPData_coefs[myelem*Nb*Nf]; PetscInt f,d,b,e; PetscScalar u_x[LANDAU_MAX_SPECIES][LANDAU_DIM]; *ierr = 0; /* get f and df */ for (f = 0; f < Nf; ++f) { PetscScalar refSpaceDer[LANDAU_DIM]; d_f[jpidx + f*nip] = 0.0; for (d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0; 
for (b = 0; b < Nb; ++b) { const PetscInt cidx = b; d_f[jpidx + f*nip] += Bq[cidx]*coef[f*Nb+cidx]; for (d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx*dim+d]*coef[f*Nb+cidx]; } for (d = 0; d < dim; ++d) { for (e = 0, u_x[f][d] = 0.0; e < dim; ++e) { u_x[f][d] += invJ[e*dim+d]*refSpaceDer[e]; } } } for (f=0;f<Nf;f++) { d_dfdx[jpidx + f*nip] = PetscRealPart(u_x[f][0]); d_dfdy[jpidx + f*nip] = PetscRealPart(u_x[f][1]); #if LANDAU_DIM==3 d_dfdz[jpidx + f*nip] = PetscRealPart(u_x[f][2]); #endif } } __device__ void landau_inner_integral_v2(const PetscInt myQi, const PetscInt jpidx, PetscInt nip, const PetscInt Nq, const PetscInt Nf, const PetscInt Nb, const PetscInt dim, LandauIPReal *IPDataRaw, const PetscReal invJj[], const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const PetscReal * const BB, const PetscReal * const DD, PetscScalar *elemMat, P4estVertexMaps *d_maps, PetscSplitCSRDataStructure *d_mat, // output PetscScalar fieldMats[][LANDAU_MAX_NQ], // all these arrays are in shared memory PetscReal g2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal g3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal gg2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal gg3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_nu_alpha[], PetscReal s_nu_beta[], PetscReal s_invMass[], PetscReal s_f[], PetscReal s_dfx[], PetscReal s_dfy[], LandauIPReal d_f[], LandauIPReal d_dfdx[], LandauIPReal d_dfdy[], // global memory #if LANDAU_DIM==3 PetscReal s_dfz[], LandauIPReal d_dfdz[], #endif PetscReal d_mass_w[], PetscReal shift, PetscInt myelem, PetscErrorCode *ierr) { int delta,d,f,g,d2,dp,d3,fieldA,ipidx_b,nip_pad = nip; // vectorization padding not supported; *ierr = 0; if (!d_mass_w) { // get g2 & g3 PetscReal gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM]; LandauIPData IPData; // create g2 & g3 for (f=threadIdx.x; f<Nf; f+=blockDim.x) { for (d=0;d<dim;d++) { // clear accumulation data D & K gg2[d][myQi][f] = 0; for (d2=0;d2<dim;d2++) gg3[d][d2][myQi][f] = 0; } } if (threadIdx.y == 0) { for (int i = threadIdx.x; i < Nf; i += blockDim.x) { s_nu_alpha[i] = nu_alpha[i]; s_nu_beta[i] = nu_beta[i]; s_invMass[i] = invMass[i]; } } for (d2 = 0; d2 < dim; d2++) { gg2_temp[d2] = 0; for (d3 = 0; d3 < dim; d3++) { gg3_temp[d2][d3] = 0; } } __syncthreads(); // un pack IPData IPData.w = IPDataRaw; IPData.x = IPDataRaw + 1*nip_pad; IPData.y = IPDataRaw + 2*nip_pad; IPData.z = IPDataRaw + 3*nip_pad; for (ipidx_b = 0; ipidx_b < nip; ipidx_b += blockDim.x) { const PetscReal vj[3] = {IPData.x[jpidx], IPData.y[jpidx], IPData.z ? IPData.z[jpidx] : 0}; int ipidx = ipidx_b + threadIdx.x; __syncthreads(); if (ipidx < nip) { for (fieldA = threadIdx.y; fieldA < Nf; fieldA += blockDim.y) { s_f [fieldA*blockDim.x+threadIdx.x] = d_f[ipidx + fieldA*nip_pad]; s_dfx[fieldA*blockDim.x+threadIdx.x] = d_dfdx[ipidx + fieldA*nip_pad]; s_dfy[fieldA*blockDim.x+threadIdx.x] = d_dfdy[ipidx + fieldA*nip_pad]; #if LANDAU_DIM==3 s_dfz[fieldA*blockDim.x+threadIdx.x] = d_dfdz[ipidx + fieldA*nip_pad]; #endif } } __syncthreads(); if (ipidx < nip) { const PetscReal wi = IPData.w[ipidx], x = IPData.x[ipidx], y = IPData.y[ipidx]; PetscReal temp1[3] = {0, 0, 0}, temp2 = 0; #if LANDAU_DIM==2 PetscReal Ud[2][2], Uk[2][2]; LandauTensor2D(vj, x, y, Ud, Uk, (ipidx==jpidx) ? 0. : 1.); #else PetscReal U[3][3], z = IPData.z[ipidx]; LandauTensor3D(vj, x, y, z, U, (ipidx==jpidx) ? 0. 
: 1.); #endif for (fieldA = 0; fieldA < Nf; fieldA++) { temp1[0] += s_dfx[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA]; temp1[1] += s_dfy[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA]; #if LANDAU_DIM==3 temp1[2] += s_dfz[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA]; #endif temp2 += s_f [fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]; } temp1[0] *= wi; temp1[1] *= wi; #if LANDAU_DIM==3 temp1[2] *= wi; #endif temp2 *= wi; #if LANDAU_DIM==2 for (d2 = 0; d2 < 2; d2++) { for (d3 = 0; d3 < 2; ++d3) { /* K = U * grad(f): g2=e: i,A */ gg2_temp[d2] += Uk[d2][d3]*temp1[d3]; /* D = -U * (I \kron (fx)): g3=f: i,j,A */ gg3_temp[d2][d3] += Ud[d2][d3]*temp2; } } #else for (d2 = 0; d2 < 3; ++d2) { for (d3 = 0; d3 < 3; ++d3) { /* K = U * grad(f): g2 = e: i,A */ gg2_temp[d2] += U[d2][d3]*temp1[d3]; /* D = -U * (I \kron (fx)): g3 = f: i,j,A */ gg3_temp[d2][d3] += U[d2][d3]*temp2; } } #endif } } /* IPs */ /* reduce gg temp sums across threads */ for (delta = blockDim.x/2; delta > 0; delta /= 2) { for (d2 = 0; d2 < dim; d2++) { gg2_temp[d2] += __shfl_xor_sync(0xffffffff, gg2_temp[d2], delta, blockDim.x); for (d3 = 0; d3 < dim; d3++) { gg3_temp[d2][d3] += __shfl_xor_sync(0xffffffff, gg3_temp[d2][d3], delta, blockDim.x); } } } // add alpha and put in gg2/3 for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) { for (d2 = 0; d2 < dim; d2++) { gg2[d2][myQi][fieldA] += gg2_temp[d2]*s_nu_alpha[fieldA]; for (d3 = 0; d3 < dim; d3++) { gg3[d2][d3][myQi][fieldA] -= gg3_temp[d2][d3]*s_nu_alpha[fieldA]*s_invMass[fieldA]; } } } __syncthreads(); /* add electric field term once per IP */ for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) { gg2[dim-1][myQi][fieldA] += Eq_m[fieldA]; } __syncthreads(); //intf("%d %d gg2[1][1]=%g\n",myelem,qj_start,gg2[1][dim-1]); /* Jacobian transform - g2 */ for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) { PetscReal wj = IPData.w[jpidx]; for (d = 0; d < dim; ++d) { g2[d][myQi][fieldA] = 0.0; for (d2 = 0; d2 < dim; ++d2) { g2[d][myQi][fieldA] += invJj[d*dim+d2]*gg2[d2][myQi][fieldA]; g3[d][d2][myQi][fieldA] = 0.0; for (d3 = 0; d3 < dim; ++d3) { for (dp = 0; dp < dim; ++dp) { g3[d][d2][myQi][fieldA] += invJj[d*dim + d3]*gg3[d3][dp][myQi][fieldA]*invJj[d2*dim + dp]; } } g3[d][d2][myQi][fieldA] *= wj; } g2[d][myQi][fieldA] *= wj; } } __syncthreads(); // Synchronize (ensure all the data is available) and sum IP matrices } // !mass_w /* FE matrix construction */ { int fieldA,d,qj,d2,q,idx,totDim=Nb*Nf; /* assemble */ for (fieldA = 0; fieldA < Nf; fieldA++) { if (fieldMats) { for (f = threadIdx.y; f < Nb ; f += blockDim.y) { for (g = threadIdx.x; g < Nb; g += blockDim.x) { fieldMats[f][g] = 0; } } } for (f = threadIdx.y; f < Nb ; f += blockDim.y) { const PetscInt i = fieldA*Nb + f; /* Element matrix row */ for (g = threadIdx.x; g < Nb; g += blockDim.x) { const PetscInt j = fieldA*Nb + g; /* Element matrix column */ const PetscInt fOff = i*totDim + j; PetscScalar t = elemMat ? 
elemMat[fOff] : fieldMats[f][g]; for (qj = 0 ; qj < Nq ; qj++) { const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim]; if (!d_mass_w) { for (d = 0; d < dim; ++d) { t += DIq[f*dim+d]*g2[d][qj][fieldA]*BJq[g]; for (d2 = 0; d2 < dim; ++d2) { t += DIq[f*dim + d]*g3[d][d2][qj][fieldA]*DIq[g*dim + d2]; } } } else { const PetscInt jpidx = qj + myelem * Nq; t += BJq[f] * d_mass_w[jpidx]*shift * BJq[g]; } } if (elemMat) elemMat[fOff] = t; else fieldMats[f][g] = t; } } if (fieldMats) { PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]; PetscReal row_scale[LANDAU_MAX_Q_FACE],col_scale[LANDAU_MAX_Q_FACE]; PetscInt nr,nc,rows0[LANDAU_MAX_Q_FACE],cols0[LANDAU_MAX_Q_FACE],rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE]; const LandauIdx *const Idxs = &d_maps->gIdx[myelem][fieldA][0]; for (f = threadIdx.y; f < Nb ; f += blockDim.y) { idx = Idxs[f]; if (idx >= 0) { nr = 1; rows0[0] = idx; row_scale[0] = 1.; } else { idx = -idx - 1; nr = d_maps->num_face; for (q = 0; q < d_maps->num_face; q++) { rows0[q] = d_maps->c_maps[idx][q].gid; row_scale[q] = d_maps->c_maps[idx][q].scale; } } for (g = threadIdx.x; g < Nb; g += blockDim.x) { idx = Idxs[g]; if (idx >= 0) { nc = 1; cols0[0] = idx; col_scale[0] = 1.; } else { idx = -idx - 1; nc = d_maps->num_face; for (q = 0; q < d_maps->num_face; q++) { cols0[q] = d_maps->c_maps[idx][q].gid; col_scale[q] = d_maps->c_maps[idx][q].scale; } } for (q = 0; q < nr; q++) rows[q] = rows0[q]; for (q = 0; q < nc; q++) cols[q] = cols0[q]; for (q = 0; q < nr; q++) { for (d = 0; d < nc; d++) { vals[q*nc + d] = row_scale[q]*col_scale[d]*fieldMats[f][g]; } } MatSetValuesDevice(d_mat,nr,rows,nc,cols,vals,ADD_VALUES,ierr); if (*ierr) return; } } } } } } // // The GPU Landau kernel // __global__ void __launch_bounds__(256,1) landau_kernel_v2(const PetscInt nip, const PetscInt dim, const PetscInt totDim, const PetscInt Nf, const PetscInt Nb, const PetscReal invJj[], const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const PetscReal * const BB, const PetscReal * const DD, LandauIPReal *IPDataRaw, PetscScalar elemMats_out[], P4estVertexMaps *d_maps, PetscSplitCSRDataStructure *d_mat, LandauIPReal d_f[], LandauIPReal d_dfdx[], LandauIPReal d_dfdy[], #if LANDAU_DIM==3 LandauIPReal d_dfdz[], #endif PetscReal d_mass_w[], PetscReal shift, PetscErrorCode *ierr) { const PetscInt Nq = blockDim.y, myelem = blockIdx.x; extern __shared__ PetscReal smem[]; int size = 0; PetscReal (*g2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = // shared mem not needed when mass_w (PetscReal (*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size]; size += LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM; PetscReal (*g3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal (*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size]; size += LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES; PetscReal (*gg2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal (*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size]; size += LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM; PetscReal (*gg3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal (*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size]; size += LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES; PetscReal *s_nu_alpha = &smem[size]; size += LANDAU_MAX_SPECIES; PetscReal *s_nu_beta = &smem[size]; size += LANDAU_MAX_SPECIES; PetscReal *s_invMass = &smem[size]; size += LANDAU_MAX_SPECIES; PetscReal *s_f 
= &smem[size]; size += blockDim.x*LANDAU_MAX_SPECIES; PetscReal *s_dfx = &smem[size]; size += blockDim.x*LANDAU_MAX_SPECIES; PetscReal *s_dfy = &smem[size]; size += blockDim.x*LANDAU_MAX_SPECIES; #if LANDAU_DIM==3 PetscReal *s_dfz = &smem[size]; size += blockDim.x*LANDAU_MAX_SPECIES; #endif PetscScalar (*fieldMats)[LANDAU_MAX_NQ][LANDAU_MAX_NQ] = d_maps ? (PetscScalar (*)[LANDAU_MAX_NQ][LANDAU_MAX_NQ]) &smem[size] : NULL; if (d_maps) size += LANDAU_MAX_NQ*LANDAU_MAX_NQ; const PetscInt myQi = threadIdx.y; const PetscInt jpidx = myQi + myelem * Nq; //const PetscInt subblocksz = nip/nSubBlks + !!(nip%nSubBlks), ip_start = mySubBlk*subblocksz, ip_end = (mySubBlk+1)*subblocksz > nip ? nip : (mySubBlk+1)*subblocksz; /* this could be wrong with very few global IPs */ PetscScalar *elemMat = elemMats_out ? &elemMats_out[myelem*totDim*totDim] : NULL; /* my output */ int tid = threadIdx.x + threadIdx.y*blockDim.x; const PetscReal *invJ = invJj ? &invJj[jpidx*dim*dim] : NULL; if (elemMat) for (int i = tid; i < totDim*totDim; i += blockDim.x*blockDim.y) elemMat[i] = 0; __syncthreads(); landau_inner_integral_v2(myQi, jpidx, nip, Nq, Nf, Nb, dim, IPDataRaw, invJ, nu_alpha, nu_beta, invMass, Eq_m, BB, DD, elemMat, d_maps, d_mat, *fieldMats, *g2, *g3, *gg2, *gg3, s_nu_alpha, s_nu_beta, s_invMass, s_f, s_dfx, s_dfy, d_f, d_dfdx, d_dfdy, #if LANDAU_DIM==3 s_dfz, d_dfdz, #endif d_mass_w, shift, myelem, ierr); /* compact */ } PetscErrorCode LandauCUDAJacobian(DM plex, const PetscInt Nq, const PetscReal nu_alpha[],const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const LandauIPData *const IPData, const PetscReal invJj[], PetscReal *mass_w, PetscReal shift, const PetscLogEvent events[], Mat JacP) { PetscErrorCode ierr,*d_ierr; hipError_t cerr; PetscInt ii,ej,*Nbf,Nb,nip_dim2,cStart,cEnd,Nf,dim,numGCells,totDim,nip,szf=sizeof(LandauIPReal),ipdatasz; PetscReal *d_BB,*d_DD,*d_invJj=NULL,*d_nu_alpha,*d_nu_beta,*d_invMass,*d_Eq_m,*d_mass_w=NULL; PetscScalar *d_elemMats=NULL; LandauIPReal *d_f=NULL, *d_dfdx=NULL, *d_dfdy=NULL; #if LANDAU_DIM==3 PetscScalar *d_dfdz=NULL; #endif PetscTabulation *Tf; PetscDS prob; PetscSection section, globalSection; LandauIPReal *d_IPDataRaw=NULL; LandauCtx *ctx; PetscSplitCSRDataStructure *d_mat=NULL; P4estVertexMaps *h_maps, *d_maps=NULL; int nnn = 256/Nq; PetscFunctionBegin; while (nnn & nnn - 1) nnn = nnn & nnn - 1; if (nnn>16) nnn = 16; ierr = PetscLogEventBegin(events[3],0,0,0,0);CHKERRQ(ierr); ierr = DMGetDimension(plex, &dim);CHKERRQ(ierr); if (dim!=LANDAU_DIM) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "LANDAU_DIM %D != dim %d",LANDAU_DIM,dim); ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr); numGCells = cEnd - cStart; nip = numGCells*Nq; /* length of inner global iteration */ ierr = DMGetDS(plex, &prob);CHKERRQ(ierr); ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr); ierr = PetscDSGetDimensions(prob, &Nbf);CHKERRQ(ierr); Nb = Nbf[0]; if (Nq != Nb) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Nq != Nb. 
%D %D",Nq,Nb); ierr = PetscDSGetTotalDimension(prob, &totDim);CHKERRQ(ierr); ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr); ierr = DMGetLocalSection(plex, &section);CHKERRQ(ierr); ierr = DMGetGlobalSection(plex, &globalSection);CHKERRQ(ierr); // create data cerr = hipMalloc((void **)&d_BB, Nq*Nb*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMemcpy( d_BB, Tf[0]->T[0], Nq*Nb*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMalloc((void **)&d_DD, Nq*Nb*dim*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMemcpy( d_DD, Tf[0]->T[1], Nq*Nb*dim*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr); nip_dim2 = Nq*numGCells*dim*dim; if (mass_w) { cerr = hipMalloc((void **)&d_mass_w, nip*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMemcpy( d_mass_w, mass_w,nip*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr); } else { ipdatasz = LandauGetIPDataSize(IPData); cerr = hipMalloc((void **)&d_IPDataRaw,ipdatasz*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMemcpy(d_IPDataRaw, IPData->w, ipdatasz*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr); // assumes IPData starts with 'w' cerr = hipMalloc((void **)&d_nu_alpha, Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMalloc((void **)&d_nu_beta, Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMalloc((void **)&d_invMass, Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMalloc((void **)&d_Eq_m, Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMemcpy(d_nu_alpha, nu_alpha, Nf*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMemcpy(d_nu_beta, nu_beta, Nf*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMemcpy(d_invMass, invMass, Nf*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = hipMemcpy(d_Eq_m, Eq_m, Nf*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr); // f and df cerr = hipMalloc((void **)&d_f, nip*Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMalloc((void **)&d_dfdx, nip*Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMalloc((void **)&d_dfdy, nip*Nf*szf);CHKERRCUDA(cerr); // kernel input #if LANDAU_DIM==3 cerr = hipMalloc((void **)&d_dfdz, nip*Nf*szf);CHKERRCUDA(cerr); // kernel input #endif // collect geometry cerr = hipMalloc((void **)&d_invJj, nip_dim2*szf);CHKERRCUDA(cerr); // kernel input cerr = hipMemcpy(d_invJj, invJj, nip_dim2*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = DMGetApplicationContext(plex, &ctx);CHKERRQ(ierr); if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); if (ctx->gpu_assembly) { PetscContainer container; ierr = PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);CHKERRQ(ierr); if (container) { // not here first call ierr = PetscContainerGetPointer(container, (void **) &h_maps);CHKERRQ(ierr); if (h_maps->data) { d_maps = h_maps->data; if (!d_maps) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata"); } else { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata in container"); } // this does the setup the first time called ierr = MatCUSPARSEGetDeviceMatWrite(JacP,&d_mat);CHKERRQ(ierr); } else { cerr = hipMalloc((void **)&d_elemMats, totDim*totDim*numGCells*sizeof(PetscScalar));CHKERRCUDA(cerr); // kernel output - first call is on CPU } } else { cerr = hipMalloc((void **)&d_elemMats, totDim*totDim*numGCells*sizeof(PetscScalar));CHKERRCUDA(cerr); // kernel output - no GPU assembly } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(events[3],0,0,0,0);CHKERRQ(ierr); cerr = hipMalloc((void **)&d_ierr, sizeof(ierr));CHKERRCUDA(cerr); // kernel input if (!mass_w) { // form f and df dim3 
dimBlock(Nq,1); ierr = PetscLogEventBegin(events[8],0,0,0,0);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ii = 0; // PetscPrintf(PETSC_COMM_SELF, "numGCells=%d dim.x=%d Nq=%d nThreads=%d, %d kB shared mem\n",numGCells,n,Nq,Nq*n,ii*szf/1024); hipLaunchKernelGGL(( landau_form_fdf), dim3(numGCells),dim3(dimBlock),ii*szf, 0, nip, dim, Nf, Nb, d_invJj, d_BB, d_DD, d_IPDataRaw, d_f, d_dfdx, d_dfdy, #if LANDAU_DIM==3 d_dfdz, #endif d_ierr); CHECK_LAUNCH_ERROR(); ierr = PetscLogGpuFlops(nip*(PetscLogDouble)(2*Nb*(1+dim)));CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); cerr = hipMemcpy(&ierr, d_ierr, sizeof(ierr), hipMemcpyDeviceToHost);CHKERRCUDA(cerr); CHKERRQ(ierr); ierr = PetscLogEventEnd(events[8],0,0,0,0);CHKERRQ(ierr); } ierr = PetscLogEventBegin(events[4],0,0,0,0);CHKERRQ(ierr); { dim3 dimBlock(nnn,Nq); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = PetscLogGpuFlops(nip*(PetscLogDouble)(mass_w ? (nip*(11*Nf+ 4*dim*dim) + 6*Nf*dim*dim*dim + 10*Nf*dim*dim + 4*Nf*dim + Nb*Nf*Nb*Nq*dim*dim*5) : Nb*Nf*Nb*Nq*4));CHKERRQ(ierr); ii = 2*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM*(1+LANDAU_DIM) + 3*LANDAU_MAX_SPECIES + (1+LANDAU_DIM)*dimBlock.x*LANDAU_MAX_SPECIES; ii += (LANDAU_MAX_NQ*LANDAU_MAX_NQ)*LANDAU_MAX_SPECIES; if (ii*szf >= 49152) { cerr = hipFuncSetAttribute(landau_kernel_v2, hipFuncAttributeMaxDynamicSharedMemorySize, 98304);CHKERRCUDA(cerr); } // PetscPrintf(PETSC_COMM_SELF, "numGCells=%d dim.x=%d Nq=%d nThreads=%d, %d kB shared mem\n",numGCells,n,Nq,Nq*n,ii*szf/1024); hipLaunchKernelGGL(( landau_kernel_v2), dim3(numGCells),dim3(dimBlock),ii*szf, 0, nip,dim,totDim,Nf,Nb,d_invJj,d_nu_alpha,d_nu_beta,d_invMass,d_Eq_m, d_BB, d_DD, d_IPDataRaw, d_elemMats, d_maps, d_mat, d_f, d_dfdx, d_dfdy, #if LANDAU_DIM==3 d_dfdz, #endif d_mass_w, shift, d_ierr); CHECK_LAUNCH_ERROR(); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); //cerr = hipMemcpy(&ierr, d_ierr, sizeof(ierr), hipMemcpyDeviceToHost);CHKERRCUDA(cerr); //CHKERRQ(ierr); } cerr = hipFree(d_ierr);CHKERRCUDA(cerr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(events[4],0,0,0,0);CHKERRQ(ierr); // delete device data ierr = PetscLogEventBegin(events[5],0,0,0,0);CHKERRQ(ierr); cerr = hipFree(d_BB);CHKERRCUDA(cerr); cerr = hipFree(d_DD);CHKERRCUDA(cerr); if (mass_w) { cerr = hipFree(d_mass_w);CHKERRCUDA(cerr); } else { cerr = hipFree(d_IPDataRaw);CHKERRCUDA(cerr); cerr = hipFree(d_f);CHKERRCUDA(cerr); cerr = hipFree(d_dfdx);CHKERRCUDA(cerr); cerr = hipFree(d_dfdy);CHKERRCUDA(cerr); #if LANDAU_DIM==3 cerr = hipFree(d_dfdz);CHKERRCUDA(cerr); #endif cerr = hipFree(d_invJj);CHKERRCUDA(cerr); cerr = hipFree(d_nu_alpha);CHKERRCUDA(cerr); cerr = hipFree(d_nu_beta);CHKERRCUDA(cerr); cerr = hipFree(d_invMass);CHKERRCUDA(cerr); cerr = hipFree(d_Eq_m);CHKERRCUDA(cerr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(events[5],0,0,0,0);CHKERRQ(ierr); // First time assembly even with GPU assembly if (d_elemMats) { PetscScalar *elemMats=NULL,*elMat; ierr = PetscLogEventBegin(events[5],0,0,0,0);CHKERRQ(ierr); ierr = PetscMalloc1(totDim*totDim*numGCells,&elemMats);CHKERRQ(ierr); cerr = hipMemcpy(elemMats, d_elemMats, totDim*totDim*numGCells*sizeof(PetscScalar), hipMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = hipFree(d_elemMats);CHKERRCUDA(cerr); ierr = PetscLogEventEnd(events[5],0,0,0,0);CHKERRQ(ierr); ierr = PetscLogEventBegin(events[6],0,0,0,0);CHKERRQ(ierr); for (ej = cStart, elMat = elemMats ; ej < cEnd; ++ej, elMat += totDim*totDim) { ierr = DMPlexMatSetClosure(plex, section, globalSection, JacP, 
ej, elMat, ADD_VALUES);CHKERRQ(ierr); if (ej==-1) { int d,f; PetscPrintf(PETSC_COMM_SELF,"GPU Element matrix\n"); for (d = 0; d < totDim; ++d){ for (f = 0; f < totDim; ++f) PetscPrintf(PETSC_COMM_SELF," %12.5e", PetscRealPart(elMat[d*totDim + f])); PetscPrintf(PETSC_COMM_SELF,"\n"); } } } ierr = PetscFree(elemMats);CHKERRQ(ierr); ierr = PetscLogEventEnd(events[6],0,0,0,0);CHKERRQ(ierr); } PetscFunctionReturn(0); }
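//
// Note (illustrative sketch, not part of the file above): the inner-integral
// kernel reduces the per-thread partial sums gg2_temp/gg3_temp across
// threadIdx.x with a shuffle-XOR butterfly,
//   partial += __shfl_xor_sync(0xffffffff, partial, delta, blockDim.x).
// The standalone kernel below shows only that reduction pattern. The kernel
// name and launch shape are hypothetical; it assumes a one-dimensional block
// of a single full warp (blockDim.x == 32), so the full 0xffffffff mask is valid.
__global__ void butterfly_sum_sketch(const double *in, double *out, int n)
{
  double partial = 0.0;
  // each lane accumulates a strided slice of the input
  for (int i = threadIdx.x; i < n; i += blockDim.x) partial += in[i];
  // butterfly reduction: after log2(blockDim.x) rounds every lane holds the total
  for (int delta = blockDim.x/2; delta > 0; delta /= 2) {
    partial += __shfl_xor_sync(0xffffffff, partial, delta, blockDim.x);
  }
  if (threadIdx.x == 0) out[blockIdx.x] = partial;  // one result per block
}
// Example launch (one warp per block): butterfly_sum_sketch<<<nblocks, 32>>>(d_in, d_out, n);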
d37c642c29b490312de1b976fee148eaf17eb37d.cu
/* Implements the Landau kernel */ #include <petscconf.h> #include <petsc/private/dmpleximpl.h> /*I "dmpleximpl.h" I*/ #include <petsclandau.h> #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <../src/mat/impls/aij/seq/aij.h> #include <petscmat.h> #include <petsccublas.h> // hack to avoid configure problems in CI. Delete when resolved #if !defined (PETSC_HAVE_CUDA_ATOMIC) #define atomicAdd(e, f) (*e) += f #endif #define PETSC_DEVICE_FUNC_DECL __device__ #include "../land_tensors.h" #include <petscaijdevice.h> #define CHECK_LAUNCH_ERROR() \ do { \ /* Check synchronous errors, i.e. pre-launch */ \ cudaError_t err = cudaGetLastError(); \ if (cudaSuccess != err) { \ SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",cudaGetErrorString(err)); \ } \ /* Check asynchronous errors, i.e. kernel failed (ULF) */ \ err = cudaDeviceSynchronize(); \ if (cudaSuccess != err) { \ SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",cudaGetErrorString(err)); \ } \ } while (0) PETSC_EXTERN PetscErrorCode LandauCUDACreateMatMaps(P4estVertexMaps *maps, pointInterpolationP4est (*points)[LANDAU_MAX_Q_FACE], PetscInt Nf, PetscInt Nq) { P4estVertexMaps h_maps; cudaError_t cerr; PetscFunctionBegin; h_maps.num_elements =maps->num_elements; h_maps.num_face = maps->num_face; h_maps.num_reduced = maps->num_reduced; h_maps.deviceType = maps->deviceType; h_maps.Nf = Nf; h_maps.Nq = Nq; cerr = cudaMalloc((void **)&h_maps.c_maps, maps->num_reduced * sizeof *points);CHKERRCUDA(cerr); cerr = cudaMemcpy( h_maps.c_maps, maps->c_maps, maps->num_reduced * sizeof *points, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&h_maps.gIdx, maps->num_elements * sizeof *maps->gIdx);CHKERRCUDA(cerr); cerr = cudaMemcpy( h_maps.gIdx, maps->gIdx, maps->num_elements * sizeof *maps->gIdx, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&maps->data, sizeof(P4estVertexMaps));CHKERRCUDA(cerr); cerr = cudaMemcpy( maps->data, &h_maps, sizeof(P4estVertexMaps), cudaMemcpyHostToDevice);CHKERRCUDA(cerr); PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode LandauCUDADestroyMatMaps(P4estVertexMaps *pMaps) { P4estVertexMaps *d_maps = pMaps->data, h_maps; cudaError_t cerr; PetscFunctionBegin; cerr = cudaMemcpy(&h_maps, d_maps, sizeof(P4estVertexMaps), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = cudaFree(h_maps.c_maps);CHKERRCUDA(cerr); cerr = cudaFree(h_maps.gIdx);CHKERRCUDA(cerr); cerr = cudaFree(d_maps);CHKERRCUDA(cerr); PetscFunctionReturn(0); } // The GPU Landau kernel // __global__ void landau_form_fdf(const PetscInt nip, const PetscInt dim, const PetscInt Nf, const PetscInt Nb, const PetscReal invJ_a[], const PetscReal * const BB, const PetscReal * const DD, LandauIPReal *IPDataRaw, LandauIPReal d_f[], LandauIPReal d_dfdx[], LandauIPReal d_dfdy[], #if LANDAU_DIM==3 LandauIPReal d_dfdz[], #endif PetscErrorCode *ierr) // output { const PetscInt Nq = blockDim.x, myelem = blockIdx.x; const PetscInt myQi = threadIdx.x; const PetscInt jpidx = myQi + myelem * Nq; const PetscReal *invJ = &invJ_a[jpidx*dim*dim]; const PetscReal *Bq = &BB[myQi*Nb], *Dq = &DD[myQi*Nb*dim]; // un pack IPData LandauIPReal *IPData_coefs = &IPDataRaw[nip*(dim+1)]; LandauIPReal *coef = &IPData_coefs[myelem*Nb*Nf]; PetscInt f,d,b,e; PetscScalar u_x[LANDAU_MAX_SPECIES][LANDAU_DIM]; *ierr = 0; /* get f and df */ for (f = 0; f < Nf; ++f) { PetscScalar refSpaceDer[LANDAU_DIM]; d_f[jpidx + f*nip] = 0.0; for (d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0; for (b = 0; b < Nb; ++b) { const PetscInt cidx = b; d_f[jpidx + 
f*nip] += Bq[cidx]*coef[f*Nb+cidx]; for (d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx*dim+d]*coef[f*Nb+cidx]; } for (d = 0; d < dim; ++d) { for (e = 0, u_x[f][d] = 0.0; e < dim; ++e) { u_x[f][d] += invJ[e*dim+d]*refSpaceDer[e]; } } } for (f=0;f<Nf;f++) { d_dfdx[jpidx + f*nip] = PetscRealPart(u_x[f][0]); d_dfdy[jpidx + f*nip] = PetscRealPart(u_x[f][1]); #if LANDAU_DIM==3 d_dfdz[jpidx + f*nip] = PetscRealPart(u_x[f][2]); #endif } } __device__ void landau_inner_integral_v2(const PetscInt myQi, const PetscInt jpidx, PetscInt nip, const PetscInt Nq, const PetscInt Nf, const PetscInt Nb, const PetscInt dim, LandauIPReal *IPDataRaw, const PetscReal invJj[], const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const PetscReal * const BB, const PetscReal * const DD, PetscScalar *elemMat, P4estVertexMaps *d_maps, PetscSplitCSRDataStructure *d_mat, // output PetscScalar fieldMats[][LANDAU_MAX_NQ], // all these arrays are in shared memory PetscReal g2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal g3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal gg2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal gg3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES], PetscReal s_nu_alpha[], PetscReal s_nu_beta[], PetscReal s_invMass[], PetscReal s_f[], PetscReal s_dfx[], PetscReal s_dfy[], LandauIPReal d_f[], LandauIPReal d_dfdx[], LandauIPReal d_dfdy[], // global memory #if LANDAU_DIM==3 PetscReal s_dfz[], LandauIPReal d_dfdz[], #endif PetscReal d_mass_w[], PetscReal shift, PetscInt myelem, PetscErrorCode *ierr) { int delta,d,f,g,d2,dp,d3,fieldA,ipidx_b,nip_pad = nip; // vectorization padding not supported; *ierr = 0; if (!d_mass_w) { // get g2 & g3 PetscReal gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM]; LandauIPData IPData; // create g2 & g3 for (f=threadIdx.x; f<Nf; f+=blockDim.x) { for (d=0;d<dim;d++) { // clear accumulation data D & K gg2[d][myQi][f] = 0; for (d2=0;d2<dim;d2++) gg3[d][d2][myQi][f] = 0; } } if (threadIdx.y == 0) { for (int i = threadIdx.x; i < Nf; i += blockDim.x) { s_nu_alpha[i] = nu_alpha[i]; s_nu_beta[i] = nu_beta[i]; s_invMass[i] = invMass[i]; } } for (d2 = 0; d2 < dim; d2++) { gg2_temp[d2] = 0; for (d3 = 0; d3 < dim; d3++) { gg3_temp[d2][d3] = 0; } } __syncthreads(); // un pack IPData IPData.w = IPDataRaw; IPData.x = IPDataRaw + 1*nip_pad; IPData.y = IPDataRaw + 2*nip_pad; IPData.z = IPDataRaw + 3*nip_pad; for (ipidx_b = 0; ipidx_b < nip; ipidx_b += blockDim.x) { const PetscReal vj[3] = {IPData.x[jpidx], IPData.y[jpidx], IPData.z ? IPData.z[jpidx] : 0}; int ipidx = ipidx_b + threadIdx.x; __syncthreads(); if (ipidx < nip) { for (fieldA = threadIdx.y; fieldA < Nf; fieldA += blockDim.y) { s_f [fieldA*blockDim.x+threadIdx.x] = d_f[ipidx + fieldA*nip_pad]; s_dfx[fieldA*blockDim.x+threadIdx.x] = d_dfdx[ipidx + fieldA*nip_pad]; s_dfy[fieldA*blockDim.x+threadIdx.x] = d_dfdy[ipidx + fieldA*nip_pad]; #if LANDAU_DIM==3 s_dfz[fieldA*blockDim.x+threadIdx.x] = d_dfdz[ipidx + fieldA*nip_pad]; #endif } } __syncthreads(); if (ipidx < nip) { const PetscReal wi = IPData.w[ipidx], x = IPData.x[ipidx], y = IPData.y[ipidx]; PetscReal temp1[3] = {0, 0, 0}, temp2 = 0; #if LANDAU_DIM==2 PetscReal Ud[2][2], Uk[2][2]; LandauTensor2D(vj, x, y, Ud, Uk, (ipidx==jpidx) ? 0. : 1.); #else PetscReal U[3][3], z = IPData.z[ipidx]; LandauTensor3D(vj, x, y, z, U, (ipidx==jpidx) ? 0. 
: 1.); #endif for (fieldA = 0; fieldA < Nf; fieldA++) { temp1[0] += s_dfx[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA]; temp1[1] += s_dfy[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA]; #if LANDAU_DIM==3 temp1[2] += s_dfz[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA]; #endif temp2 += s_f [fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]; } temp1[0] *= wi; temp1[1] *= wi; #if LANDAU_DIM==3 temp1[2] *= wi; #endif temp2 *= wi; #if LANDAU_DIM==2 for (d2 = 0; d2 < 2; d2++) { for (d3 = 0; d3 < 2; ++d3) { /* K = U * grad(f): g2=e: i,A */ gg2_temp[d2] += Uk[d2][d3]*temp1[d3]; /* D = -U * (I \kron (fx)): g3=f: i,j,A */ gg3_temp[d2][d3] += Ud[d2][d3]*temp2; } } #else for (d2 = 0; d2 < 3; ++d2) { for (d3 = 0; d3 < 3; ++d3) { /* K = U * grad(f): g2 = e: i,A */ gg2_temp[d2] += U[d2][d3]*temp1[d3]; /* D = -U * (I \kron (fx)): g3 = f: i,j,A */ gg3_temp[d2][d3] += U[d2][d3]*temp2; } } #endif } } /* IPs */ /* reduce gg temp sums across threads */ for (delta = blockDim.x/2; delta > 0; delta /= 2) { for (d2 = 0; d2 < dim; d2++) { gg2_temp[d2] += __shfl_xor_sync(0xffffffff, gg2_temp[d2], delta, blockDim.x); for (d3 = 0; d3 < dim; d3++) { gg3_temp[d2][d3] += __shfl_xor_sync(0xffffffff, gg3_temp[d2][d3], delta, blockDim.x); } } } // add alpha and put in gg2/3 for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) { for (d2 = 0; d2 < dim; d2++) { gg2[d2][myQi][fieldA] += gg2_temp[d2]*s_nu_alpha[fieldA]; for (d3 = 0; d3 < dim; d3++) { gg3[d2][d3][myQi][fieldA] -= gg3_temp[d2][d3]*s_nu_alpha[fieldA]*s_invMass[fieldA]; } } } __syncthreads(); /* add electric field term once per IP */ for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) { gg2[dim-1][myQi][fieldA] += Eq_m[fieldA]; } __syncthreads(); //intf("%d %d gg2[1][1]=%g\n",myelem,qj_start,gg2[1][dim-1]); /* Jacobian transform - g2 */ for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) { PetscReal wj = IPData.w[jpidx]; for (d = 0; d < dim; ++d) { g2[d][myQi][fieldA] = 0.0; for (d2 = 0; d2 < dim; ++d2) { g2[d][myQi][fieldA] += invJj[d*dim+d2]*gg2[d2][myQi][fieldA]; g3[d][d2][myQi][fieldA] = 0.0; for (d3 = 0; d3 < dim; ++d3) { for (dp = 0; dp < dim; ++dp) { g3[d][d2][myQi][fieldA] += invJj[d*dim + d3]*gg3[d3][dp][myQi][fieldA]*invJj[d2*dim + dp]; } } g3[d][d2][myQi][fieldA] *= wj; } g2[d][myQi][fieldA] *= wj; } } __syncthreads(); // Synchronize (ensure all the data is available) and sum IP matrices } // !mass_w /* FE matrix construction */ { int fieldA,d,qj,d2,q,idx,totDim=Nb*Nf; /* assemble */ for (fieldA = 0; fieldA < Nf; fieldA++) { if (fieldMats) { for (f = threadIdx.y; f < Nb ; f += blockDim.y) { for (g = threadIdx.x; g < Nb; g += blockDim.x) { fieldMats[f][g] = 0; } } } for (f = threadIdx.y; f < Nb ; f += blockDim.y) { const PetscInt i = fieldA*Nb + f; /* Element matrix row */ for (g = threadIdx.x; g < Nb; g += blockDim.x) { const PetscInt j = fieldA*Nb + g; /* Element matrix column */ const PetscInt fOff = i*totDim + j; PetscScalar t = elemMat ? 
elemMat[fOff] : fieldMats[f][g]; for (qj = 0 ; qj < Nq ; qj++) { const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim]; if (!d_mass_w) { for (d = 0; d < dim; ++d) { t += DIq[f*dim+d]*g2[d][qj][fieldA]*BJq[g]; for (d2 = 0; d2 < dim; ++d2) { t += DIq[f*dim + d]*g3[d][d2][qj][fieldA]*DIq[g*dim + d2]; } } } else { const PetscInt jpidx = qj + myelem * Nq; t += BJq[f] * d_mass_w[jpidx]*shift * BJq[g]; } } if (elemMat) elemMat[fOff] = t; else fieldMats[f][g] = t; } } if (fieldMats) { PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE]; PetscReal row_scale[LANDAU_MAX_Q_FACE],col_scale[LANDAU_MAX_Q_FACE]; PetscInt nr,nc,rows0[LANDAU_MAX_Q_FACE],cols0[LANDAU_MAX_Q_FACE],rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE]; const LandauIdx *const Idxs = &d_maps->gIdx[myelem][fieldA][0]; for (f = threadIdx.y; f < Nb ; f += blockDim.y) { idx = Idxs[f]; if (idx >= 0) { nr = 1; rows0[0] = idx; row_scale[0] = 1.; } else { idx = -idx - 1; nr = d_maps->num_face; for (q = 0; q < d_maps->num_face; q++) { rows0[q] = d_maps->c_maps[idx][q].gid; row_scale[q] = d_maps->c_maps[idx][q].scale; } } for (g = threadIdx.x; g < Nb; g += blockDim.x) { idx = Idxs[g]; if (idx >= 0) { nc = 1; cols0[0] = idx; col_scale[0] = 1.; } else { idx = -idx - 1; nc = d_maps->num_face; for (q = 0; q < d_maps->num_face; q++) { cols0[q] = d_maps->c_maps[idx][q].gid; col_scale[q] = d_maps->c_maps[idx][q].scale; } } for (q = 0; q < nr; q++) rows[q] = rows0[q]; for (q = 0; q < nc; q++) cols[q] = cols0[q]; for (q = 0; q < nr; q++) { for (d = 0; d < nc; d++) { vals[q*nc + d] = row_scale[q]*col_scale[d]*fieldMats[f][g]; } } MatSetValuesDevice(d_mat,nr,rows,nc,cols,vals,ADD_VALUES,ierr); if (*ierr) return; } } } } } } // // The GPU Landau kernel // __global__ void __launch_bounds__(256,1) landau_kernel_v2(const PetscInt nip, const PetscInt dim, const PetscInt totDim, const PetscInt Nf, const PetscInt Nb, const PetscReal invJj[], const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const PetscReal * const BB, const PetscReal * const DD, LandauIPReal *IPDataRaw, PetscScalar elemMats_out[], P4estVertexMaps *d_maps, PetscSplitCSRDataStructure *d_mat, LandauIPReal d_f[], LandauIPReal d_dfdx[], LandauIPReal d_dfdy[], #if LANDAU_DIM==3 LandauIPReal d_dfdz[], #endif PetscReal d_mass_w[], PetscReal shift, PetscErrorCode *ierr) { const PetscInt Nq = blockDim.y, myelem = blockIdx.x; extern __shared__ PetscReal smem[]; int size = 0; PetscReal (*g2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = // shared mem not needed when mass_w (PetscReal (*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size]; size += LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM; PetscReal (*g3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal (*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size]; size += LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES; PetscReal (*gg2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal (*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size]; size += LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM; PetscReal (*gg3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = (PetscReal (*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size]; size += LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES; PetscReal *s_nu_alpha = &smem[size]; size += LANDAU_MAX_SPECIES; PetscReal *s_nu_beta = &smem[size]; size += LANDAU_MAX_SPECIES; PetscReal *s_invMass = &smem[size]; size += LANDAU_MAX_SPECIES; PetscReal *s_f 
= &smem[size]; size += blockDim.x*LANDAU_MAX_SPECIES; PetscReal *s_dfx = &smem[size]; size += blockDim.x*LANDAU_MAX_SPECIES; PetscReal *s_dfy = &smem[size]; size += blockDim.x*LANDAU_MAX_SPECIES; #if LANDAU_DIM==3 PetscReal *s_dfz = &smem[size]; size += blockDim.x*LANDAU_MAX_SPECIES; #endif PetscScalar (*fieldMats)[LANDAU_MAX_NQ][LANDAU_MAX_NQ] = d_maps ? (PetscScalar (*)[LANDAU_MAX_NQ][LANDAU_MAX_NQ]) &smem[size] : NULL; if (d_maps) size += LANDAU_MAX_NQ*LANDAU_MAX_NQ; const PetscInt myQi = threadIdx.y; const PetscInt jpidx = myQi + myelem * Nq; //const PetscInt subblocksz = nip/nSubBlks + !!(nip%nSubBlks), ip_start = mySubBlk*subblocksz, ip_end = (mySubBlk+1)*subblocksz > nip ? nip : (mySubBlk+1)*subblocksz; /* this could be wrong with very few global IPs */ PetscScalar *elemMat = elemMats_out ? &elemMats_out[myelem*totDim*totDim] : NULL; /* my output */ int tid = threadIdx.x + threadIdx.y*blockDim.x; const PetscReal *invJ = invJj ? &invJj[jpidx*dim*dim] : NULL; if (elemMat) for (int i = tid; i < totDim*totDim; i += blockDim.x*blockDim.y) elemMat[i] = 0; __syncthreads(); landau_inner_integral_v2(myQi, jpidx, nip, Nq, Nf, Nb, dim, IPDataRaw, invJ, nu_alpha, nu_beta, invMass, Eq_m, BB, DD, elemMat, d_maps, d_mat, *fieldMats, *g2, *g3, *gg2, *gg3, s_nu_alpha, s_nu_beta, s_invMass, s_f, s_dfx, s_dfy, d_f, d_dfdx, d_dfdy, #if LANDAU_DIM==3 s_dfz, d_dfdz, #endif d_mass_w, shift, myelem, ierr); /* compact */ } PetscErrorCode LandauCUDAJacobian(DM plex, const PetscInt Nq, const PetscReal nu_alpha[],const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[], const LandauIPData *const IPData, const PetscReal invJj[], PetscReal *mass_w, PetscReal shift, const PetscLogEvent events[], Mat JacP) { PetscErrorCode ierr,*d_ierr; cudaError_t cerr; PetscInt ii,ej,*Nbf,Nb,nip_dim2,cStart,cEnd,Nf,dim,numGCells,totDim,nip,szf=sizeof(LandauIPReal),ipdatasz; PetscReal *d_BB,*d_DD,*d_invJj=NULL,*d_nu_alpha,*d_nu_beta,*d_invMass,*d_Eq_m,*d_mass_w=NULL; PetscScalar *d_elemMats=NULL; LandauIPReal *d_f=NULL, *d_dfdx=NULL, *d_dfdy=NULL; #if LANDAU_DIM==3 PetscScalar *d_dfdz=NULL; #endif PetscTabulation *Tf; PetscDS prob; PetscSection section, globalSection; LandauIPReal *d_IPDataRaw=NULL; LandauCtx *ctx; PetscSplitCSRDataStructure *d_mat=NULL; P4estVertexMaps *h_maps, *d_maps=NULL; int nnn = 256/Nq; PetscFunctionBegin; while (nnn & nnn - 1) nnn = nnn & nnn - 1; if (nnn>16) nnn = 16; ierr = PetscLogEventBegin(events[3],0,0,0,0);CHKERRQ(ierr); ierr = DMGetDimension(plex, &dim);CHKERRQ(ierr); if (dim!=LANDAU_DIM) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "LANDAU_DIM %D != dim %d",LANDAU_DIM,dim); ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr); numGCells = cEnd - cStart; nip = numGCells*Nq; /* length of inner global iteration */ ierr = DMGetDS(plex, &prob);CHKERRQ(ierr); ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr); ierr = PetscDSGetDimensions(prob, &Nbf);CHKERRQ(ierr); Nb = Nbf[0]; if (Nq != Nb) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Nq != Nb. 
%D %D",Nq,Nb); ierr = PetscDSGetTotalDimension(prob, &totDim);CHKERRQ(ierr); ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr); ierr = DMGetLocalSection(plex, &section);CHKERRQ(ierr); ierr = DMGetGlobalSection(plex, &globalSection);CHKERRQ(ierr); // create data cerr = cudaMalloc((void **)&d_BB, Nq*Nb*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMemcpy( d_BB, Tf[0]->T[0], Nq*Nb*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMalloc((void **)&d_DD, Nq*Nb*dim*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMemcpy( d_DD, Tf[0]->T[1], Nq*Nb*dim*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); nip_dim2 = Nq*numGCells*dim*dim; if (mass_w) { cerr = cudaMalloc((void **)&d_mass_w, nip*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMemcpy( d_mass_w, mass_w,nip*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } else { ipdatasz = LandauGetIPDataSize(IPData); cerr = cudaMalloc((void **)&d_IPDataRaw,ipdatasz*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMemcpy(d_IPDataRaw, IPData->w, ipdatasz*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); // assumes IPData starts with 'w' cerr = cudaMalloc((void **)&d_nu_alpha, Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMalloc((void **)&d_nu_beta, Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMalloc((void **)&d_invMass, Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMalloc((void **)&d_Eq_m, Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMemcpy(d_nu_alpha, nu_alpha, Nf*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(d_nu_beta, nu_beta, Nf*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(d_invMass, invMass, Nf*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); cerr = cudaMemcpy(d_Eq_m, Eq_m, Nf*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); // f and df cerr = cudaMalloc((void **)&d_f, nip*Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMalloc((void **)&d_dfdx, nip*Nf*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMalloc((void **)&d_dfdy, nip*Nf*szf);CHKERRCUDA(cerr); // kernel input #if LANDAU_DIM==3 cerr = cudaMalloc((void **)&d_dfdz, nip*Nf*szf);CHKERRCUDA(cerr); // kernel input #endif // collect geometry cerr = cudaMalloc((void **)&d_invJj, nip_dim2*szf);CHKERRCUDA(cerr); // kernel input cerr = cudaMemcpy(d_invJj, invJj, nip_dim2*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = DMGetApplicationContext(plex, &ctx);CHKERRQ(ierr); if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context"); if (ctx->gpu_assembly) { PetscContainer container; ierr = PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);CHKERRQ(ierr); if (container) { // not here first call ierr = PetscContainerGetPointer(container, (void **) &h_maps);CHKERRQ(ierr); if (h_maps->data) { d_maps = h_maps->data; if (!d_maps) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata"); } else { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata in container"); } // this does the setup the first time called ierr = MatCUSPARSEGetDeviceMatWrite(JacP,&d_mat);CHKERRQ(ierr); } else { cerr = cudaMalloc((void **)&d_elemMats, totDim*totDim*numGCells*sizeof(PetscScalar));CHKERRCUDA(cerr); // kernel output - first call is on CPU } } else { cerr = cudaMalloc((void **)&d_elemMats, totDim*totDim*numGCells*sizeof(PetscScalar));CHKERRCUDA(cerr); // kernel output - no GPU assembly } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(events[3],0,0,0,0);CHKERRQ(ierr); cerr = cudaMalloc((void **)&d_ierr, sizeof(ierr));CHKERRCUDA(cerr); // kernel input if 
(!mass_w) { // form f and df dim3 dimBlock(Nq,1); ierr = PetscLogEventBegin(events[8],0,0,0,0);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ii = 0; // PetscPrintf(PETSC_COMM_SELF, "numGCells=%d dim.x=%d Nq=%d nThreads=%d, %d kB shared mem\n",numGCells,n,Nq,Nq*n,ii*szf/1024); landau_form_fdf<<<numGCells,dimBlock,ii*szf>>>( nip, dim, Nf, Nb, d_invJj, d_BB, d_DD, d_IPDataRaw, d_f, d_dfdx, d_dfdy, #if LANDAU_DIM==3 d_dfdz, #endif d_ierr); CHECK_LAUNCH_ERROR(); ierr = PetscLogGpuFlops(nip*(PetscLogDouble)(2*Nb*(1+dim)));CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); cerr = cudaMemcpy(&ierr, d_ierr, sizeof(ierr), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); CHKERRQ(ierr); ierr = PetscLogEventEnd(events[8],0,0,0,0);CHKERRQ(ierr); } ierr = PetscLogEventBegin(events[4],0,0,0,0);CHKERRQ(ierr); { dim3 dimBlock(nnn,Nq); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = PetscLogGpuFlops(nip*(PetscLogDouble)(mass_w ? (nip*(11*Nf+ 4*dim*dim) + 6*Nf*dim*dim*dim + 10*Nf*dim*dim + 4*Nf*dim + Nb*Nf*Nb*Nq*dim*dim*5) : Nb*Nf*Nb*Nq*4));CHKERRQ(ierr); ii = 2*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM*(1+LANDAU_DIM) + 3*LANDAU_MAX_SPECIES + (1+LANDAU_DIM)*dimBlock.x*LANDAU_MAX_SPECIES; ii += (LANDAU_MAX_NQ*LANDAU_MAX_NQ)*LANDAU_MAX_SPECIES; if (ii*szf >= 49152) { cerr = cudaFuncSetAttribute(landau_kernel_v2, cudaFuncAttributeMaxDynamicSharedMemorySize, 98304);CHKERRCUDA(cerr); } // PetscPrintf(PETSC_COMM_SELF, "numGCells=%d dim.x=%d Nq=%d nThreads=%d, %d kB shared mem\n",numGCells,n,Nq,Nq*n,ii*szf/1024); landau_kernel_v2<<<numGCells,dimBlock,ii*szf>>>(nip,dim,totDim,Nf,Nb,d_invJj,d_nu_alpha,d_nu_beta,d_invMass,d_Eq_m, d_BB, d_DD, d_IPDataRaw, d_elemMats, d_maps, d_mat, d_f, d_dfdx, d_dfdy, #if LANDAU_DIM==3 d_dfdz, #endif d_mass_w, shift, d_ierr); CHECK_LAUNCH_ERROR(); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); //cerr = cudaMemcpy(&ierr, d_ierr, sizeof(ierr), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); //CHKERRQ(ierr); } cerr = cudaFree(d_ierr);CHKERRCUDA(cerr); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(events[4],0,0,0,0);CHKERRQ(ierr); // delete device data ierr = PetscLogEventBegin(events[5],0,0,0,0);CHKERRQ(ierr); cerr = cudaFree(d_BB);CHKERRCUDA(cerr); cerr = cudaFree(d_DD);CHKERRCUDA(cerr); if (mass_w) { cerr = cudaFree(d_mass_w);CHKERRCUDA(cerr); } else { cerr = cudaFree(d_IPDataRaw);CHKERRCUDA(cerr); cerr = cudaFree(d_f);CHKERRCUDA(cerr); cerr = cudaFree(d_dfdx);CHKERRCUDA(cerr); cerr = cudaFree(d_dfdy);CHKERRCUDA(cerr); #if LANDAU_DIM==3 cerr = cudaFree(d_dfdz);CHKERRCUDA(cerr); #endif cerr = cudaFree(d_invJj);CHKERRCUDA(cerr); cerr = cudaFree(d_nu_alpha);CHKERRCUDA(cerr); cerr = cudaFree(d_nu_beta);CHKERRCUDA(cerr); cerr = cudaFree(d_invMass);CHKERRCUDA(cerr); cerr = cudaFree(d_Eq_m);CHKERRCUDA(cerr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogEventEnd(events[5],0,0,0,0);CHKERRQ(ierr); // First time assembly even with GPU assembly if (d_elemMats) { PetscScalar *elemMats=NULL,*elMat; ierr = PetscLogEventBegin(events[5],0,0,0,0);CHKERRQ(ierr); ierr = PetscMalloc1(totDim*totDim*numGCells,&elemMats);CHKERRQ(ierr); cerr = cudaMemcpy(elemMats, d_elemMats, totDim*totDim*numGCells*sizeof(PetscScalar), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); cerr = cudaFree(d_elemMats);CHKERRCUDA(cerr); ierr = PetscLogEventEnd(events[5],0,0,0,0);CHKERRQ(ierr); ierr = PetscLogEventBegin(events[6],0,0,0,0);CHKERRQ(ierr); for (ej = cStart, elMat = elemMats ; ej < cEnd; ++ej, elMat += totDim*totDim) { ierr = DMPlexMatSetClosure(plex, section, globalSection, JacP, ej, elMat, 
ADD_VALUES);CHKERRQ(ierr); if (ej==-1) { int d,f; PetscPrintf(PETSC_COMM_SELF,"GPU Element matrix\n"); for (d = 0; d < totDim; ++d){ for (f = 0; f < totDim; ++f) PetscPrintf(PETSC_COMM_SELF," %12.5e", PetscRealPart(elMat[d*totDim + f])); PetscPrintf(PETSC_COMM_SELF,"\n"); } } } ierr = PetscFree(elemMats);CHKERRQ(ierr); ierr = PetscLogEventEnd(events[6],0,0,0,0);CHKERRQ(ierr); } PetscFunctionReturn(0); }
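//
// Note (illustrative sketch, not part of the file above): landau_kernel_v2
// carves a single extern __shared__ array into several logical buffers by
// advancing a running element offset, and LandauCUDAJacobian opts in to more
// than the default 48 KB of dynamic shared memory with cudaFuncSetAttribute
// before the launch. The names and sizes below (partition_sketch, NA, NB) are
// hypothetical and only demonstrate that pattern.
#include <cuda_runtime.h>
#define NA 256
#define NB 256

__global__ void partition_sketch(const double *x, double *y)
{
  extern __shared__ double smem[];     // one dynamic allocation for the whole block
  int off = 0;
  double *bufA = &smem[off]; off += NA;  // first logical buffer
  double *bufB = &smem[off]; off += NB;  // second logical buffer
  const int t = threadIdx.x;
  // x and y are assumed to hold at least NA elements
  if (t < NA) bufA[t] = x[t];
  if (t < NB) bufB[t] = 2.0 * x[t];
  __syncthreads();                       // make all shared-memory writes visible
  if (t < NB) y[t] = bufA[t] + bufB[t];
}

// Host-side launch: request extra dynamic shared memory only when the computed
// byte count exceeds the 48 KB default, mirroring the guard in LandauCUDAJacobian.
static void launch_partition_sketch(const double *d_x, double *d_y)
{
  size_t shbytes = (size_t)(NA + NB) * sizeof(double);
  if (shbytes >= 49152) {
    cudaFuncSetAttribute(partition_sketch, cudaFuncAttributeMaxDynamicSharedMemorySize, (int)shbytes);
  }
  partition_sketch<<<1, NA, shbytes>>>(d_x, d_y);
}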
912677e53d16a5131e01e118a935857acb5e7b78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __constant__ float tau0 = 3.5f; // relaxation time __constant__ float visc = 1.0f; // viscosity #define BLOCK_SIZE 64 #define DIST_SIZE 74304u #define OPTION_SAVE_MACRO_FIELDS 1 #define OPTION_BULK 2 #define INVALID_NODE 0xffffffff #define DT 1.0f #include <stdio.h> // Additional geometry parameters (velocities, pressures, etc) __constant__ float node_params[2] = { 1.00000000000000005551e-01f , 0.00000000000000000000e+00f , }; // OpenCL compatibility code. __device__ int inline get_local_size(int i) { if (i == 0) { return blockDim.x; } else { return blockDim.y; } } __device__ int inline get_global_size(int i) { if (i == 0) { return blockDim.x * gridDim.x; } else { return blockDim.y * gridDim.y; } } __device__ int inline get_group_id(int i) { if (i == 0) { return blockIdx.x; } else { return blockIdx.y; } } __device__ int inline get_local_id(int i) { if (i == 0) { return threadIdx.x; } else { return threadIdx.y; } } __device__ int inline get_global_id(int i) { if (i == 0) { return threadIdx.x + blockIdx.x * blockDim.x; } else { return threadIdx.y + blockIdx.y * blockDim.y; } } typedef struct Dist { float fC; float fE; float fN; float fW; float fS; float fNE; float fNW; float fSW; float fSE; } Dist; // Functions for checking whether a node is of a given specific type. __device__ inline bool is_NTFluid(unsigned int type) { return type == 1; } __device__ inline bool isNTFullBBWall(unsigned int type) { return type == 2; } __device__ inline bool is_NTGhost(unsigned int type) { return type == 4; } __device__ inline bool isNTRegularizedVelocity(unsigned int type) { return type == 3; } // Returns true is the node does not require any special processing // to calculate macroscopic fields. __device__ inline bool NTUsesStandardMacro(unsigned int type) { return (false || is_NTFluid(type) || isNTFullBBWall(type) ); } // Wet nodes are nodes that undergo a standard collision procedure. __device__ inline bool isWetNode(unsigned int type) { return (false || is_NTFluid(type) || isNTRegularizedVelocity(type) ); } // Wet nodes are nodes that undergo a standard collision procedure. __device__ inline bool isExcludedNode(unsigned int type) { return (false || is_NTGhost(type) ); } __device__ inline bool isPropagationOnly(unsigned int type) { return (false ); } // Internal helper, do not use directly. __device__ inline void _storeNodeScratchSpace(unsigned int scratch_id, unsigned int num_values, float *buffer, float *g_buffer) { for (int i = 0; i < num_values; i++) { g_buffer[i + scratch_id * num_values] = buffer[i]; } } // Internal helper, do not use directly. __device__ inline void _loadNodeScratchSpace(unsigned int scratch_id, unsigned int num_values, float *g_buffer, float *buffer) { for (int i = 0; i < num_values; i++) { buffer[i] = g_buffer[i + scratch_id * num_values]; } } // Reads values from node scratch space (in global memory) into a local buffer. // // scratch_id: scratch space ID for nodes of type 'type' // type: node type // g_buffer: pointer to a buffer in the global memory used for scratch // space // buffer: pointer to a local buffer where the values will be saved __device__ inline void loadNodeScratchSpace(unsigned int scratch_id, unsigned int type, float *g_buffer, float* buffer) { switch (type) { } } // Stores values from a local buffer into the node scratch space in global memory. 
// // Arguments: see loadNodeScratchSpace __device__ inline void storeNodeScratchSpace(unsigned int scratch_id, unsigned int type, float* buffer, float* g_buffer) { switch (type) { } } __device__ inline unsigned int decodeNodeType(unsigned int nodetype) { return nodetype & 7; } __device__ inline unsigned int decodeNodeOrientation(unsigned int nodetype) { return nodetype >> 5; } // Returns the node's scratch ID, to be passed to (load,store)NodeScratchSpace as scratch_id. __device__ inline unsigned int decodeNodeScratchId(unsigned int nodetype) { return (nodetype >> 5) & 0; } __device__ inline unsigned int decodeNodeParamIdx(unsigned int nodetype) { return (nodetype >> 3) & 3; } __device__ inline unsigned int getGlobalIdx(int gx, int gy) { return gx + 288 * gy; } __device__ inline void decodeGlobalIdx(unsigned int gi, int *gx, int *gy) { *gx = gi % 288; *gy = gi / 288; } __device__ void die(void) { asm("trap;"); } __device__ void checkInvalidValues(Dist* d, int gx, int gy ) { bool valid = true; if (!isfinite(d->fC)) { valid = false; printf("ERR(subdomain=0): Invalid value of fC (%f) at: " "(%d, %d)" "\n", d->fC, gx, gy ); } if (!isfinite(d->fE)) { valid = false; printf("ERR(subdomain=0): Invalid value of fE (%f) at: " "(%d, %d)" "\n", d->fE, gx, gy ); } if (!isfinite(d->fN)) { valid = false; printf("ERR(subdomain=0): Invalid value of fN (%f) at: " "(%d, %d)" "\n", d->fN, gx, gy ); } if (!isfinite(d->fW)) { valid = false; printf("ERR(subdomain=0): Invalid value of fW (%f) at: " "(%d, %d)" "\n", d->fW, gx, gy ); } if (!isfinite(d->fS)) { valid = false; printf("ERR(subdomain=0): Invalid value of fS (%f) at: " "(%d, %d)" "\n", d->fS, gx, gy ); } if (!isfinite(d->fNE)) { valid = false; printf("ERR(subdomain=0): Invalid value of fNE (%f) at: " "(%d, %d)" "\n", d->fNE, gx, gy ); } if (!isfinite(d->fNW)) { valid = false; printf("ERR(subdomain=0): Invalid value of fNW (%f) at: " "(%d, %d)" "\n", d->fNW, gx, gy ); } if (!isfinite(d->fSW)) { valid = false; printf("ERR(subdomain=0): Invalid value of fSW (%f) at: " "(%d, %d)" "\n", d->fSW, gx, gy ); } if (!isfinite(d->fSE)) { valid = false; printf("ERR(subdomain=0): Invalid value of fSE (%f) at: " "(%d, %d)" "\n", d->fSE, gx, gy ); } if (!valid) { die(); } } // Load the distributions from din to dout, for the node with the index 'idx'. // Performs propagation when reading distributions from global memory. // This implements the propagate-on-read scheme. // Implements the propagate-on-read scheme for the AA access pattern, where the // distributions are not located in their natural slots, but the opposite ones // (e.g. fNE is located where fSW normally is). This ensures that within a single // timestep, the distributions are read from and written to the exact same places // in global memory. __device__ inline void getDist( Dist *dout, const float *__restrict__ din, unsigned int gi ) { dout->fC = din[gi + DIST_SIZE * 0 + (unsigned int)0]; dout->fE = din[gi + DIST_SIZE * 1 + (unsigned int)0]; dout->fN = din[gi + DIST_SIZE * 2 + (unsigned int)0]; dout->fW = din[gi + DIST_SIZE * 3 + (unsigned int)0]; dout->fS = din[gi + DIST_SIZE * 4 + (unsigned int)0]; dout->fNE = din[gi + DIST_SIZE * 5 + (unsigned int)0]; dout->fNW = din[gi + DIST_SIZE * 6 + (unsigned int)0]; dout->fSW = din[gi + DIST_SIZE * 7 + (unsigned int)0]; dout->fSE = din[gi + DIST_SIZE * 8 + (unsigned int)0]; } // Returns a node parameter which is a vector (in 'out'). 
__device__ inline void node_param_get_vector(const int idx, float *out ) { out[0] = node_params[idx]; out[1] = node_params[idx + 1]; } // Returns a node parameter which is a scalar. __device__ inline float node_param_get_scalar(const int idx ) { return node_params[idx]; } // Add comments for the Guo density implementation. __device__ inline void bounce_back(Dist *fi) { float t; t = fi->fE; fi->fE = fi->fW; fi->fW = t; t = fi->fN; fi->fN = fi->fS; fi->fS = t; t = fi->fNE; fi->fNE = fi->fSW; fi->fSW = t; t = fi->fNW; fi->fNW = fi->fSE; fi->fSE = t; } // Compute the 0th moment of the distributions, i.e. density. __device__ inline void compute_0th_moment(Dist *fi, float *out) { *out = fi->fC + fi->fE + fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW + fi->fW; } // Compute the 1st moments of the distributions, i.e. momentum. __device__ inline void compute_1st_moment(Dist *fi, float *out, int add, float factor) { if (add) { out[0] += factor * ( fi->fE + fi->fNE - fi->fNW + fi->fSE - fi->fSW - fi->fW ); out[1] += factor * ( fi->fN + fi->fNE + fi->fNW - fi->fS - fi->fSE - fi->fSW ); } else { out[0] = factor * ( fi->fE + fi->fNE - fi->fNW + fi->fSE - fi->fSW - fi->fW ); out[1] = factor * ( fi->fN + fi->fNE + fi->fNW - fi->fS - fi->fSE - fi->fSW ); } } // Compute the 2nd moments of the distributions. Order of components is: // 2D: xx, xy, yy // 3D: xx, xy, xz, yy, yz, zz __device__ inline void compute_2nd_moment(Dist *fi, float *out) { out[0] = fi->fE + fi->fNE + fi->fNW + fi->fSE + fi->fSW + fi->fW ; out[1] = fi->fNE - fi->fNW - fi->fSE + fi->fSW ; out[2] = fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW ; } // Computes the 2nd moment of the non-equilibrium distribution function // given the full distribution fuction 'fi'. __device__ inline void compute_noneq_2nd_moment(Dist* fi, const float rho, float *v0, float *out) { out[0] = fi->fE + fi->fNE + fi->fNW + fi->fSE + fi->fSW + fi->fW - rho*((v0[0]*v0[0]) + 1.0f* (1.0f / 3.0f)) ; out[1] = fi->fNE - fi->fNW - fi->fSE + fi->fSW - rho*v0[0]*v0[1] ; out[2] = fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW - rho*((v0[1]*v0[1]) + 1.0f* (1.0f / 3.0f)) ; } // Compute the 1st moments of the distributions and divide it by the 0-th moment // i.e. compute velocity. __device__ inline void compute_1st_div_0th(Dist *fi, float *out, float zero) { out[0] = (fi->fE + fi->fNE - fi->fNW + fi->fSE - fi->fSW - fi->fW)/zero ; out[1] = (fi->fN + fi->fNE + fi->fNW - fi->fS - fi->fSE - fi->fSW)/zero ; } __device__ inline void compute_macro_quant(Dist *fi, float *rho, float *v) { compute_0th_moment(fi, rho); compute_1st_div_0th(fi, v, *rho); } __device__ inline void get0thMoment(Dist *fi, int node_type, int orientation, float *out) { compute_0th_moment(fi, out); } // Common code for the equilibrium and Zou-He density boundary conditions. // // Get macroscopic density rho and velocity v given a distribution fi, and // the node class node_type. // __device__ inline void getMacro( Dist *fi, int ncode, int node_type, int orientation, float *rho, float *v0 ) { if (NTUsesStandardMacro(node_type) || orientation == 0) { compute_macro_quant(fi, rho, v0); } else if (isNTRegularizedVelocity(node_type)) { int node_param_idx = decodeNodeParamIdx(ncode); // We're dealing with a boundary node, for which some of the distributions // might be meaningless. Fill them with the values of the opposite // distributions. switch (orientation) { case 1: { // fE is undefined. fi->fE = fi->fW; // fNE is undefined. fi->fNE = fi->fSW; // fSE is undefined. 
fi->fSE = fi->fNW; break; } case 2: { // fN is undefined. fi->fN = fi->fS; // fNE is undefined. fi->fNE = fi->fSW; // fNW is undefined. fi->fNW = fi->fSE; break; } case 3: { // fW is undefined. fi->fW = fi->fE; // fNW is undefined. fi->fNW = fi->fSE; // fSW is undefined. fi->fSW = fi->fNE; break; } case 4: { // fS is undefined. fi->fS = fi->fN; // fSW is undefined. fi->fSW = fi->fNE; // fSE is undefined. fi->fSE = fi->fNW; break; } } *rho = fi->fC + fi->fE + fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW + fi->fW; node_param_get_vector(node_param_idx, v0 ); switch (orientation) { case 1: *rho = (*rho)/(-v0[0] + 1.0f) ; break; case 2: *rho = (*rho)/(-v0[1] + 1.0f) ; break; case 3: *rho = (*rho)/(v0[0] + 1.0f) ; break; case 4: *rho = (*rho)/(v0[1] + 1.0f) ; break; } } } // Uses extrapolation/other schemes to compute missing distributions for some implementations // of boundary condtitions. __device__ inline void fixMissingDistributions( Dist *fi, float *dist_in, int ncode, int node_type, int orientation, unsigned int gi, float *__restrict__ ivx, float *__restrict__ ivy, float *gg0m0 ) { if (0) {} } // TODO: Check whether it is more efficient to actually recompute // node_type and orientation instead of passing them as variables. __device__ inline void postcollisionBoundaryConditions( Dist *fi, int ncode, int node_type, int orientation, float *rho, float *v0, unsigned int gi, float *dist_out ) { if (0) {} } __device__ inline void precollisionBoundaryConditions(Dist *fi, int ncode, int node_type, int orientation, float *rho, float *v0 ) { if (0) {} else if (isNTFullBBWall(node_type)) { bounce_back(fi); } else if (0 || isNTRegularizedVelocity(node_type) ) { // Bounce-back of the non-equilibrium parts. switch (orientation) { case 1: fi->fE = fi->fW + (2.0f* (1.0f / 3.0f))*(*rho)*v0[0] ; fi->fNE = fi->fSW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1]) ; fi->fSE = fi->fNW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1]) ; break; case 2: fi->fN = fi->fS + (2.0f* (1.0f / 3.0f))*(*rho)*v0[1] ; fi->fNE = fi->fSW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1]) ; fi->fNW = fi->fSE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1]) ; break; case 3: fi->fW = fi->fE - 2.0f* (1.0f / 3.0f)*(*rho)*v0[0] ; fi->fNW = fi->fSE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1]) ; fi->fSW = fi->fNE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1]) ; break; case 4: fi->fS = fi->fN - 2.0f* (1.0f / 3.0f)*(*rho)*v0[1] ; fi->fSW = fi->fNE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1]) ; fi->fSE = fi->fNW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1]) ; break; case 0: bounce_back(fi); return; } float flux[3]; compute_noneq_2nd_moment(fi, *rho, v0, flux); fi->fC = max(1e-7f, (4.0f* (1.0f / 9.0f))*(*rho)*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (4.0f* (1.0f / 9.0f))*(*rho) + -2.0f* (1.0f / 3.0f)*flux[0] - 2.0f* (1.0f / 3.0f)*flux[2] ); fi->fE = max(1e-7f, (1.0f* (1.0f / 9.0f))*(*rho)*(v0[0]*(3.0f*v0[0] + 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*(*rho) + (1.0f* (1.0f / 3.0f))*flux[0] - 1.0f* (1.0f / 6.0f)*flux[2] ); fi->fN = max(1e-7f, (1.0f* (1.0f / 9.0f))*(*rho)*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] + 3.0f)) + 
(1.0f* (1.0f / 9.0f))*(*rho) + -1.0f* (1.0f / 6.0f)*flux[0] + (1.0f* (1.0f / 3.0f))*flux[2] ); fi->fW = max(1e-7f, (1.0f* (1.0f / 9.0f))*(*rho)*(v0[0]*(3.0f*v0[0] - 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*(*rho) + (1.0f* (1.0f / 3.0f))*flux[0] - 1.0f* (1.0f / 6.0f)*flux[2] ); fi->fS = max(1e-7f, (1.0f* (1.0f / 9.0f))*(*rho)*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 9.0f))*(*rho) + -1.0f* (1.0f / 6.0f)*flux[0] + (1.0f* (1.0f / 3.0f))*flux[2] ); fi->fNE = max(1e-7f, (1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho) + (1.0f* (1.0f / 12.0f))*flux[0] + (1.0f* (1.0f / 4.0f))*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2] ); fi->fNW = max(1e-7f, (1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho) + (1.0f* (1.0f / 12.0f))*flux[0] - 1.0f* (1.0f / 4.0f)*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2] ); fi->fSW = max(1e-7f, (1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho) + (1.0f* (1.0f / 12.0f))*flux[0] + (1.0f* (1.0f / 4.0f))*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2] ); fi->fSE = max(1e-7f, (1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho) + (1.0f* (1.0f / 12.0f))*flux[0] - 1.0f* (1.0f / 4.0f)*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2] ); } } // // Performs the relaxation step in the BGK model given the density rho, // the velocity v and the distribution fi. __device__ inline void BGK_relaxate0( float rho, float *iv0 , Dist *d0, int node_type, int ncode ) { float v0[2]; Dist feq0; v0[0] = iv0[0]; v0[1] = iv0[1]; ; feq0.fC = (4.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (4.0f* (1.0f / 9.0f))*rho ; feq0.fE = (1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] + 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho ; feq0.fN = (1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 9.0f))*rho ; feq0.fW = (1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] - 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho ; feq0.fS = (1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 9.0f))*rho ; feq0.fNE = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; feq0.fNW = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; feq0.fSW = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; feq0.fSE = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; float omega = 2.85714285714285698425e-01f ; d0->fC += omega * (feq0.fC - d0->fC); d0->fE += omega * (feq0.fE - d0->fE); d0->fN += omega * (feq0.fN - d0->fN); d0->fW += omega * (feq0.fW - d0->fW); d0->fS += omega * (feq0.fS - d0->fS); d0->fNE += omega * (feq0.fNE - d0->fNE); d0->fNW += omega * (feq0.fNW - d0->fNW); d0->fSW += omega * (feq0.fSW - d0->fSW); d0->fSE += omega * (feq0.fSE - d0->fSE); // FIXME: This should be moved to postcollision boundary 
conditions. } // A kernel to set the node distributions using the equilibrium distributions // and the macroscopic fields. __global__ void SetInitialConditions( float *dist1_in, float *__restrict__ ivx, float *__restrict__ ivy, const float *__restrict__ irho, const int *__restrict__ map ) { int lx = get_local_id(0); // ID inside the current block int gx = get_global_id(0); int gy = get_group_id(1); unsigned int gi = getGlobalIdx(gx, gy); // Nothing to do if we're outside of the simulation domain. if (gx > 257) { return; } // Cache macroscopic fields in local variables. float rho = irho[gi] ; float v0[2]; v0[0] = ivx[gi]; v0[1] = ivy[gi]; dist1_in[gi + (0u + (unsigned int)(0 + 0))] = (4.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (4.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (74304u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] + 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (148608u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (222912u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] - 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (297216u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (371520u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; dist1_in[gi + (445824u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; dist1_in[gi + (520128u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; dist1_in[gi + (594432u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; } __global__ void PrepareMacroFields( const int *__restrict__ map, const float *__restrict__ dist_in, float *orho, int options ) { int lx = get_local_id(0); // ID inside the current block int gx = get_global_id(0); int gy = get_group_id(1); unsigned int gi = getGlobalIdx(gx, gy); // Nothing to do if we're outside of the simulation domain. if (gx > 257) { return; } int ncode = map[gi]; int type = decodeNodeType(ncode); // Unused nodes do not participate in the simulation. if (isExcludedNode(type) || isPropagationOnly(type)) return; int orientation = decodeNodeOrientation(ncode); Dist fi; float out; getDist( &fi, dist_in, gi ); get0thMoment(&fi, type, orientation, &out); orho[gi] = out; } __global__ void CollideAndPropagate( const int *__restrict__ map, float *__restrict__ dist_in, float *__restrict__ dist_out, float *__restrict__ gg0m0, float *__restrict__ ovx, float *__restrict__ ovy, int options ) { int lx = get_local_id(0); // ID inside the current block int gx = get_global_id(0); int gy = get_group_id(1); unsigned int gi = getGlobalIdx(gx, gy); // Nothing to do if we're outside of the simulation domain. 
if (gx > 257) { return; } // Shared variables for in-block propagation __shared__ float prop_fE[BLOCK_SIZE]; __shared__ float prop_fNE[BLOCK_SIZE]; __shared__ float prop_fSE[BLOCK_SIZE]; #define prop_fW prop_fE #define prop_fSW prop_fNE #define prop_fNW prop_fSE int ncode = map[gi]; int type = decodeNodeType(ncode); // Unused nodes do not participate in the simulation. if (isExcludedNode(type)) { return; } int orientation = decodeNodeOrientation(ncode); // Cache the distributions in local variables Dist d0; if (!isPropagationOnly(type) ) { getDist( &d0, dist_in, gi ); fixMissingDistributions(&d0, dist_in, ncode, type, orientation, gi, ovx, ovy , gg0m0 ); // Macroscopic quantities for the current cell float g0m0, v[2]; getMacro(&d0, ncode, type, orientation, &g0m0, v ); precollisionBoundaryConditions(&d0, ncode, type, orientation, &g0m0, v ); if (isWetNode(type)) { BGK_relaxate0( g0m0, v , &d0, type, ncode ); } postcollisionBoundaryConditions(&d0, ncode, type, orientation, &g0m0, v, gi, dist_out ); if (isWetNode(type) ) { checkInvalidValues(&d0, gx, gy ); } // Only save the macroscopic quantities if requested to do so. if ((options & OPTION_SAVE_MACRO_FIELDS) && isWetNode(type) ) { gg0m0[gi] = g0m0 ; ovx[gi] = v[0]; ovy[gi] = v[1]; ; } } // propagation only const bool propagation_only = isPropagationOnly(type); // Initialize the shared array with invalid sentinel values. If the sentinel // value is not subsequently overridden, it will not be propagated. prop_fE[lx] = -1.0f; __syncthreads(); if (!propagation_only ) { // Update the 0-th direction distribution dist_out[gi] = d0.fC; // Propagation in directions orthogonal to the X axis (global memory) { if (gy < 257) { dist_out[gi + (148608u + (unsigned int)(0 + 288))] = d0.fN; } } { if (gy > 0) { dist_out[gi + (297216u + (unsigned int)(0 + -288))] = d0.fS; } } // E propagation in shared memory if (gx < 257) { // Note: propagation to ghost nodes is done directly in global memory as there // are no threads running for the ghost nodes. if (lx < 63 && gx != 256) { prop_fE[lx+1] = d0.fE; prop_fNE[lx+1] = d0.fNE; prop_fSE[lx+1] = d0.fSE; // E propagation in global memory (at right block boundary) } else { { dist_out[gi + (74304u + (unsigned int)(0 + 1))] = d0.fE; } { if (gy < 257) { dist_out[gi + (371520u + (unsigned int)(0 + 289))] = d0.fNE; } } { if (gy > 0) { dist_out[gi + (594432u + (unsigned int)(0 + -287))] = d0.fSE; } } } } } __syncthreads(); // Save locally propagated distributions into global memory. // The leftmost thread is not updated in this block. if (lx > 0 && gx < 258 && !propagation_only ) if (prop_fE[lx] != -1.0f) { dist_out[gi + (74304u + (unsigned int)(0 + 0))] = prop_fE[lx]; if (gy < 257) { dist_out[gi + (371520u + (unsigned int)(0 + 288))] = prop_fNE[lx]; } if (gy > 0) { dist_out[gi + (594432u + (unsigned int)(0 + -288))] = prop_fSE[lx]; } } __syncthreads(); // Refill the propagation buffer with sentinel values. prop_fE[lx] = -1.0f; __syncthreads(); if (!propagation_only ) { // W propagation in shared memory // Note: propagation to ghost nodes is done directly in global memory as there // are no threads running for the ghost nodes. 
if ((lx > 1 || (lx > 0 && gx >= 64)) && !propagation_only) { prop_fW[lx-1] = d0.fW; prop_fNW[lx-1] = d0.fNW; prop_fSW[lx-1] = d0.fSW; // W propagation in global memory (at left block boundary) } else if (gx > 0) { { dist_out[gi + (222912u + (unsigned int)(0 + -1))] = d0.fW; } { if (gy < 257) { dist_out[gi + (445824u + (unsigned int)(0 + 287))] = d0.fNW; } } { if (gy > 0) { dist_out[gi + (520128u + (unsigned int)(0 + -289))] = d0.fSW; } } } } __syncthreads(); // The rightmost thread is not updated in this block. if (lx < 63 && gx < 257 && !propagation_only ) if (prop_fE[lx] != -1.0f) { dist_out[gi + (222912u + (unsigned int)(0 + 0))] = prop_fW[lx]; if (gy < 257) { dist_out[gi + (445824u + (unsigned int)(0 + 288))] = prop_fNW[lx]; } if (gy > 0) { dist_out[gi + (520128u + (unsigned int)(0 + -288))] = prop_fSW[lx]; } } } // Copies momentum transfer for a force object into a linear buffer // so that a force can be computed easily via a sum reduction. // TODO(michalj): Fuse this with summation to improve performance. __global__ void ComputeForceObjects( const unsigned int *__restrict__ idx, const unsigned int *__restrict__ idx2, const float *__restrict__ dist, float *out, const unsigned int max_idx ) { const unsigned int gidx = get_global_id(0); if (gidx >= max_idx) { return; } const unsigned int gi = idx[gidx]; const unsigned int gi2 = idx2[gidx]; const float mx = dist[gi] + dist[gi2]; out[gidx] = mx; } // Applies periodic boundary conditions within a single subdomain. // dist: pointer to the distributions array // axis: along which axis the PBCs are to be applied (0:x, 1:y, 2:z) __global__ void ApplyPeriodicBoundaryConditions( float *dist, int axis) { const int idx1 = get_global_id(0); unsigned int gi_low, gi_high; // For single block PBC, the envelope size (width of the ghost node // layer) is always 1. // TODO(michalj): Generalize this for the case when envelope_size != 1. if (axis == 0) { if (idx1 >= 258) { return; } gi_low = getGlobalIdx(0, idx1); // ghost node gi_high = getGlobalIdx(256, idx1); // real node { // TODO(michalj): Generalize this for grids with e_i > 1. // Load distributions to be propagated from low idx to high idx. const float ffW = dist[gi_low + DIST_SIZE * 3 + (unsigned int)0]; const float ffNW = dist[gi_low + DIST_SIZE * 6 + (unsigned int)0]; const float ffSW = dist[gi_low + DIST_SIZE * 7 + (unsigned int)0]; if (gi_high != INVALID_NODE && isfinite(ffW)) { dist[gi_high + DIST_SIZE * 3 + (unsigned int)0] = ffW; } if (isfinite(ffNW)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 > 1 && idx1 <= 256) { dist[gi_high + DIST_SIZE * 6 + (unsigned int)0] = ffNW; } else { if (0) {} } } if (isfinite(ffSW)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 < 256 && idx1 >= 1) { dist[gi_high + DIST_SIZE * 7 + (unsigned int)0] = ffSW; } else { if (0) {} } } } // low to high { // Load distributrions to be propagated from high idx to low idx. const float ffE = dist[gi_high + DIST_SIZE * 1 + (unsigned int)1]; const float ffNE = dist[gi_high + DIST_SIZE * 5 + (unsigned int)1]; const float ffSE = dist[gi_high + DIST_SIZE * 8 + (unsigned int)1]; if (isfinite(ffE) && gi_low != INVALID_NODE) { dist[gi_low + DIST_SIZE * 1 + (unsigned int)1] = ffE; } if (isfinite(ffNE)) { // Skip distributions which are not populated or cross multiple boundaries. 
if (idx1 > 1 && idx1 <= 256 ) { dist[gi_low + DIST_SIZE * 5 + (unsigned int)1] = ffNE; } else { if (0) {} } } if (isfinite(ffSE)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 < 256 && idx1 >= 1 ) { dist[gi_low + DIST_SIZE * 8 + (unsigned int)1] = ffSE; } else { if (0) {} } } } // high to low } else if (axis == 1) { if (idx1 >= 258) { return; } gi_low = getGlobalIdx(idx1, 0); // ghost node gi_high = getGlobalIdx(idx1, 256); // real node { // TODO(michalj): Generalize this for grids with e_i > 1. // Load distributions to be propagated from low idx to high idx. const float ffS = dist[gi_low + DIST_SIZE * 4 + (unsigned int)0]; const float ffSW = dist[gi_low + DIST_SIZE * 7 + (unsigned int)0]; const float ffSE = dist[gi_low + DIST_SIZE * 8 + (unsigned int)0]; if (gi_high != INVALID_NODE && isfinite(ffS)) { dist[gi_high + DIST_SIZE * 4 + (unsigned int)0] = ffS; } if (isfinite(ffSW)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 < 256 && idx1 >= 1) { dist[gi_high + DIST_SIZE * 7 + (unsigned int)0] = ffSW; } else { if (0) {} } } if (isfinite(ffSE)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 > 1 && idx1 <= 256) { dist[gi_high + DIST_SIZE * 8 + (unsigned int)0] = ffSE; } else { if (0) {} } } } // low to high { // Load distributrions to be propagated from high idx to low idx. const float ffN = dist[gi_high + DIST_SIZE * 2 + (unsigned int)288]; const float ffNE = dist[gi_high + DIST_SIZE * 5 + (unsigned int)288]; const float ffNW = dist[gi_high + DIST_SIZE * 6 + (unsigned int)288]; if (isfinite(ffN) && gi_low != INVALID_NODE) { dist[gi_low + DIST_SIZE * 2 + (unsigned int)288] = ffN; } if (isfinite(ffNE)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 > 1 && idx1 <= 256 ) { dist[gi_low + DIST_SIZE * 5 + (unsigned int)288] = ffNE; } else { if (0) {} } } if (isfinite(ffNW)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 < 256 && idx1 >= 1 ) { dist[gi_low + DIST_SIZE * 6 + (unsigned int)288] = ffNW; } else { if (0) {} } } } // high to low } } // Applies periodic boundary conditions to a scalar field within a single subdomain. // dist: pointer to the array with the field data // axis: along which axis the PBCs are to be applied (0:x, 1:y, 2:z) __global__ void ApplyMacroPeriodicBoundaryConditions( float *field, int axis) { const int idx1 = get_global_id(0); unsigned int gi_low, gi_high; // TODO(michalj): Generalize this for the case when envelope_size != 1. if (axis == 0) { if (idx1 >= 258) { return; } gi_low = getGlobalIdx(0, idx1); // ghost node gi_high = getGlobalIdx(256, idx1); // real node if ( isfinite(field[gi_high])) { field[gi_low] = field[gi_high]; } gi_low = getGlobalIdx(1, idx1); // real node gi_high = getGlobalIdx(257, idx1); // ghost node if ( isfinite(field[gi_low])) { field[gi_high] = field[gi_low]; } } else if (axis == 1) { if (idx1 >= 258) { return; } gi_low = getGlobalIdx(idx1, 0); // ghost node gi_high = getGlobalIdx(idx1, 256); // real node if ( isfinite(field[gi_high])) { field[gi_low] = field[gi_high]; } gi_low = getGlobalIdx(idx1, 1); // real node gi_high = getGlobalIdx(idx1, 257); // ghost node if ( isfinite(field[gi_low])) { field[gi_high] = field[gi_low]; } } } // Collects ghost node data for connections along axes other than X. 
// dist: distributions array // base_gy: where along the X axis to start collecting the data // face: see LBBlock class constants // buffer: buffer where the data is to be saved __global__ void CollectContinuousData( float *dist, int face, int base_gx, int max_lx, float *buffer) { const int idx = get_global_id(0); float tmp; if (idx >= max_lx) { return; } switch (face) { case 2: { const int dist_size = max_lx / 3; const int dist_num = idx / dist_size; const int gx = idx % dist_size; unsigned int gi = getGlobalIdx(base_gx + gx, 0); switch (dist_num) { case 0: { tmp = dist[gi + DIST_SIZE * 4 + (unsigned int)0]; break; } case 1: { tmp = dist[gi + DIST_SIZE * 7 + (unsigned int)0]; break; } case 2: { tmp = dist[gi + DIST_SIZE * 8 + (unsigned int)0]; break; } } buffer[idx] = tmp; break; } case 3: { const int dist_size = max_lx / 3; const int dist_num = idx / dist_size; const int gx = idx % dist_size; unsigned int gi = getGlobalIdx(base_gx + gx, 257); switch (dist_num) { case 0: { tmp = dist[gi + DIST_SIZE * 2 + (unsigned int)0]; break; } case 1: { tmp = dist[gi + DIST_SIZE * 5 + (unsigned int)0]; break; } case 2: { tmp = dist[gi + DIST_SIZE * 6 + (unsigned int)0]; break; } } buffer[idx] = tmp; break; } } } __global__ void DistributeContinuousData( float *dist, int face, int base_gx, int max_lx, float *buffer) { const int idx = get_global_id(0); if (idx >= max_lx) { return; } switch (face) { case 2: { const int dist_size = max_lx / 3; const int dist_num = idx / dist_size; const int gx = idx % dist_size; const float tmp = buffer[idx]; unsigned int gi = getGlobalIdx(base_gx + gx, 256); switch (dist_num) { case 0: { dist[gi + DIST_SIZE * 4 + (unsigned int)0] = tmp; break; } case 1: { dist[gi + DIST_SIZE * 7 + (unsigned int)0] = tmp; break; } case 2: { dist[gi + DIST_SIZE * 8 + (unsigned int)0] = tmp; break; } } break; } case 3: { const int dist_size = max_lx / 3; const int dist_num = idx / dist_size; const int gx = idx % dist_size; const float tmp = buffer[idx]; unsigned int gi = getGlobalIdx(base_gx + gx, 1); switch (dist_num) { case 0: { dist[gi + DIST_SIZE * 2 + (unsigned int)0] = tmp; break; } case 1: { dist[gi + DIST_SIZE * 5 + (unsigned int)0] = tmp; break; } case 2: { dist[gi + DIST_SIZE * 6 + (unsigned int)0] = tmp; break; } } break; } } } __global__ void CollectSparseData( unsigned int *idx_array, float *dist, float *buffer, int max_idx) { int idx = get_global_id(0); if (idx >= max_idx) { return; } unsigned int gi = idx_array[idx]; if (gi == INVALID_NODE) return; if (gi >= DIST_SIZE * 9) { printf("invalid node index detected in sparse coll %d (%d, %d)\n", gi, get_global_id(0), get_global_id(1)); return; } buffer[idx] = dist[gi]; } __global__ void DistributeSparseData( unsigned int *idx_array, float *dist, float *buffer, int max_idx) { int idx = get_global_id(0); if (idx >= max_idx) { return; } unsigned int gi = idx_array[idx]; if (gi == INVALID_NODE) return; if (gi >= DIST_SIZE * 9) { printf("invalid node index detected in sparse dist %d (%d, %d)\n", gi, get_global_id(0), get_global_id(1)); return; } dist[gi] = buffer[idx]; } __global__ void CollectContinuousMacroData( float *field, int base_gx, int max_lx, int gy, float *buffer) { const int idx = get_global_id(0); if (idx >= max_lx) { return; } unsigned int gi = getGlobalIdx(base_gx + idx, gy); buffer[idx] = field[gi]; } __global__ void DistributeContinuousMacroData( float *field, int base_gx, int max_lx, int gy, float *buffer) { const int idx = get_global_id(0); if (idx >= max_lx) { return; } unsigned int gi = getGlobalIdx(base_gx + idx, 
gy); field[gi] = buffer[idx]; }
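// ---------------------------------------------------------------------------
// Editor's note: hedged usage sketch, not part of the generated Sailfish file.
// The kernels above are normally driven from the Sailfish host code; the loop
// below only illustrates how CollideAndPropagate could be launched.  All
// launch sizes are assumptions inferred from constants in this file:
// BLOCK_SIZE = 64 threads along X, and DIST_SIZE = 74304 = 288 * 258, i.e. a
// row-padded lattice of 288 columns and 258 rows (256 active nodes plus a
// one-node ghost layer).  The function name and the d_* pointer names are
// hypothetical; the hip runtime header included at the top of this file is
// assumed to be in scope.
static void run_lbm_steps_sketch(int *d_map,
                                 float *d_dist_a, float *d_dist_b,
                                 float *d_rho, float *d_vx, float *d_vy,
                                 int num_steps)
{
    // One block covers BLOCK_SIZE nodes along X; blockIdx.y selects the
    // lattice row (the kernel reads it via get_group_id(1)).
    dim3 block(BLOCK_SIZE, 1, 1);
    dim3 grid((258 + BLOCK_SIZE - 1) / BLOCK_SIZE, 258, 1);  // assumed shape

    for (int step = 0; step < num_steps; ++step) {
        hipLaunchKernelGGL(CollideAndPropagate, grid, block, 0, 0,
                           d_map, d_dist_a, d_dist_b,
                           d_rho, d_vx, d_vy, OPTION_SAVE_MACRO_FIELDS);
        // Plain two-buffer (AB) swap; the real driver may instead rely on the
        // AA access pattern described in the getDist() comment above.
        float *tmp = d_dist_a; d_dist_a = d_dist_b; d_dist_b = tmp;
    }
    hipDeviceSynchronize();
}
// ---------------------------------------------------------------------------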
912677e53d16a5131e01e118a935857acb5e7b78.cu
__constant__ float tau0 = 3.5f; // relaxation time __constant__ float visc = 1.0f; // viscosity #define BLOCK_SIZE 64 #define DIST_SIZE 74304u #define OPTION_SAVE_MACRO_FIELDS 1 #define OPTION_BULK 2 #define INVALID_NODE 0xffffffff #define DT 1.0f #include <stdio.h> // Additional geometry parameters (velocities, pressures, etc) __constant__ float node_params[2] = { 1.00000000000000005551e-01f , 0.00000000000000000000e+00f , }; // OpenCL compatibility code. __device__ int inline get_local_size(int i) { if (i == 0) { return blockDim.x; } else { return blockDim.y; } } __device__ int inline get_global_size(int i) { if (i == 0) { return blockDim.x * gridDim.x; } else { return blockDim.y * gridDim.y; } } __device__ int inline get_group_id(int i) { if (i == 0) { return blockIdx.x; } else { return blockIdx.y; } } __device__ int inline get_local_id(int i) { if (i == 0) { return threadIdx.x; } else { return threadIdx.y; } } __device__ int inline get_global_id(int i) { if (i == 0) { return threadIdx.x + blockIdx.x * blockDim.x; } else { return threadIdx.y + blockIdx.y * blockDim.y; } } typedef struct Dist { float fC; float fE; float fN; float fW; float fS; float fNE; float fNW; float fSW; float fSE; } Dist; // Functions for checking whether a node is of a given specific type. __device__ inline bool is_NTFluid(unsigned int type) { return type == 1; } __device__ inline bool isNTFullBBWall(unsigned int type) { return type == 2; } __device__ inline bool is_NTGhost(unsigned int type) { return type == 4; } __device__ inline bool isNTRegularizedVelocity(unsigned int type) { return type == 3; } // Returns true is the node does not require any special processing // to calculate macroscopic fields. __device__ inline bool NTUsesStandardMacro(unsigned int type) { return (false || is_NTFluid(type) || isNTFullBBWall(type) ); } // Wet nodes are nodes that undergo a standard collision procedure. __device__ inline bool isWetNode(unsigned int type) { return (false || is_NTFluid(type) || isNTRegularizedVelocity(type) ); } // Wet nodes are nodes that undergo a standard collision procedure. __device__ inline bool isExcludedNode(unsigned int type) { return (false || is_NTGhost(type) ); } __device__ inline bool isPropagationOnly(unsigned int type) { return (false ); } // Internal helper, do not use directly. __device__ inline void _storeNodeScratchSpace(unsigned int scratch_id, unsigned int num_values, float *buffer, float *g_buffer) { for (int i = 0; i < num_values; i++) { g_buffer[i + scratch_id * num_values] = buffer[i]; } } // Internal helper, do not use directly. __device__ inline void _loadNodeScratchSpace(unsigned int scratch_id, unsigned int num_values, float *g_buffer, float *buffer) { for (int i = 0; i < num_values; i++) { buffer[i] = g_buffer[i + scratch_id * num_values]; } } // Reads values from node scratch space (in global memory) into a local buffer. // // scratch_id: scratch space ID for nodes of type 'type' // type: node type // g_buffer: pointer to a buffer in the global memory used for scratch // space // buffer: pointer to a local buffer where the values will be saved __device__ inline void loadNodeScratchSpace(unsigned int scratch_id, unsigned int type, float *g_buffer, float* buffer) { switch (type) { } } // Stores values from a local buffer into the node scratch space in global memory. 
// // Arguments: see loadNodeScratchSpace __device__ inline void storeNodeScratchSpace(unsigned int scratch_id, unsigned int type, float* buffer, float* g_buffer) { switch (type) { } } __device__ inline unsigned int decodeNodeType(unsigned int nodetype) { return nodetype & 7; } __device__ inline unsigned int decodeNodeOrientation(unsigned int nodetype) { return nodetype >> 5; } // Returns the node's scratch ID, to be passed to (load,store)NodeScratchSpace as scratch_id. __device__ inline unsigned int decodeNodeScratchId(unsigned int nodetype) { return (nodetype >> 5) & 0; } __device__ inline unsigned int decodeNodeParamIdx(unsigned int nodetype) { return (nodetype >> 3) & 3; } __device__ inline unsigned int getGlobalIdx(int gx, int gy) { return gx + 288 * gy; } __device__ inline void decodeGlobalIdx(unsigned int gi, int *gx, int *gy) { *gx = gi % 288; *gy = gi / 288; } __device__ void die(void) { asm("trap;"); } __device__ void checkInvalidValues(Dist* d, int gx, int gy ) { bool valid = true; if (!isfinite(d->fC)) { valid = false; printf("ERR(subdomain=0): Invalid value of fC (%f) at: " "(%d, %d)" "\n", d->fC, gx, gy ); } if (!isfinite(d->fE)) { valid = false; printf("ERR(subdomain=0): Invalid value of fE (%f) at: " "(%d, %d)" "\n", d->fE, gx, gy ); } if (!isfinite(d->fN)) { valid = false; printf("ERR(subdomain=0): Invalid value of fN (%f) at: " "(%d, %d)" "\n", d->fN, gx, gy ); } if (!isfinite(d->fW)) { valid = false; printf("ERR(subdomain=0): Invalid value of fW (%f) at: " "(%d, %d)" "\n", d->fW, gx, gy ); } if (!isfinite(d->fS)) { valid = false; printf("ERR(subdomain=0): Invalid value of fS (%f) at: " "(%d, %d)" "\n", d->fS, gx, gy ); } if (!isfinite(d->fNE)) { valid = false; printf("ERR(subdomain=0): Invalid value of fNE (%f) at: " "(%d, %d)" "\n", d->fNE, gx, gy ); } if (!isfinite(d->fNW)) { valid = false; printf("ERR(subdomain=0): Invalid value of fNW (%f) at: " "(%d, %d)" "\n", d->fNW, gx, gy ); } if (!isfinite(d->fSW)) { valid = false; printf("ERR(subdomain=0): Invalid value of fSW (%f) at: " "(%d, %d)" "\n", d->fSW, gx, gy ); } if (!isfinite(d->fSE)) { valid = false; printf("ERR(subdomain=0): Invalid value of fSE (%f) at: " "(%d, %d)" "\n", d->fSE, gx, gy ); } if (!valid) { die(); } } // Load the distributions from din to dout, for the node with the index 'idx'. // Performs propagation when reading distributions from global memory. // This implements the propagate-on-read scheme. // Implements the propagate-on-read scheme for the AA access pattern, where the // distributions are not located in their natural slots, but the opposite ones // (e.g. fNE is located where fSW normally is). This ensures that within a single // timestep, the distributions are read from and written to the exact same places // in global memory. __device__ inline void getDist( Dist *dout, const float *__restrict__ din, unsigned int gi ) { dout->fC = din[gi + DIST_SIZE * 0 + (unsigned int)0]; dout->fE = din[gi + DIST_SIZE * 1 + (unsigned int)0]; dout->fN = din[gi + DIST_SIZE * 2 + (unsigned int)0]; dout->fW = din[gi + DIST_SIZE * 3 + (unsigned int)0]; dout->fS = din[gi + DIST_SIZE * 4 + (unsigned int)0]; dout->fNE = din[gi + DIST_SIZE * 5 + (unsigned int)0]; dout->fNW = din[gi + DIST_SIZE * 6 + (unsigned int)0]; dout->fSW = din[gi + DIST_SIZE * 7 + (unsigned int)0]; dout->fSE = din[gi + DIST_SIZE * 8 + (unsigned int)0]; } // Returns a node parameter which is a vector (in 'out'). 
__device__ inline void node_param_get_vector(const int idx, float *out ) { out[0] = node_params[idx]; out[1] = node_params[idx + 1]; } // Returns a node parameter which is a scalar. __device__ inline float node_param_get_scalar(const int idx ) { return node_params[idx]; } // Add comments for the Guo density implementation. __device__ inline void bounce_back(Dist *fi) { float t; t = fi->fE; fi->fE = fi->fW; fi->fW = t; t = fi->fN; fi->fN = fi->fS; fi->fS = t; t = fi->fNE; fi->fNE = fi->fSW; fi->fSW = t; t = fi->fNW; fi->fNW = fi->fSE; fi->fSE = t; } // Compute the 0th moment of the distributions, i.e. density. __device__ inline void compute_0th_moment(Dist *fi, float *out) { *out = fi->fC + fi->fE + fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW + fi->fW; } // Compute the 1st moments of the distributions, i.e. momentum. __device__ inline void compute_1st_moment(Dist *fi, float *out, int add, float factor) { if (add) { out[0] += factor * ( fi->fE + fi->fNE - fi->fNW + fi->fSE - fi->fSW - fi->fW ); out[1] += factor * ( fi->fN + fi->fNE + fi->fNW - fi->fS - fi->fSE - fi->fSW ); } else { out[0] = factor * ( fi->fE + fi->fNE - fi->fNW + fi->fSE - fi->fSW - fi->fW ); out[1] = factor * ( fi->fN + fi->fNE + fi->fNW - fi->fS - fi->fSE - fi->fSW ); } } // Compute the 2nd moments of the distributions. Order of components is: // 2D: xx, xy, yy // 3D: xx, xy, xz, yy, yz, zz __device__ inline void compute_2nd_moment(Dist *fi, float *out) { out[0] = fi->fE + fi->fNE + fi->fNW + fi->fSE + fi->fSW + fi->fW ; out[1] = fi->fNE - fi->fNW - fi->fSE + fi->fSW ; out[2] = fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW ; } // Computes the 2nd moment of the non-equilibrium distribution function // given the full distribution fuction 'fi'. __device__ inline void compute_noneq_2nd_moment(Dist* fi, const float rho, float *v0, float *out) { out[0] = fi->fE + fi->fNE + fi->fNW + fi->fSE + fi->fSW + fi->fW - rho*((v0[0]*v0[0]) + 1.0f* (1.0f / 3.0f)) ; out[1] = fi->fNE - fi->fNW - fi->fSE + fi->fSW - rho*v0[0]*v0[1] ; out[2] = fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW - rho*((v0[1]*v0[1]) + 1.0f* (1.0f / 3.0f)) ; } // Compute the 1st moments of the distributions and divide it by the 0-th moment // i.e. compute velocity. __device__ inline void compute_1st_div_0th(Dist *fi, float *out, float zero) { out[0] = (fi->fE + fi->fNE - fi->fNW + fi->fSE - fi->fSW - fi->fW)/zero ; out[1] = (fi->fN + fi->fNE + fi->fNW - fi->fS - fi->fSE - fi->fSW)/zero ; } __device__ inline void compute_macro_quant(Dist *fi, float *rho, float *v) { compute_0th_moment(fi, rho); compute_1st_div_0th(fi, v, *rho); } __device__ inline void get0thMoment(Dist *fi, int node_type, int orientation, float *out) { compute_0th_moment(fi, out); } // Common code for the equilibrium and Zou-He density boundary conditions. // // Get macroscopic density rho and velocity v given a distribution fi, and // the node class node_type. // __device__ inline void getMacro( Dist *fi, int ncode, int node_type, int orientation, float *rho, float *v0 ) { if (NTUsesStandardMacro(node_type) || orientation == 0) { compute_macro_quant(fi, rho, v0); } else if (isNTRegularizedVelocity(node_type)) { int node_param_idx = decodeNodeParamIdx(ncode); // We're dealing with a boundary node, for which some of the distributions // might be meaningless. Fill them with the values of the opposite // distributions. switch (orientation) { case 1: { // fE is undefined. fi->fE = fi->fW; // fNE is undefined. fi->fNE = fi->fSW; // fSE is undefined. 
fi->fSE = fi->fNW; break; } case 2: { // fN is undefined. fi->fN = fi->fS; // fNE is undefined. fi->fNE = fi->fSW; // fNW is undefined. fi->fNW = fi->fSE; break; } case 3: { // fW is undefined. fi->fW = fi->fE; // fNW is undefined. fi->fNW = fi->fSE; // fSW is undefined. fi->fSW = fi->fNE; break; } case 4: { // fS is undefined. fi->fS = fi->fN; // fSW is undefined. fi->fSW = fi->fNE; // fSE is undefined. fi->fSE = fi->fNW; break; } } *rho = fi->fC + fi->fE + fi->fN + fi->fNE + fi->fNW + fi->fS + fi->fSE + fi->fSW + fi->fW; node_param_get_vector(node_param_idx, v0 ); switch (orientation) { case 1: *rho = (*rho)/(-v0[0] + 1.0f) ; break; case 2: *rho = (*rho)/(-v0[1] + 1.0f) ; break; case 3: *rho = (*rho)/(v0[0] + 1.0f) ; break; case 4: *rho = (*rho)/(v0[1] + 1.0f) ; break; } } } // Uses extrapolation/other schemes to compute missing distributions for some implementations // of boundary condtitions. __device__ inline void fixMissingDistributions( Dist *fi, float *dist_in, int ncode, int node_type, int orientation, unsigned int gi, float *__restrict__ ivx, float *__restrict__ ivy, float *gg0m0 ) { if (0) {} } // TODO: Check whether it is more efficient to actually recompute // node_type and orientation instead of passing them as variables. __device__ inline void postcollisionBoundaryConditions( Dist *fi, int ncode, int node_type, int orientation, float *rho, float *v0, unsigned int gi, float *dist_out ) { if (0) {} } __device__ inline void precollisionBoundaryConditions(Dist *fi, int ncode, int node_type, int orientation, float *rho, float *v0 ) { if (0) {} else if (isNTFullBBWall(node_type)) { bounce_back(fi); } else if (0 || isNTRegularizedVelocity(node_type) ) { // Bounce-back of the non-equilibrium parts. switch (orientation) { case 1: fi->fE = fi->fW + (2.0f* (1.0f / 3.0f))*(*rho)*v0[0] ; fi->fNE = fi->fSW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1]) ; fi->fSE = fi->fNW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1]) ; break; case 2: fi->fN = fi->fS + (2.0f* (1.0f / 3.0f))*(*rho)*v0[1] ; fi->fNE = fi->fSW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1]) ; fi->fNW = fi->fSE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1]) ; break; case 3: fi->fW = fi->fE - 2.0f* (1.0f / 3.0f)*(*rho)*v0[0] ; fi->fNW = fi->fSE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] + (1.0f* (1.0f * (1.0f / 6.0f)))*v0[1]) ; fi->fSW = fi->fNE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1]) ; break; case 4: fi->fS = fi->fN - 2.0f* (1.0f / 3.0f)*(*rho)*v0[1] ; fi->fSW = fi->fNE + (*rho)*(-1.0f* (1.0f * (1.0f / 6.0f))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1]) ; fi->fSE = fi->fNW + (*rho)*((1.0f* (1.0f * (1.0f / 6.0f)))*v0[0] - 1.0f* (1.0f * (1.0f / 6.0f))*v0[1]) ; break; case 0: bounce_back(fi); return; } float flux[3]; compute_noneq_2nd_moment(fi, *rho, v0, flux); fi->fC = max(1e-7f, (4.0f* (1.0f / 9.0f))*(*rho)*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (4.0f* (1.0f / 9.0f))*(*rho) + -2.0f* (1.0f / 3.0f)*flux[0] - 2.0f* (1.0f / 3.0f)*flux[2] ); fi->fE = max(1e-7f, (1.0f* (1.0f / 9.0f))*(*rho)*(v0[0]*(3.0f*v0[0] + 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*(*rho) + (1.0f* (1.0f / 3.0f))*flux[0] - 1.0f* (1.0f / 6.0f)*flux[2] ); fi->fN = max(1e-7f, (1.0f* (1.0f / 9.0f))*(*rho)*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] + 3.0f)) + 
(1.0f* (1.0f / 9.0f))*(*rho) + -1.0f* (1.0f / 6.0f)*flux[0] + (1.0f* (1.0f / 3.0f))*flux[2] ); fi->fW = max(1e-7f, (1.0f* (1.0f / 9.0f))*(*rho)*(v0[0]*(3.0f*v0[0] - 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*(*rho) + (1.0f* (1.0f / 3.0f))*flux[0] - 1.0f* (1.0f / 6.0f)*flux[2] ); fi->fS = max(1e-7f, (1.0f* (1.0f / 9.0f))*(*rho)*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 9.0f))*(*rho) + -1.0f* (1.0f / 6.0f)*flux[0] + (1.0f* (1.0f / 3.0f))*flux[2] ); fi->fNE = max(1e-7f, (1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho) + (1.0f* (1.0f / 12.0f))*flux[0] + (1.0f* (1.0f / 4.0f))*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2] ); fi->fNW = max(1e-7f, (1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho) + (1.0f* (1.0f / 12.0f))*flux[0] - 1.0f* (1.0f / 4.0f)*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2] ); fi->fSW = max(1e-7f, (1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho) + (1.0f* (1.0f / 12.0f))*flux[0] + (1.0f* (1.0f / 4.0f))*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2] ); fi->fSE = max(1e-7f, (1.0f* (1.0f / 36.0f))*(*rho)*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*(*rho) + (1.0f* (1.0f / 12.0f))*flux[0] - 1.0f* (1.0f / 4.0f)*flux[1] + (1.0f* (1.0f / 12.0f))*flux[2] ); } } // // Performs the relaxation step in the BGK model given the density rho, // the velocity v and the distribution fi. __device__ inline void BGK_relaxate0( float rho, float *iv0 , Dist *d0, int node_type, int ncode ) { float v0[2]; Dist feq0; v0[0] = iv0[0]; v0[1] = iv0[1]; ; feq0.fC = (4.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (4.0f* (1.0f / 9.0f))*rho ; feq0.fE = (1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] + 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho ; feq0.fN = (1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 9.0f))*rho ; feq0.fW = (1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] - 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho ; feq0.fS = (1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 9.0f))*rho ; feq0.fNE = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; feq0.fNW = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; feq0.fSW = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; feq0.fSE = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; float omega = 2.85714285714285698425e-01f ; d0->fC += omega * (feq0.fC - d0->fC); d0->fE += omega * (feq0.fE - d0->fE); d0->fN += omega * (feq0.fN - d0->fN); d0->fW += omega * (feq0.fW - d0->fW); d0->fS += omega * (feq0.fS - d0->fS); d0->fNE += omega * (feq0.fNE - d0->fNE); d0->fNW += omega * (feq0.fNW - d0->fNW); d0->fSW += omega * (feq0.fSW - d0->fSW); d0->fSE += omega * (feq0.fSE - d0->fSE); // FIXME: This should be moved to postcollision boundary 
conditions. } // A kernel to set the node distributions using the equilibrium distributions // and the macroscopic fields. __global__ void SetInitialConditions( float *dist1_in, float *__restrict__ ivx, float *__restrict__ ivy, const float *__restrict__ irho, const int *__restrict__ map ) { int lx = get_local_id(0); // ID inside the current block int gx = get_global_id(0); int gy = get_group_id(1); unsigned int gi = getGlobalIdx(gx, gy); // Nothing to do if we're outside of the simulation domain. if (gx > 257) { return; } // Cache macroscopic fields in local variables. float rho = irho[gi] ; float v0[2]; v0[0] = ivx[gi]; v0[1] = ivy[gi]; dist1_in[gi + (0u + (unsigned int)(0 + 0))] = (4.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (4.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (74304u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] + 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (148608u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (222912u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 9.0f))*rho*(v0[0]*(3.0f*v0[0] - 3.0f) - 3.0f* (1.0f * (1.0f / 2.0f))*(v0[1]*v0[1])) + (1.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (297216u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 9.0f))*rho*(-3.0f* (1.0f * (1.0f / 2.0f))*(v0[0]*v0[0]) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 9.0f))*rho ; dist1_in[gi + (371520u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; dist1_in[gi + (445824u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] + 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; dist1_in[gi + (520128u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] + 9.0f*v0[1] - 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; dist1_in[gi + (594432u + (unsigned int)(0 + 0))] = (1.0f* (1.0f / 36.0f))*rho*(v0[0]*(3.0f*v0[0] - 9.0f*v0[1] + 3.0f) + v0[1]*(3.0f*v0[1] - 3.0f)) + (1.0f* (1.0f / 36.0f))*rho ; } __global__ void PrepareMacroFields( const int *__restrict__ map, const float *__restrict__ dist_in, float *orho, int options ) { int lx = get_local_id(0); // ID inside the current block int gx = get_global_id(0); int gy = get_group_id(1); unsigned int gi = getGlobalIdx(gx, gy); // Nothing to do if we're outside of the simulation domain. if (gx > 257) { return; } int ncode = map[gi]; int type = decodeNodeType(ncode); // Unused nodes do not participate in the simulation. if (isExcludedNode(type) || isPropagationOnly(type)) return; int orientation = decodeNodeOrientation(ncode); Dist fi; float out; getDist( &fi, dist_in, gi ); get0thMoment(&fi, type, orientation, &out); orho[gi] = out; } __global__ void CollideAndPropagate( const int *__restrict__ map, float *__restrict__ dist_in, float *__restrict__ dist_out, float *__restrict__ gg0m0, float *__restrict__ ovx, float *__restrict__ ovy, int options ) { int lx = get_local_id(0); // ID inside the current block int gx = get_global_id(0); int gy = get_group_id(1); unsigned int gi = getGlobalIdx(gx, gy); // Nothing to do if we're outside of the simulation domain. 
if (gx > 257) { return; } // Shared variables for in-block propagation __shared__ float prop_fE[BLOCK_SIZE]; __shared__ float prop_fNE[BLOCK_SIZE]; __shared__ float prop_fSE[BLOCK_SIZE]; #define prop_fW prop_fE #define prop_fSW prop_fNE #define prop_fNW prop_fSE int ncode = map[gi]; int type = decodeNodeType(ncode); // Unused nodes do not participate in the simulation. if (isExcludedNode(type)) { return; } int orientation = decodeNodeOrientation(ncode); // Cache the distributions in local variables Dist d0; if (!isPropagationOnly(type) ) { getDist( &d0, dist_in, gi ); fixMissingDistributions(&d0, dist_in, ncode, type, orientation, gi, ovx, ovy , gg0m0 ); // Macroscopic quantities for the current cell float g0m0, v[2]; getMacro(&d0, ncode, type, orientation, &g0m0, v ); precollisionBoundaryConditions(&d0, ncode, type, orientation, &g0m0, v ); if (isWetNode(type)) { BGK_relaxate0( g0m0, v , &d0, type, ncode ); } postcollisionBoundaryConditions(&d0, ncode, type, orientation, &g0m0, v, gi, dist_out ); if (isWetNode(type) ) { checkInvalidValues(&d0, gx, gy ); } // Only save the macroscopic quantities if requested to do so. if ((options & OPTION_SAVE_MACRO_FIELDS) && isWetNode(type) ) { gg0m0[gi] = g0m0 ; ovx[gi] = v[0]; ovy[gi] = v[1]; ; } } // propagation only const bool propagation_only = isPropagationOnly(type); // Initialize the shared array with invalid sentinel values. If the sentinel // value is not subsequently overridden, it will not be propagated. prop_fE[lx] = -1.0f; __syncthreads(); if (!propagation_only ) { // Update the 0-th direction distribution dist_out[gi] = d0.fC; // Propagation in directions orthogonal to the X axis (global memory) { if (gy < 257) { dist_out[gi + (148608u + (unsigned int)(0 + 288))] = d0.fN; } } { if (gy > 0) { dist_out[gi + (297216u + (unsigned int)(0 + -288))] = d0.fS; } } // E propagation in shared memory if (gx < 257) { // Note: propagation to ghost nodes is done directly in global memory as there // are no threads running for the ghost nodes. if (lx < 63 && gx != 256) { prop_fE[lx+1] = d0.fE; prop_fNE[lx+1] = d0.fNE; prop_fSE[lx+1] = d0.fSE; // E propagation in global memory (at right block boundary) } else { { dist_out[gi + (74304u + (unsigned int)(0 + 1))] = d0.fE; } { if (gy < 257) { dist_out[gi + (371520u + (unsigned int)(0 + 289))] = d0.fNE; } } { if (gy > 0) { dist_out[gi + (594432u + (unsigned int)(0 + -287))] = d0.fSE; } } } } } __syncthreads(); // Save locally propagated distributions into global memory. // The leftmost thread is not updated in this block. if (lx > 0 && gx < 258 && !propagation_only ) if (prop_fE[lx] != -1.0f) { dist_out[gi + (74304u + (unsigned int)(0 + 0))] = prop_fE[lx]; if (gy < 257) { dist_out[gi + (371520u + (unsigned int)(0 + 288))] = prop_fNE[lx]; } if (gy > 0) { dist_out[gi + (594432u + (unsigned int)(0 + -288))] = prop_fSE[lx]; } } __syncthreads(); // Refill the propagation buffer with sentinel values. prop_fE[lx] = -1.0f; __syncthreads(); if (!propagation_only ) { // W propagation in shared memory // Note: propagation to ghost nodes is done directly in global memory as there // are no threads running for the ghost nodes. 
if ((lx > 1 || (lx > 0 && gx >= 64)) && !propagation_only) { prop_fW[lx-1] = d0.fW; prop_fNW[lx-1] = d0.fNW; prop_fSW[lx-1] = d0.fSW; // W propagation in global memory (at left block boundary) } else if (gx > 0) { { dist_out[gi + (222912u + (unsigned int)(0 + -1))] = d0.fW; } { if (gy < 257) { dist_out[gi + (445824u + (unsigned int)(0 + 287))] = d0.fNW; } } { if (gy > 0) { dist_out[gi + (520128u + (unsigned int)(0 + -289))] = d0.fSW; } } } } __syncthreads(); // The rightmost thread is not updated in this block. if (lx < 63 && gx < 257 && !propagation_only ) if (prop_fE[lx] != -1.0f) { dist_out[gi + (222912u + (unsigned int)(0 + 0))] = prop_fW[lx]; if (gy < 257) { dist_out[gi + (445824u + (unsigned int)(0 + 288))] = prop_fNW[lx]; } if (gy > 0) { dist_out[gi + (520128u + (unsigned int)(0 + -288))] = prop_fSW[lx]; } } } // Copies momentum transfer for a force object into a linear buffer // so that a force can be computed easily via a sum reduction. // TODO(michalj): Fuse this with summation to improve performance. __global__ void ComputeForceObjects( const unsigned int *__restrict__ idx, const unsigned int *__restrict__ idx2, const float *__restrict__ dist, float *out, const unsigned int max_idx ) { const unsigned int gidx = get_global_id(0); if (gidx >= max_idx) { return; } const unsigned int gi = idx[gidx]; const unsigned int gi2 = idx2[gidx]; const float mx = dist[gi] + dist[gi2]; out[gidx] = mx; } // Applies periodic boundary conditions within a single subdomain. // dist: pointer to the distributions array // axis: along which axis the PBCs are to be applied (0:x, 1:y, 2:z) __global__ void ApplyPeriodicBoundaryConditions( float *dist, int axis) { const int idx1 = get_global_id(0); unsigned int gi_low, gi_high; // For single block PBC, the envelope size (width of the ghost node // layer) is always 1. // TODO(michalj): Generalize this for the case when envelope_size != 1. if (axis == 0) { if (idx1 >= 258) { return; } gi_low = getGlobalIdx(0, idx1); // ghost node gi_high = getGlobalIdx(256, idx1); // real node { // TODO(michalj): Generalize this for grids with e_i > 1. // Load distributions to be propagated from low idx to high idx. const float ffW = dist[gi_low + DIST_SIZE * 3 + (unsigned int)0]; const float ffNW = dist[gi_low + DIST_SIZE * 6 + (unsigned int)0]; const float ffSW = dist[gi_low + DIST_SIZE * 7 + (unsigned int)0]; if (gi_high != INVALID_NODE && isfinite(ffW)) { dist[gi_high + DIST_SIZE * 3 + (unsigned int)0] = ffW; } if (isfinite(ffNW)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 > 1 && idx1 <= 256) { dist[gi_high + DIST_SIZE * 6 + (unsigned int)0] = ffNW; } else { if (0) {} } } if (isfinite(ffSW)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 < 256 && idx1 >= 1) { dist[gi_high + DIST_SIZE * 7 + (unsigned int)0] = ffSW; } else { if (0) {} } } } // low to high { // Load distributrions to be propagated from high idx to low idx. const float ffE = dist[gi_high + DIST_SIZE * 1 + (unsigned int)1]; const float ffNE = dist[gi_high + DIST_SIZE * 5 + (unsigned int)1]; const float ffSE = dist[gi_high + DIST_SIZE * 8 + (unsigned int)1]; if (isfinite(ffE) && gi_low != INVALID_NODE) { dist[gi_low + DIST_SIZE * 1 + (unsigned int)1] = ffE; } if (isfinite(ffNE)) { // Skip distributions which are not populated or cross multiple boundaries. 
if (idx1 > 1 && idx1 <= 256 ) { dist[gi_low + DIST_SIZE * 5 + (unsigned int)1] = ffNE; } else { if (0) {} } } if (isfinite(ffSE)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 < 256 && idx1 >= 1 ) { dist[gi_low + DIST_SIZE * 8 + (unsigned int)1] = ffSE; } else { if (0) {} } } } // high to low } else if (axis == 1) { if (idx1 >= 258) { return; } gi_low = getGlobalIdx(idx1, 0); // ghost node gi_high = getGlobalIdx(idx1, 256); // real node { // TODO(michalj): Generalize this for grids with e_i > 1. // Load distributions to be propagated from low idx to high idx. const float ffS = dist[gi_low + DIST_SIZE * 4 + (unsigned int)0]; const float ffSW = dist[gi_low + DIST_SIZE * 7 + (unsigned int)0]; const float ffSE = dist[gi_low + DIST_SIZE * 8 + (unsigned int)0]; if (gi_high != INVALID_NODE && isfinite(ffS)) { dist[gi_high + DIST_SIZE * 4 + (unsigned int)0] = ffS; } if (isfinite(ffSW)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 < 256 && idx1 >= 1) { dist[gi_high + DIST_SIZE * 7 + (unsigned int)0] = ffSW; } else { if (0) {} } } if (isfinite(ffSE)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 > 1 && idx1 <= 256) { dist[gi_high + DIST_SIZE * 8 + (unsigned int)0] = ffSE; } else { if (0) {} } } } // low to high { // Load distributrions to be propagated from high idx to low idx. const float ffN = dist[gi_high + DIST_SIZE * 2 + (unsigned int)288]; const float ffNE = dist[gi_high + DIST_SIZE * 5 + (unsigned int)288]; const float ffNW = dist[gi_high + DIST_SIZE * 6 + (unsigned int)288]; if (isfinite(ffN) && gi_low != INVALID_NODE) { dist[gi_low + DIST_SIZE * 2 + (unsigned int)288] = ffN; } if (isfinite(ffNE)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 > 1 && idx1 <= 256 ) { dist[gi_low + DIST_SIZE * 5 + (unsigned int)288] = ffNE; } else { if (0) {} } } if (isfinite(ffNW)) { // Skip distributions which are not populated or cross multiple boundaries. if (idx1 < 256 && idx1 >= 1 ) { dist[gi_low + DIST_SIZE * 6 + (unsigned int)288] = ffNW; } else { if (0) {} } } } // high to low } } // Applies periodic boundary conditions to a scalar field within a single subdomain. // dist: pointer to the array with the field data // axis: along which axis the PBCs are to be applied (0:x, 1:y, 2:z) __global__ void ApplyMacroPeriodicBoundaryConditions( float *field, int axis) { const int idx1 = get_global_id(0); unsigned int gi_low, gi_high; // TODO(michalj): Generalize this for the case when envelope_size != 1. if (axis == 0) { if (idx1 >= 258) { return; } gi_low = getGlobalIdx(0, idx1); // ghost node gi_high = getGlobalIdx(256, idx1); // real node if ( isfinite(field[gi_high])) { field[gi_low] = field[gi_high]; } gi_low = getGlobalIdx(1, idx1); // real node gi_high = getGlobalIdx(257, idx1); // ghost node if ( isfinite(field[gi_low])) { field[gi_high] = field[gi_low]; } } else if (axis == 1) { if (idx1 >= 258) { return; } gi_low = getGlobalIdx(idx1, 0); // ghost node gi_high = getGlobalIdx(idx1, 256); // real node if ( isfinite(field[gi_high])) { field[gi_low] = field[gi_high]; } gi_low = getGlobalIdx(idx1, 1); // real node gi_high = getGlobalIdx(idx1, 257); // ghost node if ( isfinite(field[gi_low])) { field[gi_high] = field[gi_low]; } } } // Collects ghost node data for connections along axes other than X. 
// dist: distributions array // base_gy: where along the X axis to start collecting the data // face: see LBBlock class constants // buffer: buffer where the data is to be saved __global__ void CollectContinuousData( float *dist, int face, int base_gx, int max_lx, float *buffer) { const int idx = get_global_id(0); float tmp; if (idx >= max_lx) { return; } switch (face) { case 2: { const int dist_size = max_lx / 3; const int dist_num = idx / dist_size; const int gx = idx % dist_size; unsigned int gi = getGlobalIdx(base_gx + gx, 0); switch (dist_num) { case 0: { tmp = dist[gi + DIST_SIZE * 4 + (unsigned int)0]; break; } case 1: { tmp = dist[gi + DIST_SIZE * 7 + (unsigned int)0]; break; } case 2: { tmp = dist[gi + DIST_SIZE * 8 + (unsigned int)0]; break; } } buffer[idx] = tmp; break; } case 3: { const int dist_size = max_lx / 3; const int dist_num = idx / dist_size; const int gx = idx % dist_size; unsigned int gi = getGlobalIdx(base_gx + gx, 257); switch (dist_num) { case 0: { tmp = dist[gi + DIST_SIZE * 2 + (unsigned int)0]; break; } case 1: { tmp = dist[gi + DIST_SIZE * 5 + (unsigned int)0]; break; } case 2: { tmp = dist[gi + DIST_SIZE * 6 + (unsigned int)0]; break; } } buffer[idx] = tmp; break; } } } __global__ void DistributeContinuousData( float *dist, int face, int base_gx, int max_lx, float *buffer) { const int idx = get_global_id(0); if (idx >= max_lx) { return; } switch (face) { case 2: { const int dist_size = max_lx / 3; const int dist_num = idx / dist_size; const int gx = idx % dist_size; const float tmp = buffer[idx]; unsigned int gi = getGlobalIdx(base_gx + gx, 256); switch (dist_num) { case 0: { dist[gi + DIST_SIZE * 4 + (unsigned int)0] = tmp; break; } case 1: { dist[gi + DIST_SIZE * 7 + (unsigned int)0] = tmp; break; } case 2: { dist[gi + DIST_SIZE * 8 + (unsigned int)0] = tmp; break; } } break; } case 3: { const int dist_size = max_lx / 3; const int dist_num = idx / dist_size; const int gx = idx % dist_size; const float tmp = buffer[idx]; unsigned int gi = getGlobalIdx(base_gx + gx, 1); switch (dist_num) { case 0: { dist[gi + DIST_SIZE * 2 + (unsigned int)0] = tmp; break; } case 1: { dist[gi + DIST_SIZE * 5 + (unsigned int)0] = tmp; break; } case 2: { dist[gi + DIST_SIZE * 6 + (unsigned int)0] = tmp; break; } } break; } } } __global__ void CollectSparseData( unsigned int *idx_array, float *dist, float *buffer, int max_idx) { int idx = get_global_id(0); if (idx >= max_idx) { return; } unsigned int gi = idx_array[idx]; if (gi == INVALID_NODE) return; if (gi >= DIST_SIZE * 9) { printf("invalid node index detected in sparse coll %d (%d, %d)\n", gi, get_global_id(0), get_global_id(1)); return; } buffer[idx] = dist[gi]; } __global__ void DistributeSparseData( unsigned int *idx_array, float *dist, float *buffer, int max_idx) { int idx = get_global_id(0); if (idx >= max_idx) { return; } unsigned int gi = idx_array[idx]; if (gi == INVALID_NODE) return; if (gi >= DIST_SIZE * 9) { printf("invalid node index detected in sparse dist %d (%d, %d)\n", gi, get_global_id(0), get_global_id(1)); return; } dist[gi] = buffer[idx]; } __global__ void CollectContinuousMacroData( float *field, int base_gx, int max_lx, int gy, float *buffer) { const int idx = get_global_id(0); if (idx >= max_lx) { return; } unsigned int gi = getGlobalIdx(base_gx + idx, gy); buffer[idx] = field[gi]; } __global__ void DistributeContinuousMacroData( float *field, int base_gx, int max_lx, int gy, float *buffer) { const int idx = get_global_id(0); if (idx >= max_lx) { return; } unsigned int gi = getGlobalIdx(base_gx + idx, 
gy); field[gi] = buffer[idx]; }
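// ---------------------------------------------------------------------------
// Editor's note: hedged sketch, not part of the generated Sailfish file.
// ComputeForceObjects() above only copies the momentum-transfer terms into a
// linear buffer; the actual force is obtained by summing that buffer (see the
// TODO about fusing the copy with the summation).  The kernel below is a
// standard shared-memory tree reduction that could perform that sum on the
// device.  ReduceSumHypothetical and its launch parameters are illustrative
// names and choices, not part of the original Sailfish code.
__global__ void ReduceSumHypothetical(const float *__restrict__ in,
                                      float *__restrict__ partial,
                                      unsigned int n)
{
    // Requires blockDim.x <= 256 and a power-of-two block size.
    __shared__ float sdata[256];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Load one element per thread (0 for out-of-range threads).
    sdata[tid] = (i < n) ? in[i] : 0.0f;
    __syncthreads();

    // Tree reduction in shared memory.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }

    // One partial sum per block; a second pass (or a host-side sum over the
    // gridDim.x partial results) produces the final force component.
    if (tid == 0) {
        partial[blockIdx.x] = sdata[0];
    }
}
//
// Possible usage after ComputeForceObjects has filled 'out' with max_idx
// values ('partial' is a hypothetical device buffer with one float per block;
// 256 threads per block is an assumption):
//   ReduceSumHypothetical<<<(max_idx + 255) / 256, 256>>>(out, partial, max_idx);
// ---------------------------------------------------------------------------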
8f672784c777fb8092ce302a4bf7e6fcdf8c4a81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel_x2 [5][2]; static int dims_advec_mom_kernel_x2_h [5][2] = {0}; //user function __device__ inline void advec_mom_kernel_x2_gpu(ACC<double> &pre_vol, ACC<double> &post_vol, const ACC<double> &volume, const ACC<double> &vol_flux_y, const ACC<double> &vol_flux_z) { post_vol(0,0,0) = volume(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0); pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_y(0,1,0) - vol_flux_y(0,0,0); } __global__ void ops_advec_mom_kernel_x2( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[0][0] * dims_advec_mom_kernel_x2[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[1][0] * dims_advec_mom_kernel_x2[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[2][0] * dims_advec_mom_kernel_x2[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[3][0] * dims_advec_mom_kernel_x2[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[4][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[4][0] * dims_advec_mom_kernel_x2[4][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel_x2[0][0], dims_advec_mom_kernel_x2[0][1], arg0); ACC<double> argp1(dims_advec_mom_kernel_x2[1][0], dims_advec_mom_kernel_x2[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel_x2[2][0], dims_advec_mom_kernel_x2[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel_x2[3][0], dims_advec_mom_kernel_x2[3][1], arg3); const ACC<double> argp4(dims_advec_mom_kernel_x2[4][0], dims_advec_mom_kernel_x2[4][1], arg4); advec_mom_kernel_x2_gpu(argp0, argp1, argp2, argp3, argp4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel_x2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,123)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(123,"advec_mom_kernel_x2"); OPS_kernels[123].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; 
int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != dims_advec_mom_kernel_x2_h[0][0] || ydim0 != dims_advec_mom_kernel_x2_h[0][1] || xdim1 != dims_advec_mom_kernel_x2_h[1][0] || ydim1 != dims_advec_mom_kernel_x2_h[1][1] || xdim2 != dims_advec_mom_kernel_x2_h[2][0] || ydim2 != dims_advec_mom_kernel_x2_h[2][1] || xdim3 != dims_advec_mom_kernel_x2_h[3][0] || ydim3 != dims_advec_mom_kernel_x2_h[3][1] || xdim4 != dims_advec_mom_kernel_x2_h[4][0] || ydim4 != dims_advec_mom_kernel_x2_h[4][1]) { dims_advec_mom_kernel_x2_h[0][0] = xdim0; dims_advec_mom_kernel_x2_h[0][1] = ydim0; dims_advec_mom_kernel_x2_h[1][0] = xdim1; dims_advec_mom_kernel_x2_h[1][1] = ydim1; dims_advec_mom_kernel_x2_h[2][0] = xdim2; dims_advec_mom_kernel_x2_h[2][1] = ydim2; dims_advec_mom_kernel_x2_h[3][0] = xdim3; dims_advec_mom_kernel_x2_h[3][1] = ydim3; dims_advec_mom_kernel_x2_h[4][0] = xdim4; dims_advec_mom_kernel_x2_h[4][1] = ydim4; cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel_x2, dims_advec_mom_kernel_x2_h, sizeof(dims_advec_mom_kernel_x2))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? 
args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[123].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel_x2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[123].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[123].mpi_time += t2-t1; OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 123; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 123; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + 
range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel_x2_execute; if (OPS_diags > 1) { ops_timing_realloc(123,"advec_mom_kernel_x2"); } ops_enqueue_kernel(desc); } #endif
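// ---------------------------------------------------------------------------
// Editor's note: hedged sketch, not part of the auto-generated OPS wrapper.
// The ACC<double> accessors above hide plain row-major 3D indexing
// (i + j*xdim + k*xdim*ydim, matching the idx_x/idx_y/idx_z offsets computed
// in ops_advec_mom_kernel_x2).  The standalone kernel below performs the same
// pre_vol / post_vol update with raw indices, assuming all five fields share
// one (xdim, ydim, zdim) layout; in the real wrapper each argument carries its
// own sizes via dims_advec_mom_kernel_x2 and the halo handling comes from the
// loop range.  The kernel name and the shared-layout assumption are
// illustrative only.
__global__ void advec_mom_x2_raw_sketch(double *pre_vol, double *post_vol,
                                        const double *volume,
                                        const double *vol_flux_y,
                                        const double *vol_flux_z,
                                        int xdim, int ydim, int zdim)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.z * blockDim.z + threadIdx.z;
    // The (0,1,0) and (0,0,1) accesses need one extra cell in y and z.
    if (i >= xdim || j >= ydim - 1 || k >= zdim - 1) return;

    const int idx    = i + xdim * (j + ydim * k);   // (0,0,0)
    const int idx_yp = idx + xdim;                  // (0,1,0)
    const int idx_zp = idx + xdim * ydim;           // (0,0,1)

    // post_vol = volume plus the z-flux difference; pre_vol additionally adds
    // the y-flux difference, exactly as in advec_mom_kernel_x2_gpu above.
    post_vol[idx] = volume[idx] + vol_flux_z[idx_zp] - vol_flux_z[idx];
    pre_vol[idx]  = post_vol[idx] + vol_flux_y[idx_yp] - vol_flux_y[idx];
}
// ---------------------------------------------------------------------------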
8f672784c777fb8092ce302a4bf7e6fcdf8c4a81.cu
// // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel_x2 [5][2]; static int dims_advec_mom_kernel_x2_h [5][2] = {0}; //user function __device__ inline void advec_mom_kernel_x2_gpu(ACC<double> &pre_vol, ACC<double> &post_vol, const ACC<double> &volume, const ACC<double> &vol_flux_y, const ACC<double> &vol_flux_z) { post_vol(0,0,0) = volume(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0); pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_y(0,1,0) - vol_flux_y(0,0,0); } __global__ void ops_advec_mom_kernel_x2( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[0][0] * dims_advec_mom_kernel_x2[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[1][0] * dims_advec_mom_kernel_x2[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[2][0] * dims_advec_mom_kernel_x2[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[3][0] * dims_advec_mom_kernel_x2[3][1]; arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[4][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[4][0] * dims_advec_mom_kernel_x2[4][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel_x2[0][0], dims_advec_mom_kernel_x2[0][1], arg0); ACC<double> argp1(dims_advec_mom_kernel_x2[1][0], dims_advec_mom_kernel_x2[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel_x2[2][0], dims_advec_mom_kernel_x2[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel_x2[3][0], dims_advec_mom_kernel_x2[3][1], arg3); const ACC<double> argp4(dims_advec_mom_kernel_x2[4][0], dims_advec_mom_kernel_x2[4][1], arg4); advec_mom_kernel_x2_gpu(argp0, argp1, argp2, argp3, argp4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel_x2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,123)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(123,"advec_mom_kernel_x2"); OPS_kernels[123].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = 
args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != dims_advec_mom_kernel_x2_h[0][0] || ydim0 != dims_advec_mom_kernel_x2_h[0][1] || xdim1 != dims_advec_mom_kernel_x2_h[1][0] || ydim1 != dims_advec_mom_kernel_x2_h[1][1] || xdim2 != dims_advec_mom_kernel_x2_h[2][0] || ydim2 != dims_advec_mom_kernel_x2_h[2][1] || xdim3 != dims_advec_mom_kernel_x2_h[3][0] || ydim3 != dims_advec_mom_kernel_x2_h[3][1] || xdim4 != dims_advec_mom_kernel_x2_h[4][0] || ydim4 != dims_advec_mom_kernel_x2_h[4][1]) { dims_advec_mom_kernel_x2_h[0][0] = xdim0; dims_advec_mom_kernel_x2_h[0][1] = ydim0; dims_advec_mom_kernel_x2_h[1][0] = xdim1; dims_advec_mom_kernel_x2_h[1][1] = ydim1; dims_advec_mom_kernel_x2_h[2][0] = xdim2; dims_advec_mom_kernel_x2_h[2][1] = ydim2; dims_advec_mom_kernel_x2_h[3][0] = xdim3; dims_advec_mom_kernel_x2_h[3][1] = ydim3; dims_advec_mom_kernel_x2_h[4][0] = xdim4; dims_advec_mom_kernel_x2_h[4][1] = ydim4; cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel_x2, dims_advec_mom_kernel_x2_h, sizeof(dims_advec_mom_kernel_x2))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? 
args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[123].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel_x2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[123].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[123].mpi_time += t2-t1; OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[123].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 123; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 123; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; 
desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel_x2_execute; if (OPS_diags > 1) { ops_timing_realloc(123,"advec_mom_kernel_x2"); } ops_enqueue_kernel(desc); } #endif
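The host stub above folds the start indices and stencil strides into a byte offset (base0 .. base4) for each padded 3-D dataset before the launch. A minimal standalone sketch of that arithmetic under illustrative names (base_offset_3d is not part of the OPS API):

// Byte offset of the element at (start[0], start[1], start[2]) in a dataset
// padded to xdim-by-ydim planes; elem is the per-element byte size (dat0..dat4)
// and stride the stencil stride, mirroring the base0..base4 computations above.
static inline int base_offset_3d(int base_offset, int elem, int xdim, int ydim,
                                 const int *start, const int *stride) {
  int base = base_offset + elem * 1 * (start[0] * stride[0]);
  base = base + elem * xdim * (start[1] * stride[1]);
  base = base + elem * xdim * ydim * (start[2] * stride[2]);
  return base;
}
// e.g. p_a[0] above is equivalent to args[0].data_d +
//      base_offset_3d(args[0].dat->base_offset, dat0,
//                     args[0].dat->size[0], args[0].dat->size[1],
//                     start, args[0].stencil->stride)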
c61452361298922ff986ab46876950d08ad65429.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> #include <stdlib.h> #include "lodepng.h" __global__ void PictureKernell(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){ int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int BLUR_SIZE = 25, new_pos; if((y < n) && (x < m)) { int pixValR=0, pixValB=0,pixValG=0, pixels = 0; int blurRow, blurCol; for(blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1;++blurRow){ for(blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1;++blurCol){ int curRow = y + blurRow; int curCol = x + blurCol; new_pos = (curRow*m+curCol)*4; if(curRow > -1 && curRow < n && curCol > -1 && curCol < m){ pixValR += d_Pin[new_pos]; pixValG += d_Pin[new_pos+1]; pixValB += d_Pin[new_pos+2]; pixels++; } } new_pos = (y*m+x)*4; d_Pout[new_pos] = (unsigned char)(pixValR/pixels); d_Pout[new_pos+1] = (unsigned char)(pixValG/pixels); d_Pout[new_pos+2] = (unsigned char)(pixValB/pixels); d_Pout[new_pos+3] = d_Pin[new_pos+3]; } } } __global__ void PictureKernel1D(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){ int x = blockIdx.x * blockDim.x + threadIdx.x; int pixValR = 0, pixValG = 0, pixValB = 0; int BLUR_SIZE = 100, blurRow, blurCol; //x = x*4; if(x < n*m) { int pixels=0; for(blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1;++blurRow){ for(blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1;++blurCol){ int curX = blurCol + x; int new_x = (blurRow*m + curX)*4; if(curX > -1 && curX < n*m*4 && new_x > -1 && new_x < n*m*4){ pixValR += d_Pin[new_x]; pixValG += d_Pin[new_x+1]; pixValB += d_Pin[new_x+2]; pixels++; } } d_Pout[x*4] = (unsigned char)(pixValR/pixels); d_Pout[x*4+1] = (unsigned char)(pixValG/pixels); d_Pout[x*4+2] = (unsigned char)(pixValB/pixels); d_Pout[x*4+3] = d_Pin[x*4+3]; } } } void Picture(unsigned char* Pin, unsigned char* Pout, int n, int m){ unsigned char* d_Pout, *d_Pin; long int size = n*m*4; hipMalloc((void **) &d_Pin,size); hipMemcpy(d_Pin, Pin, size, hipMemcpyHostToDevice); hipMalloc((void **) &d_Pout,size); dim3 gridDim((m-1)/8+1,(n-1)/16+1,1); dim3 blockDim(8,16,1); hipLaunchKernelGGL(( PictureKernell), dim3(gridDim),dim3(blockDim), 0, 0, d_Pin,d_Pout,n,m); //PictureKernel1D<<<(size-1)/256+1,256>>>(d_Pin,d_Pout,n,m); hipMemcpy(Pout, d_Pout, size, hipMemcpyDeviceToHost); hipFree(d_Pin); hipFree(d_Pout); } int main(){ unsigned char *image, *out_image; int i; char name_in[100], name_out[100]; unsigned width, height; scanf("%s %s", name_in, name_out); i = lodepng_decode32_file(&image, &width, &height, name_in); if(i < 0) printf("NO\n"); out_image = (unsigned char*) malloc(width*height*4); /*for(i = 0; i < (width * height)*4; i++){ if(i%4==0) image[i] = 0; if(i%4==1) image[i] = 255; if(i%4==3) image[i] = 120; }*/ Picture(image,out_image,height,width); lodepng_encode32_file(name_out,out_image,width,height); free(image); free(out_image); return 0; }
c61452361298922ff986ab46876950d08ad65429.cu
#include <stdio.h> #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> #include <stdlib.h> #include "lodepng.h" __global__ void PictureKernell(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){ int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int BLUR_SIZE = 25, new_pos; if((y < n) && (x < m)) { int pixValR=0, pixValB=0,pixValG=0, pixels = 0; int blurRow, blurCol; for(blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1;++blurRow){ for(blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1;++blurCol){ int curRow = y + blurRow; int curCol = x + blurCol; new_pos = (curRow*m+curCol)*4; if(curRow > -1 && curRow < n && curCol > -1 && curCol < m){ pixValR += d_Pin[new_pos]; pixValG += d_Pin[new_pos+1]; pixValB += d_Pin[new_pos+2]; pixels++; } } new_pos = (y*m+x)*4; d_Pout[new_pos] = (unsigned char)(pixValR/pixels); d_Pout[new_pos+1] = (unsigned char)(pixValG/pixels); d_Pout[new_pos+2] = (unsigned char)(pixValB/pixels); d_Pout[new_pos+3] = d_Pin[new_pos+3]; } } } __global__ void PictureKernel1D(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){ int x = blockIdx.x * blockDim.x + threadIdx.x; int pixValR = 0, pixValG = 0, pixValB = 0; int BLUR_SIZE = 100, blurRow, blurCol; //x = x*4; if(x < n*m) { int pixels=0; for(blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1;++blurRow){ for(blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1;++blurCol){ int curX = blurCol + x; int new_x = (blurRow*m + curX)*4; if(curX > -1 && curX < n*m*4 && new_x > -1 && new_x < n*m*4){ pixValR += d_Pin[new_x]; pixValG += d_Pin[new_x+1]; pixValB += d_Pin[new_x+2]; pixels++; } } d_Pout[x*4] = (unsigned char)(pixValR/pixels); d_Pout[x*4+1] = (unsigned char)(pixValG/pixels); d_Pout[x*4+2] = (unsigned char)(pixValB/pixels); d_Pout[x*4+3] = d_Pin[x*4+3]; } } } void Picture(unsigned char* Pin, unsigned char* Pout, int n, int m){ unsigned char* d_Pout, *d_Pin; long int size = n*m*4; cudaMalloc((void **) &d_Pin,size); cudaMemcpy(d_Pin, Pin, size, cudaMemcpyHostToDevice); cudaMalloc((void **) &d_Pout,size); dim3 gridDim((m-1)/8+1,(n-1)/16+1,1); dim3 blockDim(8,16,1); PictureKernell<<<gridDim,blockDim>>>(d_Pin,d_Pout,n,m); //PictureKernel1D<<<(size-1)/256+1,256>>>(d_Pin,d_Pout,n,m); cudaMemcpy(Pout, d_Pout, size, cudaMemcpyDeviceToHost); cudaFree(d_Pin); cudaFree(d_Pout); } int main(){ unsigned char *image, *out_image; int i; char name_in[100], name_out[100]; unsigned width, height; scanf("%s %s", name_in, name_out); i = lodepng_decode32_file(&image, &width, &height, name_in); if(i < 0) printf("NO\n"); out_image = (unsigned char*) malloc(width*height*4); /*for(i = 0; i < (width * height)*4; i++){ if(i%4==0) image[i] = 0; if(i%4==1) image[i] = 255; if(i%4==3) image[i] = 120; }*/ Picture(image,out_image,height,width); lodepng_encode32_file(name_out,out_image,width,height); free(image); free(out_image); return 0; }
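Picture() above calls cudaMalloc and cudaMemcpy (hipMalloc/hipMemcpy in the hipified copy) without checking return codes. A minimal error-checking sketch that could wrap those calls; CHECK_CUDA is a hypothetical macro, not part of the original program or of lodepng:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a readable message when a CUDA runtime call fails.
#define CHECK_CUDA(call)                                          \
  do {                                                            \
    cudaError_t err_ = (call);                                    \
    if (err_ != cudaSuccess) {                                    \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                 \
              cudaGetErrorString(err_), __FILE__, __LINE__);      \
      exit(EXIT_FAILURE);                                         \
    }                                                             \
  } while (0)

// Usage, mirroring the allocations in Picture():
//   CHECK_CUDA(cudaMalloc((void **)&d_Pin, size));
//   CHECK_CUDA(cudaMemcpy(d_Pin, Pin, size, cudaMemcpyHostToDevice));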
0cb444faf642952b59e38e143e686ecefd5853b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 30-May-2011 21:57:40 // // user function __device__ #include "res.h" // CUDA kernel function __global__ void op_cuda_res( float *ind_arg0, int *ind_arg0_maps, float *ind_arg1, int *ind_arg1_maps, float *arg0, short *arg1_maps, short *arg2_maps, const float *arg3, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors) { float arg2_l[1]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ float *ind_arg0_s; __shared__ float *ind_arg1_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*2]; ind_arg1_size = ind_arg_sizes[1+blockId*2]; ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*2]; ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*2]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*1); ind_arg1_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*1; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%1+ind_arg0_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg1_size*1; n+=blockDim.x) ind_arg1_s[n] = ZERO_float; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<1; d++) arg2_l[d] = ZERO_float; // user-supplied kernel call res( arg0+(n+offset_b)*1, ind_arg0_s+arg1_maps[n+offset_b]*1, arg2_l, arg3 ); col2 = colors[n+offset_b]; } // store local variables int arg2_map = arg2_maps[n+offset_b]; for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<1; d++) ind_arg1_s[d+arg2_map*1] += arg2_l[d]; } __syncthreads(); } } // apply pointered write/increment for (int n=threadIdx.x; n<ind_arg1_size*1; n+=blockDim.x) ind_arg1[n%1+ind_arg1_map[n/1]*1] += ind_arg1_s[n]; } // host stub function void op_par_loop_res(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3 ){ float *arg3h = (float *)arg3.data; int nargs = 4; op_arg args[4] = {arg0,arg1,arg2,arg3}; int ninds = 2; int inds[4] = {-1,0,1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: res \n"); } // get plan #ifdef OP_PART_SIZE_0 int part_size = OP_PART_SIZE_0; #else int part_size = OP_part_size; #endif op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers(&cpu_t1, &wall_t1); // transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(float)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg3.data = OP_consts_h + consts_bytes; arg3.data_d = OP_consts_d + consts_bytes; for (int d=0; d<1; d++) ((float *)arg3.data)[d] = arg3h[d]; consts_bytes += ROUND_UP(1*sizeof(float)); mvConstArraysToDevice(consts_bytes); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { #ifdef OP_BLOCK_SIZE_0 int nthread = OP_BLOCK_SIZE_0; #else int nthread = OP_block_size; 
#endif int nblocks = Plan->ncolblk[col]; int nshared = Plan->nshared; hipLaunchKernelGGL(( op_cuda_res), dim3(nblocks),dim3(nthread),nshared, 0, (float *)arg1.data_d, Plan->ind_maps[0], (float *)arg2.data_d, Plan->ind_maps[1], (float *)arg0.data_d, Plan->loc_maps[1], Plan->loc_maps[2], (float *)arg3.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_res execution failed\n"); block_offset += nblocks; } // update kernel record op_timers(&cpu_t2, &wall_t2); op_timing_realloc(0); OP_kernels[0].name = name; OP_kernels[0].count += 1; OP_kernels[0].time += wall_t2 - wall_t1; OP_kernels[0].transfer += Plan->transfer; OP_kernels[0].transfer2 += Plan->transfer2; }
0cb444faf642952b59e38e143e686ecefd5853b0.cu
// // auto-generated by op2.m on 30-May-2011 21:57:40 // // user function __device__ #include "res.h" // CUDA kernel function __global__ void op_cuda_res( float *ind_arg0, int *ind_arg0_maps, float *ind_arg1, int *ind_arg1_maps, float *arg0, short *arg1_maps, short *arg2_maps, const float *arg3, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors) { float arg2_l[1]; __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ int *ind_arg1_map, ind_arg1_size; __shared__ float *ind_arg0_s; __shared__ float *ind_arg1_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*2]; ind_arg1_size = ind_arg_sizes[1+blockId*2]; ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*2]; ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*2]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (float *) &shared[nbytes]; nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*1); ind_arg1_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*1; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%1+ind_arg0_map[n/1]*1]; for (int n=threadIdx.x; n<ind_arg1_size*1; n+=blockDim.x) ind_arg1_s[n] = ZERO_float; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) { int col2 = -1; if (n<nelem) { // initialise local variables for (int d=0; d<1; d++) arg2_l[d] = ZERO_float; // user-supplied kernel call res( arg0+(n+offset_b)*1, ind_arg0_s+arg1_maps[n+offset_b]*1, arg2_l, arg3 ); col2 = colors[n+offset_b]; } // store local variables int arg2_map = arg2_maps[n+offset_b]; for (int col=0; col<ncolor; col++) { if (col2==col) { for (int d=0; d<1; d++) ind_arg1_s[d+arg2_map*1] += arg2_l[d]; } __syncthreads(); } } // apply pointered write/increment for (int n=threadIdx.x; n<ind_arg1_size*1; n+=blockDim.x) ind_arg1[n%1+ind_arg1_map[n/1]*1] += ind_arg1_s[n]; } // host stub function void op_par_loop_res(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3 ){ float *arg3h = (float *)arg3.data; int nargs = 4; op_arg args[4] = {arg0,arg1,arg2,arg3}; int ninds = 2; int inds[4] = {-1,0,1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: res \n"); } // get plan #ifdef OP_PART_SIZE_0 int part_size = OP_PART_SIZE_0; #else int part_size = OP_part_size; #endif op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers(&cpu_t1, &wall_t1); // transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(float)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg3.data = OP_consts_h + consts_bytes; arg3.data_d = OP_consts_d + consts_bytes; for (int d=0; d<1; d++) ((float *)arg3.data)[d] = arg3h[d]; consts_bytes += ROUND_UP(1*sizeof(float)); mvConstArraysToDevice(consts_bytes); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { #ifdef OP_BLOCK_SIZE_0 int nthread = OP_BLOCK_SIZE_0; #else int nthread = OP_block_size; #endif int nblocks = Plan->ncolblk[col]; int nshared = Plan->nshared; 
op_cuda_res<<<nblocks,nthread,nshared>>>( (float *)arg1.data_d, Plan->ind_maps[0], (float *)arg2.data_d, Plan->ind_maps[1], (float *)arg0.data_d, Plan->loc_maps[1], Plan->loc_maps[2], (float *)arg3.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol); cutilSafeCall(cudaThreadSynchronize()); cutilCheckMsg("op_cuda_res execution failed\n"); block_offset += nblocks; } // update kernel record op_timers(&cpu_t2, &wall_t2); op_timing_realloc(0); OP_kernels[0].name = name; OP_kernels[0].count += 1; OP_kernels[0].time += wall_t2 - wall_t1; OP_kernels[0].transfer += Plan->transfer; OP_kernels[0].transfer2 += Plan->transfer2; }
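op_cuda_res above stages the indirectly mapped dataset into shared memory with a thread-strided copy before the colored update pass. A standalone sketch of just that staging idiom (stage_indirect and its arguments are illustrative names, not OP2 API); it assumes a launch with n_local * sizeof(float) bytes of dynamic shared memory:

__global__ void stage_indirect(const float *g_src, const int *map,
                               float *g_dst, int n_local) {
  extern __shared__ float s_buf[];   // sized at launch: n_local floats
  // cooperative gather through the indirection map, as in op_cuda_res
  for (int n = threadIdx.x; n < n_local; n += blockDim.x)
    s_buf[n] = g_src[map[n]];
  __syncthreads();
  // placeholder consumer: copy the staged values back out contiguously
  for (int n = threadIdx.x; n < n_local; n += blockDim.x)
    g_dst[n] = s_buf[n];
}
// launch sketch: stage_indirect<<<1, 256, n_local * sizeof(float)>>>(src, map, dst, n_local);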
a22f6d6255c44b5ac1eba1d27869888ff6b15cb4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <math.h>
#include <float.h>
#include "roi_align_kernel.h"

// kernel function
#define CUDA_1D_KERNEL_LOOP(i, n)                                  \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;     \
         i += blockDim.x * gridDim.x)

__global__ void ROIAlignForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* top_data) {
    CUDA_1D_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the aligned output
        // int n = index;
        // int pw = n % aligned_width;
        // n /= aligned_width;
        // int ph = n % aligned_height;
        // n /= aligned_height;
        // int c = n % channels;
        // n /= channels;
        // same as above: recover the n, c, ph, pw indices
        int pw = index % aligned_width;
        int ph = (index / aligned_width) % aligned_height;
        int c = (index / aligned_width / aligned_height) % channels;
        int n = index / aligned_width / aligned_height / channels;

        // bottom_rois += n * 5;
        float roi_batch_ind = bottom_rois[n * 5 + 0];
        float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
        float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
        float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
        float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;

        // Force malformed ROIs to be 1x1
        float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
        float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
        float bin_size_h = roi_height / (aligned_height - 1.);
        float bin_size_w = roi_width / (aligned_width - 1.);

        float h = (float)(ph) * bin_size_h + roi_start_h;
        float w = (float)(pw) * bin_size_w + roi_start_w;

        int hstart = fminf(floor(h), height - 2);
        int wstart = fminf(floor(w), width - 2);

        int img_start = roi_batch_ind * channels * height * width;

        // bilinear interpolation
        if (h < 0 || h >= height || w < 0 || w >= width) {
            top_data[index] = 0.;
        } else {
            float h_ratio = h - (float)(hstart);
            float w_ratio = w - (float)(wstart);
            int upleft = img_start + (c * height + hstart) * width + wstart;
            int upright = upleft + 1;
            int downleft = upleft + width; // +width, i.e. +1 on the row coordinate
            int downright = downleft + 1;

            top_data[index] = bottom_data[upleft] * (1. - h_ratio) * (1. - w_ratio)
                            + bottom_data[upright] * (1. - h_ratio) * w_ratio
                            + bottom_data[downleft] * h_ratio * (1. - w_ratio)
                            + bottom_data[downright] * h_ratio * w_ratio;
        }
    }
}

int ROIAlignForwardLaucher(const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* top_data, hipStream_t stream) {
    const int kThreadsPerBlock = 1024;
    const int output_size = num_rois * aligned_height * aligned_width * channels;
    hipError_t err;

    // thread/block count setup for the CUDA launch; for details see https://blog.csdn.net/fishseeker/article/details/75093166 or search the web...
hipLaunchKernelGGL(( ROIAlignForward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, output_size, bottom_data, spatial_scale, height, width, channels, aligned_height, aligned_width, bottom_rois, top_data); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ROIAlignBackward(const int nthreads, const float* top_diff, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; float roi_batch_ind = bottom_rois[n * 5 + 0]; float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale; float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale; float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale; float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale; /* int roi_start_w = round(bottom_rois[1] * spatial_scale); */ /* int roi_start_h = round(bottom_rois[2] * spatial_scale); */ /* int roi_end_w = round(bottom_rois[3] * spatial_scale); */ /* int roi_end_h = round(bottom_rois[4] * spatial_scale); */ // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.); float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.); float bin_size_h = roi_height / (aligned_height - 1.); float bin_size_w = roi_width / (aligned_width - 1.); float h = (float)(ph) * bin_size_h + roi_start_h; float w = (float)(pw) * bin_size_w + roi_start_w; int hstart = fminf(floor(h), height - 2); int wstart = fminf(floor(w), width - 2); int img_start = roi_batch_ind * channels * height * width; // bilinear interpolation if (!(h < 0 || h >= height || w < 0 || w >= width)) { float h_ratio = h - (float)(hstart); float w_ratio = w - (float)(wstart); int upleft = img_start + (c * height + hstart) * width + wstart; int upright = upleft + 1; int downleft = upleft + width; int downright = downleft + 1; atomicAdd(bottom_diff + upleft, top_diff[index] * (1. - h_ratio) * (1 - w_ratio)); atomicAdd(bottom_diff + upright, top_diff[index] * (1. - h_ratio) * w_ratio); atomicAdd(bottom_diff + downleft, top_diff[index] * h_ratio * (1 - w_ratio)); atomicAdd(bottom_diff + downright, top_diff[index] * h_ratio * w_ratio); } } } int ROIAlignBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* bottom_diff, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * aligned_width * channels; hipError_t err; hipLaunchKernelGGL(( ROIAlignBackward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, output_size, top_diff, spatial_scale, height, width, channels, aligned_height, aligned_width, bottom_diff, bottom_rois); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
a22f6d6255c44b5ac1eba1d27869888ff6b15cb4.cu
#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <math.h>
#include <float.h>
#include "roi_align_kernel.h"

// kernel function
#define CUDA_1D_KERNEL_LOOP(i, n)                                  \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;     \
         i += blockDim.x * gridDim.x)

__global__ void ROIAlignForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* top_data) {
    CUDA_1D_KERNEL_LOOP(index, nthreads) {
        // (n, c, ph, pw) is an element in the aligned output
        // int n = index;
        // int pw = n % aligned_width;
        // n /= aligned_width;
        // int ph = n % aligned_height;
        // n /= aligned_height;
        // int c = n % channels;
        // n /= channels;
        // same as above: recover the n, c, ph, pw indices
        int pw = index % aligned_width;
        int ph = (index / aligned_width) % aligned_height;
        int c = (index / aligned_width / aligned_height) % channels;
        int n = index / aligned_width / aligned_height / channels;

        // bottom_rois += n * 5;
        float roi_batch_ind = bottom_rois[n * 5 + 0];
        float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
        float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
        float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
        float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;

        // Force malformed ROIs to be 1x1
        float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
        float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
        float bin_size_h = roi_height / (aligned_height - 1.);
        float bin_size_w = roi_width / (aligned_width - 1.);

        float h = (float)(ph) * bin_size_h + roi_start_h;
        float w = (float)(pw) * bin_size_w + roi_start_w;

        int hstart = fminf(floor(h), height - 2);
        int wstart = fminf(floor(w), width - 2);

        int img_start = roi_batch_ind * channels * height * width;

        // bilinear interpolation
        if (h < 0 || h >= height || w < 0 || w >= width) {
            top_data[index] = 0.;
        } else {
            float h_ratio = h - (float)(hstart);
            float w_ratio = w - (float)(wstart);
            int upleft = img_start + (c * height + hstart) * width + wstart;
            int upright = upleft + 1;
            int downleft = upleft + width; // +width, i.e. +1 on the row coordinate
            int downright = downleft + 1;

            top_data[index] = bottom_data[upleft] * (1. - h_ratio) * (1. - w_ratio)
                            + bottom_data[upright] * (1. - h_ratio) * w_ratio
                            + bottom_data[downleft] * h_ratio * (1. - w_ratio)
                            + bottom_data[downright] * h_ratio * w_ratio;
        }
    }
}

int ROIAlignForwardLaucher(const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* top_data, cudaStream_t stream) {
    const int kThreadsPerBlock = 1024;
    const int output_size = num_rois * aligned_height * aligned_width * channels;
    cudaError_t err;

    // thread/block count setup for the CUDA launch; for details see https://blog.csdn.net/fishseeker/article/details/75093166 or search the web...
ROIAlignForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, bottom_data, spatial_scale, height, width, channels, aligned_height, aligned_width, bottom_rois, top_data); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ROIAlignBackward(const int nthreads, const float* top_diff, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; float roi_batch_ind = bottom_rois[n * 5 + 0]; float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale; float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale; float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale; float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale; /* int roi_start_w = round(bottom_rois[1] * spatial_scale); */ /* int roi_start_h = round(bottom_rois[2] * spatial_scale); */ /* int roi_end_w = round(bottom_rois[3] * spatial_scale); */ /* int roi_end_h = round(bottom_rois[4] * spatial_scale); */ // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.); float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.); float bin_size_h = roi_height / (aligned_height - 1.); float bin_size_w = roi_width / (aligned_width - 1.); float h = (float)(ph) * bin_size_h + roi_start_h; float w = (float)(pw) * bin_size_w + roi_start_w; int hstart = fminf(floor(h), height - 2); int wstart = fminf(floor(w), width - 2); int img_start = roi_batch_ind * channels * height * width; // bilinear interpolation if (!(h < 0 || h >= height || w < 0 || w >= width)) { float h_ratio = h - (float)(hstart); float w_ratio = w - (float)(wstart); int upleft = img_start + (c * height + hstart) * width + wstart; int upright = upleft + 1; int downleft = upleft + width; int downright = downleft + 1; atomicAdd(bottom_diff + upleft, top_diff[index] * (1. - h_ratio) * (1 - w_ratio)); atomicAdd(bottom_diff + upright, top_diff[index] * (1. - h_ratio) * w_ratio); atomicAdd(bottom_diff + downleft, top_diff[index] * h_ratio * (1 - w_ratio)); atomicAdd(bottom_diff + downright, top_diff[index] * h_ratio * w_ratio); } } } int ROIAlignBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* bottom_diff, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * aligned_width * channels; cudaError_t err; ROIAlignBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, top_diff, spatial_scale, height, width, channels, aligned_height, aligned_width, bottom_diff, bottom_rois); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
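Both ROI-align kernels above come down to the same four-corner bilinear weighting once h_ratio and w_ratio are known; the backward kernel scatters the same four weights with atomicAdd. A host-side sketch of the weighting for reference (bilinear is an illustrative name, not part of roi_align_kernel.h):

// value = UL*(1-hr)*(1-wr) + UR*(1-hr)*wr + DL*hr*(1-wr) + DR*hr*wr,
// where hr/wr are the fractional offsets of (h, w) from (hstart, wstart).
static inline float bilinear(float ul, float ur, float dl, float dr,
                             float h_ratio, float w_ratio) {
  return ul * (1.f - h_ratio) * (1.f - w_ratio)
       + ur * (1.f - h_ratio) * w_ratio
       + dl * h_ratio * (1.f - w_ratio)
       + dr * h_ratio * w_ratio;
}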
30a64a7ddb1987607d8c48675f4c035e79433550.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #define INDEX_NUM 3 #define INDEX_SUM 0 #define INDEX_MAX 1 #define INDEX_MIN 2 #define NUM_MAX 1024 #define ITEMS_NUM (1024 * 1024) #define BLOCK_SIZE 256 using namespace std; // TODO-1 => ./task_no_atomic // 1 thread does all compute, no atomic/sync // thread.0 of block.0 computes everything __global__ void kernel_no_atomics(int *data, int *results) { if(threadIdx.x || blockIdx.x) return; for(int i = 0; i != ITEMS_NUM; ++i) { results[INDEX_SUM] += data[i]; results[INDEX_MAX] = (data[i] > results[INDEX_MAX]) ? data[i] : results[INDEX_MAX]; results[INDEX_MIN] = (data[i] < results[INDEX_MIN]) ? data[i] : results[INDEX_MIN]; } } // TODO-2 => ./task_partial_atomic // ITEMS_NUM / 256 threads, ITEMS_NUM / 256 * 3 atomic calls // thread.0 of each block does partial compute, than uses atomics to compute __global__ void kernel_partial_atomics(int *data, int *results) { if(threadIdx.x) return; int start = blockIdx.x * BLOCK_SIZE; int localRes[INDEX_NUM] = {0}; localRes[INDEX_MIN] = NUM_MAX; for (int i = 0; i != BLOCK_SIZE; ++i) { localRes[INDEX_SUM] += data[start + i]; localRes[INDEX_MAX] = (data[start + i] > localRes[INDEX_MAX]) ? data[start + i] : localRes[INDEX_MAX]; localRes[INDEX_MIN] = (data[start + i] < localRes[INDEX_MIN]) ? data[start + i] : localRes[INDEX_MIN]; } atomicAdd(results + INDEX_SUM, localRes[INDEX_SUM]); atomicMax(results + INDEX_MAX, localRes[INDEX_MAX]); atomicMin(results + INDEX_MIN, localRes[INDEX_MIN]); } // TODO-3 => ./task_full_atomic // ITEMS_NUM threads do compute, ITEMS_NUM * 3 atomic calls // all threads to atomics to compute __global__ void kernel_full_atomics(int *data, int *results) { int pos = blockIdx.x * blockDim.x + threadIdx.x; atomicAdd(results + INDEX_SUM, data[pos]); atomicMax(results + INDEX_MAX, data[pos]); atomicMin(results + INDEX_MIN, data[pos]); } int main(void) { int expResults[INDEX_NUM]; int *data = NULL; hipMallocManaged(&data, ITEMS_NUM * sizeof(int)); if (data == 0) { cout << "[HOST] Couldn't allocate memory\n"; return 1; } // generate data and expected result expResults[INDEX_SUM] = 0; expResults[INDEX_MAX] = 0; expResults[INDEX_MIN] = NUM_MAX; for(int i = 0; i < ITEMS_NUM; i++) { // each generated number is lower than NUM_MAX as value data[i] = rand() % NUM_MAX; expResults[INDEX_SUM] += data[i]; expResults[INDEX_MAX] = (data[i] > expResults[INDEX_MAX]) ? data[i] : expResults[INDEX_MAX]; expResults[INDEX_MIN] = (data[i] < expResults[INDEX_MIN]) ? 
data[i] : expResults[INDEX_MIN]; } int *results = NULL; hipMallocManaged(&results, INDEX_NUM * sizeof(int)); if (results == 0) { cout << "[HOST] Couldn't allocate memory\n"; return 1; } // compute 10 times the results for(int i = 0; i < 10; i++) { // init results[INDEX_SUM] = 0; results[INDEX_MAX] = 0; results[INDEX_MIN] = NUM_MAX; #ifdef NO_ATOMIC hipLaunchKernelGGL(( kernel_no_atomics), dim3(1) , dim3(1) , 0, 0, data, results); hipDeviceSynchronize(); #endif #ifdef PARTIAL_ATOMIC hipLaunchKernelGGL(( kernel_partial_atomics), dim3(ITEMS_NUM / 256) , dim3(1) , 0, 0, data, results); hipDeviceSynchronize(); #endif #ifdef FULL_ATOMIC hipLaunchKernelGGL(( kernel_full_atomics), dim3(ITEMS_NUM / 256) , dim3(256) , 0, 0, data, results); hipDeviceSynchronize(); #endif } cout << "SUM: " << results[INDEX_SUM] << endl; if(results[INDEX_SUM] != expResults[INDEX_SUM]) { cout << "Failed, SUM should be " << expResults[INDEX_SUM] << endl; } cout << "MAX: " << results[INDEX_MAX] << endl; if(results[INDEX_MAX] != expResults[INDEX_MAX]) { cout << "Failed, MAX should be " << expResults[INDEX_MAX] << endl; } cout << "MIN: " << results[INDEX_MIN] << endl; if(results[INDEX_MIN] != expResults[INDEX_MIN]) { cout << "Failed, MIN should be " << expResults[INDEX_MIN] << endl; } hipFree(results); return 0; }
30a64a7ddb1987607d8c48675f4c035e79433550.cu
#include <iostream> #define INDEX_NUM 3 #define INDEX_SUM 0 #define INDEX_MAX 1 #define INDEX_MIN 2 #define NUM_MAX 1024 #define ITEMS_NUM (1024 * 1024) #define BLOCK_SIZE 256 using namespace std; // TODO-1 => ./task_no_atomic // 1 thread does all compute, no atomic/sync // thread.0 of block.0 computes everything __global__ void kernel_no_atomics(int *data, int *results) { if(threadIdx.x || blockIdx.x) return; for(int i = 0; i != ITEMS_NUM; ++i) { results[INDEX_SUM] += data[i]; results[INDEX_MAX] = (data[i] > results[INDEX_MAX]) ? data[i] : results[INDEX_MAX]; results[INDEX_MIN] = (data[i] < results[INDEX_MIN]) ? data[i] : results[INDEX_MIN]; } } // TODO-2 => ./task_partial_atomic // ITEMS_NUM / 256 threads, ITEMS_NUM / 256 * 3 atomic calls // thread.0 of each block does partial compute, than uses atomics to compute __global__ void kernel_partial_atomics(int *data, int *results) { if(threadIdx.x) return; int start = blockIdx.x * BLOCK_SIZE; int localRes[INDEX_NUM] = {0}; localRes[INDEX_MIN] = NUM_MAX; for (int i = 0; i != BLOCK_SIZE; ++i) { localRes[INDEX_SUM] += data[start + i]; localRes[INDEX_MAX] = (data[start + i] > localRes[INDEX_MAX]) ? data[start + i] : localRes[INDEX_MAX]; localRes[INDEX_MIN] = (data[start + i] < localRes[INDEX_MIN]) ? data[start + i] : localRes[INDEX_MIN]; } atomicAdd(results + INDEX_SUM, localRes[INDEX_SUM]); atomicMax(results + INDEX_MAX, localRes[INDEX_MAX]); atomicMin(results + INDEX_MIN, localRes[INDEX_MIN]); } // TODO-3 => ./task_full_atomic // ITEMS_NUM threads do compute, ITEMS_NUM * 3 atomic calls // all threads to atomics to compute __global__ void kernel_full_atomics(int *data, int *results) { int pos = blockIdx.x * blockDim.x + threadIdx.x; atomicAdd(results + INDEX_SUM, data[pos]); atomicMax(results + INDEX_MAX, data[pos]); atomicMin(results + INDEX_MIN, data[pos]); } int main(void) { int expResults[INDEX_NUM]; int *data = NULL; cudaMallocManaged(&data, ITEMS_NUM * sizeof(int)); if (data == 0) { cout << "[HOST] Couldn't allocate memory\n"; return 1; } // generate data and expected result expResults[INDEX_SUM] = 0; expResults[INDEX_MAX] = 0; expResults[INDEX_MIN] = NUM_MAX; for(int i = 0; i < ITEMS_NUM; i++) { // each generated number is lower than NUM_MAX as value data[i] = rand() % NUM_MAX; expResults[INDEX_SUM] += data[i]; expResults[INDEX_MAX] = (data[i] > expResults[INDEX_MAX]) ? data[i] : expResults[INDEX_MAX]; expResults[INDEX_MIN] = (data[i] < expResults[INDEX_MIN]) ? 
data[i] : expResults[INDEX_MIN]; } int *results = NULL; cudaMallocManaged(&results, INDEX_NUM * sizeof(int)); if (results == 0) { cout << "[HOST] Couldn't allocate memory\n"; return 1; } // compute 10 times the results for(int i = 0; i < 10; i++) { // init results[INDEX_SUM] = 0; results[INDEX_MAX] = 0; results[INDEX_MIN] = NUM_MAX; #ifdef NO_ATOMIC kernel_no_atomics<<< 1 , 1 >>> (data, results); cudaDeviceSynchronize(); #endif #ifdef PARTIAL_ATOMIC kernel_partial_atomics<<< ITEMS_NUM / 256 , 1 >>> (data, results); cudaDeviceSynchronize(); #endif #ifdef FULL_ATOMIC kernel_full_atomics<<< ITEMS_NUM / 256 , 256 >>> (data, results); cudaDeviceSynchronize(); #endif } cout << "SUM: " << results[INDEX_SUM] << endl; if(results[INDEX_SUM] != expResults[INDEX_SUM]) { cout << "Failed, SUM should be " << expResults[INDEX_SUM] << endl; } cout << "MAX: " << results[INDEX_MAX] << endl; if(results[INDEX_MAX] != expResults[INDEX_MAX]) { cout << "Failed, MAX should be " << expResults[INDEX_MAX] << endl; } cout << "MIN: " << results[INDEX_MIN] << endl; if(results[INDEX_MIN] != expResults[INDEX_MIN]) { cout << "Failed, MIN should be " << expResults[INDEX_MIN] << endl; } cudaFree(results); return 0; }
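The three TODO variants above trade one thread doing all the work against per-element atomics. A hedged sketch of a fourth option that is not in the lab code: a shared-memory tree reduction for the SUM with one atomicAdd per block. It reuses BLOCK_SIZE and INDEX_SUM from above and assumes a <<<ITEMS_NUM / BLOCK_SIZE, BLOCK_SIZE>>> launch; MIN and MAX could be handled the same way with atomicMin/atomicMax at the end.

__global__ void kernel_block_reduce_sum(int *data, int *results) {
  __shared__ int s_sum[BLOCK_SIZE];
  int tid = threadIdx.x;
  s_sum[tid] = data[blockIdx.x * blockDim.x + tid];
  __syncthreads();
  // halve the number of active threads each step until s_sum[0] holds the block sum
  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (tid < stride)
      s_sum[tid] += s_sum[tid + stride];
    __syncthreads();
  }
  if (tid == 0)
    atomicAdd(results + INDEX_SUM, s_sum[0]);
}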
1f0264fa5e88a1ab65b89fa9ee1653751514297c.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string> #include <time.h> #include <sys/time.h> #include "CKconverter/CKReader.h" #include "zerork/mechanism_cuda.h" #include <nvector/nvector_serial.h> // serial N_Vector types, fcts., and macros #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" typedef struct { int nSpc; double dens; double invDens; double dTemp_dt; double meanCvMass; double *netProdRate; double *stepROP; double *createRate; double *destroyRate; double *molWt; double *conc; double *cvSpc; double *internalEnergy; zerork::mechanism_cuda *mech; } ode_cv_param; double getHighResolutionTime(void) { struct timeval tod; gettimeofday(&tod, NULL); double time_seconds = (double) tod.tv_sec + ((double) tod.tv_usec / 1000000.0); return time_seconds; } using namespace std; using namespace ckr; #define MAX_LINE_LEN 1024 int GetLine(FILE *InFile, char *ReadLine, char UntilChar, int MaxChar); int const_vol_wsr(realtype t, N_Vector y, N_Vector ydot, void *user_data); int main(int argc, char *argv[]) { FILE *stateFptr,*outputFptr; CKReader *ckread; zerork::mechanism_cuda *mech; int j,k; int nEval,nReactors; int nSpc, nRxn, nmechSpc,nStep; char readLine[MAX_LINE_LEN]; double *moleFrac; double *massFrac,*cpSpc,*hSpc,*gSpc; double *Kfwd,*Krev;//,*stepROP; // double *netSpc,*createSpc,*destroySpc; // double dY; double pres,Temp,rvol,dens,molwtMix,presConvert,cpMix,cvMix,uMix,hMix; double gasConstant; double startTime,stopTime; ode_cv_param sysParam; if(argc != 8) { printf("ERROR: incorrect command line usage.\n"); printf(" use instead %s <ck2 mech file> <ck2 thermo file> " "<ck2 converter output file> <state file> <output file> " "<# reactors> <# func evals>\n",argv[0]); exit(-1); } stateFptr=fopen(argv[4],"r"); if(stateFptr==NULL) { printf("ERROR: could not open state vector file %s for read\n", argv[4]); exit(-1); } outputFptr=fopen(argv[5],"w"); if(outputFptr==NULL) { printf("ERROR: could not open output file %s for write\n", argv[5]); exit(-1); } nReactors=atoi(argv[6]); nEval=atoi(argv[7]); mech=new zerork::mechanism_cuda(argv[1],argv[2],argv[3],1,nReactors); //4th param is verbosity nmechSpc=mech->getNumSpecies(); nRxn=mech->getNumReactions(); nStep=mech->getNumSteps(); // parse the input state vector GetLine(stateFptr,readLine,'\n',MAX_LINE_LEN); sscanf(readLine,"%d",&nSpc); if(nSpc != nmechSpc) { printf("WARNING: number of species in mechanism file %d\n",nmechSpc); printf(" differs from state file %d\n",nSpc); } if(nSpc < nmechSpc) { printf("ERROR: number of species in mechanism file %d\n",nmechSpc); printf(" more than from state file %d\n",nSpc); exit(-1); } nSpc = nmechSpc; GetLine(stateFptr,readLine,'\n',MAX_LINE_LEN); sscanf(readLine,"%lf",&pres); GetLine(stateFptr,readLine,'\n',MAX_LINE_LEN); sscanf(readLine,"%lf",&Temp); moleFrac = new double[nSpc]; massFrac = new double[nSpc]; cpSpc = new double[nSpc]; hSpc = new double[nSpc]; gSpc = new double[nSpc]; Kfwd = new double[nRxn]; Krev = new double[nRxn]; sysParam.cvSpc = new double[nSpc]; sysParam.conc = new double[nSpc*nReactors]; sysParam.internalEnergy = new double[nSpc]; sysParam.molWt = new double[nSpc]; sysParam.stepROP = new double[nStep*nReactors]; sysParam.netProdRate = new double[nSpc*nReactors]; sysParam.createRate = new double[nSpc*nReactors]; sysParam.destroyRate = new double[nSpc*nReactors]; for(j=0; j<nSpc-1; j++) { GetLine(stateFptr,readLine,'\n',MAX_LINE_LEN); sscanf(readLine,"%lf",&moleFrac[j]); } // readin the last mole frac 
ignoring the carriage return fscanf(stateFptr,"%lf",&moleFrac[nSpc-1]); // first echo the input fprintf(outputFptr,"%24d ! [#] number of species\n",nSpc); fprintf(outputFptr,"%24.16e ! [Pa] input pressure\n",pres); fprintf(outputFptr,"%24.16e ! [K] input temperature\n",Temp); for(j=0; j<nSpc; j++) { fprintf(outputFptr,"%24.16e ! [-] input mole frac - %s\n", moleFrac[j],mech->getSpeciesName(j)); } // compute the mass fractions mech->getYfromX(moleFrac,massFrac); // compute the mixture properties dens=mech->getDensityFromTPY(Temp,pres,massFrac); rvol=1.0/dens; molwtMix=mech->getMolWtMixFromY(massFrac); presConvert=mech->getPressureFromTVY(Temp,rvol,massFrac); uMix=mech->getMassIntEnergyFromTY(Temp,massFrac,sysParam.internalEnergy); hMix=mech->getMassEnthalpyFromTY(Temp,massFrac,hSpc); cpMix=mech->getMassCpFromTY(Temp,massFrac,cpSpc); cvMix=mech->getMassCvFromTY(Temp,massFrac,sysParam.cvSpc); gasConstant=mech->getGasConstant(); fprintf(outputFptr,"%24.16e ! [kg/m^3] density\n",dens); fprintf(outputFptr,"%24.16e ! [m^3/kg] relative volume\n",rvol); fprintf(outputFptr,"%24.16e ! [kg/kmol] molecular weight\n",molwtMix); fprintf(outputFptr,"%24.16e ! [Pa] pressure (from rel volume)\n", presConvert); fprintf(outputFptr,"%24.16e ! [J/kg] mixture internal energy\n",uMix); fprintf(outputFptr,"%24.16e ! [J/kg] mixture enthalpy\n",hMix); fprintf(outputFptr,"%24.16e ! [J/kg/K] specific heat (const vol)\n", cvMix); fprintf(outputFptr,"%24.16e ! [J/kg/K] specific heat (const pres)\n", cpMix); fprintf(outputFptr,"%24.16e ! [J/kmol/K] univerisal gas const\n", gasConstant); // calculate species properties mech->getMolWtSpc(sysParam.molWt); mech->getCfromVY(rvol,massFrac,sysParam.conc); mech->getNonDimGibbsFromT(Temp,gSpc); double *Temp_array = new double[nReactors]; double *conc_ptr, *net_ptr, *cre_ptr, *des_ptr, *rop_ptr; int prevConcIdx = nSpc-1; //set to 0 for identical concentrations across reactors double Temp_mod = 0.1; //set to 0.0 for identical temps across reactors Temp_array[0] = Temp; for(j=1; j<nReactors; j+=1) { Temp_array[j] = Temp+j*Temp_mod; for( k = 0; k < nSpc; ++k ) { sysParam.conc[j*nSpc+k] = sysParam.conc[(j-1)*nSpc + prevConcIdx]; ++prevConcIdx; if(prevConcIdx >= nSpc) prevConcIdx=0; } } startTime=getHighResolutionTime(); for( k = 0; k < nEval; ++k) { for(j=0; j<nReactors; ++j) { conc_ptr = sysParam.conc + j * nSpc; net_ptr = sysParam.netProdRate + j * nSpc; cre_ptr = sysParam.createRate + j * nSpc; des_ptr = sysParam.destroyRate + j * nSpc; rop_ptr = sysParam.stepROP + j * nStep; // calculate the reaction rates mech->getReactionRates(Temp_array[j],conc_ptr,net_ptr, cre_ptr,des_ptr, rop_ptr); } } stopTime=getHighResolutionTime(); double cpuTime = stopTime - startTime; printf("# Elapsed time for %d*%d dy/dt calls on CPU [s]: %16.8e\n",nReactors,nEval, stopTime-startTime); startTime=getHighResolutionTime(); for(j=0; j<nEval; ++j) { // calculate the reaction rates mech->getReactionRates_CUDA_mr(nReactors,Temp_array,sysParam.conc,sysParam.netProdRate, sysParam.createRate,sysParam.destroyRate, sysParam.stepROP); } stopTime=getHighResolutionTime(); double gpuTime = stopTime - startTime; printf("# Elapsed time for %d*%d dy/dt calls on GPU [s]: %16.8e\n",nReactors,nEval, stopTime-startTime); double* Temp_array_dev; double* conc_dev; double* netProdRate_dev; double* createRate_dev; double* destroyRate_dev; double* stepROP_dev; hipMalloc((void**)&Temp_array_dev,sizeof(double)*nReactors); hipMalloc((void**)&conc_dev,sizeof(double)*nReactors*nSpc); 
hipMalloc((void**)&netProdRate_dev,sizeof(double)*nReactors*nSpc); hipMalloc((void**)&createRate_dev,sizeof(double)*nReactors*nSpc); hipMalloc((void**)&destroyRate_dev,sizeof(double)*nReactors*nSpc); hipMalloc((void**)&stepROP_dev,sizeof(double)*nReactors*nStep); hipMemcpy(Temp_array_dev,&Temp_array[0],sizeof(double)*nReactors,hipMemcpyHostToDevice); std::vector<double> transposedC(nSpc*nReactors); for( k = 0; k < nReactors; ++k) { for(j=0;j<nSpc;++j) { transposedC[j*nReactors+k] = sysParam.conc[k*nSpc+j]; } } hipMemcpy(conc_dev,&transposedC[0],sizeof(double)*nReactors*nSpc,hipMemcpyHostToDevice); startTime=getHighResolutionTime(); for(j=0; j<nEval; ++j) { // calculate the reaction rates mech->getReactionRates_CUDA_mr_dev(nReactors, Temp_array_dev, conc_dev, netProdRate_dev, createRate_dev, destroyRate_dev, stepROP_dev); } stopTime=getHighResolutionTime(); double gpu_devTime = stopTime - startTime; printf("# Elapsed time for %d*%d dy/dt calls on GPU_dev [s]: %16.8e\n",nReactors,nEval, stopTime-startTime); //Test results double *stepROP_cpu = new double[nStep*nReactors]; double *netProdRate_cpu = new double[nSpc*nReactors]; double *createRate_cpu = new double[nSpc*nReactors]; double *destroyRate_cpu = new double[nSpc*nReactors]; double *stepROP_gpu = new double[nStep*nReactors]; double *netProdRate_gpu = new double[nSpc*nReactors]; double *createRate_gpu = new double[nSpc*nReactors]; double *destroyRate_gpu = new double[nSpc*nReactors]; for(j=0; j<nReactors; ++j) { conc_ptr = sysParam.conc + j * nSpc; net_ptr = netProdRate_cpu + j * nSpc; cre_ptr = createRate_cpu + j * nSpc; des_ptr = destroyRate_cpu + j * nSpc; rop_ptr = stepROP_cpu + j * nStep; // calculate the reaction rates mech->getReactionRates(Temp_array[j],conc_ptr,net_ptr, cre_ptr,des_ptr, rop_ptr); } // double transposedC[nSpc*nReactors]; // for( k = 0; k < nReactors; ++k) // { // for(j=0;j<nSpc;++j) // { // transposedC[j*nReactors+k] = sysParam.conc[k*nSpc+j]; // } // } // mech->getReactionRates_CUDA_multiReactor(nReactors,Temp_array,transposedC,netProdRate_gpu, mech->getReactionRates_CUDA_mr(nReactors,Temp_array,sysParam.conc,netProdRate_gpu, createRate_gpu,destroyRate_gpu,stepROP_gpu,true,true); int max_diff_print = 50; int diffcount = 0; double thresh = 1.0e-10; for( j=0; j<nStep*nReactors; ++j) { double diff = fabs((stepROP_cpu[j]-stepROP_gpu[j])/(stepROP_cpu[j]+stepROP_gpu[j])); if( diff > thresh ) printf("Diff in ROP [%d]: %g, %g, %g\n",j,stepROP_cpu[j],stepROP_gpu[j],diff), ++diffcount; if(diffcount > max_diff_print) break; } diffcount = 0; for( j=0; j<nSpc*nReactors; ++j) { double cdiff,ddiff,ndiff,maxdiff; cdiff = fabs((createRate_cpu[j]-createRate_gpu[j])/(createRate_cpu[j]+createRate_gpu[j])); ddiff = fabs((destroyRate_cpu[j]-destroyRate_gpu[j])/(destroyRate_cpu[j]+destroyRate_gpu[j])); ndiff = fabs((netProdRate_cpu[j]-netProdRate_gpu[j])/(netProdRate_cpu[j]+netProdRate_gpu[j])); maxdiff = max(ndiff,max(cdiff,ddiff)); int currReactor = j/nSpc; if( maxdiff > thresh ) { printf("Diff in createRate [%d]: %g, %g, %g\n",j,createRate_cpu[j],createRate_gpu[j],cdiff); printf("Diff in destroyRate [%d]: %g, %g, %g\n",j,destroyRate_cpu[j],destroyRate_gpu[j],ddiff); printf("Diff in netRate [%d]: %g, %g, %g, %d\n",j,netProdRate_cpu[j],netProdRate_gpu[j],ndiff,currReactor); ++diffcount; } if(diffcount > max_diff_print) break; } printf(" *** %d %d %g \n",nSpc,nReactors,cpuTime/gpuTime); //We copy the values from the last call inside the testing loop hipMemcpy(stepROP_gpu, 
stepROP_dev,sizeof(double)*nReactors*nStep,hipMemcpyDeviceToHost); hipMemcpy(createRate_gpu, createRate_dev,sizeof(double)*nReactors*nSpc,hipMemcpyDeviceToHost); hipMemcpy(destroyRate_gpu, destroyRate_dev,sizeof(double)*nReactors*nSpc,hipMemcpyDeviceToHost); hipMemcpy(netProdRate_gpu, netProdRate_dev,sizeof(double)*nReactors*nSpc,hipMemcpyDeviceToHost); hipFree(Temp_array_dev); hipFree(conc_dev); hipFree(netProdRate_dev); hipFree(createRate_dev); hipFree(destroyRate_dev); hipFree(stepROP_dev); // Check _dev results diffcount = 0; for( j=0; j<nStep*nReactors; ++j) { int currReactor = j/nStep; int currStep = j%nStep; int j_gpu = currStep*nReactors+currReactor; double diff = fabs((stepROP_cpu[j]-stepROP_gpu[j_gpu])/(stepROP_cpu[j]+stepROP_gpu[j_gpu])); if( diff > thresh ) printf("Diff in ROP [%d]: %g, %g, %g\n",j,stepROP_cpu[j],stepROP_gpu[j_gpu],diff), ++diffcount; if(diffcount > max_diff_print) break; } diffcount = 0; for( j=0; j<nSpc*nReactors; ++j) { double cdiff,ddiff,ndiff,maxdiff; int currReactor = j/nSpc; int currSpc = j%nSpc; int j_gpu = currSpc*nReactors+currReactor; cdiff = fabs((createRate_cpu[j]-createRate_gpu[j_gpu])/(createRate_cpu[j]+createRate_gpu[j_gpu])); ddiff = fabs((destroyRate_cpu[j]-destroyRate_gpu[j_gpu])/(destroyRate_cpu[j]+destroyRate_gpu[j_gpu])); ndiff = fabs((netProdRate_cpu[j]-netProdRate_gpu[j_gpu])/(netProdRate_cpu[j]+netProdRate_gpu[j_gpu])); maxdiff = max(ndiff,max(cdiff,ddiff)); if( maxdiff > thresh ) { printf("Diff in createRate [%d]: %g, %g, %g\n",j,createRate_cpu[j],createRate_gpu[j_gpu],cdiff); printf("Diff in destroyRate [%d]: %g, %g, %g\n",j,destroyRate_cpu[j],destroyRate_gpu[j_gpu],ddiff); printf("Diff in netRate [%d]: %g, %g, %g, %d\n",j,netProdRate_cpu[j],netProdRate_gpu[j_gpu],ndiff,currReactor); ++diffcount; } if(diffcount > max_diff_print) break; } printf(" +++ %d %d %g \n",nSpc,nReactors,cpuTime/gpu_devTime); // diffcount = 0; // for( k=0; k<nReactors; ++k) // { // for( j=0; j<nSpc; ++j) // { // double cdiff,ddiff,ndiff,maxdiff; //// cdiff = fabs((createRate_cpu[k*nSpc+j]-createRate_gpu[j*nReactors+k])/(createRate_cpu[k*nSpc+j]+createRate_gpu[j*nReactors+k])); //// ddiff = fabs((destroyRate_cpu[k*nSpc+j]-destroyRate_gpu[j*nReactors+k])/(destroyRate_cpu[k*nSpc+j]+destroyRate_gpu[j*nReactors+k])); // cdiff = 0.0; // ddiff = 0.0; // ndiff = fabs((netProdRate_cpu[k*nSpc+j]-netProdRate_gpu[j*nReactors+k])/(netProdRate_cpu[k*nSpc+j]+netProdRate_gpu[j*nReactors+k])); //// ndiff = fabs((netProdRate_cpu[k*nSpc+j]-netProdRate_gpu[k*nSpc+j])/(netProdRate_cpu[k*nSpc+j]+netProdRate_gpu[k*nSpc+j])); // maxdiff = max(ndiff,max(cdiff,ddiff)); // if( maxdiff > thresh ) // { //// printf("Diff in createRate [%d,%d]: %g, %g, %g\n",k,j,createRate_cpu[k*nSpc+j],createRate_gpu[j*nReactors+k],cdiff); //// printf("Diff in destroyRate [%d,%d]: %g, %g, %g\n",k,j,destroyRate_cpu[k*nSpc+j],destroyRate_gpu[j*nReactors+k],ddiff); // printf("Diff in netRate [%d,%d]: %g, %g, %g\n",k,j,netProdRate_cpu[k*nSpc+j],netProdRate_gpu[j*nReactors+k],ndiff); //// printf("Diff in netRate [%d,%d]: %g, %g, %g\n",k,j,netProdRate_cpu[k*nSpc+j],netProdRate_gpu[k*nSpc+j],ndiff); // ++diffcount; // } // if(diffcount > max_diff_print) break; // } // if(diffcount > max_diff_print) break; // } delete mech; delete [] moleFrac; delete [] massFrac; delete [] sysParam.conc; delete [] sysParam.cvSpc; delete [] cpSpc; delete [] sysParam.internalEnergy; delete [] hSpc; delete [] gSpc; delete [] sysParam.molWt; delete [] sysParam.stepROP; delete [] sysParam.netProdRate; delete [] sysParam.createRate; 
delete [] sysParam.destroyRate; delete [] Kfwd; delete [] Krev; fclose(outputFptr); fclose(stateFptr); hipDeviceSynchronize(); hipDeviceReset(); return 0; } int GetLine(FILE *InFile,char *ReadLine, char UntilChar, int MaxChar) { int CharCtr=0; char LocalReadChar='\0'; while((LocalReadChar != UntilChar) && CharCtr < (MaxChar-1)) { fscanf(InFile,"%c",&LocalReadChar); //printf("LocalReadChar[%d]: %c UntilChar: %c\n",CharCtr, // LocalReadChar,UntilChar); ReadLine[CharCtr]=LocalReadChar; CharCtr++; } if(CharCtr == (MaxChar-1) && LocalReadChar != UntilChar) // ran out of space {ReadLine[0]='\0'; return -1;} ReadLine[CharCtr]='\0'; return CharCtr; // exit normally }
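The checking loops above compare the reactor-major CPU buffers (index = reactor*nSpc + spc) against the species-major layout of the *_dev results (index = spc*nReactors + reactor; the ROP check does the same with nStep). A small sketch of that index mapping, matching the j_gpu computation (to_species_major is an illustrative name):

// Map a reactor-major flat index to the species-major index used by the
// *_dev buffers, i.e. j_gpu = currSpc*nReactors + currReactor.
static inline int to_species_major(int j_cpu, int nSpc, int nReactors) {
  int reactor = j_cpu / nSpc;
  int spc = j_cpu % nSpc;
  return spc * nReactors + reactor;
}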
1f0264fa5e88a1ab65b89fa9ee1653751514297c.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string> #include <time.h> #include <sys/time.h> #include "CKconverter/CKReader.h" #include "zerork/mechanism_cuda.h" #include <nvector/nvector_serial.h> // serial N_Vector types, fcts., and macros #include "cuda_runtime.h" #include "cuda_profiler_api.h" typedef struct { int nSpc; double dens; double invDens; double dTemp_dt; double meanCvMass; double *netProdRate; double *stepROP; double *createRate; double *destroyRate; double *molWt; double *conc; double *cvSpc; double *internalEnergy; zerork::mechanism_cuda *mech; } ode_cv_param; double getHighResolutionTime(void) { struct timeval tod; gettimeofday(&tod, NULL); double time_seconds = (double) tod.tv_sec + ((double) tod.tv_usec / 1000000.0); return time_seconds; } using namespace std; using namespace ckr; #define MAX_LINE_LEN 1024 int GetLine(FILE *InFile, char *ReadLine, char UntilChar, int MaxChar); int const_vol_wsr(realtype t, N_Vector y, N_Vector ydot, void *user_data); int main(int argc, char *argv[]) { FILE *stateFptr,*outputFptr; CKReader *ckread; zerork::mechanism_cuda *mech; int j,k; int nEval,nReactors; int nSpc, nRxn, nmechSpc,nStep; char readLine[MAX_LINE_LEN]; double *moleFrac; double *massFrac,*cpSpc,*hSpc,*gSpc; double *Kfwd,*Krev;//,*stepROP; // double *netSpc,*createSpc,*destroySpc; // double dY; double pres,Temp,rvol,dens,molwtMix,presConvert,cpMix,cvMix,uMix,hMix; double gasConstant; double startTime,stopTime; ode_cv_param sysParam; if(argc != 8) { printf("ERROR: incorrect command line usage.\n"); printf(" use instead %s <ck2 mech file> <ck2 thermo file> " "<ck2 converter output file> <state file> <output file> " "<# reactors> <# func evals>\n",argv[0]); exit(-1); } stateFptr=fopen(argv[4],"r"); if(stateFptr==NULL) { printf("ERROR: could not open state vector file %s for read\n", argv[4]); exit(-1); } outputFptr=fopen(argv[5],"w"); if(outputFptr==NULL) { printf("ERROR: could not open output file %s for write\n", argv[5]); exit(-1); } nReactors=atoi(argv[6]); nEval=atoi(argv[7]); mech=new zerork::mechanism_cuda(argv[1],argv[2],argv[3],1,nReactors); //4th param is verbosity nmechSpc=mech->getNumSpecies(); nRxn=mech->getNumReactions(); nStep=mech->getNumSteps(); // parse the input state vector GetLine(stateFptr,readLine,'\n',MAX_LINE_LEN); sscanf(readLine,"%d",&nSpc); if(nSpc != nmechSpc) { printf("WARNING: number of species in mechanism file %d\n",nmechSpc); printf(" differs from state file %d\n",nSpc); } if(nSpc < nmechSpc) { printf("ERROR: number of species in mechanism file %d\n",nmechSpc); printf(" more than from state file %d\n",nSpc); exit(-1); } nSpc = nmechSpc; GetLine(stateFptr,readLine,'\n',MAX_LINE_LEN); sscanf(readLine,"%lf",&pres); GetLine(stateFptr,readLine,'\n',MAX_LINE_LEN); sscanf(readLine,"%lf",&Temp); moleFrac = new double[nSpc]; massFrac = new double[nSpc]; cpSpc = new double[nSpc]; hSpc = new double[nSpc]; gSpc = new double[nSpc]; Kfwd = new double[nRxn]; Krev = new double[nRxn]; sysParam.cvSpc = new double[nSpc]; sysParam.conc = new double[nSpc*nReactors]; sysParam.internalEnergy = new double[nSpc]; sysParam.molWt = new double[nSpc]; sysParam.stepROP = new double[nStep*nReactors]; sysParam.netProdRate = new double[nSpc*nReactors]; sysParam.createRate = new double[nSpc*nReactors]; sysParam.destroyRate = new double[nSpc*nReactors]; for(j=0; j<nSpc-1; j++) { GetLine(stateFptr,readLine,'\n',MAX_LINE_LEN); sscanf(readLine,"%lf",&moleFrac[j]); } // readin the last mole frac ignoring the carriage return 
fscanf(stateFptr,"%lf",&moleFrac[nSpc-1]); // first echo the input fprintf(outputFptr,"%24d ! [#] number of species\n",nSpc); fprintf(outputFptr,"%24.16e ! [Pa] input pressure\n",pres); fprintf(outputFptr,"%24.16e ! [K] input temperature\n",Temp); for(j=0; j<nSpc; j++) { fprintf(outputFptr,"%24.16e ! [-] input mole frac - %s\n", moleFrac[j],mech->getSpeciesName(j)); } // compute the mass fractions mech->getYfromX(moleFrac,massFrac); // compute the mixture properties dens=mech->getDensityFromTPY(Temp,pres,massFrac); rvol=1.0/dens; molwtMix=mech->getMolWtMixFromY(massFrac); presConvert=mech->getPressureFromTVY(Temp,rvol,massFrac); uMix=mech->getMassIntEnergyFromTY(Temp,massFrac,sysParam.internalEnergy); hMix=mech->getMassEnthalpyFromTY(Temp,massFrac,hSpc); cpMix=mech->getMassCpFromTY(Temp,massFrac,cpSpc); cvMix=mech->getMassCvFromTY(Temp,massFrac,sysParam.cvSpc); gasConstant=mech->getGasConstant(); fprintf(outputFptr,"%24.16e ! [kg/m^3] density\n",dens); fprintf(outputFptr,"%24.16e ! [m^3/kg] relative volume\n",rvol); fprintf(outputFptr,"%24.16e ! [kg/kmol] molecular weight\n",molwtMix); fprintf(outputFptr,"%24.16e ! [Pa] pressure (from rel volume)\n", presConvert); fprintf(outputFptr,"%24.16e ! [J/kg] mixture internal energy\n",uMix); fprintf(outputFptr,"%24.16e ! [J/kg] mixture enthalpy\n",hMix); fprintf(outputFptr,"%24.16e ! [J/kg/K] specific heat (const vol)\n", cvMix); fprintf(outputFptr,"%24.16e ! [J/kg/K] specific heat (const pres)\n", cpMix); fprintf(outputFptr,"%24.16e ! [J/kmol/K] univerisal gas const\n", gasConstant); // calculate species properties mech->getMolWtSpc(sysParam.molWt); mech->getCfromVY(rvol,massFrac,sysParam.conc); mech->getNonDimGibbsFromT(Temp,gSpc); double *Temp_array = new double[nReactors]; double *conc_ptr, *net_ptr, *cre_ptr, *des_ptr, *rop_ptr; int prevConcIdx = nSpc-1; //set to 0 for identical concentrations across reactors double Temp_mod = 0.1; //set to 0.0 for identical temps across reactors Temp_array[0] = Temp; for(j=1; j<nReactors; j+=1) { Temp_array[j] = Temp+j*Temp_mod; for( k = 0; k < nSpc; ++k ) { sysParam.conc[j*nSpc+k] = sysParam.conc[(j-1)*nSpc + prevConcIdx]; ++prevConcIdx; if(prevConcIdx >= nSpc) prevConcIdx=0; } } startTime=getHighResolutionTime(); for( k = 0; k < nEval; ++k) { for(j=0; j<nReactors; ++j) { conc_ptr = sysParam.conc + j * nSpc; net_ptr = sysParam.netProdRate + j * nSpc; cre_ptr = sysParam.createRate + j * nSpc; des_ptr = sysParam.destroyRate + j * nSpc; rop_ptr = sysParam.stepROP + j * nStep; // calculate the reaction rates mech->getReactionRates(Temp_array[j],conc_ptr,net_ptr, cre_ptr,des_ptr, rop_ptr); } } stopTime=getHighResolutionTime(); double cpuTime = stopTime - startTime; printf("# Elapsed time for %d*%d dy/dt calls on CPU [s]: %16.8e\n",nReactors,nEval, stopTime-startTime); startTime=getHighResolutionTime(); for(j=0; j<nEval; ++j) { // calculate the reaction rates mech->getReactionRates_CUDA_mr(nReactors,Temp_array,sysParam.conc,sysParam.netProdRate, sysParam.createRate,sysParam.destroyRate, sysParam.stepROP); } stopTime=getHighResolutionTime(); double gpuTime = stopTime - startTime; printf("# Elapsed time for %d*%d dy/dt calls on GPU [s]: %16.8e\n",nReactors,nEval, stopTime-startTime); double* Temp_array_dev; double* conc_dev; double* netProdRate_dev; double* createRate_dev; double* destroyRate_dev; double* stepROP_dev; cudaMalloc((void**)&Temp_array_dev,sizeof(double)*nReactors); cudaMalloc((void**)&conc_dev,sizeof(double)*nReactors*nSpc); cudaMalloc((void**)&netProdRate_dev,sizeof(double)*nReactors*nSpc); 
cudaMalloc((void**)&createRate_dev,sizeof(double)*nReactors*nSpc); cudaMalloc((void**)&destroyRate_dev,sizeof(double)*nReactors*nSpc); cudaMalloc((void**)&stepROP_dev,sizeof(double)*nReactors*nStep); cudaMemcpy(Temp_array_dev,&Temp_array[0],sizeof(double)*nReactors,cudaMemcpyHostToDevice); std::vector<double> transposedC(nSpc*nReactors); for( k = 0; k < nReactors; ++k) { for(j=0;j<nSpc;++j) { transposedC[j*nReactors+k] = sysParam.conc[k*nSpc+j]; } } cudaMemcpy(conc_dev,&transposedC[0],sizeof(double)*nReactors*nSpc,cudaMemcpyHostToDevice); startTime=getHighResolutionTime(); for(j=0; j<nEval; ++j) { // calculate the reaction rates mech->getReactionRates_CUDA_mr_dev(nReactors, Temp_array_dev, conc_dev, netProdRate_dev, createRate_dev, destroyRate_dev, stepROP_dev); } stopTime=getHighResolutionTime(); double gpu_devTime = stopTime - startTime; printf("# Elapsed time for %d*%d dy/dt calls on GPU_dev [s]: %16.8e\n",nReactors,nEval, stopTime-startTime); //Test results double *stepROP_cpu = new double[nStep*nReactors]; double *netProdRate_cpu = new double[nSpc*nReactors]; double *createRate_cpu = new double[nSpc*nReactors]; double *destroyRate_cpu = new double[nSpc*nReactors]; double *stepROP_gpu = new double[nStep*nReactors]; double *netProdRate_gpu = new double[nSpc*nReactors]; double *createRate_gpu = new double[nSpc*nReactors]; double *destroyRate_gpu = new double[nSpc*nReactors]; for(j=0; j<nReactors; ++j) { conc_ptr = sysParam.conc + j * nSpc; net_ptr = netProdRate_cpu + j * nSpc; cre_ptr = createRate_cpu + j * nSpc; des_ptr = destroyRate_cpu + j * nSpc; rop_ptr = stepROP_cpu + j * nStep; // calculate the reaction rates mech->getReactionRates(Temp_array[j],conc_ptr,net_ptr, cre_ptr,des_ptr, rop_ptr); } // double transposedC[nSpc*nReactors]; // for( k = 0; k < nReactors; ++k) // { // for(j=0;j<nSpc;++j) // { // transposedC[j*nReactors+k] = sysParam.conc[k*nSpc+j]; // } // } // mech->getReactionRates_CUDA_multiReactor(nReactors,Temp_array,transposedC,netProdRate_gpu, mech->getReactionRates_CUDA_mr(nReactors,Temp_array,sysParam.conc,netProdRate_gpu, createRate_gpu,destroyRate_gpu,stepROP_gpu,true,true); int max_diff_print = 50; int diffcount = 0; double thresh = 1.0e-10; for( j=0; j<nStep*nReactors; ++j) { double diff = fabs((stepROP_cpu[j]-stepROP_gpu[j])/(stepROP_cpu[j]+stepROP_gpu[j])); if( diff > thresh ) printf("Diff in ROP [%d]: %g, %g, %g\n",j,stepROP_cpu[j],stepROP_gpu[j],diff), ++diffcount; if(diffcount > max_diff_print) break; } diffcount = 0; for( j=0; j<nSpc*nReactors; ++j) { double cdiff,ddiff,ndiff,maxdiff; cdiff = fabs((createRate_cpu[j]-createRate_gpu[j])/(createRate_cpu[j]+createRate_gpu[j])); ddiff = fabs((destroyRate_cpu[j]-destroyRate_gpu[j])/(destroyRate_cpu[j]+destroyRate_gpu[j])); ndiff = fabs((netProdRate_cpu[j]-netProdRate_gpu[j])/(netProdRate_cpu[j]+netProdRate_gpu[j])); maxdiff = max(ndiff,max(cdiff,ddiff)); int currReactor = j/nSpc; if( maxdiff > thresh ) { printf("Diff in createRate [%d]: %g, %g, %g\n",j,createRate_cpu[j],createRate_gpu[j],cdiff); printf("Diff in destroyRate [%d]: %g, %g, %g\n",j,destroyRate_cpu[j],destroyRate_gpu[j],ddiff); printf("Diff in netRate [%d]: %g, %g, %g, %d\n",j,netProdRate_cpu[j],netProdRate_gpu[j],ndiff,currReactor); ++diffcount; } if(diffcount > max_diff_print) break; } printf(" *** %d %d %g \n",nSpc,nReactors,cpuTime/gpuTime); //We copy the values from the last call inside the testing loop cudaMemcpy(stepROP_gpu, stepROP_dev,sizeof(double)*nReactors*nStep,cudaMemcpyDeviceToHost); cudaMemcpy(createRate_gpu, 
createRate_dev,sizeof(double)*nReactors*nSpc,cudaMemcpyDeviceToHost); cudaMemcpy(destroyRate_gpu, destroyRate_dev,sizeof(double)*nReactors*nSpc,cudaMemcpyDeviceToHost); cudaMemcpy(netProdRate_gpu, netProdRate_dev,sizeof(double)*nReactors*nSpc,cudaMemcpyDeviceToHost); cudaFree(Temp_array_dev); cudaFree(conc_dev); cudaFree(netProdRate_dev); cudaFree(createRate_dev); cudaFree(destroyRate_dev); cudaFree(stepROP_dev); // Check _dev results diffcount = 0; for( j=0; j<nStep*nReactors; ++j) { int currReactor = j/nStep; int currStep = j%nStep; int j_gpu = currStep*nReactors+currReactor; double diff = fabs((stepROP_cpu[j]-stepROP_gpu[j_gpu])/(stepROP_cpu[j]+stepROP_gpu[j_gpu])); if( diff > thresh ) printf("Diff in ROP [%d]: %g, %g, %g\n",j,stepROP_cpu[j],stepROP_gpu[j_gpu],diff), ++diffcount; if(diffcount > max_diff_print) break; } diffcount = 0; for( j=0; j<nSpc*nReactors; ++j) { double cdiff,ddiff,ndiff,maxdiff; int currReactor = j/nSpc; int currSpc = j%nSpc; int j_gpu = currSpc*nReactors+currReactor; cdiff = fabs((createRate_cpu[j]-createRate_gpu[j_gpu])/(createRate_cpu[j]+createRate_gpu[j_gpu])); ddiff = fabs((destroyRate_cpu[j]-destroyRate_gpu[j_gpu])/(destroyRate_cpu[j]+destroyRate_gpu[j_gpu])); ndiff = fabs((netProdRate_cpu[j]-netProdRate_gpu[j_gpu])/(netProdRate_cpu[j]+netProdRate_gpu[j_gpu])); maxdiff = max(ndiff,max(cdiff,ddiff)); if( maxdiff > thresh ) { printf("Diff in createRate [%d]: %g, %g, %g\n",j,createRate_cpu[j],createRate_gpu[j_gpu],cdiff); printf("Diff in destroyRate [%d]: %g, %g, %g\n",j,destroyRate_cpu[j],destroyRate_gpu[j_gpu],ddiff); printf("Diff in netRate [%d]: %g, %g, %g, %d\n",j,netProdRate_cpu[j],netProdRate_gpu[j_gpu],ndiff,currReactor); ++diffcount; } if(diffcount > max_diff_print) break; } printf(" +++ %d %d %g \n",nSpc,nReactors,cpuTime/gpu_devTime); // diffcount = 0; // for( k=0; k<nReactors; ++k) // { // for( j=0; j<nSpc; ++j) // { // double cdiff,ddiff,ndiff,maxdiff; //// cdiff = fabs((createRate_cpu[k*nSpc+j]-createRate_gpu[j*nReactors+k])/(createRate_cpu[k*nSpc+j]+createRate_gpu[j*nReactors+k])); //// ddiff = fabs((destroyRate_cpu[k*nSpc+j]-destroyRate_gpu[j*nReactors+k])/(destroyRate_cpu[k*nSpc+j]+destroyRate_gpu[j*nReactors+k])); // cdiff = 0.0; // ddiff = 0.0; // ndiff = fabs((netProdRate_cpu[k*nSpc+j]-netProdRate_gpu[j*nReactors+k])/(netProdRate_cpu[k*nSpc+j]+netProdRate_gpu[j*nReactors+k])); //// ndiff = fabs((netProdRate_cpu[k*nSpc+j]-netProdRate_gpu[k*nSpc+j])/(netProdRate_cpu[k*nSpc+j]+netProdRate_gpu[k*nSpc+j])); // maxdiff = max(ndiff,max(cdiff,ddiff)); // if( maxdiff > thresh ) // { //// printf("Diff in createRate [%d,%d]: %g, %g, %g\n",k,j,createRate_cpu[k*nSpc+j],createRate_gpu[j*nReactors+k],cdiff); //// printf("Diff in destroyRate [%d,%d]: %g, %g, %g\n",k,j,destroyRate_cpu[k*nSpc+j],destroyRate_gpu[j*nReactors+k],ddiff); // printf("Diff in netRate [%d,%d]: %g, %g, %g\n",k,j,netProdRate_cpu[k*nSpc+j],netProdRate_gpu[j*nReactors+k],ndiff); //// printf("Diff in netRate [%d,%d]: %g, %g, %g\n",k,j,netProdRate_cpu[k*nSpc+j],netProdRate_gpu[k*nSpc+j],ndiff); // ++diffcount; // } // if(diffcount > max_diff_print) break; // } // if(diffcount > max_diff_print) break; // } delete mech; delete [] moleFrac; delete [] massFrac; delete [] sysParam.conc; delete [] sysParam.cvSpc; delete [] cpSpc; delete [] sysParam.internalEnergy; delete [] hSpc; delete [] gSpc; delete [] sysParam.molWt; delete [] sysParam.stepROP; delete [] sysParam.netProdRate; delete [] sysParam.createRate; delete [] sysParam.destroyRate; delete [] Kfwd; delete [] Krev; fclose(outputFptr); 
fclose(stateFptr); cudaDeviceSynchronize(); cudaDeviceReset(); return 0; } int GetLine(FILE *InFile,char *ReadLine, char UntilChar, int MaxChar) { int CharCtr=0; char LocalReadChar='\0'; while((LocalReadChar != UntilChar) && CharCtr < (MaxChar-1)) { fscanf(InFile,"%c",&LocalReadChar); //printf("LocalReadChar[%d]: %c UntilChar: %c\n",CharCtr, // LocalReadChar,UntilChar); ReadLine[CharCtr]=LocalReadChar; CharCtr++; } if(CharCtr == (MaxChar-1) && LocalReadChar != UntilChar) // ran out of space {ReadLine[0]='\0'; return -1;} ReadLine[CharCtr]='\0'; return CharCtr; // exit normally }
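// Note on the timing approach above: the benchmark measures GPU evaluations with
// gettimeofday(), which is host wall-clock time and is only meaningful if each
// getReactionRates_CUDA_* call synchronizes before returning. A device-side
// alternative is CUDA event timing. The sketch below is a minimal, self-contained
// illustration under that assumption; dummyKernel, N and the launch configuration
// are placeholders and not part of the zerork API.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(int n, double *x)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = 2.0 * x[i];            // trivial work so there is something to time
}

int main()
{
  const int N = 1 << 20;
  double *d_x;
  cudaMalloc(&d_x, N * sizeof(double));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);                   // recorded on the same stream as the kernel
  dummyKernel<<<(N + 255) / 256, 256>>>(N, d_x);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);               // wait until the kernel has actually finished

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);   // device-side elapsed time in milliseconds
  printf("kernel time [ms]: %g\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}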
bb8adf562caff206d715a0a7fce58b863d0987af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/ScatterGatherChecks.h> #include <ATen/native/ReduceOpsUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/hip/HIPContext.h> #include <THH/THHAtomics.cuh> namespace at { namespace native { // The kernels are implemented on an opaque, // self-aligned type of the correct size, // to avoid redundant kernels for different types // of the same size. template <int N> struct alignas(N) OpaqueType { char data[N]; }; // essentialy rewritten related to legacy::launch_kernel parts template <int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void _scatter_gather_elementwise_kernel(int N, func_t f) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < N) { f(idx); idx += nt; } } } template <int nt, int vt, typename func_t> static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( _scatter_gather_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f); AT_CUDA_CHECK(hipGetLastError()); } template <bool is_scatter_like, typename scalar_t> struct _cuda_scatter_gather_internal_kernel { template <typename func_t> void operator() ( TensorIterator& iter, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()( sub_iter, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* src_ptr = (char*)iter.data_ptr(1); char* index_ptr = (char*)iter.data_ptr(2); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds"); char* self_data = self_ptr + offsets[0]; char* src_data = src_ptr + offsets[1]; f( (scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0), (scalar_t*)src_data + (is_scatter_like ? 
0 : idx_dim * index_stride) ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool is_scatter_like = true, bool cast_to_opaque = true> struct cuda_scatter_gather_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } at::assert_no_internal_overlap(self); dim = maybe_wrap_dim(dim, self.dim()); scatter_gather_dtype_check(method_name, self, index, src); if (is_scatter_like) { scatter_shape_check(self, dim, index, src); } else { gather_shape_check(self, dim, index, src); } auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? 
self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_gather_base_kernel template <typename scalar_t> struct _cuda_scatter_fill_internal_kernel { template <typename func_t> void operator()( TensorIterator& iter, scalar_t src_val, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_fill_internal_kernel<scalar_t>()( sub_iter, src_val, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* index_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds" ); char* self_data = self_ptr + offsets[0]; f( (scalar_t*)self_data + idx_dim * index_stride, &src_val ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool cast_to_opaque = true> struct cuda_scatter_fill_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } at::assert_no_internal_overlap(self); dim = maybe_wrap_dim(dim, self.dim()); scatter_gather_dtype_check(method_name, self, index); scatter_shape_check(self, dim, index); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(index) .build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_fill_base_kernel void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) { cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()( result, dim, index, self, "gather_out_cuda", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { cuda_scatter_gather_base_kernel<>()( self, dim, index, src, "scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) { cuda_scatter_fill_base_kernel<>()( self, dim, index, src, 
"scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("scatter_add_cuda_kernel"); cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()( self, dim, index, src, "scatter_add_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { gpuAtomicAdd(lhs, *rhs); } ); } REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel); REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel); REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel); REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel); }} // namespace at::native
bb8adf562caff206d715a0a7fce58b863d0987af.cu
#include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/ScatterGatherChecks.h> #include <ATen/native/ReduceOpsUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/cuda/CUDAContext.h> #include <THC/THCAtomics.cuh> namespace at { namespace native { // The kernels are implemented on an opaque, // self-aligned type of the correct size, // to avoid redundant kernels for different types // of the same size. template <int N> struct alignas(N) OpaqueType { char data[N]; }; // essentialy rewritten related to legacy::launch_kernel parts template <int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void _scatter_gather_elementwise_kernel(int N, func_t f) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < N) { f(idx); idx += nt; } } } template <int nt, int vt, typename func_t> static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::cuda::getCurrentCUDAStream(); _scatter_gather_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f); AT_CUDA_CHECK(cudaGetLastError()); } template <bool is_scatter_like, typename scalar_t> struct _cuda_scatter_gather_internal_kernel { template <typename func_t> void operator() ( TensorIterator& iter, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()( sub_iter, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* src_ptr = (char*)iter.data_ptr(1); char* index_ptr = (char*)iter.data_ptr(2); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds"); char* self_data = self_ptr + offsets[0]; char* src_data = src_ptr + offsets[1]; f( (scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0), (scalar_t*)src_data + (is_scatter_like ? 
0 : idx_dim * index_stride) ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool is_scatter_like = true, bool cast_to_opaque = true> struct cuda_scatter_gather_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } at::assert_no_internal_overlap(self); dim = maybe_wrap_dim(dim, self.dim()); scatter_gather_dtype_check(method_name, self, index, src); if (is_scatter_like) { scatter_shape_check(self, dim, index, src); } else { gather_shape_check(self, dim, index, src); } auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); auto self_strides = ensure_nonempty_vec(self.strides().vec()); auto src_strides = ensure_nonempty_vec(src.strides().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index_sizes, self_strides); auto src_restrided = is_scatter_like ? src.as_strided(index_sizes, src_strides) : restride_dim(src, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(src_restrided) .add_input(index) .build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? 
self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_gather_base_kernel template <typename scalar_t> struct _cuda_scatter_fill_internal_kernel { template <typename func_t> void operator()( TensorIterator& iter, scalar_t src_val, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_fill_internal_kernel<scalar_t>()( sub_iter, src_val, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* index_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds" ); char* self_data = self_ptr + offsets[0]; f( (scalar_t*)self_data + idx_dim * index_stride, &src_val ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool cast_to_opaque = true> struct cuda_scatter_fill_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } at::assert_no_internal_overlap(self); dim = maybe_wrap_dim(dim, self.dim()); scatter_gather_dtype_check(method_name, self, index); scatter_shape_check(self, dim, index); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self_restrided) .add_input(index) .build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_fill_base_kernel void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) { cuda_scatter_gather_base_kernel</*is_scatter_like=*/false>()( result, dim, index, self, "gather_out_cuda", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { cuda_scatter_gather_base_kernel<>()( self, dim, index, src, "scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) { cuda_scatter_fill_base_kernel<>()( self, dim, index, src, 
"scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("scatter_add_cuda_kernel"); cuda_scatter_gather_base_kernel</*is_scatter_like=*/true, /*cast_to_opaque=*/false>()( self, dim, index, src, "scatter_add_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { gpuAtomicAdd(lhs, *rhs); } ); } REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel); REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel); REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel); REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel); }} // namespace at::native
64577501906df64956e070be3b4efb88bb698c41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ void _sum_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sum_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sum_32_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sum_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sum_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float sum_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) hipMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum hipLaunchKernelGGL(( _sum_32_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _sum_32_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost); return r; }} __device__ void _sum_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sum_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sum_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sum_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sum_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double sum_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) hipMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum hipLaunchKernelGGL(( _sum_64_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _sum_64_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost); return r; }} __device__ void _prod_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai*xi; ai=x[i]; xi=x[i+16]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi; } __global__ void _prod_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 1; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai*xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _prod_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _prod_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); } if(tid<32) { _prod_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float prod_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) hipMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum hipLaunchKernelGGL(( _prod_32_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _prod_32_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost); return r; }} __device__ void _prod_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai*xi; ai=x[i]; xi=x[i+16]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi; } __global__ void _prod_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 1; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai*xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _prod_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _prod_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); } if(tid<32) { _prod_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double prod_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) hipMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum hipLaunchKernelGGL(( _prod_64_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _prod_64_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost); return r; }} __device__ void _maximum_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maximum_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = (-INFINITY); for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maximum_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maximum_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maximum_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float maximum_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) hipMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum hipLaunchKernelGGL(( _maximum_32_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _maximum_32_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost); return r; }} __device__ void _maximum_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maximum_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = (-INFINITY); for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maximum_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maximum_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maximum_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double maximum_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) hipMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum hipLaunchKernelGGL(( _maximum_64_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _maximum_64_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost); return r; }} __device__ void _minimum_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minimum_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minimum_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minimum_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minimum_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float minimum_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) hipMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum hipLaunchKernelGGL(( _minimum_32_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _minimum_32_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost); return r; }} __device__ void _minimum_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minimum_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minimum_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minimum_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minimum_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double minimum_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) hipMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum hipLaunchKernelGGL(( _minimum_64_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _minimum_64_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost); return r; }} __device__ void _sumabs_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float sumabs_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) hipMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum hipLaunchKernelGGL(( _sumabs_32_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _sumabs_32_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost); return r; }} __device__ void _sumabs_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double sumabs_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) hipMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum hipLaunchKernelGGL(( _sumabs_64_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _sumabs_64_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost); return r; }} __device__ void _sumabs2_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs2_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi*xi); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs2_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs2_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs2_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float sumabs2_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) hipMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum hipLaunchKernelGGL(( _sumabs2_32_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _sumabs2_32_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost); return r; }} __device__ void _sumabs2_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs2_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi*xi); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs2_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs2_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs2_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double sumabs2_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) hipMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum hipLaunchKernelGGL(( _sumabs2_64_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _sumabs2_64_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost); return r; }} __device__ void _maxabs_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maxabs_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maxabs_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maxabs_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maxabs_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float maxabs_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) hipMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum hipLaunchKernelGGL(( _maxabs_32_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _maxabs_32_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost); return r; }} __device__ void _maxabs_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maxabs_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maxabs_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maxabs_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maxabs_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double maxabs_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) hipMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum hipLaunchKernelGGL(( _maxabs_64_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _maxabs_64_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost); return r; }} __device__ void _minabs_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minabs_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minabs_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minabs_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minabs_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float minabs_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) hipMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum hipLaunchKernelGGL(( _minabs_32_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _minabs_32_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost); return r; }} __device__ void _minabs_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minabs_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minabs_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minabs_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minabs_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double minabs_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) hipMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum hipLaunchKernelGGL(( _minabs_64_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _minabs_64_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost); return r; }} __device__ void _countnz_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _countnz_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi!=0); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _countnz_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _countnz_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _countnz_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float countnz_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) hipMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum hipLaunchKernelGGL(( _countnz_32_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _countnz_32_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost); return r; }} __device__ void _countnz_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _countnz_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi!=0); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _countnz_64_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _countnz_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _countnz_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double countnz_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) hipMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum hipLaunchKernelGGL(( _countnz_64_20_1), dim3(128),dim3(128), 0, 0, n,x,y); hipLaunchKernelGGL(( _countnz_64_20_2), dim3(1),dim3(128), 0, 0, y,z); hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost); return r; }}
64577501906df64956e070be3b4efb88bb698c41.cu
__device__ void _sum_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sum_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sum_32_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sum_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sum_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float sum_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _sum_32_20_1<<<128,128>>>(n,x,y); _sum_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _sum_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sum_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sum_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sum_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sum_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double sum_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _sum_64_20_1<<<128,128>>>(n,x,y); _sum_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _prod_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai*xi; ai=x[i]; xi=x[i+16]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi; } __global__ void _prod_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 1; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai*xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _prod_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _prod_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); } if(tid<32) { _prod_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float prod_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _prod_32_20_1<<<128,128>>>(n,x,y); _prod_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _prod_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai*xi; ai=x[i]; xi=x[i+16]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi; } __global__ void _prod_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 1; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai*xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _prod_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _prod_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); } if(tid<32) { _prod_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double prod_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _prod_64_20_1<<<128,128>>>(n,x,y); _prod_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _maximum_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maximum_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = (-INFINITY); for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maximum_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maximum_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maximum_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float maximum_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _maximum_32_20_1<<<128,128>>>(n,x,y); _maximum_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _maximum_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maximum_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = (-INFINITY); for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maximum_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maximum_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maximum_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double maximum_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _maximum_64_20_1<<<128,128>>>(n,x,y); _maximum_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _minimum_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minimum_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minimum_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minimum_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minimum_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float minimum_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _minimum_32_20_1<<<128,128>>>(n,x,y); _minimum_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _minimum_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minimum_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minimum_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minimum_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minimum_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double minimum_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _minimum_64_20_1<<<128,128>>>(n,x,y); _minimum_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _sumabs_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float sumabs_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _sumabs_32_20_1<<<128,128>>>(n,x,y); _sumabs_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _sumabs_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double sumabs_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _sumabs_64_20_1<<<128,128>>>(n,x,y); _sumabs_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _sumabs2_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs2_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi*xi); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs2_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs2_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs2_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float sumabs2_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _sumabs2_32_20_1<<<128,128>>>(n,x,y); _sumabs2_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _sumabs2_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs2_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi*xi); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs2_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs2_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs2_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double sumabs2_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _sumabs2_64_20_1<<<128,128>>>(n,x,y); _sumabs2_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _maxabs_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maxabs_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maxabs_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maxabs_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maxabs_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float maxabs_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _maxabs_32_20_1<<<128,128>>>(n,x,y); _maxabs_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _maxabs_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maxabs_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maxabs_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maxabs_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maxabs_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double maxabs_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _maxabs_64_20_1<<<128,128>>>(n,x,y); _maxabs_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _minabs_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minabs_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minabs_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minabs_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minabs_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float minabs_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _minabs_32_20_1<<<128,128>>>(n,x,y); _minabs_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _minabs_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minabs_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minabs_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minabs_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minabs_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double minabs_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _minabs_64_20_1<<<128,128>>>(n,x,y); _minabs_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _countnz_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _countnz_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi!=0); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _countnz_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _countnz_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _countnz_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float countnz_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _countnz_32_20_1<<<128,128>>>(n,x,y); _countnz_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _countnz_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _countnz_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi!=0); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _countnz_64_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _countnz_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _countnz_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double countnz_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _countnz_64_20_1<<<128,128>>>(n,x,y); _countnz_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }}
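// Usage sketch for the extern "C" reducers above (illustrative only): the problem size
// N, the fill pattern, and main() itself are assumptions made for this example, not part
// of the generated library. Each entry point expects a device pointer, which is why the
// data is copied to the GPU before the reducers are called.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

extern "C" float sum_32_20(int n, float *x);
extern "C" float maxabs_32_20(int n, float *x);

int main(void)
{
    const int N = 1 << 20;                                       // assumed input size
    float *h_x = (float*)malloc(N * sizeof(float));
    for (int i = 0; i < N; ++i) h_x[i] = (float)(i % 7) - 3.0f;  // values of both signs

    float *d_x;
    cudaMalloc(&d_x, N * sizeof(float));
    cudaMemcpy(d_x, h_x, N * sizeof(float), cudaMemcpyHostToDevice);

    // Each call runs the two-stage reduction defined above: 128 blocks write partial
    // results into an internal buffer, then a single block folds them into one scalar.
    printf("sum    = %f\n", sum_32_20(N, d_x));
    printf("maxabs = %f\n", maxabs_32_20(N, d_x));

    cudaFree(d_x);
    free(h_x);
    return 0;
}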
2fcd0bab15739b83f9b9c076d7c413d8dc1dccf3.hip
// !!! This is a file automatically generated by hipify!!! // // include files // #include <stdio.h> #include <hip/hip_runtime.h> #include "cutil_inline.h" #include <hip/hip_runtime_api.h> // // kernel routine // __global__ void my_first_kernel (float* x, float* y, float* z) { int tid = threadIdx.x + blockIdx.x * blockDim.x; printf("%d\n", tid); z[tid] = x[tid] + y[tid]; } // // main code // int main(int argc, char **argv) { //Initialising inputs float* h_x; float* h_y; float* h_z; float* d_x; float* d_y; float* d_z; int nblocks, nthreads, nsize; // initialise card cutilDeviceInit(argc, argv); // set number of blocks, and threads per block nblocks = 2; nthreads = 8; nsize = nblocks*nthreads; //Allocating memory on the host h_x = (float*)malloc(nsize*sizeof(float)); h_y = (float*)malloc(nsize*sizeof(float)); h_z = (float*)malloc(nsize*sizeof(float)); for (int i = 0; i < nsize; ++i) { h_x[i] = (float)i; h_y[i] = (float)i; h_z[i] = 0.0; } // allocate memory cudaSafeCall(hipMalloc( (void**)&d_x, nsize*sizeof(float) )); cudaSafeCall(hipMalloc( (void**)&d_y, nsize*sizeof(float) )); cudaSafeCall(hipMalloc( (void**)&d_z, nsize*sizeof(float) )); // copy data from host to device cudaSafeCall(hipMemcpy(d_x, h_x, nsize*sizeof(float), hipMemcpyHostToDevice)); cudaSafeCall(hipMemcpy(d_y, h_y, nsize*sizeof(float), hipMemcpyHostToDevice)); cudaSafeCall(hipMemcpy(d_z, h_z, nsize*sizeof(float), hipMemcpyHostToDevice)); // execute kernel hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x, d_y, d_z); cudaCheckMsg("my_first_kernel execution failed\n"); hipDeviceSynchronize(); // copy back results and print them out cudaSafeCall(hipMemcpy(h_z, d_z, nsize*sizeof(float), hipMemcpyDeviceToHost)); for(int n = 0; n < nsize; n++) { printf("n, x = %d %f\n",n,h_z[n]); } // free memory hipFree(d_x); hipFree(d_y); hipFree(d_z); free(h_x); free(h_y); free(h_z); // CUDA exit -- needed to flush printf write buffer hipDeviceReset(); return 0; }
2fcd0bab15739b83f9b9c076d7c413d8dc1dccf3.cu
// // include files // #include <stdio.h> #include <cuda_runtime.h> #include "cutil_inline.h" #include <cuda_runtime_api.h> // // kernel routine // __global__ void my_first_kernel (float* x, float* y, float* z) { int tid = threadIdx.x + blockIdx.x * blockDim.x; printf("%d\n", tid); z[tid] = x[tid] + y[tid]; } // // main code // int main(int argc, char **argv) { //Initialising inputs float* h_x; float* h_y; float* h_z; float* d_x; float* d_y; float* d_z; int nblocks, nthreads, nsize; // initialise card cutilDeviceInit(argc, argv); // set number of blocks, and threads per block nblocks = 2; nthreads = 8; nsize = nblocks*nthreads; //Allocating memory on the host h_x = (float*)malloc(nsize*sizeof(float)); h_y = (float*)malloc(nsize*sizeof(float)); h_z = (float*)malloc(nsize*sizeof(float)); for (int i = 0; i < nsize; ++i) { h_x[i] = (float)i; h_y[i] = (float)i; h_z[i] = 0.0; } // allocate memory cudaSafeCall(cudaMalloc( (void**)&d_x, nsize*sizeof(float) )); cudaSafeCall(cudaMalloc( (void**)&d_y, nsize*sizeof(float) )); cudaSafeCall(cudaMalloc( (void**)&d_z, nsize*sizeof(float) )); // copy data from host to device cudaSafeCall(cudaMemcpy(d_x, h_x, nsize*sizeof(float), cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(d_y, h_y, nsize*sizeof(float), cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(d_z, h_z, nsize*sizeof(float), cudaMemcpyHostToDevice)); // execute kernel my_first_kernel<<<nblocks,nthreads>>>(d_x, d_y, d_z); cudaCheckMsg("my_first_kernel execution failed\n"); cudaThreadSynchronize(); // copy back results and print them out cudaSafeCall(cudaMemcpy(h_z, d_z, nsize*sizeof(float), cudaMemcpyDeviceToHost)); for(int n = 0; n < nsize; n++) { printf("n, x = %d %f\n",n,h_z[n]); } // free memory cudaFree(d_x); cudaFree(d_y); cudaFree(d_z); free(h_x); free(h_y); free(h_z); // CUDA exit -- needed to flush printf write buffer cudaDeviceReset(); return 0; }
077486844a5482674525b96070ba4ccd973c75b6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>  // uint32_t

// reductionType (with enumerators rmax, rmin, rcount, rsum) and the device function
// hash() are used below but not defined here; they are assumed to be declared in a
// project header that this file is compiled against.

#define EMPTY 0xffffffff

// True when rows a and b agree in every key column (columns are stored column-major).
__device__ bool is_equal(int *key_columns, int a, int b, int num_key_columns, int num_key_rows)
{
    for (int i = 0; i < num_key_columns; ++i)
        if (key_columns[i*num_key_rows + a] != key_columns[i*num_key_rows + b])
            return false;
    return true;
}

// Atomically folds val into aggregate column i of the given bucket according to op.
__device__ void perform_op(int bucket, int *hash_table_aggregate, int hash_table_length,
                           int i, reductionType op, int val)
{
    switch (op) {
        case rmax:   atomicMax(&hash_table_aggregate[i * hash_table_length + bucket], val); break;
        case rmin:   atomicMin(&hash_table_aggregate[i * hash_table_length + bucket], val); break;
        case rcount: atomicAdd(&hash_table_aggregate[i * hash_table_length + bucket], 1);   break;
        case rsum:   atomicAdd(&hash_table_aggregate[i * hash_table_length + bucket], val); break;
    }
}

__global__ void insert_and_aggregate(int *key_columns, int num_key_columns, int num_key_rows,
                                     int *val_columns, int num_val_columns, int num_val_rows,
                                     reductionType *op_columns, int num_op_columns,
                                     uint32_t *hash_table_keys, int *hash_table_aggregate,
                                     int hash_table_length, int *insert_result)
{
    uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= (uint32_t)num_key_rows) return;  // guard threads past the last input row

    uint32_t hashed = hash(key_columns, num_key_columns, num_key_rows);
    int bucket = hashed % hash_table_length;
    int attempts = 0;
    int result = 0;
    while (attempts < hash_table_length) {
        result = 0;  // each probe starts clean
        uint32_t val = hash_table_keys[bucket];
        if (val == EMPTY) {
            // try to claim the empty slot with this row's index
            uint32_t old = atomicCAS(&hash_table_keys[bucket], EMPTY, idx);
            if (old != EMPTY) {
                // lost the race; it is only a collision if the winner holds a different key
                if (!is_equal(key_columns, idx, old, num_key_columns, num_key_rows))
                    result = 1;
            }
        } else if (!is_equal(key_columns, idx, val, num_key_columns, num_key_rows)) {
            result = 1;  // occupied by a different key: collision
        }
        if (result == 0) {
            // the slot holds this key: update every aggregate column with this row's values
            for (int i = 0; i < num_op_columns; ++i) {
                perform_op(bucket, hash_table_aggregate, hash_table_length, i,
                           op_columns[i], val_columns[i * num_val_rows + idx]);
            }
            break;  // done with this row
        }
        ++attempts;
        bucket = (bucket + 1) % hash_table_length;  // linear probing
    }
    if ((result == 1) && (attempts == hash_table_length)) {
        // probed the whole table without finding a slot: record the failed insert
        atomicAdd(insert_result, 1);
    }
}
077486844a5482674525b96070ba4ccd973c75b6.cu
#include <stdint.h>  // uint32_t

// reductionType (with enumerators rmax, rmin, rcount, rsum) and the device function
// hash() are used below but not defined here; they are assumed to be declared in a
// project header that this file is compiled against.

#define EMPTY 0xffffffff

// True when rows a and b agree in every key column (columns are stored column-major).
__device__ bool is_equal(int *key_columns, int a, int b, int num_key_columns, int num_key_rows)
{
    for (int i = 0; i < num_key_columns; ++i)
        if (key_columns[i*num_key_rows + a] != key_columns[i*num_key_rows + b])
            return false;
    return true;
}

// Atomically folds val into aggregate column i of the given bucket according to op.
__device__ void perform_op(int bucket, int *hash_table_aggregate, int hash_table_length,
                           int i, reductionType op, int val)
{
    switch (op) {
        case rmax:   atomicMax(&hash_table_aggregate[i * hash_table_length + bucket], val); break;
        case rmin:   atomicMin(&hash_table_aggregate[i * hash_table_length + bucket], val); break;
        case rcount: atomicAdd(&hash_table_aggregate[i * hash_table_length + bucket], 1);   break;
        case rsum:   atomicAdd(&hash_table_aggregate[i * hash_table_length + bucket], val); break;
    }
}

__global__ void insert_and_aggregate(int *key_columns, int num_key_columns, int num_key_rows,
                                     int *val_columns, int num_val_columns, int num_val_rows,
                                     reductionType *op_columns, int num_op_columns,
                                     uint32_t *hash_table_keys, int *hash_table_aggregate,
                                     int hash_table_length, int *insert_result)
{
    uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= (uint32_t)num_key_rows) return;  // guard threads past the last input row

    uint32_t hashed = hash(key_columns, num_key_columns, num_key_rows);
    int bucket = hashed % hash_table_length;
    int attempts = 0;
    int result = 0;
    while (attempts < hash_table_length) {
        result = 0;  // each probe starts clean
        uint32_t val = hash_table_keys[bucket];
        if (val == EMPTY) {
            // try to claim the empty slot with this row's index
            uint32_t old = atomicCAS(&hash_table_keys[bucket], EMPTY, idx);
            if (old != EMPTY) {
                // lost the race; it is only a collision if the winner holds a different key
                if (!is_equal(key_columns, idx, old, num_key_columns, num_key_rows))
                    result = 1;
            }
        } else if (!is_equal(key_columns, idx, val, num_key_columns, num_key_rows)) {
            result = 1;  // occupied by a different key: collision
        }
        if (result == 0) {
            // the slot holds this key: update every aggregate column with this row's values
            for (int i = 0; i < num_op_columns; ++i) {
                perform_op(bucket, hash_table_aggregate, hash_table_length, i,
                           op_columns[i], val_columns[i * num_val_rows + idx]);
            }
            break;  // done with this row
        }
        ++attempts;
        bucket = (bucket + 1) % hash_table_length;  // linear probing
    }
    if ((result == 1) && (attempts == hash_table_length)) {
        // probed the whole table without finding a slot: record the failed insert
        atomicAdd(insert_result, 1);
    }
}
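// The kernel above relies on two names it does not define: the reductionType enum (with
// enumerators rmax, rmin, rcount, rsum) and the device function hash(). Both presumably
// come from a project header that is not shown here. The definitions below are only a
// placeholder sketch so the kernel can be exercised in isolation; in particular this
// FNV-style hash is an assumption, not the project's actual hash function.
enum reductionType { rmax, rmin, rcount, rsum };

__device__ uint32_t hash(int *key_columns, int num_key_columns, int num_key_rows)
{
    // Hash the calling thread's row by folding in one key per column (column-major layout).
    uint32_t row = threadIdx.x + blockDim.x * blockIdx.x;
    uint32_t h = 2166136261u;                        // FNV-1a offset basis
    for (int i = 0; i < num_key_columns; ++i)
        h = (h ^ (uint32_t)key_columns[i * num_key_rows + row]) * 16777619u;
    return h;
}

// A possible launch, assuming the device buffers are already populated and every entry
// of d_table_keys was initialised to EMPTY beforehand:
//
//   int threads = 256;
//   int blocks  = (num_key_rows + threads - 1) / threads;
//   insert_and_aggregate<<<blocks, threads>>>(d_keys, num_key_columns, num_key_rows,
//                                             d_vals, num_val_columns, num_val_rows,
//                                             d_ops,  num_op_columns,
//                                             d_table_keys, d_table_agg,
//                                             table_len, d_insert_result);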
a6c37b9a110a93b65f683fb6d4da5b7c3d695a4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "util.hip" #include "tabs/sbox.tab" union u32_t { uint i; uchar c[4]; }; #if TTABLE == 128 #define STE_128_LH(state) (sbox[(state >> 1) ] >> ((state & 0x1) << 2)) #define STE_128_HH(state) (sbox[(state >> 1) + 128] >> ((state & 0x1) << 2)) #define STE(state) ((STE_128_LH(state) & 0x0f) | (STE_128_HH(state) << 4)) #elif TTABLE == 64 #define STE_64_0(state) (sbox[(state >> 2) ] >> ((state & 0x3) << 1)) #define STE_64_1(state) (sbox[(state >> 2) + 64] >> ((state & 0x3) << 1)) #define STE_64_2(state) (sbox[(state >> 2) + 128] >> ((state & 0x3) << 1)) #define STE_64_3(state) (sbox[(state >> 2) + 192] >> ((state & 0x3) << 1)) #define STE(state) ((STE_64_0(state) & 0x03) \ | ((STE_64_1(state) & 0x03) << 2) \ | ((STE_64_2(state) & 0x03) << 4) \ | ((STE_64_3(state) & 0x03) << 6)) #elif TTABLE == 32 #define STE_32_0(state) (sbox[(state >> 3) ] >> (state & 0x7)) #define STE_32_1(state) (sbox[(state >> 3) + 32] >> (state & 0x7)) #define STE_32_2(state) (sbox[(state >> 3) + 64] >> (state & 0x7)) #define STE_32_3(state) (sbox[(state >> 3) + 96] >> (state & 0x7)) #define STE_32_4(state) (sbox[(state >> 3) + 128] >> (state & 0x7)) #define STE_32_5(state) (sbox[(state >> 3) + 160] >> (state & 0x7)) #define STE_32_6(state) (sbox[(state >> 3) + 192] >> (state & 0x7)) #define STE_32_7(state) (sbox[(state >> 3) + 224] >> (state & 0x7)) #define STE(state) ((STE_32_0(state) & 0x01) \ | ((STE_32_1(state) & 0x01) << 1) \ | ((STE_32_2(state) & 0x01) << 2) \ | ((STE_32_3(state) & 0x01) << 3) \ | ((STE_32_4(state) & 0x01) << 4) \ | ((STE_32_5(state) & 0x01) << 5) \ | ((STE_32_6(state) & 0x01) << 6) \ | ((STE_32_7(state) & 0x01) << 7)) #else #define STE(state) (sbox[state]) #endif #define SWAP(a, b) (a) ^= (b); (b) ^= (a); (a) ^= (b); __device__ void TransposeSelf(uchar *state) { SWAP(state[1], state[4]); SWAP(state[2], state[8]); SWAP(state[3], state[12]); SWAP(state[6], state[9]); SWAP(state[7], state[13]); SWAP(state[11], state[14]); } __device__ void Transpose(uchar *dst, uchar *src) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { dst[j*4+i] = src[i*4+j]; } } } __device__ void AddRoundKey(uchar *state, uchar *rek) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { state[j*4+i] ^= rek[i*4+3-j]; } } } __device__ void SubBytesSecure(uchar *state, uchar *sbox) { for (int i = 0; i < 16; i++) { state[i] = STE(state[i]);//sbox[state[i]]; } } __device__ void SubBytes(uchar *state, uchar *sbox) { for (int i = 0; i < 16; i++) { state[i] = sbox[state[i]]; } } #define xtime(x) ((x << 1) ^ ((x >> 7) * 0x1b)) __device__ void MixColumns(uchar *state) { uchar Tmp, Tm, t; for(int i = 0; i < 4; i++) { t = state[i]; Tmp = state[i] ^ state[4+i] ^ state[8+i] ^ state[12+i] ; Tm = state[i] ^ state[4+i] ; Tm = xtime(Tm); state[i] ^= Tm ^ Tmp ; Tm = state[4+i] ^ state[8+i] ; Tm = xtime(Tm); state[4+i] ^= Tm ^ Tmp ; Tm = state[8+i] ^ state[12+i] ; Tm = xtime(Tm); state[8+i] ^= Tm ^ Tmp ; Tm = state[12+i] ^ t ; Tm = xtime(Tm); state[12+i] ^= Tm ^ Tmp ; } } __device__ void ShiftRows(uchar *state) { uchar temp; // Rotate first row 1 columns to left temp = state[4]; state[4] = state[5]; state[5] = state[6]; state[6] = state[7]; state[7] = temp; // Rotate second row 2 columns to left temp = state[8]; state[8] = state[10]; state[10] = temp; temp = state[9]; state[9] = state[11]; state[11] = temp; // Rotate third row 3 columns to left temp = state[12]; state[12] = state[15]; state[15] = state[14]; state[14] = state[13]; 
state[13] = temp; } #define REV_ENDIAN(x) (((x)>>24)&0x000000FF) | (((x)>>8)&0x0000FF00) | (((x)<<8)&0x00FF0000) | (((x)<<24)&0xFF000000) __global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr, uint size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; u32_t state[4]; //uchar state[16]; uchar *_rk = (uchar *)rek; #ifdef USE_SMEM __shared__ uchar sbox[256]; #if TTABLE == 256 load_smem_sbox(sbox, Tsbox_256); #elif TTABLE == 128 load_smem_sbox(sbox, Tsbox_128); #elif TTABLE == 64 load_smem_sbox(sbox, Tsbox_64); #elif TTABLE == 32 load_smem_sbox(sbox, Tsbox_32); #endif // TTABLE #else #if TTABLE == 256 uchar *sbox = Tsbox_256; #elif TTABLE == 128 uchar *sbox = Tsbox_128; #elif TTABLE == 64 uchar *sbox = Tsbox_64; #elif TTABLE == 32 uchar *sbox = Tsbox_32; #endif // TTABLE #endif // USE_SMEM uchar *sbox_256 = Tsbox_256; int iter = 0; BEGIN: int offset = (iter * NUM_THREADS * NUM_BLOCKS + tid) << 2; if (offset >= size) return; state[0].i = REV_ENDIAN(pt[offset + 0]); state[1].i = REV_ENDIAN(pt[offset + 1]); state[2].i = REV_ENDIAN(pt[offset + 2]); state[3].i = REV_ENDIAN(pt[offset + 3]); TransposeSelf((uchar*)state); AddRoundKey((uchar*)state, (uchar*)_rk); for (int i = 1; i < Nr; i++) { SubBytes((uchar*)state, sbox_256); ShiftRows((uchar*)state); MixColumns((uchar*)state); AddRoundKey((uchar*)state, (uchar*)(rek + i*4)); } SubBytesSecure((uchar*)state, sbox); ShiftRows((uchar*)state); AddRoundKey((uchar*)state, (uchar*)(rek + Nr*4)); TransposeSelf((uchar*)state); ct[offset + 0] = REV_ENDIAN(state[0].i); ct[offset + 1] = REV_ENDIAN(state[1].i); ct[offset + 2] = REV_ENDIAN(state[2].i); ct[offset + 3] = REV_ENDIAN(state[3].i); iter++; goto BEGIN; }
a6c37b9a110a93b65f683fb6d4da5b7c3d695a4f.cu
#include "util.cu" #include "tabs/sbox.tab" union u32_t { uint i; uchar c[4]; }; #if TTABLE == 128 #define STE_128_LH(state) (sbox[(state >> 1) ] >> ((state & 0x1) << 2)) #define STE_128_HH(state) (sbox[(state >> 1) + 128] >> ((state & 0x1) << 2)) #define STE(state) ((STE_128_LH(state) & 0x0f) | (STE_128_HH(state) << 4)) #elif TTABLE == 64 #define STE_64_0(state) (sbox[(state >> 2) ] >> ((state & 0x3) << 1)) #define STE_64_1(state) (sbox[(state >> 2) + 64] >> ((state & 0x3) << 1)) #define STE_64_2(state) (sbox[(state >> 2) + 128] >> ((state & 0x3) << 1)) #define STE_64_3(state) (sbox[(state >> 2) + 192] >> ((state & 0x3) << 1)) #define STE(state) ((STE_64_0(state) & 0x03) \ | ((STE_64_1(state) & 0x03) << 2) \ | ((STE_64_2(state) & 0x03) << 4) \ | ((STE_64_3(state) & 0x03) << 6)) #elif TTABLE == 32 #define STE_32_0(state) (sbox[(state >> 3) ] >> (state & 0x7)) #define STE_32_1(state) (sbox[(state >> 3) + 32] >> (state & 0x7)) #define STE_32_2(state) (sbox[(state >> 3) + 64] >> (state & 0x7)) #define STE_32_3(state) (sbox[(state >> 3) + 96] >> (state & 0x7)) #define STE_32_4(state) (sbox[(state >> 3) + 128] >> (state & 0x7)) #define STE_32_5(state) (sbox[(state >> 3) + 160] >> (state & 0x7)) #define STE_32_6(state) (sbox[(state >> 3) + 192] >> (state & 0x7)) #define STE_32_7(state) (sbox[(state >> 3) + 224] >> (state & 0x7)) #define STE(state) ((STE_32_0(state) & 0x01) \ | ((STE_32_1(state) & 0x01) << 1) \ | ((STE_32_2(state) & 0x01) << 2) \ | ((STE_32_3(state) & 0x01) << 3) \ | ((STE_32_4(state) & 0x01) << 4) \ | ((STE_32_5(state) & 0x01) << 5) \ | ((STE_32_6(state) & 0x01) << 6) \ | ((STE_32_7(state) & 0x01) << 7)) #else #define STE(state) (sbox[state]) #endif #define SWAP(a, b) (a) ^= (b); (b) ^= (a); (a) ^= (b); __device__ void TransposeSelf(uchar *state) { SWAP(state[1], state[4]); SWAP(state[2], state[8]); SWAP(state[3], state[12]); SWAP(state[6], state[9]); SWAP(state[7], state[13]); SWAP(state[11], state[14]); } __device__ void Transpose(uchar *dst, uchar *src) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { dst[j*4+i] = src[i*4+j]; } } } __device__ void AddRoundKey(uchar *state, uchar *rek) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { state[j*4+i] ^= rek[i*4+3-j]; } } } __device__ void SubBytesSecure(uchar *state, uchar *sbox) { for (int i = 0; i < 16; i++) { state[i] = STE(state[i]);//sbox[state[i]]; } } __device__ void SubBytes(uchar *state, uchar *sbox) { for (int i = 0; i < 16; i++) { state[i] = sbox[state[i]]; } } #define xtime(x) ((x << 1) ^ ((x >> 7) * 0x1b)) __device__ void MixColumns(uchar *state) { uchar Tmp, Tm, t; for(int i = 0; i < 4; i++) { t = state[i]; Tmp = state[i] ^ state[4+i] ^ state[8+i] ^ state[12+i] ; Tm = state[i] ^ state[4+i] ; Tm = xtime(Tm); state[i] ^= Tm ^ Tmp ; Tm = state[4+i] ^ state[8+i] ; Tm = xtime(Tm); state[4+i] ^= Tm ^ Tmp ; Tm = state[8+i] ^ state[12+i] ; Tm = xtime(Tm); state[8+i] ^= Tm ^ Tmp ; Tm = state[12+i] ^ t ; Tm = xtime(Tm); state[12+i] ^= Tm ^ Tmp ; } } __device__ void ShiftRows(uchar *state) { uchar temp; // Rotate first row 1 columns to left temp = state[4]; state[4] = state[5]; state[5] = state[6]; state[6] = state[7]; state[7] = temp; // Rotate second row 2 columns to left temp = state[8]; state[8] = state[10]; state[10] = temp; temp = state[9]; state[9] = state[11]; state[11] = temp; // Rotate third row 3 columns to left temp = state[12]; state[12] = state[15]; state[15] = state[14]; state[14] = state[13]; state[13] = temp; } #define REV_ENDIAN(x) (((x)>>24)&0x000000FF) | (((x)>>8)&0x0000FF00) | 
(((x)<<8)&0x00FF0000) | (((x)<<24)&0xFF000000) __global__ void AES_encrypt(const uint *pt, uint *ct, uint *rek, uint Nr, uint size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; u32_t state[4]; //uchar state[16]; uchar *_rk = (uchar *)rek; #ifdef USE_SMEM __shared__ uchar sbox[256]; #if TTABLE == 256 load_smem_sbox(sbox, Tsbox_256); #elif TTABLE == 128 load_smem_sbox(sbox, Tsbox_128); #elif TTABLE == 64 load_smem_sbox(sbox, Tsbox_64); #elif TTABLE == 32 load_smem_sbox(sbox, Tsbox_32); #endif // TTABLE #else #if TTABLE == 256 uchar *sbox = Tsbox_256; #elif TTABLE == 128 uchar *sbox = Tsbox_128; #elif TTABLE == 64 uchar *sbox = Tsbox_64; #elif TTABLE == 32 uchar *sbox = Tsbox_32; #endif // TTABLE #endif // USE_SMEM uchar *sbox_256 = Tsbox_256; int iter = 0; BEGIN: int offset = (iter * NUM_THREADS * NUM_BLOCKS + tid) << 2; if (offset >= size) return; state[0].i = REV_ENDIAN(pt[offset + 0]); state[1].i = REV_ENDIAN(pt[offset + 1]); state[2].i = REV_ENDIAN(pt[offset + 2]); state[3].i = REV_ENDIAN(pt[offset + 3]); TransposeSelf((uchar*)state); AddRoundKey((uchar*)state, (uchar*)_rk); for (int i = 1; i < Nr; i++) { SubBytes((uchar*)state, sbox_256); ShiftRows((uchar*)state); MixColumns((uchar*)state); AddRoundKey((uchar*)state, (uchar*)(rek + i*4)); } SubBytesSecure((uchar*)state, sbox); ShiftRows((uchar*)state); AddRoundKey((uchar*)state, (uchar*)(rek + Nr*4)); TransposeSelf((uchar*)state); ct[offset + 0] = REV_ENDIAN(state[0].i); ct[offset + 1] = REV_ENDIAN(state[1].i); ct[offset + 2] = REV_ENDIAN(state[2].i); ct[offset + 3] = REV_ENDIAN(state[3].i); iter++; goto BEGIN; }
dd8d9435f2086ad587115f72fea203dfcc5e7182.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_mask_ops.h" #include <hipcub/hipcub.hpp> namespace caffe2 { namespace { __global__ void BooleanMaskCopyKernel( const int64_t numOfOutput, const int64_t numBytes, const int64_t* indices, const uint8_t* src, uint8_t* dest) { for (int64_t i = blockIdx.x; i < numOfOutput; i += gridDim.x) { const auto srcBase = indices[i] * numBytes; const auto destBase = i * numBytes; for (int64_t j = threadIdx.x; j < numBytes; j += blockDim.x) { dest[destBase + j] = src[srcBase + j]; } } } } template <> class BooleanMaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanMaskOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} bool RunOnDevice() override { const auto& src = Input(0); const auto& mask = Input(1); auto* dest = Output(0); CAFFE_ENFORCE(src.ndim() >= 1); CAFFE_ENFORCE_EQ(mask.ndim(), 1); CAFFE_ENFORCE(src.dims()[0] == mask.dims()[0]); const auto* maskData = mask.data<bool>(); const auto outerSize = mask.dims()[0]; indices_.Resize(outerSize); auto* indicesData = indices_.mutable_data<int64_t>(); size_t numBytes = 0; hipcub::CountingInputIterator<int> itr(0); hipcub::DeviceSelect::Flagged( nullptr, numBytes, itr, maskData, indicesData, static_cast<int64_t*>(nullptr), outerSize, context_.cuda_stream()); auto numint64_t = static_cast<int64_t>((numBytes + sizeof(int64_t) - 1) / sizeof(int64_t)); // allocate one more int64_t at the end of scratch for storing numOfOutput scratch_.Resize(numint64_t + 1); auto* scratchData = scratch_.mutable_data<int64_t>(); auto* numOfOutputData = scratchData + numint64_t; hipcub::DeviceSelect::Flagged( static_cast<void*>(scratchData), numBytes, itr, maskData, indicesData, numOfOutputData, outerSize, context_.cuda_stream()); // Copy numOfOutput from gpu to cpu int64_t numOfOutput; context_.CopyToCPU(1, numOfOutputData, &numOfOutput); indices_.Resize(numOfOutput); std::vector<int64_t> dims = src.dims().vec(); dims[0] = numOfOutput; dest->Resize(dims); auto* destData = (uint8_t*)dest->raw_mutable_data(src.meta()); const auto* srcData = (uint8_t*)src.raw_data(); if (OutputSize() == 2) { auto* indicesOut = Output(1); indicesOut->Resize(numOfOutput); indicesOut->template mutable_data<int64_t>(); } if (numOfOutput > 0) { hipLaunchKernelGGL(( BooleanMaskCopyKernel), dim3(min(numOfOutput, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS))), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), numOfOutput, src.size_from_dim(1) * src.meta().itemsize(), indicesData, srcData, destData); if (OutputSize() == 2) { Output(1)->CopyFrom(indices_, &context_); } } return true; } private: Tensor indices_{CUDA}; Tensor scratch_{CUDA}; }; REGISTER_CUDA_OPERATOR(BooleanMask, BooleanMaskOp<CUDAContext>); namespace { #define minf (-1.0f * std::numeric_limits<float>::infinity()) template <typename T> __global__ void sequenceMaskKernel( int N, int M, int B, const T* in, const int* seq_lengths, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= seq_lengths[j] ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= seq_lengths[i] ? 
fill_val : in[index]); } } } template <typename T> __global__ void repeatedSequenceMaskKernel( int N, int M, int D, const T* in, const int* seq_lengths, T fill_val, T* out) { CUDA_1D_KERNEL_LOOP(index, N * M * D) { int i = index / (D * M); int j = (index / D) % M; out[index] = (j >= seq_lengths[i] ? fill_val : in[index]); } } template <typename T> __global__ void windowMaskKernel( int N, int M, int B, const T* in, const int* window_centers, const int radius, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < window_centers[j] - radius || k > window_centers[j] + radius ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < window_centers[i] - radius || j > window_centers[i] + radius ? fill_val : in[index]); } } } template <typename T> __global__ void upperMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k > j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j > i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < i ? fill_val : in[index]); } } } template <typename T> __global__ void upperDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k <= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j <= i ? 
fill_val : in[index]); } } } } // namespace template <> bool SequenceMaskOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0)); } template <> template <class T> bool SequenceMaskOp<CUDAContext>::DoRunWithType() { const Tensor* input = &Input(0); const Tensor* sequence_lengths = nullptr; const Tensor* window_centers = nullptr; if (mode_ == "sequence") { sequence_lengths = &Input(1); } else if (mode_ == "window") { window_centers = &Input(1); } auto* output = Output(0); output->ResizeLike(*input); const auto canonical_axis = input->canonical_axis_index(axis_); // canonical_batch is non-negative if batching, -1 otherwise int canonical_batch = -1; if ((HasArgument("batch"))) { canonical_batch = input->canonical_axis_index(batch_); } // make sure batch < axis if (canonical_batch >= 0) { CAFFE_ENFORCE_LT(canonical_batch, canonical_axis); } // if no batch, then left is product of dims up to axis // otherwise, left is product of dims between batch and axis const int left = (canonical_batch >= 0 ? input->size_between_dim(canonical_batch, canonical_axis) : input->size_to_dim(canonical_axis)); const int right = input->size_from_dim(canonical_axis); // product of dims from 1 to batch const int batch_dim = (canonical_batch >= 0 ? input->size_to_dim(canonical_batch) * input->dim(canonical_batch) : -1); T fill_val = convert::To<float, T>(grad_ ? 0.0f : fill_val_); if (mode_ == "sequence") { if (HasArgument("repeat_from_axis")) { const int canonical_repeat_from = input->canonical_axis_index(repeat_from_); const int repeated_dims = input->size_from_dim(canonical_repeat_from); const int masked_dims = right / repeated_dims; hipLaunchKernelGGL(( repeatedSequenceMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, masked_dims, repeated_dims, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } else { hipLaunchKernelGGL(( sequenceMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } } else if (mode_ == "window") { hipLaunchKernelGGL(( windowMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), window_centers->data<int>(), radius_, fill_val, output->template mutable_data<T>()); } else if (mode_ == "upper") { hipLaunchKernelGGL(( upperMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lower") { hipLaunchKernelGGL(( lowerMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "upperdiag") { hipLaunchKernelGGL(( upperDiagMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lowerdiag") { hipLaunchKernelGGL(( lowerDiagMaskKernel), dim3(CAFFE_GET_BLOCKS(left * right)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); 
} else { CAFFE_ENFORCE(false, "Unsupported mode for SequenceMaskOp!"); } return true; } REGISTER_CUDA_OPERATOR(SequenceMask, SequenceMaskOp<CUDAContext>); } // namespace caffe2
dd8d9435f2086ad587115f72fea203dfcc5e7182.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_mask_ops.h" #include <cub/cub.cuh> namespace caffe2 { namespace { __global__ void BooleanMaskCopyKernel( const int64_t numOfOutput, const int64_t numBytes, const int64_t* indices, const uint8_t* src, uint8_t* dest) { for (int64_t i = blockIdx.x; i < numOfOutput; i += gridDim.x) { const auto srcBase = indices[i] * numBytes; const auto destBase = i * numBytes; for (int64_t j = threadIdx.x; j < numBytes; j += blockDim.x) { dest[destBase + j] = src[srcBase + j]; } } } } template <> class BooleanMaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanMaskOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} bool RunOnDevice() override { const auto& src = Input(0); const auto& mask = Input(1); auto* dest = Output(0); CAFFE_ENFORCE(src.ndim() >= 1); CAFFE_ENFORCE_EQ(mask.ndim(), 1); CAFFE_ENFORCE(src.dims()[0] == mask.dims()[0]); const auto* maskData = mask.data<bool>(); const auto outerSize = mask.dims()[0]; indices_.Resize(outerSize); auto* indicesData = indices_.mutable_data<int64_t>(); size_t numBytes = 0; cub::CountingInputIterator<int> itr(0); cub::DeviceSelect::Flagged( nullptr, numBytes, itr, maskData, indicesData, static_cast<int64_t*>(nullptr), outerSize, context_.cuda_stream()); auto numint64_t = static_cast<int64_t>((numBytes + sizeof(int64_t) - 1) / sizeof(int64_t)); // allocate one more int64_t at the end of scratch for storing numOfOutput scratch_.Resize(numint64_t + 1); auto* scratchData = scratch_.mutable_data<int64_t>(); auto* numOfOutputData = scratchData + numint64_t; cub::DeviceSelect::Flagged( static_cast<void*>(scratchData), numBytes, itr, maskData, indicesData, numOfOutputData, outerSize, context_.cuda_stream()); // Copy numOfOutput from gpu to cpu int64_t numOfOutput; context_.CopyToCPU(1, numOfOutputData, &numOfOutput); indices_.Resize(numOfOutput); std::vector<int64_t> dims = src.dims().vec(); dims[0] = numOfOutput; dest->Resize(dims); auto* destData = (uint8_t*)dest->raw_mutable_data(src.meta()); const auto* srcData = (uint8_t*)src.raw_data(); if (OutputSize() == 2) { auto* indicesOut = Output(1); indicesOut->Resize(numOfOutput); indicesOut->template mutable_data<int64_t>(); } if (numOfOutput > 0) { BooleanMaskCopyKernel<<< min(numOfOutput, static_cast<int64_t>(CAFFE_MAXIMUM_NUM_BLOCKS)), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( numOfOutput, src.size_from_dim(1) * src.meta().itemsize(), indicesData, srcData, destData); if (OutputSize() == 2) { Output(1)->CopyFrom(indices_, &context_); } } return true; } private: Tensor indices_{CUDA}; Tensor scratch_{CUDA}; }; REGISTER_CUDA_OPERATOR(BooleanMask, BooleanMaskOp<CUDAContext>); namespace { #define minf (-1.0f * std::numeric_limits<float>::infinity()) template <typename T> __global__ void sequenceMaskKernel( int N, int M, int B, const T* in, const int* seq_lengths, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= seq_lengths[j] ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= seq_lengths[i] ? 
fill_val : in[index]); } } } template <typename T> __global__ void repeatedSequenceMaskKernel( int N, int M, int D, const T* in, const int* seq_lengths, T fill_val, T* out) { CUDA_1D_KERNEL_LOOP(index, N * M * D) { int i = index / (D * M); int j = (index / D) % M; out[index] = (j >= seq_lengths[i] ? fill_val : in[index]); } } template <typename T> __global__ void windowMaskKernel( int N, int M, int B, const T* in, const int* window_centers, const int radius, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < window_centers[j] - radius || k > window_centers[j] + radius ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < window_centers[i] - radius || j > window_centers[i] + radius ? fill_val : in[index]); } } } template <typename T> __global__ void upperMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k > j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j > i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k < j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j < i ? fill_val : in[index]); } } } template <typename T> __global__ void upperDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k >= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j >= i ? fill_val : in[index]); } } } template <typename T> __global__ void lowerDiagMaskKernel(int N, int M, int B, const T* in, T fill_val, T* out) { if (B >= 0) { CUDA_1D_KERNEL_LOOP(index, B * N * M) { int k = index % M; int j = (index - k) / M % N; int i = (index - M * j - k) / (N * M); int ind = N * M * i + M * j + k; out[ind] = (k <= j ? fill_val : in[ind]); } } else { CUDA_1D_KERNEL_LOOP(index, N * M) { int i = index / M; int j = index % M; out[index] = (j <= i ? 
fill_val : in[index]); } } } } // namespace template <> bool SequenceMaskOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<at::Half, float>>::call(this, Input(0)); } template <> template <class T> bool SequenceMaskOp<CUDAContext>::DoRunWithType() { const Tensor* input = &Input(0); const Tensor* sequence_lengths = nullptr; const Tensor* window_centers = nullptr; if (mode_ == "sequence") { sequence_lengths = &Input(1); } else if (mode_ == "window") { window_centers = &Input(1); } auto* output = Output(0); output->ResizeLike(*input); const auto canonical_axis = input->canonical_axis_index(axis_); // canonical_batch is non-negative if batching, -1 otherwise int canonical_batch = -1; if ((HasArgument("batch"))) { canonical_batch = input->canonical_axis_index(batch_); } // make sure batch < axis if (canonical_batch >= 0) { CAFFE_ENFORCE_LT(canonical_batch, canonical_axis); } // if no batch, then left is product of dims up to axis // otherwise, left is product of dims between batch and axis const int left = (canonical_batch >= 0 ? input->size_between_dim(canonical_batch, canonical_axis) : input->size_to_dim(canonical_axis)); const int right = input->size_from_dim(canonical_axis); // product of dims from 1 to batch const int batch_dim = (canonical_batch >= 0 ? input->size_to_dim(canonical_batch) * input->dim(canonical_batch) : -1); T fill_val = convert::To<float, T>(grad_ ? 0.0f : fill_val_); if (mode_ == "sequence") { if (HasArgument("repeat_from_axis")) { const int canonical_repeat_from = input->canonical_axis_index(repeat_from_); const int repeated_dims = input->size_from_dim(canonical_repeat_from); const int masked_dims = right / repeated_dims; repeatedSequenceMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, masked_dims, repeated_dims, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } else { sequenceMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), sequence_lengths->data<int>(), fill_val, output->template mutable_data<T>()); } } else if (mode_ == "window") { windowMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), window_centers->data<int>(), radius_, fill_val, output->template mutable_data<T>()); } else if (mode_ == "upper") { upperMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lower") { lowerMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "upperdiag") { upperDiagMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else if (mode_ == "lowerdiag") { lowerDiagMaskKernel<<< CAFFE_GET_BLOCKS(left * right), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( left, right, batch_dim, input->data<T>(), fill_val, output->template mutable_data<T>()); } else { CAFFE_ENFORCE(false, "Unsupported mode for SequenceMaskOp!"); } return true; } REGISTER_CUDA_OPERATOR(SequenceMask, SequenceMaskOp<CUDAContext>); } // namespace caffe2
5ac078d47d6a832004f5a8a7fb2d986c4de42434.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/util/cuda_arithemetic_interface.h" #include "oneflow/core/common/switch_func.h" #include "oneflow/core/kernel/util/host_arithemetic_interface.h" #include "oneflow/core/kernel/new_kernel_util.h" #include "oneflow/core/register/blob.h" #include "oneflow/core/kernel/util/cuda_half_util.h" namespace oneflow { namespace { template<int32_t NDIMS> struct Int32Array { int32_t val[NDIMS]; }; template<int32_t NDIMS> __device__ int32_t GetXIndex(const int32_t* y_shape, const int32_t* x_strides, int32_t y_idx) { int32_t x_idx = 0; for (int32_t i = NDIMS - 1; i >= 0; --i) { x_idx += (y_idx % y_shape[i]) * x_strides[i]; y_idx /= y_shape[i]; } return x_idx; } template<int32_t NDIMS, typename T> __global__ void TransposeGpu(const Int32Array<NDIMS> y_shape, const Int32Array<NDIMS> x_strides, const int32_t elem_cnt, const T* x, T* y) { __shared__ int32_t x_strides_shared[NDIMS]; __shared__ int32_t y_dims_shared[NDIMS]; const int32_t tid = threadIdx.x; if (tid < NDIMS) { y_dims_shared[tid] = y_shape.val[tid]; x_strides_shared[tid] = x_strides.val[tid]; } __syncthreads(); CUDA_1D_KERNEL_LOOP(y_idx, elem_cnt) { const int32_t x_idx = GetXIndex<NDIMS>(y_dims_shared, x_strides_shared, y_idx); #if __CUDA_ARCH__ >= 350 y[y_idx] = __ldg(x + x_idx); #else y[y_idx] = x[x_idx]; #endif } } template<int32_t NDIMS, typename T> void TransposeImpl(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const T* x, T* y) { CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); Int32Array<NDIMS> y_shape_struct; FOR_RANGE(int32_t, i, 0, NDIMS) { y_shape_struct.val[i] = y_shape.At(i); } Int32Array<NDIMS> x_strides; int32_t buff[NDIMS]; int32_t cur_stride = 1; for (int32_t i = NDIMS - 1; i >= 0; --i) { buff[i] = cur_stride; cur_stride *= x_shape.At(i); } for (int32_t i = 0; i < NDIMS; ++i) { x_strides.val[i] = buff[permutation[i]]; } hipLaunchKernelGGL(( TransposeGpu<NDIMS, T>) , dim3(SMBlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), y_shape_struct, x_strides, elem_cnt, x, y); } template<typename T> struct TransposeUtil final { #define MAKE_TRANSPOSE_SWITCH_ENTRY(func_name, NDIMS) func_name<NDIMS, T> DEFINE_STATIC_SWITCH_FUNC(void, TransposeImpl, MAKE_TRANSPOSE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ)); }; } // namespace #define TRANSPOSE_CHECK \ CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); \ CHECK_EQ(num_axis, y_shape.NumAxes()); \ CHECK_EQ(num_axis, x_shape.NumAxes()) void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const float* x, float* y) { TRANSPOSE_CHECK; TransposeUtil<float>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void 
ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const double* x, double* y) { TRANSPOSE_CHECK; TransposeUtil<double>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const float16* x, float16* y) { TRANSPOSE_CHECK; TransposeUtil<half>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int8_t* x, int8_t* y) { TRANSPOSE_CHECK; TransposeUtil<int8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int32_t* x, int32_t* y) { TRANSPOSE_CHECK; TransposeUtil<int32_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int64_t* x, int64_t* y) { TRANSPOSE_CHECK; TransposeUtil<int64_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } #undef TRANSPOSE_CHECK void ArithemeticIf<DeviceType::kGPU>::InitializeWithConstConf( DeviceCtx* ctx, const ConstantInitializerConf& initializer_conf, Blob* blob) { WithHostBlobAndStreamSynchronizeEnv(ctx, blob, [&](Blob* host_blob) { ArithemeticIf<DeviceType::kCPU>::InitializeWithConstConf(nullptr, initializer_conf, host_blob); }); } namespace { template<typename T> __global__ void MulByScalarGpu(const int64_t n, const T* x, const T y, T* z) { CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] * y; } } template<> __global__ void MulByScalarGpu<half>(const int64_t n, const half* x, const half y, half* z) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) CUDA_1D_KERNEL_LOOP(i, n) { z[i] = __hmul(x[i], y); } #else HALF_CHECK_FAILED; #endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) } template<typename T> __global__ void MulByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) { const T y_value = y[0]; CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] * y_value; } } template<typename T> __global__ void AddByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) { const T y_value = y[0]; CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] + y_value; } } template<typename T> __global__ void SubByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) { const T y_value = y[0]; CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] - y_value; } } template<typename T> __global__ void DivByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) { const T y_value = y[0]; CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] / y_value; } } template<typename T> __global__ void FillGpu(const int64_t n, const T value, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value; } } 
template<typename T> __global__ void CopyColsRegionGpu(const int64_t row_num, const int64_t col_num, const T* x, const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, const int64_t y_lda) { CUDA_1D_KERNEL_LOOP(index, row_num * col_num) { const int64_t i = index / col_num; const int64_t j = index % col_num; y[i * y_lda + y_col_offset + j] = x[i * x_lda + x_col_offset + j]; } } } // namespace #define MUL_BY_SCALAR(T) \ void ArithemeticIf<DeviceType::kGPU>::MulByScalar(DeviceCtx* ctx, const int64_t n, const T* x, \ const T y, T* z) { \ hipLaunchKernelGGL(( MulByScalarGpu<T>) \ , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \ } MUL_BY_SCALAR(float) MUL_BY_SCALAR(double) MUL_BY_SCALAR(int32_t) MUL_BY_SCALAR(int64_t) #undef MUL_BY_SCALAR void ArithemeticIf<DeviceType::kGPU>::MulByScalar(DeviceCtx* ctx, const int64_t n, const float16* x, const float16 y, float16* z) { hipLaunchKernelGGL(( MulByScalarGpu<half>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, reinterpret_cast<const half*>(x), float16_2half(y), reinterpret_cast<half*>(z)); } #define MUL_BY_SCALAR_PTR(T) \ void ArithemeticIf<DeviceType::kGPU>::MulByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ hipLaunchKernelGGL(( MulByScalarPtrGpu<T>) \ , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \ } MUL_BY_SCALAR_PTR(float) MUL_BY_SCALAR_PTR(double) MUL_BY_SCALAR_PTR(int8_t) MUL_BY_SCALAR_PTR(int32_t) MUL_BY_SCALAR_PTR(int64_t) #undef MUL_BY_SCALAR_PTR #define ADD_BY_SCALAR_PTR(T) \ void ArithemeticIf<DeviceType::kGPU>::AddByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ hipLaunchKernelGGL(( AddByScalarPtrGpu<T>) \ , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \ } ADD_BY_SCALAR_PTR(float) ADD_BY_SCALAR_PTR(double) ADD_BY_SCALAR_PTR(int8_t) ADD_BY_SCALAR_PTR(int32_t) ADD_BY_SCALAR_PTR(int64_t) #undef ADD_BY_SCALAR_PTR #define SUB_BY_SCALAR_PTR(T) \ void ArithemeticIf<DeviceType::kGPU>::SubByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ hipLaunchKernelGGL(( SubByScalarPtrGpu<T>) \ , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \ } SUB_BY_SCALAR_PTR(float) SUB_BY_SCALAR_PTR(double) SUB_BY_SCALAR_PTR(int8_t) SUB_BY_SCALAR_PTR(int32_t) SUB_BY_SCALAR_PTR(int64_t) #undef SUB_BY_SCALAR_PTR #define DIV_BY_SCALAR_PTR(T) \ void ArithemeticIf<DeviceType::kGPU>::DivByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ hipLaunchKernelGGL(( DivByScalarPtrGpu<T>) \ , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, x, y, z); \ } DIV_BY_SCALAR_PTR(float) DIV_BY_SCALAR_PTR(double) DIV_BY_SCALAR_PTR(int8_t) DIV_BY_SCALAR_PTR(int32_t) DIV_BY_SCALAR_PTR(int64_t) #undef DIV_BY_SCALAR_PTR #define FILL(T) \ void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const T value, \ T* y) { \ hipLaunchKernelGGL(( FillGpu<T>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), \ n, value, y); \ } FILL(float) FILL(double) FILL(int8_t) FILL(int32_t) FILL(int64_t) #undef FILL void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const float16 value, float16* y) { hipLaunchKernelGGL(( FillGpu<half>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, 
ctx->cuda_stream(), n, float16_2half(value), reinterpret_cast<half*>(y)); } #define COPY_COLS_REGION(T) \ void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion( \ DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const T* x, \ const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, \ const int64_t y_lda) { \ hipLaunchKernelGGL(( CopyColsRegionGpu<T>), dim3(BlocksNum4ThreadsNum(row_num* col_num)), dim3(kCudaThreadsNumPerBlock), 0, \ ctx->cuda_stream(), row_num, col_num, x, x_col_offset, x_lda, y, \ y_col_offset, y_lda); \ } COPY_COLS_REGION(float) COPY_COLS_REGION(double) COPY_COLS_REGION(int8_t) COPY_COLS_REGION(int32_t) COPY_COLS_REGION(int64_t) #undef COPY_COLS_REGION void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion(DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const float16* x, const int64_t x_col_offset, const int64_t x_lda, float16* y, const int64_t y_col_offset, const int64_t y_lda) { hipLaunchKernelGGL(( CopyColsRegionGpu<half>) , dim3(BlocksNum4ThreadsNum(row_num * col_num)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), row_num, col_num, reinterpret_cast<const half*>(x), x_col_offset, x_lda, reinterpret_cast<half*>(y), y_col_offset, y_lda); } } // namespace oneflow
5ac078d47d6a832004f5a8a7fb2d986c4de42434.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/util/cuda_arithemetic_interface.h" #include "oneflow/core/common/switch_func.h" #include "oneflow/core/kernel/util/host_arithemetic_interface.h" #include "oneflow/core/kernel/new_kernel_util.h" #include "oneflow/core/register/blob.h" #include "oneflow/core/kernel/util/cuda_half_util.h" namespace oneflow { namespace { template<int32_t NDIMS> struct Int32Array { int32_t val[NDIMS]; }; template<int32_t NDIMS> __device__ int32_t GetXIndex(const int32_t* y_shape, const int32_t* x_strides, int32_t y_idx) { int32_t x_idx = 0; for (int32_t i = NDIMS - 1; i >= 0; --i) { x_idx += (y_idx % y_shape[i]) * x_strides[i]; y_idx /= y_shape[i]; } return x_idx; } template<int32_t NDIMS, typename T> __global__ void TransposeGpu(const Int32Array<NDIMS> y_shape, const Int32Array<NDIMS> x_strides, const int32_t elem_cnt, const T* x, T* y) { __shared__ int32_t x_strides_shared[NDIMS]; __shared__ int32_t y_dims_shared[NDIMS]; const int32_t tid = threadIdx.x; if (tid < NDIMS) { y_dims_shared[tid] = y_shape.val[tid]; x_strides_shared[tid] = x_strides.val[tid]; } __syncthreads(); CUDA_1D_KERNEL_LOOP(y_idx, elem_cnt) { const int32_t x_idx = GetXIndex<NDIMS>(y_dims_shared, x_strides_shared, y_idx); #if __CUDA_ARCH__ >= 350 y[y_idx] = __ldg(x + x_idx); #else y[y_idx] = x[x_idx]; #endif } } template<int32_t NDIMS, typename T> void TransposeImpl(DeviceCtx* ctx, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const T* x, T* y) { CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); Int32Array<NDIMS> y_shape_struct; FOR_RANGE(int32_t, i, 0, NDIMS) { y_shape_struct.val[i] = y_shape.At(i); } Int32Array<NDIMS> x_strides; int32_t buff[NDIMS]; int32_t cur_stride = 1; for (int32_t i = NDIMS - 1; i >= 0; --i) { buff[i] = cur_stride; cur_stride *= x_shape.At(i); } for (int32_t i = 0; i < NDIMS; ++i) { x_strides.val[i] = buff[permutation[i]]; } TransposeGpu<NDIMS, T> <<<SMBlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( y_shape_struct, x_strides, elem_cnt, x, y); } template<typename T> struct TransposeUtil final { #define MAKE_TRANSPOSE_SWITCH_ENTRY(func_name, NDIMS) func_name<NDIMS, T> DEFINE_STATIC_SWITCH_FUNC(void, TransposeImpl, MAKE_TRANSPOSE_SWITCH_ENTRY, MAKE_NDIM_CTRV_SEQ(DIM_SEQ)); }; } // namespace #define TRANSPOSE_CHECK \ CHECK_LE(y_shape.elem_cnt(), GetMaxVal<int32_t>()); \ CHECK_EQ(num_axis, y_shape.NumAxes()); \ CHECK_EQ(num_axis, x_shape.NumAxes()) void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const float* x, float* y) { TRANSPOSE_CHECK; TransposeUtil<float>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const 
ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const double* x, double* y) { TRANSPOSE_CHECK; TransposeUtil<double>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const float16* x, float16* y) { TRANSPOSE_CHECK; TransposeUtil<half>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int8_t* x, int8_t* y) { TRANSPOSE_CHECK; TransposeUtil<int8_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int32_t* x, int32_t* y) { TRANSPOSE_CHECK; TransposeUtil<int32_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } void ArithemeticIf<DeviceType::kGPU>::Transpose(DeviceCtx* ctx, const int32_t num_axis, const ShapeView& x_shape, const ShapeView& y_shape, const PbRf<int32_t>& permutation, const int64_t elem_cnt, const int64_t* x, int64_t* y) { TRANSPOSE_CHECK; TransposeUtil<int64_t>::SwitchTransposeImpl(SwitchCase(num_axis), ctx, x_shape, y_shape, permutation, elem_cnt, x, y); } #undef TRANSPOSE_CHECK void ArithemeticIf<DeviceType::kGPU>::InitializeWithConstConf( DeviceCtx* ctx, const ConstantInitializerConf& initializer_conf, Blob* blob) { WithHostBlobAndStreamSynchronizeEnv(ctx, blob, [&](Blob* host_blob) { ArithemeticIf<DeviceType::kCPU>::InitializeWithConstConf(nullptr, initializer_conf, host_blob); }); } namespace { template<typename T> __global__ void MulByScalarGpu(const int64_t n, const T* x, const T y, T* z) { CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] * y; } } template<> __global__ void MulByScalarGpu<half>(const int64_t n, const half* x, const half y, half* z) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) CUDA_1D_KERNEL_LOOP(i, n) { z[i] = __hmul(x[i], y); } #else HALF_CHECK_FAILED; #endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) } template<typename T> __global__ void MulByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) { const T y_value = y[0]; CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] * y_value; } } template<typename T> __global__ void AddByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) { const T y_value = y[0]; CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] + y_value; } } template<typename T> __global__ void SubByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) { const T y_value = y[0]; CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] - y_value; } } template<typename T> __global__ void DivByScalarPtrGpu(const int64_t n, const T* x, const T* y, T* z) { const T y_value = y[0]; CUDA_1D_KERNEL_LOOP(i, n) { z[i] = x[i] / y_value; } } template<typename T> __global__ void FillGpu(const int64_t n, const T value, T* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = value; } } template<typename T> __global__ void CopyColsRegionGpu(const int64_t row_num, const int64_t col_num, const T* x, const int64_t 
x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, const int64_t y_lda) { CUDA_1D_KERNEL_LOOP(index, row_num * col_num) { const int64_t i = index / col_num; const int64_t j = index % col_num; y[i * y_lda + y_col_offset + j] = x[i * x_lda + x_col_offset + j]; } } } // namespace #define MUL_BY_SCALAR(T) \ void ArithemeticIf<DeviceType::kGPU>::MulByScalar(DeviceCtx* ctx, const int64_t n, const T* x, \ const T y, T* z) { \ MulByScalarGpu<T> \ <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \ } MUL_BY_SCALAR(float) MUL_BY_SCALAR(double) MUL_BY_SCALAR(int32_t) MUL_BY_SCALAR(int64_t) #undef MUL_BY_SCALAR void ArithemeticIf<DeviceType::kGPU>::MulByScalar(DeviceCtx* ctx, const int64_t n, const float16* x, const float16 y, float16* z) { MulByScalarGpu<half><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, reinterpret_cast<const half*>(x), float16_2half(y), reinterpret_cast<half*>(z)); } #define MUL_BY_SCALAR_PTR(T) \ void ArithemeticIf<DeviceType::kGPU>::MulByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ MulByScalarPtrGpu<T> \ <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \ } MUL_BY_SCALAR_PTR(float) MUL_BY_SCALAR_PTR(double) MUL_BY_SCALAR_PTR(int8_t) MUL_BY_SCALAR_PTR(int32_t) MUL_BY_SCALAR_PTR(int64_t) #undef MUL_BY_SCALAR_PTR #define ADD_BY_SCALAR_PTR(T) \ void ArithemeticIf<DeviceType::kGPU>::AddByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ AddByScalarPtrGpu<T> \ <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \ } ADD_BY_SCALAR_PTR(float) ADD_BY_SCALAR_PTR(double) ADD_BY_SCALAR_PTR(int8_t) ADD_BY_SCALAR_PTR(int32_t) ADD_BY_SCALAR_PTR(int64_t) #undef ADD_BY_SCALAR_PTR #define SUB_BY_SCALAR_PTR(T) \ void ArithemeticIf<DeviceType::kGPU>::SubByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ SubByScalarPtrGpu<T> \ <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \ } SUB_BY_SCALAR_PTR(float) SUB_BY_SCALAR_PTR(double) SUB_BY_SCALAR_PTR(int8_t) SUB_BY_SCALAR_PTR(int32_t) SUB_BY_SCALAR_PTR(int64_t) #undef SUB_BY_SCALAR_PTR #define DIV_BY_SCALAR_PTR(T) \ void ArithemeticIf<DeviceType::kGPU>::DivByScalarPtr(DeviceCtx* ctx, const int64_t n, \ const T* x, const T* y, T* z) { \ DivByScalarPtrGpu<T> \ <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(n, x, y, z); \ } DIV_BY_SCALAR_PTR(float) DIV_BY_SCALAR_PTR(double) DIV_BY_SCALAR_PTR(int8_t) DIV_BY_SCALAR_PTR(int32_t) DIV_BY_SCALAR_PTR(int64_t) #undef DIV_BY_SCALAR_PTR #define FILL(T) \ void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const T value, \ T* y) { \ FillGpu<T><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( \ n, value, y); \ } FILL(float) FILL(double) FILL(int8_t) FILL(int32_t) FILL(int64_t) #undef FILL void ArithemeticIf<DeviceType::kGPU>::Fill(DeviceCtx* ctx, const int64_t n, const float16 value, float16* y) { FillGpu<half><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, float16_2half(value), reinterpret_cast<half*>(y)); } #define COPY_COLS_REGION(T) \ void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion( \ DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const T* x, \ const int64_t x_col_offset, const int64_t x_lda, T* y, const int64_t y_col_offset, \ const int64_t y_lda) { \ 
CopyColsRegionGpu<T><<<BlocksNum4ThreadsNum(row_num* col_num), kCudaThreadsNumPerBlock, 0, \ ctx->cuda_stream()>>>(row_num, col_num, x, x_col_offset, x_lda, y, \ y_col_offset, y_lda); \ } COPY_COLS_REGION(float) COPY_COLS_REGION(double) COPY_COLS_REGION(int8_t) COPY_COLS_REGION(int32_t) COPY_COLS_REGION(int64_t) #undef COPY_COLS_REGION void ArithemeticIf<DeviceType::kGPU>::CopyColsRegion(DeviceCtx* ctx, const int64_t row_num, const int64_t col_num, const float16* x, const int64_t x_col_offset, const int64_t x_lda, float16* y, const int64_t y_col_offset, const int64_t y_lda) { CopyColsRegionGpu<half> <<<BlocksNum4ThreadsNum(row_num * col_num), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( row_num, col_num, reinterpret_cast<const half*>(x), x_col_offset, x_lda, reinterpret_cast<half*>(y), y_col_offset, y_lda); } } // namespace oneflow
41edfef6a94e984ac6bab647f06947340928e9b1.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computeTemporalSmoothRmatrices.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

// Candidate launch configurations and square matrix sizes to benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    // argv[1] selects how many of the matrix sizes above to benchmark.
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const float *Rmatrices = NULL;
            hipMalloc(&Rmatrices, XSIZE*YSIZE);
            uint32_t numSamples = 1;
            uint32_t subArraySize = XSIZE*YSIZE;
            uint32_t numSubArrays = 1;
            const uint32_t *subArraySizes = NULL;
            hipMalloc(&subArraySizes, XSIZE*YSIZE);
            uint32_t temporalSmoothing = 1;
            float *TempRmatrices = NULL;
            hipMalloc(&TempRmatrices, XSIZE*YSIZE);
            // Round the launch grid up so it covers the whole matrix for this block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // Warm-up: one synchronized launch followed by ten more.
            hipFree(0);
            hipLaunchKernelGGL(( computeTemporalSmoothRmatrices), dim3(gridBlock), dim3(threadBlock), 0, 0, Rmatrices, numSamples, subArraySize, numSubArrays, subArraySizes, temporalSmoothing, TempRmatrices);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( computeTemporalSmoothRmatrices), dim3(gridBlock), dim3(threadBlock), 0, 0, Rmatrices, numSamples, subArraySize, numSubArrays, subArraySizes, temporalSmoothing, TempRmatrices);
            }
            // Timed region: 1000 back-to-back launches, reported as [usecs, (block), (matrix)].
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( computeTemporalSmoothRmatrices), dim3(gridBlock), dim3(threadBlock), 0, 0, Rmatrices, numSamples, subArraySize, numSubArrays, subArraySizes, temporalSmoothing, TempRmatrices);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
41edfef6a94e984ac6bab647f06947340928e9b1.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computeTemporalSmoothRmatrices.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

// Candidate launch configurations and square matrix sizes to benchmark.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    // argv[1] selects how many of the matrix sizes above to benchmark.
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const float *Rmatrices = NULL;
            cudaMalloc(&Rmatrices, XSIZE*YSIZE);
            uint32_t numSamples = 1;
            uint32_t subArraySize = XSIZE*YSIZE;
            uint32_t numSubArrays = 1;
            const uint32_t *subArraySizes = NULL;
            cudaMalloc(&subArraySizes, XSIZE*YSIZE);
            uint32_t temporalSmoothing = 1;
            float *TempRmatrices = NULL;
            cudaMalloc(&TempRmatrices, XSIZE*YSIZE);
            // Round the launch grid up so it covers the whole matrix for this block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // Warm-up: one synchronized launch followed by ten more.
            cudaFree(0);
            computeTemporalSmoothRmatrices<<<gridBlock, threadBlock>>>(Rmatrices, numSamples, subArraySize, numSubArrays, subArraySizes, temporalSmoothing, TempRmatrices);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                computeTemporalSmoothRmatrices<<<gridBlock, threadBlock>>>(Rmatrices, numSamples, subArraySize, numSubArrays, subArraySizes, temporalSmoothing, TempRmatrices);
            }
            // Timed region: 1000 back-to-back launches, reported as [usecs, (block), (matrix)].
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                computeTemporalSmoothRmatrices<<<gridBlock, threadBlock>>>(Rmatrices, numSamples, subArraySize, numSubArrays, subArraySizes, temporalSmoothing, TempRmatrices);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
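// The pair above shows the pattern this dataset captures: hipify leaves the program
// logic untouched and rewrites only the CUDA runtime calls and the triple-chevron
// kernel launch into their HIP equivalents. A minimal self-contained sketch of that
// mapping follows; the kernel scale_by_two and its launch parameters are assumptions
// made up for illustration and do not come from any file in this dataset.
#include <cuda_runtime.h>

__global__ void scale_by_two(float *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= 2.0f;  // one element per thread
}

int main() {
    const int n = 1024;
    float *d_x = NULL;
    cudaMalloc(&d_x, n * sizeof(float));            // hipify: hipMalloc(&d_x, n * sizeof(float));
    scale_by_two<<<(n + 255) / 256, 256>>>(d_x, n);
    // hipify rewrites the launch above in the style seen throughout this dataset:
    //   hipLaunchKernelGGL(( scale_by_two), dim3((n + 255) / 256), dim3(256), 0, 0, d_x, n);
    cudaDeviceSynchronize();                        // hipify: hipDeviceSynchronize();
    cudaFree(d_x);                                  // hipify: hipFree(d_x);
    return 0;
}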
0a43888c3236fc791bd48d3df1916cab2ef23da8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_lists.hpp> #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/copy_if_else.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/scalar/scalar.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> template <typename T> struct CopyTest : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypesWithoutFixedPoint); #define wrapper cudf::test::fixed_width_column_wrapper TYPED_TEST(CopyTest, CopyIfElseTestShort) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 1}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 1, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 6, 6}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestManyNulls) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{{1, 0, 0, 0, 0, 0, 1}, {1, 1, 1, 1, 1, 1, 0}}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5}, {1, 1, 1, 1, 1, 1, 1}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1}); wrapper<T, int32_t> expected_w({5, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } struct copy_if_else_tiny_grid_functor { template <typename T, typename Filter, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>())> std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, Filter filter, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // output std::unique_ptr<cudf::column> out = cudf::allocate_like(lhs, lhs.size(), cudf::mask_allocation_policy::RETAIN, mr); // device views auto lhs_view = cudf::column_device_view::create(lhs); auto rhs_view = cudf::column_device_view::create(rhs); auto lhs_iter = cudf::detail::make_pair_iterator<T>(*lhs_view); auto rhs_iter = cudf::detail::make_pair_iterator<T>(*rhs_view); auto out_dv = cudf::mutable_column_device_view::create(*out); // call the kernel with an artificially small grid hipLaunchKernelGGL(( cudf::detail::copy_if_else_kernel<32, T, decltype(lhs_iter), decltype(rhs_iter), Filter, false>) , dim3(1), dim3(32), 0, stream.value(), lhs_iter, rhs_iter, filter, *out_dv, nullptr); return out; } template <typename T, typename Filter, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<T>())> std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, Filter filter, rmm::cuda_stream_view stream, 
rmm::mr::device_memory_resource* mr) { CUDF_FAIL("Unexpected test execution"); } }; std::unique_ptr<cudf::column> tiny_grid_launch(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& boolean_mask) { auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask); cudf::column_device_view bool_mask_device = *bool_mask_device_p; auto filter = [bool_mask_device] __device__(cudf::size_type i) { return bool_mask_device.element<bool>(i); }; return cudf::type_dispatcher(lhs.type(), copy_if_else_tiny_grid_functor{}, lhs, rhs, filter, rmm::cuda_stream_default, rmm::mr::get_current_device_resource()); } TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid) { using T = TypeParam; // make sure we span at least 2 warps int num_els = 64; bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}); auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestLong) { using T = TypeParam; // make sure we span at least 2 warps int num_els = 64; bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); bool lhs_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, lhs_v); bool rhs_v[] = {1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}, rhs_v); bool exp_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, exp_v); auto 
out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{}; wrapper<T> lhs_w{}; wrapper<T> rhs_w{}; wrapper<T> expected_w{}; auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 0}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity2) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 1, 1, 0}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity3) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 1}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity4) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseBadInputLength) { using T = TypeParam; // mask length mismatch { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } // column length mismatch { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } } template <typename T> struct CopyTestNumeric : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTestNumeric, cudf::test::NumericTypes); TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarColumn) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); cudf::numeric_scalar<T> lhs_w(5); const auto rhs = cudf::test::make_type_param_vector<T>({6, 6, 6, 6}); bool rhs_v[] = {1, 0, 1, 1}; wrapper<T> rhs_w(rhs.begin(), rhs.end(), rhs_v); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5}); wrapper<T> expected_w(expected.begin(), expected.end(), rhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestNumeric, CopyIfElseTestColumnScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; bool mask_v[] = {1, 1, 1, 0}; 
cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els, mask_v); const auto lhs = cudf::test::make_type_param_vector<T>({5, 5, 5, 5}); bool lhs_v[] = {0, 1, 1, 1}; wrapper<T> lhs_w(lhs.begin(), lhs.end(), lhs_v); cudf::numeric_scalar<T> rhs_w(6); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 6}); wrapper<T> expected_w(expected.begin(), expected.end(), lhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); cudf::numeric_scalar<T> lhs_w(5); cudf::numeric_scalar<T> rhs_w(6, false); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5}); wrapper<T> expected_w(expected.begin(), expected.end(), mask); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } template <typename T> struct create_chrono_scalar { template <typename ChronoT = T, typename... Args> typename std::enable_if_t< std::is_same<typename cudf::is_timestamp_t<ChronoT>::type, std::true_type>::value, cudf::timestamp_scalar<ChronoT>> operator()(Args&&... args) const { return cudf::timestamp_scalar<T>(std::forward<Args>(args)...); } template <typename ChronoT = T, typename... Args> typename std::enable_if_t< std::is_same<typename cudf::is_duration_t<ChronoT>::type, std::true_type>::value, cudf::duration_scalar<ChronoT>> operator()(Args&&... args) const { return cudf::duration_scalar<T>(std::forward<Args>(args)...); } }; template <typename T> struct CopyTestChrono : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTestChrono, cudf::test::ChronoTypes); TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarColumn) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true); bool rhs_v[] = {1, 0, 1, 1}; wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, rhs_v); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, rhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestChrono, CopyIfElseTestColumnScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); bool lhs_v[] = {0, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, lhs_v); auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), true); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, lhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true); auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), false); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, mask); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } struct CopyTestUntyped : public cudf::test::BaseFixture { }; TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch) { 
cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1}; wrapper<float> lhs_w{5, 5, 5, 5}; wrapper<int32_t> rhs_w{6, 6, 6, 6}; EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } struct StringsCopyIfElseTest : public cudf::test::BaseFixture { }; TEST_F(StringsCopyIfElseTest, CopyIfElse) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_strings1{"eee", "bb", "", "aa", "bbb", ""}; cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), valids); std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {1, 1, 0, 1, 0, 1}; bool mask_v[] = {1, 1, 1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v); auto results = cudf::copy_if_else(strings1, strings2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx) { if (mask[idx] and mask_v[idx]) h_expected.push_back(h_strings1[idx]); else h_expected.push_back(h_strings2[idx]); } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseScalarColumn) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar strings1{h_string1[0]}; std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {1, 0, 1, 0, 1, 0}; bool mask_v[] = {1, 1, 1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v); auto results = cudf::copy_if_else(strings1, strings2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) { if (mask[idx] and mask_v[idx]) { h_expected.push_back(h_string1[0]); } else { h_expected.push_back(h_strings2[idx]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseColumnScalar) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar strings1{h_string1[0]}; std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {0, 1, 1, 1, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6); auto results = cudf::copy_if_else(strings2, strings1, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) { if (mask[idx]) { h_expected.push_back(h_strings2[idx]); } else { h_expected.push_back(h_string1[0]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseScalarScalar) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? 
true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar string1{h_string1[0]}; std::vector<const char*> h_string2{"aaa"}; cudf::string_scalar string2{h_string2[0], false}; constexpr cudf::size_type mask_size = 6; bool mask[] = {1, 0, 1, 0, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + mask_size); auto results = cudf::copy_if_else(string1, string2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(mask_size); ++idx) { if (mask[idx]) { h_expected.push_back(h_string1[0]); } else { h_expected.push_back(h_string2[0]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } template <typename T> struct FixedPointTypes : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(FixedPointTypes, cudf::test::FixedPointTypes); TYPED_TEST(FixedPointTypes, FixedPointSimple) { using namespace numeric; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0}; auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}}; auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-2}}; auto const expected = fp_wrapper{{0, 220, 330, 440, 0, 0}, scale_type{-2}}; auto const result = cudf::copy_if_else(a, b, mask); CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } TYPED_TEST(FixedPointTypes, FixedPointLarge) { using namespace numeric; using namespace cudf::test; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto a = thrust::make_counting_iterator(-1000); auto b = thrust::make_constant_iterator(0); auto m = cudf::detail::make_counting_transform_iterator(-1000, [](int i) { return i > 0; }); auto e = cudf::detail::make_counting_transform_iterator(-1000, [](int i) { return ::max(0, i); }); auto const mask = cudf::test::fixed_width_column_wrapper<bool>(m, m + 2000); auto const A = fp_wrapper{a, a + 2000, scale_type{-3}}; auto const B = fp_wrapper{b, b + 2000, scale_type{-3}}; auto const expected = fp_wrapper{e, e + 2000, scale_type{-3}}; auto const result = cudf::copy_if_else(A, B, mask); CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } TYPED_TEST(FixedPointTypes, FixedPointScaleMismatch) { using namespace numeric; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0}; auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}}; auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-1}}; EXPECT_THROW(cudf::copy_if_else(a, b, mask), cudf::logic_error); }
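// --- Editor's illustrative sketch (not part of the test file above, and not cudf's real
// kernel): the element-wise behaviour the copy_if_else tests encode. cudf operates on packed
// null bitmasks through pair iterators; this minimal CUDA version assumes plain per-element
// boolean validity arrays and ignores the case where the selection mask itself contains nulls.
// Semantics shown: out[i] = mask[i] ? lhs[i] : rhs[i], with the output's validity taken from
// whichever side was selected.
template <typename T>
__global__ void naive_copy_if_else(T const* lhs, bool const* lhs_valid,
                                   T const* rhs, bool const* rhs_valid,
                                   bool const* mask, T* out, bool* out_valid, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    bool const take_lhs = mask[i];
    out[i]       = take_lhs ? lhs[i] : rhs[i];
    out_valid[i] = take_lhs ? lhs_valid[i] : rhs_valid[i];
  }
}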
0a43888c3236fc791bd48d3df1916cab2ef23da8.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf_test/base_fixture.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_lists.hpp> #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/copying.hpp> #include <cudf/detail/copy_if_else.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/scalar/scalar.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> template <typename T> struct CopyTest : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypesWithoutFixedPoint); #define wrapper cudf::test::fixed_width_column_wrapper TYPED_TEST(CopyTest, CopyIfElseTestShort) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 0, 0}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 1}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 1, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 6, 6}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestManyNulls) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{{1, 0, 0, 0, 0, 0, 1}, {1, 1, 1, 1, 1, 1, 0}}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5}, {1, 1, 1, 1, 1, 1, 1}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1}); wrapper<T, int32_t> expected_w({5, 6, 6, 6, 6, 6, 6}, {1, 0, 0, 0, 0, 0, 1}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } struct copy_if_else_tiny_grid_functor { template <typename T, typename Filter, CUDF_ENABLE_IF(cudf::is_rep_layout_compatible<T>())> std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, Filter filter, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // output std::unique_ptr<cudf::column> out = cudf::allocate_like(lhs, lhs.size(), cudf::mask_allocation_policy::RETAIN, mr); // device views auto lhs_view = cudf::column_device_view::create(lhs); auto rhs_view = cudf::column_device_view::create(rhs); auto lhs_iter = cudf::detail::make_pair_iterator<T>(*lhs_view); auto rhs_iter = cudf::detail::make_pair_iterator<T>(*rhs_view); auto out_dv = cudf::mutable_column_device_view::create(*out); // call the kernel with an artificially small grid cudf::detail::copy_if_else_kernel<32, T, decltype(lhs_iter), decltype(rhs_iter), Filter, false> <<<1, 32, 0, stream.value()>>>(lhs_iter, rhs_iter, filter, *out_dv, nullptr); return out; } template <typename T, typename Filter, CUDF_ENABLE_IF(not cudf::is_rep_layout_compatible<T>())> std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs, cudf::column_view const& rhs, Filter filter, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FAIL("Unexpected test execution"); } }; std::unique_ptr<cudf::column> 
tiny_grid_launch(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& boolean_mask) { auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask); cudf::column_device_view bool_mask_device = *bool_mask_device_p; auto filter = [bool_mask_device] __device__(cudf::size_type i) { return bool_mask_device.element<bool>(i); }; return cudf::type_dispatcher(lhs.type(), copy_if_else_tiny_grid_functor{}, lhs, rhs, filter, rmm::cuda_stream_default, rmm::mr::get_current_device_resource()); } TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid) { using T = TypeParam; // make sure we span at least 2 warps int num_els = 64; bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}); auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseTestLong) { using T = TypeParam; // make sure we span at least 2 warps int num_els = 64; bool mask[] = {1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); bool lhs_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, lhs_v); bool rhs_v[] = {1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> rhs_w({6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6}, rhs_v); bool exp_v[] = {1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; wrapper<T, int32_t> expected_w({5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}, exp_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } 
TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{}; wrapper<T> lhs_w{}; wrapper<T> rhs_w{}; wrapper<T> expected_w{}; auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 0}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity2) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, {1, 1, 1, 0}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 1, 1, 0}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity3) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, {1, 0, 1, 1}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}, {1, 0, 1, 1}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity4) { using T = TypeParam; cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 0, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); wrapper<T, int32_t> expected_w({5, 6, 5, 5}); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTest, CopyIfElseBadInputLength) { using T = TypeParam; // mask length mismatch { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } // column length mismatch { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5}); wrapper<T, int32_t> rhs_w({6, 6, 6, 6}); EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } } template <typename T> struct CopyTestNumeric : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTestNumeric, cudf::test::NumericTypes); TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarColumn) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); cudf::numeric_scalar<T> lhs_w(5); const auto rhs = cudf::test::make_type_param_vector<T>({6, 6, 6, 6}); bool rhs_v[] = {1, 0, 1, 1}; wrapper<T> rhs_w(rhs.begin(), rhs.end(), rhs_v); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5}); wrapper<T> expected_w(expected.begin(), expected.end(), rhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestNumeric, CopyIfElseTestColumnScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; bool mask_v[] = {1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els, mask_v); const auto lhs = 
cudf::test::make_type_param_vector<T>({5, 5, 5, 5}); bool lhs_v[] = {0, 1, 1, 1}; wrapper<T> lhs_w(lhs.begin(), lhs.end(), lhs_v); cudf::numeric_scalar<T> rhs_w(6); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 6}); wrapper<T> expected_w(expected.begin(), expected.end(), lhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestNumeric, CopyIfElseTestScalarScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); cudf::numeric_scalar<T> lhs_w(5); cudf::numeric_scalar<T> rhs_w(6, false); const auto expected = cudf::test::make_type_param_vector<T>({5, 6, 6, 5}); wrapper<T> expected_w(expected.begin(), expected.end(), mask); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } template <typename T> struct create_chrono_scalar { template <typename ChronoT = T, typename... Args> typename std::enable_if_t< std::is_same<typename cudf::is_timestamp_t<ChronoT>::type, std::true_type>::value, cudf::timestamp_scalar<ChronoT>> operator()(Args&&... args) const { return cudf::timestamp_scalar<T>(std::forward<Args>(args)...); } template <typename ChronoT = T, typename... Args> typename std::enable_if_t< std::is_same<typename cudf::is_duration_t<ChronoT>::type, std::true_type>::value, cudf::duration_scalar<ChronoT>> operator()(Args&&... args) const { return cudf::duration_scalar<T>(std::forward<Args>(args)...); } }; template <typename T> struct CopyTestChrono : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(CopyTestChrono, cudf::test::ChronoTypes); TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarColumn) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true); bool rhs_v[] = {1, 0, 1, 1}; wrapper<T, int32_t> rhs_w({6, 6, 6, 6}, rhs_v); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, rhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestChrono, CopyIfElseTestColumnScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); bool lhs_v[] = {0, 1, 1, 1}; wrapper<T, int32_t> lhs_w({5, 5, 5, 5}, lhs_v); auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), true); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, lhs_v); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } TYPED_TEST(CopyTestChrono, CopyIfElseTestScalarScalar) { using T = TypeParam; int num_els = 4; bool mask[] = {1, 0, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + num_els); auto lhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(5), true); auto rhs_w = create_chrono_scalar<T>{}(cudf::test::make_type_param_scalar<T>(6), false); wrapper<T, int32_t> expected_w({5, 6, 6, 5}, mask); auto out = cudf::copy_if_else(lhs_w, rhs_w, mask_w); CUDF_TEST_EXPECT_COLUMNS_EQUAL(out->view(), expected_w); } struct CopyTestUntyped : public cudf::test::BaseFixture { }; TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch) { cudf::test::fixed_width_column_wrapper<bool> mask_w{1, 1, 1, 1}; wrapper<float> lhs_w{5, 5, 5, 5}; wrapper<int32_t> rhs_w{6, 6, 6, 
6}; EXPECT_THROW(cudf::copy_if_else(lhs_w, rhs_w, mask_w), cudf::logic_error); } struct StringsCopyIfElseTest : public cudf::test::BaseFixture { }; TEST_F(StringsCopyIfElseTest, CopyIfElse) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_strings1{"eee", "bb", "", "aa", "bbb", "ééé"}; cudf::test::strings_column_wrapper strings1(h_strings1.begin(), h_strings1.end(), valids); std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {1, 1, 0, 1, 0, 1}; bool mask_v[] = {1, 1, 1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v); auto results = cudf::copy_if_else(strings1, strings2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx) { if (mask[idx] and mask_v[idx]) h_expected.push_back(h_strings1[idx]); else h_expected.push_back(h_strings2[idx]); } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseScalarColumn) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar strings1{h_string1[0]}; std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {1, 0, 1, 0, 1, 0}; bool mask_v[] = {1, 1, 1, 1, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6, mask_v); auto results = cudf::copy_if_else(strings1, strings2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) { if (mask[idx] and mask_v[idx]) { h_expected.push_back(h_string1[0]); } else { h_expected.push_back(h_strings2[idx]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseColumnScalar) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar strings1{h_string1[0]}; std::vector<const char*> h_strings2{"zz", "", "yyy", "w", "ééé", "ooo"}; cudf::test::strings_column_wrapper strings2(h_strings2.begin(), h_strings2.end(), valids); bool mask[] = {0, 1, 1, 1, 0, 1}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + 6); auto results = cudf::copy_if_else(strings2, strings1, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(h_strings2.size()); ++idx) { if (mask[idx]) { h_expected.push_back(h_strings2[idx]); } else { h_expected.push_back(h_string1[0]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } TEST_F(StringsCopyIfElseTest, CopyIfElseScalarScalar) { auto valids = cudf::detail::make_counting_transform_iterator( 0, [](auto i) { return i % 2 == 0 ? 
true : false; }); std::vector<const char*> h_string1{"eee"}; cudf::string_scalar string1{h_string1[0]}; std::vector<const char*> h_string2{"aaa"}; cudf::string_scalar string2{h_string2[0], false}; constexpr cudf::size_type mask_size = 6; bool mask[] = {1, 0, 1, 0, 1, 0}; cudf::test::fixed_width_column_wrapper<bool> mask_w(mask, mask + mask_size); auto results = cudf::copy_if_else(string1, string2, mask_w); std::vector<const char*> h_expected; for (cudf::size_type idx = 0; idx < static_cast<cudf::size_type>(mask_size); ++idx) { if (mask[idx]) { h_expected.push_back(h_string1[0]); } else { h_expected.push_back(h_string2[0]); } } cudf::test::strings_column_wrapper expected(h_expected.begin(), h_expected.end(), valids); CUDF_TEST_EXPECT_COLUMNS_EQUAL(*results, expected); } template <typename T> struct FixedPointTypes : public cudf::test::BaseFixture { }; TYPED_TEST_CASE(FixedPointTypes, cudf::test::FixedPointTypes); TYPED_TEST(FixedPointTypes, FixedPointSimple) { using namespace numeric; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0}; auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}}; auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-2}}; auto const expected = fp_wrapper{{0, 220, 330, 440, 0, 0}, scale_type{-2}}; auto const result = cudf::copy_if_else(a, b, mask); CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } TYPED_TEST(FixedPointTypes, FixedPointLarge) { using namespace numeric; using namespace cudf::test; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto a = thrust::make_counting_iterator(-1000); auto b = thrust::make_constant_iterator(0); auto m = cudf::detail::make_counting_transform_iterator(-1000, [](int i) { return i > 0; }); auto e = cudf::detail::make_counting_transform_iterator(-1000, [](int i) { return std::max(0, i); }); auto const mask = cudf::test::fixed_width_column_wrapper<bool>(m, m + 2000); auto const A = fp_wrapper{a, a + 2000, scale_type{-3}}; auto const B = fp_wrapper{b, b + 2000, scale_type{-3}}; auto const expected = fp_wrapper{e, e + 2000, scale_type{-3}}; auto const result = cudf::copy_if_else(A, B, mask); CUDF_TEST_EXPECT_COLUMNS_EQUAL(expected, result->view()); } TYPED_TEST(FixedPointTypes, FixedPointScaleMismatch) { using namespace numeric; using decimalXX = TypeParam; using RepType = cudf::device_storage_type_t<decimalXX>; using fp_wrapper = cudf::test::fixed_point_column_wrapper<RepType>; auto const mask = cudf::test::fixed_width_column_wrapper<bool>{0, 1, 1, 1, 0, 0}; auto const a = fp_wrapper{{110, 220, 330, 440, 550, 660}, scale_type{-2}}; auto const b = fp_wrapper{{0, 0, 0, 0, 0, 0}, scale_type{-1}}; EXPECT_THROW(cudf::copy_if_else(a, b, mask), cudf::logic_error); }
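// --- Editor's sketch (taken from neither file): the paired .hip/.cu sources above differ only
// in API spellings; the one structural rewrite hipify performs is on the kernel launch, mapping
//   kernel<<<grid, block, sharedMemBytes, stream>>>(args...)                     // CUDA
// to
//   hipLaunchKernelGGL((kernel), grid, block, sharedMemBytes, stream, args...)   // HIP
// A dummy kernel (scale_by_two is a made-up name) makes the two spellings concrete:
__global__ void scale_by_two(float* v, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= 2.0f;
}

void launch_scale_by_two(float* d_v, int n, cudaStream_t stream)
{
  // CUDA spelling, as used by copy_if_else_tiny_grid_functor in the .cu file above:
  scale_by_two<<<(n + 255) / 256, 256, 0, stream>>>(d_v, n);
  // The hipified twin of this call would instead read:
  //   hipLaunchKernelGGL(( scale_by_two), dim3((n + 255) / 256), dim3(256), 0, stream, d_v, n);
}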
13e477f96ec5ce7f8888e3b93f87703e3bc59233.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "activations.h" #include "hip/hip_runtime.h" } __device__ float lhtan_activate_kernel(float x) { <<<<<<< HEAD if(x < 0) return .001f*x; if(x > 1) return .001f*(x-1.f) + 1.f; ======= if(x < 0) return .001*x; if(x > 1) return .001*(x-1) + 1; >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 return x; } __device__ float lhtan_gradient_kernel(float x) { if(x > 0 && x < 1) return 1; return .001; } __device__ float hardtan_activate_kernel(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } __device__ float linear_activate_kernel(float x){return x;} <<<<<<< HEAD __device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));} __device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;} __device__ float relu_activate_kernel(float x){return x*(x>0);} __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);} __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;} __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;} __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;} __device__ float tanh_activate_kernel(float x){return (2.f/(1 + expf(-2*x)) - 1);} __device__ float plse_activate_kernel(float x) { if(x < -4) return .01f * (x + 4); if(x > 4) return .01f * (x - 4) + 1; return .125f*x + .5f; } __device__ float stair_activate_kernel(float x) { int n = floorf(x); if (n%2 == 0) return floorf(x/2); else return (x - n) + floorf(x/2); ======= __device__ float logistic_activate_kernel(float x){return 1./(1. + exp(-x));} __device__ float loggy_activate_kernel(float x){return 2./(1. + exp(-x)) - 1;} __device__ float relu_activate_kernel(float x){return x*(x>0);} __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);} __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01*x;} __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1*x;} __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1*x;} __device__ float tanh_activate_kernel(float x){return (2/(1 + exp(-2*x)) - 1);} __device__ float plse_activate_kernel(float x) { if(x < -4) return .01 * (x + 4); if(x > 4) return .01 * (x - 4) + 1; return .125*x + .5; } __device__ float stair_activate_kernel(float x) { int n = floor(x); if (n%2 == 0) return floor(x/2.); else return (x - n) + floor(x/2.); >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 } __device__ float hardtan_gradient_kernel(float x) { if (x > -1 && x < 1) return 1; return 0; } __device__ float linear_gradient_kernel(float x){return 1;} __device__ float logistic_gradient_kernel(float x){return (1-x)*x;} __device__ float loggy_gradient_kernel(float x) { <<<<<<< HEAD float y = (x+1)/2; ======= float y = (x+1.)/2.; >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 return 2*(1-y)*y; } __device__ float relu_gradient_kernel(float x){return (x>0);} __device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);} <<<<<<< HEAD __device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;} __device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;} __device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;} __device__ float tanh_gradient_kernel(float x){return 1-x*x;} __device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? 
.01f : .125f;} __device__ float stair_gradient_kernel(float x) { if (floorf(x) == x) return 0; ======= __device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01;} __device__ float ramp_gradient_kernel(float x){return (x>0)+.1;} __device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1;} __device__ float tanh_gradient_kernel(float x){return 1-x*x;} __device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01 : .125;} __device__ float stair_gradient_kernel(float x) { if (floor(x) == x) return 0; >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 return 1; } __device__ float activate_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate_kernel(x); case LOGISTIC: return logistic_activate_kernel(x); case LOGGY: return loggy_activate_kernel(x); case RELU: return relu_activate_kernel(x); case ELU: return elu_activate_kernel(x); case RELIE: return relie_activate_kernel(x); case RAMP: return ramp_activate_kernel(x); case LEAKY: return leaky_activate_kernel(x); case TANH: return tanh_activate_kernel(x); case PLSE: return plse_activate_kernel(x); case STAIR: return stair_activate_kernel(x); case HARDTAN: return hardtan_activate_kernel(x); case LHTAN: return lhtan_activate_kernel(x); } return 0; } __device__ float gradient_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_gradient_kernel(x); case LOGISTIC: return logistic_gradient_kernel(x); case LOGGY: return loggy_gradient_kernel(x); case RELU: return relu_gradient_kernel(x); case ELU: return elu_gradient_kernel(x); case RELIE: return relie_gradient_kernel(x); case RAMP: return ramp_gradient_kernel(x); case LEAKY: return leaky_gradient_kernel(x); case TANH: return tanh_gradient_kernel(x); case PLSE: return plse_gradient_kernel(x); case STAIR: return stair_gradient_kernel(x); case HARDTAN: return hardtan_gradient_kernel(x); case LHTAN: return lhtan_gradient_kernel(x); } return 0; } <<<<<<< HEAD __global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int i = id % s; int b = id / s; float x1 = x[b*s + i]; float x2 = x[b*s + s/2 + i]; if(id < n) { float de = dy[id]; dx[b*s + i] = x2*de; dx[b*s + s/2 + i] = x1*de; } } extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y) { hipLaunchKernelGGL(( binary_gradient_array_kernel), dim3(cuda_gridsize(n/2)), dim3(BLOCK), 0, 0, x, dx, n/2, size, a, y); check_error(hipPeekAtLastError()); } __global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int i = id % s; int b = id / s; float x1 = x[b*s + i]; float x2 = x[b*s + s/2 + i]; if(id < n) y[id] = x1*x2; } extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y) { hipLaunchKernelGGL(( binary_activate_array_kernel), dim3(cuda_gridsize(n/2)), dim3(BLOCK), 0, 0, x, n/2, size, a, y); check_error(hipPeekAtLastError()); } ======= >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 __global__ void activate_array_kernel(float *x, int n, ACTIVATION a) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) x[i] = activate_kernel(x[i], a); } __global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) delta[i] *= 
gradient_kernel(x[i], a); } extern "C" void activate_array_gpu(float *x, int n, ACTIVATION a) { hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a); check_error(hipPeekAtLastError()); } extern "C" void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta) { hipLaunchKernelGGL(( gradient_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, a, delta); check_error(hipPeekAtLastError()); }
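// --- Editor's usage sketch (toy_layer and its fields are hypothetical names; darknet's real
// layer struct differs, and this assumes a GPU build so the *_gpu prototypes in activations.h
// are visible): how the two entry points defined above are typically driven from a layer's
// forward and backward pass.
extern "C" {
#include "activations.h"
}

struct toy_layer {
    float *output_gpu;     // device buffer of layer outputs
    float *delta_gpu;      // device buffer of incoming gradients
    int outputs;           // number of elements in the buffers
    ACTIVATION activation; // which activation the layer uses
};

void toy_forward(toy_layer l)
{
    // forward pass: apply the activation in place, x[i] = f(x[i])
    activate_array_gpu(l.output_gpu, l.outputs, l.activation);
}

void toy_backward(toy_layer l)
{
    // backward pass: scale the gradients by f'(output), delta[i] *= f'(x[i])
    gradient_array_gpu(l.output_gpu, l.outputs, l.activation, l.delta_gpu);
}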
13e477f96ec5ce7f8888e3b93f87703e3bc59233.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "activations.h" #include "cuda.h" } __device__ float lhtan_activate_kernel(float x) { <<<<<<< HEAD if(x < 0) return .001f*x; if(x > 1) return .001f*(x-1.f) + 1.f; ======= if(x < 0) return .001*x; if(x > 1) return .001*(x-1) + 1; >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 return x; } __device__ float lhtan_gradient_kernel(float x) { if(x > 0 && x < 1) return 1; return .001; } __device__ float hardtan_activate_kernel(float x) { if (x < -1) return -1; if (x > 1) return 1; return x; } __device__ float linear_activate_kernel(float x){return x;} <<<<<<< HEAD __device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));} __device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;} __device__ float relu_activate_kernel(float x){return x*(x>0);} __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);} __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;} __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;} __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;} __device__ float tanh_activate_kernel(float x){return (2.f/(1 + expf(-2*x)) - 1);} __device__ float plse_activate_kernel(float x) { if(x < -4) return .01f * (x + 4); if(x > 4) return .01f * (x - 4) + 1; return .125f*x + .5f; } __device__ float stair_activate_kernel(float x) { int n = floorf(x); if (n%2 == 0) return floorf(x/2); else return (x - n) + floorf(x/2); ======= __device__ float logistic_activate_kernel(float x){return 1./(1. + exp(-x));} __device__ float loggy_activate_kernel(float x){return 2./(1. + exp(-x)) - 1;} __device__ float relu_activate_kernel(float x){return x*(x>0);} __device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);} __device__ float relie_activate_kernel(float x){return (x>0) ? x : .01*x;} __device__ float ramp_activate_kernel(float x){return x*(x>0)+.1*x;} __device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1*x;} __device__ float tanh_activate_kernel(float x){return (2/(1 + exp(-2*x)) - 1);} __device__ float plse_activate_kernel(float x) { if(x < -4) return .01 * (x + 4); if(x > 4) return .01 * (x - 4) + 1; return .125*x + .5; } __device__ float stair_activate_kernel(float x) { int n = floor(x); if (n%2 == 0) return floor(x/2.); else return (x - n) + floor(x/2.); >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 } __device__ float hardtan_gradient_kernel(float x) { if (x > -1 && x < 1) return 1; return 0; } __device__ float linear_gradient_kernel(float x){return 1;} __device__ float logistic_gradient_kernel(float x){return (1-x)*x;} __device__ float loggy_gradient_kernel(float x) { <<<<<<< HEAD float y = (x+1)/2; ======= float y = (x+1.)/2.; >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 return 2*(1-y)*y; } __device__ float relu_gradient_kernel(float x){return (x>0);} __device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);} <<<<<<< HEAD __device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;} __device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;} __device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;} __device__ float tanh_gradient_kernel(float x){return 1-x*x;} __device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? 
.01f : .125f;} __device__ float stair_gradient_kernel(float x) { if (floorf(x) == x) return 0; ======= __device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01;} __device__ float ramp_gradient_kernel(float x){return (x>0)+.1;} __device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1;} __device__ float tanh_gradient_kernel(float x){return 1-x*x;} __device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01 : .125;} __device__ float stair_gradient_kernel(float x) { if (floor(x) == x) return 0; >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 return 1; } __device__ float activate_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_activate_kernel(x); case LOGISTIC: return logistic_activate_kernel(x); case LOGGY: return loggy_activate_kernel(x); case RELU: return relu_activate_kernel(x); case ELU: return elu_activate_kernel(x); case RELIE: return relie_activate_kernel(x); case RAMP: return ramp_activate_kernel(x); case LEAKY: return leaky_activate_kernel(x); case TANH: return tanh_activate_kernel(x); case PLSE: return plse_activate_kernel(x); case STAIR: return stair_activate_kernel(x); case HARDTAN: return hardtan_activate_kernel(x); case LHTAN: return lhtan_activate_kernel(x); } return 0; } __device__ float gradient_kernel(float x, ACTIVATION a) { switch(a){ case LINEAR: return linear_gradient_kernel(x); case LOGISTIC: return logistic_gradient_kernel(x); case LOGGY: return loggy_gradient_kernel(x); case RELU: return relu_gradient_kernel(x); case ELU: return elu_gradient_kernel(x); case RELIE: return relie_gradient_kernel(x); case RAMP: return ramp_gradient_kernel(x); case LEAKY: return leaky_gradient_kernel(x); case TANH: return tanh_gradient_kernel(x); case PLSE: return plse_gradient_kernel(x); case STAIR: return stair_gradient_kernel(x); case HARDTAN: return hardtan_gradient_kernel(x); case LHTAN: return lhtan_gradient_kernel(x); } return 0; } <<<<<<< HEAD __global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int i = id % s; int b = id / s; float x1 = x[b*s + i]; float x2 = x[b*s + s/2 + i]; if(id < n) { float de = dy[id]; dx[b*s + i] = x2*de; dx[b*s + s/2 + i] = x1*de; } } extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y) { binary_gradient_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>(x, dx, n/2, size, a, y); check_error(cudaPeekAtLastError()); } __global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; int i = id % s; int b = id / s; float x1 = x[b*s + i]; float x2 = x[b*s + s/2 + i]; if(id < n) y[id] = x1*x2; } extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y) { binary_activate_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>(x, n/2, size, a, y); check_error(cudaPeekAtLastError()); } ======= >>>>>>> 4383fc19d5d5b411122ff4d8b878b399ee093ab1 __global__ void activate_array_kernel(float *x, int n, ACTIVATION a) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) x[i] = activate_kernel(x[i], a); } __global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(i < n) delta[i] *= gradient_kernel(x[i], a); } extern "C" void activate_array_gpu(float *x, int n, 
ACTIVATION a) { activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a); check_error(cudaPeekAtLastError()); } extern "C" void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta) { gradient_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a, delta); check_error(cudaPeekAtLastError()); }
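// --- Editor's sketch of the cuda_gridsize() helper both files above rely on. The real
// definition lives elsewhere in darknet (cuda.c) and may differ in detail, and BLOCK is
// assumed to come from darknet's cuda.h; the intent shown here is: cover n threads with
// BLOCK-sized blocks, folding into gridDim.y when gridDim.x would exceed the 65535 limit,
// which is why the kernels above index with (blockIdx.x + blockIdx.y*gridDim.x).
#include <math.h>

dim3 cuda_gridsize_sketch(size_t n)
{
    size_t k = (n - 1) / BLOCK + 1;        // number of blocks needed
    size_t x = k;
    size_t y = 1;
    if (x > 65535) {                       // too wide for gridDim.x: fold into a 2-D grid
        x = (size_t)ceil(sqrt((double)k));
        y = (n - 1) / (x * BLOCK) + 1;
    }
    dim3 d = {(unsigned)x, (unsigned)y, 1};
    return d;
}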
c693c669734d1ea04d9288f46baf80696bcce341.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_zero_x; int xdim0_initialise_chunk_kernel_zero_x_h = -1; #undef OPS_ACC0 #define OPS_ACC0(x,y) (x+xdim0_initialise_chunk_kernel_zero_x*(y)) //user function __device__ void initialise_chunk_kernel_zero_x_gpu(double *var) { *var = 0.0; } #undef OPS_ACC0 __global__ void ops_initialise_chunk_kernel_zero_x( double* __restrict arg0, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 0*1 * xdim0_initialise_chunk_kernel_zero_x; if (idx_x < size0 && idx_y < size1) { initialise_chunk_kernel_zero_x_gpu(arg0); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_zero_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0) { #else void ops_par_loop_initialise_chunk_kernel_zero_x_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; #endif //Timing double t1,t2,c1,c2; ops_arg args[1] = { arg0}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,1,range,6)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(6,"initialise_chunk_kernel_zero_x"); OPS_kernels[6].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; if (xdim0 != xdim0_initialise_chunk_kernel_zero_x_h) { hipMemcpyToSymbol( xdim0_initialise_chunk_kernel_zero_x, &xdim0, sizeof(int) ); xdim0_initialise_chunk_kernel_zero_x_h = xdim0; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? 
args[0].dat->type_size : args[0].dat->elem_size); char *p_a[1]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 1); ops_halo_exchanges(args,1,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[6].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) hipLaunchKernelGGL(( ops_initialise_chunk_kernel_zero_x), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0],x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[6].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 1); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[6].mpi_time += t2-t1; OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg0); } } #ifdef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_zero_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 6; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 6; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 1; desc->args = (ops_arg*)malloc(1*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->function = ops_par_loop_initialise_chunk_kernel_zero_x_execute; if (OPS_diags > 1) { ops_timing_realloc(6,"initialise_chunk_kernel_zero_x"); } ops_enqueue_kernel(desc); } #endif
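// --- Editor's sketch (hypothetical user kernel, not generated by ops.py): why the row stride
// is stashed in __constant__ memory. User kernels address 2-D neighbours through the
// OPS_ACC0(x,y) macro defined and #undef'd above, which needs xdim0_* on the device; the host
// stub therefore re-uploads it with hipMemcpyToSymbol only when the dat's size changes. A
// kernel that actually used a stencil would look roughly like this (smooth_x_gpu is made up):
#define OPS_ACC0(x, y) (x + xdim0_initialise_chunk_kernel_zero_x * (y))
__device__ void smooth_x_gpu(double *var)
{
    // average the two x-neighbours into the current point
    var[OPS_ACC0(0, 0)] = 0.5 * (var[OPS_ACC0(-1, 0)] + var[OPS_ACC0(1, 0)]);
}
#undef OPS_ACC0
// initialise_chunk_kernel_zero_x needs no neighbours, so its body reduces to *var = 0.0.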
c693c669734d1ea04d9288f46baf80696bcce341.cu
// // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_zero_x; int xdim0_initialise_chunk_kernel_zero_x_h = -1; #undef OPS_ACC0 #define OPS_ACC0(x,y) (x+xdim0_initialise_chunk_kernel_zero_x*(y)) //user function __device__ void initialise_chunk_kernel_zero_x_gpu(double *var) { *var = 0.0; } #undef OPS_ACC0 __global__ void ops_initialise_chunk_kernel_zero_x( double* __restrict arg0, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 0*1 * xdim0_initialise_chunk_kernel_zero_x; if (idx_x < size0 && idx_y < size1) { initialise_chunk_kernel_zero_x_gpu(arg0); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_zero_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0) { #else void ops_par_loop_initialise_chunk_kernel_zero_x_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; #endif //Timing double t1,t2,c1,c2; ops_arg args[1] = { arg0}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,1,range,6)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(6,"initialise_chunk_kernel_zero_x"); OPS_kernels[6].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; if (xdim0 != xdim0_initialise_chunk_kernel_zero_x_h) { cudaMemcpyToSymbol( xdim0_initialise_chunk_kernel_zero_x, &xdim0, sizeof(int) ); xdim0_initialise_chunk_kernel_zero_x_h = xdim0; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? 
args[0].dat->type_size : args[0].dat->elem_size); char *p_a[1]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 1); ops_halo_exchanges(args,1,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[6].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) ops_initialise_chunk_kernel_zero_x<<<grid, tblock >>> ( (double *)p_a[0],x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[6].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 1); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[6].mpi_time += t2-t1; OPS_kernels[6].transfer += ops_compute_transfer(dim, start, end, &arg0); } } #ifdef OPS_LAZY void ops_par_loop_initialise_chunk_kernel_zero_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 6; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 6; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 1; desc->args = (ops_arg*)malloc(1*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->function = ops_par_loop_initialise_chunk_kernel_zero_x_execute; if (OPS_diags > 1) { ops_timing_realloc(6,"initialise_chunk_kernel_zero_x"); } ops_enqueue_kernel(desc); } #endif
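Note on the pair above: the .hip and .cu versions of this generated loop differ mainly in the constant-symbol copy, the kernel-launch syntax, and the hip*/cuda* error-check calls. A minimal self-contained sketch of that mapping follows; demo_zero and xdim0_demo are illustrative names, not part of the OPS-generated API.

__constant__ int xdim0_demo;   // illustrative stand-in for xdim0_initialise_chunk_kernel_zero_x

__global__ void demo_zero(double *arg0, int size0, int size1) {
    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
    int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
    // same stride pattern as the generated kernel: the y-stride is 0 * xdim0
    if (idx_x < size0 && idx_y < size1)
        arg0[idx_x * 1 + idx_y * 0 * xdim0_demo] = 0.0;
}

void launch_demo(double *d_arg0, int x_size, int y_size, int xdim0) {
    // CUDA form, as in the .cu file:
    cudaMemcpyToSymbol(xdim0_demo, &xdim0, sizeof(int));
    dim3 grid((x_size - 1) / 32 + 1, (y_size - 1) / 32 + 1, 1);
    dim3 tblock(32, 32, 1);
    demo_zero<<<grid, tblock>>>(d_arg0, x_size, y_size);
    // HIP form emitted by hipify, as in the .hip file:
    //   hipMemcpyToSymbol(xdim0_demo, &xdim0, sizeof(int));
    //   hipLaunchKernelGGL(demo_zero, grid, tblock, 0, 0, d_arg0, x_size, y_size);
}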
798956a4bc8d6c0c413474c40c2d095a89bdbaf1.hip
// !!! This is a file automatically generated by hipify!!! /** * Program name: Mosaic filter with average color calculation * Compiling envrionment: VS2013 + CUDA7.0 * * @Author: Jincao Zhang * @Version 1.0 (22 Sep 2016) */ #ifndef __HIPCC__ #define __HIPCC__ #endif #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_vector_types.h> #include <vector_functions.h> #include "hip/hip_texture_types.h" #include "texture_fetch_functions.hpp" #include <conio.h> #include <math.h> #include <time.h> #include <omp.h> #define FAILURE 0 #define SUCCESS !FAILURE #define USER_NAME "acq15jz" void print_help();//print command line help int process_command_line(int argc, char *argv[]); int getImageSize(char* filename);// get input image dimentions void output_image_file(char* filename, uchar4* image);//output image as ppm void input_image_file(char* filename, uchar4* image);//input image as ppm void checkCUDAError(const char *msg); //texture<uchar4, hipTextureType2D, hipReadModeElementType> sample2D; typedef enum MODE { CPU, OPENMP, CUDA, ALL } MODE; //define program mode int IMAGE_DIM; int c; // mosaic cell size MODE execution_mode = CPU; char* input_FileName; char* output_FileName; unsigned int ppm_format = 0; /** * CUDA kernel of mosaic filter * * @param image input * image_output output * Tile_size Mosaic cell size * IMAGE_DIM size of input images */ __global__ void Mosaic(uchar4 *image, uchar4 *image_output, int Tile_size, int IMAGE_DIM) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int i = x + y * blockDim.x * gridDim.x; //Mosaic index offset int Mosaic_IndX = (x / Tile_size)*Tile_size; int Mosaic_IndY = (y / Tile_size)*Tile_size; int redsum; // sum of red int greensum; // sum of green int bluesum; // sum of blue int count; // pixel count in a mosaic block uchar4* pixel_temp; float4 average = make_float4(0, 0, 0, 0); redsum = greensum = bluesum = count = 0; for (int i = 0; i <= Tile_size; i++){ for (int j = 0; j <= Tile_size; j++){ int x_offset = Mosaic_IndX + i; int y_offset = Mosaic_IndY + j; //handle boundry condition if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * blockDim.x * gridDim.x; pixel_temp = &image[offset]; //pixel_temp = tex2D(sample2D, x_offset, y_offset); //sum values redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } //calculate average average.x = redsum / count; average.y = greensum / count; average.z = bluesum / count; //output image image_output[i].x = (unsigned char)average.x; image_output[i].y = (unsigned char)average.y; image_output[i].z = (unsigned char)average.z; image_output[i].w = 255; } int main(int argc, char *argv[]) { if (process_command_line(argc, argv) == FAILURE) return 1; unsigned int image_size; uchar4 *d_image, *d_image_output; uchar4 *h_image, *h_output; hipEvent_t start, stop; clock_t begin, end; double seconds;//cpu and openmp timer float ms;//cuda timer IMAGE_DIM = getImageSize(input_FileName); image_size = IMAGE_DIM*IMAGE_DIM*sizeof(uchar4); //TODO: read input image file (either binary or plain text PPM) h_image = (uchar4*)malloc(image_size); input_image_file(input_FileName, h_image); //TODO: execute the mosaic filter based on the mode switch (execution_mode){ case (CPU) : { h_output = (uchar4*)malloc(image_size); int redsum; // sum of red int 
greensum; // sum of green int bluesum; // sum of blue int count; // pixel count in a mosaic block int x, y, tx, ty; uchar4* pixel_temp; int r, g, b; r = 0; g = 0; b = 0; //TODO: starting timing here begin = clock(); //-----------------------------------------------// for (y = 0; y < IMAGE_DIM; y++){ for (x = 0; x < IMAGE_DIM; x++){ int block_indX = (x / c)*c; int block_indY = (y / c)*c; redsum = greensum = bluesum = count = 0; for (ty = 0; ty < c; ty++){ for (tx = 0; tx < c; tx++){ int x_offset = block_indX + tx; int y_offset = block_indY + ty; if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * IMAGE_DIM; pixel_temp = &h_image[offset]; //sum values redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } r = redsum / count; g = greensum / count; b = bluesum / count; for (ty = 0; ty < c; ty++){ for (tx = 0; tx < c; tx++){ int x_offset = block_indX + tx; int y_offset = block_indY + ty; if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * IMAGE_DIM; h_output[offset].x = (unsigned char)r; h_output[offset].y = (unsigned char)g; h_output[offset].z = (unsigned char)b; h_output[offset].w = 255; } } } } //TODO:calculate the average colour value r = 0; g = 0; b = 0; for (int j = 0; j < IMAGE_DIM; j += c){ for (int i = 0; i < IMAGE_DIM; i += c){ int offset = i + j*IMAGE_DIM; pixel_temp = &h_output[offset]; redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } r = redsum / count; g = greensum / count; b = bluesum / count; //-----------------------------------------------// end = clock(); seconds = (end - begin) / (double)CLOCKS_PER_SEC; output_image_file(output_FileName, h_output); free(h_output); // Output the average colour value for the image printf("Serial CPU Average image colour red = %d, green = %d, blue = %d \n", r, g, b); //TODO: implement part 3 of the assignment //TODO: end timing here printf("Serial CPU mode execution time took %f s \n", seconds); break; } case (OPENMP) : { h_output = (uchar4*)malloc(image_size); int x, y; //TODO: starting timing here begin = clock(); omp_set_nested(1); #pragma omp parallel for private(y,x) for (y = 0; y < IMAGE_DIM; y++){ for (x = 0; x < IMAGE_DIM; x++){ int block_indX = (x / c)*c; int block_indY = (y / c)*c; int redsum = 0; int greensum = 0; int bluesum = 0; int count = 0; int r, g, b; r = 0; g = 0; b = 0; int tx, ty; for (ty = 0; ty < c; ty++){ for (tx = 0; tx < c; tx++){ int x_offset = block_indX + tx; int y_offset = block_indY + ty; if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * IMAGE_DIM; uchar4* pixel_temp = &h_image[offset]; redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } r = redsum / count; g = greensum / count; b = bluesum / count; for (ty = 0; ty < c; ty++){ for (tx = 0; tx < c; tx++){ int x_offset = block_indX + tx; int y_offset = block_indY + ty; if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * IMAGE_DIM; h_output[offset].x = (unsigned char)r; h_output[offset].y = (unsigned char)g; h_output[offset].z = (unsigned char)b; h_output[offset].w = 255; } } } } int r = 0; int g = 0; int b = 0; int redsum = 0, greensum = 0, bluesum = 0, count = 0; #pragma omp parallel for 
reduction(+:redsum,greensum,bluesum,count) for (int j = 0; j < IMAGE_DIM; j += c){ for (int i = 0; i < IMAGE_DIM; i += c){ int offset = i + j*IMAGE_DIM; uchar4* pixel_temp = &h_output[offset]; redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } r = redsum / count; g = greensum / count; b = bluesum / count; end = clock(); //TODO: starting timing here seconds = (end - begin) / (double)CLOCKS_PER_SEC; //TODO: calculate the average colour value output_image_file(output_FileName,h_output); // Output the average colour value for the image printf("OpenMP CPU Average image colour red = %d, green = %d, blue = %d \n", r, g, b); printf("OpenMP CPU mode execution time took %f s \n", seconds); free(h_output); break; } case (CUDA) : { h_output = (uchar4*)malloc(image_size); // create timers hipEventCreate(&start); hipEventCreate(&stop); // allocate memory on the GPU for the output image hipMalloc((void**)&d_image, image_size); hipMalloc((void**)&d_image_output, image_size); checkCUDAError("CUDA malloc"); // copy image to device memory hipMemcpy(d_image, h_image, image_size, hipMemcpyHostToDevice); checkCUDAError("CUDA memcpy to device"); //cuda layout and execution dim3 blocksPerGrid(IMAGE_DIM / 16, IMAGE_DIM / 16); dim3 threadsPerBlock(16, 16); //hipBindTexture(0, sample1D, d_image, image_size); //hipBindTexture2D(0, sample2D, d_image, desc, IMAGE_DIM, IMAGE_DIM, IMAGE_DIM*sizeof(uchar4)); hipEventRecord(start, 0); Mosaic << <blocksPerGrid, threadsPerBlock >> >(d_image, d_image_output, c, IMAGE_DIM); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); //hipUnbindTexture(sample1D); //hipUnbindTexture(sample2D); checkCUDAError("kernel"); hipMemcpy(h_output, d_image_output, image_size, hipMemcpyDeviceToHost); checkCUDAError("CUDA memcpy from device"); output_image_file(output_FileName, h_output); //cleanup hipEventDestroy(start); hipEventDestroy(stop); hipFree(d_image); hipFree(d_image_output); //TODO: calculate the average colour value // Output the average colour value for the image printf("CUDA Average image colour red = ???, green = ???, blue = ??? \n"); printf("CUDA mode execution time took %f s \n", ms/1000); break; } case (ALL) : { //ToDo break; } } free(h_image); return 0; } void print_help(){ printf("mosaic_%s C M -i input_file -o output_file [options]\n", USER_NAME); printf("where:\n"); printf("\tC Is the mosaic cell size in range 0->32\n"); printf("\tM Is the mode with a value of either CPU, OPENMP, CUDA or\n" "\t ALL. The mode specifies which version of the simulation\n" "\t code should execute. ALL should execute each mode in\n" "\t turn.\n"); printf("\t-i input_file Specifies an input image file\n"); printf("\t-o output_file Specifies an output image file which will be used\n" "\t to write the mosaic image\n"); printf("[options]:\n"); printf("\t-f ppm_format PPM image output format either PPM_BINARY (default) or \n" "\t PPM_PLAIN_TEXT\n " "\t -part3 Any options for part 3 of the assignment\n"); } int process_command_line(int argc, char *argv[]){ if (argc < 7){ fprintf(stderr, "Error: Missing program arguments. Correct usage is...\n"); print_help(); return FAILURE; } //first argument is always the executable name if (strcmp(argv[0],"Mosaic_acq15jz.exe")!=0){ fprintf(stderr, "Error: Wrong program name arguments. 
Correct usage is...\n"); print_help(); return FAILURE; } //read in the non optional command line arguments c = (unsigned int)atoi(argv[1]); printf("cell size %d\n",c); //TODO: read in the mode if (strcmp(argv[2], "CPU") == 0)execution_mode = CPU; else if (strcmp(argv[2], "OPENMP") == 0)execution_mode = OPENMP; else if (strcmp(argv[2], "CUDA") == 0)execution_mode = CUDA; else{ fprintf(stderr, "Error: Wrong Mode arguments. Correct usage is...\n"); print_help(); return FAILURE; } //TODO: read in the input image name if (strcmp(argv[3], "-i") == 0)input_FileName = argv[4]; else{ fprintf(stderr, "Error: Wrong program arguments. Correct usage is...\n"); print_help(); return FAILURE; }printf("filenameinput %s \n", input_FileName); //TODO: read in the output image name if (strcmp(argv[5], "-o") == 0)output_FileName = argv[6]; else{ fprintf(stderr, "Error: Wrong program arguments. Correct usage is...\n"); print_help(); return FAILURE; }printf("filenameoutput %s \n", output_FileName); return SUCCESS; } void output_image_file(char* filename,uchar4* image) { FILE *f; //output file handle //open the output file and write header info for PPM filetype f = fopen(filename, "wb"); if (f == NULL){ fprintf(stderr, "Error opening %s output file\n", output_FileName); exit(1); } fprintf(f, "P6\n"); fprintf(f, "%d \n%d\n%d\n", IMAGE_DIM, IMAGE_DIM, 255); for (int x = 0; x < IMAGE_DIM; x++){ for (int y = 0; y < IMAGE_DIM; y++){ int i = x + y*IMAGE_DIM; fwrite(&image[i], sizeof(unsigned char), 3, f); //only write rgb (ignoring a) } } fclose(f); } void input_image_file(char* filename, uchar4* image) { FILE *f; //input file handle char temp[256]; int h, w, s; //open the input file and write header info for PPM filetype f = fopen(filename, "rb"); if (f == NULL){ fprintf(stderr, "Error opening %s input file\n",input_FileName); exit(1); } fscanf(f, "%s\n", &temp); fscanf(f, "%d\n", &h); fscanf(f, "%d\n", &w); fscanf(f, "%d\n", &s); if (h != w){ fprintf(stderr, "Error: Input image file has wrong dimensions\n"); exit(1); } for (int x = 0; x < h; x++){ for (int y = 0; y < w; y++){ int i = x + y*w; fread(&image[i], sizeof(unsigned char), 3, f); //only read rgb //image[i].w = 255; } } fclose(f); } int getImageSize(char* filename) { FILE *f; //input file handle char temp[256]; int x, y, s; //open the input file and write header info for PPM filetype f = fopen(filename, "rb"); if (f == NULL){ fprintf(stderr, "Error opening %s input file for getting image size\n", input_FileName); exit(1); } fscanf(f, "%s\n", &temp); fscanf(f, "%d\n %d\n", &x, &y); printf("img dim is: %d\n", x); fscanf(f, "%d\n", &s); if (x != y){ fprintf(stderr, "Error: Input image file has wrong dimensions\n"); exit(1); } fclose(f); return x; } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA ERR1OR: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } ////launching parameters //dim3 blocksPerGrid((IMAGE_DIM + Mosaic_size - 1) / 32, (IMAGE_DIM + Mosaic_size - 1) / 32); // //dim3 threadsPerBlock(32, 32); //__global__ void _Mosaic(uchar4 *image, uchar4 *image_output, int Tile_size) { // // map from threadIdx/BlockIdx to pixel position // int x, y, i; // __shared__ uchar4 sdata[32][32]; // if (threadIdx.x < Tile_size&&threadIdx.y < Tile_size) // { // if (threadIdx.x + Tile_size * blockIdx.x < IMAGE_DIM&&threadIdx.y + Tile_size * blockIdx.y < IMAGE_DIM){ // x = threadIdx.x + Tile_size * blockIdx.x; // y = threadIdx.y + Tile_size * blockIdx.y; // i = x + y * blockDim.x * 
gridDim.x; // sdata[threadIdx.x][threadIdx.y] = image[i]; // } // // } // // __syncthreads(); // // int m, n; // int redsum; // int greensum; // int bluesum; // int count; // uchar4 pixel_temp; // float4 average = make_float4(0, 0, 0, 0); // // redsum = greensum = bluesum = count = 0; // if (threadIdx.x < Tile_size&&threadIdx.y < Tile_size) // { // if (threadIdx.x + Tile_size * blockIdx.x < IMAGE_DIM&&threadIdx.y + Tile_size * blockIdx.y < IMAGE_DIM){ // for (m = 0; m < Tile_size; m++){ // for (n = 0; n < Tile_size; n++){ // // //pixel_temp = sdata[m][n]; // pixel_temp = image[i]; // // //sum values // redsum += pixel_temp.x; // greensum += pixel_temp.y; // bluesum += pixel_temp.z; // count++; // // } // // } // // //calculate average // average.x = redsum / count; // average.y = greensum / count; // average.z = bluesum / count; // // image_output[i].x = (unsigned char)average.x; // image_output[i].y = (unsigned char)average.y; // image_output[i].z = (unsigned char)average.z; // image_output[i].w = 255; // } // // } // //}
798956a4bc8d6c0c413474c40c2d095a89bdbaf1.cu
/** * Program name: Mosaic filter with average color calculation * Compiling envrionment: VS2013 + CUDA7.0 * * @Author: Jincao Zhang * @Version 1.0 (22 Sep 2016) */ #ifndef __CUDACC__ #define __CUDACC__ #endif #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <vector_types.h> #include <vector_functions.h> #include "cuda_texture_types.h" #include "texture_fetch_functions.hpp" #include <conio.h> #include <math.h> #include <time.h> #include <omp.h> #define FAILURE 0 #define SUCCESS !FAILURE #define USER_NAME "acq15jz" void print_help();//print command line help int process_command_line(int argc, char *argv[]); int getImageSize(char* filename);// get input image dimentions void output_image_file(char* filename, uchar4* image);//output image as ppm void input_image_file(char* filename, uchar4* image);//input image as ppm void checkCUDAError(const char *msg); //texture<uchar4, cudaTextureType2D, cudaReadModeElementType> sample2D; typedef enum MODE { CPU, OPENMP, CUDA, ALL } MODE; //define program mode int IMAGE_DIM; int c; // mosaic cell size MODE execution_mode = CPU; char* input_FileName; char* output_FileName; unsigned int ppm_format = 0; /** * CUDA kernel of mosaic filter * * @param image input * image_output output * Tile_size Mosaic cell size * IMAGE_DIM size of input images */ __global__ void Mosaic(uchar4 *image, uchar4 *image_output, int Tile_size, int IMAGE_DIM) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int i = x + y * blockDim.x * gridDim.x; //Mosaic index offset int Mosaic_IndX = (x / Tile_size)*Tile_size; int Mosaic_IndY = (y / Tile_size)*Tile_size; int redsum; // sum of red int greensum; // sum of green int bluesum; // sum of blue int count; // pixel count in a mosaic block uchar4* pixel_temp; float4 average = make_float4(0, 0, 0, 0); redsum = greensum = bluesum = count = 0; for (int i = 0; i <= Tile_size; i++){ for (int j = 0; j <= Tile_size; j++){ int x_offset = Mosaic_IndX + i; int y_offset = Mosaic_IndY + j; //handle boundry condition if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * blockDim.x * gridDim.x; pixel_temp = &image[offset]; //pixel_temp = tex2D(sample2D, x_offset, y_offset); //sum values redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } //calculate average average.x = redsum / count; average.y = greensum / count; average.z = bluesum / count; //output image image_output[i].x = (unsigned char)average.x; image_output[i].y = (unsigned char)average.y; image_output[i].z = (unsigned char)average.z; image_output[i].w = 255; } int main(int argc, char *argv[]) { if (process_command_line(argc, argv) == FAILURE) return 1; unsigned int image_size; uchar4 *d_image, *d_image_output; uchar4 *h_image, *h_output; cudaEvent_t start, stop; clock_t begin, end; double seconds;//cpu and openmp timer float ms;//cuda timer IMAGE_DIM = getImageSize(input_FileName); image_size = IMAGE_DIM*IMAGE_DIM*sizeof(uchar4); //TODO: read input image file (either binary or plain text PPM) h_image = (uchar4*)malloc(image_size); input_image_file(input_FileName, h_image); //TODO: execute the mosaic filter based on the mode switch (execution_mode){ case (CPU) : { h_output = (uchar4*)malloc(image_size); int redsum; // sum of red int greensum; // sum of green int bluesum; // sum of blue int count; // 
pixel count in a mosaic block int x, y, tx, ty; uchar4* pixel_temp; int r, g, b; r = 0; g = 0; b = 0; //TODO: starting timing here begin = clock(); //-----------------------------------------------// for (y = 0; y < IMAGE_DIM; y++){ for (x = 0; x < IMAGE_DIM; x++){ int block_indX = (x / c)*c; int block_indY = (y / c)*c; redsum = greensum = bluesum = count = 0; for (ty = 0; ty < c; ty++){ for (tx = 0; tx < c; tx++){ int x_offset = block_indX + tx; int y_offset = block_indY + ty; if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * IMAGE_DIM; pixel_temp = &h_image[offset]; //sum values redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } r = redsum / count; g = greensum / count; b = bluesum / count; for (ty = 0; ty < c; ty++){ for (tx = 0; tx < c; tx++){ int x_offset = block_indX + tx; int y_offset = block_indY + ty; if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * IMAGE_DIM; h_output[offset].x = (unsigned char)r; h_output[offset].y = (unsigned char)g; h_output[offset].z = (unsigned char)b; h_output[offset].w = 255; } } } } //TODO:calculate the average colour value r = 0; g = 0; b = 0; for (int j = 0; j < IMAGE_DIM; j += c){ for (int i = 0; i < IMAGE_DIM; i += c){ int offset = i + j*IMAGE_DIM; pixel_temp = &h_output[offset]; redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } r = redsum / count; g = greensum / count; b = bluesum / count; //-----------------------------------------------// end = clock(); seconds = (end - begin) / (double)CLOCKS_PER_SEC; output_image_file(output_FileName, h_output); free(h_output); // Output the average colour value for the image printf("Serial CPU Average image colour red = %d, green = %d, blue = %d \n", r, g, b); //TODO: implement part 3 of the assignment //TODO: end timing here printf("Serial CPU mode execution time took %f s \n", seconds); break; } case (OPENMP) : { h_output = (uchar4*)malloc(image_size); int x, y; //TODO: starting timing here begin = clock(); omp_set_nested(1); #pragma omp parallel for private(y,x) for (y = 0; y < IMAGE_DIM; y++){ for (x = 0; x < IMAGE_DIM; x++){ int block_indX = (x / c)*c; int block_indY = (y / c)*c; int redsum = 0; int greensum = 0; int bluesum = 0; int count = 0; int r, g, b; r = 0; g = 0; b = 0; int tx, ty; for (ty = 0; ty < c; ty++){ for (tx = 0; tx < c; tx++){ int x_offset = block_indX + tx; int y_offset = block_indY + ty; if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * IMAGE_DIM; uchar4* pixel_temp = &h_image[offset]; redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } r = redsum / count; g = greensum / count; b = bluesum / count; for (ty = 0; ty < c; ty++){ for (tx = 0; tx < c; tx++){ int x_offset = block_indX + tx; int y_offset = block_indY + ty; if (x_offset >= IMAGE_DIM) x_offset = IMAGE_DIM-1; if (y_offset >= IMAGE_DIM) y_offset = IMAGE_DIM-1; int offset = x_offset + y_offset * IMAGE_DIM; h_output[offset].x = (unsigned char)r; h_output[offset].y = (unsigned char)g; h_output[offset].z = (unsigned char)b; h_output[offset].w = 255; } } } } int r = 0; int g = 0; int b = 0; int redsum = 0, greensum = 0, bluesum = 0, count = 0; #pragma omp parallel for reduction(+:redsum,greensum,bluesum,count) for (int j = 0; j < IMAGE_DIM; j += c){ for (int i = 
0; i < IMAGE_DIM; i += c){ int offset = i + j*IMAGE_DIM; uchar4* pixel_temp = &h_output[offset]; redsum += pixel_temp->x; greensum += pixel_temp->y; bluesum += pixel_temp->z; count++; } } r = redsum / count; g = greensum / count; b = bluesum / count; end = clock(); //TODO: starting timing here seconds = (end - begin) / (double)CLOCKS_PER_SEC; //TODO: calculate the average colour value output_image_file(output_FileName,h_output); // Output the average colour value for the image printf("OpenMP CPU Average image colour red = %d, green = %d, blue = %d \n", r, g, b); printf("OpenMP CPU mode execution time took %f s \n", seconds); free(h_output); break; } case (CUDA) : { h_output = (uchar4*)malloc(image_size); // create timers cudaEventCreate(&start); cudaEventCreate(&stop); // allocate memory on the GPU for the output image cudaMalloc((void**)&d_image, image_size); cudaMalloc((void**)&d_image_output, image_size); checkCUDAError("CUDA malloc"); // copy image to device memory cudaMemcpy(d_image, h_image, image_size, cudaMemcpyHostToDevice); checkCUDAError("CUDA memcpy to device"); //cuda layout and execution dim3 blocksPerGrid(IMAGE_DIM / 16, IMAGE_DIM / 16); dim3 threadsPerBlock(16, 16); //cudaBindTexture(0, sample1D, d_image, image_size); //cudaBindTexture2D(0, sample2D, d_image, desc, IMAGE_DIM, IMAGE_DIM, IMAGE_DIM*sizeof(uchar4)); cudaEventRecord(start, 0); Mosaic << <blocksPerGrid, threadsPerBlock >> >(d_image, d_image_output, c, IMAGE_DIM); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); //cudaUnbindTexture(sample1D); //cudaUnbindTexture(sample2D); checkCUDAError("kernel"); cudaMemcpy(h_output, d_image_output, image_size, cudaMemcpyDeviceToHost); checkCUDAError("CUDA memcpy from device"); output_image_file(output_FileName, h_output); //cleanup cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d_image); cudaFree(d_image_output); //TODO: calculate the average colour value // Output the average colour value for the image printf("CUDA Average image colour red = ???, green = ???, blue = ??? \n"); printf("CUDA mode execution time took %f s \n", ms/1000); break; } case (ALL) : { //ToDo break; } } free(h_image); return 0; } void print_help(){ printf("mosaic_%s C M -i input_file -o output_file [options]\n", USER_NAME); printf("where:\n"); printf("\tC Is the mosaic cell size in range 0->32\n"); printf("\tM Is the mode with a value of either CPU, OPENMP, CUDA or\n" "\t ALL. The mode specifies which version of the simulation\n" "\t code should execute. ALL should execute each mode in\n" "\t turn.\n"); printf("\t-i input_file Specifies an input image file\n"); printf("\t-o output_file Specifies an output image file which will be used\n" "\t to write the mosaic image\n"); printf("[options]:\n"); printf("\t-f ppm_format PPM image output format either PPM_BINARY (default) or \n" "\t PPM_PLAIN_TEXT\n " "\t -part3 Any options for part 3 of the assignment\n"); } int process_command_line(int argc, char *argv[]){ if (argc < 7){ fprintf(stderr, "Error: Missing program arguments. Correct usage is...\n"); print_help(); return FAILURE; } //first argument is always the executable name if (strcmp(argv[0],"Mosaic_acq15jz.exe")!=0){ fprintf(stderr, "Error: Wrong program name arguments. 
Correct usage is...\n"); print_help(); return FAILURE; } //read in the non optional command line arguments c = (unsigned int)atoi(argv[1]); printf("cell size %d\n",c); //TODO: read in the mode if (strcmp(argv[2], "CPU") == 0)execution_mode = CPU; else if (strcmp(argv[2], "OPENMP") == 0)execution_mode = OPENMP; else if (strcmp(argv[2], "CUDA") == 0)execution_mode = CUDA; else{ fprintf(stderr, "Error: Wrong Mode arguments. Correct usage is...\n"); print_help(); return FAILURE; } //TODO: read in the input image name if (strcmp(argv[3], "-i") == 0)input_FileName = argv[4]; else{ fprintf(stderr, "Error: Wrong program arguments. Correct usage is...\n"); print_help(); return FAILURE; }printf("filenameinput %s \n", input_FileName); //TODO: read in the output image name if (strcmp(argv[5], "-o") == 0)output_FileName = argv[6]; else{ fprintf(stderr, "Error: Wrong program arguments. Correct usage is...\n"); print_help(); return FAILURE; }printf("filenameoutput %s \n", output_FileName); return SUCCESS; } void output_image_file(char* filename,uchar4* image) { FILE *f; //output file handle //open the output file and write header info for PPM filetype f = fopen(filename, "wb"); if (f == NULL){ fprintf(stderr, "Error opening %s output file\n", output_FileName); exit(1); } fprintf(f, "P6\n"); fprintf(f, "%d \n%d\n%d\n", IMAGE_DIM, IMAGE_DIM, 255); for (int x = 0; x < IMAGE_DIM; x++){ for (int y = 0; y < IMAGE_DIM; y++){ int i = x + y*IMAGE_DIM; fwrite(&image[i], sizeof(unsigned char), 3, f); //only write rgb (ignoring a) } } fclose(f); } void input_image_file(char* filename, uchar4* image) { FILE *f; //input file handle char temp[256]; int h, w, s; //open the input file and write header info for PPM filetype f = fopen(filename, "rb"); if (f == NULL){ fprintf(stderr, "Error opening %s input file\n",input_FileName); exit(1); } fscanf(f, "%s\n", &temp); fscanf(f, "%d\n", &h); fscanf(f, "%d\n", &w); fscanf(f, "%d\n", &s); if (h != w){ fprintf(stderr, "Error: Input image file has wrong dimensions\n"); exit(1); } for (int x = 0; x < h; x++){ for (int y = 0; y < w; y++){ int i = x + y*w; fread(&image[i], sizeof(unsigned char), 3, f); //only read rgb //image[i].w = 255; } } fclose(f); } int getImageSize(char* filename) { FILE *f; //input file handle char temp[256]; int x, y, s; //open the input file and write header info for PPM filetype f = fopen(filename, "rb"); if (f == NULL){ fprintf(stderr, "Error opening %s input file for getting image size\n", input_FileName); exit(1); } fscanf(f, "%s\n", &temp); fscanf(f, "%d\n %d\n", &x, &y); printf("img dim is: %d\n", x); fscanf(f, "%d\n", &s); if (x != y){ fprintf(stderr, "Error: Input image file has wrong dimensions\n"); exit(1); } fclose(f); return x; } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA ERR1OR: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } ////launching parameters //dim3 blocksPerGrid((IMAGE_DIM + Mosaic_size - 1) / 32, (IMAGE_DIM + Mosaic_size - 1) / 32); // //dim3 threadsPerBlock(32, 32); //__global__ void _Mosaic(uchar4 *image, uchar4 *image_output, int Tile_size) { // // map from threadIdx/BlockIdx to pixel position // int x, y, i; // __shared__ uchar4 sdata[32][32]; // if (threadIdx.x < Tile_size&&threadIdx.y < Tile_size) // { // if (threadIdx.x + Tile_size * blockIdx.x < IMAGE_DIM&&threadIdx.y + Tile_size * blockIdx.y < IMAGE_DIM){ // x = threadIdx.x + Tile_size * blockIdx.x; // y = threadIdx.y + Tile_size * blockIdx.y; // i = x + y * blockDim.x * 
gridDim.x; // sdata[threadIdx.x][threadIdx.y] = image[i]; // } // // } // // __syncthreads(); // // int m, n; // int redsum; // int greensum; // int bluesum; // int count; // uchar4 pixel_temp; // float4 average = make_float4(0, 0, 0, 0); // // redsum = greensum = bluesum = count = 0; // if (threadIdx.x < Tile_size&&threadIdx.y < Tile_size) // { // if (threadIdx.x + Tile_size * blockIdx.x < IMAGE_DIM&&threadIdx.y + Tile_size * blockIdx.y < IMAGE_DIM){ // for (m = 0; m < Tile_size; m++){ // for (n = 0; n < Tile_size; n++){ // // //pixel_temp = sdata[m][n]; // pixel_temp = image[i]; // // //sum values // redsum += pixel_temp.x; // greensum += pixel_temp.y; // bluesum += pixel_temp.z; // count++; // // } // // } // // //calculate average // average.x = redsum / count; // average.y = greensum / count; // average.z = bluesum / count; // // image_output[i].x = (unsigned char)average.x; // image_output[i].y = (unsigned char)average.y; // image_output[i].z = (unsigned char)average.z; // image_output[i].w = 255; // } // // } // //}
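One detail worth flagging in the serial CPU branch of the program above: after the mosaic pass, redsum/greensum/bluesum/count are reused for the whole-image average without being reset (only r, g, b are zeroed), so the printed average still carries the last cell's partial sums. Below is a self-contained sketch of that average, sampling one pixel per mosaic cell as the original loop does; average_colour is a hypothetical helper, not part of the program.

static void average_colour(const uchar4 *img, int dim, int cell,
                           int *r, int *g, int *b) {
    long redsum = 0, greensum = 0, bluesum = 0, count = 0;  // accumulators reset locally
    for (int y = 0; y < dim; y += cell) {
        for (int x = 0; x < dim; x += cell) {
            const uchar4 p = img[x + y * dim];  // one sample per mosaic cell
            redsum += p.x; greensum += p.y; bluesum += p.z; ++count;
        }
    }
    *r = (int)(redsum / count);
    *g = (int)(greensum / count);
    *b = (int)(bluesum / count);
}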
bb3bc090ae2834a6615fc8c294da5ad7b370b378.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/upsample_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { __device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w/scale_factor; z = z/scale_factor; d2 /= scale_factor; d3 /= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } __device__ int translate_idx_inv( int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w*scale_factor+off_x; z = z*scale_factor+off_y; d2 *= scale_factor; d3 *= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } template <typename Dtype> __global__ void upscale(const Dtype *input, Dtype *output, int no_elements, int scale_factor, int d1, int d2, int d3) { int ii = threadIdx.x + blockDim.x * blockIdx.x; if (ii >= no_elements) return; int ipidx = translate_idx(ii, d1, d2, d3, scale_factor); output[ii]=input[ipidx]; } template <typename Dtype> __global__ void downscale(Dtype *gradInput_data, const Dtype *gradOutput_data, int no_elements, int scale_factor, int d1, int d2, int d3) { int ii = threadIdx.x + blockDim.x * blockIdx.x; if (ii >= no_elements) return; for (int i = 0; i < scale_factor; i++) { for (int j = 0; j < scale_factor; j++) { int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j); gradInput_data[ii] += gradOutput_data[ipidx]; } } } template <typename Dtype> void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int d1, d2, d3; d1 = top[0]->shape(1); d2 = top[0]->shape(2); d3 = top[0]->shape(3); int no_elements = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( upscale<Dtype>), dim3(CAFFE_GET_BLOCKS(no_elements)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->gpu_data(), top[0]->mutable_gpu_data(), no_elements, scale_, d1, d2, d3); } template <typename Dtype> void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int d1, d2, d3; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); d1 = bottom[0]->shape(1); d2 = bottom[0]->shape(2); d3 = bottom[0]->shape(3); int no_elements = bottom[0]->count(); caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( downscale<Dtype>), dim3(CAFFE_GET_BLOCKS(no_elements)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom_diff, top[0]->gpu_diff(), no_elements, scale_, d1, d2, d3); } INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer); } // namespace caffe
bb3bc090ae2834a6615fc8c294da5ad7b370b378.cu
#include <vector> #include "caffe/filler.hpp" #include "caffe/layers/upsample_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { __device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w/scale_factor; z = z/scale_factor; d2 /= scale_factor; d3 /= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } __device__ int translate_idx_inv( int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) { int x, y, z, w; w = ii % d3; ii = ii/d3; z = ii % d2; ii = ii/d2; y = ii % d1; ii = ii/d1; x = ii; w = w*scale_factor+off_x; z = z*scale_factor+off_y; d2 *= scale_factor; d3 *= scale_factor; return (((x*d1+y)*d2)+z)*d3+w; } template <typename Dtype> __global__ void upscale(const Dtype *input, Dtype *output, int no_elements, int scale_factor, int d1, int d2, int d3) { int ii = threadIdx.x + blockDim.x * blockIdx.x; if (ii >= no_elements) return; int ipidx = translate_idx(ii, d1, d2, d3, scale_factor); output[ii]=input[ipidx]; } template <typename Dtype> __global__ void downscale(Dtype *gradInput_data, const Dtype *gradOutput_data, int no_elements, int scale_factor, int d1, int d2, int d3) { int ii = threadIdx.x + blockDim.x * blockIdx.x; if (ii >= no_elements) return; for (int i = 0; i < scale_factor; i++) { for (int j = 0; j < scale_factor; j++) { int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j); gradInput_data[ii] += gradOutput_data[ipidx]; } } } template <typename Dtype> void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int d1, d2, d3; d1 = top[0]->shape(1); d2 = top[0]->shape(2); d3 = top[0]->shape(3); int no_elements = top[0]->count(); upscale<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>( bottom[0]->gpu_data(), top[0]->mutable_gpu_data(), no_elements, scale_, d1, d2, d3); } template <typename Dtype> void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int d1, d2, d3; Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); d1 = bottom[0]->shape(1); d2 = bottom[0]->shape(2); d3 = bottom[0]->shape(3); int no_elements = bottom[0]->count(); caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff); downscale<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>( bottom_diff, top[0]->gpu_diff(), no_elements, scale_, d1, d2, d3); } INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer); } // namespace caffe
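A worked example of the index arithmetic shared by both versions above, assuming the NCHW layout the kernels use and a single channel (d1 = 1) for simplicity: with scale_factor = 2, output element (n = 0, c = 0, y = 3, x = 5) of an 8x8 upsampled map reads element (y = 1, x = 2) of the 4x4 source. translate_idx_host is just a host-side copy of the device function for checking; it is not part of the layer.

static int translate_idx_host(int ii, int d1, int d2, int d3, int s) {
    int w = ii % d3; ii /= d3;   // w: width index
    int z = ii % d2; ii /= d2;   // z: height index
    int y = ii % d1; ii /= d1;   // y: channel index
    int x = ii;                  // x: batch index
    w /= s; z /= s; d2 /= s; d3 /= s;
    return (((x * d1 + y) * d2) + z) * d3 + w;
}
// translate_idx_host(3 * 8 + 5, /*d1=*/1, /*d2=*/8, /*d3=*/8, /*s=*/2) == 1 * 4 + 2 == 6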
8ae41bdc47c2accebe2fcaa21729dd56420d8f22.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" // includes, project #define PI 3.1415926536f int MaxThreadsPerBlock; int MaxThreadsX; int MaxThreadsY; // Convert a real vector to a complex vector // Convert a complex vector to a real vector // Pointwise multiplication of a complex vector by a real vector // Apply y = at*x + bt to each point of a real vector // Fill the linearmem (pixel array) bound to the texture from the array of reals // Alpha is not modified // Fill the linearmem (pixel array) bound to the texture from the array of bytes // Alpha is not modified // Fill the linearmem (pixel array) bound to the texture from the array of reals // Alpha enables display above a certain threshold // Auto-regressive process X2 = a*X1 + b*X0 + N0; // Expansion // Bi-linear interpolation is applied to the source // Cartesian-to-polar transformation // Bi-linear interpolation is applied to the source __global__ void KGaborFilter2(double* filter, int NumElements, double fMul ) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >NumElements) return; filter[i] = sqrt(filter[i]*fMul); }
8ae41bdc47c2accebe2fcaa21729dd56420d8f22.cu
#include "includes.h" // includes, project #define PI 3.1415926536f int MaxThreadsPerBlock; int MaxThreadsX; int MaxThreadsY; // Conversion d'un vecteur réel en vecteur complexe // Conversion d'un vecteur complexe en vecteur réel // Multiplie point par point un vecteur complex par un vecteur réel // Applique y = at*x +bt à chaque point d'un vecteur réel // Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel // Alpha n'est pas modifié // Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de bytes // Alpha n'est pas modifié // Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel // Alpha autorise l'affichage au dessus d'un certain seuil // Processus auto-régressif X2 = a*X1 + b*X0 + N0; // Expansion // On applique une interpolation bi-linéaire à la source // Transformation Cartesian To Polar // On applique une interpolation bi-linéaire à la source __global__ void KGaborFilter2(double* filter, int NumElements, double fMul ) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i >NumElements) return; filter[i] = sqrt(filter[i]*fMul); }
5b03906f7e3f0e3fd3ca73e712f8ff1fdaf75c6f.hip
// !!! This is a file automatically generated by hipify!!! #include "device/image_grayscale.h" #include <cassert> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include "device/cuda_debug.h" __global__ void compute_grayscale_kernel( void* color_img_ptr, uint32_t color_img_pitch, void* grayscale_img_ptr, uint32_t grayscale_image_pitch, uint32_t image_width, uint32_t image_height) { const uint32_t image_index_u = blockIdx.x*blockDim.x + threadIdx.x; const uint32_t image_index_v = blockIdx.y*blockDim.y + threadIdx.y; if (image_index_u >= image_width or image_index_v >= image_height) { return; } uint8_t* color_byte_ptr = reinterpret_cast<uint8_t*>(color_img_ptr); color_byte_ptr += color_img_pitch*image_index_v + sizeof(uchar4)*image_index_u; uint8_t* grayscale_byte_ptr = reinterpret_cast<uint8_t*>(grayscale_img_ptr); grayscale_byte_ptr += grayscale_image_pitch*image_index_v + sizeof(uchar1)*image_index_u; uchar4* color_pixel_ptr = reinterpret_cast<uchar4*>(color_byte_ptr); uchar1* grayscale_pixel_ptr = reinterpret_cast<uchar1*>(grayscale_byte_ptr); uchar4 color_pixel = *color_pixel_ptr; float grayscale_value = 0.3f*static_cast<float>(color_pixel.x) + 0.59f*static_cast<float>(color_pixel.y) + 0.11f*static_cast<float>(color_pixel.z); uchar1 grayscale_pixel = make_uchar1(__float2int_rn(grayscale_value)); *grayscale_pixel_ptr = grayscale_pixel; } void compute_grayscale(const PitchedCUDABuffer& color_image, PitchedCUDABuffer& grayscale_image) { // sanity check the input assert(color_image.get_element_size_in_bytes() == 4 * sizeof(uint8_t)); assert(grayscale_image.get_element_size_in_bytes() == sizeof(uint8_t)); assert(color_image.get_elements_per_row() == grayscale_image.get_elements_per_row()); assert(color_image.get_number_of_rows() == grayscale_image.get_number_of_rows()); const uint32_t image_width = color_image.get_elements_per_row(); const uint32_t image_height = color_image.get_number_of_rows(); const dim3 grayscale_block_dim(32, 32, 1); const dim3 grayscale_grid_dim(image_width/grayscale_block_dim.x + (image_width % grayscale_block_dim.x == 0 ? 0 : 1), image_height/grayscale_block_dim.y + (image_height % grayscale_block_dim.y == 0 ? 0 : 1), 1); hipLaunchKernelGGL(( compute_grayscale_kernel), dim3(grayscale_grid_dim), dim3(grayscale_block_dim), 0, 0, color_image.get_dev_ptr(), color_image.get_pitch_in_bytes(), grayscale_image.get_dev_ptr(), grayscale_image.get_pitch_in_bytes(), image_width, image_height); CUDA_SYNC_CHECK(); }
5b03906f7e3f0e3fd3ca73e712f8ff1fdaf75c6f.cu
#include "device/image_grayscale.h" #include <cassert> #include <device_launch_parameters.h> #include <cuda_runtime.h> #include "device/cuda_debug.h" __global__ void compute_grayscale_kernel( void* color_img_ptr, uint32_t color_img_pitch, void* grayscale_img_ptr, uint32_t grayscale_image_pitch, uint32_t image_width, uint32_t image_height) { const uint32_t image_index_u = blockIdx.x*blockDim.x + threadIdx.x; const uint32_t image_index_v = blockIdx.y*blockDim.y + threadIdx.y; if (image_index_u >= image_width or image_index_v >= image_height) { return; } uint8_t* color_byte_ptr = reinterpret_cast<uint8_t*>(color_img_ptr); color_byte_ptr += color_img_pitch*image_index_v + sizeof(uchar4)*image_index_u; uint8_t* grayscale_byte_ptr = reinterpret_cast<uint8_t*>(grayscale_img_ptr); grayscale_byte_ptr += grayscale_image_pitch*image_index_v + sizeof(uchar1)*image_index_u; uchar4* color_pixel_ptr = reinterpret_cast<uchar4*>(color_byte_ptr); uchar1* grayscale_pixel_ptr = reinterpret_cast<uchar1*>(grayscale_byte_ptr); uchar4 color_pixel = *color_pixel_ptr; float grayscale_value = 0.3f*static_cast<float>(color_pixel.x) + 0.59f*static_cast<float>(color_pixel.y) + 0.11f*static_cast<float>(color_pixel.z); uchar1 grayscale_pixel = make_uchar1(__float2int_rn(grayscale_value)); *grayscale_pixel_ptr = grayscale_pixel; } void compute_grayscale(const PitchedCUDABuffer& color_image, PitchedCUDABuffer& grayscale_image) { // sanity check the input assert(color_image.get_element_size_in_bytes() == 4 * sizeof(uint8_t)); assert(grayscale_image.get_element_size_in_bytes() == sizeof(uint8_t)); assert(color_image.get_elements_per_row() == grayscale_image.get_elements_per_row()); assert(color_image.get_number_of_rows() == grayscale_image.get_number_of_rows()); const uint32_t image_width = color_image.get_elements_per_row(); const uint32_t image_height = color_image.get_number_of_rows(); const dim3 grayscale_block_dim(32, 32, 1); const dim3 grayscale_grid_dim(image_width/grayscale_block_dim.x + (image_width % grayscale_block_dim.x == 0 ? 0 : 1), image_height/grayscale_block_dim.y + (image_height % grayscale_block_dim.y == 0 ? 0 : 1), 1); compute_grayscale_kernel<<<grayscale_grid_dim, grayscale_block_dim>>>(color_image.get_dev_ptr(), color_image.get_pitch_in_bytes(), grayscale_image.get_dev_ptr(), grayscale_image.get_pitch_in_bytes(), image_width, image_height); CUDA_SYNC_CHECK(); }
8fa7d901a9a89c8506017ac926f74f2dde453326.hip
// !!! This is a file automatically generated by hipify!!! /* * GridTools * * Copyright (c) 2014-2023, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include <gridtools/fn/sid_neighbor_table.hpp> #include <array> #include <cstddef> #include <cstdint> #include <type_traits> #include <gtest/gtest.h> #include <cuda_test_helper.hpp> #include <gridtools/sid/synthetic.hpp> namespace gridtools::fn { namespace { using sid_neighbor_table::as_neighbor_table; using edge_dim_t = integral_constant<int_t, 0>; using edge_to_cell_dim_t = integral_constant<int_t, 1>; template <class Table> __device__ auto neighbor_table_neighbors_device(Table const &table, int index) -> array<int, 2> { return neighbor_table::neighbors(table, index); } TEST(sid_neighbor_table, correctness_cuda) { constexpr std::size_t num_elements = 3; constexpr std::size_t num_neighbors = 2; const int data[num_elements][num_neighbors] = {{0, 1}, {10, 11}, {20, 21}}; const auto device_data = cuda_util::cuda_malloc<int>(num_elements * num_neighbors); GT_CUDA_CHECK(hipMemcpy(device_data.get(), &data, sizeof data, hipMemcpyHostToDevice)); using dim_hymap_t = hymap::keys<edge_dim_t, edge_to_cell_dim_t>; auto contents = sid::synthetic() .set<sid::property::origin>(sid::host_device::simple_ptr_holder(device_data.get())) .set<sid::property::strides>(dim_hymap_t::make_values(num_neighbors, 1)); const auto table = as_neighbor_table<edge_dim_t, edge_to_cell_dim_t, num_neighbors>(contents); using table_t = std::decay_t<decltype(table)>; auto [n00, n01] = on_device::exec( GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&neighbor_table_neighbors_device<table_t>), table, 0); auto [n10, n11] = on_device::exec( GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&neighbor_table_neighbors_device<table_t>), table, 1); auto [n20, n21] = on_device::exec( GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&neighbor_table_neighbors_device<table_t>), table, 2); EXPECT_EQ(n00, 0); EXPECT_EQ(n01, 1); EXPECT_EQ(n10, 10); EXPECT_EQ(n11, 11); EXPECT_EQ(n20, 20); EXPECT_EQ(n21, 21); } } // namespace } // namespace gridtools::fn
8fa7d901a9a89c8506017ac926f74f2dde453326.cu
/* * GridTools * * Copyright (c) 2014-2023, ETH Zurich * All rights reserved. * * Please, refer to the LICENSE file in the root directory. * SPDX-License-Identifier: BSD-3-Clause */ #include <gridtools/fn/sid_neighbor_table.hpp> #include <array> #include <cstddef> #include <cstdint> #include <type_traits> #include <gtest/gtest.h> #include <cuda_test_helper.hpp> #include <gridtools/sid/synthetic.hpp> namespace gridtools::fn { namespace { using sid_neighbor_table::as_neighbor_table; using edge_dim_t = integral_constant<int_t, 0>; using edge_to_cell_dim_t = integral_constant<int_t, 1>; template <class Table> __device__ auto neighbor_table_neighbors_device(Table const &table, int index) -> array<int, 2> { return neighbor_table::neighbors(table, index); } TEST(sid_neighbor_table, correctness_cuda) { constexpr std::size_t num_elements = 3; constexpr std::size_t num_neighbors = 2; const int data[num_elements][num_neighbors] = {{0, 1}, {10, 11}, {20, 21}}; const auto device_data = cuda_util::cuda_malloc<int>(num_elements * num_neighbors); GT_CUDA_CHECK(cudaMemcpy(device_data.get(), &data, sizeof data, cudaMemcpyHostToDevice)); using dim_hymap_t = hymap::keys<edge_dim_t, edge_to_cell_dim_t>; auto contents = sid::synthetic() .set<sid::property::origin>(sid::host_device::simple_ptr_holder(device_data.get())) .set<sid::property::strides>(dim_hymap_t::make_values(num_neighbors, 1)); const auto table = as_neighbor_table<edge_dim_t, edge_to_cell_dim_t, num_neighbors>(contents); using table_t = std::decay_t<decltype(table)>; auto [n00, n01] = on_device::exec( GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&neighbor_table_neighbors_device<table_t>), table, 0); auto [n10, n11] = on_device::exec( GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&neighbor_table_neighbors_device<table_t>), table, 1); auto [n20, n21] = on_device::exec( GT_MAKE_INTEGRAL_CONSTANT_FROM_VALUE(&neighbor_table_neighbors_device<table_t>), table, 2); EXPECT_EQ(n00, 0); EXPECT_EQ(n01, 1); EXPECT_EQ(n10, 10); EXPECT_EQ(n11, 11); EXPECT_EQ(n20, 20); EXPECT_EQ(n21, 21); } } // namespace } // namespace gridtools::fn
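The test above uploads a static 2D neighbor array with "sizeof data" through gridtools' cuda_util wrapper; expressed in plain CUDA runtime calls (a sketch, not the gridtools API), the same upload is:

static int *upload_neighbors()
{
    static const int data[3][2] = {{0, 1}, {10, 11}, {20, 21}};
    int *dev = nullptr;
    cudaMalloc(&dev, sizeof data);                               // 3 * 2 * sizeof(int)
    cudaMemcpy(dev, data, sizeof data, cudaMemcpyHostToDevice);  // row-major: neighbor index fastest
    return dev;
}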
acca5454b95fb20974c2da676afa7f19e55305ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layers/activation/softmax_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> static __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> static __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template <typename Dtype> static __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> static __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> static __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> static __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } template <typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int count = bottom[0]->count(); caffe_copy(count, bottom_data, top_data); hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(num * height * width)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num, channels, height*width, top_data,scale_data); hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, num, channels, height*width, scale_data, top_data); hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, top_data); hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(num * height * 
width)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num, channels, height*width, top_data,scale_data); hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, num, channels, height*width, scale_data, top_data); } template <typename Dtype> void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int count = bottom[0]->count(); caffe_copy(count, top_diff, bottom_diff); hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(num * height * width)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num, channels, height*width, top_diff, top_data, scale_data); hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, num, channels, height*width, scale_data, bottom_diff); caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff); } template <typename Dtype> void SoftmaxLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
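A host-side reference (a sketch for clarity, not part of the layer) of what the five kernels in Forward_gpu above compute at each (n, h, w) position: a max-shifted, numerically stable softmax over the channel axis.

#include <float.h>
#include <math.h>

static void softmax_channels_ref(const float *in, float *out,
                                 int num, int channels, int spatial)
{
    for (int n = 0; n < num; ++n) {
        for (int s = 0; s < spatial; ++s) {
            float m = -FLT_MAX;                      // kernel_channel_max
            for (int c = 0; c < channels; ++c)
                m = fmaxf(m, in[(n * channels + c) * spatial + s]);
            float sum = 0.f;                         // subtract, exp, kernel_channel_sum
            for (int c = 0; c < channels; ++c)
                sum += expf(in[(n * channels + c) * spatial + s] - m);
            for (int c = 0; c < channels; ++c)       // kernel_channel_div
                out[(n * channels + c) * spatial + s] =
                    expf(in[(n * channels + c) * spatial + s] - m) / sum;
        }
    }
}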
acca5454b95fb20974c2da676afa7f19e55305ed.cu
#include <algorithm> #include <cfloat> #include <vector> #include "thrust/device_vector.h" #include "caffe/layers/activation/softmax_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> static __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template <typename Dtype> static __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template <typename Dtype> static __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template <typename Dtype> static __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template <typename Dtype> static __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template <typename Dtype> static __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } template <typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int count = bottom[0]->count(); caffe_copy(count, bottom_data, top_data); kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(num * height * width),CAFFE_CUDA_NUM_THREADS>>> (num, channels, height*width, top_data,scale_data); kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),CAFFE_CUDA_NUM_THREADS>>> (count, num, channels, height*width, scale_data, top_data); kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> (count, top_data, top_data); kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(num * height * width),CAFFE_CUDA_NUM_THREADS>>> (num, channels, height*width, top_data,scale_data); kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),CAFFE_CUDA_NUM_THREADS>>> (count, num, channels, height*width, scale_data, top_data); } template <typename Dtype> void 
SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int count = bottom[0]->count(); caffe_copy(count, top_diff, bottom_diff); kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(num * height * width),CAFFE_CUDA_NUM_THREADS>>> (num, channels, height*width, top_diff, top_data, scale_data); kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),CAFFE_CUDA_NUM_THREADS>>> (count, num, channels, height*width, scale_data, bottom_diff); caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff); } template <typename Dtype> void SoftmaxLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
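Aside from the hipify banner and the runtime header, the only systematic difference between the .hip and .cu versions of this layer is the kernel-launch syntax. The sketch below is a toy kernel, not part of Caffe, showing the correspondence between the CUDA triple-chevron launch and the hipLaunchKernelGGL macro that hipify emits for every launch in the file above.

// Minimal standalone sketch of the launch-syntax rewrite (illustrative only).
#include <cuda_runtime.h>

__global__ void scale_kernel(const int n, const float alpha, float* x) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        x[i] *= alpha;
    }
}

int main() {
    const int n = 1024;
    float* d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;

    // CUDA launch, as written in the .cu version above:
    scale_kernel<<<blocks, threads, 0, 0>>>(n, 2.0f, d_x);

    // The hipified form of the same launch (HIP build only):
    //   hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, 0,
    //                      n, 2.0f, d_x);

    cudaDeviceSynchronize();
    cudaFree(d_x);
    return 0;
}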
6e46697374a814630cc2e837bfd2c313cfcc0823.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////// // matrixVecMult_CUBLAS.cu // // Andrew Krepps // // Module 8 Assignment // // 4/2/2018 // //////////////////////////////// #include <chrono> #include <stdio.h> #include <stdlib.h> #include <rocblas.h> #define CUBLAS_KERNEL 0 #define CUSTOM_KERNEL 1 /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector multiplication using shared memory /// /// \param [in] inMat the input matrix /// \param [in] inVec the input vector /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// __global__ void matVecMultSharedMem( const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // load input vector into shared memory for each block extern __shared__ float inVecSharedMem[]; const unsigned int localIdx = threadIdx.x; // input vector could be larger than the block size, // so we need to figure out which elements each thread // is responsible for copying over to shared memory const unsigned int elementsToCopy = n/blockDim.x + 1; const unsigned int startElement = localIdx*elementsToCopy; for (unsigned int i = 0; i < elementsToCopy; ++i) { unsigned int dataIdx = startElement + i; if (dataIdx < n) { inVecSharedMem[dataIdx] = inVec[dataIdx]; } } __syncthreads(); // after all data is loaded, perform multiplication using vector in shared memory const unsigned int outIdx = blockIdx.x*blockDim.x + threadIdx.x; if (outIdx < m) { // intermediate results are stored in registers // before being written back to the output float sum = 0.0f; unsigned int matRowStart = outIdx*n; for (unsigned int i = 0; i < n; i++) { unsigned int matIdx = matRowStart + i; sum += inMat[matIdx]*inVec[i]; } outVec[outIdx] = sum; } } /////////////////////////////////////////////////////////////////////////////// /// \brief initialize input data on the host /// /// \param [out] mat the input matrix /// \param [out] vec the input vector /// \param [in] m the number of matrix rows /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// void initializeInputData( float* mat, float* vec, const unsigned int m, const unsigned int n) { for (unsigned int i = 0; i < m; ++i) { for (unsigned int j = 0; j < n; ++j) { const unsigned int matIdx = i*n + j; mat[matIdx] = matIdx*0.001f; } vec[i] = i*0.01f; } } /////////////////////////////////////////////////////////////////////////////// /// \brief allocate device memory and run a CUBLAS kernel that performs /// matrix vector multiplication /// /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the host) /// \param [in] inVec the input vector (on the host) /// \param [out] outVec the output vector (on the host) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// void runCublasKernel( const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // allocate device memory float* d_inMat; 
float* d_inVec; float* d_outVec; hipblasAlloc(m*n, sizeof(float), (void**)&d_inMat); hipblasAlloc(n, sizeof(float), (void**)&d_inVec); hipblasAlloc(m, sizeof(float), (void**)&d_outVec); // copy input data to device hipblasSetMatrix(m, n, sizeof(float), inMat, m, d_inMat, m); hipblasSetVector(n, sizeof(float), inVec, 1, d_inVec, 1); // run kernel (host data is in row-major order so set transpose for column-major CUBLAS) hipblasSgemv('T', m, n, 1.0f, d_inMat, m, d_inVec, 1, 0.0f, d_outVec, 1); // copy output data to host hipblasGetVector(m, sizeof(float), d_outVec, 1, outVec, 1); // free device memory hipblasFree(d_inMat); hipblasFree(d_inVec); hipblasFree(d_outVec); } /////////////////////////////////////////////////////////////////////////////// /// \brief allocate device memory and run a custom kernel that performs /// matrix vector multiplication /// /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the host) /// \param [in] inVec the input vector (on the host) /// \param [out] outVec the output vector (on the host) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// void runCustomKernel( const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // allocate device memory float* d_inMat; float* d_inVec; float* d_outVec; const unsigned int matrixElements = m*n; const unsigned int matrixBytes = matrixElements*sizeof(float); const unsigned int inVectorBytes = n*sizeof(float); const unsigned int outVectorBytes = m*sizeof(float); hipMalloc((void**)&d_inMat, matrixBytes); hipMalloc((void**)&d_inVec, inVectorBytes); hipMalloc((void**)&d_outVec, outVectorBytes); // copy input data to device hipMemcpy(d_inMat, inMat, matrixBytes, hipMemcpyHostToDevice); hipMemcpy(d_inVec, inVec, inVectorBytes, hipMemcpyHostToDevice); // launch kernel const unsigned int numBlocks = m/blockSize; hipLaunchKernelGGL(( matVecMultSharedMem), dim3(numBlocks), dim3(blockSize), n*sizeof(float), 0, d_inMat, d_inVec, d_outVec, m, n); // copy output data to host hipMemcpy(outVec, d_outVec, outVectorBytes, hipMemcpyDeviceToHost); // free device memory hipFree(d_inMat); hipFree(d_inVec); hipFree(d_outVec); } /////////////////////////////////////////////////////////////////////////////// /// \brief allocate device memory and run a kernel that performs matrix vector /// multiplication /// /// \param [in] kernel the index of the kernel to execute /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the host) /// \param [in] inVec the input vector (on the host) /// \param [out] outVec the output vector (on the host) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /// /// \returns the total execution time (in ms) including data transfer /////////////////////////////////////////////////////////////////////////////// float runTimingTest( const unsigned int kernel, const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // start clock auto start = std::chrono::high_resolution_clock::now(); // run kernel switch (kernel) { case CUBLAS_KERNEL: runCublasKernel(blockSize, inMat, inVec, outVec, m, n); break; case 
CUSTOM_KERNEL: runCustomKernel(blockSize, inMat, inVec, outVec, m, n); break; default: printf("Error: unrecognized kernel index: %d\n", kernel); } // calculate execution time in ms auto stop = std::chrono::high_resolution_clock::now(); std::chrono::duration<float> duration(stop - start); return duration.count()*1000.0f; } int main(int argc, char** argv) { // configure run unsigned int dataSize = 512; unsigned int blockSize = 256; if (argc > 1) { dataSize = atoi(argv[1]); } if (argc > 2) { blockSize = atoi(argv[2]); } // allocate and initialize host memory const unsigned int matrixBytes = dataSize*dataSize*sizeof(float); const unsigned int vectorBytes = dataSize*sizeof(float); float* inMat = (float*)malloc(matrixBytes); float* inVec = (float*)malloc(vectorBytes); float* outVec = (float*)malloc(vectorBytes); initializeInputData(inMat, inVec, dataSize, dataSize); // initialze CUBLAS hipblasInit(); // run timing comparisons float cublasMs = runTimingTest(CUBLAS_KERNEL, blockSize, inMat, inVec, outVec, dataSize, dataSize); float customMs = runTimingTest(CUSTOM_KERNEL, blockSize, inMat, inVec, outVec, dataSize, dataSize); // show results printf("CUBLAS kernel time: %.3f ms\n", cublasMs); printf("Custom kernel time: %.3f ms\n", customMs); // shut down CUBLAS hipblasShutdown(); // free host memory free(inMat); free(inVec); free(outVec); }
6e46697374a814630cc2e837bfd2c313cfcc0823.cu
//////////////////////////////// // matrixVecMult_CUBLAS.cu // // Andrew Krepps // // Module 8 Assignment // // 4/2/2018 // //////////////////////////////// #include <chrono> #include <stdio.h> #include <stdlib.h> #include <cublas.h> #define CUBLAS_KERNEL 0 #define CUSTOM_KERNEL 1 /////////////////////////////////////////////////////////////////////////////// /// \brief perform matrix vector multiplication using shared memory /// /// \param [in] inMat the input matrix /// \param [in] inVec the input vector /// \param [out] outVec the output vector /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// __global__ void matVecMultSharedMem( const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // load input vector into shared memory for each block extern __shared__ float inVecSharedMem[]; const unsigned int localIdx = threadIdx.x; // input vector could be larger than the block size, // so we need to figure out which elements each thread // is responsible for copying over to shared memory const unsigned int elementsToCopy = n/blockDim.x + 1; const unsigned int startElement = localIdx*elementsToCopy; for (unsigned int i = 0; i < elementsToCopy; ++i) { unsigned int dataIdx = startElement + i; if (dataIdx < n) { inVecSharedMem[dataIdx] = inVec[dataIdx]; } } __syncthreads(); // after all data is loaded, perform multiplication using vector in shared memory const unsigned int outIdx = blockIdx.x*blockDim.x + threadIdx.x; if (outIdx < m) { // intermediate results are stored in registers // before being written back to the output float sum = 0.0f; unsigned int matRowStart = outIdx*n; for (unsigned int i = 0; i < n; i++) { unsigned int matIdx = matRowStart + i; sum += inMat[matIdx]*inVec[i]; } outVec[outIdx] = sum; } } /////////////////////////////////////////////////////////////////////////////// /// \brief initialize input data on the host /// /// \param [out] mat the input matrix /// \param [out] vec the input vector /// \param [in] m the number of matrix rows /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// void initializeInputData( float* mat, float* vec, const unsigned int m, const unsigned int n) { for (unsigned int i = 0; i < m; ++i) { for (unsigned int j = 0; j < n; ++j) { const unsigned int matIdx = i*n + j; mat[matIdx] = matIdx*0.001f; } vec[i] = i*0.01f; } } /////////////////////////////////////////////////////////////////////////////// /// \brief allocate device memory and run a CUBLAS kernel that performs /// matrix vector multiplication /// /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the host) /// \param [in] inVec the input vector (on the host) /// \param [out] outVec the output vector (on the host) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// void runCublasKernel( const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // allocate device memory float* d_inMat; float* d_inVec; float* d_outVec; cublasAlloc(m*n, sizeof(float), (void**)&d_inMat); 
cublasAlloc(n, sizeof(float), (void**)&d_inVec); cublasAlloc(m, sizeof(float), (void**)&d_outVec); // copy input data to device cublasSetMatrix(m, n, sizeof(float), inMat, m, d_inMat, m); cublasSetVector(n, sizeof(float), inVec, 1, d_inVec, 1); // run kernel (host data is in row-major order so set transpose for column-major CUBLAS) cublasSgemv('T', m, n, 1.0f, d_inMat, m, d_inVec, 1, 0.0f, d_outVec, 1); // copy output data to host cublasGetVector(m, sizeof(float), d_outVec, 1, outVec, 1); // free device memory cublasFree(d_inMat); cublasFree(d_inVec); cublasFree(d_outVec); } /////////////////////////////////////////////////////////////////////////////// /// \brief allocate device memory and run a custom kernel that performs /// matrix vector multiplication /// /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the host) /// \param [in] inVec the input vector (on the host) /// \param [out] outVec the output vector (on the host) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /////////////////////////////////////////////////////////////////////////////// void runCustomKernel( const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // allocate device memory float* d_inMat; float* d_inVec; float* d_outVec; const unsigned int matrixElements = m*n; const unsigned int matrixBytes = matrixElements*sizeof(float); const unsigned int inVectorBytes = n*sizeof(float); const unsigned int outVectorBytes = m*sizeof(float); cudaMalloc((void**)&d_inMat, matrixBytes); cudaMalloc((void**)&d_inVec, inVectorBytes); cudaMalloc((void**)&d_outVec, outVectorBytes); // copy input data to device cudaMemcpy(d_inMat, inMat, matrixBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_inVec, inVec, inVectorBytes, cudaMemcpyHostToDevice); // launch kernel const unsigned int numBlocks = m/blockSize; matVecMultSharedMem<<<numBlocks, blockSize, n*sizeof(float)>>>(d_inMat, d_inVec, d_outVec, m, n); // copy output data to host cudaMemcpy(outVec, d_outVec, outVectorBytes, cudaMemcpyDeviceToHost); // free device memory cudaFree(d_inMat); cudaFree(d_inVec); cudaFree(d_outVec); } /////////////////////////////////////////////////////////////////////////////// /// \brief allocate device memory and run a kernel that performs matrix vector /// multiplication /// /// \param [in] kernel the index of the kernel to execute /// \param [in] blockSize the number of threads per block to use /// \param [in] inMat the input matrix (on the host) /// \param [in] inVec the input vector (on the host) /// \param [out] outVec the output vector (on the host) /// \param [in] m the number of matrix rows and the output vector length /// \param [in] n the number of matrix columns and the input vector length /// /// \returns the total execution time (in ms) including data transfer /////////////////////////////////////////////////////////////////////////////// float runTimingTest( const unsigned int kernel, const unsigned int blockSize, const float* inMat, const float* inVec, float* outVec, const unsigned int m, const unsigned int n) { // start clock auto start = std::chrono::high_resolution_clock::now(); // run kernel switch (kernel) { case CUBLAS_KERNEL: runCublasKernel(blockSize, inMat, inVec, outVec, m, n); break; case CUSTOM_KERNEL: runCustomKernel(blockSize, inMat, inVec, outVec, m, n); break; default: printf("Error: unrecognized 
kernel index: %d\n", kernel); } // calculate execution time in ms auto stop = std::chrono::high_resolution_clock::now(); std::chrono::duration<float> duration(stop - start); return duration.count()*1000.0f; } int main(int argc, char** argv) { // configure run unsigned int dataSize = 512; unsigned int blockSize = 256; if (argc > 1) { dataSize = atoi(argv[1]); } if (argc > 2) { blockSize = atoi(argv[2]); } // allocate and initialize host memory const unsigned int matrixBytes = dataSize*dataSize*sizeof(float); const unsigned int vectorBytes = dataSize*sizeof(float); float* inMat = (float*)malloc(matrixBytes); float* inVec = (float*)malloc(vectorBytes); float* outVec = (float*)malloc(vectorBytes); initializeInputData(inMat, inVec, dataSize, dataSize); // initialze CUBLAS cublasInit(); // run timing comparisons float cublasMs = runTimingTest(CUBLAS_KERNEL, blockSize, inMat, inVec, outVec, dataSize, dataSize); float customMs = runTimingTest(CUSTOM_KERNEL, blockSize, inMat, inVec, outVec, dataSize, dataSize); // show results printf("CUBLAS kernel time: %.3f ms\n", cublasMs); printf("Custom kernel time: %.3f ms\n", customMs); // shut down CUBLAS cublasShutdown(); // free host memory free(inMat); free(inVec); free(outVec); }
3fd952c09011a37458b8fecb788d598d80a3bbc0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. 
*/ // includes, system #include <string> #include <iostream> // includes, project #include "matrixmul.h" #include "nocutil.h" #include "matrixmul_gold.cpp" #include "matrixmul_kernel.hip" #define TILE_WIDTH 16 //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; int errorM = 0, errorN = 0; srand(5672); if(argc != 5 && argc != 4) { // Allocate and initialize the matrices M = AllocateMatrix(rand() % 1024, rand() % 1024, 1); N = AllocateMatrix(M.width, rand() % 1024, 1); P = AllocateMatrix(M.height, N.width, 0); } else { // Allocate and read in matrices from disk int* params = NULL; //(int*)malloc(3 * sizeof(int)); unsigned int data_read = 3; nocutReadFilei(argv[1], &params, &data_read, true); if(data_read != 3){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(params[0], params[1], 0); N = AllocateMatrix(params[1], params[2], 0); P = AllocateMatrix(params[0], params[2], 0); errorM = ReadFile(&M, argv[2]); errorN = ReadFile(&N, argv[3]); if(errorM || errorN ) { printf("Error reading input files %d, %d\n", errorM, errorN); return 1; } } // M * N on the device MatrixMulOnDevice(M, N, P); printf("GPU computation complete\n"); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); printf("CPU computation complete\n"); // in this case check if the result is equivalent to the expected soluion bool res = nocutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P) { // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); CopyToDeviceMatrix(Md, M); Matrix Nd = AllocateDeviceMatrix(N); CopyToDeviceMatrix(Nd, N); // Allocate P on the device Matrix Pd = AllocateDeviceMatrix(P); CopyToDeviceMatrix(Pd, P); // Clear memory // Setup the execution configuration int dimX = (int)(ceil((float)P.width / TILE_WIDTH)); int dimY = (int)(ceil((float)P.height / TILE_WIDTH)); dim3 dimGrid(dimX, dimY); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); // Launch the device computation threads! 
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md,Nd,Pd); // Read P from the device CopyFromDeviceMatrix(P, Pd); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { hipFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } // Read a floating point matrix in from file // Returns zero if the number of elements read is // equals M.height * M.width, and 1 otherwise int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height*M->width; nocutReadFilef(file_name, &(M->elements), &data_read, true); return (data_read != (M->height * M->width)); } // Write a floating point matrix to file void WriteFile(Matrix M, char* file_name) { nocutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f); }
3fd952c09011a37458b8fecb788d598d80a3bbc0.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Host code. */ // includes, system #include <string> #include <iostream> // includes, project #include "matrixmul.h" #include "nocutil.h" #include "matrixmul_gold.cpp" #include "matrixmul_kernel.cu" #define TILE_WIDTH 16 //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix M; Matrix N; Matrix P; int errorM = 0, errorN = 0; srand(5672); if(argc != 5 && argc != 4) { // Allocate and initialize the matrices M = AllocateMatrix(rand() % 1024, rand() % 1024, 1); N = AllocateMatrix(M.width, rand() % 1024, 1); P = AllocateMatrix(M.height, N.width, 0); } else { // Allocate and read in matrices from disk int* params = NULL; //(int*)malloc(3 * sizeof(int)); unsigned int data_read = 3; nocutReadFilei(argv[1], &params, &data_read, true); if(data_read != 3){ printf("Error reading parameter file\n"); return 1; } M = AllocateMatrix(params[0], params[1], 0); N = AllocateMatrix(params[1], params[2], 0); P = AllocateMatrix(params[0], params[2], 0); errorM = ReadFile(&M, argv[2]); errorN 
= ReadFile(&N, argv[3]); if(errorM || errorN ) { printf("Error reading input files %d, %d\n", errorM, errorN); return 1; } } // M * N on the device MatrixMulOnDevice(M, N, P); printf("GPU computation complete\n"); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(P.height, P.width, 0); computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width); printf("CPU computation complete\n"); // in this case check if the result is equivalent to the expected soluion bool res = nocutComparefe(reference.elements, P.elements, P.height*P.width, 0.001f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); if(argc == 5) { WriteFile(P, argv[4]); } else if(argc == 2) { WriteFile(P, argv[1]); } // Free matrices FreeMatrix(&M); FreeMatrix(&N); FreeMatrix(&P); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P) { // Load M and N to the device Matrix Md = AllocateDeviceMatrix(M); CopyToDeviceMatrix(Md, M); Matrix Nd = AllocateDeviceMatrix(N); CopyToDeviceMatrix(Nd, N); // Allocate P on the device Matrix Pd = AllocateDeviceMatrix(P); CopyToDeviceMatrix(Pd, P); // Clear memory // Setup the execution configuration int dimX = (int)(ceil((float)P.width / TILE_WIDTH)); int dimY = (int)(ceil((float)P.height / TILE_WIDTH)); dim3 dimGrid(dimX, dimY); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH); // Launch the device computation threads! MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd); // Read P from the device CopyFromDeviceMatrix(P, Pd); // Free device matrices FreeDeviceMatrix(&Md); FreeDeviceMatrix(&Nd); FreeDeviceMatrix(&Pd); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a device matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. // If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = width; M.height = height; int size = M.width * M.height; M.elements = NULL; // don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX); } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Free a device matrix. 
void FreeDeviceMatrix(Matrix* M) { cudaFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } // Read a floating point matrix in from file // Returns zero if the number of elements read is // equals M.height * M.width, and 1 otherwise int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height*M->width; nocutReadFilef(file_name, &(M->elements), &data_read, true); return (data_read != (M->height * M->width)); } // Write a floating point matrix to file void WriteFile(Matrix M, char* file_name) { nocutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f); }
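The host code above includes matrixmul_kernel.cu, which is not part of this dump. The following is a hedged sketch of a tiled shared-memory kernel that would be consistent with the TILE_WIDTH = 16 blocks and ceil-division grid set up in MatrixMulOnDevice; the Matrix struct fields are assumed from how the host code uses them, and the kernel name is marked as a sketch because the real MatrixMulKernel is not shown.

// Tiled matrix-multiply sketch (illustrative, not the course's actual kernel).
#include <cuda_runtime.h>

#define TILE_WIDTH 16

struct Matrix {          // assumed layout: row-major, height x width
    int width;
    int height;
    float* elements;
};

__global__ void MatrixMulKernelSketch(Matrix M, Matrix N, Matrix P) {
    __shared__ float Ms[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Ns[TILE_WIDTH][TILE_WIDTH];

    const int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    const int col = blockIdx.x * TILE_WIDTH + threadIdx.x;

    float acc = 0.0f;
    const int numTiles = (M.width + TILE_WIDTH - 1) / TILE_WIDTH;

    for (int t = 0; t < numTiles; ++t) {
        const int mCol = t * TILE_WIDTH + threadIdx.x;   // column into M
        const int nRow = t * TILE_WIDTH + threadIdx.y;   // row into N

        // Guarded loads: the matrix sizes are random here, not multiples of TILE_WIDTH.
        Ms[threadIdx.y][threadIdx.x] =
            (row < M.height && mCol < M.width) ? M.elements[row * M.width + mCol] : 0.0f;
        Ns[threadIdx.y][threadIdx.x] =
            (nRow < N.height && col < N.width) ? N.elements[nRow * N.width + col] : 0.0f;
        __syncthreads();

        for (int k = 0; k < TILE_WIDTH; ++k) {
            acc += Ms[threadIdx.y][k] * Ns[k][threadIdx.x];
        }
        __syncthreads();
    }

    if (row < P.height && col < P.width) {
        P.elements[row * P.width + col] = acc;
    }
}

The zero-padding on out-of-range loads lets every thread participate in the shared-memory tile without changing the result, which is why the host can use a ceil-division grid for arbitrary matrix sizes.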
3f358cccc108d73194a9aab44d75601e3e9597ca.hip
// !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "row_filter.h"

namespace filter
{
    template void linearRow<uchar, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}

#endif /* CUDA_DISABLER */
3f358cccc108d73194a9aab44d75601e3e9597ca.cu
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include "row_filter.h"

namespace filter
{
    template void linearRow<uchar, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}

#endif /* CUDA_DISABLER */
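Both versions of this file consist of a single explicit template instantiation; the implementation of filter::linearRow lives in row_filter.h, which is not included in this dump. The standalone sketch below (copyConvert is an illustrative name, not OpenCV API) shows the same pattern: a template defined once in a shared header, compiled for one concrete type pair per translation unit to keep per-file compile time and binary size down.

// Explicit-instantiation sketch mirroring the linearRow<uchar, float> line above.
#include <cuda_runtime.h>

template <typename T, typename D>
__global__ void copyConvertKernel(const T* src, D* dst, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        dst[i] = static_cast<D>(src[i]);
    }
}

template <typename T, typename D>
void copyConvert(const T* src, D* dst, int n, cudaStream_t stream) {
    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;
    copyConvertKernel<T, D><<<blocks, threads, 0, stream>>>(src, dst, n);
}

// One concrete instantiation per .cu file, as in the OpenCV source above.
template void copyConvert<unsigned char, float>(const unsigned char*, float*, int,
                                                cudaStream_t);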
ffa9f4592c7376a97907363caa53b4a59b539459.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zmgecsrmv.cu normal z -> s, Fri Jan 30 19:00:29 2015 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void smgecsrmv_kernel( int num_rows, int num_cols, int num_vecs, float alpha, float * dval, magma_index_t * drowptr, magma_index_t * dcolind, float * dx, float beta, float * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; extern __shared__ float dot[]; if( row<num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_S_MAKE(0.0, 0.0); int start = drowptr[ row ] ; int end = drowptr[ row+1 ]; for( j=start; j<end; j++ ){ int col = dcolind [ j ]; float val = dval[ j ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[ col + i*num_cols ]; } for( int i=0; i<num_vecs; i++ ) dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ] + beta * dy[ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is CSR. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] alpha float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_smgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, float alpha, magmaFloat_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( float ); // num_vecs vectors hipLaunchKernelGGL(( smgecsrmv_kernel), dim3(grid), dim3(threads), MEM_SIZE , 0, m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; }
ffa9f4592c7376a97907363caa53b4a59b539459.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zmgecsrmv.cu normal z -> s, Fri Jan 30 19:00:29 2015 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void smgecsrmv_kernel( int num_rows, int num_cols, int num_vecs, float alpha, float * dval, magma_index_t * drowptr, magma_index_t * dcolind, float * dx, float beta, float * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; extern __shared__ float dot[]; if( row<num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_S_MAKE(0.0, 0.0); int start = drowptr[ row ] ; int end = drowptr[ row+1 ]; for( j=start; j<end; j++ ){ int col = dcolind [ j ]; float val = dval[ j ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[ col + i*num_cols ]; } for( int i=0; i<num_vecs; i++ ) dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ] + beta * dy[ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is CSR. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] alpha float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_smgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, float alpha, magmaFloat_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( float ); // num_vecs vectors smgecsrmv_kernel<<< grid, threads, MEM_SIZE >>> (m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; }
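The launch above requests num_vecs * BLOCK_SIZE * sizeof(float) bytes of dynamic shared memory per block, so with the 512-thread block size the classic 48 KB per-block limit is reached at num_vecs = 24 (24 * 512 * 4 = 49152 bytes). The standalone sketch below makes that bound explicit by querying the device limit; sharedMemFits is an illustrative helper, not MAGMA API.

// Shared-memory budget check for the smgecsrmv launch (illustrative only).
#include <cuda_runtime.h>
#include <cstdio>

#define BLOCK_SIZE 512   // assumes the sm_20+ configuration used above

int sharedMemFits(int num_vecs) {
    int device = 0;
    int maxShmem = 0;    // per-block dynamic shared memory limit, in bytes
    cudaGetDevice(&device);
    cudaDeviceGetAttribute(&maxShmem, cudaDevAttrMaxSharedMemoryPerBlock, device);

    const size_t needed = (size_t)num_vecs * BLOCK_SIZE * sizeof(float);
    printf("need %zu bytes of shared memory, device limit %d bytes\n",
           needed, maxShmem);
    return needed <= (size_t)maxShmem;   // e.g. 24 * 512 * 4 = 49152 = 48 KB
}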
85d8d407115f7a1aff9b445c58c6bef6f1b7695d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //# IntToFloat.cu: Convert integer input to float; transpose time and pol dims //# Copyright (C) 2012-2013 ASTRON (Netherlands Institute for Radio Astronomy) //# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands //# //# This file is part of the LOFAR software suite. //# The LOFAR software suite is free software: you can redistribute it and/or //# modify it under the terms of the GNU General Public License as published //# by the Free Software Foundation, either version 3 of the License, or //# (at your option) any later version. //# //# The LOFAR software suite is distributed in the hope that it will be useful, //# but WITHOUT ANY WARRANTY; without even the implied warranty of //# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //# GNU General Public License for more details. //# //# You should have received a copy of the GNU General Public License along //# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. //# //# $Id: IntToFloat.cu 27000 2013-10-17 09:11:13Z loose $ #include "IntToFloat.cuh" //#if NR_BITS_PER_SAMPLE == 4 //typedef char1 SampleType #if NR_BITS_PER_SAMPLE == 8 typedef char2 SampleType; #elif NR_BITS_PER_SAMPLE == 16 typedef short2 SampleType; #else #error unsupported NR_BITS_PER_SAMPLE: must be 4, 8, or 16 #endif typedef SampleType (*SampledDataType) [NR_STATIONS][NR_SAMPLES_PER_SUBBAND][NR_POLARIZATIONS]; typedef float2 (*ConvertedDataType)[NR_STATIONS][NR_POLARIZATIONS][NR_SAMPLES_PER_SUBBAND]; /** * This kernel performs a conversion of the integer valued input to floats and * transposes the data to get per station: first all samples with polX, then polY. * - It supports 8 and 16 bit (char and short) input, which is selectable using * the define NR_BITS_PER_SAMPLE * - In 8 bit mode the converted samples with value -128 are clamped to -127.0f * * @param[out] convertedDataPtr pointer to output data of ConvertedDataType, * a 4D array [station][polarizations][n_samples_subband][complex] * of floats (2 complex polarizations). * @param[in] sampledDataPtr pointer to input data; this can either be a * 4D array [station][n_samples_subband][polarizations][complex] * of shorts or chars, depending on NR_BITS_PER_SAMPLE. * * Required preprocessor symbols: * - NR_SAMPLES_PER_CHANNEL: > 0 * - NR_BITS_PER_SAMPLE: 8 or 16 * * Execution configuration: * - Use a 1D thread block. No restrictions. * - Use a 2D grid dim, where the x dim has 1 block and the y dim represents the * number of stations (i.e. antenna fields). */ extern "C" { __global__ void intToFloat(void *convertedDataPtr, const void *sampledDataPtr) { ConvertedDataType convertedData = (ConvertedDataType)convertedDataPtr; SampledDataType sampledData = (SampledDataType) sampledDataPtr; uint station = blockIdx.y; for (uint time = threadIdx.x; time < NR_SAMPLES_PER_SUBBAND; time += blockDim.x) { float4 sample; sample = make_float4(convertIntToFloat((*sampledData)[station][time][0].x), convertIntToFloat((*sampledData)[station][time][0].y), convertIntToFloat((*sampledData)[station][time][1].x), convertIntToFloat((*sampledData)[station][time][1].y)); float2 sampleX = make_float2(sample.x, sample.y); (*convertedData)[station][0][time] = sampleX; float2 sampleY = make_float2(sample.z, sample.w); (*convertedData)[station][1][time] = sampleY; } } }
85d8d407115f7a1aff9b445c58c6bef6f1b7695d.cu
//# IntToFloat.cu: Convert integer input to float; transpose time and pol dims //# Copyright (C) 2012-2013 ASTRON (Netherlands Institute for Radio Astronomy) //# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands //# //# This file is part of the LOFAR software suite. //# The LOFAR software suite is free software: you can redistribute it and/or //# modify it under the terms of the GNU General Public License as published //# by the Free Software Foundation, either version 3 of the License, or //# (at your option) any later version. //# //# The LOFAR software suite is distributed in the hope that it will be useful, //# but WITHOUT ANY WARRANTY; without even the implied warranty of //# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //# GNU General Public License for more details. //# //# You should have received a copy of the GNU General Public License along //# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. //# //# $Id: IntToFloat.cu 27000 2013-10-17 09:11:13Z loose $ #include "IntToFloat.cuh" //#if NR_BITS_PER_SAMPLE == 4 //typedef char1 SampleType #if NR_BITS_PER_SAMPLE == 8 typedef char2 SampleType; #elif NR_BITS_PER_SAMPLE == 16 typedef short2 SampleType; #else #error unsupported NR_BITS_PER_SAMPLE: must be 4, 8, or 16 #endif typedef SampleType (*SampledDataType) [NR_STATIONS][NR_SAMPLES_PER_SUBBAND][NR_POLARIZATIONS]; typedef float2 (*ConvertedDataType)[NR_STATIONS][NR_POLARIZATIONS][NR_SAMPLES_PER_SUBBAND]; /** * This kernel performs a conversion of the integer valued input to floats and * transposes the data to get per station: first all samples with polX, then polY. * - It supports 8 and 16 bit (char and short) input, which is selectable using * the define NR_BITS_PER_SAMPLE * - In 8 bit mode the converted samples with value -128 are clamped to -127.0f * * @param[out] convertedDataPtr pointer to output data of ConvertedDataType, * a 4D array [station][polarizations][n_samples_subband][complex] * of floats (2 complex polarizations). * @param[in] sampledDataPtr pointer to input data; this can either be a * 4D array [station][n_samples_subband][polarizations][complex] * of shorts or chars, depending on NR_BITS_PER_SAMPLE. * * Required preprocessor symbols: * - NR_SAMPLES_PER_CHANNEL: > 0 * - NR_BITS_PER_SAMPLE: 8 or 16 * * Execution configuration: * - Use a 1D thread block. No restrictions. * - Use a 2D grid dim, where the x dim has 1 block and the y dim represents the * number of stations (i.e. antenna fields). */ extern "C" { __global__ void intToFloat(void *convertedDataPtr, const void *sampledDataPtr) { ConvertedDataType convertedData = (ConvertedDataType)convertedDataPtr; SampledDataType sampledData = (SampledDataType) sampledDataPtr; uint station = blockIdx.y; for (uint time = threadIdx.x; time < NR_SAMPLES_PER_SUBBAND; time += blockDim.x) { float4 sample; sample = make_float4(convertIntToFloat((*sampledData)[station][time][0].x), convertIntToFloat((*sampledData)[station][time][0].y), convertIntToFloat((*sampledData)[station][time][1].x), convertIntToFloat((*sampledData)[station][time][1].y)); float2 sampleX = make_float2(sample.x, sample.y); (*convertedData)[station][0][time] = sampleX; float2 sampleY = make_float2(sample.z, sample.w); (*convertedData)[station][1][time] = sampleY; } } }
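convertIntToFloat is defined in IntToFloat.cuh, which is not part of this dump. Based only on the kernel's own documentation (8-bit samples with value -128 are clamped to -127.0f), a guess at the 8-bit conversion helper might look like the sketch below; the name and form are assumptions, not LOFAR code.

// Hypothetical 8-bit sample conversion, consistent with the doc comment above.
__device__ inline float convertInt8ToFloatSketch(signed char s)
{
    // Clamp the asymmetric extreme of the two's-complement range so the
    // converted samples span the symmetric interval [-127.0f, 127.0f].
    return (s == -128) ? -127.0f : (float)s;
}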
201636846aea76fab15b3f701899ceba310f87f7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "dz1z5.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <math.h> #include <sys/types.h> #include <fcntl.h> #define NUM_OF_GPU_THREADS 1024 /*---< cluster_par() >-----------------------------------------------------------*/ int cluster_par(int numObjects, /* number of input objects */ int numAttributes, /* size of attribute of each object */ float **attributes, /* [numObjects][numAttributes] */ int num_nclusters, float threshold, /* in: */ float ***cluster_centres /* out: [best_nclusters][numAttributes] */ ) { int nclusters; int *membership; float **tmp_cluster_centres; membership = (int *)malloc(numObjects * sizeof(int)); nclusters = num_nclusters; srand(7); tmp_cluster_centres = kmeans_clustering_par(attributes, numAttributes, numObjects, nclusters, threshold, membership); if (*cluster_centres) { free((*cluster_centres)[0]); free(*cluster_centres); } *cluster_centres = tmp_cluster_centres; free(membership); return 0; } __device__ __inline float cuda_euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans = 0.0; for (i = 0; i < numdims; i++) ans += (pt1[i] - pt2[i]) * (pt1[i] - pt2[i]); return (ans); } __device__ int cuda_find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float *pts, /* [npts][nfeatures] */ int npts) { int index, i; float min_dist = FLT_MAX; /* find the cluster center id with min distance to pt */ for (i = 0; i < npts; i++) { float dist; dist = cuda_euclid_dist_2(pt, pts + i * nfeatures, nfeatures); /* no need square root */ if (dist < min_dist) { min_dist = dist; index = i; } } return (index); } __global__ void updateNewCenters(float* cuda_delta_array, int* cuda_membership, float* cuda_features, int nfeatures, float* cuda_clusters, int nclusters, int* cuda_new_centers_len, float* cuda_new_centers) { int global_id = blockIdx.x * blockDim.x + threadIdx.x; int local_id = threadIdx.x; float delta = 0; int membership = cuda_membership[global_id]; // global mem access int index = cuda_find_nearest_point(cuda_features + global_id * nfeatures, nfeatures, cuda_clusters, nclusters); if (membership != index) { delta = 1; // local delta every thread // send to global delta array } cuda_delta_array[global_id] = delta; cuda_membership[global_id] = index; /*cuda_new_centers_len[index]++; // race condition for (j = 0; j < nfeatures; j++) new_centers[index][j] += feature[id][j];*/ } /*----< kmeans_clustering_par() >---------------------------------------------*/ float **kmeans_clustering_par(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, int npoints, int nclusters, float threshold, int *membership) /* out: [npoints] */ { int i, j, n = 0, index, loop = 0; int *new_centers_len; /* [nclusters]: no. 
of points in each cluster */ float delta; float **clusters; /* out: [nclusters][nfeatures] */ float **new_centers; /* [nclusters][nfeatures] */ /* allocate space for returning variable clusters[] */ clusters = (float **)malloc(nclusters * sizeof(float *)); clusters[0] = (float *)malloc(nclusters * nfeatures * sizeof(float)); for (i = 1; i < nclusters; i++) clusters[i] = clusters[i - 1] + nfeatures; /* randomly pick cluster centers */ for (i = 0; i < nclusters; i++) { //n = (int)rand() % npoints; for (j = 0; j < nfeatures; j++) clusters[i][j] = feature[n][j]; n++; } // INITIALIZED ON THE CPU AND COPIED TO GLOBAL MEMORY float * cuda_clusters; hipMalloc(&cuda_clusters, nclusters * nfeatures * sizeof(float)); hipMemcpy(cuda_clusters, clusters[0], nclusters * nfeatures * sizeof(float), hipMemcpyHostToDevice); for (i = 0; i < npoints; i++) membership[i] = -1; int* cuda_membership; hipMalloc(&cuda_membership, npoints * sizeof(int)); hipMemcpy(cuda_membership, membership, npoints * sizeof(int), hipMemcpyHostToDevice); // init cuda features float* cuda_features; hipMalloc(&cuda_features, npoints * nfeatures * sizeof(float)); hipMemcpy(cuda_features, feature[0], npoints * nfeatures * sizeof(float), hipMemcpyHostToDevice); // INITIALIZED ON THE CPU /* need to initialize new_centers_len and new_centers[0] to all 0 */ new_centers_len = (int *)calloc(nclusters, sizeof(int)); int* cuda_new_centers_len; hipMalloc(&cuda_new_centers_len, nclusters * sizeof(int)); hipMemcpy(cuda_new_centers_len, new_centers_len, nclusters * sizeof(int), hipMemcpyHostToDevice); new_centers = (float **)malloc(nclusters * sizeof(float *)); new_centers[0] = (float *)calloc(nclusters * nfeatures, sizeof(float)); for (i = 1; i < nclusters; i++) new_centers[i] = new_centers[i - 1] + nfeatures; float * cuda_new_centers; hipMalloc(&cuda_new_centers, nclusters * nfeatures * sizeof(float)); hipMemcpy(cuda_new_centers, new_centers[0], nclusters * nfeatures * sizeof(float), hipMemcpyHostToDevice); // create cuda_delta array float* cuda_delta_array; hipMalloc(&cuda_delta_array, npoints * sizeof(float)); // EACH THREAD COPIES ITS FEATURE[i] INTO THE SHARED MATRIX SHARED_FEATURES[1024][30] // ONLY THREADS WITH IDS 0..NCLUSTERS-1 COPY CLUSTERS INTO SHARED_CLUSTERS dim3 gridDim((npoints + NUM_OF_GPU_THREADS - 1) / NUM_OF_GPU_THREADS); dim3 blockDim(NUM_OF_GPU_THREADS); //do //{ // copy new_centers, new_centers_len, clusters hipLaunchKernelGGL(( updateNewCenters) , dim3(gridDim), dim3(blockDim) , 0, 0, cuda_delta_array, cuda_membership, cuda_features, nfeatures, cuda_clusters, nclusters, cuda_new_centers_len, cuda_new_centers); // // END OF THE KERNEL // // WE NOW HAVE THE REDUCED NEW_CENTERS AND NEW_CENTERS_LEN IN GLOBAL CUDA MEMORY. WE COPY THEM BACK TO CPU MEMORY, COMPUTE THE NEW CENTERS AND, BASED ON // // DELTA, DECIDE WHETHER ANOTHER ITERATION IS NEEDED.
IF IT IS, WE COPY THE COMPUTED CLUSTERS BACK INTO CUDA MEMORY // // reduced new_centers and new_centers_len // hipMemcpy(new_centers_len, cuda_new_centers_len, nclusters * sizeof(int), hipMemcpyDeviceToHost); // hipMemcpy(new_centers[0], cuda_new_centers, nclusters * nfeatures * sizeof(float), hipMemcpyDeviceToHost); // // CPU // /* replace old cluster centers with new_centers */ // /*int xi, xj; // for (xi = 0; xi < nclusters; xi++) // { // for (xj = 0; xj < nfeatures; xj++) // { // if (new_centers_len[xi] > 0) // clusters[xi][xj] = new_centers[xi][xj] / new_centers_len[xi]; // new_centers[xi][xj] = 0.0; /* set back to 0 */ // } // /*new_centers_len[xi] = 0; /* set back to 0 */ // //} // // delta reduction // // deltaReduction<<< gridDim, blockDim >>>(); // //delta /= npoints; //} while (delta > threshold);*/ //free(new_centers[0]); //free(new_centers); //free(new_centers_len); //return clusters;*/ }
201636846aea76fab15b3f701899ceba310f87f7.cu
#include "cuda_runtime.h" #include "dz1z5.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include <math.h> #include <sys/types.h> #include <fcntl.h> #define NUM_OF_GPU_THREADS 1024 /*---< cluster_par() >-----------------------------------------------------------*/ int cluster_par(int numObjects, /* number of input objects */ int numAttributes, /* size of attribute of each object */ float **attributes, /* [numObjects][numAttributes] */ int num_nclusters, float threshold, /* in: */ float ***cluster_centres /* out: [best_nclusters][numAttributes] */ ) { int nclusters; int *membership; float **tmp_cluster_centres; membership = (int *)malloc(numObjects * sizeof(int)); nclusters = num_nclusters; srand(7); tmp_cluster_centres = kmeans_clustering_par(attributes, numAttributes, numObjects, nclusters, threshold, membership); if (*cluster_centres) { free((*cluster_centres)[0]); free(*cluster_centres); } *cluster_centres = tmp_cluster_centres; free(membership); return 0; } __device__ __inline float cuda_euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans = 0.0; for (i = 0; i < numdims; i++) ans += (pt1[i] - pt2[i]) * (pt1[i] - pt2[i]); return (ans); } __device__ int cuda_find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float *pts, /* [npts][nfeatures] */ int npts) { int index, i; float min_dist = FLT_MAX; /* find the cluster center id with min distance to pt */ for (i = 0; i < npts; i++) { float dist; dist = cuda_euclid_dist_2(pt, pts + i * nfeatures, nfeatures); /* no need square root */ if (dist < min_dist) { min_dist = dist; index = i; } } return (index); } __global__ void updateNewCenters(float* cuda_delta_array, int* cuda_membership, float* cuda_features, int nfeatures, float* cuda_clusters, int nclusters, int* cuda_new_centers_len, float* cuda_new_centers) { int global_id = blockIdx.x * blockDim.x + threadIdx.x; int local_id = threadIdx.x; float delta = 0; int membership = cuda_membership[global_id]; // global mem access int index = cuda_find_nearest_point(cuda_features + global_id * nfeatures, nfeatures, cuda_clusters, nclusters); if (membership != index) { delta = 1; // local delta every thread // send to global delta array } cuda_delta_array[global_id] = delta; cuda_membership[global_id] = index; /*cuda_new_centers_len[index]++; // race condition for (j = 0; j < nfeatures; j++) new_centers[index][j] += feature[id][j];*/ } /*----< kmeans_clustering_par() >---------------------------------------------*/ float **kmeans_clustering_par(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, int npoints, int nclusters, float threshold, int *membership) /* out: [npoints] */ { int i, j, n = 0, index, loop = 0; int *new_centers_len; /* [nclusters]: no. 
of points in each cluster */ float delta; float **clusters; /* out: [nclusters][nfeatures] */ float **new_centers; /* [nclusters][nfeatures] */ /* allocate space for returning variable clusters[] */ clusters = (float **)malloc(nclusters * sizeof(float *)); clusters[0] = (float *)malloc(nclusters * nfeatures * sizeof(float)); for (i = 1; i < nclusters; i++) clusters[i] = clusters[i - 1] + nfeatures; /* randomly pick cluster centers */ for (i = 0; i < nclusters; i++) { //n = (int)rand() % npoints; for (j = 0; j < nfeatures; j++) clusters[i][j] = feature[n][j]; n++; } // INITIALIZED ON THE CPU AND COPIED TO GLOBAL MEMORY float * cuda_clusters; cudaMalloc(&cuda_clusters, nclusters * nfeatures * sizeof(float)); cudaMemcpy(cuda_clusters, clusters[0], nclusters * nfeatures * sizeof(float), cudaMemcpyHostToDevice); for (i = 0; i < npoints; i++) membership[i] = -1; int* cuda_membership; cudaMalloc(&cuda_membership, npoints * sizeof(int)); cudaMemcpy(cuda_membership, membership, npoints * sizeof(int), cudaMemcpyHostToDevice); // init cuda features float* cuda_features; cudaMalloc(&cuda_features, npoints * nfeatures * sizeof(float)); cudaMemcpy(cuda_features, feature[0], npoints * nfeatures * sizeof(float), cudaMemcpyHostToDevice); // INITIALIZED ON THE CPU /* need to initialize new_centers_len and new_centers[0] to all 0 */ new_centers_len = (int *)calloc(nclusters, sizeof(int)); int* cuda_new_centers_len; cudaMalloc(&cuda_new_centers_len, nclusters * sizeof(int)); cudaMemcpy(cuda_new_centers_len, new_centers_len, nclusters * sizeof(int), cudaMemcpyHostToDevice); new_centers = (float **)malloc(nclusters * sizeof(float *)); new_centers[0] = (float *)calloc(nclusters * nfeatures, sizeof(float)); for (i = 1; i < nclusters; i++) new_centers[i] = new_centers[i - 1] + nfeatures; float * cuda_new_centers; cudaMalloc(&cuda_new_centers, nclusters * nfeatures * sizeof(float)); cudaMemcpy(cuda_new_centers, new_centers[0], nclusters * nfeatures * sizeof(float), cudaMemcpyHostToDevice); // create cuda_delta array float* cuda_delta_array; cudaMalloc(&cuda_delta_array, npoints * sizeof(float)); // EACH THREAD COPIES ITS FEATURE[i] INTO THE SHARED MATRIX SHARED_FEATURES[1024][30] // ONLY THREADS WITH IDS 0..NCLUSTERS-1 COPY CLUSTERS INTO SHARED_CLUSTERS dim3 gridDim((npoints + NUM_OF_GPU_THREADS - 1) / NUM_OF_GPU_THREADS); dim3 blockDim(NUM_OF_GPU_THREADS); //do //{ // copy new_centers, new_centers_len, clusters updateNewCenters <<< gridDim, blockDim >>> (cuda_delta_array, cuda_membership, cuda_features, nfeatures, cuda_clusters, nclusters, cuda_new_centers_len, cuda_new_centers); // // END OF THE KERNEL // // WE NOW HAVE THE REDUCED NEW_CENTERS AND NEW_CENTERS_LEN IN GLOBAL CUDA MEMORY. WE COPY THEM BACK TO CPU MEMORY, COMPUTE THE NEW CENTERS AND, BASED ON // // DELTA, DECIDE WHETHER ANOTHER ITERATION IS NEEDED.
IF IT IS, WE COPY THE COMPUTED CLUSTERS BACK INTO CUDA MEMORY // // reduced new_centers and new_centers_len // cudaMemcpy(new_centers_len, cuda_new_centers_len, nclusters * sizeof(int), cudaMemcpyDeviceToHost); // cudaMemcpy(new_centers[0], cuda_new_centers, nclusters * nfeatures * sizeof(float), cudaMemcpyDeviceToHost); // // CPU // /* replace old cluster centers with new_centers */ // /*int xi, xj; // for (xi = 0; xi < nclusters; xi++) // { // for (xj = 0; xj < nfeatures; xj++) // { // if (new_centers_len[xi] > 0) // clusters[xi][xj] = new_centers[xi][xj] / new_centers_len[xi]; // new_centers[xi][xj] = 0.0; /* set back to 0 */ // } // /*new_centers_len[xi] = 0; /* set back to 0 */ // //} // // delta reduction // // deltaReduction<<< gridDim, blockDim >>>(); // //delta /= npoints; //} while (delta > threshold);*/ //free(new_centers[0]); //free(new_centers); //free(new_centers_len); //return clusters;*/ }
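The commented-out block above outlines the intended host-side convergence loop: copy the reduced new_centers/new_centers_len back, recompute the cluster centres, reduce delta, and decide whether another iteration is needed. A minimal sketch of the delta step only, assuming the per-point flags written by updateNewCenters into cuda_delta_array are summed on the CPU; the helper name sum_delta_on_host is illustrative and not part of the original file.

#include <cuda_runtime.h>
#include <stdlib.h>

/* Illustrative helper: fetch the per-point reassignment flags and average them.
   A value close to 0 means few points changed cluster, so iteration can stop. */
static float sum_delta_on_host(const float *cuda_delta_array, int npoints)
{
    float *h_delta = (float *)malloc(npoints * sizeof(float));
    float delta = 0.0f;
    cudaMemcpy(h_delta, cuda_delta_array, npoints * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < npoints; i++)
        delta += h_delta[i];
    free(h_delta);
    return delta / npoints;   /* matches the commented-out "delta /= npoints" */
}

Inside the commented-out do/while this would stand in for the deltaReduction placeholder: keep iterating while sum_delta_on_host(cuda_delta_array, npoints) > threshold.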
22a7a308f68d47acfb9b7ebd597121e59f842906.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include <helper_cuda.h> //#include <algorithm> #include <time.h> #include <limits.h> //#define RADIX 4294967296 //#define RADIX 2147483658 #define RADIX 65536 //#define numElements 1048576 #define numElements 30000 #define numIterations 10 #define BLOCKSIZE 128 // countlength/threadsperblock void __global__ d_doPrefix(int* __restrict__ d_count, int countLength, int* __restrict__ d_prefix, int prefixLength) { // printf("do prefix = %d \n", threadIdx.x); int sum = 0; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < prefixLength) { d_prefix[index] = 0; } __syncthreads(); for(int i=index; i>=0; i--) { sum += d_count[i]; } if(index < prefixLength) atomicAdd(d_prefix +index+1, sum); //printf("finished doPrefix \n"); } void __global__ d_doCount(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_count, int countLength, int offset) { //printf("do count \n"); int index = threadIdx.x + blockIdx.x * blockDim.x; //printf("index = %d \n", index); if(index <countLength) { d_count[index] = 0; } __syncthreads(); if(index < unsortedLength) { int numToSort = d_unsorted[index]; numToSort = numToSort >> offset; numToSort = (countLength-1)&(numToSort); //printf("num = %d \n", numToSort); atomicAdd(d_count + numToSort, 1); } //printf("finished count \n"); } /* * d_doReorder: * leftover from an attempt to find a parallel reorder strategy * did not get this working */ /* void __global__ d_doReorder(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_sorted, int sortedLength, int* __restrict__ d_prefix, int prefixLength, int offset) { int index = threadIdx.x + blockIdx.x * blockDim.x; if( index <unsortedLength) { int currentNum; int newIndex; int prefix; //printf(" doReorder index %d \n", index); // shifting and masking currentNum = d_unsorted[index]; currentNum = currentNum >> offset; currentNum = (prefixLength -1) & currentNum; if (currentNum < prefixLength) prefix = d_prefix[currentNum]; //else //prefix = sortedLength; newIndex = index % prefix; //printf("prefix check: prefix = %d masked number = %d, real number = %d, index = %d, newIndex = %d \n", prefix, currentNum, d_unsorted[index], index, newIndex); d_sorted[newIndex] = d_unsorted[index]; //d_unsorted = d_sorted; } } */ /* * d_lazyReorder: * sequential reordering done on the GPU, */ void __global__ d_lazyReorder(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_sorted, int sortedLength, int* __restrict__ d_prefix, int prefixLength, int offset, int threadCount) { //printf("lazy sort prefixlength %d, offset %d \n", prefixLength, offset); //int index = threadIdx.x + blockIdx.x * blockDim.x; int loopMax = ceil((float)unsortedLength/(float)threadCount); int currentNum; int newIndex; if(threadIdx.x < 1) { for (int i =0; i<unsortedLength; i++) { // shifting and masking currentNum = d_unsorted[i]; currentNum = currentNum >> offset; currentNum = (prefixLength -1) & currentNum; newIndex = d_prefix[currentNum]; d_prefix[currentNum]++; d_sorted[newIndex] = d_unsorted[i]; //d_unsorted = d_sorted; } } __syncthreads(); for (int i =0; i<loopMax; i++) { int index = threadIdx.x*loopMax + i; if( index < sortedLength) d_unsorted[index] = d_sorted[index]; } } /* * d_lazyReorderorig: * sequential reordering done on the GPU, */ void __global__ d_lazyReorderorig(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_sorted, int sortedLength, int* __restrict__ d_prefix, int prefixLength, int offset) { 
//printf("lazy sort prefixlength %d, offset %d \n", prefixLength, offset); int currentNum; int newIndex; for (int i =0; i<unsortedLength; i++) { // shifting and masking currentNum = d_unsorted[i]; currentNum = currentNum >> offset; currentNum = (prefixLength -1) & currentNum; newIndex = d_prefix[currentNum]; d_prefix[currentNum]++; d_sorted[newIndex] = d_unsorted[i]; //d_unsorted = d_sorted; } for (int i =0; i<unsortedLength; i++) { d_unsorted[i] = d_sorted[i]; } }
22a7a308f68d47acfb9b7ebd597121e59f842906.cu
//#include <helper_cuda.h> //#include <algorithm> #include <time.h> #include <limits.h> //#define RADIX 4294967296 //#define RADIX 2147483658 #define RADIX 65536 //#define numElements 1048576 #define numElements 30000 #define numIterations 10 #define BLOCKSIZE 128 // countlength/threadsperblock void __global__ d_doPrefix(int* __restrict__ d_count, int countLength, int* __restrict__ d_prefix, int prefixLength) { // printf("do prefix = %d \n", threadIdx.x); int sum = 0; int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < prefixLength) { d_prefix[index] = 0; } __syncthreads(); for(int i=index; i>=0; i--) { sum += d_count[i]; } if(index < prefixLength) atomicAdd(d_prefix +index+1, sum); //printf("finished doPrefix \n"); } void __global__ d_doCount(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_count, int countLength, int offset) { //printf("do count \n"); int index = threadIdx.x + blockIdx.x * blockDim.x; //printf("index = %d \n", index); if(index <countLength) { d_count[index] = 0; } __syncthreads(); if(index < unsortedLength) { int numToSort = d_unsorted[index]; numToSort = numToSort >> offset; numToSort = (countLength-1)&(numToSort); //printf("num = %d \n", numToSort); atomicAdd(d_count + numToSort, 1); } //printf("finished count \n"); } /* * d_doReorder: * leftover from an attempt to find a parallel reorder strategy * did not get this working */ /* void __global__ d_doReorder(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_sorted, int sortedLength, int* __restrict__ d_prefix, int prefixLength, int offset) { int index = threadIdx.x + blockIdx.x * blockDim.x; if( index <unsortedLength) { int currentNum; int newIndex; int prefix; //printf(" doReorder index %d \n", index); // shifting and masking currentNum = d_unsorted[index]; currentNum = currentNum >> offset; currentNum = (prefixLength -1) & currentNum; if (currentNum < prefixLength) prefix = d_prefix[currentNum]; //else //prefix = sortedLength; newIndex = index % prefix; //printf("prefix check: prefix = %d masked number = %d, real number = %d, index = %d, newIndex = %d \n", prefix, currentNum, d_unsorted[index], index, newIndex); d_sorted[newIndex] = d_unsorted[index]; //d_unsorted = d_sorted; } } */ /* * d_lazyReorder: * sequential reordering done on the GPU, */ void __global__ d_lazyReorder(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_sorted, int sortedLength, int* __restrict__ d_prefix, int prefixLength, int offset, int threadCount) { //printf("lazy sort prefixlength %d, offset %d \n", prefixLength, offset); //int index = threadIdx.x + blockIdx.x * blockDim.x; int loopMax = ceil((float)unsortedLength/(float)threadCount); int currentNum; int newIndex; if(threadIdx.x < 1) { for (int i =0; i<unsortedLength; i++) { // shifting and masking currentNum = d_unsorted[i]; currentNum = currentNum >> offset; currentNum = (prefixLength -1) & currentNum; newIndex = d_prefix[currentNum]; d_prefix[currentNum]++; d_sorted[newIndex] = d_unsorted[i]; //d_unsorted = d_sorted; } } __syncthreads(); for (int i =0; i<loopMax; i++) { int index = threadIdx.x*loopMax + i; if( index < sortedLength) d_unsorted[index] = d_sorted[index]; } } /* * d_lazyReorderorig: * sequential reordering done on the GPU, */ void __global__ d_lazyReorderorig(int* __restrict__ d_unsorted, int unsortedLength, int* __restrict__ d_sorted, int sortedLength, int* __restrict__ d_prefix, int prefixLength, int offset) { //printf("lazy sort prefixlength %d, offset %d \n", prefixLength, offset); int currentNum; int 
newIndex; for (int i =0; i<unsortedLength; i++) { // shifting and masking currentNum = d_unsorted[i]; currentNum = currentNum >> offset; currentNum = (prefixLength -1) & currentNum; newIndex = d_prefix[currentNum]; d_prefix[currentNum]++; d_sorted[newIndex] = d_unsorted[i]; //d_unsorted = d_sorted; } for (int i =0; i<unsortedLength; i++) { d_unsorted[i] = d_sorted[i]; } }
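The excerpt defines the counting, prefix-sum, and (deliberately sequential) reorder kernels, but does not show a host driver that chains them per digit. A minimal sketch of such a driver, assuming non-negative 32-bit keys sorted in two 16-bit passes using the RADIX and BLOCKSIZE macros above; the function name and buffer arguments are illustrative, and d_prefix is assumed to hold RADIX + 1 ints because d_doPrefix writes to index + 1.

#include <cuda_runtime.h>

/* Illustrative driver; assumes it is compiled in the same translation unit as the
   kernels above so that d_doCount, d_doPrefix and d_lazyReorderorig are visible. */
void radixSortPasses(int *d_unsorted, int *d_sorted, int *d_count, int *d_prefix, int n)
{
    dim3 block(BLOCKSIZE);
    dim3 gridData((n + BLOCKSIZE - 1) / BLOCKSIZE);      /* covers the key array   */
    dim3 gridRadix((RADIX + BLOCKSIZE - 1) / BLOCKSIZE); /* covers the count array */

    for (int offset = 0; offset < 32; offset += 16) {    /* low 16 bits, then high */
        d_doCount<<<gridData, block>>>(d_unsorted, n, d_count, RADIX, offset);
        d_doPrefix<<<gridRadix, block>>>(d_count, RADIX, d_prefix, RADIX);
        /* The lazy reorder is sequential by design, so launch a single thread. */
        d_lazyReorderorig<<<1, 1>>>(d_unsorted, n, d_sorted, n, d_prefix, RADIX, offset);
    }
    cudaDeviceSynchronize();
}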
bf04a79fa76d13f7411de67295ca1444f8bdf46c.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cmath> #include <cstdio> #include <iostream> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <hip/hip_runtime_api.h> #include "cutil.h" #include <boost/numeric/ublas/matrix.hpp> #include <boost/numeric/ublas/matrix_proxy.hpp> #include <boost/numeric/ublas/io.hpp> using namespace boost::numeric::ublas; // Simple utility function to check for CUDA runtime errors void checkCUDAerror(const char* msg); //device code __global__ void doCalc(float* gamma1, float* gamma2, float* dist, float* ang, float* output) { //does all the i's simultaneously - one for each thread int idx = blockDim.x * blockIdx.x + threadIdx.x; // each thread has the gamma1 and gamma2 values for one point in the matrix // also the distance of this point from the point it's contributing to // and the angle from this point. These could be calculated on the GPU, // but are not. Because it's easier to pass the info. Might be quicker to calc tho. //this is a rather complicated calculation. float thetaMax = 32; //chould be passed to fn float xc = 0.15; // could be passed to fn float x = dist[idx] / thetaMax; float Q = (1.0 / (1 + exp(6 - 150*x) + exp(-47 + 50*x))) * (tanh(x/xc) / (x/xc)); //now for tangential component of shear. float gammaTan = gamma1[idx]*cos(ang[idx]) + gamma2[idx]*cos(ang[idx]+45); output[idx] = Q*gammaTan; } double execute_kernel_gpu(matrix<float> this_gamma1, matrix<float> this_gamma2){ /// what am I going to do? // This func has been called for one point in the 1024x1024 grid // I have the 64x64 matrix of point surrounding this space for g1 and g2 // I need to go over all of these points, take g1 and g2 for that point // and make the calc of the contribution of that point to the total // then I can sum them all together at the end and return the variable //start with the mem allocation int ncalc = 64*64; size_t sizeneeded = ncalc*sizeof(float); float *h_gamma1 = 0; float *h_gamma2 = 0; float *h_dist = 0; float *h_ang = 0; h_gamma1 = (float*) malloc(sizeneeded); h_gamma2 = (float*) malloc(sizeneeded); h_dist = (float*) malloc(sizeneeded); h_ang = (float*) malloc(sizeneeded); //convert the matrices to vectors. GPU can't handle matrices (in this format). 
int idx=0; for(int i=0;i<64;i++){ for(int j=0;j<64;j++){ idx = 64*i + j; h_gamma1[idx] = this_gamma1(i,j); h_gamma2[idx] = this_gamma2(i,j); //central point is 32,32 h_dist[idx] = sqrt(fabs(32-i)*fabs(32-i) + fabs(32-j)*fabs(32-j)); h_ang[idx] = atan( fabs(32-i)/fabs(32-j)); // this is in radians } } //allocate device memory float *d_gamma1, *d_gamma2, *d_dist, *d_ang; hipMalloc(&d_gamma1, sizeneeded); hipMalloc(&d_gamma2, sizeneeded); hipMalloc(&d_dist, sizeneeded); hipMalloc(&d_ang, sizeneeded); // output vector is going to be the calculated value for each point float *h_output, *d_output; h_output = (float*)malloc(sizeneeded); hipMalloc(&d_output, sizeneeded); //copy vectors from host to device memory hipMemcpy(d_gamma1, h_gamma1, sizeneeded, hipMemcpyHostToDevice); hipMemcpy(d_gamma2, h_gamma2, sizeneeded, hipMemcpyHostToDevice); hipMemcpy(d_dist, h_dist, sizeneeded, hipMemcpyHostToDevice); hipMemcpy(d_ang, h_ang, sizeneeded, hipMemcpyHostToDevice); hipMemcpy(d_output, h_output, sizeneeded, hipMemcpyHostToDevice); //check memory is alright if (0==h_gamma1 || 0==h_gamma2 || 0==h_dist || 0==h_ang || 0==h_output) printf("can't allocate memory on host \n"); if (0==d_gamma1 || 0==d_gamma2 || 0==d_dist || 0==d_ang || 0==d_output) printf("can't allocate memory on device \n"); checkCUDAerror("memory"); //kernel info - note 512 thread per block max! int threadsPerBlock = 512; int blocksPerGrid = 8; // need 64*64 threads total hipLaunchKernelGGL(( doCalc), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_gamma1, d_gamma2, d_dist, d_ang, d_output); checkCUDAerror("kernel"); //get the output back off the device hipMemcpy(h_output, d_output, sizeneeded, hipMemcpyDeviceToHost); // now sum this up. there is surely a better way to do this.. double thesum = 0; for(int i=0;i<ncalc;i++){ if(isnan(h_output[i])) continue;//goes wierd at some point thesum +=h_output[i]; } //free up the memory hipFree(d_gamma1); hipFree(d_gamma2); hipFree(d_dist); hipFree(d_ang); hipFree(d_output); free(h_gamma1); free(h_gamma2); free(h_dist); free(h_ang); free(h_output); // science note: I have to normalise by n points I summed over. return thesum/ncalc; } //simple function to check for errors. From Dr Dobbs. void checkCUDAerror(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } }
bf04a79fa76d13f7411de67295ca1444f8bdf46c.cu
#include <cstdlib> #include <cmath> #include <cstdio> #include <iostream> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <cuda_runtime_api.h> #include "cutil.h" #include <boost/numeric/ublas/matrix.hpp> #include <boost/numeric/ublas/matrix_proxy.hpp> #include <boost/numeric/ublas/io.hpp> using namespace boost::numeric::ublas; // Simple utility function to check for CUDA runtime errors void checkCUDAerror(const char* msg); //device code __global__ void doCalc(float* gamma1, float* gamma2, float* dist, float* ang, float* output) { //does all the i's simultaneously - one for each thread int idx = blockDim.x * blockIdx.x + threadIdx.x; // each thread has the gamma1 and gamma2 values for one point in the matrix // also the distance of this point from the point it's contributing to // and the angle from this point. These could be calculated on the GPU, // but are not. Because it's easier to pass the info. Might be quicker to calc tho. //this is a rather complicated calculation. float thetaMax = 32; //chould be passed to fn float xc = 0.15; // could be passed to fn float x = dist[idx] / thetaMax; float Q = (1.0 / (1 + exp(6 - 150*x) + exp(-47 + 50*x))) * (tanh(x/xc) / (x/xc)); //now for tangential component of shear. float gammaTan = gamma1[idx]*cos(ang[idx]) + gamma2[idx]*cos(ang[idx]+45); output[idx] = Q*gammaTan; } double execute_kernel_gpu(matrix<float> this_gamma1, matrix<float> this_gamma2){ /// what am I going to do? // This func has been called for one point in the 1024x1024 grid // I have the 64x64 matrix of point surrounding this space for g1 and g2 // I need to go over all of these points, take g1 and g2 for that point // and make the calc of the contribution of that point to the total // then I can sum them all together at the end and return the variable //start with the mem allocation int ncalc = 64*64; size_t sizeneeded = ncalc*sizeof(float); float *h_gamma1 = 0; float *h_gamma2 = 0; float *h_dist = 0; float *h_ang = 0; h_gamma1 = (float*) malloc(sizeneeded); h_gamma2 = (float*) malloc(sizeneeded); h_dist = (float*) malloc(sizeneeded); h_ang = (float*) malloc(sizeneeded); //convert the matrices to vectors. GPU can't handle matrices (in this format). 
int idx=0; for(int i=0;i<64;i++){ for(int j=0;j<64;j++){ idx = 64*i + j; h_gamma1[idx] = this_gamma1(i,j); h_gamma2[idx] = this_gamma2(i,j); //central point is 32,32 h_dist[idx] = sqrt(fabs(32-i)*fabs(32-i) + fabs(32-j)*fabs(32-j)); h_ang[idx] = atan( fabs(32-i)/fabs(32-j)); // this is in radians } } //allocate device memory float *d_gamma1, *d_gamma2, *d_dist, *d_ang; cudaMalloc(&d_gamma1, sizeneeded); cudaMalloc(&d_gamma2, sizeneeded); cudaMalloc(&d_dist, sizeneeded); cudaMalloc(&d_ang, sizeneeded); // output vector is going to be the calculated value for each point float *h_output, *d_output; h_output = (float*)malloc(sizeneeded); cudaMalloc(&d_output, sizeneeded); //copy vectors from host to device memory cudaMemcpy(d_gamma1, h_gamma1, sizeneeded, cudaMemcpyHostToDevice); cudaMemcpy(d_gamma2, h_gamma2, sizeneeded, cudaMemcpyHostToDevice); cudaMemcpy(d_dist, h_dist, sizeneeded, cudaMemcpyHostToDevice); cudaMemcpy(d_ang, h_ang, sizeneeded, cudaMemcpyHostToDevice); cudaMemcpy(d_output, h_output, sizeneeded, cudaMemcpyHostToDevice); //check memory is alright if (0==h_gamma1 || 0==h_gamma2 || 0==h_dist || 0==h_ang || 0==h_output) printf("can't allocate memory on host \n"); if (0==d_gamma1 || 0==d_gamma2 || 0==d_dist || 0==d_ang || 0==d_output) printf("can't allocate memory on device \n"); checkCUDAerror("memory"); //kernel info - note 512 thread per block max! int threadsPerBlock = 512; int blocksPerGrid = 8; // need 64*64 threads total doCalc<<<blocksPerGrid, threadsPerBlock>>>(d_gamma1, d_gamma2, d_dist, d_ang, d_output); checkCUDAerror("kernel"); //get the output back off the device cudaMemcpy(h_output, d_output, sizeneeded, cudaMemcpyDeviceToHost); // now sum this up. there is surely a better way to do this.. double thesum = 0; for(int i=0;i<ncalc;i++){ if(isnan(h_output[i])) continue;//goes wierd at some point thesum +=h_output[i]; } //free up the memory cudaFree(d_gamma1); cudaFree(d_gamma2); cudaFree(d_dist); cudaFree(d_ang); cudaFree(d_output); free(h_gamma1); free(h_gamma2); free(h_dist); free(h_ang); free(h_output); // science note: I have to normalise by n points I summed over. return thesum/ncalc; } //simple function to check for errors. From Dr Dobbs. void checkCUDAerror(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } }
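The host loop above that sums h_output notes "there is surely a better way to do this"; one option is to reduce on the GPU before copying a single float back. A minimal shared-memory reduction sketch for the 64*64 outputs, assuming a single block of 512 threads (matching threadsPerBlock above); the kernel name and the d_sum result buffer are illustrative, and NaN entries are skipped exactly as in the host loop.

#include <cuda_runtime.h>
#include <math.h>

/* Illustrative single-block reduction over the d_output array produced by doCalc. */
__global__ void reduceOutput(const float *output, int n, float *d_sum)
{
    __shared__ float partial[512];
    float acc = 0.0f;

    /* Each thread strides over the array (8 elements each for n = 4096, 512 threads). */
    for (int i = threadIdx.x; i < n; i += blockDim.x) {
        float v = output[i];
        if (!isnan(v))            /* skip NaN entries, as the CPU loop does */
            acc += v;
    }
    partial[threadIdx.x] = acc;
    __syncthreads();

    /* Tree reduction in shared memory; assumes blockDim.x is a power of two <= 512. */
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s)
            partial[threadIdx.x] += partial[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        *d_sum = partial[0];
}

Launched as reduceOutput<<<1, 512>>>(d_output, ncalc, d_sum) and followed by a one-float cudaMemcpy, this would replace the host-side summation loop; the final division by ncalc stays on the CPU.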
1d5e5927736564ae4d1db76a3ed6d6a19115781a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "projektcuda.h" #include "project_comm.h" //#include "mex.h" /* Kernel to square elements of the array on the GPU */ /* created on 28.11.2009 It should be running only in one block. separate vectors in parts in length of VECTOR_BLOCK_SIZE */ /* N size of Vector */ __global__ void device_dotMul(t_ve* in1, t_ve* in2,t_ve* out, unsigned int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx > N) return; if(idx == 0)out[idx] = 0; __syncthreads(); //block index int blockRow = blockIdx.x; // thread index int row = threadIdx.x; //int aBegin = blockRow*VECTOR_BLOCK_SIZE; //int aEnd = aBegin + VECTOR_BLOCK_SIZE - 1; //working only in one block int aBegin = 0; int aEnd = N; int aStep = VECTOR_BLOCK_SIZE; // // computed by the thread t_ve outValue = 0; //for (int a = aBegin;(a <= aEnd)&&(a <= N);a += aStep){ for (int a = aBegin;a <= aEnd;a += aStep){ // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[VECTOR_BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[VECTOR_BLOCK_SIZE]; __shared__ float Cs[VECTOR_BLOCK_SIZE]; Cs[row] = 0; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix AS(row) = in1[a + row]; BS(row) = in2[a + row]; // Synchronize to make sure the matrices are loaded __syncthreads(); Cs[row] = AS(row) * BS(row); /* // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; (k < VECTOR_BLOCK_SIZE)&&(k < N); ++k) //for (int k = 0; (k < VECTOR_BLOCK_SIZE); ++k) outValue += AS(k) * BS(k); */ // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); // computing the sum in one thread for one loop if (row == 0) { //for (int k = 0; (k < VECTOR_BLOCK_SIZE)&&(idx < N); k++) //out[blockIdx.x] += Cs[k]; for (int k = 1; (k < VECTOR_BLOCK_SIZE)&&(idx < N); k++){ Cs[0]+= Cs[k]; } outValue += Cs[0]; //outValue += 1; } __syncthreads(); } //__syncthreads(); //if(idx==0){ // for(int k = 1; k <= gridDim.x; k++)out[0] += out[k]; //} out[0] = outValue; }
1d5e5927736564ae4d1db76a3ed6d6a19115781a.cu
#include "cuda.h" #include <stdio.h> #include "projektcuda.h" #include "project_comm.h" //#include "mex.h" /* Kernel to square elements of the array on the GPU */ /* created on 28.11.2009 It should be running only in one block. separate vectors in parts in length of VECTOR_BLOCK_SIZE */ /* N size of Vector */ __global__ void device_dotMul(t_ve* in1, t_ve* in2,t_ve* out, unsigned int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx > N) return; if(idx == 0)out[idx] = 0; __syncthreads(); //block index int blockRow = blockIdx.x; // thread index int row = threadIdx.x; //int aBegin = blockRow*VECTOR_BLOCK_SIZE; //int aEnd = aBegin + VECTOR_BLOCK_SIZE - 1; //working only in one block int aBegin = 0; int aEnd = N; int aStep = VECTOR_BLOCK_SIZE; // // computed by the thread t_ve outValue = 0; //for (int a = aBegin;(a <= aEnd)&&(a <= N);a += aStep){ for (int a = aBegin;a <= aEnd;a += aStep){ // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[VECTOR_BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[VECTOR_BLOCK_SIZE]; __shared__ float Cs[VECTOR_BLOCK_SIZE]; Cs[row] = 0; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix AS(row) = in1[a + row]; BS(row) = in2[a + row]; // Synchronize to make sure the matrices are loaded __syncthreads(); Cs[row] = AS(row) * BS(row); /* // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; (k < VECTOR_BLOCK_SIZE)&&(k < N); ++k) //for (int k = 0; (k < VECTOR_BLOCK_SIZE); ++k) outValue += AS(k) * BS(k); */ // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); // computing the sum in one thread for one loop if (row == 0) { //for (int k = 0; (k < VECTOR_BLOCK_SIZE)&&(idx < N); k++) //out[blockIdx.x] += Cs[k]; for (int k = 1; (k < VECTOR_BLOCK_SIZE)&&(idx < N); k++){ Cs[0]+= Cs[k]; } outValue += Cs[0]; //outValue += 1; } __syncthreads(); } //__syncthreads(); //if(idx==0){ // for(int k = 1; k <= gridDim.x; k++)out[0] += out[k]; //} out[0] = outValue; }
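device_dotMul above ends with every thread writing out[0], and the cross-block reduction is commented out. A minimal alternative sketch that avoids the race, assuming t_ve is float (as the shared arrays above already are) and a device of compute capability 2.0 or newer for the float atomicAdd; the kernel name is illustrative and out[0] must be zeroed before launch.

#include <cuda_runtime.h>

/* Illustrative alternative: each block reduces its slice in shared memory and
   atomically adds one partial sum into out[0]. Launch with
   dotMulAtomic<<<numBlocks, blockSize, blockSize * sizeof(float)>>>(in1, in2, out, N)
   using a power-of-two blockSize. */
__global__ void dotMulAtomic(const float *in1, const float *in2, float *out, unsigned int N)
{
    extern __shared__ float partial[];               /* one slot per thread */
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    partial[threadIdx.x] = (idx < N) ? in1[idx] * in2[idx] : 0.0f;
    __syncthreads();

    /* Standard tree reduction over the block's products. */
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s)
            partial[threadIdx.x] += partial[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicAdd(out, partial[0]);                   /* out[0] zeroed beforehand */
}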
711adc7b71484f493125a138e95045666b5e3dc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <accelerate_cuda.h> static TexInt32 arrIn0_0; extern "C" __global__ void scanl(const Int64 shIn0_0, const Int64 shOut_0, Int32* __restrict__ arrOut_0, const Int64 shBlk_0, Int32* __restrict__ arrBlk_0, Int32* __restrict__ arrSum_0) { extern volatile __shared__ Int32 sdata0[]; Int32 x0; Int32 y0; Int32 z0; const Int64 sh0 = shIn0_0; const int shapeSize = sh0; const int intervalSize = (shapeSize + gridDim.x - 1) / gridDim.x; int carryIn = 0; if (threadIdx.x == 0) { if (gridDim.x > 1) { z0 = arrBlk_0[blockIdx.x]; } else { z0 = (Int32) 0; } } const int start = blockIdx.x * intervalSize; const int end = min(start + intervalSize, shapeSize); const int numElements = end - start; int seg; for (seg = threadIdx.x; seg < numElements; seg += blockDim.x) { const int ix = start + seg; x0 = indexArray(arrIn0_0, ix); if (threadIdx.x == 0) { x0 = z0 + x0; } sdata0[threadIdx.x] = x0; __syncthreads(); if (blockDim.x > 1) { if (threadIdx.x >= 1) { y0 = sdata0[threadIdx.x - 1]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 2) { if (threadIdx.x >= 2) { y0 = sdata0[threadIdx.x - 2]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 4) { if (threadIdx.x >= 4) { y0 = sdata0[threadIdx.x - 4]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 8) { if (threadIdx.x >= 8) { y0 = sdata0[threadIdx.x - 8]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 16) { if (threadIdx.x >= 16) { y0 = sdata0[threadIdx.x - 16]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 32) { if (threadIdx.x >= 32) { y0 = sdata0[threadIdx.x - 32]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 64) { if (threadIdx.x >= 64) { y0 = sdata0[threadIdx.x - 64]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 128) { if (threadIdx.x >= 128) { y0 = sdata0[threadIdx.x - 128]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 256) { if (threadIdx.x >= 256) { y0 = sdata0[threadIdx.x - 256]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 512) { if (threadIdx.x >= 512) { y0 = sdata0[threadIdx.x - 512]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (1) { if (threadIdx.x == 0) { x0 = z0; } else { x0 = sdata0[threadIdx.x - 1]; } } arrOut_0[ix] = x0; if (threadIdx.x == 0) { const int last = min(numElements - seg, blockDim.x) - 1; z0 = sdata0[last]; } } if (threadIdx.x == 0 && blockIdx.x == gridDim.x - 1) { arrSum_0[0] = z0; } }
711adc7b71484f493125a138e95045666b5e3dc6.cu
#include <accelerate_cuda.h> static TexInt32 arrIn0_0; extern "C" __global__ void scanl(const Int64 shIn0_0, const Int64 shOut_0, Int32* __restrict__ arrOut_0, const Int64 shBlk_0, Int32* __restrict__ arrBlk_0, Int32* __restrict__ arrSum_0) { extern volatile __shared__ Int32 sdata0[]; Int32 x0; Int32 y0; Int32 z0; const Int64 sh0 = shIn0_0; const int shapeSize = sh0; const int intervalSize = (shapeSize + gridDim.x - 1) / gridDim.x; int carryIn = 0; if (threadIdx.x == 0) { if (gridDim.x > 1) { z0 = arrBlk_0[blockIdx.x]; } else { z0 = (Int32) 0; } } const int start = blockIdx.x * intervalSize; const int end = min(start + intervalSize, shapeSize); const int numElements = end - start; int seg; for (seg = threadIdx.x; seg < numElements; seg += blockDim.x) { const int ix = start + seg; x0 = indexArray(arrIn0_0, ix); if (threadIdx.x == 0) { x0 = z0 + x0; } sdata0[threadIdx.x] = x0; __syncthreads(); if (blockDim.x > 1) { if (threadIdx.x >= 1) { y0 = sdata0[threadIdx.x - 1]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 2) { if (threadIdx.x >= 2) { y0 = sdata0[threadIdx.x - 2]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 4) { if (threadIdx.x >= 4) { y0 = sdata0[threadIdx.x - 4]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 8) { if (threadIdx.x >= 8) { y0 = sdata0[threadIdx.x - 8]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 16) { if (threadIdx.x >= 16) { y0 = sdata0[threadIdx.x - 16]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 32) { if (threadIdx.x >= 32) { y0 = sdata0[threadIdx.x - 32]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 64) { if (threadIdx.x >= 64) { y0 = sdata0[threadIdx.x - 64]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 128) { if (threadIdx.x >= 128) { y0 = sdata0[threadIdx.x - 128]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 256) { if (threadIdx.x >= 256) { y0 = sdata0[threadIdx.x - 256]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (blockDim.x > 512) { if (threadIdx.x >= 512) { y0 = sdata0[threadIdx.x - 512]; x0 = y0 + x0; } __syncthreads(); sdata0[threadIdx.x] = x0; __syncthreads(); } if (1) { if (threadIdx.x == 0) { x0 = z0; } else { x0 = sdata0[threadIdx.x - 1]; } } arrOut_0[ix] = x0; if (threadIdx.x == 0) { const int last = min(numElements - seg, blockDim.x) - 1; z0 = sdata0[last]; } } if (threadIdx.x == 0 && blockIdx.x == gridDim.x - 1) { arrSum_0[0] = z0; } }
033537bc7f7900d594637cea8ce275b4fcce7a57.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/datasets/make_regression.hpp> #include <random/make_regression.cuh> namespace ML { namespace Datasets { template <typename DataT, typename IdxT> void make_regression_helper(const raft::handle_t& handle, DataT* out, DataT* values, IdxT n_rows, IdxT n_cols, IdxT n_informative, DataT* coef, IdxT n_targets, DataT bias, IdxT effective_rank, DataT tail_strength, DataT noise, bool shuffle, uint64_t seed) { const auto& handle_impl = handle; hipStream_t stream = handle_impl.get_stream(); hipblasHandle_t cublas_handle = handle_impl.get_cublas_handle(); hipsolverDnHandle_t cusolver_handle = handle_impl.get_cusolver_dn_handle(); auto allocator = handle_impl.get_device_allocator(); MLCommon::Random::make_regression( handle, out, values, n_rows, n_cols, n_informative, stream, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } void make_regression(const raft::handle_t& handle, float* out, float* values, int64_t n_rows, int64_t n_cols, int64_t n_informative, float* coef, int64_t n_targets, float bias, int64_t effective_rank, float tail_strength, float noise, bool shuffle, uint64_t seed) { make_regression_helper(handle, out, values, n_rows, n_cols, n_informative, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } void make_regression(const raft::handle_t& handle, double* out, double* values, int64_t n_rows, int64_t n_cols, int64_t n_informative, double* coef, int64_t n_targets, double bias, int64_t effective_rank, double tail_strength, double noise, bool shuffle, uint64_t seed) { make_regression_helper(handle, out, values, n_rows, n_cols, n_informative, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } void make_regression(const raft::handle_t& handle, float* out, float* values, int n_rows, int n_cols, int n_informative, float* coef, int n_targets, float bias, int effective_rank, float tail_strength, float noise, bool shuffle, uint64_t seed) { make_regression_helper(handle, out, values, n_rows, n_cols, n_informative, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } void make_regression(const raft::handle_t& handle, double* out, double* values, int n_rows, int n_cols, int n_informative, double* coef, int n_targets, double bias, int effective_rank, double tail_strength, double noise, bool shuffle, uint64_t seed) { make_regression_helper(handle, out, values, n_rows, n_cols, n_informative, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } } // namespace Datasets } // namespace ML
033537bc7f7900d594637cea8ce275b4fcce7a57.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/datasets/make_regression.hpp> #include <random/make_regression.cuh> namespace ML { namespace Datasets { template <typename DataT, typename IdxT> void make_regression_helper(const raft::handle_t& handle, DataT* out, DataT* values, IdxT n_rows, IdxT n_cols, IdxT n_informative, DataT* coef, IdxT n_targets, DataT bias, IdxT effective_rank, DataT tail_strength, DataT noise, bool shuffle, uint64_t seed) { const auto& handle_impl = handle; cudaStream_t stream = handle_impl.get_stream(); cublasHandle_t cublas_handle = handle_impl.get_cublas_handle(); cusolverDnHandle_t cusolver_handle = handle_impl.get_cusolver_dn_handle(); auto allocator = handle_impl.get_device_allocator(); MLCommon::Random::make_regression( handle, out, values, n_rows, n_cols, n_informative, stream, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } void make_regression(const raft::handle_t& handle, float* out, float* values, int64_t n_rows, int64_t n_cols, int64_t n_informative, float* coef, int64_t n_targets, float bias, int64_t effective_rank, float tail_strength, float noise, bool shuffle, uint64_t seed) { make_regression_helper(handle, out, values, n_rows, n_cols, n_informative, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } void make_regression(const raft::handle_t& handle, double* out, double* values, int64_t n_rows, int64_t n_cols, int64_t n_informative, double* coef, int64_t n_targets, double bias, int64_t effective_rank, double tail_strength, double noise, bool shuffle, uint64_t seed) { make_regression_helper(handle, out, values, n_rows, n_cols, n_informative, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } void make_regression(const raft::handle_t& handle, float* out, float* values, int n_rows, int n_cols, int n_informative, float* coef, int n_targets, float bias, int effective_rank, float tail_strength, float noise, bool shuffle, uint64_t seed) { make_regression_helper(handle, out, values, n_rows, n_cols, n_informative, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } void make_regression(const raft::handle_t& handle, double* out, double* values, int n_rows, int n_cols, int n_informative, double* coef, int n_targets, double bias, int effective_rank, double tail_strength, double noise, bool shuffle, uint64_t seed) { make_regression_helper(handle, out, values, n_rows, n_cols, n_informative, coef, n_targets, bias, effective_rank, tail_strength, noise, shuffle, seed); } } // namespace Datasets } // namespace ML
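A minimal host-side sketch of how the float/int64_t overload defined above might be called, assuming the caller already holds a raft::handle_t, that -1 for effective_rank means no low-rank structure, and buffer sizes of n_rows*n_cols for the data, n_rows*n_targets for the targets, and n_cols*n_targets for the coefficients; the function name and all argument values are illustrative assumptions, not taken from this file.

#include <cuml/datasets/make_regression.hpp>
#include <cuda_runtime.h>

/* Illustrative caller of ML::Datasets::make_regression (float, int64_t overload). */
void exampleMakeRegression(const raft::handle_t& handle)
{
    const int64_t n_rows = 1000, n_cols = 16, n_informative = 8, n_targets = 1;
    float *d_data, *d_targets, *d_coef;
    cudaMalloc(&d_data, n_rows * n_cols * sizeof(float));
    cudaMalloc(&d_targets, n_rows * n_targets * sizeof(float));
    cudaMalloc(&d_coef, n_cols * n_targets * sizeof(float));

    ML::Datasets::make_regression(handle, d_data, d_targets, n_rows, n_cols, n_informative,
                                  d_coef, n_targets,
                                  /*bias=*/0.0f, /*effective_rank=*/(int64_t)-1,
                                  /*tail_strength=*/0.5f, /*noise=*/0.0f,
                                  /*shuffle=*/true, /*seed=*/42ULL);

    cudaFree(d_data); cudaFree(d_targets); cudaFree(d_coef);
}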
0cb660d0de5264ebc78624f9bee63469b7c9b4f9.hip
// !!! This is a file automatically generated by hipify!!! #include "sphere_hip.cuh" __device__ bool Sphere::hit(const Ray& r, float t_min, float t_max, HitRecord& rec) const { Vec3 oc = r.origin() - center_; float a = dot(r.direction(), r.direction()); float b = dot(oc, r.direction()); float c = dot(oc, oc) - radius_ * radius_; float discriminant = b * b - a * c; if (discriminant > 0) { float tmp = (-b - sqrt(discriminant)) / a; // if(t_min < tmp && tmp < t_max) { if (tmp < t_max && tmp > t_min) { rec.t = tmp; rec.p = r.point_at_parameter(rec.t); rec.normal = (rec.p - center_) / radius_; rec.mat_ptr = mat_ptr_; return true; } tmp = (-b + sqrt(discriminant)) / a; if (tmp < t_max && tmp > t_min) { rec.t = tmp; rec.p = r.point_at_parameter(rec.t); rec.normal = (rec.p - center_) / radius_; rec.mat_ptr = mat_ptr_; return true; } } return false; }
0cb660d0de5264ebc78624f9bee63469b7c9b4f9.cu
#include "sphere.cuh" __device__ bool Sphere::hit(const Ray& r, float t_min, float t_max, HitRecord& rec) const { Vec3 oc = r.origin() - center_; float a = dot(r.direction(), r.direction()); float b = dot(oc, r.direction()); float c = dot(oc, oc) - radius_ * radius_; float discriminant = b * b - a * c; if (discriminant > 0) { float tmp = (-b - sqrt(discriminant)) / a; // if(t_min < tmp && tmp < t_max) { if (tmp < t_max && tmp > t_min) { rec.t = tmp; rec.p = r.point_at_parameter(rec.t); rec.normal = (rec.p - center_) / radius_; rec.mat_ptr = mat_ptr_; return true; } tmp = (-b + sqrt(discriminant)) / a; if (tmp < t_max && tmp > t_min) { rec.t = tmp; rec.p = r.point_at_parameter(rec.t); rec.normal = (rec.p - center_) / radius_; rec.mat_ptr = mat_ptr_; return true; } } return false; }
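For reference, the algebra that Sphere::hit implements (standard ray-sphere intersection, with O = r.origin(), D = r.direction(), C = center_ and R = radius_), written out once because the code uses the half-coefficient form:

\[ \lVert O + tD - C \rVert^2 = R^2 \;\Longrightarrow\; a\,t^2 + 2b\,t + c = 0, \qquad a = D\cdot D,\quad b = (O-C)\cdot D,\quad c = (O-C)\cdot(O-C) - R^2, \]
\[ t = \frac{-b \pm \sqrt{b^2 - ac}}{a}, \]

so the code's discriminant b*b - a*c has the same sign as the full discriminant (2b)^2 - 4ac, and the two candidate roots it tests against (t_min, t_max), nearer one first, are exactly these.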
7469b19d455997a95847586bb68e86bd377de8bb.hip
// !!! This is a file automatically generated by hipify!!! #ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define LIMIT -999 #include <stdio.h> #include <string.h> #include <stdlib.h> #include <sys/time.h> #include <hip/hip_runtime.h> // kernel #define SCORE(i, j) input_itemsets_l[j + i * (BLOCK_SIZE+1)] #define REF(i, j) reference_l[j + i * BLOCK_SIZE] __device__ __host__ int maximum( int a, int b, int c){ int k; if( a <= b ) k = b; else k = a; if( k <=c ) return(c); else return(k); } //global variables int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; // local variables void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); fprintf(stderr, "\t<file> - filename\n"); exit(1); } double get_time() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } __global__ void kernel1 (int* d_input_itemsets, const int* d_reference, const int offset_r, const int offset_c, const int max_cols, const int blk, const int penalty) { __shared__ int input_itemsets_l [(BLOCK_SIZE + 1) *(BLOCK_SIZE+1)]; __shared__ int 
reference_l [BLOCK_SIZE*BLOCK_SIZE]; int bx = blockIdx.x; int tx = threadIdx.x; // Base elements int base = offset_r * max_cols + offset_c; int b_index_x = bx; int b_index_y = blk - 1 - bx; int index = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( max_cols + 1 ); int index_n = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( max_cols ); int index_nw = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; if (tx == 0) SCORE(tx, 0) = d_input_itemsets[index_nw + tx]; __syncthreads(); for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) { REF(ty, tx) = d_reference[index + max_cols * ty]; } __syncthreads(); SCORE((tx + 1), 0) = d_input_itemsets[index_w + max_cols * tx]; __syncthreads(); SCORE(0, (tx + 1)) = d_input_itemsets[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } __syncthreads(); for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) { d_input_itemsets[index + max_cols * ty] = SCORE((ty+1), (tx+1)); } } __global__ void kernel2 (int* d_input_itemsets, const int* d_reference, const int block_width, const int offset_r, const int offset_c, const int max_cols, const int blk, const int penalty) { __shared__ int input_itemsets_l [(BLOCK_SIZE + 1) *(BLOCK_SIZE+1)]; __shared__ int reference_l [BLOCK_SIZE*BLOCK_SIZE]; int bx = blockIdx.x; int tx = threadIdx.x; // Base elements int base = offset_r * max_cols + offset_c; int b_index_x = bx + block_width - blk ; int b_index_y = block_width - bx -1; int index = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( max_cols + 1 ); int index_n = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( max_cols ); int index_nw = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; if (tx == 0) SCORE(tx, 0) = d_input_itemsets[index_nw]; for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) REF(ty, tx) = d_reference[index + max_cols * ty]; __syncthreads(); SCORE((tx + 1), 0) = d_input_itemsets[index_w + max_cols * tx]; __syncthreads(); SCORE(0, (tx + 1)) = d_input_itemsets[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), 
(t_index_x)) - (penalty)); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) d_input_itemsets[index + ty * max_cols] = SCORE((ty+1), (tx+1)); } int main(int argc, char **argv){ printf("WG size of kernel = %d \n", BLOCK_SIZE); int max_rows_t, max_cols_t, penalty_t; // the lengths of the two sequences should be able to divided by 16. // And at current stage max_rows needs to equal max_cols if (argc == 3) { max_rows_t = atoi(argv[1]); max_cols_t = atoi(argv[1]); penalty_t = atoi(argv[2]); } else{ usage(argc, argv); } if(atoi(argv[1])%16!=0){ fprintf(stderr,"The dimension values must be a multiple of 16\n"); exit(1); } // make constant variable to avoid kernel argument set at every loop iteration const int max_rows = max_rows_t + 1; const int max_cols = max_cols_t + 1; const int penalty = penalty_t; int *reference; int *input_itemsets; int *output_itemsets; reference = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); srand(7); //initialization for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } for( int i=1; i< max_rows ; i++){ //initialize the cols input_itemsets[i*max_cols] = rand() % 10 + 1; } for( int j=1; j< max_cols ; j++){ //initialize the rows input_itemsets[j] = rand() % 10 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ reference[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[j] = -j * penalty; double offload_start = get_time(); int workgroupsize = BLOCK_SIZE; #ifdef DEBUG if(workgroupsize < 0){ printf("ERROR: invalid or missing <num_work_items>[/<work_group_size>]\n"); return -1; } #endif // set global and local workitems const size_t local_work = (size_t)workgroupsize; size_t global_work; const int worksize = max_cols - 1; #ifdef DEBUG printf("worksize = %d\n", worksize); #endif //these two parameters are for extension use, don't worry about it. 
const int offset_r = 0; const int offset_c = 0; const int block_width = worksize/BLOCK_SIZE ; int *d_input_itemsets; int *d_reference; hipMalloc((void**)&d_input_itemsets, max_cols * max_rows * sizeof(int)); hipMalloc((void**)&d_reference, max_cols * max_rows * sizeof(int)); hipMemcpy(d_input_itemsets, input_itemsets, max_cols * max_rows * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_reference, reference, max_cols * max_rows * sizeof(int), hipMemcpyHostToDevice); #ifdef DEBUG printf("Processing upper-left matrix\n"); #endif for( int blk = 1 ; blk <= block_width ; blk++){ global_work = blk; hipLaunchKernelGGL(( kernel1), dim3(global_work), dim3(local_work), 0, 0, d_input_itemsets, d_reference, offset_r, offset_c, max_cols, blk, penalty); } #ifdef DEBUG printf("Processing lower-right matrix\n"); #endif for( int blk = block_width - 1 ; blk >= 1 ; blk--){ global_work = blk; hipLaunchKernelGGL(( kernel2), dim3(global_work), dim3(local_work), 0, 0, d_input_itemsets, d_reference, block_width, offset_r, offset_c, max_cols, blk, penalty); } hipMemcpy(output_itemsets, d_input_itemsets, max_cols * max_rows * sizeof(int), hipMemcpyDeviceToHost); double offload_end = get_time(); printf("Device offloading time = %lf(s)\n", offload_end - offload_start); #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); fprintf(fpo, "print traceback value:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = nw + reference[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } fclose(fpo); #endif //printf("Computation Done\n"); free(reference); free(input_itemsets); free(output_itemsets); hipFree(d_input_itemsets); hipFree(d_reference); return 0; }
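Both kernel1 and kernel2 above sweep the anti-diagonals of a BLOCK_SIZE x BLOCK_SIZE tile and apply the same Needleman-Wunsch recurrence through the maximum() helper; in matrix form, with M the score matrix (input_itemsets), R the substitution scores (reference) and p the gap penalty:

\[ M_{i,j} = \max\bigl( M_{i-1,j-1} + R_{i,j},\; M_{i,j-1} - p,\; M_{i-1,j} - p \bigr), \]

with the first row and column initialised to -j*penalty and -i*penalty respectively in main() before the kernels are launched.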
7469b19d455997a95847586bb68e86bd377de8bb.cu
#ifdef RD_WG_SIZE_0_0 #define BLOCK_SIZE RD_WG_SIZE_0_0 #elif defined(RD_WG_SIZE_0) #define BLOCK_SIZE RD_WG_SIZE_0 #elif defined(RD_WG_SIZE) #define BLOCK_SIZE RD_WG_SIZE #else #define BLOCK_SIZE 16 #endif #define LIMIT -999 #include <stdio.h> #include <string.h> #include <stdlib.h> #include <sys/time.h> #include <cuda.h> // kernel #define SCORE(i, j) input_itemsets_l[j + i * (BLOCK_SIZE+1)] #define REF(i, j) reference_l[j + i * BLOCK_SIZE] __device__ __host__ int maximum( int a, int b, int c){ int k; if( a <= b ) k = b; else k = a; if( k <=c ) return(c); else return(k); } //global variables int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; // local variables void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); fprintf(stderr, "\t<file> - filename\n"); exit(1); } double get_time() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } __global__ void kernel1 (int* d_input_itemsets, const int* d_reference, const int offset_r, const int offset_c, const int max_cols, const int blk, const int penalty) { __shared__ int input_itemsets_l [(BLOCK_SIZE + 1) *(BLOCK_SIZE+1)]; __shared__ int reference_l [BLOCK_SIZE*BLOCK_SIZE]; int bx = blockIdx.x; int tx = 
threadIdx.x; // Base elements int base = offset_r * max_cols + offset_c; int b_index_x = bx; int b_index_y = blk - 1 - bx; int index = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( max_cols + 1 ); int index_n = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( max_cols ); int index_nw = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; if (tx == 0) SCORE(tx, 0) = d_input_itemsets[index_nw + tx]; __syncthreads(); for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) { REF(ty, tx) = d_reference[index + max_cols * ty]; } __syncthreads(); SCORE((tx + 1), 0) = d_input_itemsets[index_w + max_cols * tx]; __syncthreads(); SCORE(0, (tx + 1)) = d_input_itemsets[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } __syncthreads(); for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) { d_input_itemsets[index + max_cols * ty] = SCORE((ty+1), (tx+1)); } } __global__ void kernel2 (int* d_input_itemsets, const int* d_reference, const int block_width, const int offset_r, const int offset_c, const int max_cols, const int blk, const int penalty) { __shared__ int input_itemsets_l [(BLOCK_SIZE + 1) *(BLOCK_SIZE+1)]; __shared__ int reference_l [BLOCK_SIZE*BLOCK_SIZE]; int bx = blockIdx.x; int tx = threadIdx.x; // Base elements int base = offset_r * max_cols + offset_c; int b_index_x = bx + block_width - blk ; int b_index_y = block_width - bx -1; int index = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( max_cols + 1 ); int index_n = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( max_cols ); int index_nw = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; if (tx == 0) SCORE(tx, 0) = d_input_itemsets[index_nw]; for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) REF(ty, tx) = d_reference[index + max_cols * ty]; __syncthreads(); SCORE((tx + 1), 0) = d_input_itemsets[index_w + max_cols * tx]; __syncthreads(); SCORE(0, (tx + 1)) = d_input_itemsets[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } for ( int ty = 0 ; 
ty < BLOCK_SIZE ; ty++) d_input_itemsets[index + ty * max_cols] = SCORE((ty+1), (tx+1)); } int main(int argc, char **argv){ printf("WG size of kernel = %d \n", BLOCK_SIZE); int max_rows_t, max_cols_t, penalty_t; // the lengths of the two sequences should be able to divided by 16. // And at current stage max_rows needs to equal max_cols if (argc == 3) { max_rows_t = atoi(argv[1]); max_cols_t = atoi(argv[1]); penalty_t = atoi(argv[2]); } else{ usage(argc, argv); } if(atoi(argv[1])%16!=0){ fprintf(stderr,"The dimension values must be a multiple of 16\n"); exit(1); } // make constant variable to avoid kernel argument set at every loop iteration const int max_rows = max_rows_t + 1; const int max_cols = max_cols_t + 1; const int penalty = penalty_t; int *reference; int *input_itemsets; int *output_itemsets; reference = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); srand(7); //initialization for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } for( int i=1; i< max_rows ; i++){ //initialize the cols input_itemsets[i*max_cols] = rand() % 10 + 1; } for( int j=1; j< max_cols ; j++){ //initialize the rows input_itemsets[j] = rand() % 10 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ reference[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[j] = -j * penalty; double offload_start = get_time(); int workgroupsize = BLOCK_SIZE; #ifdef DEBUG if(workgroupsize < 0){ printf("ERROR: invalid or missing <num_work_items>[/<work_group_size>]\n"); return -1; } #endif // set global and local workitems const size_t local_work = (size_t)workgroupsize; size_t global_work; const int worksize = max_cols - 1; #ifdef DEBUG printf("worksize = %d\n", worksize); #endif //these two parameters are for extension use, don't worry about it. 
const int offset_r = 0; const int offset_c = 0; const int block_width = worksize/BLOCK_SIZE ; int *d_input_itemsets; int *d_reference; cudaMalloc((void**)&d_input_itemsets, max_cols * max_rows * sizeof(int)); cudaMalloc((void**)&d_reference, max_cols * max_rows * sizeof(int)); cudaMemcpy(d_input_itemsets, input_itemsets, max_cols * max_rows * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_reference, reference, max_cols * max_rows * sizeof(int), cudaMemcpyHostToDevice); #ifdef DEBUG printf("Processing upper-left matrix\n"); #endif for( int blk = 1 ; blk <= block_width ; blk++){ global_work = blk; kernel1<<<global_work, local_work>>>(d_input_itemsets, d_reference, offset_r, offset_c, max_cols, blk, penalty); } #ifdef DEBUG printf("Processing lower-right matrix\n"); #endif for( int blk = block_width - 1 ; blk >= 1 ; blk--){ global_work = blk; kernel2<<<global_work, local_work>>>(d_input_itemsets, d_reference, block_width, offset_r, offset_c, max_cols, blk, penalty); } cudaMemcpy(output_itemsets, d_input_itemsets, max_cols * max_rows * sizeof(int), cudaMemcpyDeviceToHost); double offload_end = get_time(); printf("Device offloading time = %lf(s)\n", offload_end - offload_start); #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); fprintf(fpo, "print traceback value:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } //traceback = maximum(nw, w, n); int new_nw, new_w, new_n; new_nw = nw + reference[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } fclose(fpo); #endif //printf("Computation Done\n"); free(reference); free(input_itemsets); free(output_itemsets); cudaFree(d_input_itemsets); cudaFree(d_reference); return 0; }
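As a cross-check for the two kernels above, which both score each cell as the maximum of (north-west + reference) and (north or west minus the gap penalty), a host-side reference of that recurrence could look like the following sketch. It is a hypothetical helper, not part of the original benchmark; it reuses the maximum() function defined above (which is __device__ __host__) and assumes the first row and column of input_itemsets are already initialized as in main().

// Hypothetical host reference for the scoring recurrence used by kernel1/kernel2.
// Assumes input_itemsets holds the initialized first row/column and reference
// holds the substitution scores, both sized max_rows * max_cols.
static void nw_host_reference(int *input_itemsets, const int *reference,
                              int max_rows, int max_cols, int penalty)
{
    for (int i = 1; i < max_rows; ++i) {
        for (int j = 1; j < max_cols; ++j) {
            int nw = input_itemsets[(i - 1) * max_cols + j - 1] + reference[i * max_cols + j];
            int w  = input_itemsets[i * max_cols + j - 1] - penalty;
            int n  = input_itemsets[(i - 1) * max_cols + j] - penalty;
            input_itemsets[i * max_cols + j] = maximum(nw, w, n);   // same maximum() as above
        }
    }
}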
ecb5a243067add8b72fe67ec4d9ca94f9968f734.hip
// !!! This is a file automatically generated by hipify!!!
#include <mpi.h>
#include <hiprand/hiprand.h>
#include <conf.h>
#include "inc/conf.h"
#include "d/q.h"
#include "d/api.h"
#include "d/ker.h"
#include "utils/error.h"
#include "inc/def.h"
#include "utils/msg.h"
#include "utils/cc.h"
#include "utils/mc.h"
#include "utils/kl.h"
#include "mpi/wrapper.h"
#include "inc/type.h"
#include "inc/dev.h"
#include "utils/imp.h"
#include "coords/imp.h"
#include "array3d/imp.h"
#include "tex3d/type.h"
#include "tex3d/imp.h"
#include "field/imp.h"
#include "bounce/imp.h"
#include "label/imp.h"
#include "math/tform/type.h"
#include "math/tform/imp.h"
#include "math/tform/dev.h"
#include "tform/imp.h"
#include "algo/utils/shfl.h"
#include "algo/utils/dev.h"
#include "type.h"
#include "imp.h"
#include "dev.h"

namespace sdf_dev {
#include "dev/main.h"
}

#include "imp/type.h"
#include "imp/gen.h"
#include "imp/split.h"
#include "imp/main.h"
ecb5a243067add8b72fe67ec4d9ca94f9968f734.cu
#include <mpi.h>
#include <curand.h>
#include <conf.h>
#include "inc/conf.h"
#include "d/q.h"
#include "d/api.h"
#include "d/ker.h"
#include "utils/error.h"
#include "inc/def.h"
#include "utils/msg.h"
#include "utils/cc.h"
#include "utils/mc.h"
#include "utils/kl.h"
#include "mpi/wrapper.h"
#include "inc/type.h"
#include "inc/dev.h"
#include "utils/imp.h"
#include "coords/imp.h"
#include "array3d/imp.h"
#include "tex3d/type.h"
#include "tex3d/imp.h"
#include "field/imp.h"
#include "bounce/imp.h"
#include "label/imp.h"
#include "math/tform/type.h"
#include "math/tform/imp.h"
#include "math/tform/dev.h"
#include "tform/imp.h"
#include "algo/utils/shfl.h"
#include "algo/utils/dev.h"
#include "type.h"
#include "imp.h"
#include "dev.h"

namespace sdf_dev {
#include "dev/main.h"
}

#include "imp/type.h"
#include "imp/gen.h"
#include "imp/split.h"
#include "imp/main.h"
905912a989984bd16bfe7b91afa81b924dfff430.hip
// !!! This is a file automatically generated by hipify!!!
#include "reduction_tools.h"
#include "colwisesum.h"
#include "colwisesum_kernel.h"

// usage: 'stand-alone' function (allocates inputdata on device)
// parameter:
//   x:     double pointer to the R matrix object
//   nCols: number of columns in matrix
//   nRows: number of rows in matrix
//   sums:  vector for final results
__host__ void colwisesum(const double * x, size_t nCols, size_t nRows, double * sums)
{
  // predefined settings
  int maxBlocks = 64;
  int maxThreads = 256;

  // to be computed
  int numBlocks = 0;
  int numThreads = 0;
  getNumBlocksAndThreads(nRows, maxBlocks, maxThreads, numBlocks, numThreads);

  double* d_idata = NULL;
  double* d_odata = NULL;
  hipMalloc((void**) &d_idata, nCols*nRows*sizeof(double));
  hipMalloc((void**) &d_odata, numBlocks*nCols*sizeof(double));

  // copy data to device memory
  hipMemcpy(d_idata, x, nCols*nRows*sizeof(double), hipMemcpyHostToDevice);

  // execute the kernel
  colwisesum_wrapper<double>(nRows, nCols, numThreads, numBlocks, d_idata, d_odata);

  // sum partial block sums on GPU
  int s = numBlocks;
  int cpuFinalThreshold = 1;
  while (s > cpuFinalThreshold)
  {
    int threads = 0, blocks = 0;
    getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads);
    colwisesum_wrapper<double>(s, nCols, threads, blocks, d_odata, d_odata);
    s = (s + (threads*2-1)) / (threads*2);
  }

  // copy final sum from device to host
  hipMemcpy( sums, d_odata, nCols*sizeof(double), hipMemcpyDeviceToHost);

  hipFree(d_idata);
  hipFree(d_odata);
}
905912a989984bd16bfe7b91afa81b924dfff430.cu
#include "reduction_tools.h"
#include "colwisesum.h"
#include "colwisesum_kernel.h"

// usage: 'stand-alone' function (allocates inputdata on device)
// parameter:
//   x:     double pointer to the R matrix object
//   nCols: number of columns in matrix
//   nRows: number of rows in matrix
//   sums:  vector for final results
__host__ void colwisesum(const double * x, size_t nCols, size_t nRows, double * sums)
{
  // predefined settings
  int maxBlocks = 64;
  int maxThreads = 256;

  // to be computed
  int numBlocks = 0;
  int numThreads = 0;
  getNumBlocksAndThreads(nRows, maxBlocks, maxThreads, numBlocks, numThreads);

  double* d_idata = NULL;
  double* d_odata = NULL;
  cudaMalloc((void**) &d_idata, nCols*nRows*sizeof(double));
  cudaMalloc((void**) &d_odata, numBlocks*nCols*sizeof(double));

  // copy data to device memory
  cudaMemcpy(d_idata, x, nCols*nRows*sizeof(double), cudaMemcpyHostToDevice);

  // execute the kernel
  colwisesum_wrapper<double>(nRows, nCols, numThreads, numBlocks, d_idata, d_odata);

  // sum partial block sums on GPU
  int s = numBlocks;
  int cpuFinalThreshold = 1;
  while (s > cpuFinalThreshold)
  {
    int threads = 0, blocks = 0;
    getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads);
    colwisesum_wrapper<double>(s, nCols, threads, blocks, d_odata, d_odata);
    s = (s + (threads*2-1)) / (threads*2);
  }

  // copy final sum from device to host
  cudaMemcpy( sums, d_odata, nCols*sizeof(double), cudaMemcpyDeviceToHost);

  cudaFree(d_idata);
  cudaFree(d_odata);
}
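A minimal usage sketch of the function above, assuming the usual R column-major layout (element (r, c) stored at x[c * nRows + r]) and that this translation unit links against the colwisesum() definition and its helpers from reduction_tools.h. The layout assumption and the expected values are illustrative, not taken from the original sources.

// Hypothetical driver: column-wise sums of a small column-major matrix.
#include <cstdio>
#include <vector>

int main()
{
    const size_t nRows = 4, nCols = 3;
    std::vector<double> x(nRows * nCols);
    for (size_t c = 0; c < nCols; ++c)
        for (size_t r = 0; r < nRows; ++r)
            x[c * nRows + r] = (double)(r + 1);   // every column holds 1,2,3,4 -> sums to 10

    std::vector<double> sums(nCols, 0.0);
    colwisesum(x.data(), nCols, nRows, sums.data());

    for (size_t c = 0; c < nCols; ++c)
        std::printf("col %zu: %f (expected 10)\n", c, sums[c]);
    return 0;
}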
cb7a16de7ecf4eb9da2e7fa3b97cdec3f218d255.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

__global__ void add(int *a, int *b, int *c) {
  *c = *a + *b;
}

int main(void) {
  // host copies of a, b, c
  int a, b, c;
  // device copies of a, b, c
  int *d_a, *d_b, *d_c;
  int size = sizeof(int);

  // Allocate memory for device copies of a, b, c
  hipMalloc((void **)&d_a, size);
  hipMalloc((void **)&d_b, size);
  hipMalloc((void **)&d_c, size);

  // Setup input values
  a = 2;
  b = 7;

  // Copy inputs to device
  hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
  hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);

  // Launch add() kernel on GPU
  hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, d_a, d_b, d_c);

  // Copy result back to host
  hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);

  // Cleanup
  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);

  // Print the result
  std::cout << a << " + " << b << " = " << c << std::endl;
  return 0;
}
cb7a16de7ecf4eb9da2e7fa3b97cdec3f218d255.cu
#include <iostream>

__global__ void add(int *a, int *b, int *c) {
  *c = *a + *b;
}

int main(void) {
  // host copies of a, b, c
  int a, b, c;
  // device copies of a, b, c
  int *d_a, *d_b, *d_c;
  int size = sizeof(int);

  // Allocate memory for device copies of a, b, c
  cudaMalloc((void **)&d_a, size);
  cudaMalloc((void **)&d_b, size);
  cudaMalloc((void **)&d_c, size);

  // Setup input values
  a = 2;
  b = 7;

  // Copy inputs to device
  cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);

  // Launch add() kernel on GPU
  add<<<1,1>>>(d_a, d_b, d_c);

  // Copy result back to host
  cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);

  // Cleanup
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);

  // Print the result
  std::cout << a << " + " << b << " = " << c << std::endl;
  return 0;
}
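The example above ignores the return codes of the runtime calls. A hedged variant of the same single-element add with basic error checking is sketched below; the CUDA_CHECK macro is a local helper introduced here for illustration, not part of the original file. cudaMalloc/cudaMemcpy return cudaError_t, and cudaGetLastError reports kernel-launch failures.

// Sketch: same add, with API return codes checked.
#include <cstdio>
#include <cstdlib>

#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            std::fprintf(stderr, "CUDA error: %s (%s:%d)\n",                \
                         cudaGetErrorString(err_), __FILE__, __LINE__);     \
            std::exit(EXIT_FAILURE);                                        \
        }                                                                   \
    } while (0)

__global__ void add_checked(int *a, int *b, int *c) { *c = *a + *b; }

int main(void) {
  int a = 2, b = 7, c = 0;
  int *d_a, *d_b, *d_c;
  const int size = sizeof(int);
  CUDA_CHECK(cudaMalloc((void **)&d_a, size));
  CUDA_CHECK(cudaMalloc((void **)&d_b, size));
  CUDA_CHECK(cudaMalloc((void **)&d_c, size));
  CUDA_CHECK(cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice));
  add_checked<<<1, 1>>>(d_a, d_b, d_c);
  CUDA_CHECK(cudaGetLastError());
  CUDA_CHECK(cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaFree(d_a));
  CUDA_CHECK(cudaFree(d_b));
  CUDA_CHECK(cudaFree(d_c));
  std::printf("%d + %d = %d\n", a, b, c);
  return 0;
}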
71aea26471df6409d64c9cd3386120324dbc2ee2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*****************************************************************************/
// nvcc -O1 -o bpsw bpsw.cu -lrt -lm
// Assertion to check for errors

__global__ void kernel_jacobi(long* nArray, long* dArray, long len)
{
  int bx = blockIdx.x; // ID thread
  int tx = threadIdx.x;
  int result, t;
  long d, dAbs, sign, temp, n1, d1;

  // Identify the row and column of the Pd element to work on
  long memIndex = bx*TILE_WIDTH + tx;

  if (memIndex < len) //out of bounds checking - some threads will be doing nothing
  {
    result = 0;
    dAbs = 5;
    sign = 1;
    while (result != -1) //if result != -1, increment d and try again
    {
      n1 = nArray[memIndex]; //reinitialize n1 to n
      d = dAbs*sign;
      t = 1;
      d1 = d; //reinitialize d1 to d
      d1 = d1 % n1;
      while (d1 != 0)
      {
        while (d1 % 2 == 0) //while d is even
        {
          d1 = d1 / 2;
          if (n1 % 8 == 3 || n1 % 8 == 5) t = -t;
        }
        temp = d1;
        d1 = n1;
        n1 = temp;
        if ((d1 % 4 == 3) && (n1 % 4 == 3)) t = -t;
        d1 = d1 % n1;
      }
      if (n1 == 1) result = t;
      else result = 0;
      dAbs = dAbs + 2;
      sign = sign * -1;
    }
  }
  __syncthreads();
  if (memIndex < len) dArray[memIndex] = d;
  __syncthreads();
}
71aea26471df6409d64c9cd3386120324dbc2ee2.cu
#include "includes.h"
/*****************************************************************************/
// nvcc -O1 -o bpsw bpsw.cu -lrt -lm
// Assertion to check for errors

__global__ void kernel_jacobi(long* nArray, long* dArray, long len)
{
  int bx = blockIdx.x; // ID thread
  int tx = threadIdx.x;
  int result, t;
  long d, dAbs, sign, temp, n1, d1;

  // Identify the row and column of the Pd element to work on
  long memIndex = bx*TILE_WIDTH + tx;

  if (memIndex < len) //out of bounds checking - some threads will be doing nothing
  {
    result = 0;
    dAbs = 5;
    sign = 1;
    while (result != -1) //if result != -1, increment d and try again
    {
      n1 = nArray[memIndex]; //reinitialize n1 to n
      d = dAbs*sign;
      t = 1;
      d1 = d; //reinitialize d1 to d
      d1 = d1 % n1;
      while (d1 != 0)
      {
        while (d1 % 2 == 0) //while d is even
        {
          d1 = d1 / 2;
          if (n1 % 8 == 3 || n1 % 8 == 5) t = -t;
        }
        temp = d1;
        d1 = n1;
        n1 = temp;
        if ((d1 % 4 == 3) && (n1 % 4 == 3)) t = -t;
        d1 = d1 % n1;
      }
      if (n1 == 1) result = t;
      else result = 0;
      dAbs = dAbs + 2;
      sign = sign * -1;
    }
  }
  __syncthreads();
  if (memIndex < len) dArray[memIndex] = d;
  __syncthreads();
}
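For reference, the per-thread loop above walks D = 5, -7, 9, -11, ... until the Jacobi symbol (D/n) computed by the binary algorithm is -1, as used for the Lucas step of BPSW. A hypothetical single-value host counterpart of the same loop (useful for checking dArray on the CPU; not part of the original file) could be:

// Host-side counterpart of kernel_jacobi for one odd n: returns the first D in
// 5, -7, 9, -11, ... whose Jacobi symbol (D/n) is -1, using the same steps as
// the device code above.
static long find_lucas_d(long n)
{
    long dAbs = 5, sign = 1, d = 0;
    int result = 0;
    while (result != -1) {
        long n1 = n;
        d = dAbs * sign;
        int t = 1;
        long d1 = d % n1;
        while (d1 != 0) {
            while (d1 % 2 == 0) {                    // strip factors of two
                d1 /= 2;
                if (n1 % 8 == 3 || n1 % 8 == 5) t = -t;
            }
            long temp = d1; d1 = n1; n1 = temp;      // reciprocity swap
            if ((d1 % 4 == 3) && (n1 % 4 == 3)) t = -t;
            d1 %= n1;
        }
        result = (n1 == 1) ? t : 0;
        dAbs += 2;
        sign = -sign;
    }
    return d;
}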
abe0825779fa851266c002c9891208852e80d9a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #include <math.h> using namespace std; __global__ void sum(float* input) { int tid = threadIdx.x; float number_of_threads = blockDim.x; int step_size = 1; while(number_of_threads > 0){ if(tid < number_of_threads) { int first = tid*step_size*2; int second = first + step_size; input[first] += input[second]; } step_size *= 2; number_of_threads = number_of_threads!=1 ? (int)ceil(number_of_threads/2) : 0; } } int main(int argc, char const *argv[]) { // User input int count; cout << "Enter size : "; cin >> count; // Host array float hostArray[count]; for (int i = 0; i < count; i++) hostArray[i] = rand()%count+1; // Device array float *deviceArray; hipMalloc(&deviceArray, count*sizeof(float)); hipMemcpy(deviceArray, hostArray, count*sizeof(float), hipMemcpyHostToDevice); // Cuda code hipLaunchKernelGGL(( sum), dim3(1), dim3((count/2)+1), 0, 0, deviceArray); float mean; hipMemcpy(&mean, deviceArray, sizeof(float), hipMemcpyDeviceToHost); mean = (float)mean/count; cout << "Elements : "; for(int i = 0; i < count; i++) cout << hostArray[i] << " "; cout << "\nArithmetic mean: " << mean << endl; // Recalculation for(int i = 0; i < count; i++) hostArray[i] = (hostArray[i]-mean)*(hostArray[i]-mean); hipMemcpy(deviceArray, hostArray, count*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( sum), dim3(1), dim3((count/2)+1), 0, 0, deviceArray); float variance; hipMemcpy(&variance, deviceArray, sizeof(float), hipMemcpyDeviceToHost); variance = (float)variance/count; cout << "Standard deviation : " << sqrt(variance) << endl; } /* Enter size : 3 Elements : 2 2 1 Arithmetic mean: 1.66667 Standard deviation : 0.471404 */
abe0825779fa851266c002c9891208852e80d9a6.cu
#include <iostream> #include <stdio.h> #include <math.h> using namespace std; __global__ void sum(float* input) { int tid = threadIdx.x; float number_of_threads = blockDim.x; int step_size = 1; while(number_of_threads > 0){ if(tid < number_of_threads) { int first = tid*step_size*2; int second = first + step_size; input[first] += input[second]; } step_size *= 2; number_of_threads = number_of_threads!=1 ? (int)ceil(number_of_threads/2) : 0; } } int main(int argc, char const *argv[]) { // User input int count; cout << "Enter size : "; cin >> count; // Host array float hostArray[count]; for (int i = 0; i < count; i++) hostArray[i] = rand()%count+1; // Device array float *deviceArray; cudaMalloc(&deviceArray, count*sizeof(float)); cudaMemcpy(deviceArray, hostArray, count*sizeof(float), cudaMemcpyHostToDevice); // Cuda code sum<<<1, (count/2)+1>>>(deviceArray); float mean; cudaMemcpy(&mean, deviceArray, sizeof(float), cudaMemcpyDeviceToHost); mean = (float)mean/count; cout << "Elements : "; for(int i = 0; i < count; i++) cout << hostArray[i] << " "; cout << "\nArithmetic mean: " << mean << endl; // Recalculation for(int i = 0; i < count; i++) hostArray[i] = (hostArray[i]-mean)*(hostArray[i]-mean); cudaMemcpy(deviceArray, hostArray, count*sizeof(float), cudaMemcpyHostToDevice); sum<<<1, (count/2)+1>>>(deviceArray); float variance; cudaMemcpy(&variance, deviceArray, sizeof(float), cudaMemcpyDeviceToHost); variance = (float)variance/count; cout << "Standard deviation : " << sqrt(variance) << endl; } /* Enter size : 3 Elements : 2 2 1 Arithmetic mean: 1.66667 Standard deviation : 0.471404 */
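The program above computes the arithmetic mean with a tree reduction and then re-reduces the squared deviations to get sqrt(sum((x - mean)^2) / n), i.e. the population standard deviation. A plain host-side check of the same two formulas, driven by the sample run quoted in the trailing comment (3 elements: 2, 2, 1), is sketched below; it is an illustrative helper, not part of the original file.

// Host reference for the same statistics as the GPU path above.
#include <cmath>
#include <cstdio>

static void mean_stddev_host(const float *x, int n, float *mean, float *stddev)
{
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) sum += x[i];
    *mean = sum / n;

    float var = 0.0f;
    for (int i = 0; i < n; ++i) var += (x[i] - *mean) * (x[i] - *mean);
    *stddev = std::sqrt(var / n);                 // population standard deviation
}

int main()
{
    const float x[3] = {2.0f, 2.0f, 1.0f};        // sample run from the comment above
    float mean, stddev;
    mean_stddev_host(x, 3, &mean, &stddev);
    std::printf("mean = %f, stddev = %f\n", mean, stddev);   // 1.66667, 0.471404
    return 0;
}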
e1e312c5f308c4abd4a4ae206c73122e6315cd6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; if(y >= numRows || x >= numCols){ return; } int index = y * numCols + x; uchar4 color = rgbaImage[index]; unsigned char grey = (unsigned char)(0.299f * color.x + 0.587f * color.y + 0.114f * color.z); greyImage[index] = grey; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int blockwidth = 32; const dim3 blockSize(blockwidth, blockwidth, 1); //TODO int gridy = numRows/blockwidth +1; int gridx = numCols/blockwidth +1; const dim3 gridSize( gridx, gridy, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
e1e312c5f308c4abd4a4ae206c73122e6315cd6f.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset int y = threadIdx.y + blockIdx.y * blockDim.y; int x = threadIdx.x + blockIdx.x * blockDim.x; if(y >= numRows || x >= numCols){ return; } int index = y * numCols + x; uchar4 color = rgbaImage[index]; unsigned char grey = (unsigned char)(0.299f * color.x + 0.587f * color.y + 0.114f * color.z); greyImage[index] = grey; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int blockwidth = 32; const dim3 blockSize(blockwidth, blockwidth, 1); //TODO int gridy = numRows/blockwidth +1; int gridx = numCols/blockwidth +1; const dim3 gridSize( gridx, gridy, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
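A host-side reference of the same NTSC weighting (I = .299f * R + .587f * G + .114f * B) is handy for spot-checking the kernel output pixel by pixel. The sketch below is an illustrative helper, not part of the homework file; it relies only on the uchar4 type from the CUDA headers and the channel mapping described in the comments above.

// Host reference: greyscale via the NTSC weights used by rgba_to_greyscale.
#include <cstddef>

static void rgba_to_greyscale_host(const uchar4 *rgba, unsigned char *grey,
                                   size_t numRows, size_t numCols)
{
    for (size_t i = 0; i < numRows * numCols; ++i) {
        const uchar4 p = rgba[i];    // .x = R, .y = G, .z = B, .w = A (ignored)
        grey[i] = (unsigned char)(0.299f * p.x + 0.587f * p.y + 0.114f * p.z);
    }
}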
119102f4087be60c2bb783d7f7e569fe5d10bf92.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "multipass.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            double *input = NULL;
            hipMalloc(&input, XSIZE*YSIZE);
            double *output = NULL;
            hipMalloc(&output, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( multipass), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( multipass), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( multipass), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
119102f4087be60c2bb783d7f7e569fe5d10bf92.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "multipass.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            double *input = NULL;
            cudaMalloc(&input, XSIZE*YSIZE);
            double *output = NULL;
            cudaMalloc(&output, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            multipass<<<gridBlock,threadBlock>>>(input,output);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                multipass<<<gridBlock,threadBlock>>>(input,output);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                multipass<<<gridBlock,threadBlock>>>(input,output);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
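The harness above pads iXSIZE/iYSIZE up to the next multiple of the block size with a while loop before dividing; the usual ceil-division idiom produces the same grid in one expression. A small sketch of that equivalence (not a change to the generated benchmark):

// Same grid as the while-loop padding: ceil(XSIZE/BLOCKX) x ceil(YSIZE/BLOCKY).
static inline dim3 make_grid(int XSIZE, int YSIZE, int BLOCKX, int BLOCKY)
{
    return dim3((XSIZE + BLOCKX - 1) / BLOCKX,
                (YSIZE + BLOCKY - 1) / BLOCKY);
}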
5fafcca37763ea55cb4ab06fe9f5e2b636ac1879.hip
// !!! This is a file automatically generated by hipify!!! #ifndef __CUDACC_RTC__ #define __CUDACC_RTC__ #endif #include <hip/device_functions.h> #include <math.h> #include <hip/hip_runtime.h> #include "preprocess_hip.cuh" #include "error_util.h" using namespace std; __global__ void resize(float* srcData, float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap, int original_image_w, int original_image_h, int resized_w, int resized_h) { int idxCol = (int)ypixelPerBlock[blockIdx.y] + 1 + threadIdx.y; int idxRow = (int)xpixelPerBlock[blockIdx.x] + 1 + threadIdx.x; if ((int)xpixelPerBlock[blockIdx.x] == (int)xpixelPerBlock[blockIdx.x + 1] || (int)ypixelPerBlock[blockIdx.y] == (int)ypixelPerBlock[blockIdx.y + 1]) {} else { //idxCol = min(idxCol, (int)ypixelPerBlock[blockIdx.y + 1]); //idxRow = min(idxRow, (int)xpixelPerBlock[blockIdx.x + 1]); idxCol = min(idxCol, (int)((blockIdx.y + 1)*interpolation_ygap)); idxRow = min(idxRow, (int)((blockIdx.x + 1)*interpolation_xgap)); float X2 = interpolation_xgap * (blockIdx.x + 1); float X1 = interpolation_xgap * blockIdx.x; float Y2 = interpolation_ygap * (blockIdx.y + 1); float Y1 = interpolation_ygap * blockIdx.y; float px = ((X2 - (float)idxRow) / (X2 - X1)) * srcData[blockIdx.y * (original_image_w) + blockIdx.x] + (((float)idxRow - X1) / (X2 - X1)) * srcData[blockIdx.y * (original_image_w) + blockIdx.x + 1]; float qx = ((X2 - (float)idxRow) / (X2 - X1)) * srcData[(blockIdx.y + 1) * (original_image_w) + blockIdx.x] + (((float)idxRow - X1) / (X2 - X1)) * srcData[(blockIdx.y + 1) * (original_image_w) + blockIdx.x + 1]; dstData[idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1; //dstData[idxCol * resized_w + idxRow] = (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx); dstData[resized_w * resized_h * 1 + idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1; dstData[resized_w * resized_h * 2 + idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1; } } __global__ void copyChannel(float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap,int resized_w, int resized_h) { int idxCol = (int)ypixelPerBlock[blockIdx.y] + 1 + threadIdx.y; int idxRow = (int)xpixelPerBlock[blockIdx.x] + 1 + threadIdx.x; idxCol = min(idxCol, (int)ypixelPerBlock[blockIdx.y + 1]); idxRow = min(idxRow, (int)xpixelPerBlock[blockIdx.x + 1]); float X2 = interpolation_xgap * (blockIdx.x + 1); float X1 = interpolation_xgap * blockIdx.x; float Y2 = interpolation_ygap * (blockIdx.y + 1); float Y1 = interpolation_ygap * blockIdx.y; dstData[resized_w * resized_h * 1 + idxCol * resized_w + idxRow] = dstData[idxCol * resized_w + idxRow]; dstData[resized_w * resized_h * 2 + idxCol * resized_w + idxRow] = dstData[idxCol * resized_w + idxRow]; } void resizeCuda(float* srcData, float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap, int original_image_w, int original_image_h, int resized_w, int resized_h, dim3 threadsPerBlock, dim3 numOfBlocks) { resize << < numOfBlocks, threadsPerBlock >> > ( srcData, dstData, xpixelPerBlock, ypixelPerBlock, interpolation_xgap, interpolation_ygap, original_image_w, original_image_h, resized_w, resized_h); 
//hipDeviceSynchronize(); /*copyChannel <<<numOfBlocks, threadsPerBlock>>>(dstData, xpixelPerBlock, ypixelPerBlock, interpolation_xgap, interpolation_ygap, resized_w, resized_h); hipDeviceSynchronize();*/ return; }
5fafcca37763ea55cb4ab06fe9f5e2b636ac1879.cu
#ifndef __CUDACC_RTC__ #define __CUDACC_RTC__ #endif #include <device_functions.h> #include <math.h> #include <cuda.h> #include "preprocess.cuh" #include "error_util.h" using namespace std; __global__ void resize(float* srcData, float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap, int original_image_w, int original_image_h, int resized_w, int resized_h) { int idxCol = (int)ypixelPerBlock[blockIdx.y] + 1 + threadIdx.y; int idxRow = (int)xpixelPerBlock[blockIdx.x] + 1 + threadIdx.x; if ((int)xpixelPerBlock[blockIdx.x] == (int)xpixelPerBlock[blockIdx.x + 1] || (int)ypixelPerBlock[blockIdx.y] == (int)ypixelPerBlock[blockIdx.y + 1]) {} else { //idxCol = min(idxCol, (int)ypixelPerBlock[blockIdx.y + 1]); //idxRow = min(idxRow, (int)xpixelPerBlock[blockIdx.x + 1]); idxCol = min(idxCol, (int)((blockIdx.y + 1)*interpolation_ygap)); idxRow = min(idxRow, (int)((blockIdx.x + 1)*interpolation_xgap)); float X2 = interpolation_xgap * (blockIdx.x + 1); float X1 = interpolation_xgap * blockIdx.x; float Y2 = interpolation_ygap * (blockIdx.y + 1); float Y1 = interpolation_ygap * blockIdx.y; float px = ((X2 - (float)idxRow) / (X2 - X1)) * srcData[blockIdx.y * (original_image_w) + blockIdx.x] + (((float)idxRow - X1) / (X2 - X1)) * srcData[blockIdx.y * (original_image_w) + blockIdx.x + 1]; float qx = ((X2 - (float)idxRow) / (X2 - X1)) * srcData[(blockIdx.y + 1) * (original_image_w) + blockIdx.x] + (((float)idxRow - X1) / (X2 - X1)) * srcData[(blockIdx.y + 1) * (original_image_w) + blockIdx.x + 1]; dstData[idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1; //dstData[idxCol * resized_w + idxRow] = (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx); dstData[resized_w * resized_h * 1 + idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1; dstData[resized_w * resized_h * 2 + idxCol * resized_w + idxRow] = 2 * (((Y2 - (float)idxCol) / (Y2 - Y1)) * px + (((float)idxCol - Y1) / (Y2 - Y1)) * qx) / float(255) - 1; } } __global__ void copyChannel(float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap,int resized_w, int resized_h) { int idxCol = (int)ypixelPerBlock[blockIdx.y] + 1 + threadIdx.y; int idxRow = (int)xpixelPerBlock[blockIdx.x] + 1 + threadIdx.x; idxCol = min(idxCol, (int)ypixelPerBlock[blockIdx.y + 1]); idxRow = min(idxRow, (int)xpixelPerBlock[blockIdx.x + 1]); float X2 = interpolation_xgap * (blockIdx.x + 1); float X1 = interpolation_xgap * blockIdx.x; float Y2 = interpolation_ygap * (blockIdx.y + 1); float Y1 = interpolation_ygap * blockIdx.y; dstData[resized_w * resized_h * 1 + idxCol * resized_w + idxRow] = dstData[idxCol * resized_w + idxRow]; dstData[resized_w * resized_h * 2 + idxCol * resized_w + idxRow] = dstData[idxCol * resized_w + idxRow]; } void resizeCuda(float* srcData, float* dstData, float* xpixelPerBlock, float* ypixelPerBlock, float interpolation_xgap, float interpolation_ygap, int original_image_w, int original_image_h, int resized_w, int resized_h, dim3 threadsPerBlock, dim3 numOfBlocks) { resize << < numOfBlocks, threadsPerBlock >> > ( srcData, dstData, xpixelPerBlock, ypixelPerBlock, interpolation_xgap, interpolation_ygap, original_image_w, original_image_h, resized_w, resized_h); //cudaDeviceSynchronize(); /*copyChannel <<<numOfBlocks, threadsPerBlock>>>(dstData, 
xpixelPerBlock, ypixelPerBlock, interpolation_xgap, interpolation_ygap, resized_w, resized_h); cudaDeviceSynchronize();*/ return; }
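The resize kernel above blends the four neighbouring source pixels: px and qx interpolate along x between the two pixels of the upper and lower source rows, the result is blended along y, and the value is mapped from [0, 255] to [-1, 1] via 2*v/255 - 1. A hypothetical host-side sketch of that per-pixel formula (for checking a single output value; not part of the original preprocess code):

// Bilinear blend plus [-1, 1] normalization, mirroring the math in resize().
static float bilinear_normalized(float p00, float p10, float p01, float p11,
                                 float x, float x1, float x2,
                                 float y, float y1, float y2)
{
    float px = ((x2 - x) / (x2 - x1)) * p00 + ((x - x1) / (x2 - x1)) * p10;  // upper row
    float qx = ((x2 - x) / (x2 - x1)) * p01 + ((x - x1) / (x2 - x1)) * p11;  // lower row
    float v  = ((y2 - y) / (y2 - y1)) * px + ((y - y1) / (y2 - y1)) * qx;    // blend in y
    return 2.0f * v / 255.0f - 1.0f;
}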
9b773ff11ee63a527d2541f1fae1018371e299a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for conversion operators. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Conversion template template <typename Destination, typename Source, int Count> __global__ void convert( cutlass::Array<Destination, Count> *destination, cutlass::Array<Source, Count> const *source) { cutlass::NumericArrayConverter<Destination, Source, Count> convert; *destination = convert(*source); } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace core } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32_to_f16_rn) { int const kN = 1; using Source = float; using Destination = cutlass::half_t; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<float, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = float(i); } source.sync_device(); hipLaunchKernelGGL(( test::core::kernel::convert<Destination, Source, 1>), dim3(grid), dim3(block) , 0, 0, reinterpret_cast<cutlass::Array<Destination, 1> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, 1> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == source.host_data()[i]); } } TEST(NumericConversion, f32x8_to_f16x8_rn) { int const kN = 8; using Source = float; using Destination = cutlass::half_t; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = float(i); } source.sync_device(); hipLaunchKernelGGL(( test::core::kernel::convert<Destination, Source, kN>), dim3(grid), dim3(block) , 0, 0, reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == source.host_data()[i]); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f16_to_f32_rn) { int const kN = 1; using Source = cutlass::half_t; using Destination = float; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<float, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = Source(i); } source.sync_device(); hipLaunchKernelGGL(( test::core::kernel::convert<Destination, Source, kN>), dim3(grid), dim3(block) , 0, 0, reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == float(source.host_data()[i])); } } TEST(NumericConversion, f16x8_to_f32x8_rn) { int const kN = 8; using Source = 
cutlass::half_t; using Destination = float; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<float, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = float(i); } source.sync_device(); hipLaunchKernelGGL(( test::core::kernel::convert<Destination, Source, kN>), dim3(grid), dim3(block) , 0, 0, reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == float(source.host_data()[i])); } } /////////////////////////////////////////////////////////////////////////////////////////////////
9b773ff11ee63a527d2541f1fae1018371e299a0.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Unit tests for conversion operators. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace test { namespace core { namespace kernel { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Conversion template template <typename Destination, typename Source, int Count> __global__ void convert( cutlass::Array<Destination, Count> *destination, cutlass::Array<Source, Count> const *source) { cutlass::NumericArrayConverter<Destination, Source, Count> convert; *destination = convert(*source); } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace kernel } // namespace core } // namespace test ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f32_to_f16_rn) { int const kN = 1; using Source = float; using Destination = cutlass::half_t; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<float, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = float(i); } source.sync_device(); test::core::kernel::convert<Destination, Source, 1><<< grid, block >>>( reinterpret_cast<cutlass::Array<Destination, 1> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, 1> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == source.host_data()[i]); } } TEST(NumericConversion, f32x8_to_f16x8_rn) { int const kN = 8; using Source = float; using Destination = cutlass::half_t; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<Destination, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<Source, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = float(i); } source.sync_device(); test::core::kernel::convert<Destination, Source, kN><<< grid, block >>>( reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == source.host_data()[i]); } } ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(NumericConversion, f16_to_f32_rn) { int const kN = 1; using Source = cutlass::half_t; using Destination = float; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<float, cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = Source(i); } source.sync_device(); test::core::kernel::convert<Destination, Source, kN><<< grid, block >>>( reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == float(source.host_data()[i])); } } TEST(NumericConversion, f16x8_to_f32x8_rn) { int const kN = 8; using Source = cutlass::half_t; using Destination = float; dim3 grid(1, 1); dim3 block(1, 1); cutlass::HostTensor<float, 
cutlass::layout::RowMajor> destination({1, kN}); cutlass::HostTensor<cutlass::half_t, cutlass::layout::RowMajor> source({1, kN}); for (int i = 0; i < kN; ++i) { source.host_data()[i] = float(i); } source.sync_device(); test::core::kernel::convert<Destination, Source, kN><<< grid, block >>>( reinterpret_cast<cutlass::Array<Destination, kN> *>(destination.device_data()), reinterpret_cast<cutlass::Array<Source, kN> const *>(source.device_data()) ); destination.sync_host(); for (int i = 0; i < kN; ++i) { EXPECT_TRUE(float(destination.host_data()[i]) == float(source.host_data()[i])); } } /////////////////////////////////////////////////////////////////////////////////////////////////